java-openjdk/java-openjdk-s390-size_t.patch
Severin Gehwolf eb6f5c2dfb Clean up patches.
- Remove unneeded ones.
- Rename patches for clarity. Update JDK-8201788 from upstream.
- Add patches needed to build on more arches (Zero).
2018-04-23 10:19:42 +02:00
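
The common pattern in this patch: HotSpot's MIN2/MIN3/MAX2 helpers are plain
function templates, roughly "template<class T> T MIN2(T a, T b)", so template
argument deduction fails when one argument is a uintx (uintptr_t) VM flag and
the other is a size_t value and the two are distinct types, as they evidently
are on the s390 target this patch is named for. The casts below, and the few
declarations switched from uintx to size_t, simply give both arguments the
same type. A minimal sketch of the failure mode, using illustrative flag and
variable names rather than the real VM flag machinery:

#include <stddef.h>
#include <stdint.h>

template <class T>
T MIN2(T a, T b) { return (a < b) ? a : b; }   // mirrors HotSpot's helper

typedef uintptr_t uintx;                       // HotSpot's unsigned flag type

uintx InitialCodeCacheSize = 2 * 1024 * 1024;  // stand-in for the real -XX flag

int main() {
  size_t reserved = 4 * 1024 * 1024;

  // Where uintx and size_t are distinct types, T cannot be deduced from the
  // mixed arguments and the next line does not compile:
  //   size_t initial = MIN2(InitialCodeCacheSize, reserved);

  // Casting one argument gives both the same type, so deduction succeeds.
  size_t initial = MIN2((size_t)InitialCodeCacheSize, reserved);
  return initial <= reserved ? 0 : 1;
}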

diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -405,7 +405,7 @@
   add_heap(heap);
 
   // Reserve Space
-  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
+  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
   size_initial = align_up(size_initial, os::vm_page_size());
   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
     vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp
@@ -200,7 +200,7 @@
   const size_t num_overflow_elems = of_stack->size();
   const size_t space_available = queue->max_elems() - queue->size();
   const size_t num_take_elems = MIN3(space_available / 4,
-                                     ParGCDesiredObjsFromOverflowList,
+                                     (size_t)ParGCDesiredObjsFromOverflowList,
                                      num_overflow_elems);
   // Transfer the most recent num_take_elems from the overflow
   // stack to our work queue.
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -2296,7 +2296,7 @@
   // of things to do) or totally (at the very end).
   size_t target_size;
   if (partially) {
-    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
+    target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
   } else {
     target_size = 0;
   }
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
@@ -31,7 +31,7 @@
 }
 
 size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
-  size_t words_to_scan = MIN2(remaining, ObjArrayMarkingStride);
+  size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
 
   if (remaining > ObjArrayMarkingStride) {
     push_array_slice(start_from + ObjArrayMarkingStride);
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -100,7 +100,7 @@
   return reserved_size() - committed_size();
 }
 
-size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
   return (addr - _low_boundary) / _page_size;
 }
 
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
@@ -115,7 +115,7 @@
   const size_t beg_index = size_t(index);
   assert(beg_index < len || len == 0, "index too large");
 
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
   const size_t end_index = beg_index + stride;
   T* const base = (T*)obj->base();
   T* const beg = base + beg_index;
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -905,8 +905,8 @@
 void PSParallelCompact::initialize_dead_wood_limiter()
 {
   const size_t max = 100;
-  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
-  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
+  _dwl_mean = double(MIN2((size_t)ParallelOldDeadWoodLimiterMean, max)) / 100.0;
+  _dwl_std_dev = double(MIN2((size_t)ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
   DEBUG_ONLY(_dwl_initialized = true;)
   _dwl_adjustment = normal_distribution(1.0);
diff --git a/src/hotspot/share/gc/shared/plab.cpp b/src/hotspot/share/gc/shared/plab.cpp
--- a/src/hotspot/share/gc/shared/plab.cpp
+++ b/src/hotspot/share/gc/shared/plab.cpp
@@ -32,7 +32,7 @@
 
 size_t PLAB::min_size() {
   // Make sure that we return something that is larger than AlignmentReserve
-  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
+  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (size_t)oopDesc::header_size())) + AlignmentReserve;
 }
 
 size_t PLAB::max_size() {
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -1096,7 +1096,7 @@
 WB_END
 
 WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
-  uintx result;
+  size_t result;
   if (GetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAt)) {
     ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
     return longBox(thread, env, result);
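
The WB_GetSizeTVMFlag hunk above shows the same mismatch from the callee's
side: GetVMFlag<size_t> passes its result back through &result, which must
point to a size_t once the template is instantiated for size_t, so the local
has to be declared size_t rather than uintx on targets where the two are
distinct types. A small sketch with stand-in names (get_flag is illustrative,
not the real whitebox helper):

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t uintx;                       // HotSpot's unsigned flag type

// Stand-in for the whitebox GetVMFlag template: the out-parameter is a T*,
// so the caller's variable must have exactly the instantiated type.
template <typename T>
bool get_flag(const char* /*name*/, T* value) {
  *value = T(128);
  return true;
}

int main() {
  // uintx wrong;
  // get_flag<size_t>("SomeFlag", &wrong);     // ill-formed where uintx != size_t

  size_t result;                               // matches the size_t instantiation
  return get_flag<size_t>("SomeFlag", &result) ? 0 : 1;
}
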
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -1586,7 +1586,7 @@
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
     FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
-                  MIN2(CODE_CACHE_DEFAULT_LIMIT, ReservedCodeCacheSize * 5));
+                  MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)(ReservedCodeCacheSize * 5)));
   }
   // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
   if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp
--- a/src/hotspot/share/runtime/arguments.hpp
+++ b/src/hotspot/share/runtime/arguments.hpp
@@ -328,7 +328,7 @@
   // Value of the conservative maximum heap alignment needed
   static size_t _conservative_max_heap_alignment;
 
-  static uintx _min_heap_size;
+  static size_t _min_heap_size;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;