[glibc] - Replace patch for 789238 with official version from upstream.
Jeffrey Law
law at fedoraproject.org
Fri Aug 10 15:44:50 UTC 2012
commit 49bbcdd4a054a68ea966c4eebbe926bbff94fdeb
Author: Jeff Law <law at redhat.com>
Date: Fri Aug 10 09:44:26 2012 -0600
- Replace patch for 789238 with official version from upstream.
glibc-rh789238-2.patch | 111 --------------------------------------
glibc-rh789238.patch | 139 +++++++++++++++++++++++++++++++++++++++++-------
glibc.spec | 15 ++---
3 files changed, 126 insertions(+), 139 deletions(-)
---
diff --git a/glibc-rh789238.patch b/glibc-rh789238.patch
index 1801123..403b840 100644
--- a/glibc-rh789238.patch
+++ b/glibc-rh789238.patch
@@ -1,21 +1,116 @@
-Only in b/malloc: arena.c.orig
-Only in b/malloc: hooks.c.orig
-diff -rup a/malloc/malloc.c b/malloc/malloc.c
---- a/malloc/malloc.c 2012-02-14 10:08:22.062534892 -0700
-+++ b/malloc/malloc.c 2012-02-14 10:19:43.088724473 -0700
-@@ -2936,8 +2936,9 @@ public_mALLOc(size_t bytes)
+2012-08-09 Jeff Law <law at redhat.com>
+
+ [BZ #13939]
+ * malloc.c/arena.c (reused_arena): New parameter, avoid_arena.
+ When avoid_arena is set, don't retry in that arena. Pick the
+ next one, whatever it might be.
+ (arena_get2): New parameter avoid_arena, pass through to reused_arena.
+ (arena_lock): Pass in new parameter to arena_get2.
+ * malloc/malloc.c (__libc_memalign): Pass in new parameter to
+ arena_get2.
+ (__libc_malloc): Unify retrying after main arena failure with
+ __libc_memalign version.
+ (__libc_valloc, __libc_pvalloc, __libc_calloc): Likewise.
+
+diff --git a/malloc/arena.c b/malloc/arena.c
+index 33c4ff3..7270bbe 100644
+--- a/malloc/arena.c
++++ b/malloc/arena.c
+@@ -120,14 +120,14 @@ int __malloc_initialized = -1;
+ if(ptr) \
+ (void)mutex_lock(&ptr->mutex); \
+ else \
+- ptr = arena_get2(ptr, (size)); \
++ ptr = arena_get2(ptr, (size), NULL); \
+ } while(0)
+ #else
+ # define arena_lock(ptr, size) do { \
+ if(ptr && !mutex_trylock(&ptr->mutex)) { \
+ THREAD_STAT(++(ptr->stat_lock_direct)); \
+ } else \
+- ptr = arena_get2(ptr, (size)); \
++ ptr = arena_get2(ptr, (size), NULL); \
+ } while(0)
+ #endif
+
+@@ -778,9 +778,11 @@ get_free_list (void)
+ return result;
+ }
+
+-
++/* Lock and return an arena that can be reused for memory allocation.
++ Avoid AVOID_ARENA as we have already failed to allocate memory in
++ it and it is currently locked. */
+ static mstate
+-reused_arena (void)
++reused_arena (mstate avoid_arena)
+ {
+ mstate result;
+ static mstate next_to_use;
+@@ -797,6 +799,11 @@ reused_arena (void)
+ }
+ while (result != next_to_use);
+
++ /* Avoid AVOID_ARENA as we have already failed to allocate memory
++ in that arena and it is currently locked. */
++ if (result == avoid_arena)
++ result = result->next;
++
+ /* No arena available. Wait for the next in line. */
+ (void)mutex_lock(&result->mutex);
+
+@@ -811,7 +818,7 @@ reused_arena (void)
+
+ static mstate
+ internal_function
+-arena_get2(mstate a_tsd, size_t size)
++arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
+ {
+ mstate a;
+
+@@ -856,7 +863,7 @@ arena_get2(mstate a_tsd, size_t size)
+ catomic_decrement (&narenas);
+ }
+ else
+- a = reused_arena ();
++ a = reused_arena (avoid_arena);
+ }
+ #else
+ if(!a_tsd)
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 28039b4..1e4f929 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -2865,9 +2865,11 @@ __libc_malloc(size_t bytes)
+ victim = _int_malloc(ar_ptr, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
} else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
+- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- (void)mutex_unlock(&main_arena.mutex);
++ /* ... or sbrk() has failed and there is still a chance to mmap()
++ Grab ar_ptr->next prior to releasing its lock. */
+ mstate prev = ar_ptr->next ? ar_ptr : 0;
+ (void)mutex_unlock(&ar_ptr->mutex);
-+ ar_ptr = arena_get2(prev, bytes);
++ ar_ptr = arena_get2(prev, bytes, ar_ptr);
if(ar_ptr) {
victim = _int_malloc(ar_ptr, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
-@@ -3151,23 +3152,26 @@ public_vALLOc(size_t bytes)
+@@ -3043,10 +3045,11 @@ __libc_memalign(size_t alignment, size_t bytes)
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ } else {
+- /* ... or sbrk() has failed and there is still a chance to mmap() */
++ /* ... or sbrk() has failed and there is still a chance to mmap()
++ Grab ar_ptr->next prior to releasing its lock. */
+ mstate prev = ar_ptr->next ? ar_ptr : 0;
+ (void)mutex_unlock(&ar_ptr->mutex);
+- ar_ptr = arena_get2(prev, bytes);
++ ar_ptr = arena_get2(prev, bytes, ar_ptr);
+ if(ar_ptr) {
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+@@ -3083,23 +3086,27 @@ __libc_valloc(size_t bytes)
if(!ar_ptr)
return 0;
p = _int_valloc(ar_ptr, bytes);
@@ -29,11 +124,13 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
p = _int_memalign(ar_ptr, pagesz, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
} else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
+- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
++ /* ... or sbrk() has failed and there is still a chance to mmap()
++ Grab ar_ptr->next prior to releasing its lock. */
+ mstate prev = ar_ptr->next ? ar_ptr : 0;
+ (void)mutex_unlock(&ar_ptr->mutex);
-+ ar_ptr = arena_get2(prev, bytes);
++ ar_ptr = arena_get2(prev, bytes, ar_ptr);
if(ar_ptr) {
p = _int_memalign(ar_ptr, pagesz, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
@@ -41,11 +138,11 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
}
- }
+ } else
-+ (void)mutex_unlock(&ar_ptr->mutex);
++ (void)mutex_unlock (&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
-@@ -3195,24 +3199,26 @@ public_pVALLOc(size_t bytes)
+@@ -3127,24 +3134,27 @@ __libc_pvalloc(size_t bytes)
arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
p = _int_pvalloc(ar_ptr, bytes);
@@ -59,12 +156,14 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
(void)mutex_unlock(&ar_ptr->mutex);
} else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
+- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
- bytes + 2*pagesz + MINSIZE);
++ /* ... or sbrk() has failed and there is still a chance to mmap()
++ Grab ar_ptr->next prior to releasing its lock. */
+ mstate prev = ar_ptr->next ? ar_ptr : 0;
+ (void)mutex_unlock(&ar_ptr->mutex);
-+ ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE);
++ ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, ar_ptr);
if(ar_ptr) {
p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
(void)mutex_unlock(&ar_ptr->mutex);
@@ -76,7 +175,7 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
-@@ -3277,8 +3283,6 @@ public_cALLOc(size_t n, size_t elem_size
+@@ -3209,8 +3219,6 @@ __libc_calloc(size_t n, size_t elem_size)
#endif
mem = _int_malloc(av, sz);
@@ -85,7 +184,7 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
av == arena_for_chunk(mem2chunk(mem)));
-@@ -3286,21 +3290,23 @@ public_cALLOc(size_t n, size_t elem_size
+@@ -3218,21 +3226,24 @@ __libc_calloc(size_t n, size_t elem_size)
if (mem == 0) {
/* Maybe the failure is due to running out of mmapped areas. */
if(av != &main_arena) {
@@ -94,13 +193,15 @@ diff -rup a/malloc/malloc.c b/malloc/malloc.c
mem = _int_malloc(&main_arena, sz);
(void)mutex_unlock(&main_arena.mutex);
} else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
+- /* ... or sbrk() has failed and there is still a chance to mmap() */
- (void)mutex_lock(&main_arena.mutex);
- av = arena_get2(av->next ? av : 0, sz);
- (void)mutex_unlock(&main_arena.mutex);
++ /* ... or sbrk() has failed and there is still a chance to mmap()
++ Grab av->next prior to releasing its lock. */
+ mstate prev = av->next ? av : 0;
+ (void)mutex_unlock(&av->mutex);
-+ av = arena_get2(prev, sz);
++ av = arena_get2(prev, sz, av);
if(av) {
mem = _int_malloc(av, sz);
(void)mutex_unlock(&av->mutex);
diff --git a/glibc.spec b/glibc.spec
index 3bcf203..751eb68 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -28,7 +28,7 @@
Summary: The GNU libc libraries
Name: glibc
Version: %{glibcversion}
-Release: 7%{?dist}
+Release: 8%{?dist}
# GPLv2+ is used in a bunch of programs, LGPLv2+ is used for libraries.
# Things that are linked directly into dynamically linked programs
# and shared libraries (e.g. crt files, lib*_nonshared.a) have an additional
@@ -109,6 +109,7 @@ Patch0034: %{name}-rh841318.patch
#
# Patches from upstream
#
+Patch1025: %{name}-rh789238.patch
Patch1035: %{name}-rh845960.patch
@@ -141,9 +142,6 @@ Patch2022: %{name}-rh791161.patch
# Upstream BZ 9954
Patch2024: %{name}-rh739743.patch
-# Upstream BZ 13939
-Patch2025: %{name}-rh789238.patch
-
#Upstream BZ 13818
Patch2026: %{name}-rh800224.patch
@@ -153,9 +151,6 @@ Patch2027: %{name}-rh827510.patch
Patch2028: %{name}-rh803286.patch
-# Upstream BZ 13939
-Patch2029: %{name}-rh789238-2.patch
-
# Upstream BZ 13761
Patch2030: %{name}-rh788989-2.patch
@@ -413,11 +408,10 @@ rm -rf %{glibcportsdir}
%patch2021 -p1
%patch2022 -p1
%patch2024 -p1
-%patch2025 -p1
+%patch1025 -p1
%patch2026 -p1
%patch2027 -p1
%patch2028 -p1
-%patch2029 -p1
%patch2030 -p1
%patch2031 -p1
%patch2032 -p1
@@ -1308,6 +1302,9 @@ rm -f *.filelist*
%endif
%changelog
+* Fri Aug 10 2012 Jeff Law <law at redhat.com> - 2.16-8
+ - Replace patch for 789238 with official version from upstream.
+
* Wed Jul 25 2012 Jeff Law <law at redhat.com> - 2.16-7
- Pack IPv4 servers at the start of nsaddr_list and
only track the number of IPV4 servers in EXT(statp->nscounti (#808147)
More information about the scm-commits
mailing list