source: trunk/third/gcc/boehm-gc/mark_rts.c @ 18474

Revision 18474, 16.8 KB checked in by ghudson, 21 years ago
This commit was generated by cvs2svn to compensate for changes in r18473, which included commits to RCS files with non-trunk default branches.
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "private/gc_priv.h"

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
 #      if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
 #      endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* If nonzero, don't register dynamic library   */
                        /* data segments.                               */

static int n_root_sets = 0;

        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
/* For debugging:       */
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) GC_static_roots[i].r_start,
                   (unsigned long) GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */

/* Primarily for debugging support:     */
/* Is the address p in one of the registered static                     */
/* root sections?                                                       */
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
        /* One-entry cache: index of the root set that satisfied the    */
        /* previous query.  Queries tend to exhibit locality.           */
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}

#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
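
/*
 * Illustrative example (not part of the original source): with
 * LOG_RT_SIZE = 6 and CPP_WORDSZ = 32, rt_hash reduces to the
 * following fold, so every bit of the address influences the bucket:
 *
 *      result = (word) addr;
 *      result ^= result >> 24;        -- since 32 > 4*LOG_RT_SIZE
 *      result ^= result >> 12;        -- 2*LOG_RT_SIZE
 *      result ^= result >> 6;         -- LOG_RT_SIZE
 *      result &= 0x3f;                -- RT_SIZE-1
 *
 * Root ranges that differ only in their high address bits therefore
 * still spread across all RT_SIZE buckets.
 */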

/* Is a range starting at b already in the table? If so return a        */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}

/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE */

#   define add_roots_to_index(p)

# endif

word GC_root_size = 0;  /* Total size of the registered root sections. */

void GC_add_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_add_roots_inner(b, e, FALSE);
    UNLOCK();
    ENABLE_SIGNALS();
}
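
/*
 * Usage sketch (not part of the original source; my_table is a
 * hypothetical client symbol): a client whose statically allocated
 * table may hold the only references to collected objects registers
 * it once at startup:
 *
 *      static void * my_table[1024];
 *      GC_add_roots((char *)my_table, (char *)(my_table + 1024));
 *
 * Registering the same interval twice is a cheap no-op; the range is
 * then scanned on every collection until GC_clear_roots() is called.
 */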

/* Add [b,e) to the root set.  Adding the same interval a second time   */
/* is a moderately fast noop, and hence benign.  We do not handle       */
/* different but overlapping intervals efficiently.  (We do handle      */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before                */
/* reregistering dynamic libraries.                                     */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                if ((ptr_t)b < old -> r_start) {
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
              other = GC_static_roots + i;
              b = (char *)(other -> r_start);
              e = (char *)(other -> r_end);
              if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                if ((ptr_t)b < old -> r_start) {
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= other -> r_tmp;
                /* Delete this entry by moving the last one into its slot. */
                  GC_root_size -= (other -> r_end - other -> r_start);
                  other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                  other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                  other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                  n_root_sets--;
              }
            }
          return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
        if ((ptr_t)e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += (ptr_t)e - old -> r_end;
        old -> r_end = (ptr_t)e;
        return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
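
/*
 * Illustrative trace of the Win32 merge path (not part of the original
 * source): with existing roots [0x1000,0x2000) and [0x3000,0x4000),
 * GC_add_roots_inner(0x1800, 0x3800, FALSE) first widens the first
 * entry to [0x1000,0x3800); the merge loop then absorbs the second
 * entry, leaving the single root [0x1000,0x4000) with GC_root_size
 * increased by 0x1000.
 */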

static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots GC_PROTO((void))
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    }
#   endif
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Internal use only; lock held.        */
void GC_remove_tmp_roots()
{
    register int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_root_size -=
                (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
            GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
            GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
            GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
            n_root_sets--;
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE)
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
        for (i = 0; i < n_root_sets; i++)
                add_roots_to_index(GC_static_roots + i);
    }
#   endif
}

#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:         */
/* Is the address p in one of the temporary static root sections?       */
GC_bool GC_is_tmp_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */

ptr_t GC_approx_sp()
{
    word dummy;

    /* MSVC warning 4172 ("returning address of local variable or      */
    /* temporary") is suppressed deliberately: the address of dummy    */
    /* serves as an approximation of the current stack pointer.        */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
#   endif
    return((ptr_t)(&dummy));
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use.      */

/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the     */
/* GC data structures).                                                    */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high    */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
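
/*
 * Illustrative example (not part of the original source): with the two
 * exclusions [0x1000,0x2000) and [0x5000,0x6000) in the table,
 *
 *      GC_next_exclusion(0x1800) returns the first entry (contains 0x1800),
 *      GC_next_exclusion(0x3000) returns the second entry (the first
 *                                whose e_end lies above 0x3000),
 *      GC_next_exclusion(0x7000) returns 0.
 */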

void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards   */
          next -> e_start = (ptr_t)start;
          return;
      }
      next_index = next - GC_excl_table;
    } else {
      next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    /* Shift subsequent entries up to keep the table sorted.  The bounds */
    /* check above must precede this, since the shift writes one slot    */
    /* past the current end of the table.                                */
    for (i = GC_excl_table_entries; i > next_index; --i) {
      GC_excl_table[i] = GC_excl_table[i-1];
    }
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
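
/*
 * Usage sketch (not part of the original source; buffer is a
 * hypothetical client symbol): a client keeping a large pointer-free
 * buffer in its data segment can keep the collector from scanning it:
 *
 *      static char buffer[1 << 20];
 *      GC_exclude_static_roots(buffer, buffer + sizeof(buffer));
 *
 * Excluding [c,a) when [a,b) is already present extends the existing
 * entry backwards instead of consuming a new table slot.
 */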

/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
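
/*
 * Illustrative trace (not part of the original source): pushing
 * [0x1000,0x9000) with the exclusions [0x2000,0x3000) and
 * [0x5000,0x6000) registered yields three calls:
 *
 *      GC_push_conditional(0x1000, 0x2000, all);
 *      GC_push_conditional(0x3000, 0x5000, all);
 *      GC_push_conditional(0x6000, 0x9000, all);
 */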

/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 */
void GC_push_current_stack(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   if defined(THREADS)
        if (0 == cold_gc_frame) return;
#       ifdef STACK_GROWS_DOWN
          GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
          /* For IA64, the register stack backing store is handled      */
          /* in the thread-specific code.                               */
#       else
          GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#       endif
#   else
#       ifdef STACK_GROWS_DOWN
            GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                               cold_gc_frame );
#           ifdef IA64
              /* We also need to push the register stack backing store. */
              /* This should really be done in the same way as the      */
              /* regular stack.  For now we fudge it a bit.             */
              /* Note that the backing store grows up, so we can't use  */
              /* GC_push_all_stack_partially_eager.                     */
              {
                extern word GC_save_regs_ret_val;
                        /* Previously set to backing store pointer.     */
                ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
                ptr_t cold_gc_bs_pointer;
                if (GC_all_interior_pointers) {
                  cold_gc_bs_pointer = bsp - 2048;
                  if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                    cold_gc_bs_pointer = BACKING_STORE_BASE;
                  } else {
                    GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
                  }
                } else {
                  cold_gc_bs_pointer = BACKING_STORE_BASE;
                }
                GC_push_all_eager(cold_gc_bs_pointer, bsp);
                /* All values should be sufficiently aligned that we    */
                /* don't have to worry about the boundary.              */
              }
#           endif
#       else
            GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                               cold_gc_frame );
#       endif
#   endif /* !THREADS */
}

/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures GC_PROTO((void))
{
    GC_push_finalizer_structures();
    GC_push_stubborn_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists();
#endif

/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(all, cold_gc_frame)
GC_bool all;
ptr_t cold_gc_frame;
{
    register int i;

    /*
     * Push static data first.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
     /* Reregister dynamic libraries, in case one got added.    */
#      if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
           || defined(PCR)) && !defined(SRC_M3)
         GC_remove_tmp_roots();
         if (!GC_no_dls) GC_register_dynamic_libraries();
#      else
         GC_no_dls = TRUE;
#      endif

     /* Mark everything in static data areas                             */
       for (i = 0; i < n_root_sets; i++) {
         GC_push_conditional_with_exclusions(
                             GC_static_roots[i].r_start,
                             GC_static_roots[i].r_end, all);
       }

     /* Mark from GC internal roots if those might otherwise have       */
     /* been excluded.                                                  */
       if (GC_no_dls || roots_were_cleared) {
           GC_push_gc_structures();
       }

     /* Mark thread local free lists, even if their mark        */
     /* descriptor excludes the link field.                     */
#      ifdef THREAD_LOCAL_ALLOC
         GC_mark_thread_local_free_lists();
#      endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     */
#   ifdef USE_GENERIC_PUSH_REGS
        GC_generic_push_regs(cold_gc_frame);
        /* Also pushes stack, so that we catch callee-save registers    */
        /* saved inside the GC_push_regs frame.                         */
#   else
       /*
        * Push registers - i.e., call GC_push_one(r) for each
        * register contents r.
        */
        GC_push_regs(); /* usually defined in machine_dep.c */
        GC_push_current_stack(cold_gc_frame);
        /* In the threads case, this only pushes collector frames.      */
        /* In the case of linux threads on IA64, the hot section of     */
        /* the main stack is marked here, but the register stack        */
        /* backing store is handled in the threads-specific code.       */
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}