From: Malcolm Beattie
Date: Fri, 27 Feb 1998 18:06:41 +0000 (+0000)
Subject: Make refcounts atomic for threading (dependent on appropriate
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=dce16143c60882f40eb787063b9483c00bb82139;p=p5sagit%2Fp5-mst-13.2.git

Make refcounts atomic for threading (dependent on appropriate
arch-dependent and compiler-dependent definitions in atomic.h,
or else falls back to a global mutex to protect refcounts).

p4raw-id: //depot/perl@598
---

diff --git a/atomic.h b/atomic.h
new file mode 100644
index 0000000..714bf23
--- /dev/null
+++ b/atomic.h
@@ -0,0 +1,85 @@
+#ifdef __GNUC__
+
+/*
+ * These atomic operations copied from the linux kernel and altered
+ * only slightly. I need to get official permission to distribute
+ * under the Artistic License.
+ */
+/* We really need to integrate the atomic typedef with the typedef
+ * used by sv_refcnt of an SV. It's possible that for CPUs like alpha
+ * where we'd need to up sv_refcnt from 32 to 64 bits, we may be better
+ * off sticking with EMULATE_ATOMIC_REFCOUNTS instead.
+ */
+typedef U32 atomic_t;   /* kludge */
+
+#ifdef i386
+
+#  ifdef NO_SMP
+#    define LOCK ""
+#  else
+#    define LOCK "lock ; "
+#  endif
+
+#  define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+static __inline__ void atomic_inc(atomic_t *v)
+{
+    __asm__ __volatile__(
+        LOCK "incl %0"
+        :"=m" (__atomic_fool_gcc(v))
+        :"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+    unsigned char c;
+
+    __asm__ __volatile__(
+        LOCK "decl %0; sete %1"
+        :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+        :"m" (__atomic_fool_gcc(v)));
+    return c != 0;
+}
+#else
+/* XXX What symbol does gcc define for sparc64? */
+#  ifdef sparc64
+#    define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)
+typedef U32 atomic_t;
+extern __inline__ void atomic_add(int i, atomic_t *v)
+{
+    __asm__ __volatile__("
+1:  lduw        [%1], %%g5
+    add         %%g5, %0, %%g7
+    cas         [%1], %%g5, %%g7
+    sub         %%g5, %%g7, %%g5
+    brnz,pn     %%g5, 1b
+     nop"
+    : /* No outputs */
+    : "HIr" (i), "r" (__atomic_fool_gcc(v))
+    : "g5", "g7", "memory");
+}
+
+extern __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+    unsigned long oldval;
+    __asm__ __volatile__("
+1:  lduw        [%2], %%g5
+    sub         %%g5, %1, %%g7
+    cas         [%2], %%g5, %%g7
+    sub         %%g5, %%g7, %%g5
+    brnz,pn     %%g5, 1b
+     sub        %%g7, %1, %0"
+    : "=&r" (oldval)
+    : "HIr" (i), "r" (__atomic_fool_gcc(v))
+    : "g5", "g7", "memory");
+    return (int)oldval;
+}
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+/* Add further gcc architectures here */
+#  endif /* sparc64 */
+#endif /* i386 */
+#else
+/* Add non-gcc native atomic operations here */
+#  define EMULATE_ATOMIC_REFCOUNTS
+#endif /* __GNUC__ */
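The i386 and sparc64 branches above hand-code the only two primitives the refcounting scheme needs: an increment, and a decrement that reports whether the count reached zero. As a point of comparison for anyone filling in the "Add further gcc architectures here" slot, here is a portable sketch of the same pair. It is not part of this patch (C11 <stdatomic.h> is an anachronism for 1998); it only illustrates the contract the assembly implements:

    #include <stdatomic.h>

    typedef atomic_uint atomic_t;

    /* Increment the count; taking a new reference needs no
     * ordering guarantees, so relaxed suffices. */
    static inline void atomic_inc(atomic_t *v)
    {
        atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
    }

    /* Decrement the count and return nonzero iff it reached zero.
     * fetch_sub returns the value *before* the subtraction, so the
     * count hit zero exactly when the old value was 1. */
    static inline int atomic_dec_and_test(atomic_t *v)
    {
        return atomic_fetch_sub_explicit(v, 1, memory_order_acq_rel) == 1;
    }

memory_order_acq_rel on the decrement is the conventional choice for refcounts: it makes every thread's final writes to the object visible to whichever thread sees the count hit zero and performs the destruction. Note the sign of the test: atomic_sub_return in the sparc64 code above returns the value after the subtraction and compares against 0, while fetch_sub returns the value before it and compares against 1.
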
diff --git a/perl.c b/perl.c
index 0c52557..6d222ce 100644
--- a/perl.c
+++ b/perl.c
@@ -130,6 +130,9 @@ perl_construct(register PerlInterpreter *sv_interp)
     COND_INIT(&eval_cond);
     MUTEX_INIT(&threads_mutex);
     COND_INIT(&nthreads_cond);
+#ifdef EMULATE_ATOMIC_REFCOUNTS
+    MUTEX_INIT(&svref_mutex);
+#endif /* EMULATE_ATOMIC_REFCOUNTS */
     thr = init_main_thread();
 #endif /* USE_THREADS */
 
diff --git a/perlvars.h b/perlvars.h
index 8a72312..69206a5 100644
--- a/perlvars.h
+++ b/perlvars.h
@@ -19,6 +19,7 @@ PERLVAR(Geval_owner, struct perl_thread *) /* Owner thread for doeval */
 PERLVAR(Gnthreads, int)             /* Number of threads currently */
 PERLVAR(Gthreads_mutex, perl_mutex) /* Mutex for nthreads and thread list */
 PERLVAR(Gnthreads_cond, perl_cond)  /* Condition variable for nthreads */
+PERLVAR(Gsvref_mutex, perl_mutex)   /* Mutex for SvREFCNT_{inc,dec} */
 PERLVARI(Gthreadsv_names, char *, THREADSV_NAMES)
 #ifdef FAKE_THREADS
 PERLVAR(Gcurthr, struct perl_thread *) /* Currently executing (fake) thread */
 
diff --git a/sv.c b/sv.c
index 0edfdb2..af74e28 100644
--- a/sv.c
+++ b/sv.c
@@ -2870,13 +2870,15 @@ SV *
 sv_newref(SV *sv)
 {
     if (sv)
-        SvREFCNT(sv)++;
+        ATOMIC_INC(SvREFCNT(sv));
     return sv;
 }
 
 void
 sv_free(SV *sv)
 {
+    int refcount_is_zero;
+
     if (!sv)
         return;
     if (SvREADONLY(sv)) {
@@ -2891,7 +2893,8 @@ sv_free(SV *sv)
         warn("Attempt to free unreferenced scalar");
         return;
     }
-    if (--SvREFCNT(sv) > 0)
+    ATOMIC_DEC_AND_TEST(refcount_is_zero, SvREFCNT(sv));
+    if (!refcount_is_zero)
         return;
 #ifdef DEBUGGING
     if (SvTEMP(sv)) {
 
diff --git a/sv.h b/sv.h
index 5993a8d..bf34547 100644
--- a/sv.h
+++ b/sv.h
@@ -72,13 +72,46 @@ struct io {
 #define SvFLAGS(sv)    (sv)->sv_flags
 #define SvREFCNT(sv)   (sv)->sv_refcnt
 
+#ifdef USE_THREADS
+
+#  ifndef EMULATE_ATOMIC_REFCOUNTS
+#    include "atomic.h"
+#  endif
+
+#  ifdef EMULATE_ATOMIC_REFCOUNTS
+#    define ATOMIC_INC(count) STMT_START {  \
+        MUTEX_LOCK(&svref_mutex);           \
+        ++count;                            \
+        MUTEX_UNLOCK(&svref_mutex);         \
+    } STMT_END
+#    define ATOMIC_DEC_AND_TEST(res,count) STMT_START { \
+        MUTEX_LOCK(&svref_mutex);           \
+        res = (--count == 0);               \
+        MUTEX_UNLOCK(&svref_mutex);         \
+    } STMT_END
+#  else
+#    define ATOMIC_INC(count) atomic_inc(&count)
+#    define ATOMIC_DEC_AND_TEST(res,count) (res = atomic_dec_and_test(&count))
+#  endif /* EMULATE_ATOMIC_REFCOUNTS */
+#else
+#  define ATOMIC_INC(count) (++count)
+#  define ATOMIC_DEC_AND_TEST(res,count) (res = (--count == 0))
+#endif /* USE_THREADS */
+
 #ifdef __GNUC__
-#  define SvREFCNT_inc(sv) ({SV* nsv=(SV*)(sv); if(nsv) ++SvREFCNT(nsv); nsv;})
+#  define SvREFCNT_inc(sv)                  \
+    ({                                      \
+        SV *nsv = (SV*)(sv);                \
+        if (nsv)                            \
+            ATOMIC_INC(SvREFCNT(nsv));      \
+        nsv;                                \
+    })
 #else
 #  if defined(CRIPPLED_CC) || defined(USE_THREADS)
 #    define SvREFCNT_inc(sv) sv_newref((SV*)sv)
 #  else
-#    define SvREFCNT_inc(sv) ((Sv=(SV*)(sv)), (Sv && ++SvREFCNT(Sv)), (SV*)Sv)
+#    define SvREFCNT_inc(sv) \
+        ((Sv=(SV*)(sv)), (Sv && ATOMIC_INC(SvREFCNT(Sv))), (SV*)Sv)
 #  endif
 #endif
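
All three sv.h branches honour one contract: ATOMIC_INC bumps a count, and ATOMIC_DEC_AND_TEST decrements it and stores into res whether it reached zero, which is why sv_free() above tests refcount_is_zero rather than the count itself. A self-contained sketch of the emulated path and its calling convention, using raw POSIX threads in place of Perl's MUTEX_* wrappers and a hypothetical release() standing in for sv_free(), might look like this:

    #include <pthread.h>

    static pthread_mutex_t svref_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Emulated atomics: every refcount change takes one global lock,
     * mirroring the EMULATE_ATOMIC_REFCOUNTS branch of sv.h above. */
    #define ATOMIC_INC(count) do {                \
            pthread_mutex_lock(&svref_mutex);     \
            ++(count);                            \
            pthread_mutex_unlock(&svref_mutex);   \
        } while (0)

    #define ATOMIC_DEC_AND_TEST(res, count) do {  \
            pthread_mutex_lock(&svref_mutex);     \
            (res) = (--(count) == 0);             \
            pthread_mutex_unlock(&svref_mutex);   \
        } while (0)

    /* Caller pattern, as in sv_free(): decrement first, then free
     * only if this call released the last reference. */
    static void release(unsigned int *refcnt)
    {
        int refcount_is_zero;
        ATOMIC_DEC_AND_TEST(refcount_is_zero, *refcnt);
        if (!refcount_is_zero)
            return;
        /* last reference gone: destroy the object here */
    }

The single global svref_mutex trades scalability for portability: every refcount change in every thread serializes on one lock, which is exactly the cost the native atomic.h implementations exist to avoid.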