--- /dev/null
+#ifdef __GNUC__
+
+/*
+ * These atomic operations are copied from the Linux kernel and altered
+ * only slightly. I need to get official permission to distribute them
+ * under the Artistic License.
+ */
+/* We really need to integrate the atomic typedef with the typedef
+ * used for the sv_refcnt field of an SV. For CPUs such as Alpha,
+ * where sv_refcnt would have to grow from 32 to 64 bits, we may be
+ * better off sticking with EMULATE_ATOMIC_REFCOUNTS instead.
+ */
+typedef U32 atomic_t; /* kludge */
+
+#ifdef i386
+
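+/* The "lock" prefix makes incl/decl atomic with respect to other CPUs;
+ * a uniprocessor (NO_SMP) build can safely omit it. */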
+# ifdef NO_SMP
+# define LOCK ""
+# else
+# define LOCK "lock ; "
+# endif
+
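+/* Casting the pointer to a huge dummy struct makes gcc treat the whole
+ * object as the memory operand, so it will not cache *v in a register
+ * across the asm statement. */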
+# define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"m" (__atomic_fool_gcc(v)));
+}
+
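+/* Atomically decrement *v; "sete" captures the Zero flag left by "decl",
+ * so the result is true exactly when the new count is zero. */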
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "decl %0; sete %1"
+ :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+ :"m" (__atomic_fool_gcc(v)));
+ return c != 0;
+}
+#else /* !i386 */
+/* XXX What symbol does gcc define for sparc64? */
+# ifdef sparc64
+# define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)
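+/* Atomically add i to *v with a compare-and-swap retry loop: load the
+ * old value, compute old+i, and cas it back; if another CPU changed *v
+ * in the meantime, %g5 != %g7 after the cas and we branch back to 1:. */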
+extern __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+"1: lduw [%1], %%g5\n"
+" add %%g5, %0, %%g7\n"
+" cas [%1], %%g5, %%g7\n"
+" sub %%g5, %%g7, %%g5\n"
+" brnz,pn %%g5, 1b\n"
+"  nop\n"
+ : /* No outputs */
+ : "HIr" (i), "r" (__atomic_fool_gcc(v))
+ : "g5", "g7", "memory");
+}
+
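+/* Atomically subtract i from *v via the same cas retry loop and return
+ * the new value (computed in the branch delay slot). */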
+extern __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long oldval;
+ __asm__ __volatile__(
+"1: lduw [%2], %%g5\n"
+" sub %%g5, %1, %%g7\n"
+" cas [%2], %%g5, %%g7\n"
+" sub %%g5, %%g7, %%g5\n"
+" brnz,pn %%g5, 1b\n"
+"  sub %%g7, %1, %0\n"
+ : "=&r" (oldval)
+ : "HIr" (i), "r" (__atomic_fool_gcc(v))
+ : "g5", "g7", "memory");
+ return (int)oldval;
+}
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+/* Add further gcc architectures here */
+# else
+   /* No native atomic operations known for this gcc target, so fall
+    * back to the mutex-based emulation. */
+#  define EMULATE_ATOMIC_REFCOUNTS
+# endif /* sparc64 */
+#endif /* i386 */
+#else
+/* Add non-gcc native atomic operations here */
+# define EMULATE_ATOMIC_REFCOUNTS
+#endif
COND_INIT(&eval_cond);
MUTEX_INIT(&threads_mutex);
COND_INIT(&nthreads_cond);
+#ifdef EMULATE_ATOMIC_REFCOUNTS
+ MUTEX_INIT(&svref_mutex);
+#endif /* EMULATE_ATOMIC_REFCOUNTS */
thr = init_main_thread();
#endif /* USE_THREADS */
PERLVAR(Gnthreads, int) /* Number of threads currently */
PERLVAR(Gthreads_mutex, perl_mutex) /* Mutex for nthreads and thread list */
PERLVAR(Gnthreads_cond, perl_cond) /* Condition variable for nthreads */
+PERLVAR(Gsvref_mutex, perl_mutex) /* Mutex for SvREFCNT_{inc,dec} */
PERLVARI(Gthreadsv_names, char *, THREADSV_NAMES)
#ifdef FAKE_THREADS
PERLVAR(Gcurthr, struct perl_thread *) /* Currently executing (fake) thread */
sv_newref(SV *sv)
{
if (sv)
- SvREFCNT(sv)++;
+ ATOMIC_INC(SvREFCNT(sv));
return sv;
}
void
sv_free(SV *sv)
{
+ int refcount_is_zero;
+
if (!sv)
return;
if (SvREADONLY(sv)) {
warn("Attempt to free unreferenced scalar");
return;
}
- if (--SvREFCNT(sv) > 0)
+ ATOMIC_DEC_AND_TEST(refcount_is_zero, SvREFCNT(sv));
+ if (!refcount_is_zero)
return;
#ifdef DEBUGGING
if (SvTEMP(sv)) {
#define SvFLAGS(sv) (sv)->sv_flags
#define SvREFCNT(sv) (sv)->sv_refcnt
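+/* Refcount manipulation: use native atomic operations where available
+ * (atomic.h), otherwise serialise through svref_mutex; without threads
+ * these collapse to plain increments and decrements. */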
+#ifdef USE_THREADS
+
+# ifndef EMULATE_ATOMIC_REFCOUNTS
+# include "atomic.h"
+# endif
+
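+/* With no native atomic operations available, every refcount change is
+ * serialised through the global svref_mutex. */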
+# ifdef EMULATE_ATOMIC_REFCOUNTS
+# define ATOMIC_INC(count) STMT_START { \
+ MUTEX_LOCK(&svref_mutex); \
+ ++count; \
+ MUTEX_UNLOCK(&svref_mutex); \
+ } STMT_END
+# define ATOMIC_DEC_AND_TEST(res,count) STMT_START { \
+ MUTEX_LOCK(&svref_mutex); \
+ res = (--count == 0); \
+ MUTEX_UNLOCK(&svref_mutex); \
+ } STMT_END
+# else
+# define ATOMIC_INC(count) atomic_inc(&count)
+# define ATOMIC_DEC_AND_TEST(res,count) (res = atomic_dec_and_test(&count))
+# endif /* EMULATE_ATOMIC_REFCOUNTS */
+#else
+# define ATOMIC_INC(count) (++count)
+# define ATOMIC_DEC_AND_TEST(res,count) (res = (--count == 0))
+#endif /* USE_THREADS */
+
#ifdef __GNUC__
-# define SvREFCNT_inc(sv) ({SV* nsv=(SV*)(sv); if(nsv) ++SvREFCNT(nsv); nsv;})
+# define SvREFCNT_inc(sv) \
+ ({ \
+ SV *nsv = (SV*)(sv); \
+ if (nsv) \
+ ATOMIC_INC(SvREFCNT(nsv)); \
+ nsv; \
+ })
#else
# if defined(CRIPPLED_CC) || defined(USE_THREADS)
# define SvREFCNT_inc(sv) sv_newref((SV*)sv)
# else
-# define SvREFCNT_inc(sv) ((Sv=(SV*)(sv)), (Sv && ++SvREFCNT(Sv)), (SV*)Sv)
+# define SvREFCNT_inc(sv) \
+ ((Sv=(SV*)(sv)), (Sv && ATOMIC_INC(SvREFCNT(Sv))), (SV*)Sv)
# endif
#endif