Diffstat (limited to 'subversion/libsvn_subr/cache-membuffer.c')
-rw-r--r--  subversion/libsvn_subr/cache-membuffer.c  57
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/subversion/libsvn_subr/cache-membuffer.c b/subversion/libsvn_subr/cache-membuffer.c
index 131d914..7aa90cd 100644
--- a/subversion/libsvn_subr/cache-membuffer.c
+++ b/subversion/libsvn_subr/cache-membuffer.c
@@ -101,6 +101,21 @@
  * on their hash key.
  */
 
+/* APR's read-write lock implementation on Windows is horribly inefficient.
+ * Even with very low contention, a runtime overhead of 35 percent has been
+ * measured for 'svn-bench null-export' over ra_serf.
+ *
+ * Use a simple mutex on Windows.  Because there is one mutex per segment,
+ * large machines should (and usually can) be configured with large caches
+ * such that read contention is kept low.  This is basically the situation
+ * we had before 1.8.
+ */
+#ifdef WIN32
+# define USE_SIMPLE_MUTEX 1
+#else
+# define USE_SIMPLE_MUTEX 0
+#endif
+
 /* A 16-way associative cache seems to be a good compromise between
  * performance (worst-case lookups) and efficiency-loss due to collisions.
  *
@@ -465,11 +480,15 @@ struct svn_membuffer_t
    * the cache's creator doesn't feel the cache needs to be
    * thread-safe.
    */
+# if USE_SIMPLE_MUTEX
+  svn_mutex__t *lock;
+# else
   apr_thread_rwlock_t *lock;
+# endif
 
   /* If set, write access will wait until they get exclusive access.
    * Otherwise, they will become no-ops if the segment is currently
-   * read-locked.
+   * read-locked.  Only used when LOCK is an r/w lock.
    */
   svn_boolean_t allow_blocking_writes;
 #endif
@@ -489,12 +508,16 @@ static svn_error_t *
 read_lock_cache(svn_membuffer_t *cache)
 {
 #if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+  return svn_mutex__lock(cache->lock);
+# else
   if (cache->lock)
   {
     apr_status_t status = apr_thread_rwlock_rdlock(cache->lock);
     if (status)
       return svn_error_wrap_apr(status, _("Can't lock cache mutex"));
   }
+# endif
 #endif
   return SVN_NO_ERROR;
 }
@@ -505,6 +528,12 @@ static svn_error_t *
 write_lock_cache(svn_membuffer_t *cache, svn_boolean_t *success)
 {
 #if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+
+  return svn_mutex__lock(cache->lock);
+
+# else
+
   if (cache->lock)
     {
       apr_status_t status;
@@ -526,6 +555,8 @@ write_lock_cache(svn_membuffer_t *cache, svn_boolean_t *success)
         return svn_error_wrap_apr(status,
                                   _("Can't write-lock cache mutex"));
     }
+
+# endif
 #endif
   return SVN_NO_ERROR;
 }
@@ -537,10 +568,18 @@ static svn_error_t *
 force_write_lock_cache(svn_membuffer_t *cache)
 {
 #if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+
+  return svn_mutex__lock(cache->lock);
+
+# else
+
   apr_status_t status = apr_thread_rwlock_wrlock(cache->lock);
   if (status)
     return svn_error_wrap_apr(status,
                               _("Can't write-lock cache mutex"));
+
+# endif
 #endif
   return SVN_NO_ERROR;
 }
@@ -552,6 +591,12 @@ static svn_error_t *
 unlock_cache(svn_membuffer_t *cache, svn_error_t *err)
 {
 #if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+
+  return svn_mutex__unlock(cache->lock, err);
+
+# else
+
   if (cache->lock)
   {
     apr_status_t status = apr_thread_rwlock_unlock(cache->lock);
@@ -561,6 +606,8 @@ unlock_cache(svn_membuffer_t *cache, svn_error_t *err)
     if (status)
       return svn_error_wrap_apr(status, _("Can't unlock cache mutex"));
   }
+
+# endif
 #endif
   return err;
 }
@@ -1290,6 +1337,12 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
        * the cache's creator doesn't feel the cache needs to be
        * thread-safe.
        */
+# if USE_SIMPLE_MUTEX
+
+      SVN_ERR(svn_mutex__init(&c[seg].lock, thread_safe, pool));
+
+# else
+
       c[seg].lock = NULL;
       if (thread_safe)
         {
@@ -1299,6 +1352,8 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
             return svn_error_wrap_apr(status,
                                       _("Can't create cache mutex"));
         }
+
+# endif
 
       /* Select the behavior of write operations. */
       c[seg].allow_blocking_writes = allow_blocking_writes;
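For readers who want to see the locking strategy of this patch in isolation: below is a minimal sketch of the same compile-time lock-selection pattern, written against plain pthreads rather than Subversion's svn_mutex__t and APR wrappers. segment_t and the lock_* helpers are hypothetical names introduced for illustration only, not part of the Subversion API. With USE_SIMPLE_MUTEX set, readers and writers both take the same exclusive lock, exactly as the patch does per cache segment on Windows; otherwise readers share an rwlock.

/* Minimal sketch, not Subversion code: compile-time selection between a
 * plain mutex and a read-write lock, hidden behind thin helpers so the
 * calling code is lock-type agnostic. */
#include <pthread.h>

#define USE_SIMPLE_MUTEX 1   /* 1: plain mutex (as on Windows); 0: rwlock */

typedef struct segment_t
{
#if USE_SIMPLE_MUTEX
  pthread_mutex_t lock;      /* exclusive for readers and writers alike */
#else
  pthread_rwlock_t lock;     /* shared readers, exclusive writers */
#endif
} segment_t;

/* Readers: shared access under an rwlock, exclusive under the mutex. */
static int
lock_for_reading(segment_t *seg)
{
#if USE_SIMPLE_MUTEX
  return pthread_mutex_lock(&seg->lock);
#else
  return pthread_rwlock_rdlock(&seg->lock);
#endif
}

/* Writers need exclusive access either way. */
static int
lock_for_writing(segment_t *seg)
{
#if USE_SIMPLE_MUTEX
  return pthread_mutex_lock(&seg->lock);
#else
  return pthread_rwlock_wrlock(&seg->lock);
#endif
}

/* A single unlock helper serves both lock types. */
static int
unlock_segment(segment_t *seg)
{
#if USE_SIMPLE_MUTEX
  return pthread_mutex_unlock(&seg->lock);
#else
  return pthread_rwlock_unlock(&seg->lock);
#endif
}

The design relies on the cache keeping one lock per segment: raising the segment count (by configuring a larger cache) spreads accesses across independent locks, which is why the patch comment argues that giving up shared read locking on Windows keeps contention acceptable.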