cache-inprocess.c revision 299742
/*
 * cache-inprocess.c: in-memory caching for Subversion
 *
 * ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */

#include <assert.h>

#include <apr_thread_mutex.h>

#include "svn_pools.h"

#include "svn_private_config.h"

#include "cache.h"
#include "private/svn_mutex.h"
/* The (internal) cache object. */
typedef struct inprocess_cache_t {
  /* A user-defined identifier for this cache instance. */
  const char *id;

  /* HASH maps a key (of size KLEN) to a struct cache_entry. */
  apr_hash_t *hash;
  apr_ssize_t klen;

  /* Used to copy values into the cache. */
  svn_cache__serialize_func_t serialize_func;

  /* Used to copy values out of the cache. */
  svn_cache__deserialize_func_t deserialize_func;

  /* Maximum number of pages that this cache instance may allocate. */
  apr_uint64_t total_pages;
  /* The number of pages we're allowed to allocate before having to
   * try to reuse one. */
  apr_uint64_t unallocated_pages;
  /* Number of cache entries stored on each page.  Must be at least 1. */
  apr_uint64_t items_per_page;

  /* A dummy cache_page serving as the head of a circular doubly
   * linked list of cache_pages.  SENTINEL->NEXT is the most recently
   * used page, and SENTINEL->PREV is the least recently used page.
   * All pages in this list are "full"; the page currently being
   * filled (PARTIAL_PAGE) is not in the list. */
  struct cache_page *sentinel;

  /* A page currently being filled with entries, or NULL if there's no
   * partially-filled page.  This page is not in SENTINEL's list. */
  struct cache_page *partial_page;
  /* If PARTIAL_PAGE is not null, this is the number of entries
   * currently on PARTIAL_PAGE. */
  apr_uint64_t partial_page_number_filled;

  /* The pool that the svn_cache__t itself, HASH, and all pages are
   * allocated in; subpools of this pool are used for the cache_entry
   * structs, as well as the dup'd values and hash keys.
   */
  apr_pool_t *cache_pool;

  /* Sum of the SIZE members of all cache_entry elements that are
   * accessible from HASH.  This is used to make statistics available
   * even if the sub-pools have already been destroyed.
   */
  apr_size_t data_size;

  /* A lock for intra-process synchronization to the cache, or NULL if
   * the cache's creator doesn't feel the cache needs to be
   * thread-safe. */
  svn_mutex__t *mutex;
} inprocess_cache_t;
89193640Sariff
/* A cache page; all items on the page are allocated from the same
 * pool. */
struct cache_page {
  /* Pointers for the LRU list anchored at the cache's SENTINEL.
   * (NULL for the PARTIAL_PAGE.) */
  struct cache_page *prev;
  struct cache_page *next;

  /* The pool in which cache_entry structs, hash keys, and dup'd
   * values are allocated.  The CACHE_PAGE structs are allocated
   * in CACHE_POOL and have the same lifetime as the cache itself.
   * (The cache will never allocate more than TOTAL_PAGES page
   * structs (inclusive of the sentinel) from CACHE_POOL.)
   */
  apr_pool_t *page_pool;

  /* A singly linked list of the entries on this page; used to remove
   * them from the cache's HASH before reusing the page. */
  struct cache_entry *first_entry;
};
110193640Sariff
/* A cache entry. */
struct cache_entry {
  /* The key, duplicated into the owning page's pool. */
  const void *key;

  /* Serialized value, or NULL if the serialized size is zero or the
   * caller stored a NULL value. */
  void *value;

  /* Length of the serialized value in bytes. */
  apr_size_t size;

  /* The page it's on (needed so that the LRU list can be
   * maintained). */
  struct cache_page *page;

  /* Next entry on the page. */
  struct cache_entry *next_entry;
};
128193640Sariff
129193640Sariff
130193640Sariff/* Removes PAGE from the doubly-linked list it is in (leaving its PREV
131193640Sariff * and NEXT fields undefined). */
132193640Sariffstatic void
133193640Sariffremove_page_from_list(struct cache_page *page)
134193640Sariff{
135193640Sariff  page->prev->next = page->next;
136193640Sariff  page->next->prev = page->prev;
137193640Sariff}
138193640Sariff
139193640Sariff/* Inserts PAGE after CACHE's sentinel. */
140193640Sariffstatic void
141193640Sariffinsert_page(inprocess_cache_t *cache,
142193640Sariff            struct cache_page *page)
143193640Sariff{
144193640Sariff  struct cache_page *pred = cache->sentinel;
145193640Sariff
146193640Sariff  page->prev = pred;
147193640Sariff  page->next = pred->next;
148193640Sariff  page->prev->next = page;
149193640Sariff  page->next->prev = page;
150193640Sariff}
151193640Sariff
152193640Sariff/* If PAGE is in the circularly linked list (eg, its NEXT isn't NULL),
153193640Sariff * move it to the front of the list. */
154193640Sariffstatic svn_error_t *
155193640Sariffmove_page_to_front(inprocess_cache_t *cache,
156193640Sariff                   struct cache_page *page)
157193640Sariff{
158193640Sariff  /* This function is called whilst CACHE is locked. */
159193640Sariff
160193640Sariff  SVN_ERR_ASSERT(page != cache->sentinel);
161193640Sariff
162193640Sariff  if (! page->next)
163193640Sariff    return SVN_NO_ERROR;
164193640Sariff
165193640Sariff  remove_page_from_list(page);
166193640Sariff  insert_page(cache, page);
167193640Sariff
168193640Sariff  return SVN_NO_ERROR;
169193640Sariff}
170193640Sariff
171193640Sariff/* Return a copy of KEY inside POOL, using CACHE->KLEN to figure out
172193640Sariff * how. */
173193640Sariffstatic const void *
174193640Sariffduplicate_key(inprocess_cache_t *cache,
175193640Sariff              const void *key,
176193640Sariff              apr_pool_t *pool)
177193640Sariff{
178193640Sariff  if (cache->klen == APR_HASH_KEY_STRING)
179193640Sariff    return apr_pstrdup(pool, key);
180193640Sariff  else
181193640Sariff    return apr_pmemdup(pool, key, cache->klen);
182193640Sariff}
183193640Sariff
184193640Sariffstatic svn_error_t *
185193640Sariffinprocess_cache_get_internal(char **buffer,
186193640Sariff                             apr_size_t *size,
187193640Sariff                             inprocess_cache_t *cache,
188193640Sariff                             const void *key,
189193640Sariff                             apr_pool_t *result_pool)
190193640Sariff{
191193640Sariff  struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
192193640Sariff
193193640Sariff  if (entry)
194193640Sariff    {
195193640Sariff      SVN_ERR(move_page_to_front(cache, entry->page));
196193640Sariff
197193640Sariff      /* duplicate the buffer entry */
198193640Sariff      *buffer = apr_palloc(result_pool, entry->size);
199193640Sariff      if (entry->size)
200193640Sariff        memcpy(*buffer, entry->value, entry->size);
201193640Sariff
202193640Sariff      *size = entry->size;
203193640Sariff    }
204193640Sariff  else
205193640Sariff    {
206193640Sariff      *buffer = NULL;
207193640Sariff      *size = 0;
208193640Sariff    }
209193640Sariff
210193640Sariff  return SVN_NO_ERROR;
211193640Sariff}
212193640Sariff
213193640Sariffstatic svn_error_t *
214193640Sariffinprocess_cache_get(void **value_p,
215193640Sariff                    svn_boolean_t *found,
216193640Sariff                    void *cache_void,
217193640Sariff                    const void *key,
218193640Sariff                    apr_pool_t *result_pool)
219193640Sariff{
220193640Sariff  inprocess_cache_t *cache = cache_void;
221193640Sariff
222193640Sariff  if (key)
223193640Sariff    {
224193640Sariff      char* buffer;
225193640Sariff      apr_size_t size;
226193640Sariff
227193640Sariff      SVN_MUTEX__WITH_LOCK(cache->mutex,
228193640Sariff                           inprocess_cache_get_internal(&buffer,
229193640Sariff                                                        &size,
230193640Sariff                                                        cache,
231193640Sariff                                                        key,
232193640Sariff                                                        result_pool));
233193640Sariff      /* deserialize the buffer content. Usually, this will directly
234193640Sariff         modify the buffer content directly. */
235193640Sariff      *found = (buffer != NULL);
236193640Sariff      if (!buffer || !size)
237193640Sariff        *value_p = NULL;
238193640Sariff      else
239193640Sariff        return cache->deserialize_func(value_p, buffer, size, result_pool);
240193640Sariff    }
241193640Sariff  else
242193640Sariff    {
243193640Sariff      *value_p = NULL;
244193640Sariff      *found = FALSE;
245193640Sariff    }
246193640Sariff
247193640Sariff  return SVN_NO_ERROR;
248193640Sariff}
249193640Sariff
250193640Sariffstatic svn_error_t *
251193640Sariffinprocess_cache_has_key_internal(svn_boolean_t *found,
252193640Sariff                                 inprocess_cache_t *cache,
253193640Sariff                                 const void *key,
254193640Sariff                                 apr_pool_t *scratch_pool)
255193640Sariff{
256193640Sariff  *found = apr_hash_get(cache->hash, key, cache->klen) != NULL;
257193640Sariff
258193640Sariff  return SVN_NO_ERROR;
259193640Sariff}
260193640Sariff
261193640Sariffstatic svn_error_t *
262193640Sariffinprocess_cache_has_key(svn_boolean_t *found,
263193640Sariff                        void *cache_void,
264193640Sariff                        const void *key,
265193640Sariff                        apr_pool_t *scratch_pool)
266193640Sariff{
267193640Sariff  inprocess_cache_t *cache = cache_void;
268193640Sariff
269193640Sariff  if (key)
270193640Sariff    SVN_MUTEX__WITH_LOCK(cache->mutex,
271193640Sariff                         inprocess_cache_has_key_internal(found,
272193640Sariff                                                          cache,
273193640Sariff                                                          key,
274193640Sariff                                                          scratch_pool));
275193640Sariff  else
276193640Sariff    *found = FALSE;
277193640Sariff
278193640Sariff  return SVN_NO_ERROR;
279193640Sariff}
280193640Sariff
281193640Sariff/* Removes PAGE from the LRU list, removes all of its entries from
282193640Sariff * CACHE's hash, clears its pool, and sets its entry pointer to NULL.
283193640Sariff * Finally, puts it in the "partial page" slot in the cache and sets
284193640Sariff * partial_page_number_filled to 0.  Must be called on a page actually
285193640Sariff * in the list. */
286193640Sariffstatic void
287193640Sarifferase_page(inprocess_cache_t *cache,
288193640Sariff           struct cache_page *page)
289193640Sariff{
290193640Sariff  struct cache_entry *e;
291193640Sariff
292193640Sariff  remove_page_from_list(page);
293193640Sariff
294193640Sariff  for (e = page->first_entry;
295193640Sariff       e;
296193640Sariff       e = e->next_entry)
297193640Sariff    {
298193640Sariff      cache->data_size -= e->size;
299193640Sariff      apr_hash_set(cache->hash, e->key, cache->klen, NULL);
300193640Sariff    }
301
302  svn_pool_clear(page->page_pool);
303
304  page->first_entry = NULL;
305  page->prev = NULL;
306  page->next = NULL;
307
308  cache->partial_page = page;
309  cache->partial_page_number_filled = 0;
310}
311
312
/* Store VALUE under KEY in CACHE.  Replacing an existing value leaks
 * the old serialized copy into the page's pool (except when
 * ITEMS_PER_PAGE is 1, where the whole page is recycled instead).
 * Caller must hold CACHE's lock. */
static svn_error_t *
inprocess_cache_set_internal(inprocess_cache_t *cache,
                             const void *key,
                             void *value,
                             apr_pool_t *scratch_pool)
{
  struct cache_entry *existing_entry;

  existing_entry = apr_hash_get(cache->hash, key, cache->klen);

  /* Is it already here, but we can do the one-item-per-page
   * optimization? */
  if (existing_entry && cache->items_per_page == 1)
    {
      /* Special case!  ENTRY is the *only* entry on this page, so
       * why not wipe it (so as not to leak the previous value).
       */
      struct cache_page *page = existing_entry->page;

      /* This can't be the partial page: items_per_page == 1
       * *never* has a partial page (except for in the temporary state
       * that we're about to fake). */
      SVN_ERR_ASSERT(page->next != NULL);
      SVN_ERR_ASSERT(cache->partial_page == NULL);

      /* erase_page() makes PAGE the (empty) partial page, so the
       * insertion below reuses it. */
      erase_page(cache, page);
      existing_entry = NULL;
    }

  /* Is it already here, and we just have to leak the old value? */
  if (existing_entry)
    {
      struct cache_page *page = existing_entry->page;

      SVN_ERR(move_page_to_front(cache, page));
      /* Keep DATA_SIZE consistent across the re-serialization. */
      cache->data_size -= existing_entry->size;
      if (value)
        {
          SVN_ERR(cache->serialize_func(&existing_entry->value,
                                        &existing_entry->size,
                                        value,
                                        page->page_pool));
          cache->data_size += existing_entry->size;
          /* Normalize a zero-length serialization to a NULL value. */
          if (existing_entry->size == 0)
            existing_entry->value = NULL;
        }
      else
        {
          existing_entry->value = NULL;
          existing_entry->size = 0;
        }

      return SVN_NO_ERROR;
    }

  /* Do we not have a partial page to put it on, but we are allowed to
   * allocate more? */
  if (cache->partial_page == NULL && cache->unallocated_pages > 0)
    {
      cache->partial_page = apr_pcalloc(cache->cache_pool,
                                        sizeof(*(cache->partial_page)));
      cache->partial_page->page_pool = svn_pool_create(cache->cache_pool);
      cache->partial_page_number_filled = 0;
      (cache->unallocated_pages)--;
    }

  /* Do we really not have a partial page to put it on, even after the
   * one-item-per-page optimization and checking the unallocated page
   * count?  Then evict the least recently used page. */
  if (cache->partial_page == NULL)
    {
      struct cache_page *oldest_page = cache->sentinel->prev;

      SVN_ERR_ASSERT(oldest_page != cache->sentinel);

      /* Erase the page and put it in cache->partial_page. */
      erase_page(cache, oldest_page);
    }

  SVN_ERR_ASSERT(cache->partial_page != NULL);

  {
    struct cache_page *page = cache->partial_page;
    struct cache_entry *new_entry = apr_pcalloc(page->page_pool,
                                                sizeof(*new_entry));

    /* Copy the key and value into the page's pool.  */
    new_entry->key = duplicate_key(cache, key, page->page_pool);
    if (value)
      {
        SVN_ERR(cache->serialize_func(&new_entry->value,
                                      &new_entry->size,
                                      value,
                                      page->page_pool));
        cache->data_size += new_entry->size;
        /* Normalize a zero-length serialization to a NULL value. */
        if (new_entry->size == 0)
          new_entry->value = NULL;
      }
    else
      {
        new_entry->value = NULL;
        new_entry->size = 0;
      }

    /* Add the entry to the page's list. */
    new_entry->page = page;
    new_entry->next_entry = page->first_entry;
    page->first_entry = new_entry;

    /* Add the entry to the hash, using the *entry's* copy of the
     * key. */
    apr_hash_set(cache->hash, new_entry->key, cache->klen, new_entry);

    /* We've added something else to the partial page. */
    (cache->partial_page_number_filled)++;

    /* Is it full? */
    if (cache->partial_page_number_filled >= cache->items_per_page)
      {
        insert_page(cache, page);
        cache->partial_page = NULL;
      }
  }

  return SVN_NO_ERROR;
}
439
440static svn_error_t *
441inprocess_cache_set(void *cache_void,
442                    const void *key,
443                    void *value,
444                    apr_pool_t *scratch_pool)
445{
446  inprocess_cache_t *cache = cache_void;
447
448  if (key)
449    SVN_MUTEX__WITH_LOCK(cache->mutex,
450                         inprocess_cache_set_internal(cache,
451                                                      key,
452                                                      value,
453                                                      scratch_pool));
454
455  return SVN_NO_ERROR;
456}
457
/* Baton type for svn_cache__iter: carries the user's callback and its
 * baton through the per-entry wrapper iter_cb. */
struct cache_iter_baton {
  svn_iter_apr_hash_cb_t user_cb;
  void *user_baton;
};
463
/* Call the user's callback with the actual value, not the
   cache_entry.  Implements the svn_iter_apr_hash_cb_t
   prototype.

   NOTE(review): ENTRY->VALUE is the *serialized* representation (see
   struct cache_entry); the user callback receives it without the
   cache's deserialize_func being applied. */
static svn_error_t *
iter_cb(void *baton,
        const void *key,
        apr_ssize_t klen,
        void *val,
        apr_pool_t *pool)
{
  struct cache_iter_baton *b = baton;
  struct cache_entry *entry = val;
  return (b->user_cb)(b->user_baton, key, klen, entry->value, pool);
}
478
479static svn_error_t *
480inprocess_cache_iter(svn_boolean_t *completed,
481                     void *cache_void,
482                     svn_iter_apr_hash_cb_t user_cb,
483                     void *user_baton,
484                     apr_pool_t *scratch_pool)
485{
486  inprocess_cache_t *cache = cache_void;
487  struct cache_iter_baton b;
488  b.user_cb = user_cb;
489  b.user_baton = user_baton;
490
491  SVN_MUTEX__WITH_LOCK(cache->mutex,
492                       svn_iter_apr_hash(completed, cache->hash,
493                                         iter_cb, &b, scratch_pool));
494
495  return SVN_NO_ERROR;
496}
497
498static svn_error_t *
499inprocess_cache_get_partial_internal(void **value_p,
500                                     svn_boolean_t *found,
501                                     inprocess_cache_t *cache,
502                                     const void *key,
503                                     svn_cache__partial_getter_func_t func,
504                                     void *baton,
505                                     apr_pool_t *result_pool)
506{
507  struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
508  if (! entry)
509    {
510      *found = FALSE;
511      return SVN_NO_ERROR;
512    }
513
514  SVN_ERR(move_page_to_front(cache, entry->page));
515
516  *found = TRUE;
517  return func(value_p, entry->value, entry->size, baton, result_pool);
518}
519
520static svn_error_t *
521inprocess_cache_get_partial(void **value_p,
522                            svn_boolean_t *found,
523                            void *cache_void,
524                            const void *key,
525                            svn_cache__partial_getter_func_t func,
526                            void *baton,
527                            apr_pool_t *result_pool)
528{
529  inprocess_cache_t *cache = cache_void;
530
531  if (key)
532    SVN_MUTEX__WITH_LOCK(cache->mutex,
533                         inprocess_cache_get_partial_internal(value_p,
534                                                              found,
535                                                              cache,
536                                                              key,
537                                                              func,
538                                                              baton,
539                                                              result_pool));
540  else
541    *found = FALSE;
542
543  return SVN_NO_ERROR;
544}
545
546static svn_error_t *
547inprocess_cache_set_partial_internal(inprocess_cache_t *cache,
548                                     const void *key,
549                                     svn_cache__partial_setter_func_t func,
550                                     void *baton,
551                                     apr_pool_t *scratch_pool)
552{
553  struct cache_entry *entry = apr_hash_get(cache->hash, key, cache->klen);
554  if (entry)
555    {
556      SVN_ERR(move_page_to_front(cache, entry->page));
557
558      cache->data_size -= entry->size;
559      SVN_ERR(func(&entry->value,
560                   &entry->size,
561                   baton,
562                   entry->page->page_pool));
563      cache->data_size += entry->size;
564    }
565
566  return SVN_NO_ERROR;
567}
568
569static svn_error_t *
570inprocess_cache_set_partial(void *cache_void,
571                            const void *key,
572                            svn_cache__partial_setter_func_t func,
573                            void *baton,
574                            apr_pool_t *scratch_pool)
575{
576  inprocess_cache_t *cache = cache_void;
577
578  if (key)
579    SVN_MUTEX__WITH_LOCK(cache->mutex,
580                         inprocess_cache_set_partial_internal(cache,
581                                                              key,
582                                                              func,
583                                                              baton,
584                                                              scratch_pool));
585
586  return SVN_NO_ERROR;
587}
588
589static svn_boolean_t
590inprocess_cache_is_cachable(void *cache_void, apr_size_t size)
591{
592  /* Be relatively strict: per page we should not allocate more than
593   * we could spare as "unused" memory.
594   * But in most cases, nobody will ask anyway. And in no case, this
595   * will be used for checks _inside_ the cache code.
596   */
597  inprocess_cache_t *cache = cache_void;
598  return size < SVN_ALLOCATOR_RECOMMENDED_MAX_FREE / cache->items_per_page;
599}
600
601static svn_error_t *
602inprocess_cache_get_info_internal(inprocess_cache_t *cache,
603                                  svn_cache__info_t *info,
604                                  apr_pool_t *result_pool)
605{
606  info->id = apr_pstrdup(result_pool, cache->id);
607
608  info->used_entries = apr_hash_count(cache->hash);
609  info->total_entries = cache->items_per_page * cache->total_pages;
610
611  info->used_size = cache->data_size;
612  info->data_size = cache->data_size;
613  info->total_size = cache->data_size
614                   + cache->items_per_page * sizeof(struct cache_page)
615                   + info->used_entries * sizeof(struct cache_entry);
616
617  return SVN_NO_ERROR;
618}
619
620
/* Implements svn_cache__vtable_t.get_info.
 *
 * NOTE(review): the RESET flag is accepted but never read here, so
 * this implementation's statistics cannot be reset. */
static svn_error_t *
inprocess_cache_get_info(void *cache_void,
                         svn_cache__info_t *info,
                         svn_boolean_t reset,
                         apr_pool_t *result_pool)
{
  inprocess_cache_t *cache = cache_void;

  SVN_MUTEX__WITH_LOCK(cache->mutex,
                       inprocess_cache_get_info_internal(cache,
                                                         info,
                                                         result_pool));

  return SVN_NO_ERROR;
}
636
/* Dispatch table hooking this in-process implementation into the
 * generic svn_cache__t front end (slot layout defined in cache.h). */
static svn_cache__vtable_t inprocess_cache_vtable = {
  inprocess_cache_get,
  inprocess_cache_has_key,
  inprocess_cache_set,
  inprocess_cache_iter,
  inprocess_cache_is_cachable,
  inprocess_cache_get_partial,
  inprocess_cache_set_partial,
  inprocess_cache_get_info
};
647
648svn_error_t *
649svn_cache__create_inprocess(svn_cache__t **cache_p,
650                            svn_cache__serialize_func_t serialize,
651                            svn_cache__deserialize_func_t deserialize,
652                            apr_ssize_t klen,
653                            apr_int64_t pages,
654                            apr_int64_t items_per_page,
655                            svn_boolean_t thread_safe,
656                            const char *id,
657                            apr_pool_t *pool)
658{
659  svn_cache__t *wrapper = apr_pcalloc(pool, sizeof(*wrapper));
660  inprocess_cache_t *cache = apr_pcalloc(pool, sizeof(*cache));
661
662  cache->id = apr_pstrdup(pool, id);
663
664  SVN_ERR_ASSERT(klen == APR_HASH_KEY_STRING || klen >= 1);
665
666  cache->hash = apr_hash_make(pool);
667  cache->klen = klen;
668
669  cache->serialize_func = serialize;
670  cache->deserialize_func = deserialize;
671
672  SVN_ERR_ASSERT(pages >= 1);
673  cache->total_pages = pages;
674  cache->unallocated_pages = pages;
675  SVN_ERR_ASSERT(items_per_page >= 1);
676  cache->items_per_page = items_per_page;
677
678  cache->sentinel = apr_pcalloc(pool, sizeof(*(cache->sentinel)));
679  cache->sentinel->prev = cache->sentinel;
680  cache->sentinel->next = cache->sentinel;
681  /* The sentinel doesn't need a pool.  (We're happy to crash if we
682   * accidentally try to treat it like a real page.) */
683
684  SVN_ERR(svn_mutex__init(&cache->mutex, thread_safe, pool));
685
686  cache->cache_pool = pool;
687
688  wrapper->vtable = &inprocess_cache_vtable;
689  wrapper->cache_internal = cache;
690  wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
691
692  *cache_p = wrapper;
693  return SVN_NO_ERROR;
694}
695