| 1 | /* |
| 2 | * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved. |
| 3 | * |
| 4 | * This code is derived from software contributed to The DragonFly Project |
| 5 | * by Jeffrey M. Hsu. |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following conditions |
| 9 | * are met: |
| 10 | * 1. Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * 2. Redistributions in binary form must reproduce the above copyright |
| 13 | * notice, this list of conditions and the following disclaimer in the |
| 14 | * documentation and/or other materials provided with the distribution. |
| 15 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 16 | * contributors may be used to endorse or promote products derived |
| 17 | * from this software without specific, prior written permission. |
| 18 | * |
| 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 23 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 24 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 28 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 29 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 30 | * SUCH DAMAGE. |
| 31 | * |
| 32 | * $DragonFly: src/sys/kern/kern_objcache.c,v 1.15 2006/12/23 00:35:04 swildner Exp $ |
| 33 | */ |
| 34 | |
| 35 | #include <sys/param.h> |
| 36 | #include <sys/kernel.h> |
| 37 | #include <sys/systm.h> |
| 38 | #include <sys/callout.h> |
| 39 | #include <sys/globaldata.h> |
| 40 | #include <sys/malloc.h> |
| 41 | #include <sys/queue.h> |
| 42 | #include <sys/objcache.h> |
| 43 | #include <sys/spinlock.h> |
| 44 | #include <sys/thread.h> |
| 45 | #include <sys/thread2.h> |
| 46 | #include <sys/spinlock2.h> |
| 47 | |
| 48 | static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); |
| 49 | static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine"); |
| 50 | |
| 51 | #define INITIAL_MAG_CAPACITY 256 |
| 52 | |
| 53 | struct magazine { |
| 54 | int rounds; |
| 55 | int capacity; |
| 56 | int cleaning; |
| 57 | SLIST_ENTRY(magazine) nextmagazine; |
| 58 | void *objects[]; |
| 59 | }; |
| 60 | |
| 61 | SLIST_HEAD(magazinelist, magazine); |
| 62 | |
| 63 | /* |
| 64 | * per-cluster cache of magazines |
| 65 | * |
| 66 | * All fields in this structure are protected by the spinlock. |
| 67 | */ |
| 68 | struct magazinedepot { |
| 69 | /* |
	 * The per-cpu object caches only exchange completely full or
	 * completely empty magazines with the depot layer, so only these
	 * two types of magazines need to be cached here.
| 73 | */ |
| 74 | struct magazinelist fullmagazines; |
| 75 | struct magazinelist emptymagazines; |
| 76 | int magcapacity; |
| 77 | |
| 78 | /* protect this structure */ |
| 79 | struct spinlock spin; |
| 80 | |
	/* objects not yet allocated towards the limit */
| 82 | int unallocated_objects; |
| 83 | |
| 84 | /* infrequently used fields */ |
| 85 | int waiting; /* waiting for another cpu to |
| 86 | * return a full magazine to |
| 87 | * the depot */ |
| 88 | int contested; /* depot contention count */ |
| 89 | }; |
| 90 | |
| 91 | /* |
| 92 | * per-cpu object cache |
| 93 | * All fields in this structure are protected by crit_enter(). |
| 94 | */ |
| 95 | struct percpu_objcache { |
| 96 | struct magazine *loaded_magazine; /* active magazine */ |
| 97 | struct magazine *previous_magazine; /* backup magazine */ |
| 98 | |
| 99 | /* statistics */ |
| 100 | int gets_cumulative; /* total calls to get */ |
| 101 | int gets_null; /* objcache_get returned NULL */ |
| 102 | int puts_cumulative; /* total calls to put */ |
| 103 | int puts_othercluster; /* returned to other cluster */ |
| 104 | |
| 105 | /* infrequently used fields */ |
| 106 | int waiting; /* waiting for a thread on this cpu to |
| 107 | * return an obj to the per-cpu cache */ |
| 108 | }; |
| 109 | |
| 110 | /* only until we have NUMA cluster topology information XXX */ |
| 111 | #define MAXCLUSTERS 1 |
| 112 | #define myclusterid 0 |
| 113 | #define CLUSTER_OF(obj) 0 |
| 114 | |
| 115 | /* |
| 116 | * Two-level object cache consisting of NUMA cluster-level depots of |
| 117 | * fully loaded or completely empty magazines and cpu-level caches of |
| 118 | * individual objects. |
| 119 | */ |
| 120 | struct objcache { |
| 121 | char *name; |
| 122 | |
| 123 | /* object constructor and destructor from blank storage */ |
| 124 | objcache_ctor_fn *ctor; |
| 125 | objcache_dtor_fn *dtor; |
| 126 | void *private; |
| 127 | |
| 128 | /* interface to underlying allocator */ |
| 129 | objcache_alloc_fn *alloc; |
| 130 | objcache_free_fn *free; |
| 131 | void *allocator_args; |
| 132 | |
| 133 | SLIST_ENTRY(objcache) oc_next; |
| 134 | |
| 135 | /* NUMA-cluster level caches */ |
| 136 | struct magazinedepot depot[MAXCLUSTERS]; |
| 137 | |
| 138 | struct percpu_objcache cache_percpu[]; /* per-cpu caches */ |
| 139 | }; |
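
/*
 * Illustrative lifecycle sketch.  The "foo" names below are hypothetical
 * and not defined in this file; a subsystem typically creates one cache
 * per object type and cycles objects through it:
 *
 *	struct objcache *foo_cache;
 *	struct foo *fp;
 *
 *	foo_cache = objcache_create("foo", 0, 0, foo_ctor, foo_dtor, NULL,
 *				    objcache_malloc_alloc,
 *				    objcache_malloc_free, &foo_malloc_args);
 *	fp = objcache_get(foo_cache, M_WAITOK);
 *	...
 *	objcache_put(foo_cache, fp);
 */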
| 140 | |
| 141 | static struct spinlock objcachelist_spin; |
| 142 | static SLIST_HEAD(objcachelist, objcache) allobjcaches; |
| 143 | |
| 144 | static struct magazine * |
| 145 | mag_alloc(int capacity) |
| 146 | { |
| 147 | struct magazine *mag; |
| 148 | |
| 149 | mag = kmalloc(__offsetof(struct magazine, objects[capacity]), |
| 150 | M_OBJMAG, M_INTWAIT | M_ZERO); |
| 151 | mag->capacity = capacity; |
| 152 | mag->rounds = 0; |
| 153 | mag->cleaning = 0; |
| 154 | return (mag); |
| 155 | } |
| 156 | |
/*
 * Utility routines for objects that require no construction or
 * destruction.
 */
| 161 | static void |
| 162 | null_dtor(void *obj, void *private) |
| 163 | { |
| 164 | /* do nothing */ |
| 165 | } |
| 166 | |
| 167 | static boolean_t |
| 168 | null_ctor(void *obj, void *private, int ocflags) |
| 169 | { |
| 170 | return TRUE; |
| 171 | } |
| 172 | |
| 173 | /* |
| 174 | * Create an object cache. |
| 175 | */ |
| 176 | struct objcache * |
| 177 | objcache_create(const char *name, int cluster_limit, int mag_capacity, |
| 178 | objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private, |
| 179 | objcache_alloc_fn *alloc, objcache_free_fn *free, |
| 180 | void *allocator_args) |
| 181 | { |
| 182 | struct objcache *oc; |
| 183 | struct magazinedepot *depot; |
| 184 | int cpuid; |
| 185 | |
| 186 | /* allocate object cache structure */ |
| 187 | oc = kmalloc(__offsetof(struct objcache, cache_percpu[ncpus]), |
| 188 | M_OBJCACHE, M_WAITOK | M_ZERO); |
| 189 | oc->name = kstrdup(name, M_TEMP); |
| 190 | oc->ctor = ctor ? ctor : null_ctor; |
| 191 | oc->dtor = dtor ? dtor : null_dtor; |
| 192 | oc->private = private; |
	oc->alloc = alloc;
	oc->free = free;
| 194 | oc->allocator_args = allocator_args; |
| 195 | |
| 196 | /* initialize depots */ |
| 197 | depot = &oc->depot[0]; |
| 198 | |
| 199 | spin_init(&depot->spin); |
| 200 | SLIST_INIT(&depot->fullmagazines); |
| 201 | SLIST_INIT(&depot->emptymagazines); |
| 202 | |
| 203 | if (mag_capacity == 0) |
| 204 | mag_capacity = INITIAL_MAG_CAPACITY; |
| 205 | depot->magcapacity = mag_capacity; |
| 206 | |
	/*
	 * The cluster_limit must supply enough objects to fill three
	 * magazines per cpu; raise it if it falls short.
	 */
| 211 | if (cluster_limit == 0) { |
| 212 | depot->unallocated_objects = -1; |
| 213 | } else { |
| 214 | if (cluster_limit < mag_capacity * ncpus * 3) |
| 215 | cluster_limit = mag_capacity * ncpus * 3; |
| 216 | depot->unallocated_objects = cluster_limit; |
| 217 | } |
| 219 | |
| 220 | /* initialize per-cpu caches */ |
| 221 | for (cpuid = 0; cpuid < ncpus; cpuid++) { |
| 222 | struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid]; |
| 223 | |
| 224 | cache_percpu->loaded_magazine = mag_alloc(mag_capacity); |
| 225 | cache_percpu->previous_magazine = mag_alloc(mag_capacity); |
| 226 | } |
| 227 | spin_lock_wr(&objcachelist_spin); |
| 228 | SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next); |
| 229 | spin_unlock_wr(&objcachelist_spin); |
| 230 | |
| 231 | return (oc); |
| 232 | } |
| 233 | |
| 234 | struct objcache * |
| 235 | objcache_create_simple(malloc_type_t mtype, size_t objsize) |
| 236 | { |
| 237 | struct objcache_malloc_args *margs; |
| 238 | struct objcache *oc; |
| 239 | |
| 240 | margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO); |
| 241 | margs->objsize = objsize; |
| 242 | margs->mtype = mtype; |
| 243 | oc = objcache_create(mtype->ks_shortdesc, 0, 0, |
| 244 | NULL, NULL, NULL, |
| 245 | objcache_malloc_alloc, objcache_malloc_free, |
| 246 | margs); |
| 247 | return (oc); |
| 248 | } |
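
/*
 * Example (sketch): callers that need no constructor or destructor can
 * build a cache directly from a malloc type.  M_FOO and struct foo are
 * hypothetical:
 *
 *	static MALLOC_DEFINE(M_FOO, "foo", "foo structures");
 *	struct objcache *foo_cache;
 *
 *	foo_cache = objcache_create_simple(M_FOO, sizeof(struct foo));
 */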
| 249 | |
| 250 | #define MAGAZINE_EMPTY(mag) (mag->rounds == 0) |
| 251 | #define MAGAZINE_NOTEMPTY(mag) (mag->rounds != 0) |
| 252 | #define MAGAZINE_FULL(mag) (mag->rounds == mag->capacity) |
| 253 | |
| 254 | #define swap(x, y) ({ struct magazine *t = x; x = y; y = t; }) |
| 255 | |
| 256 | /* |
| 257 | * Get an object from the object cache. |
| 258 | * |
| 259 | * WARNING! ocflags are only used when we have to go to the underlying |
| 260 | * allocator, so we cannot depend on flags such as M_ZERO. |
| 261 | */ |
| 262 | void * |
| 263 | objcache_get(struct objcache *oc, int ocflags) |
| 264 | { |
| 265 | struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; |
| 266 | struct magazine *loadedmag; |
| 267 | struct magazine *emptymag; |
| 268 | void *obj; |
| 269 | struct magazinedepot *depot; |
| 270 | |
| 271 | KKASSERT((ocflags & M_ZERO) == 0); |
| 272 | crit_enter(); |
| 273 | ++cpucache->gets_cumulative; |
| 274 | |
| 275 | retry: |
| 276 | /* |
| 277 | * Loaded magazine has an object. This is the hot path. |
| 278 | * It is lock-free and uses a critical section to block |
| 279 | * out interrupt handlers on the same processor. |
| 280 | */ |
| 281 | loadedmag = cpucache->loaded_magazine; |
| 282 | if (MAGAZINE_NOTEMPTY(loadedmag)) { |
| 283 | obj = loadedmag->objects[--loadedmag->rounds]; |
| 284 | crit_exit(); |
| 285 | return (obj); |
| 286 | } |
| 287 | |
| 288 | /* Previous magazine has an object. */ |
| 289 | if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) { |
| 290 | KKASSERT(cpucache->previous_magazine->cleaning + |
| 291 | cpucache->loaded_magazine->cleaning == 0); |
| 292 | swap(cpucache->loaded_magazine, cpucache->previous_magazine); |
| 293 | loadedmag = cpucache->loaded_magazine; |
| 294 | obj = loadedmag->objects[--loadedmag->rounds]; |
| 295 | crit_exit(); |
| 296 | return (obj); |
| 297 | } |
| 298 | |
| 299 | /* |
| 300 | * Both magazines empty. Get a full magazine from the depot and |
| 301 | * move one of the empty ones to the depot. |
| 302 | * |
| 303 | * Obtain the depot spinlock. |
| 304 | * |
| 305 | * NOTE: Beyond this point, M_* flags are handled via oc->alloc() |
| 306 | */ |
| 307 | depot = &oc->depot[myclusterid]; |
| 308 | spin_lock_wr(&depot->spin); |
| 309 | |
| 310 | /* |
| 311 | * Recheck the cpucache after obtaining the depot spinlock. This |
| 312 | * shouldn't be necessary now but don't take any chances. |
| 313 | */ |
| 314 | if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) || |
| 315 | MAGAZINE_NOTEMPTY(cpucache->previous_magazine) |
| 316 | ) { |
| 317 | spin_unlock_wr(&depot->spin); |
| 318 | goto retry; |
| 319 | } |
| 320 | |
| 321 | /* Check if depot has a full magazine. */ |
| 322 | if (!SLIST_EMPTY(&depot->fullmagazines)) { |
| 323 | emptymag = cpucache->previous_magazine; |
| 324 | cpucache->previous_magazine = cpucache->loaded_magazine; |
| 325 | cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines); |
| 326 | SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine); |
| 327 | |
| 328 | /* |
| 329 | * Return emptymag to the depot. |
| 330 | */ |
| 331 | KKASSERT(MAGAZINE_EMPTY(emptymag)); |
| 332 | SLIST_INSERT_HEAD(&depot->emptymagazines, |
| 333 | emptymag, nextmagazine); |
| 334 | spin_unlock_wr(&depot->spin); |
| 335 | goto retry; |
| 336 | } |
| 337 | |
| 338 | /* |
| 339 | * The depot does not have any non-empty magazines. If we have |
| 340 | * not hit our object limit we can allocate a new object using |
| 341 | * the back-end allocator. |
| 342 | * |
| 343 | * note: unallocated_objects can be initialized to -1, which has |
| 344 | * the effect of removing any allocation limits. |
| 345 | */ |
| 346 | if (depot->unallocated_objects) { |
| 347 | --depot->unallocated_objects; |
| 348 | spin_unlock_wr(&depot->spin); |
| 349 | crit_exit(); |
| 350 | |
| 351 | obj = oc->alloc(oc->allocator_args, ocflags); |
| 352 | if (obj) { |
| 353 | if (oc->ctor(obj, oc->private, ocflags)) |
| 354 | return (obj); |
| 355 | oc->free(obj, oc->allocator_args); |
| 356 | spin_lock_wr(&depot->spin); |
| 357 | ++depot->unallocated_objects; |
| 358 | spin_unlock_wr(&depot->spin); |
| 359 | if (depot->waiting) |
| 360 | wakeup(depot); |
| 361 | obj = NULL; |
| 362 | } |
| 363 | if (obj == NULL) { |
| 364 | crit_enter(); |
			/*
			 * Count the failure in gets_null rather than in
			 * gets_cumulative; keeping the two separate makes
			 * debugging easier.
			 */
| 369 | ++cpucache->gets_null; |
| 370 | --cpucache->gets_cumulative; |
| 371 | crit_exit(); |
| 372 | } |
| 373 | return(obj); |
| 374 | } |
| 375 | |
| 376 | /* |
| 377 | * Otherwise block if allowed to. |
| 378 | */ |
| 379 | if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) { |
| 380 | ++cpucache->waiting; |
| 381 | ++depot->waiting; |
| 382 | msleep(depot, &depot->spin, 0, "objcache_get", 0); |
| 383 | --cpucache->waiting; |
| 384 | --depot->waiting; |
| 385 | spin_unlock_wr(&depot->spin); |
| 386 | goto retry; |
| 387 | } |
| 388 | |
| 389 | /* |
| 390 | * Otherwise fail |
| 391 | */ |
| 392 | ++cpucache->gets_null; |
| 393 | --cpucache->gets_cumulative; |
| 394 | crit_exit(); |
| 395 | spin_unlock_wr(&depot->spin); |
| 396 | return (NULL); |
| 397 | } |
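
/*
 * Illustrative call pattern (sketch).  Unless M_WAITOK is passed without
 * M_NULLOK, objcache_get() may return NULL, so callers normally check the
 * result; even M_WAITOK can yield NULL if the underlying allocator or the
 * constructor fails:
 *
 *	obj = objcache_get(oc, M_NOWAIT);
 *	if (obj == NULL)
 *		return (ENOBUFS);
 */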
| 398 | |
| 399 | /* |
| 400 | * Wrapper for malloc allocation routines. |
| 401 | */ |
| 402 | void * |
| 403 | objcache_malloc_alloc(void *allocator_args, int ocflags) |
| 404 | { |
| 405 | struct objcache_malloc_args *alloc_args = allocator_args; |
| 406 | |
| 407 | return (kmalloc(alloc_args->objsize, alloc_args->mtype, |
| 408 | ocflags & OC_MFLAGS)); |
| 409 | } |
| 410 | |
| 411 | void |
| 412 | objcache_malloc_free(void *obj, void *allocator_args) |
| 413 | { |
| 414 | struct objcache_malloc_args *alloc_args = allocator_args; |
| 415 | |
| 416 | kfree(obj, alloc_args->mtype); |
| 417 | } |
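
/*
 * Sketch of wiring these wrappers into objcache_create() by hand, which
 * is what objcache_create_simple() does internally (the "foo" names are
 * hypothetical):
 *
 *	struct objcache_malloc_args foo_args = {
 *		.objsize = sizeof(struct foo),
 *		.mtype = M_FOO
 *	};
 *
 *	oc = objcache_create("foo", 0, 0, NULL, NULL, NULL,
 *			     objcache_malloc_alloc, objcache_malloc_free,
 *			     &foo_args);
 */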
| 418 | |
| 419 | /* |
| 420 | * Wrapper for allocation policies that pre-allocate at initialization time |
| 421 | * and don't do run-time allocation. |
| 422 | */ |
| 423 | void * |
| 424 | objcache_nop_alloc(void *allocator_args, int ocflags) |
| 425 | { |
| 426 | return (NULL); |
| 427 | } |
| 428 | |
| 429 | void |
| 430 | objcache_nop_free(void *obj, void *allocator_args) |
| 431 | { |
| 432 | } |
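
/*
 * Sketch: the nop wrappers pair with caches whose objects come from
 * storage pre-loaded into the depot (see the disabled
 * objcache_populate_linear() below), so the cache never allocates or
 * frees at run-time:
 *
 *	oc = objcache_create("foo", nelts, 0, NULL, NULL, NULL,
 *			     objcache_nop_alloc, objcache_nop_free, NULL);
 */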
| 433 | |
| 434 | /* |
| 435 | * Return an object to the object cache. |
| 436 | */ |
| 437 | void |
| 438 | objcache_put(struct objcache *oc, void *obj) |
| 439 | { |
| 440 | struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; |
| 441 | struct magazine *loadedmag; |
| 442 | struct magazinedepot *depot; |
| 443 | |
| 444 | crit_enter(); |
| 445 | ++cpucache->puts_cumulative; |
| 446 | |
| 447 | if (CLUSTER_OF(obj) != myclusterid) { |
| 448 | #ifdef notyet |
| 449 | /* use lazy IPI to send object to owning cluster XXX todo */ |
| 450 | ++cpucache->puts_othercluster; |
| 451 | crit_exit(); |
| 452 | return; |
| 453 | #endif |
| 454 | } |
| 455 | |
| 456 | retry: |
| 457 | /* |
| 458 | * Free slot available in loaded magazine. This is the hot path. |
| 459 | * It is lock-free and uses a critical section to block out interrupt |
| 460 | * handlers on the same processor. |
| 461 | */ |
| 462 | loadedmag = cpucache->loaded_magazine; |
| 463 | if (!MAGAZINE_FULL(loadedmag)) { |
| 464 | loadedmag->objects[loadedmag->rounds++] = obj; |
| 465 | if (cpucache->waiting) |
| 466 | wakeup_mycpu(&oc->depot[myclusterid]); |
| 467 | crit_exit(); |
| 468 | return; |
| 469 | } |
| 470 | |
| 471 | /* |
| 472 | * Current magazine full, but previous magazine has room. XXX |
| 473 | */ |
| 474 | if (!MAGAZINE_FULL(cpucache->previous_magazine)) { |
| 475 | KKASSERT(cpucache->previous_magazine->cleaning + |
| 476 | cpucache->loaded_magazine->cleaning == 0); |
| 477 | swap(cpucache->loaded_magazine, cpucache->previous_magazine); |
| 478 | loadedmag = cpucache->loaded_magazine; |
| 479 | loadedmag->objects[loadedmag->rounds++] = obj; |
| 480 | if (cpucache->waiting) |
| 481 | wakeup_mycpu(&oc->depot[myclusterid]); |
| 482 | crit_exit(); |
| 483 | return; |
| 484 | } |
| 485 | |
| 486 | /* |
| 487 | * Both magazines full. Get an empty magazine from the depot and |
| 488 | * move a full loaded magazine to the depot. Even though the |
| 489 | * magazine may wind up with space available after we block on |
| 490 | * the spinlock, we still cycle it through to avoid the non-optimal |
| 491 | * corner-case. |
| 492 | * |
| 493 | * Obtain the depot spinlock. |
| 494 | */ |
| 495 | depot = &oc->depot[myclusterid]; |
| 496 | spin_lock_wr(&depot->spin); |
| 497 | |
| 498 | /* |
| 499 | * If an empty magazine is available in the depot, cycle it |
| 500 | * through and retry. |
| 501 | */ |
| 502 | if (!SLIST_EMPTY(&depot->emptymagazines)) { |
| 503 | KKASSERT(cpucache->previous_magazine->cleaning + |
| 504 | cpucache->loaded_magazine->cleaning == 0); |
| 505 | loadedmag = cpucache->previous_magazine; |
| 506 | cpucache->previous_magazine = cpucache->loaded_magazine; |
| 507 | cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines); |
| 508 | SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine); |
| 509 | |
| 510 | /* |
| 511 | * Return loadedmag to the depot. Due to blocking it may |
| 512 | * not be entirely full and could even be empty. |
| 513 | */ |
| 514 | if (MAGAZINE_EMPTY(loadedmag)) { |
| 515 | SLIST_INSERT_HEAD(&depot->emptymagazines, |
| 516 | loadedmag, nextmagazine); |
| 517 | spin_unlock_wr(&depot->spin); |
| 518 | } else { |
| 519 | SLIST_INSERT_HEAD(&depot->fullmagazines, |
| 520 | loadedmag, nextmagazine); |
| 521 | spin_unlock_wr(&depot->spin); |
| 522 | if (depot->waiting) |
| 523 | wakeup(depot); |
| 524 | } |
| 525 | goto retry; |
| 526 | } |
| 527 | |
| 528 | /* |
| 529 | * An empty mag is not available. This is a corner case which can |
| 530 | * occur due to cpus holding partially full magazines. Do not try |
| 531 | * to allocate a mag, just free the object. |
| 532 | */ |
| 533 | ++depot->unallocated_objects; |
| 534 | spin_unlock_wr(&depot->spin); |
| 535 | if (depot->waiting) |
| 536 | wakeup(depot); |
| 537 | crit_exit(); |
| 538 | oc->dtor(obj, oc->private); |
| 539 | oc->free(obj, oc->allocator_args); |
| 540 | } |
| 541 | |
| 542 | /* |
| 543 | * The object is being put back into the cache, but the caller has |
| 544 | * indicated that the object is not in any shape to be reused and should |
| 545 | * be dtor'd immediately. |
| 546 | */ |
| 547 | void |
| 548 | objcache_dtor(struct objcache *oc, void *obj) |
| 549 | { |
| 550 | struct magazinedepot *depot; |
| 551 | |
| 552 | depot = &oc->depot[myclusterid]; |
| 553 | spin_lock_wr(&depot->spin); |
| 554 | ++depot->unallocated_objects; |
| 555 | spin_unlock_wr(&depot->spin); |
| 556 | if (depot->waiting) |
| 557 | wakeup(depot); |
| 558 | oc->dtor(obj, oc->private); |
| 559 | oc->free(obj, oc->allocator_args); |
| 560 | } |
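
/*
 * Sketch: a caller that finds an object unfit for reuse destroys it
 * immediately instead of recycling it (hypothetical foo names):
 *
 *	if (foo_is_damaged(fp))
 *		objcache_dtor(foo_cache, fp);
 *	else
 *		objcache_put(foo_cache, fp);
 */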
| 561 | |
| 562 | /* |
| 563 | * Deallocate all objects in a magazine and free the magazine if requested. |
| 564 | * The magazine must already be disassociated from the depot. |
| 565 | * |
| 566 | * Must be called with a critical section held when called with a per-cpu |
| 567 | * magazine. The magazine may be indirectly modified during the loop. |
| 568 | * |
| 569 | * The number of objects freed is returned. |
| 570 | */ |
| 571 | static int |
| 572 | mag_purge(struct objcache *oc, struct magazine *mag, int freeit) |
| 573 | { |
| 574 | int count; |
| 575 | void *obj; |
| 576 | |
| 577 | count = 0; |
| 578 | ++mag->cleaning; |
| 579 | while (mag->rounds) { |
| 580 | obj = mag->objects[--mag->rounds]; |
| 581 | oc->dtor(obj, oc->private); /* MAY BLOCK */ |
| 582 | oc->free(obj, oc->allocator_args); /* MAY BLOCK */ |
| 583 | ++count; |
| 584 | |
| 585 | /* |
| 586 | * Cycle for interrupts |
| 587 | */ |
| 588 | if ((count & 15) == 0) { |
| 589 | crit_exit(); |
| 590 | crit_enter(); |
| 591 | } |
| 592 | } |
| 593 | --mag->cleaning; |
| 594 | if (freeit) |
| 595 | kfree(mag, M_OBJMAG); |
| 596 | return(count); |
| 597 | } |
| 598 | |
| 599 | /* |
| 600 | * Disassociate zero or more magazines from a magazine list associated with |
| 601 | * the depot, update the depot, and move the magazines to a temporary |
| 602 | * list. |
| 603 | * |
| 604 | * The caller must check the depot for waiters and wake it up, typically |
| 605 | * after disposing of the magazines this function loads onto the temporary |
| 606 | * list. |
| 607 | */ |
| 608 | static void |
| 609 | maglist_disassociate(struct magazinedepot *depot, struct magazinelist *maglist, |
| 610 | struct magazinelist *tmplist, boolean_t purgeall) |
| 611 | { |
| 612 | struct magazine *mag; |
| 613 | |
| 614 | while ((mag = SLIST_FIRST(maglist)) != NULL) { |
| 615 | SLIST_REMOVE_HEAD(maglist, nextmagazine); |
| 616 | SLIST_INSERT_HEAD(tmplist, mag, nextmagazine); |
| 617 | depot->unallocated_objects += mag->rounds; |
| 618 | } |
| 619 | } |
| 620 | |
| 621 | /* |
| 622 | * Deallocate all magazines and their contents from the passed temporary |
| 623 | * list. The magazines have already been accounted for by their depots. |
| 624 | * |
| 625 | * The total number of rounds freed is returned. This number is typically |
| 626 | * only used to determine whether a wakeup on the depot is needed or not. |
| 627 | */ |
| 628 | static int |
| 629 | maglist_purge(struct objcache *oc, struct magazinelist *maglist) |
| 630 | { |
| 631 | struct magazine *mag; |
| 632 | int count = 0; |
| 633 | |
| 634 | /* |
| 635 | * can't use SLIST_FOREACH because blocking releases the depot |
| 636 | * spinlock |
| 637 | */ |
| 638 | while ((mag = SLIST_FIRST(maglist)) != NULL) { |
| 639 | SLIST_REMOVE_HEAD(maglist, nextmagazine); |
| 640 | count += mag_purge(oc, mag, TRUE); |
| 641 | } |
| 642 | return(count); |
| 643 | } |
| 644 | |
| 645 | /* |
| 646 | * De-allocates all magazines on the full and empty magazine lists. |
| 647 | * |
| 648 | * Because this routine is called with a spinlock held, the magazines |
| 649 | * can only be disassociated and moved to a temporary list, not freed. |
| 650 | * |
| 651 | * The caller is responsible for freeing the magazines. |
| 652 | */ |
| 653 | static void |
| 654 | depot_disassociate(struct magazinedepot *depot, struct magazinelist *tmplist) |
| 655 | { |
| 656 | maglist_disassociate(depot, &depot->fullmagazines, tmplist, TRUE); |
| 657 | maglist_disassociate(depot, &depot->emptymagazines, tmplist, TRUE); |
| 658 | } |
| 659 | |
| 660 | #ifdef notneeded |
| 661 | void |
| 662 | objcache_reclaim(struct objcache *oc) |
| 663 | { |
	struct percpu_objcache *cache_percpu = &oc->cache_percpu[mycpuid];
| 665 | struct magazinedepot *depot = &oc->depot[myclusterid]; |
| 666 | struct magazinelist tmplist; |
| 667 | int count; |
| 668 | |
| 669 | SLIST_INIT(&tmplist); |
| 670 | crit_enter(); |
| 671 | count = mag_purge(oc, cache_percpu->loaded_magazine, FALSE); |
| 672 | count += mag_purge(oc, cache_percpu->previous_magazine, FALSE); |
| 673 | crit_exit(); |
| 674 | |
| 675 | spin_lock_wr(&depot->spin); |
| 676 | depot->unallocated_objects += count; |
| 677 | depot_disassociate(depot, &tmplist); |
| 678 | spin_unlock_wr(&depot->spin); |
| 679 | count += maglist_purge(oc, &tmplist); |
| 680 | if (count && depot->waiting) |
| 681 | wakeup(depot); |
| 682 | } |
| 683 | #endif |
| 684 | |
| 685 | /* |
| 686 | * Try to free up some memory. Return as soon as some free memory is found. |
| 687 | * For each object cache on the reclaim list, first try the current per-cpu |
| 688 | * cache, then the full magazine depot. |
| 689 | */ |
| 690 | boolean_t |
| 691 | objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags) |
| 692 | { |
| 693 | struct objcache *oc; |
| 694 | struct percpu_objcache *cpucache; |
| 695 | struct magazinedepot *depot; |
| 696 | struct magazinelist tmplist; |
| 697 | int i, count; |
| 698 | |
| 699 | SLIST_INIT(&tmplist); |
| 700 | |
| 701 | for (i = 0; i < nlist; i++) { |
| 702 | oc = oclist[i]; |
| 703 | cpucache = &oc->cache_percpu[mycpuid]; |
| 704 | depot = &oc->depot[myclusterid]; |
| 705 | |
| 706 | crit_enter(); |
| 707 | count = mag_purge(oc, cpucache->loaded_magazine, FALSE); |
| 708 | if (count == 0) |
| 709 | count += mag_purge(oc, cpucache->previous_magazine, FALSE); |
| 710 | crit_exit(); |
| 711 | if (count > 0) { |
| 712 | spin_lock_wr(&depot->spin); |
| 713 | depot->unallocated_objects += count; |
| 714 | spin_unlock_wr(&depot->spin); |
| 715 | if (depot->waiting) |
| 716 | wakeup(depot); |
| 717 | return (TRUE); |
| 718 | } |
| 720 | spin_lock_wr(&depot->spin); |
| 721 | maglist_disassociate(depot, &depot->fullmagazines, |
| 722 | &tmplist, FALSE); |
| 723 | spin_unlock_wr(&depot->spin); |
| 724 | count = maglist_purge(oc, &tmplist); |
| 725 | if (count > 0) { |
| 726 | if (depot->waiting) |
| 727 | wakeup(depot); |
| 728 | return (TRUE); |
| 729 | } |
| 730 | } |
| 731 | return (FALSE); |
| 732 | } |
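
/*
 * Sketch of a low-memory handler draining a set of related caches
 * (hypothetical cache pointers):
 *
 *	struct objcache *list[] = { foo_cache, bar_cache };
 *
 *	if (!objcache_reclaimlist(list, 2, M_NOWAIT))
 *		kprintf("nothing reclaimed\n");
 */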
| 733 | |
| 734 | /* |
| 735 | * Destroy an object cache. Must have no existing references. |
| 736 | */ |
| 737 | void |
| 738 | objcache_destroy(struct objcache *oc) |
| 739 | { |
| 740 | struct percpu_objcache *cache_percpu; |
| 741 | struct magazinedepot *depot; |
| 742 | int clusterid, cpuid; |
| 743 | struct magazinelist tmplist; |
| 744 | |
| 745 | SLIST_INIT(&tmplist); |
| 746 | for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) { |
| 747 | depot = &oc->depot[clusterid]; |
| 748 | spin_lock_wr(&depot->spin); |
| 749 | depot_disassociate(depot, &tmplist); |
| 750 | spin_unlock_wr(&depot->spin); |
| 751 | } |
| 752 | maglist_purge(oc, &tmplist); |
| 753 | |
| 754 | for (cpuid = 0; cpuid < ncpus; cpuid++) { |
| 755 | cache_percpu = &oc->cache_percpu[cpuid]; |
| 756 | |
| 757 | mag_purge(oc, cache_percpu->loaded_magazine, TRUE); |
| 758 | mag_purge(oc, cache_percpu->previous_magazine, TRUE); |
| 759 | cache_percpu->loaded_magazine = NULL; |
| 760 | cache_percpu->previous_magazine = NULL; |
| 761 | /* don't bother adjusting depot->unallocated_objects */ |
| 762 | } |
| 763 | |
| 764 | kfree(oc->name, M_TEMP); |
| 765 | kfree(oc, M_OBJCACHE); |
| 766 | } |
| 767 | |
| 768 | #if 0 |
| 769 | /* |
| 770 | * Populate the per-cluster depot with elements from a linear block |
 * of memory. Must be called individually for each cluster.
| 772 | * Populated depots should not be destroyed. |
| 773 | */ |
| 774 | void |
| 775 | objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size) |
| 776 | { |
| 777 | char *p = base; |
| 778 | char *end = (char *)base + (nelts * size); |
| 779 | struct magazinedepot *depot = &oc->depot[myclusterid]; |
	struct magazine *emptymag = mag_alloc(depot->magcapacity);
| 781 | |
| 782 | while (p < end) { |
| 783 | emptymag->objects[emptymag->rounds++] = p; |
| 784 | if (MAGAZINE_FULL(emptymag)) { |
| 785 | spin_lock_wr(&depot->spin); |
| 786 | SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag, |
| 787 | nextmagazine); |
| 788 | depot->unallocated_objects += emptymag->rounds; |
| 789 | spin_unlock_wr(&depot->spin); |
| 790 | if (depot->waiting) |
| 791 | wakeup(depot); |
| 792 | emptymag = mag_alloc(depot->magcapacity); |
| 793 | } |
| 794 | p += size; |
| 795 | } |
| 796 | if (MAGAZINE_EMPTY(emptymag)) { |
| 797 | mag_purge(oc, emptymag, TRUE); |
| 798 | } else { |
| 799 | spin_lock_wr(&depot->spin); |
| 800 | SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag, |
| 801 | nextmagazine); |
| 802 | depot->unallocated_objects += emptymag->rounds; |
| 803 | spin_unlock_wr(&depot->spin); |
| 804 | if (depot->waiting) |
| 805 | wakeup(depot); |
| 807 | } |
| 808 | } |
| 809 | #endif |
| 810 | |
| 811 | #if 0 |
| 812 | /* |
| 813 | * Check depot contention once a minute. |
| 814 | * 2 contested locks per second allowed. |
| 815 | */ |
| 816 | static int objcache_rebalance_period; |
| 817 | static const int objcache_contention_rate = 120; |
| 818 | static struct callout objcache_callout; |
| 819 | |
| 820 | #define MAXMAGSIZE 512 |
| 821 | |
| 822 | /* |
| 823 | * Check depot contention and increase magazine size if necessary. |
| 824 | */ |
| 825 | static void |
| 826 | objcache_timer(void *dummy) |
| 827 | { |
| 828 | struct objcache *oc; |
| 829 | struct magazinedepot *depot; |
| 830 | struct magazinelist tmplist; |
| 831 | |
	/*
	 * XXX we need to detect when an objcache is destroyed out from
	 * under us XXX
	 */
| 834 | |
| 835 | SLIST_INIT(&tmplist); |
| 836 | |
| 837 | spin_lock_wr(&objcachelist_spin); |
| 838 | SLIST_FOREACH(oc, &allobjcaches, oc_next) { |
| 839 | depot = &oc->depot[myclusterid]; |
| 840 | if (depot->magcapacity < MAXMAGSIZE) { |
| 841 | if (depot->contested > objcache_contention_rate) { |
| 842 | spin_lock_wr(&depot->spin); |
| 843 | depot_disassociate(depot, &tmplist); |
| 844 | depot->magcapacity *= 2; |
| 845 | spin_unlock_wr(&depot->spin); |
| 846 | kprintf("objcache_timer: increasing cache %s" |
| 847 | " magsize to %d, contested %d times\n", |
| 848 | oc->name, depot->magcapacity, |
| 849 | depot->contested); |
| 850 | } |
| 851 | depot->contested = 0; |
| 852 | } |
| 853 | spin_unlock_wr(&objcachelist_spin); |
| 854 | if (maglist_purge(oc, &tmplist) > 0 && depot->waiting) |
| 855 | wakeup(depot); |
| 856 | spin_lock_wr(&objcachelist_spin); |
| 857 | } |
| 858 | spin_unlock_wr(&objcachelist_spin); |
| 859 | |
| 860 | callout_reset(&objcache_callout, objcache_rebalance_period, |
| 861 | objcache_timer, NULL); |
| 862 | } |
| 863 | |
| 864 | #endif |
| 865 | |
| 866 | static void |
| 867 | objcache_init(void) |
| 868 | { |
| 869 | spin_init(&objcachelist_spin); |
| 870 | #if 0 |
| 871 | callout_init(&objcache_callout); |
| 872 | objcache_rebalance_period = 60 * hz; |
| 873 | callout_reset(&objcache_callout, objcache_rebalance_period, |
| 874 | objcache_timer, NULL); |
| 875 | #endif |
| 876 | } |
| 877 | SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0); |