/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_memory.c 248663 2013-03-23 20:46:47Z dumbbell $
 **************************************************************************/
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/export.h>

#define TTM_MEMORY_ALLOC_RETRIES	4
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return ksnprintf(buffer, PAGE_SIZE, "%llu\n",
			 (unsigned long long) val >> 10);
}
static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = ksscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	/* Values are exposed in kiB; convert to bytes. */
	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};
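
/*
 * With the kobject layout above, each zone's accounting knobs live under
 * the parent "memory_accounting" kobject (see ttm_mem_global_init() below)
 * as <zone name>/{zone_memory,emergency_memory,available_memory,swap_limit,
 * used_memory}. All values are read and written in kiB; the show/store
 * handlers shift by 10. Whether this hierarchy is actually visible from
 * userland depends on how this port emulates sysfs, so treat the paths as
 * a sketch rather than a guarantee.
 */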
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}

	return false;
}
/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}
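
/*
 * Illustrative sketch (not part of the original file): a consumer such as
 * the buffer-object layer installs its swapout handler through
 * ttm_mem_init_shrink()/ttm_mem_register_shrink() from ttm_memory.h.
 * The example_* names below are hypothetical.
 */
#if 0	/* example only */
static int example_do_shrink(struct ttm_mem_shrink *shrink)
{
	/* Swap out or free one buffer; return 0 on success. */
	return 0;
}

static void example_register(struct ttm_mem_global *glob)
{
	static struct ttm_mem_shrink shrink;

	ttm_mem_init_shrink(&shrink, example_do_shrink);
	ttm_mem_register_shrink(glob, &shrink);
}
#endif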
static void ttm_shrink_work(void *arg, int pending __unused)
{
	struct ttm_mem_global *glob = arg;

	ttm_shrink(glob, true, 0ULL);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    uint64_t mem)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	int ret;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
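
/*
 * The default limits above work out to fixed fractions of the zone size:
 * max_mem = 1/2, emer_mem = 3/4 and swap_limit = 3/8 of zone_mem. For
 * example, with a 1 GiB zone: max_mem = 512 MiB, emer_mem = 768 MiB, and
 * the shrink task is kicked once used_mem exceeds 384 MiB.
 */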
#ifndef CONFIG_HIGHMEM
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   uint64_t mem)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	int ret;

	/*
	 * No special dma32 zone needed.
	 */
	if ((physmem * PAGE_SIZE) <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really needs to be.
	 */
	if (mem > ((uint64_t) 1ULL << 32))
		mem = ((uint64_t) 1ULL << 32);

	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif /* !CONFIG_HIGHMEM */
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	uint64_t mem;
	int ret;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_init(&glob->lock, "ttmemglob");
	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
					    taskqueue_thread_enqueue,
					    &glob->swap_queue);
	taskqueue_start_threads(&glob->swap_queue, 1, TDPRI_KERN_DAEMON,
				-1, "ttm swap");
	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	/*
	 * Managed contiguous memory for TTM. Only use kernel-reserved
	 * dma memory for TTM, which can be controlled via /boot/loader.conf
	 * (e.g. vm.dma_reserved=256m). This is the only truly dependable
	 * source of contiguous memory.
	 */
	mem = (uint64_t)vm_contig_avail_pages() * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	pr_info("(struct ttm_mem_global *)%p\n", glob);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
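
/*
 * For reference, the reservation mentioned in the comment above would be
 * configured at boot time with a /boot/loader.conf entry such as:
 *
 *	vm.dma_reserved="256m"
 *
 * (illustrative value; the tunable name is the one quoted in the comment
 * in ttm_mem_global_init() above).
 */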
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	taskqueue_drain(glob->swap_queue, &glob->work);
	taskqueue_free(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		taskqueue_enqueue(glob->swap_queue, &glob->work);
}
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}
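
/*
 * The shrink target above asks for 25% more than the failed request plus a
 * small constant: for a 1 MiB allocation it tries to free
 * 1 MiB + 256 KiB + 16 bytes before retrying, and gives up after
 * TTM_MEMORY_ALLOC_RETRIES attempts.
 */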
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
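
/*
 * The 0x00100000 pfn threshold above corresponds to the 4 GiB boundary
 * with 4 KiB pages (0x100000 * 4096 = 2^32): pages that cannot lie in the
 * dma32 range are accounted against the kernel zone only, so the dma32
 * zone tracks only memory a 32-bit DMA engine could actually reach.
 */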
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
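
/*
 * Usage examples: ttm_round_pot(3) == 4 and ttm_round_pot(100) == 128
 * (rounded up to the next power of two), while a non-power-of-two size
 * larger than PAGE_SIZE is only rounded up to a whole number of pages,
 * e.g. ttm_round_pot(3 * PAGE_SIZE + 5) == 4 * PAGE_SIZE.
 */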