1 /* _NVRM_COPYRIGHT_BEGIN_
3 * Copyright 2001-2002 by NVIDIA Corporation. All rights reserved. All
4 * information contained herein is proprietary and confidential to NVIDIA
5 * Corporation. Any use, reproduction, or disclosure without the written
6 * permission of NVIDIA Corporation is prohibited.
12 #include "os-interface.h"
14 #include "nv-freebsd.h"
17 /* DragonFly compat */
18 #include <machine/pmap_inval.h>
19 #if __DragonFly_version >= 200204
/*
 * pmap_invalidate_range: DragonFly compatibility shim for the FreeBSD API
 * of the same name. Queues a TLB invalidation for every page in
 * [sva, eva) and issues them in a single flush at the end.
 * NOTE(review): this listing is incomplete — the return type / opening
 * brace, the declaration of 'va', the closing brace and the matching
 * #endif are elided from this view.
 */
24 pmap_invalidate_range(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
26 struct pmap_inval_info info;
29 pmap_inval_init(&info);
/* walk the range page by page; pmap_inval_add() batches the shootdowns */
30 for (va = sva; va < eva; va += PAGE_SIZE)
31 pmap_inval_add(&info, pmap, va);
/* fire all accumulated invalidations at once */
32 pmap_inval_flush(&info);
37 * The NVIDIA kernel module's malloc identifier, needed for both tracking
38 * and actual allocation/freeing purposes. M_NVIDIA is declared elsewhere
39 * to make it known to other parts of the kernel module (nv-freebsd.h).
42 MALLOC_DEFINE(M_NVIDIA, "nvidia", "NVIDIA memory allocations");
/* Size of the static scratch buffer used by nv_printf()/nv_os_log(). */
45 #define MAX_ERROR_STRING 256
/*
 * os_alloc_contig_pages: allocate physically contiguous, page-aligned
 * memory for the resource manager via contigmalloc().
 * Returns RM_OK on success, RM_ERROR if the allocation returned NULL.
 * NOTE(review): the parameter list and function braces are elided from
 * this view; 'address' and 'size' presumably come from the parameters.
 */
47 RM_STATUS NV_API_CALL os_alloc_contig_pages(
52 *address = contigmalloc(size, M_NVIDIA, 0, 0, ~0, PAGE_SIZE, 0);
53 return *address ? RM_OK : RM_ERROR;
/*
 * os_free_contig_pages: release memory from os_alloc_contig_pages().
 * The caller must pass the same 'size' used at allocation time, as
 * contigfree() requires it.
 */
56 void NV_API_CALL os_free_contig_pages(
61 contigfree(address, size, M_NVIDIA);
65 * The core resource manager's favorite source of memory, this routine is
66 * called from different contexts, including ISRs. This means that it can
67 * not be allowed to sleep when memory is low.
/*
 * os_alloc_mem: general-purpose kernel allocation for the RM core.
 * M_INTWAIT | M_NULLOK makes the allocation usable from interrupt
 * context (it may return NULL rather than sleep); M_ZERO zero-fills.
 * Returns RM_OK on success, RM_ERROR when malloc() returns NULL.
 */
70 RM_STATUS NV_API_CALL os_alloc_mem(
75 /* XXX Fix me? (malloc flags) */
76 *address = malloc(size, M_NVIDIA, M_INTWAIT | M_ZERO | M_NULLOK);
77 return *address ? RM_OK : RM_ERROR;
/* os_free_mem: counterpart of os_alloc_mem(); kernel free() needs no size. */
80 void NV_API_CALL os_free_mem(void *address)
82 free(address, M_NVIDIA);
/*
 * Conversions between wall time and scheduler ticks ('hz' ticks/second).
 * NOTE(review): integer math truncates toward zero, and in the *_TO_TICKS
 * macros the multiply happens before the divide, so a sufficiently large
 * 'ms'/'us' argument can overflow — confirm callers' ranges.
 */
85 #define NV_MSECS_PER_TICK (1000 / hz)
86 #define NV_MSECS_TO_TICKS(ms) ((ms) * hz / 1000)
87 #define NV_USECS_PER_TICK (1000000 / hz)
88 #define NV_USECS_TO_TICKS(us) ((us) * hz / 1000000)
/*
 * os_delay: sleep for 'MilliSeconds'. Interrupt threads busy-wait with
 * DELAY(); normal threads sleep in tick-sized chunks against a deadline
 * computed from getmicrotime(), then busy-wait any sub-tick remainder.
 * NOTE(review): several lines (the 'ticks' declaration, error return for
 * over-long ISR delays, tv_end.tv_sec setup, do/while opener, closing
 * braces) are elided from this view.
 */
90 RM_STATUS NV_API_CALL os_delay(U032 MilliSeconds)
92 unsigned long MicroSeconds;
94 struct timeval tv_end, tv_aux;
/* capture "now" so the absolute deadline can be computed below */
96 getmicrotime(&tv_aux);
/* long delays are refused in interrupt threads */
98 if (__NV_ITHREAD() && (MilliSeconds > NV_MAX_ISR_DELAY_MS))
/* interrupt context cannot sleep: spin with DELAY() instead */
101 if (__NV_ITHREAD()) {
102 DELAY(MilliSeconds * 1000);
106 MicroSeconds = MilliSeconds * 1000;
107 tv_end.tv_usec = MicroSeconds;
109 /* tv_end = tv_aux + tv_end */
110 NV_TIMERADD(&tv_aux, &tv_end, &tv_end);
112 ticks = NV_USECS_TO_TICKS(MicroSeconds);
/*
 * Sleep loop: after each tsleep() re-read the clock and recompute the
 * microseconds remaining until the deadline; PCATCH lets signals
 * interrupt the sleep.
 */
116 tsleep((void *)os_delay, PCATCH, "delay", ticks);
117 getmicrotime(&tv_aux);
118 if (NV_TIMERCMP(&tv_aux, &tv_end, <)) {
119 /* tv_aux = tv_end - tv_aux */
120 NV_TIMERSUB(&tv_end, &tv_aux, &tv_aux);
121 MicroSeconds = tv_aux.tv_usec + (tv_aux.tv_sec * 1000000);
124 } while ((ticks = NV_USECS_TO_TICKS(MicroSeconds)) > 0);
/* sub-tick leftover: busy-wait the final microseconds (DELAY elided) */
127 if (MicroSeconds > 0)
/*
 * os_delay_us: microsecond-granularity delay; like os_delay(), refuses
 * over-long delays from interrupt threads. The body (the actual DELAY or
 * sleep, and the return statements) is elided from this view.
 */
133 RM_STATUS NV_API_CALL os_delay_us(U032 MicroSeconds)
135 if (__NV_ITHREAD() && (MicroSeconds > NV_MAX_ISR_DELAY_US))
/*
 * os_get_cpu_frequency: CPU clock derived from the TSC, in MHz.
 * NOTE(review): the +4999 fudge is not round-to-nearest (that would be
 * +500000 before dividing by 1e6); intent unclear — confirm against the
 * other OS ports of this file.
 */
141 U032 NV_API_CALL os_get_cpu_frequency(void)
143 return ((tsc_frequency + 4999) / 1000000);
/* os_get_current_process: PID of the process on whose behalf we run. */
146 U032 NV_API_CALL os_get_current_process(void)
148 return curproc->p_pid;
151 RM_STATUS NV_API_CALL os_get_current_time(
/*
 * os_is_administrator: TRUE when the calling thread holds root privilege.
 * DragonFly >= 2.2.4 provides priv_check(); older kernels use suser().
 * Both return 0 on success, hence the inverted ternaries.
 * (The #else and #endif lines are elided from this view.)
 */
166 BOOL NV_API_CALL os_is_administrator(PHWINFO pDev)
168 #if __DragonFly_version >= 200204
169 return priv_check(CURTHREAD, PRIV_ROOT) ? FALSE : TRUE;
171 return suser(CURTHREAD) ? FALSE : TRUE;
/*
 * Programmed-I/O port accessors for the RM. These use the raw x86
 * inb/outb-family instructions rather than bus_space, as the XXX notes
 * acknowledge. Only fragments of each function are visible here (the
 * parameter lists, inX() reads and braces are elided).
 */
175 U008 NV_API_CALL os_io_read_byte(
180 /* XXX Fix me? (bus_space access) */
184 void NV_API_CALL os_io_write_byte(
190 /* XXX Fix me? (bus_space access) */
191 outb(address, value);
194 U016 NV_API_CALL os_io_read_word(
199 /* XXX Fix me? (bus_space access) */
203 void NV_API_CALL os_io_write_word(
209 /* XXX Fix me? (bus_space access) */
/*
 * NOTE(review): 'return' with an expression inside what the signature
 * declares as a void function (and outw() itself returns void) — this is
 * a C constraint violation. Should read plainly "outw(address, value);"
 * like the outb()/outl() cases below. Fix when the full file is in view.
 */
210 return outw(address, value);
213 U032 NV_API_CALL os_io_read_dword(
218 /* XXX Fix me? (bus_space access) */
222 void NV_API_CALL os_io_write_dword(
228 /* XXX Fix me? (bus_space access) */
229 outl(address, value);
/*
 * os_map_kernel_space: map a physical range into kernel virtual space
 * with the requested caching attribute, via pmap_mapdev_attr().
 * 'start' must be page-aligned; 'size' is rounded up to whole pages.
 * Returns the kernel virtual address, or (presumably) NULL on the error
 * paths whose bodies are elided from this view.
 * NOTE(review): the switch header, 'break' statements, default: label and
 * the error return are all elided — verify against the full source.
 */
232 void* NV_API_CALL os_map_kernel_space(
/* a non-PAE 32-bit x86 kernel cannot address physical memory above 4GB */
240 #if defined(NVCPU_X86) && !defined(PAE)
241 if (start > 0xffffffff)
/* refuse physical addresses that are not page-aligned */
245 if (start & PAGE_MASK)
248 size = NV_ALIGN_UP(size, PAGE_SIZE);
/* translate the RM's memory type to a PAT caching mode */
251 case NV_MEMORY_CACHED:
252 map_mode = PAT_WRITE_BACK;
254 case NV_MEMORY_WRITECOMBINED:
255 map_mode = PAT_WRITE_COMBINING;
257 case NV_MEMORY_UNCACHED:
258 case NV_MEMORY_DEFAULT:
259 map_mode = PAT_UNCACHEABLE;
262 nv_printf(NV_DBG_ERRORS,
263 "NVRM: unknown mode in os_map_kernel_space()\n");
267 return pmap_mapdev_attr(start, size, map_mode);
/* os_unmap_kernel_space: tear down a mapping made by os_map_kernel_space(). */
270 void NV_API_CALL os_unmap_kernel_space(
275 pmap_unmapdev_attr((vm_offset_t)address, size);
/*
 * os_set_mem_range: register a write-combining memory range with the
 * kernel's memory-range (MTRR) driver via mem_range_attr_set().
 * NOTE(review): mrd.mr_base/mr_len setup and the return statements are
 * elided from this view; presumably failure maps to an RM error status.
 */
278 RM_STATUS NV_API_CALL os_set_mem_range(
285 struct mem_range_desc mrd;
289 mrd.mr_flags = MDF_WRITECOMBINE;
/* the owner tag attributes this range to the NVIDIA driver */
291 strcpy(mrd.mr_owner, "NVIDIA");
292 arg = MEMRANGE_SET_UPDATE;
294 if (mem_range_attr_set(&mrd, &arg))
/*
 * os_unset_mem_range: remove the range registered by os_set_mem_range().
 */
300 RM_STATUS NV_API_CALL os_unset_mem_range(
306 struct mem_range_desc mrd;
310 arg = MEMRANGE_SET_REMOVE;
312 if (mem_range_attr_set(&mrd, &arg))
320 * The current debug level is used to determine if certain debug messages
321 * are printed to the system console/log files or not. It defaults to the
322 * highest debug level, i.e. the lowest debug output.
325 U032 cur_debuglevel = 0xffffffff;
/*
 * os_dbg_init: pick up an override for the debug level from the
 * "NVreg/ResmanDebugLevel" registry key, if one was set by the user.
 * NOTE(review): the 'sp'/'new_debuglevel' declarations and the NULL
 * check after NV_UMA_ZONE_ALLOC_STACK are elided from this view.
 */
327 void NV_API_CALL os_dbg_init(void)
332 NV_UMA_ZONE_ALLOC_STACK(sp);
336 if (rm_read_registry_dword(sp, NULL, "NVreg", "ResmanDebugLevel",
337 &new_debuglevel) == RM_OK) {
/* 0xffffffff means "unset": keep the compiled-in default */
338 if (new_debuglevel != 0xffffffff)
339 cur_debuglevel = new_debuglevel;
342 NV_UMA_ZONE_FREE_STACK(sp);
/* os_schedule: on-demand rescheduling is not supported on this platform. */
345 RM_STATUS NV_API_CALL os_schedule(void)
347 return RM_ERR_NOT_SUPPORTED;
/* os_dbg_set_level: runtime override of the RM debug verbosity. */
350 void NV_API_CALL os_dbg_set_level(U032 new_debuglevel)
352 cur_debuglevel = new_debuglevel;
355 void NV_API_CALL os_dbg_breakpoint(void)
/*
 * out_string: raw console output; compiled to a no-op unless this is a
 * DEBUG or QA build.
 */
362 void NV_API_CALL out_string(const char *message)
364 #if defined(DEBUG) || defined(QA_BUILD)
365 kprintf("%s", message);
/* Static scratch buffer shared by nv_printf() and nv_os_log() below. */
369 static char nv_error_string[MAX_ERROR_STRING];
/*
 * nv_printf: RM debug print. Formats into the static buffer and emits it
 * with kprintf() when 'debuglevel' passes the threshold encoded in bits
 * 4-5 of cur_debuglevel. Returns the number of characters formatted
 * (0 when the message is filtered out).
 * NOTE(review): kvsprintf() is unbounded — a message longer than
 * MAX_ERROR_STRING overflows nv_error_string; and the static buffer is
 * shared without locking. Confirm and consider kvsnprintf here too.
 */
371 int NV_API_CALL nv_printf(
377 char *message = nv_error_string;
379 int chars_written = 0;
381 if (debuglevel >= ((cur_debuglevel >> 4) & 3)) {
382 __va_start(arglist, format);
383 chars_written = kvsprintf(message, format, arglist);
385 kprintf("%s", message);
388 return chars_written;
/*
 * nv_snprintf: bounded formatted print into a caller-supplied buffer;
 * returns whatever character count kvsnprintf() reports. The parameter
 * list, arglist declaration and __va_end are elided from this view.
 */
391 int NV_API_CALL nv_snprintf(
401 __va_start(arglist, fmt);
402 chars_written = kvsnprintf(buf, size, fmt, arglist);
405 return chars_written;
/*
 * nv_os_log: kernel-log output with an "NVRM: " prefix. The vararg tail
 * is appended after the prefix with an explicit bound, so the static
 * buffer cannot overflow on this path (unlike nv_printf above).
 */
408 void NV_API_CALL nv_os_log(
415 ksprintf(nv_error_string, "NVRM: ");
416 l = strlen(nv_error_string);
417 kvsnprintf(nv_error_string + l, MAX_ERROR_STRING - l, fmt, ap);
418 kprintf("%s", nv_error_string);
/* os_mem_cmp: thin wrapper over memcmp(). */
421 S032 NV_API_CALL os_mem_cmp(
427 return memcmp(buf0, buf1, length);
/*
 * os_mem_copy: copy 'length' bytes and return the destination pointer.
 * On x86-64 a byte-by-byte loop is used instead of memcpy() — presumably
 * to avoid an optimized memcpy touching FP/SSE state in the kernel, but
 * that rationale is not visible in this view (TODO confirm). The 'i'
 * declaration and the x86-64 return statement are elided.
 */
430 U008* NV_API_CALL os_mem_copy(
436 #if defined(NVCPU_X86_64)
438 for (i = 0; i < length; i++) dst[i] = src[i];
441 return memcpy(dst, src, length);
/*
 * os_memcpy_from_user / os_memcpy_to_user: copy across the user/kernel
 * boundary. Addresses below VM_MAX_USER_ADDRESS are treated as user
 * pointers and go through copyin()/copyout(); anything else is assumed
 * to be a kernel address and copied directly.
 * NOTE(review): os_mem_copy() returns the (non-NULL) destination, so the
 * kernel-to-kernel fallback line evaluates truthy and would always yield
 * RM_ERR_INVALID_POINTER — the ternary polarity looks inverted on that
 * line. Confirm against the reference FreeBSD port before fixing.
 */
445 RM_STATUS NV_API_CALL os_memcpy_from_user(
451 if (src < (void *) VM_MAX_USER_ADDRESS)
452 return copyin(src, dst, length) ? RM_ERR_INVALID_POINTER : RM_OK;
454 return os_mem_copy(dst, src, length) ? RM_ERR_INVALID_POINTER : RM_OK;
457 RM_STATUS NV_API_CALL os_memcpy_to_user(
463 if (dst < (void *) VM_MAX_USER_ADDRESS)
464 return copyout(src, dst, length) ? RM_ERR_INVALID_POINTER : RM_OK;
466 return os_mem_copy(dst, src, length) ? RM_ERR_INVALID_POINTER : RM_OK;
/* os_mem_set: thin wrapper over memset(). */
469 void* NV_API_CALL os_mem_set(
475 return memset(b, c, length);
/* os_string_compare: thin wrapper over strcmp(). */
478 S032 NV_API_CALL os_string_compare(
483 return strcmp(s1, s2);
/* os_string_copy: thin wrapper over strcpy(); caller guarantees dst size. */
486 char* NV_API_CALL os_string_copy(
491 return strcpy(dst, src);
494 U032 NV_API_CALL os_string_length(const char* s)
/*
 * os_strncpy_from_user: bounded NUL-terminated copy from user space; any
 * copyinstr() failure (fault or buffer overflow) maps to
 * RM_ERR_INVALID_POINTER.
 */
499 RM_STATUS NV_API_CALL os_strncpy_from_user(
505 return copyinstr(src, dst, n, NULL) ? RM_ERR_INVALID_POINTER : RM_OK;
508 U032 NV_API_CALL os_get_page_size(void)
513 NvU64 NV_API_CALL os_get_page_mask(void)
516 * On FreeBSD, PAGE_MASK means (PAGE_SIZE - 1); on Linux it means the
517 * opposite, ~(PAGE_SIZE - 1); that is what this function is expected
/*
 * os_get_system_memory_size: physical memory expressed as a count of
 * RM_PAGE_SIZE pages; 'physmem' is a page count, hence * PAGE_SIZE first.
 */
523 NvU64 NV_API_CALL os_get_system_memory_size(void)
525 return ((NvU64)physmem * PAGE_SIZE) / RM_PAGE_SIZE;
528 U032 NV_API_CALL os_get_cpu_count(void)
/*
 * os_flush_cpu_cache: write back and invalidate this CPU's caches
 * (local CPU only — see the XXX about IPIs below).
 */
533 RM_STATUS NV_API_CALL os_flush_cpu_cache(void)
536 * XXX This will do for now, but this may need to be extended
537 * to make IPI calls (flushing all caches).
539 __asm__ __volatile__("wbinvd": : :"memory");
/* os_flush_cpu_write_combine_buffer: drain WC buffers with a store fence. */
543 void NV_API_CALL os_flush_cpu_write_combine_buffer(void)
545 __asm__ __volatile__("sfence": : :"memory");
/* SMP barriers are not implemented on this platform. */
548 RM_STATUS NV_API_CALL os_raise_smp_barrier(void)
550 return RM_ERR_NOT_SUPPORTED;
553 RM_STATUS NV_API_CALL os_clear_smp_barrier(void)
555 return RM_ERR_NOT_SUPPORTED;
/* Fragment of struct os_mutex: the spinlock guarding the semaphore state. */
560 struct spinlock lock;
/*
 * os_alloc_sema: create an RM semaphore (struct os_mutex). Allocates the
 * per-semaphore stack object and the mutex, initializes the spinlock,
 * and returns the opaque handle via *semaphore. On partial failure the
 * already-acquired resources are released.
 * NOTE(review): the stack-allocation NULL check, refcnt/sp field
 * initialization and the RM_OK return are elided from this view.
 */
564 RM_STATUS NV_API_CALL os_alloc_sema(void **semaphore)
567 struct os_mutex *mtx;
570 NV_UMA_ZONE_ALLOC_STACK(sp);
572 return RM_ERR_NO_FREE_MEM;
574 status = os_alloc_mem((void **)&mtx, sizeof(struct os_mutex));
575 if (status != RM_OK) {
/* undo the stack allocation if the mutex allocation failed */
576 NV_UMA_ZONE_FREE_STACK(sp);
580 spin_init(&mtx->lock);
585 *semaphore = (void *) mtx;
/*
 * os_free_sema: destroy a semaphore from os_alloc_sema(), releasing the
 * stack object, the spinlock and the os_mutex itself.
 */
590 RM_STATUS NV_API_CALL os_free_sema(void *semaphore)
592 struct os_mutex *mtx = semaphore;
596 NV_UMA_ZONE_FREE_STACK(sp);
598 spin_uninit(&mtx->lock);
600 os_free_mem(semaphore);
/*
 * os_acquire_sema: take the semaphore, sleeping on "nvsemaq" while it is
 * held by someone else; interrupts are masked via rm_disable_interrupts()
 * once acquired. msleep() was renamed ssleep() in DragonFly 2.2.6, hence
 * the version check.
 * NOTE(review): the refcount test/decrement and the surrounding loop or
 * branch structure are elided from this view.
 */
605 RM_STATUS NV_API_CALL os_acquire_sema(void *semaphore)
607 struct os_mutex *mtx = semaphore;
609 spin_lock_wr(&mtx->lock);
611 rm_disable_interrupts(mtx->sp);
614 #if __DragonFly_version < 200206
615 msleep(mtx, &mtx->lock, 0, "nvsemaq", 0);
617 ssleep(mtx, &mtx->lock, 0, "nvsemaq", 0);
619 spin_unlock_wr(&mtx->lock);
/*
 * os_cond_acquire_sema: non-blocking try-acquire. Fails immediately when
 * the semaphore is already held (refcnt < 1); otherwise takes it and
 * masks interrupts like os_acquire_sema(). Return statements (TRUE/FALSE)
 * are elided from this view.
 */
624 BOOL NV_API_CALL os_cond_acquire_sema(void *semaphore)
626 struct os_mutex *mtx = semaphore;
628 spin_lock_wr(&mtx->lock);
/* refcnt < 1 means the semaphore is currently held: give up */
629 if (mtx->refcnt < 1) {
630 spin_unlock_wr(&mtx->lock);
633 rm_disable_interrupts(mtx->sp);
635 spin_unlock_wr(&mtx->lock);
/*
 * os_release_sema: release the semaphore and re-enable interrupts; the
 * refcount update and waiter wakeup are elided from this view.
 */
641 RM_STATUS NV_API_CALL os_release_sema(void *semaphore)
643 struct os_mutex *mtx = semaphore;
645 spin_lock_wr(&mtx->lock);
649 rm_enable_interrupts(mtx->sp);
651 spin_unlock_wr(&mtx->lock);
/* os_is_acquired_sema: TRUE when the semaphore is currently held. */
656 BOOL NV_API_CALL os_is_acquired_sema(void *semaphore)
658 struct os_mutex *mtx = semaphore;
659 return (mtx->refcnt < 1);
/*
 * os_pat_supported: reports PAT as unavailable to the RM — see the
 * original comment below; presumably returns FALSE (return elided).
 */
662 BOOL NV_API_CALL os_pat_supported(void)
665 * FreeBSD has no native PAT support and there's no good
666 * way to implement it privately as we do on Linux.
671 void NV_API_CALL os_register_compatible_ioctl(
678 void NV_API_CALL os_unregister_compatible_ioctl(
685 RM_STATUS NV_API_CALL os_disable_console_access(void)
690 RM_STATUS NV_API_CALL os_enable_console_access(void)
/*
 * os_acquire_spinlock / os_release_spinlock: treat an os_mutex handle as
 * a raw spinlock. The NvU64 "IRQL" value returned/consumed for the RM's
 * benefit is handled on lines elided from this view; 'oldIrql' is unused
 * in the visible code.
 */
695 NvU64 NV_API_CALL os_acquire_spinlock(void *pSema)
697 struct os_mutex *mtx = pSema;
699 spin_lock_wr(&mtx->lock);
704 void NV_API_CALL os_release_spinlock(void *pSema, NvU64 oldIrql)
706 struct os_mutex *mtx = pSema;
708 spin_unlock_wr(&mtx->lock);