/*
 * SYS/USCHED.H
 *
 * Userland scheduler API
 */

#ifndef _SYS_USCHED_H_
#define _SYS_USCHED_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SYSTIMER_H_
#include <sys/systimer.h>
#endif

#define NAME_LENGTH 32

struct lwp;
struct proc;
struct globaldata;

struct usched {
        TAILQ_ENTRY(usched) entry;
        const char *name;
        const char *desc;
        void (*usched_register)(void);
        void (*usched_unregister)(void);
        void (*acquire_curproc)(struct lwp *);
        void (*release_curproc)(struct lwp *);
        void (*setrunqueue)(struct lwp *);
        void (*schedulerclock)(struct lwp *, sysclock_t, sysclock_t);
        void (*recalculate)(struct lwp *);
        void (*resetpriority)(struct lwp *);
        void (*heuristic_forking)(struct lwp *, struct lwp *);
        void (*heuristic_exiting)(struct lwp *, struct proc *);
        void (*uload_update)(struct lwp *);
        void (*setcpumask)(struct usched *, cpumask_t);
        void (*yield)(struct lwp *);
        void (*changedcpu)(struct lwp *);
};

union usched_data {
        /*
         * BSD4 scheduler.
         */
        struct {
                short priority;         /* lower is better */
                char unused01;          /* (currently not used) */
                char rqindex;
                int batch;              /* batch mode heuristic */
                int estcpu;             /* dynamic priority modification */
                u_short rqtype;         /* protected copy of rtprio type */
                u_short unused02;
        } bsd4;
        struct {
                short priority;         /* lower is better */
                char forked;            /* lock cpu during fork */
                char rqindex;
                short estfast;          /* fast estcpu collapse mode */
                short uload;            /* for delta uload adjustments */
                int estcpu;             /* dynamic priority modification */
                u_short rqtype;         /* protected copy of rtprio type */
                u_short qcpu;           /* which cpu are we enqueued on? */
                u_short rrcount;        /* reset when moved to runq tail */
                u_short unused01;
                u_short unused02;
                u_short unused03;
        } dfly;

        int pad[6];                     /* PAD for future expansion */
};

/*
 * Flags for usched_ctl()
 */
#define USCH_ADD        0x00000001
#define USCH_REM        0x00000010

#endif  /* _KERNEL || _KERNEL_STRUCTURES */

#define USCHED_SET_SCHEDULER    0
#define USCHED_SET_CPU          1
#define USCHED_ADD_CPU          2
#define USCHED_DEL_CPU          3
#define USCHED_GET_CPU          4
#define USCHED_GET_CPUMASK      5       /* since DragonFly 4.5 */
#define USCHED_SET_CPUMASK      6       /* since DragonFly 4.7 */

/*
 * Kernel variables and procedures, or user system calls.
 */
#ifdef _KERNEL

extern struct usched usched_bsd4;
extern struct usched usched_dfly;
extern struct usched usched_dummy;
void dfly_acquire_curproc(struct lwp *);
extern cpumask_t usched_mastermask;
extern int sched_ticks;         /* From sys/kern/kern_clock.c */

int usched_ctl(struct usched *, int);
struct usched *usched_init(void);
void usched_schedulerclock(struct lwp *, sysclock_t, sysclock_t);

#endif
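The kernel-facing pieces above fit together roughly as follows: a scheduler implementation fills in a struct usched with its callbacks and hands it to usched_ctl() with USCH_ADD (USCH_REM presumably removes it again). The sketch below is illustrative only and not code from the tree; the "example" name, the stub callbacks, and the use of designated initializers are assumptions made for brevity, and a real scheduler such as usched_dfly populates every hook.

/* Illustrative sketch only -- not part of sys/usched.h or the tree. */
static void example_acquire_curproc(struct lwp *lp) { (void)lp; }
static void example_release_curproc(struct lwp *lp) { (void)lp; }
static void example_setrunqueue(struct lwp *lp)     { (void)lp; }
/* ...the remaining struct usched callbacks would be implemented too... */

static struct usched usched_example = {
        .name = "example",
        .desc = "placeholder scheduler used only for illustration",
        .acquire_curproc = example_acquire_curproc,
        .release_curproc = example_release_curproc,
        .setrunqueue = example_setrunqueue,
        /* ...remaining hooks filled in the same fashion... */
};

static void
example_sched_attach(void)
{
        /* Make the scheduler selectable; USCH_REM would unregister it. */
        usched_ctl(&usched_example, USCH_ADD);
}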
#if !defined(_KERNEL) || defined(_KERNEL_VIRTUAL)

int usched_set(pid_t, int, void *, int);

#endif

#endif
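From userland, the USCHED_* commands are issued through usched_set(), prototyped just above. A minimal sketch of pinning the calling process to CPU 0 might look like the following; the details that USCHED_SET_CPU takes a pointer to an int cpu id and that the byte count is sizeof(int) follow the usched_set(2) manual page rather than anything spelled out in this header.

/* Sketch: bind the calling process to cpu 0 via usched_set(2). */
#include <sys/types.h>
#include <sys/usched.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        int cpu = 0;    /* assumed: data is a pointer to an int cpu id */

        if (usched_set(getpid(), USCHED_SET_CPU, &cpu, sizeof(cpu)) != 0)
                err(1, "usched_set(USCHED_SET_CPU)");

        printf("pid %ld is now restricted to cpu %d\n", (long)getpid(), cpu);
        return (0);
}

The other commands follow the same calling pattern; per the same manual page, USCHED_SET_SCHEDULER takes a scheduler name string such as "bsd4" or "dfly" instead of a cpu id.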