len = strlen(buf);
if (len > MAX_TEXT) {
#if defined(__DragonFly__) || __FreeBSD_version < 500000
- kprintf("text(%d) truncated to %d.\n", len, MAX_TEXT);
+ kprintf("text(%d) truncated to %lu.\n", len, MAX_TEXT);
#else
kprintf("text(%d) truncated to %td.\n", len, MAX_TEXT);
#endif
key = cmd & OHCI_KEY_MASK;
stat = res >> OHCI_STATUS_SHIFT;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
- kprintf("%08x %s %s %s %s %5d %08x %08x %04x:%04x",
- db_tr->bus_addr,
+ kprintf("%08x %s %s %s %s %5d %08lx %08lx %04x:%04x",
+ (unsigned int)db_tr->bus_addr,
#else
- kprintf("%08jx %s %s %s %s %5d %08x %08x %04x:%04x",
+ kprintf("%08jx %s %s %s %s %5d %08lx %08lx %04x:%04x",
(uintmax_t)db_tr->bus_addr,
#endif
dbcode[(cmd >> 28) & 0xf],
dbcond[(cmd >> 20) & 0x3],
dbcond[(cmd >> 18) & 0x3],
cmd & OHCI_COUNT_MASK,
- FWOHCI_DMA_READ(db[i].db.desc.addr),
- FWOHCI_DMA_READ(db[i].db.desc.depend),
- stat,
- res & OHCI_COUNT_MASK);
+ (u_long)FWOHCI_DMA_READ(db[i].db.desc.addr),
+ (u_long)FWOHCI_DMA_READ(db[i].db.desc.depend),
+ (u_int)stat,
+ (u_int)(res & OHCI_COUNT_MASK));
if(stat & 0xff00){
kprintf(" %s%s%s%s%s%s %s(%x)\n",
stat & OHCI_CNTL_DMA_RUN ? "RUN," : "",
if (md_debug > 1) {
kprintf("mdstrategy(%p) %s %08x, %lld, %d, %p)\n",
- bp, devtoname(dev), bp->b_flags, bio->bio_offset,
+ bp, devtoname(dev), bp->b_flags,
+ (long long)bio->bio_offset,
bp->b_bcount, bp->b_data);
}
bio->bio_driver_info = dev;
if (md_debug > 1)
kprintf("mdstrategy_malloc(%p) %s %08xx, %lld, %d, %p)\n",
- bp, devtoname(dev), bp->b_flags, bio->bio_offset,
+ bp, devtoname(dev), bp->b_flags,
+ (long long)bio->bio_offset,
bp->b_bcount, bp->b_data);
sc = dev->si_drv1;
while (nsec--) {
if (secno < sc->nsecp) {
secpp = &sc->secp[secno];
- if ((u_int)*secpp > 255) {
+ if ((u_int)(uintptr_t)*secpp > 255) {
secp = *secpp;
secval = 0;
} else {
secp = 0;
- secval = (u_int) *secpp;
+ secval = (u_int)(uintptr_t)*secpp;
}
} else {
secpp = 0;
if (secp)
FREE(secp, M_MDSECT);
if (secpp)
- *secpp = (u_char *)uc;
+ *secpp = (u_char *)(uintptr_t)uc;
} else {
if (!secpp) {
MALLOC(secpp, u_char **, (secno + nsec + 1) * sizeof(u_char *), M_MD, M_WAITOK | M_ZERO);
if (i == DEV_BSIZE) {
if (secp)
FREE(secp, M_MDSECT);
- *secpp = (u_char *)uc;
+ *secpp = (u_char *)(uintptr_t)uc;
} else {
if (!secp)
MALLOC(secp, u_char *, DEV_BSIZE, M_MDSECT, M_WAITOK);
if (md_debug > 1)
kprintf("mdstrategy_preload(%p) %s %08x, %lld, %d, %p)\n",
- bp, devtoname(dev), bp->b_flags, bio->bio_offset,
+ bp, devtoname(dev), bp->b_flags,
+ (long long)bio->bio_offset,
bp->b_bcount, bp->b_data);
sc = dev->si_drv1;
RF_ACTIVE);
if (sc->intr == NULL) {
kprintf("psm%d: unable to allocate the IRQ resource (%d).\n",
- unit, irq);
+ unit, (int)irq);
endprobe(ENXIO);
} else {
bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr);
crit_exit();
if (COM_CONSOLE(flags) && !COM_LLCONSOLE(flags)) {
cp->cn_probegood = 1;
- cp->cn_private = (void *)unit;
+ cp->cn_private = (void *)(intptr_t)unit;
cp->cn_pri = COM_FORCECONSOLE(flags)
|| boothowto & RB_SERIAL
? CN_REMOTE : CN_NORMAL;
siogdbiobase = iobase;
siogdbunit = unit;
#if DDB > 0
- cp->cn_gdbprivate = (void *)unit;
+ cp->cn_gdbprivate = (void *)(intptr_t)unit;
gdb_tab = cp;
#endif
}
kprintf("configuration file (currently sio only).\n");
siogdbiobase = siocniobase;
siogdbunit = siocnunit;
- cp->cn_gdbprivate = (void *)siocnunit;
+ cp->cn_gdbprivate = (void *)(intptr_t)siocnunit;
gdb_tab = cp;
}
#endif
if (offset > adp->va_window_size - PAGE_SIZE)
return -1;
-#ifdef __i386__
+#if defined(__i386__)
return i386_btop(adp->va_info.vi_window + offset);
+#elif defined(__amd64__)
+ return amd64_btop(adp->va_info.vi_window + offset);
+#else
+#error "vga_mmap_buf needs to return something"
#endif
}
sys_owait(struct owait_args *uap)
{
int error, status;
+ int res = 0;
- error = kern_wait(WAIT_ANY, &status, 0, NULL, &uap->sysmsg_fds[0]);
+ error = kern_wait(WAIT_ANY, &status, 0, NULL, &res);
+ uap->sysmsg_fds[0] = res;
if (error == 0)
uap->sysmsg_fds[1] = status;
{
const char *image_header = imgp->image_header;
const char *ihp;
- int error, length, offset;
+ size_t length, offset;
+ int error;
/* a shell script? */
if (((const short *) image_header)[0] != SHELLMAGIC)
} else {
++cst->sharecount;
if (ccms_enable >= 9) {
- kprintf("CST SHARE %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST SHARE %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
}
break;
} else {
--cst->sharecount;
if (ccms_enable >= 9) {
- kprintf("CST EXCLS %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST EXCLS %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
}
break;
--cst->sharecount;
++cst->modifycount;
if (ccms_enable >= 9) {
- kprintf("CST MODXL %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST MODXL %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
}
break;
info->coll_cst = cst;
} else {
if (ccms_enable >= 9) {
- kprintf("CST SHARE %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST SHARE %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
++cst->sharecount;
}
} else {
--cst->sharecount;
if (ccms_enable >= 9) {
- kprintf("CST EXCLS %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST EXCLS %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
}
break;
--cst->sharecount;
++cst->modifycount;
if (ccms_enable >= 9) {
- kprintf("CST MODXL %d %lld-%lld\n", cst->sharecount,
- cst->beg_offset, cst->end_offset);
+ kprintf("CST MODXL %d %lld-%lld\n",
+ cst->sharecount,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
}
break;
--cst->sharecount;
if (ccms_enable >= 9) {
kprintf("CST UNSHR %d %lld-%lld (%d)\n", cst->sharecount,
- cst->beg_offset, cst->end_offset, cst->blocked);
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset,
+ cst->blocked);
}
if (cst->blocked && cst->sharecount == 0) {
cst->blocked = 0;
++cst->sharecount;
if (ccms_enable >= 9) {
kprintf("CST UNEXC %d %lld-%lld (%d)\n", cst->sharecount,
- cst->beg_offset, cst->end_offset, cst->blocked);
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset,
+ cst->blocked);
}
if (cst->blocked && cst->sharecount == 0) {
cst->blocked = 0;
--cst->modifycount;
if (ccms_enable >= 9) {
kprintf("CST UNMOD %d %lld-%lld (%d)\n", cst->sharecount,
- cst->beg_offset, cst->end_offset, cst->blocked);
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset,
+ cst->blocked);
}
if (cst->blocked && cst->sharecount == 0) {
cst->blocked = 0;
cst->lrefs = ocst->lrefs;
if (ccms_enable >= 9) {
kprintf("MERGELEFT %p %lld-%lld (%d)\n",
- ocst, cst->beg_offset, cst->end_offset,
+ ocst,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset,
cst->blocked);
}
if (ocst->blocked) {
cst->rrefs = ocst->rrefs;
if (ccms_enable >= 9) {
kprintf("MERGERIGHT %p %lld-%lld\n",
- ocst, cst->beg_offset, cst->end_offset);
+ ocst,
+ (long long)cst->beg_offset,
+ (long long)cst->end_offset);
}
objcache_put(ccms_oc, ocst);
}
{
int i;
int error;
- int off = 0;
+ size_t off = 0;
TRACE_ENTER;
error = elf_getnote(src, &off, "FreeBSD", NT_PRPSINFO,
sys_dup2(struct dup2_args *uap)
{
int error;
+ int fd = 0;
- error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
+ error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
+ uap->sysmsg_fds[0] = fd;
return (error);
}
sys_dup(struct dup_args *uap)
{
int error;
+ int fd = 0;
- error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
+ error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
+ uap->sysmsg_fds[0] = fd;
return (error);
}
struct nlookupdata nd;
struct filedesc *fdp;
struct file *fp;
- register_t retval;
+ int retval;
int i, error, flags, devnull;
fdp = p->p_fd;
*/
if (info->i_state == ISTATE_NOTHREAD) {
info->i_state = ISTATE_NORMAL;
- lwkt_create((void *)ithread_handler, (void *)intr, NULL,
+ lwkt_create((void *)ithread_handler, (void *)(intptr_t)intr, NULL,
&info->i_thread, TDF_STOPREQ|TDF_INTTHREAD|TDF_MPSAFE, -1,
"ithread %d", intr);
if (intr >= FIRST_SOFTINT)
{
struct intr_info *info;
- info = &intr_info_ary[(int)st->data];
+ info = &intr_info_ary[(int)(intptr_t)st->data];
if (info->i_state != ISTATE_NOTHREAD)
lwkt_schedule(&info->i_thread);
}
u_int ill_count; /* interrupt livelock counter */
ill_count = 0;
- intr = (int)arg;
+ intr = (int)(intptr_t)arg;
info = &intr_info_ary[intr];
list = &info->i_reclist;
gd = mycpu;
else if (use_limit > 500000)
use_limit = 500000;
systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
- (void *)intr, use_limit);
+ (void *)(intptr_t)intr, use_limit);
/* fall through */
case ISTATE_LIVELOCKED:
/*
v &= ~PAGE_MASK;
pmap_kenter((vm_offset_t)ptvmmap, v);
o = (int)uio->uio_offset & PAGE_MASK;
- c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
+ c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
{
int hv;
- hv = ((int)m / sizeof(vm_page_t)) + ((int)m >> 12);
+ hv = ((int)(intptr_t)m / sizeof(vm_page_t)) + ((int)(intptr_t)m >> 12);
return(hv & sf_buf_hashmask);
}
return;
dumpsize = Maxmem;
kprintf("\ndumping to dev %s, blockno %lld\n",
- devtoname(dumpdev), dumplo64);
+ devtoname(dumpdev),
+ (long long)dumplo64);
kprintf("dump ");
error = dev_ddump(dumpdev);
if (error == 0) {
error = (uap->upcid == -1) ? 0 : ENOENT;
for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
if (vu->vu_id == uap->upcid ||
- (uap->upcid == -1 && vu->vu_pending >= (int)uap->data && vu->vu_lwp == lp)
+ (uap->upcid == -1 &&
+ vu->vu_pending >= (int)(intptr_t)uap->data && vu->vu_lwp == lp)
) {
- if (vu->vu_pending < (int)uap->data)
- vu->vu_pending = (int)uap->data;
+ if (vu->vu_pending < (int)(intptr_t)uap->data)
+ vu->vu_pending = (int)(intptr_t)uap->data;
error = 0;
targlp = vu->vu_lwp;
targlp->lwp_proc->p_flag |= P_UPCALLPEND; /* XXX lwp flags */
error = (uap->upcid == -1) ? 0 : ENOENT;
for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
if (vu->vu_id == uap->upcid ||
- (uap->upcid == -1 && vu->vu_pending >= (int)uap->data && vu->vu_lwp == lp)
+ (uap->upcid == -1 &&
+ vu->vu_pending >= (int)(intptr_t)uap->data && vu->vu_lwp == lp)
) {
error = 0;
if (uap->upcid == -1)
struct mbuf *m;
caddr_t dst;
c_caddr_t src;
- int cplen, error, mleft, count;
+ int error, mleft, count;
+ size_t cplen;
m = mbp->mb_cur;
mleft = mbp->mb_mleft;
static int caps_process_msg(caps_kinfo_t caps, caps_kmsg_t msg, struct caps_sys_get_args *uap);
static void caps_free(caps_kinfo_t caps);
static void caps_free_msg(caps_kmsg_t msg);
-static int caps_name_check(const char *name, int len);
+static int caps_name_check(const char *name, size_t len);
static caps_kinfo_t caps_free_msg_mcaps(caps_kmsg_t msg);
static caps_kinfo_t kern_caps_sys_service(const char *name, uid_t uid,
gid_t gid, struct ucred *cred,
* Validate the service name
*/
static int
-caps_name_check(const char *name, int len)
+caps_name_check(const char *name, size_t len)
{
- int i;
+ size_t i;
char c;
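+ /* i is unsigned (size_t); count down with a post-decrement so the test never relies on i >= 0 */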
- for (i = len - 1; i >= 0; --i) {
+ for (i = len; i-- > 0; ) {
struct ucred *cred = curproc->p_ucred;
char name[CAPS_MAXNAMELEN];
caps_kinfo_t caps;
- int len;
+ size_t len;
int error;
if (caps_enabled == 0)
return(EOPNOTSUPP);
if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
return(error);
- if (--len <= 0)
+ if ((ssize_t)--len <= 0)
return(EINVAL);
if ((error = caps_name_check(name, len)) != 0)
return(error);
struct ucred *cred = curproc->p_ucred;
char name[CAPS_MAXNAMELEN];
caps_kinfo_t caps;
- int len;
+ size_t len;
int error;
if (caps_enabled == 0)
return(EOPNOTSUPP);
if ((error = copyinstr(uap->name, name, CAPS_MAXNAMELEN, &len)) != 0)
return(error);
- if (--len <= 0)
+ if ((ssize_t)--len <= 0)
return(EINVAL);
if ((error = caps_name_check(name, len)) != 0)
return(error);
}
sname = dsname(dev, unit, slice, part, partname);
kprintf("%s%s: %s %sing ", sname, partname, what, term);
- kprintf("offset %012llx for %d", bio->bio_offset, bp->b_bcount);
+ kprintf("offset %012llx for %d",
+ (long long)bio->bio_offset,
+ bp->b_bcount);
if (donecnt)
kprintf(" (%d bytes completed)", donecnt);
}
dev_dstrategy(wdev, &bp2->b_bio1);
if (biowait(bp2) != 0) {
kprintf("%s: reading GPT partition table @ %lld: error %d\n",
- dname, bp2->b_bio1.bio_offset, bp2->b_error);
+ dname,
+ (long long)bp2->b_bio1.bio_offset,
+ bp2->b_error);
error = EIO;
goto done;
}
slice_info(const char *sname, struct diskslice *sp)
{
kprintf("%s: start %llu, end %llu, size %llu\n", sname,
- sp->ds_offset, sp->ds_offset + sp->ds_size - 1, sp->ds_size);
+ (long long)sp->ds_offset,
+ (long long)(sp->ds_offset + sp->ds_size - 1),
+ (long long)sp->ds_size);
}
struct disklabel_ops disklabel32_ops = {
|| (dp->dp_scyl == 255 && dp->dp_shd == 255
&& dp->dp_ssect == 255)) {
TRACE(("%s: C/H/S start %d/%d/%d, start %llu: allow\n",
- sname, chs_scyl, dp->dp_shd, chs_ssect, ssector1));
+ sname, chs_scyl, dp->dp_shd, chs_ssect,
+ (long long)ssector1));
ssector = ssector1;
}
|| (dp->dp_ecyl == 255 && dp->dp_ehd == 255
&& dp->dp_esect == 255)) {
TRACE(("%s: C/H/S end %d/%d/%d, end %llu: allow\n",
- sname, chs_ecyl, dp->dp_ehd, chs_esect, esector1));
+ sname, chs_ecyl, dp->dp_ehd, chs_esect,
+ (long long)esector1));
esector = esector1;
}
error = (ssector == ssector1 && esector == esector1) ? 0 : EINVAL;
if (bootverbose)
kprintf("%s: type 0x%x, start %llu, end = %llu, size %lu %s\n",
- sname, dp->dp_typ, ssector1, esector1,
- (u_long)dp->dp_size, error ? "" : ": OK");
+ sname, dp->dp_typ,
+ (long long)ssector1, (long long)esector1,
+ (u_long)dp->dp_size, (error ? "" : ": OK"));
if (ssector != ssector1 && bootverbose)
kprintf("%s: C/H/S start %d/%d/%d (%llu) != start %llu: invalid\n",
sname, chs_scyl, dp->dp_shd, chs_ssect,
- ssector, ssector1);
+ (long long)ssector, (long long)ssector1);
if (esector != esector1 && bootverbose)
kprintf("%s: C/H/S end %d/%d/%d (%llu) != end %llu: invalid\n",
sname, chs_ecyl, dp->dp_ehd, chs_esect,
- esector, esector1);
+ (long long)esector, (long long)esector1);
return (error);
}
} else {
kprintf("%s: slice extends beyond end of disk: "
"truncating from %lu to %llu sectors\n",
- sname, (u_long)dp->dp_size, size);
+ sname, (long long)dp->dp_size, size);
}
sp->ds_offset = offset;
sp->ds_size = size;
if (bio->bio_offset < 0) {
kprintf("dscheck(%s): negative bio_offset %lld\n",
- devtoname(dev), bio->bio_offset);
+ devtoname(dev), (long long)bio->bio_offset);
goto bad;
}
if (slice >= ssp->dss_nslices) {
((ssp->dss_secsize << 1) - 1)) {
kprintf("%s: invalid BIO offset, not sector aligned or"
" invalid sector size (not power of 2) %08llx %d\n",
- devtoname(dev), bio->bio_offset, ssp->dss_secsize);
+ devtoname(dev), (long long)bio->bio_offset,
+ ssp->dss_secsize);
goto bad;
}
bad_blkno:
kprintf(
"dscheck(%s): bio_offset %lld is not on a sector boundary (ssize %d)\n",
- devtoname(dev), bio->bio_offset, ssp->dss_secsize);
+ devtoname(dev), (long long)bio->bio_offset, ssp->dss_secsize);
bad:
bp->b_error = EINVAL;
/* fall through */
SLIST_FOREACH(tp, &tty_list, t_list) {
t = *tp;
if (t.t_dev)
- t.t_dev = (cdev_t)dev2udev(t.t_dev);
+ t.t_dev = (cdev_t)(uintptr_t)dev2udev(t.t_dev);
error = SYSCTL_OUT(req, (caddr_t)&t, sizeof(t));
if (error)
return (error);
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
pu = pr->pr_usrreqs;
if (pu == NULL)
- panic("domaininit: %ssw[%d] has no usrreqs!",
- dp->dom_name, pr - dp->dom_protosw);
+ panic("domaininit: %ssw[%ld] has no usrreqs!",
+ dp->dom_name, (long)(pr - dp->dom_protosw));
PRU_NOTSUPP(pu, accept);
PRU_NOTSUPP(pu, bind);
PRU_NOTSUPP(pu, connect);
* as well.
*/
if (bp->b_vp != vp || bp->b_loffset != loffset) {
- kprintf("Warning buffer %p (vp %p loffset %lld) was recycled\n", bp, vp, loffset);
+ kprintf("Warning buffer %p (vp %p loffset %lld) "
+ "was recycled\n",
+ bp, vp, (long long)loffset);
BUF_UNLOCK(bp);
goto loop;
}
* block number translation.
*/
if ((bp->b_flags & B_INVAL) && (bp->b_bio2.bio_offset != NOOFFSET)) {
- kprintf("Warning invalid buffer %p (vp %p loffset %lld) did not have cleared bio_offset cache\n", bp, vp, loffset);
+ kprintf("Warning invalid buffer %p (vp %p loffset %lld)"
+ " did not have cleared bio_offset cache\n",
+ bp, vp, (long long)loffset);
clearbiocache(&bp->b_bio2);
}
(int) m->pindex, (int)(foff >> 32),
(int) foff & 0xffffffff, resid, i);
if (!vn_isdisk(vp, NULL))
- kprintf(" iosize: %ld, loffset: %lld, flags: 0x%08x, npages: %d\n",
+ kprintf(" iosize: %ld, loffset: %lld, "
+ "flags: 0x%08x, npages: %d\n",
bp->b_vp->v_mount->mnt_stat.f_iosize,
- bp->b_loffset,
+ (long long)bp->b_loffset,
bp->b_flags, bp->b_xio.xio_npages);
else
kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n",
- bp->b_loffset,
+ (long long)bp->b_loffset,
bp->b_flags, bp->b_xio.xio_npages);
kprintf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
m->valid, m->dirty, m->wire_count);
p = bp->b_xio.xio_pages[index];
if (p && (index < bp->b_xio.xio_npages)) {
if (p->busy) {
- kprintf("vm_hold_free_pages: doffset: %lld, loffset: %lld\n",
- bp->b_bio2.bio_offset, bp->b_loffset);
+ kprintf("vm_hold_free_pages: doffset: %lld, "
+ "loffset: %lld\n",
+ (long long)bp->b_bio2.bio_offset,
+ (long long)bp->b_loffset);
}
bp->b_xio.xio_pages[index] = NULL;
pmap_kremove(pg);
"b_resid = %d\n, b_data = %p, "
"bio_offset(disk) = %lld, bio_offset(phys) = %lld\n",
bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
- bp->b_data, bp->b_bio2.bio_offset, (bp->b_bio2.bio_next ? bp->b_bio2.bio_next->bio_offset : (off_t)-1));
+ bp->b_data,
+ (long long)bp->b_bio2.bio_offset,
+ (long long)(bp->b_bio2.bio_next ?
+ bp->b_bio2.bio_next->bio_offset : (off_t)-1));
if (bp->b_xio.xio_npages) {
int i;
db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ",
return (error);
if ((error = cache_vref(nch, cred, &pvp)) != 0)
return (error);
- if (ncvp_debug)
- kprintf("inefficient_scan: directory iosize %ld vattr fileid = %lld\n", vat.va_blocksize, vat.va_fileid);
+ if (ncvp_debug) {
+ kprintf("inefficient_scan: directory iosize %ld "
+ "vattr fileid = %lld\n",
+ vat.va_blocksize,
+ (long long)vat.va_fileid);
+ }
/*
* Use the supplied fakename if not NULL. Fake names are typically
if (ncvp_debug) {
kprintf("cache_inefficient_scan: "
"MATCHED inode %lld path %s/%*.*s\n",
- vat.va_fileid, nch->ncp->nc_name,
+ (long long)vat.va_fileid,
+ nch->ncp->nc_name,
den->d_namlen, den->d_namlen,
den->d_name);
}
{
struct journal *jo = info;
struct journal_rawrecbeg *rawp;
- int bytes;
int error;
- int avail;
- int res;
+ size_t avail;
+ size_t bytes;
+ size_t res;
for (;;) {
/*
bytes = res;
jo->fifo.rindex += bytes;
error = fp_write(jo->fp,
- jo->fifo.membase + ((jo->fifo.rindex - bytes) & jo->fifo.mask),
+ jo->fifo.membase +
+ ((jo->fifo.rindex - bytes) & jo->fifo.mask),
bytes, &res, UIO_SYSSPACE);
if (error) {
kprintf("journal_thread(%s) write, error %d\n", jo->id, error);
struct journal *jo = info;
int64_t transid;
int error;
- int count;
- int bytes;
+ size_t count;
+ size_t bytes;
transid = 0;
error = 0;
bytes = jo->fifo.rindex - jo->fifo.xindex;
if (bytes == 0) {
- kprintf("warning: unsent data acknowledged transid %08llx\n", transid);
+ kprintf("warning: unsent data acknowledged transid %08llx\n",
+ (long long)transid);
tsleep(&jo->fifo.xindex, 0, "jrseq", hz);
transid = 0;
continue;
*/
if (rawp->transid < transid) {
#if 1
- kprintf("ackskip %08llx/%08llx\n", rawp->transid, transid);
+ kprintf("ackskip %08llx/%08llx\n",
+ (long long)rawp->transid,
+ (long long)transid);
#endif
jo->fifo.xindex += (rawp->recsize + 15) & ~15;
jo->total_acked += (rawp->recsize + 15) & ~15;
}
if (rawp->transid == transid) {
#if 1
- kprintf("ackskip %08llx/%08llx\n", rawp->transid, transid);
+ kprintf("ackskip %08llx/%08llx\n",
+ (long long)rawp->transid,
+ (long long)transid);
#endif
jo->fifo.xindex += (rawp->recsize + 15) & ~15;
jo->total_acked += (rawp->recsize + 15) & ~15;
transid = 0;
continue;
}
- kprintf("warning: unsent data(2) acknowledged transid %08llx\n", transid);
+ kprintf("warning: unsent data(2) acknowledged transid %08llx\n",
+ (long long)transid);
transid = 0;
}
jo->flags &= ~MC_JOURNAL_RACTIVE;
if (tsleep(&vp->v_range.vh_list, 0, "vrnglk", hz * 3) == EWOULDBLOCK) {
if (warned == 0)
kprintf("warning: conflicted lock vp %p %lld,%lld blocked\n",
- vp, vr->vr_offset, vr->vr_length);
+ vp, (long long)vr->vr_offset, (long long)vr->vr_length);
warned = 1;
}
conflicted = vrange_lock_conflicted(vp, vr);
}
if (warned) {
kprintf("waring: conflicted lock vp %p %lld,%lld unblocked\n",
- vp, vr->vr_offset, vr->vr_length);
+ vp, (long long)vr->vr_offset, (long long)vr->vr_length);
}
}
if (flags != MNT_WAIT)
vmsc_flags |= VMSC_NOWAIT;
vmntvnodescan(mp, vmsc_flags, vfs_msync_scan1, vfs_msync_scan2,
- (void *)flags);
+ (void *)(intptr_t)flags);
}
/*
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
- int flags = (int)data;
+ int flags = (int)(intptr_t)data;
if ((vp->v_flag & VRECLAIMED) == 0) {
if (vshouldmsync(vp))
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
vm_object_t obj;
- int flags = (int)data;
+ int flags = (int)(intptr_t)data;
if (vp->v_flag & VRECLAIMED)
return(0);
int
vn_get_namelen(struct vnode *vp, int *namelen)
{
- int error, retval[2];
+ int error;
+ register_t retval[2];
error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
if (error)
return (error);
- *namelen = *retval;
+ *namelen = (int)retval[0];
return (0);
}
iconv_convstr(void *handle, char *dst, const char *src)
{
char *p = dst;
- int inlen, outlen, error;
+ int error;
+ size_t inlen, outlen;
if (handle == NULL) {
strcpy(dst, src);
{
const char *s = src;
char *d = dst;
- int inlen, outlen, error;
+ size_t inlen, outlen;
+ int error;
if (size == 0)
return dst;
{
/* Check length */
if (m->m_len != sizeof(struct ip6_fw)) {
- dprintf(("%s len=%d, want %d\n", err_prefix, m->m_len,
- sizeof(struct ip6_fw)));
+ dprintf(("%s len=%d, want %ld\n", err_prefix, m->m_len,
+ sizeof(struct ip6_fw)));
return (NULL);
}
return(check_ip6fw_struct(mtod(m, struct ip6_fw *)));
}
if (stage == IPV6_FW_DEL) {
if (m->m_len != sizeof(struct ip6_fw)) {
- dprintf(("%s len=%d, want %d\n", err_prefix, m->m_len,
- sizeof(struct ip6_fw)));
+ dprintf(("%s len=%d, want %ld\n", err_prefix, m->m_len,
+ sizeof(struct ip6_fw)));
error = EINVAL;
} else if (mtod(m, struct ip6_fw *)->fw_number == (u_short)-1) {
dprintf(("%s can't delete rule 65535\n", err_prefix));
KASSERT(static_count == 1,
("%u static rules remains\n", static_count));
KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
- ("%u bytes of static rules remains, should be %u\n",
+ ("%u bytes of static rules remains, should be %lu\n",
static_ioc_len, IOC_RULESIZE(ctx->ipfw_default_rule)));
}
static void
schednetisr_remote(void *data)
{
- int num = (int)data;
+ int num = (int)(intptr_t)data;
struct netisr *ni = &netisrs[num];
lwkt_port_t port = &netisr_cpu[0].td_msgport;
struct netmsg *pmsg;
if (mycpu->gd_cpuid != 0)
lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)num);
else
- schednetisr_remote((void *)num);
+ schednetisr_remote((void *)(intptr_t)num);
#else
- schednetisr_remote((void *)num);
+ schednetisr_remote((void *)(intptr_t)num);
#endif
}
int8_t val;
bcopy(data + *off, &val, sizeof(int8_t));
- switch ((int)type->info) {
+ switch ((int)(intptr_t)type->info) {
case INT_SIGNED:
fmt = "%d";
fval = val;
int16_t val;
bcopy(data + *off, &val, sizeof(int16_t));
- switch ((int)type->info) {
+ switch ((int)(intptr_t)type->info) {
case INT_SIGNED:
fmt = "%d";
fval = val;
int32_t val;
bcopy(data + *off, &val, sizeof(int32_t));
- switch ((int)type->info) {
+ switch ((int)(intptr_t)type->info) {
case INT_SIGNED:
fmt = "%ld";
fval = val;
int64_t val;
bcopy(data + *off, &val, sizeof(int64_t));
- switch ((int)type->info) {
+ switch ((int)(intptr_t)type->info) {
case INT_SIGNED:
fmt = "%lld";
fval = val;
if (node->name)
strncpy(ni->name, node->name, sizeof(ni->name) - 1);
strncpy(ni->type, node->type->name, sizeof(ni->type) - 1);
- ni->id = (u_int32_t) node;
+ ni->id = (u_int32_t)(uintptr_t)node;
ni->hooks = node->numhooks;
break;
}
}
static int
-smb_copy_iconv(struct mbchain *mbp, c_caddr_t src, caddr_t dst, int len)
+smb_copy_iconv(struct mbchain *mbp, c_caddr_t src, caddr_t dst, size_t len)
{
- int outlen = len;
+ size_t outlen = len;
return iconv_conv((struct iconv_drv*)mbp->mb_udata, &src, &len, &dst, &outlen);
}
__va_end(ap);
/* call the processes' main()... */
- cpu_set_fork_handler(lp2, (void (*)(void *))func, arg);
+ cpu_set_fork_handler(lp2,
+ (void (*)(void *, struct trapframe *))func, arg);
start_forked_proc(&lwp0, p2);
return 0;
while (narg) {
if (argnp)
db_printf("%s=", *argnp++);
- db_printf("%r", db_get_value((long)argp, 8, FALSE));
+ db_printf("%ld", (long)db_get_value((long)argp, 8, FALSE));
argp++;
if (--narg != 0)
db_printf(",");
rip = tf->tf_rip;
rbp = tf->tf_rbp;
db_printf(
- "--- trap %#r, rip = %#r, rsp = %#r, rbp = %#r ---\n",
+ "--- trap %016lx, rip = %016lx, rsp = %016lx, rbp = %016lx ---\n",
tf->tf_trapno, rip, rsp, rbp);
}
break;
rip = tf->tf_rip;
rbp = tf->tf_rbp;
db_printf(
- "--- syscall %#r, rip = %#r, rsp = %#r, rbp = %#r ---\n",
+ "--- syscall %016lx, rip = %016lx, rsp = %016lx, rbp = %016lx ---\n",
tf->tf_rax, rip, rsp, rbp);
}
break;
rip = tf->tf_rip;
rbp = tf->tf_rbp;
db_printf(
- "--- interrupt, rip = %#r, rsp = %#r, rbp = %#r ---\n",
+ "--- interrupt, rip = %016lx, rsp = %016lx, rbp = %016lx ---\n",
rip, rsp, rbp);
}
break;
char *modif)
{
struct amd64_frame *frame;
- int *argp;
+ long *argp;
db_addr_t callpc;
boolean_t first;
int i;
unsigned type, len;
type = (d.dr[7] >> (16 + (i * 4))) & 3;
len = (d.dr[7] >> (16 + (i * 4) + 2)) & 3;
- db_printf(" %-5d %-8s %10s %3d 0x%08x\n",
+ db_printf(" %-5d %-8s %10s %3d 0x%08lx\n",
i, "enabled", watchtype_str(type),
len + 1, DBREG_DRX((&d), i));
} else {
db_printf("\ndebug register values:\n");
for (i = 0; i < 8; i++)
- db_printf(" dr%d 0x%08x\n", i, DBREG_DRX((&d),i));
+ db_printf(" dr%d 0x%08lx\n", i, DBREG_DRX((&d),i));
db_printf("\n");
}
struct privatespace CPU_prvspace[];
#endif
-extern vm_paddr_t phys_avail[16];
-extern vm_paddr_t Maxmem;
vm_paddr_t Maxmem_bytes;
int MemImageFd = -1;
int DiskNum;
int NetifNum;
char *pid_file;
-extern struct msgbuf *msgbufp;
-extern caddr_t ptvmmap;
#if JG
u_int tsc_present;
vm_offset_t KvaStart;
vm_offset_t KvaSize;
vm_offset_t virtual_start;
#endif
-extern vm_offset_t virtual_end;
-extern vm_offset_t kernel_vm_end;
vm_offset_t crashdumpmap;
-extern vm_offset_t clean_sva;
-extern vm_offset_t clean_eva;
static void init_sys_memory(char *imageFile);
static void init_kern_memory(void);
* Temporary Debugging
*/
-static void PCHAR_(int);
+static void PCHAR_(int, void * __unused);
int
kprintf0(const char *fmt, ...)
}
static void
-PCHAR_(int c)
+PCHAR_(int c, void *dummy __unused)
{
const int COMC_TXWAIT = 0x40000;
const int COMPORT = 0x3f8;
#endif
extern void trap(struct trapframe *frame);
-extern void syscall2(struct trapframe *frame);
static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);
userexit(struct lwp *lp)
{
struct thread *td = lp->lwp_thread;
- globaldata_t gd = td->td_gd;
+/* globaldata_t gd = td->td_gd;*/
/*
* Handle stop requests at kernel priority. Any requests queued
MAKEMPSAFE(have_mplock);
i = trap_pfault(frame, TRUE);
if (frame->tf_rip == 0)
- kprintf("T_PAGEFLT: Warning %rip == 0!\n");
+ kprintf("T_PAGEFLT: Warning %%rip == 0!\n");
if (i == -1)
goto out;
if (i == 0)
*/
struct proc *p = td->td_proc;
kprintf("seg-fault accessing address %p rip=%p pid=%d p_comm=%s\n",
- va, frame->tf_rip, p->p_pid, p->p_comm);
+ (void *)va, (void *)frame->tf_rip, p->p_pid, p->p_comm);
/* Debugger("seg-fault"); */
return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
regs->tf_rip = (long)params->func;
regs->tf_rsp = (long)params->stack;
/* Set up argument for function call */
- regs->tf_rdi = params->arg; /* JG Can this be in userspace addresses? */
+ regs->tf_rdi = (long)params->arg; /* JG Can this be in userspace addresses? */
/*
* Set up fake return address. As the lwp function may never return,
* we simply copy out a NULL pointer and force the lwp to receive
}
if (tsc_present)
- kprintf("TSC clock: %llu Hz, ", tsc_frequency);
+ kprintf("TSC clock: %llu Hz, ", (long long)tsc_frequency);
kprintf("i8254 clock: %u Hz\n", tot_count);
return (tot_count);
struct mbchain;
struct uio;
-typedef int mb_copy_t(struct mbchain *mbp, c_caddr_t src, caddr_t dst, int len);
+typedef int mb_copy_t(struct mbchain *mbp, c_caddr_t src, caddr_t dst, size_t len);
struct mbchain {
struct mbuf * mb_top; /* head of mbufs chain */
hammer_rel_volume(root_volume, 0);
if (hammer_debug_general & 0x0800) {
kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
- zone_offset, bytes);
+ (long long)zone_offset, bytes);
}
return(bytes);
}
hammer_rel_volume(root_volume, 0);
if (hammer_debug_general & 0x0800) {
kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
- zone_offset, result_offset);
+ (long long)zone_offset, (long long)result_offset);
}
return(result_offset);
}
if (cursor->index == node->count) {
if (hammer_debug_btree) {
kprintf("BRACKETU %016llx[%d] -> %016llx[%d] (td=%p)\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
- (cursor->parent ? cursor->parent->node_offset : -1),
+ (long long)(cursor->parent ? cursor->parent->node_offset : -1),
cursor->parent_index,
curthread);
}
s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
if (hammer_debug_btree) {
kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d (td=%p)\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
- elm[0].internal.base.obj_id,
+ (long long)elm[0].internal.base.obj_id,
elm[0].internal.base.rec_type,
- elm[0].internal.base.key,
+ (long long)elm[0].internal.base.key,
elm[0].internal.base.localization,
r,
curthread
);
kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index + 1,
- elm[1].internal.base.obj_id,
+ (long long)elm[1].internal.base.obj_id,
elm[1].internal.base.rec_type,
- elm[1].internal.base.key,
+ (long long)elm[1].internal.base.key,
elm[1].internal.base.localization,
s
);
r = hammer_btree_cmp(&cursor->key_end, &elm->base);
if (hammer_debug_btree) {
kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
(elm[0].leaf.base.btype ?
elm[0].leaf.base.btype : '?'),
- elm[0].leaf.base.obj_id,
+ (long long)elm[0].leaf.base.obj_id,
elm[0].leaf.base.rec_type,
- elm[0].leaf.base.key,
+ (long long)elm[0].leaf.base.key,
elm[0].leaf.base.localization,
r
);
hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
cursor->node, i,
- elm->internal.base.obj_id,
+ (long long)elm->internal.base.obj_id,
elm->internal.base.rec_type,
- elm->internal.base.key,
+ (long long)elm->internal.base.key,
elm->internal.base.localization
);
}
s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
if (hammer_debug_btree) {
kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
- elm[0].internal.base.obj_id,
+ (long long)elm[0].internal.base.obj_id,
elm[0].internal.base.rec_type,
- elm[0].internal.base.key,
+ (long long)elm[0].internal.base.key,
elm[0].internal.base.localization,
r
);
kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index + 1,
- elm[1].internal.base.obj_id,
+ (long long)elm[1].internal.base.obj_id,
elm[1].internal.base.rec_type,
- elm[1].internal.base.key,
+ (long long)elm[1].internal.base.key,
elm[1].internal.base.localization,
s
);
s = hammer_btree_cmp(&cursor->key_beg, &elm->base);
if (hammer_debug_btree) {
kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
(elm[0].leaf.base.btype ?
elm[0].leaf.base.btype : '?'),
- elm[0].leaf.base.obj_id,
+ (long long)elm[0].leaf.base.obj_id,
elm[0].leaf.base.rec_type,
- elm[0].leaf.base.key,
+ (long long)elm[0].leaf.base.key,
elm[0].leaf.base.localization,
s
);
hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
cursor->node, i,
- elm->internal.base.obj_id,
+ (long long)elm->internal.base.obj_id,
elm->internal.base.rec_type,
- elm->internal.base.key,
+ (long long)elm->internal.base.key,
elm->internal.base.localization
);
}
}
if (hammer_debug_btree) {
kprintf("CREATE_CHECK %016llx\n",
- cursor->create_check);
+ (long long)cursor->create_check);
}
cursor->key_beg.create_tid = cursor->create_check;
/* loop */
&error, &cursor->data_buffer);
if (hammer_crc_test_leaf(cursor->data, &elm->leaf) == 0) {
kprintf("CRC DATA @ %016llx/%d FAILED\n",
- elm->leaf.data_offset, elm->leaf.data_len);
+ (long long)elm->leaf.data_offset, elm->leaf.data_len);
if (hammer_debug_debug & 0x0001)
Debugger("CRC FAILED: DATA");
if (cursor->trans->flags & HAMMER_TRANSF_CRCDOM)
if (hammer_debug_btree) {
kprintf("SEARCH %016llx[%d] %016llx %02x key=%016llx cre=%016llx lo=%02x (td = %p)\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
cursor->index,
- cursor->key_beg.obj_id,
+ (long long)cursor->key_beg.obj_id,
cursor->key_beg.rec_type,
- cursor->key_beg.key,
- cursor->key_beg.create_tid,
+ (long long)cursor->key_beg.key,
+ (long long)cursor->key_beg.create_tid,
cursor->key_beg.localization,
curthread
);
if (cursor->parent)
kprintf("SEARCHP %016llx[%d] (%016llx/%016llx %016llx/%016llx) (%p/%p %p/%p)\n",
- cursor->parent->node_offset, cursor->parent_index,
- cursor->left_bound->obj_id,
- cursor->parent->ondisk->elms[cursor->parent_index].internal.base.obj_id,
- cursor->right_bound->obj_id,
- cursor->parent->ondisk->elms[cursor->parent_index+1].internal.base.obj_id,
+ (long long)cursor->parent->node_offset,
+ cursor->parent_index,
+ (long long)cursor->left_bound->obj_id,
+ (long long)cursor->parent->ondisk->elms[cursor->parent_index].internal.base.obj_id,
+ (long long)cursor->right_bound->obj_id,
+ (long long)cursor->parent->ondisk->elms[cursor->parent_index+1].internal.base.obj_id,
cursor->left_bound,
&cursor->parent->ondisk->elms[cursor->parent_index],
cursor->right_bound,
++hammer_stats_btree_iterations;
if (hammer_debug_btree) {
kprintf("SEARCH-I %016llx count=%d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
node->count);
}
if (hammer_debug_btree) {
kprintf("RESULT-I %016llx[%d] %016llx %02x "
"key=%016llx cre=%016llx lo=%02x\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
i,
- elm->internal.base.obj_id,
+ (long long)elm->internal.base.obj_id,
elm->internal.base.rec_type,
- elm->internal.base.key,
- elm->internal.base.create_tid,
+ (long long)elm->internal.base.key,
+ (long long)elm->internal.base.create_tid,
elm->internal.base.localization
);
}
KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
if (hammer_debug_btree) {
kprintf("SEARCH-L %016llx count=%d\n",
- cursor->node->node_offset,
+ (long long)cursor->node->node_offset,
node->count);
}
error = 0;
if (hammer_debug_btree) {
kprintf("RESULT-L %016llx[%d] (SUCCESS)\n",
- cursor->node->node_offset, i);
+ (long long)cursor->node->node_offset, i);
}
goto done;
}
failed:
if (hammer_debug_btree) {
kprintf("RESULT-L %016llx[%d] (FAILED)\n",
- cursor->node->node_offset, i);
+ (long long)cursor->node->node_offset, i);
}
/*
if (hammer_debug_general & 0x0002) {
kprintf("mirror_propagate: propagate "
"%016llx @%016llx:%d\n",
- mirror_tid, node->node_offset, cursor->index);
+ (long long)mirror_tid,
+ (long long)node->node_offset,
+ cursor->index);
}
if (hammer_debug_general & 0x0002) {
kprintf("mirror_propagate: propagate "
"%016llx @%016llx\n",
- mirror_tid, node->node_offset);
+ (long long)mirror_tid,
+ (long long)node->node_offset);
}
}
if (error == ENOENT)
int i;
kprintf("node %p count=%d parent=%016llx type=%c\n",
- ondisk, ondisk->count, ondisk->parent, ondisk->type);
+ ondisk, ondisk->count,
+ (long long)ondisk->parent, ondisk->type);
/*
* Dump both boundary elements if an internal node
hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
{
kprintf(" %2d", i);
- kprintf("\tobj_id = %016llx\n", elm->base.obj_id);
- kprintf("\tkey = %016llx\n", elm->base.key);
- kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
- kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
+ kprintf("\tobj_id = %016llx\n", (long long)elm->base.obj_id);
+ kprintf("\tkey = %016llx\n", (long long)elm->base.key);
+ kprintf("\tcreate_tid = %016llx\n", (long long)elm->base.create_tid);
+ kprintf("\tdelete_tid = %016llx\n", (long long)elm->base.delete_tid);
kprintf("\trec_type = %04x\n", elm->base.rec_type);
kprintf("\tobj_type = %02x\n", elm->base.obj_type);
kprintf("\tbtype = %02x (%c)\n",
switch(type) {
case HAMMER_BTREE_TYPE_INTERNAL:
kprintf("\tsubtree_off = %016llx\n",
- elm->internal.subtree_offset);
+ (long long)elm->internal.subtree_offset);
break;
case HAMMER_BTREE_TYPE_RECORD:
- kprintf("\tdata_offset = %016llx\n", elm->leaf.data_offset);
+ kprintf("\tdata_offset = %016llx\n",
+ (long long)elm->leaf.data_offset);
kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
break;
if (error == 0) {
KASSERT(elm->base.btype == node->ondisk->type, ("BTYPE MISMATCH %c %c NODE %p\n", elm->base.btype, node->ondisk->type, node));
if (node->ondisk->parent != cursor->parent->node_offset)
- panic("node %p %016llx vs %016llx\n", node, node->ondisk->parent, cursor->parent->node_offset);
+ panic("node %p %016llx vs %016llx\n", node, (long long)node->ondisk->parent, (long long)cursor->parent->node_offset);
KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
}
break;
hammer_free_inode(ip);
ip = NULL;
} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
- panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
+ panic("hammer_create_inode: duplicate obj_id %llx",
+ (long long)ip->obj_id);
/* not reached */
hammer_free_inode(ip);
}
done:
if (error) {
kprintf("hammer_direct_read: failed @ %016llx\n",
- zone2_offset);
+ (long long)zone2_offset);
bp->b_error = error;
bp->b_flags |= B_ERROR;
biodone(bio);
* to the tree so we do not have to worry about the backend.
*/
kprintf("hammer_direct_write: failed @ %016llx\n",
- leaf->data_offset);
+ (long long)leaf->data_offset);
bp = bio->bio_buf;
bp->b_resid = 0;
bp->b_error = EIO;
kprintf("DOINSERT LOOKUP %d\n", error);
if (error == 0) {
kprintf("hammer_ip_sync_record: duplicate rec "
- "at (%016llx)\n", record->leaf.base.key);
+ "at (%016llx)\n", (long long)record->leaf.base.key);
Debugger("duplicate record1");
error = EIO;
}
}
error = hammer_btree_insert(cursor, &record->leaf, &doprop);
- if (hammer_debug_inode && error)
- kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
+ if (hammer_debug_inode && error) {
+ kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
+ error,
+ (long long)cursor->node->node_offset,
+ cursor->index,
+ (long long)record->leaf.base.key);
+ }
/*
* Our record is on-disk and we normally mark the in-memory version
if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
error = hammer_btree_lookup(cursor);
if (error == ENOENT || error == EDEADLK) {
- if (hammer_debug_general & 0x2000)
- kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
+ if (hammer_debug_general & 0x2000) {
+ kprintf("error %d node %p %016llx index %d\n",
+ error, cursor->node,
+ (long long)cursor->node->node_offset,
+ cursor->index);
+ }
cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
error = hammer_btree_iterate(cursor);
}
*/
if (off < ran_beg && leaf->base.key > ran_beg) {
panic("hammer left edge case %016llx %d\n",
- leaf->base.key, leaf->data_len);
+ (long long)leaf->base.key,
+ leaf->data_len);
}
/*
if (hammer_debug_general & 0x0002) {
kprintf("delete_at_cursor: propagate %016llx"
" @%016llx\n",
- elm->leaf.base.delete_tid,
- node->node_offset);
+ (long long)elm->leaf.base.delete_tid,
+ (long long)node->node_offset);
}
}
}
if (error) {
ret_error = error;
- if (report_conflicts || (hammer_debug_general & 0x8000))
- kprintf("hammer_del_buffers: unable to invalidate %016llx buffer=%p rep=%d\n", base_offset, buffer, report_conflicts);
+ if (report_conflicts ||
+ (hammer_debug_general & 0x8000)) {
+ kprintf("hammer_del_buffers: unable to "
+ "invalidate %016llx buffer=%p rep=%d\n",
+ (long long)base_offset,
+ buffer, report_conflicts);
+ }
}
base_offset += HAMMER_BUFSIZE;
zone2_offset += HAMMER_BUFSIZE;
if (hammer_debug_io & 0x0001) {
kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
- buffer->zoneX_offset, buffer->zone2_offset, isnew,
- buffer->ondisk);
+ (long long)buffer->zoneX_offset,
+ (long long)buffer->zone2_offset,
+ isnew, buffer->ondisk);
}
if (buffer->ondisk == NULL) {
if (hammer_debug_general & 0x0200) {
kprintf("check %016llx %016llx cre=%016llx del=%016llx\n",
- elm->base.obj_id,
- elm->base.key,
- elm->base.create_tid,
- elm->base.delete_tid);
+ (long long)elm->base.obj_id,
+ (long long)elm->base.key,
+ (long long)elm->base.create_tid,
+ (long long)elm->base.delete_tid);
}
if (prune_should_delete(prune, elm)) {
if (hammer_debug_general & 0x0200) {
kprintf("check %016llx %016llx: DELETE\n",
- elm->base.obj_id, elm->base.key);
+ (long long)elm->base.obj_id,
+ (long long)elm->base.key);
}
/*
cursor.flags |= HAMMER_CURSOR_ATEDISK;
if (hammer_debug_general & 0x0100) {
kprintf("check %016llx %016llx: SKIP\n",
- elm->base.obj_id, elm->base.key);
+ (long long)elm->base.obj_id,
+ (long long)elm->base.key);
}
}
++prune->stat_scanrecords;
if (ip) {
if (hammer_debug_general & 0x0001) {
kprintf("pruning disconnected inode %016llx\n",
- elm->base.obj_id);
+ (long long)elm->base.obj_id);
}
hammer_rel_inode(ip, 0);
hammer_inode_waitreclaims(cursor->trans->hmp);
} else {
kprintf("unable to prune disconnected inode %016llx\n",
- elm->base.obj_id);
+ (long long)elm->base.obj_id);
}
}
*/
if (hammer_debug_general & 0x1000) {
kprintf("rebalance_closeout %016llx:",
- base_item->node->node_offset);
+ (long long)base_item->node->node_offset);
}
if (base_item->copy->count != base_count) {
base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
if (hammer_debug_general & 0x4000) {
kprintf("REBLOCK LNODE %016llx -> %016llx\n",
- onode->node_offset, nnode->node_offset);
+ (long long)onode->node_offset,
+ (long long)nnode->node_offset);
}
hammer_modify_node_done(nnode);
cursor->node = nnode;
if (hammer_debug_general & 0x4000) {
kprintf("REBLOCK INODE %016llx -> %016llx\n",
- onode->node_offset, nnode->node_offset);
+ (long long)onode->node_offset,
+ (long long)nnode->node_offset);
}
hammer_modify_node_done(nnode);
cursor->node = nnode;
kprintf("HAMMER(%s) Start Recovery %016llx - %016llx "
"(%lld bytes of UNDO)%s\n",
root_volume->ondisk->vol_name,
- first_offset, last_offset,
- bytes,
+ (long long)first_offset,
+ (long long)last_offset,
+ (long long)bytes,
(hmp->ronly ? " (RO)" : "(RW)"));
if (bytes > (rootmap->alloc_offset & HAMMER_OFF_LONG_MASK)) {
kprintf("Undo size is absurd, unable to mount\n");
if (scan_offset > rootmap->alloc_offset) {
kprintf("HAMMER(%s) UNDO record at %016llx FIFO overflow\n",
root_volume->ondisk->vol_name,
- scan_offset);
+ (long long)scan_offset);
error = EIO;
goto done;
}
while ((int64_t)bytes > 0) {
if (hammer_debug_general & 0x0080)
- kprintf("scan_offset %016llx\n", scan_offset);
+ kprintf("scan_offset %016llx\n",
+ (long long)scan_offset);
if (scan_offset == HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0)) {
scan_offset = rootmap->alloc_offset;
continue;
kprintf("HAMMER(%s) UNDO record at %016llx FIFO "
"underflow\n",
root_volume->ondisk->vol_name,
- scan_offset);
+ (long long)scan_offset);
error = EIO;
break;
}
kprintf("HAMMER(%s) Unable to read UNDO TAIL "
"at %016llx\n",
root_volume->ondisk->vol_name,
- scan_offset - sizeof(*tail));
+ (long long)(scan_offset - sizeof(*tail)));
break;
}
kprintf("HAMMER(%s) Illegal UNDO TAIL signature "
"at %016llx\n",
root_volume->ondisk->vol_name,
- scan_offset - sizeof(*tail));
+ (long long)(scan_offset - sizeof(*tail)));
error = EIO;
break;
}
if (error) {
kprintf("HAMMER(%s) UNDO record at %016llx failed\n",
root_volume->ondisk->vol_name,
- scan_offset - tail->tail_size);
+ (long long)(scan_offset - tail->tail_size));
break;
}
scan_offset -= tail->tail_size;
if (buffer == NULL) {
kprintf("HAMMER: UNDO record, "
"cannot access buffer %016llx\n",
- undo->undo_offset);
+ (long long)undo->undo_offset);
break;
}
hammer_modify_buffer(NULL, buffer, NULL, 0);
hammer_recover_copy_undo(hammer_off_t undo_offset,
char *src, char *dst, int bytes)
{
- if (hammer_debug_general & 0x0080)
- kprintf("UNDO %016llx: %d\n", undo_offset, bytes);
+ if (hammer_debug_general & 0x0080) {
+ kprintf("UNDO %016llx: %d\n",
+ (long long)undo_offset, bytes);
+ }
#if 0
- kprintf("UNDO %016llx:", undo_offset);
+ kprintf("UNDO %016llx:", (long long)undo_offset);
hammer_recover_debug_dump(22, dst, bytes);
kprintf("%22s", "to:");
hammer_recover_debug_dump(22, src, bytes);
key |= 0x100000000LL;
if (hammer_debug_general & 0x0400) {
kprintf("namekey2: 0x%016llx %*.*s\n",
- key, len, len, aname);
+ (long long)key, len, len, aname);
}
*max_iterationsp = 0x00FFFFFF;
break;
return(HAMMER_XDEMARC);
}
panic("hammer_blockdemarc: illegal range %lld %lld\n",
- file_offset1, file_offset2);
+ (long long)file_offset1, (long long)file_offset2);
}
udev_t
if (tid >= 0xFFFFFFFFFF000000ULL)
panic("hammer_start_transaction: Ran out of TIDs!");
if (hammer_debug_tid)
- kprintf("alloc_tid %016llx\n", tid);
+ kprintf("alloc_tid %016llx\n", (long long)tid);
return(tid);
}
hammer_modify_buffer_done(buffer);
goto again;
}
- if (hammer_debug_general & 0x0080)
- kprintf("undo %016llx %d %d\n", next_offset, bytes, len);
+ if (hammer_debug_general & 0x0080) {
+ kprintf("undo %016llx %d %d\n",
+ (long long)next_offset, bytes, len);
+ }
/*
* We're good, create the entry.
krateprintf(&hmp->krate,
"HAMMER(%s): Critical error inode=%lld %s\n",
hmp->mp->mnt_stat.f_mntfromname,
- (ip ? ip->obj_id : -1), msg);
+ (long long)(ip ? ip->obj_id : -1), msg);
if (hmp->ronly == 0) {
hmp->ronly = 2; /* special errored read-only mode */
hmp->mp->mnt_flag |= MNT_RDONLY;
asof = dip->hmp->asof;
*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
ksnprintf(*ap->a_fakename, 19, "0x%016llx",
- dip->obj_asof);
+ (long long)dip->obj_asof);
} else {
*ap->a_vpp = NULL;
return ENOENT;
/* vap->va_size == 26 */
ksnprintf(buf, sizeof(buf),
"@@0x%016llx:%05d",
- pfsm->pfsd.sync_end_tid,
+ (long long)pfsm->pfsd.sync_end_tid,
localization >> 16);
} else {
/* vap->va_size == 10 */
#if 0
ksnprintf(buf, sizeof(buf),
"@@0x%016llx:%05d",
- HAMMER_MAX_TID,
+ (long long)HAMMER_MAX_TID,
localization >> 16);
#endif
}
ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
if (ip == HammerTruncIp)
- kprintf("truncate1 %016llx\n", ip->trunc_off);
+ kprintf("truncate1 %016llx\n",
+ (long long)ip->trunc_off);
#endif
} else if (ip->trunc_off > vap->va_size) {
ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
if (ip == HammerTruncIp)
- kprintf("truncate2 %016llx\n", ip->trunc_off);
+ kprintf("truncate2 %016llx\n",
+ (long long)ip->trunc_off);
#endif
} else {
#ifdef DEBUG_TRUNCATE
if (ip == HammerTruncIp)
- kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
+ kprintf("truncate3 %016llx (ignored)\n",
+ (long long)vap->va_size);
#endif
}
}
*/
hammer_simple_transaction(&trans, ip->hmp);
#if 0
- kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
+ kprintf("bmap_beg %016llx ip->cache %p\n",
+ (long long)ap->a_loffset, ip->cache[1]);
#endif
hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
#if 0
kprintf("BMAP %016llx: %016llx - %016llx\n",
- ap->a_loffset, base_offset, last_offset);
- kprintf("BMAP %16s: %016llx - %016llx\n",
- "", base_disk_offset, last_disk_offset);
+ (long long)ap->a_loffset,
+ (long long)base_offset,
+ (long long)last_offset);
+ kprintf("BMAP %16s: %016llx - %016llx\n", "",
+ (long long)base_disk_offset,
+ (long long)last_disk_offset);
#endif
if (cursor.node) {
hammer_cache_node(&ip->cache[1], cursor.node);
#if 0
- kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
+ kprintf("bmap_end2 %016llx ip->cache %p\n",
+ (long long)ap->a_loffset, ip->cache[1]);
#endif
}
hammer_done_cursor(&cursor);
static int
hpfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
- u_int size;
+ size_t size;
int error;
struct vnode *devvp;
struct hpfs_args args;
if (bp->b_dirtyend > bcount) {
kprintf("NFS append race @%08llx:%d\n",
- bp->b_bio2.bio_offset,
+ (long long)bp->b_bio2.bio_offset,
bp->b_dirtyend - bcount);
bp->b_dirtyend = bcount;
}
*/
if (error == 0 && vap->va_size != VNOVAL &&
np->n_size != vap->va_size) {
- kprintf("NFS ftruncate: server disagrees on the file size: %lld/%lld/%lld\n", tsize, vap->va_size, np->n_size);
+ kprintf("NFS ftruncate: server disagrees on the file size: "
+ "%lld/%lld/%lld\n",
+ (long long)tsize,
+ (long long)vap->va_size,
+ (long long)np->n_size);
goto again;
}
if (error && vap->va_size != VNOVAL) {
vref(dvp);
/* Fudge together a funny name */
- sp->s_namlen = ksprintf(sp->s_name, ".nfsA%08x4.4", (int)cnp->cn_td);
+ sp->s_namlen = ksprintf(sp->s_name, ".nfsA%08x4.4",
+ (int)(intptr_t)cnp->cn_td);
/* Try lookitups until we get one that isn't there */
while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
struct nfsnode *np = VTONFS(vp);
kprintf("tag VT_NFS, fileid %lld fsid 0x%x",
- np->n_vattr.va_fileid, np->n_vattr.va_fsid);
+ (long long)np->n_vattr.va_fileid, np->n_vattr.va_fsid);
if (vp->v_type == VFIFO)
fifo_printinfo(vp);
kprintf("\n");
struct null_args args;
struct vnode *rootvp;
struct null_mount *xmp;
- u_int size;
+ size_t size;
struct nlookupdata nd;
fhandle_t fh;
}
(void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
- &size);
+ &size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void)nullfs_statfs(mp, &mp->mnt_stat, cred);
NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
{
#ifndef SMP
int name[2];
- int olen, ncpu, plen, error;
+ int ncpu, error;
+ size_t olen, plen;
name[0] = CTL_HW;
name[1] = HW_NCPU;
struct socket *so;
struct vnode *rvp;
struct portalnode *pn;
- u_int size;
+ size_t size;
int error;
/*
*/
return EINVAL;
else
- return smbfs_smb_lockandx(np, op, (u_int32_t)id, start, end, scred);
+ return smbfs_smb_lockandx(np, op, (u_int32_t)(uintptr_t)id,
+ start, end, scred);
}
int
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
biodone(bio);
- kprintf("swap_pager_strategy: bp %p offset %lld size %d, not page bounded\n", bp, bio->bio_offset, (int)bp->b_bcount);
+ kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
+ "not page bounded\n",
+ bp, (long long)bio->bio_offset, (int)bp->b_bcount);
return;
}
bp->b_bcount = PAGE_SIZE * (j - i);
bio->bio_done = swp_pager_async_iodone;
bio->bio_offset = (off_t)(blk - (reqpage - i)) << PAGE_SHIFT;
- bio->bio_driver_info = (void *)(reqpage - i);
+ bio->bio_driver_info = (void *)(intptr_t)(reqpage - i);
{
int k;
if (tsleep(mreq, 0, "swread", hz*20)) {
kprintf(
"swap_pager: indefinite wait buffer: "
- " offset: %lld, size: %d\n",
- bio->bio_offset, bp->b_bcount
+ " offset: %lld, size: %ld\n",
+ (long long)bio->bio_offset,
+ (long)bp->b_bcount
);
}
}
"swap_pager: I/O error - %s failed; offset %lld,"
"size %ld, error %d\n",
((bp->b_cmd == BUF_CMD_READ) ? "pagein" : "pageout"),
- bio->bio_offset,
+ (long long)bio->bio_offset,
(long)bp->b_bcount,
bp->b_error
);
* bio_driver_info holds the requested page
* index.
*/
- if (i != (int)bio->bio_driver_info) {
+ if (i != (int)(intptr_t)bio->bio_driver_info) {
vm_page_deactivate(m);
vm_page_wakeup(m);
} else {
*
* bio_driver_info holds the requested page
*/
- if (i != (int)bio->bio_driver_info) {
+ if (i != (int)(intptr_t)bio->bio_driver_info) {
vm_page_deactivate(m);
vm_page_wakeup(m);
} else {
{
#ifdef __amd64__
/* JG DEBUG64 We check if the page is really zeroed. */
- char *p = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
int i;
for (i = 0; i < PAGE_SIZE; i++) {
*/
bp = swbuf;
for (i = 0; i < nswbuf; ++i, ++bp) {
- bp->b_kvabase = (caddr_t)(i * MAXPHYS) + swapbkva;
+ bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
bp->b_kvasize = MAXPHYS;
TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
BUF_LOCKINIT(bp);
vp->v_filesize = size;
} else {
object->ref_count++;
- if (vp->v_filesize != size)
- kprintf("vnode_pager_alloc: Warning, filesize mismatch %lld/%lld\n", vp->v_filesize, size);
+ if (vp->v_filesize != size) {
+ kprintf("vnode_pager_alloc: Warning, filesize "
+ "mismatch %lld/%lld\n",
+ (long long)vp->v_filesize,
+ (long long)size);
+ }
}
vref(vp);