Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/device_if.m      |  10
-rw-r--r--   sys/kern/init_sysent.c    |   2
-rw-r--r--   sys/kern/kern_exit.c      |   2
-rw-r--r--   sys/kern/kern_jail.c      |  15
-rw-r--r--   sys/kern/kern_prot.c      |  56
-rw-r--r--   sys/kern/kern_racct.c     | 307
-rw-r--r--   sys/kern/kern_time.c      |   5
-rw-r--r--   sys/kern/sys_timerfd.c    |   1
-rw-r--r--   sys/kern/syscalls.c       |   2
-rw-r--r--   sys/kern/syscalls.conf    |   1
-rw-r--r--   sys/kern/syscalls.master  |   5
-rw-r--r--   sys/kern/systrace_args.c  |   8
-rw-r--r--   sys/kern/vfs_default.c    |   1
-rw-r--r--   sys/kern/vfs_syscalls.c   |  19
-rw-r--r--   sys/kern/vfs_vnops.c      |   5
15 files changed, 260 insertions, 179 deletions
diff --git a/sys/kern/device_if.m b/sys/kern/device_if.m
index c02e5a46f326..ed94b2ccbe1b 100644
--- a/sys/kern/device_if.m
+++ b/sys/kern/device_if.m
@@ -49,25 +49,25 @@ HEADER {
 CODE {
     static int null_shutdown(device_t dev)
     {
-        return 0;
+        return (0);
     }
 
     static int null_suspend(device_t dev)
     {
-        return 0;
+        return (0);
     }
 
     static int null_resume(device_t dev)
     {
-        return 0;
+        return (0);
     }
 
     static int null_quiesce(device_t dev)
     {
-        return 0;
+        return (0);
     }
 
-    static void * null_register(device_t dev)
+    static void *null_register(device_t dev)
     {
         return NULL;
     }
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index 91792430d24c..c0a5479c9634 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -67,7 +67,7 @@
 /* The casts are bogus but will do for now. */
 struct sysent sysent[] = {
     { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC },	/* 0 = syscall */
-    { .sy_narg = AS(exit_args), .sy_call = (sy_call_t *)sys_exit, .sy_auevent = AUE_EXIT, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC },	/* 1 = exit */
+    { .sy_narg = AS(_exit_args), .sy_call = (sy_call_t *)sys__exit, .sy_auevent = AUE_EXIT, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC },	/* 1 = _exit */
     { .sy_narg = 0, .sy_call = (sy_call_t *)sys_fork, .sy_auevent = AUE_FORK, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC },	/* 2 = fork */
     { .sy_narg = AS(read_args), .sy_call = (sy_call_t *)sys_read, .sy_auevent = AUE_READ, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC },	/* 3 = read */
     { .sy_narg = AS(write_args), .sy_call = (sy_call_t *)sys_write, .sy_auevent = AUE_WRITE, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC },	/* 4 = write */
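
Note: syscall 1 is regenerated here as _exit/sys__exit. Old userland source that uses the raw constant keeps building because syscalls.conf (further down in this diff) emits a compatibility alias. A minimal sketch of pre-rename code that still compiles against the regenerated <sys/syscall.h>; the demo program itself is hypothetical:

#include <sys/syscall.h>

#include <unistd.h>

int
main(void)
{
    /* SYS_exit is now "#define SYS_exit SYS__exit" (see the
     * syshdr_extra line added to syscalls.conf below), so this
     * pre-rename raw-syscall use still compiles and still exits. */
    syscall(SYS_exit, 0);
    return (0);    /* not reached */
}
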
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 54e3044ab093..a32b5a1b3354 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -202,7 +202,7 @@ exit_onexit(struct proc *p)
  * exit -- death of process.
  */
 int
-sys_exit(struct thread *td, struct exit_args *uap)
+sys__exit(struct thread *td, struct _exit_args *uap)
 {
 
     exit1(td, uap->rval, 0);
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index 7ef1d19f0ea8..7c9a15ae18f3 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -115,8 +115,11 @@ struct prison prison0 = {
 #else
     .pr_flags = PR_HOST|_PR_IP_SADDRSEL,
 #endif
-    .pr_allow = PR_ALLOW_ALL_STATIC,
+    .pr_allow = PR_ALLOW_PRISON0,
 };
+_Static_assert((PR_ALLOW_PRISON0 & ~PR_ALLOW_ALL_STATIC) == 0,
+    "Bits enabled in PR_ALLOW_PRISON0 that are not statically reserved");
+
 MTX_SYSINIT(prison0, &prison0.pr_mtx, "jail mutex", MTX_DEF);
 
 struct bool_flags {
@@ -232,6 +235,9 @@ static struct bool_flags pr_flag_allow[NBBY * NBPW] = {
     {"allow.adjtime", "allow.noadjtime", PR_ALLOW_ADJTIME},
     {"allow.settime", "allow.nosettime", PR_ALLOW_SETTIME},
     {"allow.routing", "allow.norouting", PR_ALLOW_ROUTING},
+    {"allow.unprivileged_parent_tampering",
+        "allow.nounprivileged_parent_tampering",
+        PR_ALLOW_UNPRIV_PARENT_TAMPER},
 };
 static unsigned pr_allow_all = PR_ALLOW_ALL_STATIC;
 const size_t pr_flag_allow_size = sizeof(pr_flag_allow);
@@ -4006,6 +4012,7 @@ prison_priv_check(struct ucred *cred, int priv)
     case PRIV_DEBUG_DIFFCRED:
     case PRIV_DEBUG_SUGID:
     case PRIV_DEBUG_UNPRIV:
+    case PRIV_DEBUG_DIFFJAIL:
 
         /*
          * Allow jail to set various resource limits and login
@@ -4043,8 +4050,10 @@ prison_priv_check(struct ucred *cred, int priv)
          */
     case PRIV_SCHED_DIFFCRED:
     case PRIV_SCHED_CPUSET:
+    case PRIV_SCHED_DIFFJAIL:
     case PRIV_SIGNAL_DIFFCRED:
     case PRIV_SIGNAL_SUGID:
+    case PRIV_SIGNAL_DIFFJAIL:
 
         /*
          * Allow jailed processes to write to sysctls marked as jail
@@ -4688,6 +4697,10 @@ SYSCTL_JAIL_PARAM(_allow, read_msgbuf, CTLTYPE_INT | CTLFLAG_RW,
     "B", "Jail may read the kernel message buffer");
 SYSCTL_JAIL_PARAM(_allow, unprivileged_proc_debug, CTLTYPE_INT | CTLFLAG_RW,
     "B", "Unprivileged processes may use process debugging facilities");
+SYSCTL_JAIL_PARAM(_allow, unprivileged_parent_tampering,
+    CTLTYPE_INT | CTLFLAG_RW, "B",
+    "Unprivileged parent jail processes may tamper with same-uid processes"
+    " (signal/debug/cpuset)");
 SYSCTL_JAIL_PARAM(_allow, suser, CTLTYPE_INT | CTLFLAG_RW,
     "B", "Processes in jail with uid 0 have privilege");
 #ifdef VIMAGE
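
Note: allow.unprivileged_parent_tampering is an ordinary boolean jail parameter, so it can be inspected or set with jail(8)/jls(8) or programmatically via jail(3). A rough sketch using jail_getv(3), compiled with -ljail; the jail name "foo" and buffer size are illustrative, and boolean parameters are assumed to marshal as strings like the other allow.* flags:

#include <sys/param.h>
#include <sys/jail.h>

#include <jail.h>
#include <stdio.h>

int
main(void)
{
    char val[32];
    int jid;

    /* Ask the kernel for the parameter's value in jail "foo". */
    jid = jail_getv(0, "name", "foo",
        "allow.unprivileged_parent_tampering", val, NULL);
    if (jid == -1) {
        fprintf(stderr, "jail_getv: %s\n", jail_errmsg);
        return (1);
    }
    printf("jid %d: allow.unprivileged_parent_tampering=%s\n", jid, val);
    return (0);
}
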
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index bbb622547598..2cd5b7069023 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1914,6 +1914,38 @@ cr_canseejailproc(struct ucred *u1, struct ucred *u2)
 }
 
 /*
+ * Determine if u1 can tamper with the subject specified by u2, if they are in
+ * different jails and 'unprivileged_parent_tampering' jail policy allows it.
+ *
+ * May be called if u1 and u2 are in the same jail, but it is expected that the
+ * caller has already done a prison_check() prior to calling it.
+ *
+ * Returns: 0 for permitted, EPERM otherwise
+ */
+static int
+cr_can_tamper_with_subjail(struct ucred *u1, struct ucred *u2, int priv)
+{
+
+    MPASS(prison_check(u1, u2) == 0);
+    if (u1->cr_prison == u2->cr_prison)
+        return (0);
+
+    if (priv_check_cred(u1, priv) == 0)
+        return (0);
+
+    /*
+     * Jails do not maintain a distinct UID space, so process visibility is
+     * all that would control an unprivileged process' ability to tamper
+     * with a process in a subjail by default if we did not have the
+     * allow.unprivileged_parent_tampering knob to restrict it by default.
+     */
+    if (prison_allow(u2, PR_ALLOW_UNPRIV_PARENT_TAMPER))
+        return (0);
+
+    return (EPERM);
+}
+
+/*
  * Helper for cr_cansee*() functions to abide by system-wide security.bsd.see_*
  * policies.  Determines if u1 "can see" u2 according to these policies.
  * Returns: 0 for permitted, ESRCH otherwise
@@ -2062,6 +2094,19 @@ cr_cansignal(struct ucred *cred, struct proc *proc, int signum)
             return (error);
     }
 
+    /*
+     * At this point, the target may be in a different jail than the
+     * subject -- the subject must be in a parent jail to the target,
+     * whether it is prison0 or a subordinate of prison0 that has
+     * children.  Additional privileges are required to allow this, as
+     * whether the creds are truly equivalent or not must be determined on
+     * a case-by-case basis.
+     */
+    error = cr_can_tamper_with_subjail(cred, proc->p_ucred,
+        PRIV_SIGNAL_DIFFJAIL);
+    if (error)
+        return (error);
+
     return (0);
 }
 
@@ -2138,6 +2183,12 @@ p_cansched(struct thread *td, struct proc *p)
         if (error)
             return (error);
     }
+
+    error = cr_can_tamper_with_subjail(td->td_ucred, p->p_ucred,
+        PRIV_SCHED_DIFFJAIL);
+    if (error)
+        return (error);
+
     return (0);
 }
 
@@ -2258,6 +2309,11 @@ p_candebug(struct thread *td, struct proc *p)
             return (error);
     }
 
+    error = cr_can_tamper_with_subjail(td->td_ucred, p->p_ucred,
+        PRIV_DEBUG_DIFFJAIL);
+    if (error)
+        return (error);
+
     /* Can't trace init when securelevel > 0. */
     if (p == initproc) {
         error = securelevel_gt(td->td_ucred, 0);
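
Note: the user-visible effect of these checks is on same-uid signalling, scheduling control, and debugging across the jail boundary. An unprivileged process in a parent jail (or on the host) that could previously kill(2) a same-uid process inside a child jail may now get EPERM, depending on the target jail's allow.unprivileged_parent_tampering setting. A trivial, hypothetical probe to run as the unprivileged user against a PID inside a child jail:

#include <sys/types.h>

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(int argc, char *argv[])
{
    pid_t pid;

    if (argc != 2) {
        fprintf(stderr, "usage: %s pid\n", argv[0]);
        return (1);
    }
    pid = (pid_t)strtol(argv[1], NULL, 10);
    /* Expect EPERM when the target lives in a child jail that does
     * not permit unprivileged parent tampering. */
    if (kill(pid, SIGTERM) == -1) {
        fprintf(stderr, "kill(%ld): %s\n", (long)pid, strerror(errno));
        return (1);
    }
    printf("signal delivered\n");
    return (0);
}
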
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index 7ee3b9e2048a..7351e9cb6313 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -96,6 +96,13 @@ static void racct_sub_cred_locked(struct ucred *cred, int resource,
         uint64_t amount);
 static void racct_add_cred_locked(struct ucred *cred, int resource,
         uint64_t amount);
+static int racct_set_locked(struct proc *p, int resource, uint64_t amount,
+        int force);
+static void racct_updatepcpu_locked(struct proc *p);
+static void racct_updatepcpu_racct_locked(struct racct *racct);
+static void racct_updatepcpu_containers(void);
+static void racct_settime_locked(struct proc *p, bool exit);
+static void racct_zeropcpu_locked(struct proc *p);
 
 SDT_PROVIDER_DEFINE(racct);
 SDT_PROBE_DEFINE3(racct, , rusage, add,
@@ -308,68 +315,6 @@ fixpt_t ccpu_exp[] = {
 
 #define CCPU_EXP_MAX 110
 
-/*
- * This function is analogical to the getpcpu() function in the ps(1) command.
- * They should both calculate in the same way so that the racct %cpu
- * calculations are consistent with the values shown by the ps(1) tool.
- * The calculations are more complex in the 4BSD scheduler because of the value
- * of the ccpu variable.  In ULE it is defined to be zero which saves us some
- * work.
- */
-static uint64_t
-racct_getpcpu(struct proc *p, u_int pcpu)
-{
-    u_int swtime;
-#ifdef SCHED_4BSD
-    fixpt_t pctcpu, pctcpu_next;
-#endif
-    fixpt_t p_pctcpu;
-    struct thread *td;
-
-    ASSERT_RACCT_ENABLED();
-    KASSERT((p->p_flag & P_IDLEPROC) == 0,
-        ("racct_getpcpu: idle process %p", p));
-
-    swtime = (ticks - p->p_swtick) / hz;
-
-    /*
-     * For short-lived processes, the sched_pctcpu() returns small
-     * values even for cpu intensive processes.  Therefore we use
-     * our own estimate in this case.
-     */
-    if (swtime < RACCT_PCPU_SECS)
-        return (pcpu);
-
-    p_pctcpu = 0;
-    FOREACH_THREAD_IN_PROC(p, td) {
-        thread_lock(td);
-#ifdef SCHED_4BSD
-        pctcpu = sched_pctcpu(td);
-        /* Count also the yet unfinished second. */
-        pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
-        pctcpu_next += sched_pctcpu_delta(td);
-        p_pctcpu += max(pctcpu, pctcpu_next);
-#else
-        /*
-         * In ULE the %cpu statistics are updated on every
-         * sched_pctcpu() call.  So special calculations to
-         * account for the latest (unfinished) second are
-         * not needed.
-         */
-        p_pctcpu += sched_pctcpu(td);
-#endif
-        thread_unlock(td);
-    }
-
-#ifdef SCHED_4BSD
-    if (swtime <= CCPU_EXP_MAX)
-        return ((100 * (uint64_t)p_pctcpu * 1000000) /
-            (FSCALE - ccpu_exp[swtime]));
-#endif
-
-    return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
-}
-
 static void
 racct_add_racct(struct racct *dest, const struct racct *src)
 {
@@ -499,19 +444,6 @@ racct_adjust_resource(struct racct *racct, int resource,
         ("%s: resource %d usage < 0", __func__, resource));
         racct->r_resources[resource] = 0;
     }
-
-    /*
-     * There are some cases where the racct %cpu resource would grow
-     * beyond 100% per core.  For example in racct_proc_exit() we add
-     * the process %cpu usage to the ucred racct containers.  If too
-     * many processes terminated in a short time span, the ucred %cpu
-     * resource could grow too much.  Also, the 4BSD scheduler sometimes
-     * returns for a thread more than 100% cpu usage.  So we set a sane
-     * boundary here to 100% * the maximum number of CPUs.
-     */
-    if ((resource == RACCT_PCTCPU) &&
-        (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000 * (int64_t)MAXCPU))
-        racct->r_resources[RACCT_PCTCPU] = 100 * 1000000 * (int64_t)MAXCPU;
 }
 
 static int
@@ -635,10 +567,44 @@ racct_add_buf(struct proc *p, const struct buf *bp, int is_write)
     RACCT_UNLOCK();
 }
 
+static void
+racct_settime_locked(struct proc *p, bool exit)
+{
+    struct thread *td;
+    struct timeval wallclock;
+    uint64_t runtime;
+
+    ASSERT_RACCT_ENABLED();
+    RACCT_LOCK_ASSERT();
+    PROC_LOCK_ASSERT(p, MA_OWNED);
+
+    if (exit) {
+        /*
+         * proc_reap() has already calculated rux
+         * and added crux to rux.
+         */
+        runtime = cputick2usec(p->p_rux.rux_runtime -
+            p->p_crux.rux_runtime);
+    } else {
+        PROC_STATLOCK(p);
+        FOREACH_THREAD_IN_PROC(p, td)
+            ruxagg(p, td);
+        PROC_STATUNLOCK(p);
+        runtime = cputick2usec(p->p_rux.rux_runtime);
+    }
+    microuptime(&wallclock);
+    timevalsub(&wallclock, &p->p_stats->p_start);
+
+    racct_set_locked(p, RACCT_CPU, runtime, 0);
+    racct_set_locked(p, RACCT_WALLCLOCK,
+        (uint64_t)wallclock.tv_sec * 1000000 +
+        wallclock.tv_usec, 0);
+}
+
 static int
 racct_set_locked(struct proc *p, int resource, uint64_t amount, int force)
 {
-    int64_t old_amount, decayed_amount, diff_proc, diff_cred;
+    int64_t old_amount, diff_proc, diff_cred;
 #ifdef RCTL
     int error;
 #endif
@@ -655,17 +621,7 @@ racct_set_locked(struct proc *p, int resource, uint64_t amount, int force)
      * The diffs may be negative.
      */
     diff_proc = amount - old_amount;
-    if (resource == RACCT_PCTCPU) {
-        /*
-         * Resources in per-credential racct containers may decay.
-         * If this is the case, we need to calculate the difference
-         * between the new amount and the proportional value of the
-         * old amount that has decayed in the ucred racct containers.
-         */
-        decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
-        diff_cred = amount - decayed_amount;
-    } else
-        diff_cred = diff_proc;
+    diff_cred = diff_proc;
 #ifdef notyet
     KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
         ("%s: usage of non-droppable resource %d dropping", __func__,
@@ -908,8 +864,6 @@ racct_proc_fork(struct proc *parent, struct proc *child)
         goto out;
 #endif
 
-    /* Init process cpu time. */
-    child->p_prev_runtime = 0;
     child->p_throttled = 0;
 
     /*
@@ -964,37 +918,16 @@ racct_proc_fork_done(struct proc *child)
 void
 racct_proc_exit(struct proc *p)
 {
-    struct timeval wallclock;
-    uint64_t pct_estimate, pct, runtime;
     int i;
 
     if (!racct_enable)
         return;
 
     PROC_LOCK(p);
-    /*
-     * We don't need to calculate rux, proc_reap() has already done this.
-     */
-    runtime = cputick2usec(p->p_rux.rux_runtime);
-#ifdef notyet
-    KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
-#else
-    if (runtime < p->p_prev_runtime)
-        runtime = p->p_prev_runtime;
-#endif
-    microuptime(&wallclock);
-    timevalsub(&wallclock, &p->p_stats->p_start);
-    if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
-        pct_estimate = (1000000 * runtime * 100) /
-            ((uint64_t)wallclock.tv_sec * 1000000 +
-            wallclock.tv_usec);
-    } else
-        pct_estimate = 0;
-    pct = racct_getpcpu(p, pct_estimate);
-
     RACCT_LOCK();
-    racct_set_locked(p, RACCT_CPU, runtime, 0);
-    racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);
+
+    racct_settime_locked(p, true);
+    racct_zeropcpu_locked(p);
 
     KASSERT(p->p_racct->r_resources[RACCT_RSS] == 0,
         ("process reaped with %ju allocated for RSS\n",
@@ -1068,6 +1001,10 @@ racct_move(struct racct *dest, struct racct *src)
     RACCT_LOCK();
     racct_add_racct(dest, src);
     racct_sub_racct(src, src);
+    dest->r_runtime = src->r_runtime;
+    dest->r_time = src->r_time;
+    src->r_runtime = 0;
+    timevalsub(&src->r_time, &src->r_time);
     RACCT_UNLOCK();
 }
 
@@ -1170,8 +1107,6 @@ racct_proc_wakeup(struct proc *p)
 static void
 racct_decay_callback(struct racct *racct, void *dummy1, void *dummy2)
 {
-    int64_t r_old, r_new;
-
     ASSERT_RACCT_ENABLED();
     RACCT_LOCK_ASSERT();
 
@@ -1181,15 +1116,6 @@ racct_decay_callback(struct racct *racct, void *dummy1, void *dummy2)
     rctl_throttle_decay(racct, RACCT_READIOPS);
     rctl_throttle_decay(racct, RACCT_WRITEIOPS);
 #endif
-
-    r_old = racct->r_resources[RACCT_PCTCPU];
-
-    /* If there is nothing to decay, just exit. */
-    if (r_old <= 0)
-        return;
-
-    r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
-    racct->r_resources[RACCT_PCTCPU] = r_new;
 }
 
 static void
@@ -1221,15 +1147,105 @@ racct_decay(void)
 }
 
 static void
+racct_updatepcpu_racct_locked(struct racct *racct)
+{
+    struct timeval diff;
+    uint64_t elapsed;
+    uint64_t runtime;
+    uint64_t newpcpu;
+    uint64_t oldpcpu;
+
+    ASSERT_RACCT_ENABLED();
+    RACCT_LOCK_ASSERT();
+
+    /* Difference between now and previously-recorded time. */
+    microuptime(&diff);
+    timevalsub(&diff, &racct->r_time);
+    elapsed = (uint64_t)diff.tv_sec * 1000000 + diff.tv_usec;
+
+    /* Difference between current and previously-recorded runtime. */
+    runtime = racct->r_resources[RACCT_CPU] - racct->r_runtime;
+
+    newpcpu = runtime * 100 * 1000000 / elapsed;
+    oldpcpu = racct->r_resources[RACCT_PCTCPU];
+    /*
+     * This calculation is equivalent to
+     * (1 - 0.3) * newpcpu + 0.3 * oldpcpu
+     * where RACCT_DECAY_FACTOR = 0.3 * FSCALE.
+     */
+    racct->r_resources[RACCT_PCTCPU] = ((FSCALE - RACCT_DECAY_FACTOR) *
+        newpcpu + RACCT_DECAY_FACTOR * oldpcpu) / FSCALE;
+    if (racct->r_resources[RACCT_PCTCPU] >
+        100 * 1000000 * (uint64_t)mp_ncpus)
+        racct->r_resources[RACCT_PCTCPU] = 100 * 1000000 *
+            (uint64_t)mp_ncpus;
+
+    /* Record current times. */
+    racct->r_runtime = racct->r_resources[RACCT_CPU];
+    timevaladd(&racct->r_time, &diff);
+}
+
+static void
+racct_zeropcpu_locked(struct proc *p)
+{
+    ASSERT_RACCT_ENABLED();
+    PROC_LOCK_ASSERT(p, MA_OWNED);
+
+    p->p_racct->r_resources[RACCT_PCTCPU] = 0;
+}
+
+static void
+racct_updatepcpu_locked(struct proc *p)
+{
+    ASSERT_RACCT_ENABLED();
+    PROC_LOCK_ASSERT(p, MA_OWNED);
+
+    racct_updatepcpu_racct_locked(p->p_racct);
+}
+
+static void
+racct_updatepcpu_pre(void)
+{
+
+    RACCT_LOCK();
+}
+
+static void
+racct_updatepcpu_post(void)
+{
+
+    RACCT_UNLOCK();
+}
+
+static void
+racct_updatepcpu_racct_callback(struct racct *racct, void *dummy1, void *dummy2)
+{
+    racct_updatepcpu_racct_locked(racct);
+}
+
+static void
+racct_updatepcpu_containers(void)
+{
+    ASSERT_RACCT_ENABLED();
+
+    ui_racct_foreach(racct_updatepcpu_racct_callback, racct_updatepcpu_pre,
+        racct_updatepcpu_post, NULL, NULL);
+    loginclass_racct_foreach(racct_updatepcpu_racct_callback,
+        racct_updatepcpu_pre, racct_updatepcpu_post, NULL, NULL);
+    prison_racct_foreach(racct_updatepcpu_racct_callback,
+        racct_updatepcpu_pre, racct_updatepcpu_post, NULL, NULL);
+}
+
+static void
 racctd(void)
 {
-    struct thread *td;
     struct proc *p;
-    struct timeval wallclock;
-    uint64_t pct, pct_estimate, runtime;
+    struct proc *idle;
 
     ASSERT_RACCT_ENABLED();
 
+    idle = STAILQ_FIRST(&cpuhead)->pc_idlethread->td_proc;
+
     for (;;) {
         racct_decay();
 
@@ -1237,36 +1253,16 @@ racctd(void)
 
         FOREACH_PROC_IN_SYSTEM(p) {
             PROC_LOCK(p);
+            if (p == idle) {
+                PROC_UNLOCK(p);
+                continue;
+            }
             if (p->p_state != PRS_NORMAL ||
                 (p->p_flag & P_IDLEPROC) != 0) {
-                if (p->p_state == PRS_ZOMBIE)
-                    racct_set(p, RACCT_PCTCPU, 0);
                 PROC_UNLOCK(p);
                 continue;
             }
 
-            microuptime(&wallclock);
-            timevalsub(&wallclock, &p->p_stats->p_start);
-            PROC_STATLOCK(p);
-            FOREACH_THREAD_IN_PROC(p, td)
-                ruxagg(p, td);
-            runtime = cputick2usec(p->p_rux.rux_runtime);
-            PROC_STATUNLOCK(p);
-#ifdef notyet
-            KASSERT(runtime >= p->p_prev_runtime,
-                ("runtime < p_prev_runtime"));
-#else
-            if (runtime < p->p_prev_runtime)
-                runtime = p->p_prev_runtime;
-#endif
-            p->p_prev_runtime = runtime;
-            if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
-                pct_estimate = (1000000 * runtime * 100) /
-                    ((uint64_t)wallclock.tv_sec * 1000000 +
-                    wallclock.tv_usec);
-            } else
-                pct_estimate = 0;
-            pct = racct_getpcpu(p, pct_estimate);
-
             RACCT_LOCK();
 #ifdef RCTL
             rctl_throttle_decay(p->p_racct, RACCT_READBPS);
@@ -1274,11 +1270,8 @@ racctd(void)
             rctl_throttle_decay(p->p_racct, RACCT_READIOPS);
             rctl_throttle_decay(p->p_racct, RACCT_WRITEIOPS);
 #endif
-            racct_set_locked(p, RACCT_PCTCPU, pct, 1);
-            racct_set_locked(p, RACCT_CPU, runtime, 0);
-            racct_set_locked(p, RACCT_WALLCLOCK,
-                (uint64_t)wallclock.tv_sec * 1000000 +
-                wallclock.tv_usec, 0);
+            racct_settime_locked(p, false);
+            racct_updatepcpu_locked(p);
             RACCT_UNLOCK();
             PROC_UNLOCK(p);
         }
@@ -1306,6 +1299,8 @@ racctd(void)
             PROC_UNLOCK(p);
         }
         sx_sunlock(&allproc_lock);
+
+        racct_updatepcpu_containers();
         pause("-", hz);
     }
 }
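
Note: the replacement %cpu accounting above is a plain exponentially-weighted moving average over per-container RACCT_CPU deltas, clamped to 100% * mp_ncpus. A standalone worked example of the fixed-point arithmetic; FSCALE is 1 << 11 on FreeBSD, and the sample percentages are invented:

#include <stdint.h>
#include <stdio.h>

#define FSHIFT              11
#define FSCALE              (1 << FSHIFT)       /* as in <sys/param.h> */
#define RACCT_DECAY_FACTOR  (FSCALE * 3 / 10)   /* 0.3, per the comment above */

int
main(void)
{
    /* %cpu is stored as percent * 10^6: say the new sample is 80%
     * and the previous smoothed value was 20%. */
    uint64_t newpcpu = 80 * 1000000;
    uint64_t oldpcpu = 20 * 1000000;
    uint64_t pcpu;

    /* (1 - 0.3) * newpcpu + 0.3 * oldpcpu, in fixed point. */
    pcpu = ((FSCALE - RACCT_DECAY_FACTOR) * newpcpu +
        RACCT_DECAY_FACTOR * oldpcpu) / FSCALE;

    /* 0.7 * 80% + 0.3 * 20% = 62% (62011718 with integer rounding). */
    printf("smoothed %%cpu = %ju\n", (uintmax_t)pcpu);
    return (0);
}
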
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 9830e5093a3a..2a6f0989f6aa 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -571,7 +571,10 @@ kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
         td->td_rtcgen = atomic_load_acq_int(&rtc_generation);
         error = kern_clock_gettime(td, clock_id, &now);
-        KASSERT(error == 0, ("kern_clock_gettime: %d", error));
+        if (error != 0) {
+            td->td_rtcgen = 0;
+            return (error);
+        }
         timespecsub(&ts, &now, &ts);
     }
     if (ts.tv_sec < 0 || (ts.tv_sec == 0 && ts.tv_nsec == 0)) {
diff --git a/sys/kern/sys_timerfd.c b/sys/kern/sys_timerfd.c
index ab7e048a2ab1..565ab3ad6ee6 100644
--- a/sys/kern/sys_timerfd.c
+++ b/sys/kern/sys_timerfd.c
@@ -206,7 +206,6 @@ retry:
             mtx_unlock(&tfd->tfd_lock);
             return (EAGAIN);
         }
-        td->td_rtcgen = atomic_load_acq_int(&rtc_generation);
         error = mtx_sleep(&tfd->tfd_count, &tfd->tfd_lock, PCATCH,
             "tfdrd", 0);
         if (error == 0) {
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index 90a4f3a7dad8..09bf4d519927 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -6,7 +6,7 @@
 const char *syscallnames[] = {
     "syscall",          /* 0 = syscall */
-    "exit",             /* 1 = exit */
+    "_exit",            /* 1 = _exit */
    "fork",             /* 2 = fork */
     "read",             /* 3 = read */
     "write",            /* 4 = write */
diff --git a/sys/kern/syscalls.conf b/sys/kern/syscalls.conf
index a98d52659832..ae7bd1f87612 100644
--- a/sys/kern/syscalls.conf
+++ b/sys/kern/syscalls.conf
@@ -1,3 +1,4 @@
 libsysmap="../../lib/libsys/syscalls.map"
 libsys_h="../../lib/libsys/_libsys.h"
 sysmk="../sys/syscall.mk"
+syshdr_extra="#define SYS_exit SYS__exit"
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index a8815afee866..53b5d3cbbba9 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -51,6 +51,7 @@
 ;   SYSMUX  syscall multiplexer.  No prototype, argument struct, or
 ;       handler is declared or used.  Handled in MD syscall code.
 ;   CAPENABLED  syscall is allowed in capability mode
+;   NORETURN    the syscall does not return
 ;
 ; To support programmatic generation of both the default ABI and 32-bit compat
 ; (freebsd32) we impose a number of restrictions on the types of system calls.
@@ -124,8 +125,8 @@
         ...
     );
 }
-1   AUE_EXIT    STD|CAPENABLED {
-        void exit(
+1   AUE_EXIT    STD|CAPENABLED|NORETURN {
+        void _exit(
             int rval
         );
     }
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index 467caa71f20d..4dfc63924da9 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -17,9 +17,9 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
         *n_args = 0;
         break;
     }
-    /* exit */
+    /* _exit */
     case 1: {
-        struct exit_args *p = params;
+        struct _exit_args *p = params;
         iarg[a++] = p->rval; /* int */
         *n_args = 1;
         break;
     }
@@ -3513,7 +3513,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
     /* syscall */
     case 0:
         break;
-    /* exit */
+    /* _exit */
     case 1:
         switch (ndx) {
         case 0:
@@ -9380,7 +9380,7 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
     switch (sysnum) {
     /* syscall */
     case 0:
-    /* exit */
+    /* _exit */
     case 1:
         if (ndx == 0 || ndx == 1)
             p = "void";
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index fd6202a1424c..85f67731e1cc 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -457,6 +457,7 @@ vop_stdpathconf(struct vop_pathconf_args *ap)
     case _PC_NAMEDATTR_ENABLED:
     case _PC_HAS_NAMEDATTR:
     case _PC_HAS_HIDDENSYSTEM:
+    case _PC_CLONE_BLKSIZE:
         *ap->a_retval = 0;
         return (0);
     default:
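
Note: _PC_CLONE_BLKSIZE is a new pathconf(2) name paired with the copy_file_range(2) cloning work below; the vop_stdpathconf() default of 0 means "no cloning support" on filesystems that do not override it. A userland probe sketch — the path is illustrative, and the meaning of nonzero values is defined by the filesystem:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    long blksize;

    errno = 0;
    blksize = pathconf("/tmp", _PC_CLONE_BLKSIZE);
    if (blksize == -1 && errno != 0) {
        fprintf(stderr, "pathconf: %s\n", strerror(errno));
        return (1);
    }
    /* 0 is the vop_stdpathconf() default: no block cloning. */
    printf("_PC_CLONE_BLKSIZE = %ld\n", blksize);
    return (0);
}
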
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 9704e9c160a8..b805e147bd62 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -5050,15 +5050,16 @@ kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd,
     size_t retlen;
     void *rl_rcookie, *rl_wcookie;
     off_t inoff, outoff, savinoff, savoutoff;
-    bool foffsets_locked;
+    bool foffsets_locked, foffsets_set;
 
     infp = outfp = NULL;
     rl_rcookie = rl_wcookie = NULL;
     foffsets_locked = false;
+    foffsets_set = false;
     error = 0;
     retlen = 0;
 
-    if (flags != 0) {
+    if ((flags & ~COPY_FILE_RANGE_USERFLAGS) != 0) {
         error = EINVAL;
         goto out;
     }
@@ -5122,6 +5123,8 @@ kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd,
         }
         foffset_lock_pair(infp1, &inoff, outfp1, &outoff, 0);
         foffsets_locked = true;
+    } else {
+        foffsets_set = true;
     }
     savinoff = inoff;
     savoutoff = outoff;
@@ -5180,11 +5183,12 @@ out:
         vn_rangelock_unlock(invp, rl_rcookie);
     if (rl_wcookie != NULL)
         vn_rangelock_unlock(outvp, rl_wcookie);
+    if ((foffsets_locked || foffsets_set) &&
+        (error == EINTR || error == ERESTART)) {
+        inoff = savinoff;
+        outoff = savoutoff;
+    }
     if (foffsets_locked) {
-        if (error == EINTR || error == ERESTART) {
-            inoff = savinoff;
-            outoff = savoutoff;
-        }
         if (inoffp == NULL)
             foffset_unlock(infp, inoff, 0);
         else
@@ -5193,6 +5197,9 @@ out:
             foffset_unlock(outfp, outoff, 0);
         else
             *outoffp = outoff;
+    } else if (foffsets_set) {
+        *inoffp = inoff;
+        *outoffp = outoff;
     }
     if (outfp != NULL)
         fdrop(outfp, td);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 6451c9e07a60..93f87ddae4de 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -3443,6 +3443,11 @@ vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
     interrupted = 0;
     dat = NULL;
 
+    if ((flags & COPY_FILE_RANGE_CLONE) != 0) {
+        error = ENOSYS;
+        goto out;
+    }
+
     error = vn_lock(invp, LK_SHARED);
     if (error != 0)
         goto out;
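
Note: vn_generic_copy_file_range() rejecting COPY_FILE_RANGE_CLONE with ENOSYS means the flag only succeeds on filesystems that implement cloning natively, so portable callers need a fallback. A hedged userland sketch — the flag name comes from this series, but it is guarded here in case installed headers predate it, and the placeholder value is for illustration only:

#include <sys/types.h>

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef COPY_FILE_RANGE_CLONE
#define COPY_FILE_RANGE_CLONE 0x01  /* placeholder value, illustration only */
#endif

int
main(int argc, char *argv[])
{
    ssize_t n;
    int infd, outfd;

    if (argc != 3) {
        fprintf(stderr, "usage: %s src dst\n", argv[0]);
        return (1);
    }
    if ((infd = open(argv[1], O_RDONLY)) == -1 ||
        (outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644)) == -1) {
        fprintf(stderr, "open: %s\n", strerror(errno));
        return (1);
    }
    /* Try to clone; fall back to a plain copy if the filesystem cannot
     * (ENOSYS from the generic path above) or the kernel predates the
     * flag and rejects it outright (EINVAL). */
    n = copy_file_range(infd, NULL, outfd, NULL, SSIZE_MAX,
        COPY_FILE_RANGE_CLONE);
    if (n == -1 && (errno == ENOSYS || errno == EINVAL))
        n = copy_file_range(infd, NULL, outfd, NULL, SSIZE_MAX, 0);
    if (n == -1) {
        fprintf(stderr, "copy_file_range: %s\n", strerror(errno));
        return (1);
    }
    printf("copied %zd bytes\n", n);
    return (0);
}
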