aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorRodney W. Grimes <rgrimes@FreeBSD.org>1994-05-25 09:21:21 +0000
committerRodney W. Grimes <rgrimes@FreeBSD.org>1994-05-25 09:21:21 +0000
commit26f9a76710a312a951848542b9ca1f44100450e2 (patch)
tree9179427ac860211c445df663fd2b86267366bfba /sys
parentdbda0ec78e324aced444959e2c98b89b79f22812 (diff)
downloadsrc-26f9a76710a312a951848542b9ca1f44100450e2.tar.gz
src-26f9a76710a312a951848542b9ca1f44100450e2.zip
The big 4.4BSD Lite to FreeBSD 2.0.0 (Development) patch.
Reviewed by: Rodney W. Grimes Submitted by: John Dyson and David Greenman
Notes
Notes: svn path=/head/; revision=1549
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/cpu_switch.S24
-rw-r--r--sys/amd64/amd64/db_interface.c2
-rw-r--r--sys/amd64/amd64/db_trace.c3
-rw-r--r--sys/amd64/amd64/fpu.c2
-rw-r--r--sys/amd64/amd64/genassym.c40
-rw-r--r--sys/amd64/amd64/locore.S81
-rw-r--r--sys/amd64/amd64/locore.s81
-rw-r--r--sys/amd64/amd64/machdep.c219
-rw-r--r--sys/amd64/amd64/mem.c51
-rw-r--r--sys/amd64/amd64/pmap.c76
-rw-r--r--sys/amd64/amd64/support.S12
-rw-r--r--sys/amd64/amd64/support.s12
-rw-r--r--sys/amd64/amd64/swtch.s24
-rw-r--r--sys/amd64/amd64/trap.c157
-rw-r--r--sys/amd64/amd64/tsc.c40
-rw-r--r--sys/amd64/amd64/vm_machdep.c185
-rw-r--r--sys/amd64/include/cpu.h34
-rw-r--r--sys/amd64/include/cpufunc.h139
-rw-r--r--sys/amd64/include/exec.h93
-rw-r--r--sys/amd64/include/frame.h26
-rw-r--r--sys/amd64/include/pcb.h7
-rw-r--r--sys/amd64/include/pmap.h72
-rw-r--r--sys/amd64/include/proc.h4
-rw-r--r--sys/amd64/include/reg.h16
-rw-r--r--sys/amd64/include/signal.h28
-rw-r--r--sys/amd64/include/vmparam.h14
-rw-r--r--sys/amd64/isa/clock.c40
-rw-r--r--sys/amd64/isa/isa.c1
-rw-r--r--sys/amd64/isa/npx.c2
-rw-r--r--sys/conf/Makefile.i38621
-rw-r--r--sys/conf/Makefile.powerpc21
-rw-r--r--sys/conf/files34
-rw-r--r--sys/conf/files.i3861
-rw-r--r--sys/conf/newvers.sh6
-rw-r--r--sys/conf/param.c3
-rw-r--r--sys/ddb/db_command.c6
-rw-r--r--sys/ddb/ddb.h3
-rw-r--r--sys/dev/ed/if_ed.c17
-rw-r--r--sys/dev/ep/if_ep.c5
-rw-r--r--sys/dev/fdc/fdc.c34
-rw-r--r--sys/dev/ie/if_ie.c3
-rw-r--r--sys/dev/mcd/mcd.c2
-rw-r--r--sys/dev/mse/mse.c10
-rw-r--r--sys/dev/sio/sio.c81
-rw-r--r--sys/dev/syscons/syscons.c41
-rw-r--r--sys/fs/cd9660/cd9660_lookup.c6
-rw-r--r--sys/fs/cd9660/cd9660_node.c10
-rw-r--r--sys/fs/cd9660/cd9660_util.c2
-rw-r--r--sys/fs/cd9660/cd9660_vfsops.c11
-rw-r--r--sys/fs/cd9660/cd9660_vnops.c3
-rw-r--r--sys/fs/deadfs/dead_vnops.c14
-rw-r--r--sys/fs/fdescfs/fdesc_vnops.c5
-rw-r--r--sys/fs/fifofs/fifo_vnops.c18
-rw-r--r--sys/fs/nullfs/null_subr.c2
-rw-r--r--sys/fs/portalfs/portal_vnops.c1
-rw-r--r--sys/fs/procfs/procfs_vfsops.c11
-rw-r--r--sys/fs/procfs/procfs_vnops.c16
-rw-r--r--sys/fs/specfs/spec_vnops.c16
-rw-r--r--sys/fs/umapfs/umap_subr.c2
-rw-r--r--sys/fs/umapfs/umap_vnops.c2
-rw-r--r--sys/fs/unionfs/union_subr.c7
-rw-r--r--sys/fs/unionfs/union_vfsops.c2
-rw-r--r--sys/fs/unionfs/union_vnops.c2
-rw-r--r--sys/gnu/ext2fs/ext2_bmap.c2
-rw-r--r--sys/gnu/fs/ext2fs/ext2_bmap.c2
-rw-r--r--sys/i386/conf/Makefile.i38621
-rw-r--r--sys/i386/conf/files.i3861
-rw-r--r--sys/i386/eisa/aha1742.c10
-rw-r--r--sys/i386/i386/conf.c142
-rw-r--r--sys/i386/i386/cons.c6
-rw-r--r--sys/i386/i386/cons.h12
-rw-r--r--sys/i386/i386/db_interface.c2
-rw-r--r--sys/i386/i386/db_trace.c3
-rw-r--r--sys/i386/i386/genassym.c40
-rw-r--r--sys/i386/i386/locore.s81
-rw-r--r--sys/i386/i386/machdep.c219
-rw-r--r--sys/i386/i386/math_emulate.c27
-rw-r--r--sys/i386/i386/mem.c51
-rw-r--r--sys/i386/i386/pmap.c76
-rw-r--r--sys/i386/i386/procfs_machdep.c20
-rw-r--r--sys/i386/i386/support.s12
-rw-r--r--sys/i386/i386/swtch.s24
-rw-r--r--sys/i386/i386/symbols.raw12
-rw-r--r--sys/i386/i386/trap.c157
-rw-r--r--sys/i386/i386/tsc.c40
-rw-r--r--sys/i386/i386/vm_machdep.c185
-rw-r--r--sys/i386/include/_limits.h66
-rw-r--r--sys/i386/include/ansi.h33
-rw-r--r--sys/i386/include/cpu.h34
-rw-r--r--sys/i386/include/cpufunc.h139
-rw-r--r--sys/i386/include/exec.h93
-rw-r--r--sys/i386/include/frame.h26
-rw-r--r--sys/i386/include/limits.h66
-rw-r--r--sys/i386/include/param.h1
-rw-r--r--sys/i386/include/pcb.h7
-rw-r--r--sys/i386/include/pmap.h72
-rw-r--r--sys/i386/include/proc.h4
-rw-r--r--sys/i386/include/pte.h5
-rw-r--r--sys/i386/include/reg.h16
-rw-r--r--sys/i386/include/signal.h28
-rw-r--r--sys/i386/include/spl.h5
-rw-r--r--sys/i386/include/stdarg.h24
-rw-r--r--sys/i386/include/types.h26
-rw-r--r--sys/i386/include/vmparam.h14
-rw-r--r--sys/i386/isa/aha1542.c20
-rw-r--r--sys/i386/isa/aha1742.c10
-rw-r--r--sys/i386/isa/bt742a.c4
-rw-r--r--sys/i386/isa/clock.c40
-rw-r--r--sys/i386/isa/fd.c34
-rw-r--r--sys/i386/isa/ft.c4
-rw-r--r--sys/i386/isa/icu.s12
-rw-r--r--sys/i386/isa/if_ed.c17
-rw-r--r--sys/i386/isa/if_ep.c5
-rw-r--r--sys/i386/isa/if_ie.c3
-rw-r--r--sys/i386/isa/isa.c1
-rw-r--r--sys/i386/isa/mcd.c2
-rw-r--r--sys/i386/isa/mse.c10
-rw-r--r--sys/i386/isa/npx.c2
-rw-r--r--sys/i386/isa/sio.c81
-rw-r--r--sys/i386/isa/sound/os.h2
-rw-r--r--sys/i386/isa/syscons.c41
-rw-r--r--sys/i386/isa/ultra14f.c4
-rw-r--r--sys/i386/isa/wd.c84
-rw-r--r--sys/i386/isa/wt.c4
-rw-r--r--sys/isa/atrtc.c40
-rw-r--r--sys/isa/fd.c34
-rw-r--r--sys/isa/sio.c81
-rw-r--r--sys/isa/syscons.c41
-rw-r--r--sys/isofs/cd9660/cd9660_lookup.c6
-rw-r--r--sys/isofs/cd9660/cd9660_node.c10
-rw-r--r--sys/isofs/cd9660/cd9660_util.c2
-rw-r--r--sys/isofs/cd9660/cd9660_vfsops.c11
-rw-r--r--sys/isofs/cd9660/cd9660_vnops.c3
-rw-r--r--sys/kern/imgact_aout.c23
-rw-r--r--sys/kern/imgact_shell.c1
-rw-r--r--sys/kern/init_main.c70
-rw-r--r--sys/kern/init_sysent.c14
-rw-r--r--sys/kern/kern_acct.c2
-rw-r--r--sys/kern/kern_clock.c1
-rw-r--r--sys/kern/kern_descrip.c45
-rw-r--r--sys/kern/kern_exec.c527
-rw-r--r--sys/kern/kern_exit.c3
-rw-r--r--sys/kern/kern_fork.c3
-rw-r--r--sys/kern/kern_ktrace.c12
-rw-r--r--sys/kern/kern_malloc.c2
-rw-r--r--sys/kern/kern_physio.c200
-rw-r--r--sys/kern/kern_proc.c9
-rw-r--r--sys/kern/kern_prot.c22
-rw-r--r--sys/kern/kern_resource.c29
-rw-r--r--sys/kern/kern_sig.c23
-rw-r--r--sys/kern/kern_subr.c3
-rw-r--r--sys/kern/kern_synch.c1
-rw-r--r--sys/kern/kern_sysctl.c13
-rw-r--r--sys/kern/kern_tc.c1
-rw-r--r--sys/kern/kern_time.c14
-rw-r--r--sys/kern/kern_timeout.c1
-rw-r--r--sys/kern/kern_xxx.c130
-rw-r--r--sys/kern/subr_clist.c428
-rw-r--r--sys/kern/subr_disklabel.c105
-rw-r--r--sys/kern/subr_log.c6
-rw-r--r--sys/kern/subr_param.c3
-rw-r--r--sys/kern/subr_prf.c6
-rw-r--r--sys/kern/subr_prof.c1
-rw-r--r--sys/kern/subr_trap.c157
-rw-r--r--sys/kern/subr_xxx.c6
-rw-r--r--sys/kern/sys_generic.c17
-rw-r--r--sys/kern/sys_process.c2
-rw-r--r--sys/kern/sys_socket.c6
-rw-r--r--sys/kern/syscalls.c8
-rw-r--r--sys/kern/syscalls.master10
-rw-r--r--sys/kern/tty.c2
-rw-r--r--sys/kern/tty_compat.c7
-rw-r--r--sys/kern/tty_conf.c8
-rw-r--r--sys/kern/tty_cons.c6
-rw-r--r--sys/kern/tty_pty.c17
-rw-r--r--sys/kern/tty_subr.c428
-rw-r--r--sys/kern/tty_tty.c5
-rw-r--r--sys/kern/uipc_domain.c3
-rw-r--r--sys/kern/uipc_mbuf.c8
-rw-r--r--sys/kern/uipc_sockbuf.c32
-rw-r--r--sys/kern/uipc_socket.c22
-rw-r--r--sys/kern/uipc_socket2.c32
-rw-r--r--sys/kern/uipc_syscalls.c36
-rw-r--r--sys/kern/uipc_usrreq.c32
-rw-r--r--sys/kern/vfs_bio.c841
-rw-r--r--sys/kern/vfs_cache.c4
-rw-r--r--sys/kern/vfs_cluster.c3
-rw-r--r--sys/kern/vfs_export.c47
-rw-r--r--sys/kern/vfs_extattr.c52
-rw-r--r--sys/kern/vfs_init.c1
-rw-r--r--sys/kern/vfs_subr.c47
-rw-r--r--sys/kern/vfs_syscalls.c52
-rw-r--r--sys/kern/vfs_vnops.c10
-rw-r--r--sys/miscfs/deadfs/dead_vnops.c14
-rw-r--r--sys/miscfs/fdesc/fdesc_vnops.c5
-rw-r--r--sys/miscfs/fifofs/fifo_vnops.c18
-rw-r--r--sys/miscfs/kernfs/kernfs_vfsops.c12
-rw-r--r--sys/miscfs/kernfs/kernfs_vnops.c13
-rw-r--r--sys/miscfs/nullfs/null_subr.c2
-rw-r--r--sys/miscfs/portal/portal_vnops.c1
-rw-r--r--sys/miscfs/procfs/procfs_vfsops.c11
-rw-r--r--sys/miscfs/procfs/procfs_vnops.c16
-rw-r--r--sys/miscfs/specfs/spec_vnops.c16
-rw-r--r--sys/miscfs/umapfs/umap_subr.c2
-rw-r--r--sys/miscfs/umapfs/umap_vnops.c2
-rw-r--r--sys/miscfs/union/union_subr.c7
-rw-r--r--sys/miscfs/union/union_vfsops.c2
-rw-r--r--sys/miscfs/union/union_vnops.c2
-rw-r--r--sys/net/bpf_filter.c7
-rw-r--r--sys/net/bpfdesc.h2
-rw-r--r--sys/net/if.h10
-rw-r--r--sys/net/if_loop.c8
-rw-r--r--sys/net/if_sl.c6
-rw-r--r--sys/net/netisr.h21
-rw-r--r--sys/net/radix.c2
-rw-r--r--sys/net/route.c2
-rw-r--r--sys/net/route.h2
-rw-r--r--sys/net/rtsock.c4
-rw-r--r--sys/netinet/igmp.c1
-rw-r--r--sys/netinet/in.c8
-rw-r--r--sys/netinet/in_pcb.c14
-rw-r--r--sys/netinet/in_pcb.h12
-rw-r--r--sys/netinet/in_proto.c2
-rw-r--r--sys/netinet/in_var.h2
-rw-r--r--sys/netinet/ip_input.c2
-rw-r--r--sys/netinet/ip_mroute.c1
-rw-r--r--sys/netinet/ip_output.c3
-rw-r--r--sys/netinet/tcp_input.c8
-rw-r--r--sys/netinet/tcp_reass.c8
-rw-r--r--sys/netinet/tcp_subr.c2
-rw-r--r--sys/netinet/tcp_timer.h4
-rw-r--r--sys/netinet/tcp_timewait.c2
-rw-r--r--sys/netinet/tcp_usrreq.c2
-rw-r--r--sys/netinet/udp_usrreq.c4
-rw-r--r--sys/nfs/nfs_bio.c10
-rw-r--r--sys/nfs/nfs_common.c15
-rw-r--r--sys/nfs/nfs_node.c7
-rw-r--r--sys/nfs/nfs_nqlease.c14
-rw-r--r--sys/nfs/nfs_serv.c33
-rw-r--r--sys/nfs/nfs_socket.c17
-rw-r--r--sys/nfs/nfs_srvcache.c2
-rw-r--r--sys/nfs/nfs_subs.c15
-rw-r--r--sys/nfs/nfs_syscalls.c11
-rw-r--r--sys/nfs/nfs_vnops.c26
-rw-r--r--sys/nfsclient/nfs_bio.c10
-rw-r--r--sys/nfsclient/nfs_nfsiod.c11
-rw-r--r--sys/nfsclient/nfs_node.c7
-rw-r--r--sys/nfsclient/nfs_socket.c17
-rw-r--r--sys/nfsclient/nfs_subs.c15
-rw-r--r--sys/nfsclient/nfs_vnops.c26
-rw-r--r--sys/nfsserver/nfs_serv.c33
-rw-r--r--sys/nfsserver/nfs_srvcache.c2
-rw-r--r--sys/nfsserver/nfs_srvsock.c17
-rw-r--r--sys/nfsserver/nfs_srvsubs.c15
-rw-r--r--sys/nfsserver/nfs_syscalls.c11
-rw-r--r--sys/powerpc/include/_limits.h66
-rw-r--r--sys/powerpc/include/limits.h66
-rw-r--r--sys/scsi/cd.c8
-rw-r--r--sys/scsi/scsi_base.c11
-rw-r--r--sys/scsi/scsi_ioctl.c12
-rw-r--r--sys/scsi/sd.c51
-rw-r--r--sys/sys/bio.h20
-rw-r--r--sys/sys/buf.h20
-rw-r--r--sys/sys/cdefs.h2
-rw-r--r--sys/sys/clist.h6
-rw-r--r--sys/sys/cons.h12
-rw-r--r--sys/sys/disklabel.h32
-rw-r--r--sys/sys/diskmbr.h32
-rw-r--r--sys/sys/diskpc98.h32
-rw-r--r--sys/sys/exec.h4
-rw-r--r--sys/sys/imgact.h6
-rw-r--r--sys/sys/kernel.h24
-rw-r--r--sys/sys/malloc.h4
-rw-r--r--sys/sys/mtio.h33
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/proc.h3
-rw-r--r--sys/sys/syscall.h4
-rw-r--r--sys/sys/systm.h3
-rw-r--r--sys/sys/termios.h3
-rw-r--r--sys/sys/ttycom.h8
-rw-r--r--sys/sys/un.h5
-rw-r--r--sys/sys/utsname.h22
-rw-r--r--sys/sys/vmmeter.h2
-rw-r--r--sys/ufs/ffs/ffs_alloc.c11
-rw-r--r--sys/ufs/ffs/ffs_balloc.c1
-rw-r--r--sys/ufs/ffs/ffs_extern.h2
-rw-r--r--sys/ufs/ffs/ffs_inode.c1
-rw-r--r--sys/ufs/ffs/ffs_vfsops.c7
-rw-r--r--sys/ufs/lfs/lfs_bio.c1
-rw-r--r--sys/ufs/lfs/lfs_segment.c1
-rw-r--r--sys/ufs/lfs/lfs_subr.c1
-rw-r--r--sys/ufs/lfs/lfs_syscalls.c3
-rw-r--r--sys/ufs/lfs/lfs_vfsops.c7
-rw-r--r--sys/ufs/lfs/lfs_vnops.c1
-rw-r--r--sys/ufs/mfs/mfs_vfsops.c3
-rw-r--r--sys/ufs/mfs/mfs_vnops.c2
-rw-r--r--sys/ufs/ufs/ufs_bmap.c2
-rw-r--r--sys/ufs/ufs/ufs_disksubr.c105
-rw-r--r--sys/ufs/ufs/ufs_readwrite.c2
-rw-r--r--sys/ufs/ufs/ufs_vnops.c5
-rw-r--r--sys/vm/device_pager.c72
-rw-r--r--sys/vm/swap_pager.c2107
-rw-r--r--sys/vm/swap_pager.h65
-rw-r--r--sys/vm/vm.h4
-rw-r--r--sys/vm/vm_extern.h12
-rw-r--r--sys/vm/vm_fault.c440
-rw-r--r--sys/vm/vm_glue.c500
-rw-r--r--sys/vm/vm_init.c8
-rw-r--r--sys/vm/vm_kern.c6
-rw-r--r--sys/vm/vm_kern.h4
-rw-r--r--sys/vm/vm_map.c127
-rw-r--r--sys/vm/vm_map.h10
-rw-r--r--sys/vm/vm_meter.c1
-rw-r--r--sys/vm/vm_mmap.c4
-rw-r--r--sys/vm/vm_object.c495
-rw-r--r--sys/vm/vm_page.c391
-rw-r--r--sys/vm/vm_page.h30
-rw-r--r--sys/vm/vm_pageout.c1063
-rw-r--r--sys/vm/vm_pageout.h30
-rw-r--r--sys/vm/vm_pager.c173
-rw-r--r--sys/vm/vm_pager.h37
-rw-r--r--sys/vm/vm_param.h30
-rw-r--r--sys/vm/vm_prot.h2
-rw-r--r--sys/vm/vm_swap.c16
-rw-r--r--sys/vm/vm_unix.c41
-rw-r--r--sys/vm/vm_user.c4
-rw-r--r--sys/vm/vnode_pager.c1334
-rw-r--r--sys/vm/vnode_pager.h3
328 files changed, 11163 insertions, 4984 deletions
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 4dbc672b923e..aa8b5ba5d9a3 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -52,9 +52,9 @@
/*
* The following primitives manipulate the run queues.
* _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
+ * have processes in them. setrunqueue puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
+ * other processes are on a queue related to p->p_priority, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
@@ -72,11 +72,11 @@ _want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
- * Setrq(p)
+ * setrunqueue(p)
*
* Call should be made at spl6(), and p->p_stat should be SRUN
*/
-ENTRY(setrq)
+ENTRY(setrunqueue)
movl 4(%esp),%eax
cmpl $0,P_RLINK(%eax) /* should not be on q already */
je set1
@@ -95,7 +95,7 @@ set1:
movl %eax,P_LINK(%ecx)
ret
-set2: .asciz "setrq"
+set2: .asciz "setrunqueue"
/*
* Remrq(p)
@@ -131,10 +131,10 @@ rem2:
ret
rem3: .asciz "remrq"
-sw0: .asciz "swtch"
+sw0: .asciz "cpu_switch"
/*
- * When no processes are on the runq, swtch() branches to _idle
+ * When no processes are on the runq, cpu_switch() branches to _idle
* to wait for something to come ready.
*/
ALIGN_TEXT
@@ -146,8 +146,8 @@ _idle:
sti
/*
- * XXX callers of swtch() do a bogus splclock(). Locking should
- * be left to swtch().
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
*/
movl $SWI_AST_MASK,_cpl
testl $~SWI_AST_MASK,_ipending
@@ -169,9 +169,9 @@ badsw:
/*NOTREACHED*/
/*
- * Swtch()
+ * cpu_switch()
*/
-ENTRY(swtch)
+ENTRY(cpu_switch)
incl _cnt+V_SWTCH
/* switch to new process. first, save context as needed */
@@ -340,7 +340,7 @@ ENTRY(swtch_to_inactive)
/*
* savectx(pcb, altreturn)
* Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
*/
ENTRY(savectx)
movl 4(%esp),%ecx
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
index 5f7c9d52ec6c..e79a2ae4a7ef 100644
--- a/sys/amd64/amd64/db_interface.c
+++ b/sys/amd64/amd64/db_interface.c
@@ -36,7 +36,7 @@
#include "ddb/ddb.h"
#include <sys/reboot.h>
-#include <vm/vm_statistics.h>
+/* #include <vm/vm_statistics.h> */
#include <vm/pmap.h>
#include <setjmp.h>
diff --git a/sys/amd64/amd64/db_trace.c b/sys/amd64/amd64/db_trace.c
index c7c2cd833670..d536d942db53 100644
--- a/sys/amd64/amd64/db_trace.c
+++ b/sys/amd64/amd64/db_trace.c
@@ -30,7 +30,8 @@
#include <vm/vm_param.h>
#include <vm/lock.h>
-#include <vm/vm_statistics.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
#include <machine/pmap.h>
#include "systm.h"
#include "proc.h"
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index 00424bf3aa14..17400bdbb29a 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -438,7 +438,7 @@ npxintr(frame)
* in doreti, and the frame for that could easily be set up
* just before it is used).
*/
- curproc->p_regs = (int *)&frame.if_es;
+ curproc->p_md.md_regs = (int *)&frame.if_es;
#ifdef notyet
/*
* Encode the appropriate code for detailed information on
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index b7847e847bf6..a75d1f1a8557 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -37,21 +37,19 @@
* $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
*/
-#include "sys/param.h"
-#include "sys/buf.h"
-#include "sys/vmmeter.h"
-#include "sys/proc.h"
-#include "sys/user.h"
-#include "sys/mbuf.h"
-#include "sys/msgbuf.h"
-#include "sys/resourcevar.h"
-#include "machine/cpu.h"
-#include "machine/trap.h"
-#include "machine/psl.h"
-#include "sys/syscall.h"
-#include "vm/vm_param.h"
-#include "vm/vm_map.h"
-#include "machine/pmap.h"
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <sys/syscall.h>
+#include <vm/vm.h>
+#include <sys/user.h>
main()
{
@@ -70,12 +68,12 @@ main()
printf("#define\tI386_CR3PAT %d\n", I386_CR3PAT);
printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
- printf("#define\tP_LINK %d\n", &p->p_link);
- printf("#define\tP_RLINK %d\n", &p->p_rlink);
+ printf("#define\tP_LINK %d\n", &p->p_forw);
+ printf("#define\tP_RLINK %d\n", &p->p_back);
printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
printf("#define\tP_ADDR %d\n", &p->p_addr);
- printf("#define\tP_PRI %d\n", &p->p_pri);
+ printf("#define\tP_PRI %d\n", &p->p_priority);
printf("#define\tP_STAT %d\n", &p->p_stat);
printf("#define\tP_WCHAN %d\n", &p->p_wchan);
printf("#define\tP_FLAG %d\n", &p->p_flag);
@@ -87,10 +85,10 @@ main()
printf("#define\tV_SYSCALL %d\n", &vm->v_syscall);
printf("#define\tV_INTR %d\n", &vm->v_intr);
printf("#define\tV_SOFT %d\n", &vm->v_soft);
- printf("#define\tV_PDMA %d\n", &vm->v_pdma);
+/* printf("#define\tV_PDMA %d\n", &vm->v_pdma); */
printf("#define\tV_FAULTS %d\n", &vm->v_faults);
- printf("#define\tV_PGREC %d\n", &vm->v_pgrec);
- printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec);
+/* printf("#define\tV_PGREC %d\n", &vm->v_pgrec); */
+/* printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec); */
printf("#define\tUPAGES %d\n", UPAGES);
printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
printf("#define\tCLSIZE %d\n", CLSIZE);
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index 8da843865c7b..7aa6e6bd8f78 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -274,7 +274,7 @@ NON_GPROF_ENTRY(btext)
movl $0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
- movl $PG_V|PG_KW,%eax /* having these bits set, */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
@@ -302,7 +302,7 @@ NON_GPROF_ENTRY(btext)
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
- orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-though */
movl %esi,%ebx /* calculate pte offset to ptd */
shrl $PGSHIFT-2,%ebx
addl %esi,%ebx /* address of page directory */
@@ -452,10 +452,26 @@ reloc_gdt:
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+#if 0
movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
call _main /* autoconfiguration, mountroot etc */
- popl %esi
+
+ addl $(13*4),%esp /* back to a frame we can return with */
/*
* now we've run main() and determined what cpu-type we are, we can
@@ -473,69 +489,16 @@ reloc_gdt:
* set up address space and stack so that we can 'return' to user mode
*/
1:
- .globl __ucodesel,__udatasel
movl __ucodesel,%eax
movl __udatasel,%ecx
- /* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+
movl %cx,%ds
movl %cx,%es
movl %ax,%fs /* double map cs to fs */
movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
-
- pushl $lretmsg1 /* "should never get here!" */
- call _panic
-lretmsg1:
- .asciz "lret: toinit\n"
+ iret /* goto user! */
-
-#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
-/*
- * Icode is copied out to process 1 and executed in user mode:
- * execve("/sbin/init", argv, envp); exit(0);
- * If the execve fails, process 1 exits and the system panics.
- */
-NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
-
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
- movl $argv,%eax
- subl $_icode,%eax
- pushl %eax /* argp for execve() */
-
-# pushl $init-_icode
- movl $init,%eax
- subl $_icode,%eax
- pushl %eax /* fname for execve() */
-
- pushl %eax /* dummy return address */
-
- movl $SYS_execve,%eax
- LCALL(0x7,0x0)
-
- /* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $SYS_exit,%eax
- LCALL(0x7,0x0)
-
-init:
- .asciz "/sbin/init"
- ALIGN_DATA
-argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
- .long 0
-eicode:
-
- .globl _szicode
-_szicode:
- .long _szicode-_icode
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index 8da843865c7b..7aa6e6bd8f78 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -274,7 +274,7 @@ NON_GPROF_ENTRY(btext)
movl $0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
- movl $PG_V|PG_KW,%eax /* having these bits set, */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
@@ -302,7 +302,7 @@ NON_GPROF_ENTRY(btext)
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
- orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-though */
movl %esi,%ebx /* calculate pte offset to ptd */
shrl $PGSHIFT-2,%ebx
addl %esi,%ebx /* address of page directory */
@@ -452,10 +452,26 @@ reloc_gdt:
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+#if 0
movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
call _main /* autoconfiguration, mountroot etc */
- popl %esi
+
+ addl $(13*4),%esp /* back to a frame we can return with */
/*
* now we've run main() and determined what cpu-type we are, we can
@@ -473,69 +489,16 @@ reloc_gdt:
* set up address space and stack so that we can 'return' to user mode
*/
1:
- .globl __ucodesel,__udatasel
movl __ucodesel,%eax
movl __udatasel,%ecx
- /* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+
movl %cx,%ds
movl %cx,%es
movl %ax,%fs /* double map cs to fs */
movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
-
- pushl $lretmsg1 /* "should never get here!" */
- call _panic
-lretmsg1:
- .asciz "lret: toinit\n"
+ iret /* goto user! */
-
-#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
-/*
- * Icode is copied out to process 1 and executed in user mode:
- * execve("/sbin/init", argv, envp); exit(0);
- * If the execve fails, process 1 exits and the system panics.
- */
-NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
-
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
- movl $argv,%eax
- subl $_icode,%eax
- pushl %eax /* argp for execve() */
-
-# pushl $init-_icode
- movl $init,%eax
- subl $_icode,%eax
- pushl %eax /* fname for execve() */
-
- pushl %eax /* dummy return address */
-
- movl $SYS_execve,%eax
- LCALL(0x7,0x0)
-
- /* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $SYS_exit,%eax
- LCALL(0x7,0x0)
-
-init:
- .asciz "/sbin/init"
- ALIGN_DATA
-argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
- .long 0
-eicode:
-
- .globl _szicode
-_szicode:
- .long _szicode-_icode
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index eab107598527..31bc6c294ba6 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -41,23 +41,24 @@
#include "npx.h"
#include "isa.h"
-#include <stddef.h>
-#include "param.h"
-#include "systm.h"
-#include "signalvar.h"
-#include "kernel.h"
-#include "map.h"
-#include "proc.h"
-#include "user.h"
-#include "exec.h" /* for PS_STRINGS */
-#include "buf.h"
-#include "reboot.h"
-#include "conf.h"
-#include "file.h"
-#include "callout.h"
-#include "malloc.h"
-#include "mbuf.h"
-#include "msgbuf.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/sysctl.h>
#ifdef SYSVSHM
#include "sys/shm.h"
@@ -94,7 +95,7 @@ static void identifycpu(void);
static void initcpu(void);
static int test_page(int *, int);
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
const char machine[] = "PC-Class";
const char *cpu_model;
@@ -121,6 +122,7 @@ int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
+int msgbufmapped = 0; /* set when safe to use msgbuf */
extern int freebufspace;
extern char *bouncememory;
@@ -141,6 +143,12 @@ extern cyloffset;
int cpu_class;
void dumpsys __P((void));
+vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+vm_offset_t pager_sva, pager_eva;
+int maxbkva, pager_map_size;
+
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
void
cpu_startup()
@@ -275,18 +283,19 @@ again:
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
- /*
- * Allocate a submap for buffer space allocations.
- * XXX we are NOT using buffer_map, but due to
- * the references to it we will just allocate 1 page of
- * vm (not real memory) to make things happy...
- */
- buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- /* bufpages * */NBPG, TRUE);
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);
+
+ io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ pager_map_size, TRUE);
+
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf * MAXBSIZE), TRUE);
/*
* Allocate a submap for physio
*/
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
/*
@@ -296,7 +305,7 @@ again:
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
/*
* Initialize callouts
@@ -305,7 +314,7 @@ again:
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
- printf("avail memory = %d (%d pages)\n", ptoa(vm_page_free_count), vm_page_free_count);
+ printf("avail memory = %d (%d pages)\n", ptoa(cnt.v_free_count), cnt.v_free_count);
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@@ -437,11 +446,11 @@ sendsig(catcher, sig, mask, code)
register struct proc *p = curproc;
register int *regs;
register struct sigframe *fp;
- struct sigacts *ps = p->p_sigacts;
+ struct sigacts *psp = p->p_sigacts;
int oonstack, frmtrap;
- regs = p->p_regs;
- oonstack = ps->ps_onstack;
+ regs = p->p_md.md_regs;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
/*
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
@@ -449,10 +458,12 @@ sendsig(catcher, sig, mask, code)
* will fail if the process has not already allocated
* the space with a `brk'.
*/
- if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
- fp = (struct sigframe *)(ps->ps_sigsp
- - sizeof(struct sigframe));
- ps->ps_onstack = 1;
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+ psp->ps_sigstk.ss_size - sizeof(struct sigframe));
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
} else {
fp = (struct sigframe *)(regs[tESP]
- sizeof(struct sigframe));
@@ -540,7 +551,7 @@ sigreturn(p, uap, retval)
{
register struct sigcontext *scp;
register struct sigframe *fp;
- register int *regs = p->p_regs;
+ register int *regs = p->p_md.md_regs;
int eflags;
/*
@@ -614,7 +625,10 @@ sigreturn(p, uap, retval)
if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
return(EINVAL);
- p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
p->p_sigmask = scp->sc_mask &~
(sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
regs[tEBP] = scp->sc_fp;
@@ -651,7 +665,7 @@ boot(arghowto)
for(;;);
}
howto = arghowto;
- if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
+ if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
register struct buf *bp;
int iter, nbusy;
@@ -818,13 +832,13 @@ setregs(p, entry, stack)
u_long entry;
u_long stack;
{
- p->p_regs[tEBP] = 0; /* bottom of the fp chain */
- p->p_regs[tEIP] = entry;
- p->p_regs[tESP] = stack;
- p->p_regs[tSS] = _udatasel;
- p->p_regs[tDS] = _udatasel;
- p->p_regs[tES] = _udatasel;
- p->p_regs[tCS] = _ucodesel;
+ p->p_md.md_regs[tEBP] = 0; /* bottom of the fp chain */
+ p->p_md.md_regs[tEIP] = entry;
+ p->p_md.md_regs[tESP] = stack;
+ p->p_md.md_regs[tSS] = _udatasel;
+ p->p_md.md_regs[tDS] = _udatasel;
+ p->p_md.md_regs[tES] = _udatasel;
+ p->p_md.md_regs[tCS] = _ucodesel;
p->p_addr->u_pcb.pcb_flags = 0; /* no fp at all */
load_cr0(rcr0() | CR0_TS); /* start emulating */
@@ -834,6 +848,34 @@ setregs(p, entry, stack)
}
/*
+ * machine dependent system variables.
+ */
+int
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+
+ /* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ case CPU_CONSDEV:
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
+ sizeof cn_tty->t_dev));
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+/*
* Initialize 386 and configure to run kernel
*/
@@ -1105,9 +1147,11 @@ init386(first)
r_gdt.rd_limit = sizeof(gdt) - 1;
r_gdt.rd_base = (int) gdt;
lgdt(&r_gdt);
+
r_idt.rd_limit = sizeof(idt) - 1;
r_idt.rd_base = (int) idt;
lidt(&r_idt);
+
_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
lldt(_default_ldt);
currentldt = _default_ldt;
@@ -1339,7 +1383,7 @@ _remque(element)
* The registers are in the frame; the frame is in the user area of
* the process in question; when the process is active, the registers
* are in "the kernel stack"; when it's not, they're still there, but
- * things get flipped around. So, since p->p_regs is the whole address
+ * things get flipped around. So, since p->p_md.md_regs is the whole address
* of the register set, take its offset from the kernel stack, and
* index into the user block. Don't you just *love* virtual memory?
* (I'm starting to think seymour is right...)
@@ -1348,7 +1392,7 @@ _remque(element)
int
ptrace_set_pc (struct proc *p, unsigned int addr) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eip = addr;
return 0;
@@ -1357,7 +1401,7 @@ ptrace_set_pc (struct proc *p, unsigned int addr) {
int
ptrace_single_step (struct proc *p) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eflags |= PSL_T;
return 0;
@@ -1370,7 +1414,7 @@ ptrace_single_step (struct proc *p) {
int
ptrace_getregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = fill_regs (p, &regs))
return error;
@@ -1381,7 +1425,7 @@ ptrace_getregs (struct proc *p, unsigned int *addr) {
int
ptrace_setregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = copyin (addr, &regs, sizeof(regs)))
return error;
@@ -1390,11 +1434,11 @@ ptrace_setregs (struct proc *p, unsigned int *addr) {
}
int
-fill_regs(struct proc *p, struct regs *regs) {
+fill_regs(struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
regs->r_es = tp->tf_es;
@@ -1415,11 +1459,11 @@ fill_regs(struct proc *p, struct regs *regs) {
}
int
-set_regs (struct proc *p, struct regs *regs) {
+set_regs (struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
tp->tf_es = regs->r_es;
@@ -1444,6 +1488,69 @@ set_regs (struct proc *p, struct regs *regs) {
void
Debugger(const char *msg)
{
- printf("Debugger(\"%s\") called.", msg);
+ printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
+
+#include <sys/disklabel.h>
+#define b_cylin b_resid
+#define dkpart(dev) (minor(dev) & 7)
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
+{
+ struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
+ int labelsect = lp->d_partitions[0].p_offset;
+ int maxsz = p->p_size,
+ sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
+
+ /* overwriting disk label ? */
+ /* XXX should also protect bootstrap in first 8K */
+ if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
+#if LABELSECTOR != 0
+ bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
+#endif
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+
+#if defined(DOSBBSECTOR) && defined(notyet)
+ /* overwriting master boot record? */
+ if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+#endif
+
+ /* beyond partition? */
+ if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
+ /* if exactly at end of disk, return an EOF */
+ if (bp->b_blkno == maxsz) {
+ bp->b_resid = bp->b_bcount;
+ return(0);
+ }
+ /* or truncate if part of it fits */
+ sz = maxsz - bp->b_blkno;
+ if (sz <= 0) {
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ bp->b_bcount = sz << DEV_BSHIFT;
+ }
+
+ /* calculate cylinder for disksort to order transfers with */
+ bp->b_pblkno = bp->b_blkno + p->p_offset;
+ bp->b_cylin = bp->b_pblkno / lp->d_secpercyl;
+ return(1);
+
+bad:
+ bp->b_flags |= B_ERROR;
+ return(-1);
+}
+
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index c3899a17c74f..1b8f18747a93 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -45,24 +45,23 @@
* Memory special file
*/
-#include "param.h"
-#include "conf.h"
-#include "buf.h"
-#include "systm.h"
-#include "uio.h"
-#include "malloc.h"
-#include "proc.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
-#include "machine/cpu.h"
-#include "machine/psl.h"
+#include <machine/cpu.h>
+#include <machine/psl.h>
-#include "vm/vm_param.h"
-#include "vm/lock.h"
-#include "vm/vm_statistics.h"
-#include "vm/vm_prot.h"
-#include "vm/pmap.h"
+#include <vm/vm_param.h>
+#include <vm/lock.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
-extern char *vmmap; /* poor name! */
+extern char *ptvmmap; /* poor name! */
/*ARGSUSED*/
int
mmclose(dev, uio, flags)
@@ -74,7 +73,7 @@ mmclose(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags &= ~PSL_IOPL;
break;
default:
@@ -93,7 +92,7 @@ mmopen(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags |= PSL_IOPL;
break;
default:
@@ -128,25 +127,25 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
v = uio->uio_offset;
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, v,
+ pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
TRUE);
o = (int)uio->uio_offset & PGOFSET;
c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
- c = MIN(c, (u_int)(NBPG - o));
- c = MIN(c, (u_int)iov->iov_len);
- error = uiomove((caddr_t)&vmmap[o], (int)c, uio);
- pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
- (vm_offset_t)&vmmap[NBPG]);
+ c = min(c, (u_int)(NBPG - o));
+ c = min(c, (u_int)iov->iov_len);
+ error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
+ pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
+ (vm_offset_t)&ptvmmap[NBPG]);
continue;
/* minor device 1 is kernel memory */
case 1:
c = iov->iov_len;
- if (!kernacc((caddr_t)uio->uio_offset, c,
+ if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return(EFAULT);
- error = uiomove((caddr_t)uio->uio_offset, (int)c, uio);
+ error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
/* minor device 2 is EOF/RATHOLE */
@@ -167,7 +166,7 @@ mmrw(dev, uio, flags)
malloc(CLBYTES, M_TEMP, M_WAITOK);
bzero(zbuf, CLBYTES);
}
- c = MIN(iov->iov_len, CLBYTES);
+ c = min(iov->iov_len, CLBYTES);
error = uiomove(zbuf, (int)c, uio);
continue;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d5b556fff2f1..88db9dd4ed62 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -85,19 +85,19 @@
* and to when physical maps must be made correct.
*/
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "user.h"
-#include "i386/include/cpufunc.h"
-#include "i386/include/cputypes.h"
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
-#include "vm/vm_page.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
-#include "i386/isa/isa.h"
+#include <i386/include/cpufunc.h>
+#include <i386/include/cputypes.h>
+
+#include <i386/isa/isa.h>
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
@@ -149,12 +149,12 @@ static inline void *vm_get_pmap();
static inline void vm_put_pmap();
inline void pmap_use_pt();
inline void pmap_unuse_pt();
-inline pt_entry_t * const pmap_pte();
+inline pt_entry_t * pmap_pte();
static inline pv_entry_t get_pv_entry();
void pmap_alloc_pv_entry();
void pmap_clear_modify();
void i386_protection_init();
-extern vm_offset_t pager_sva, pager_eva;
+extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;
#if BSDVM_COMPAT
@@ -163,8 +163,8 @@ extern int cpu_class;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-pt_entry_t *CMAP1, *CMAP2, *mmap;
-caddr_t CADDR1, CADDR2, vmmap;
+pt_entry_t *CMAP1, *CMAP2, *ptmmap;
+caddr_t CADDR1, CADDR2, ptvmmap;
pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;
#endif
@@ -180,8 +180,8 @@ void init_pv_entries(int) ;
*/
inline pt_entry_t *
-const pmap_pte(pmap, va)
- register pmap_t pmap;
+pmap_pte(pmap, va)
+ pmap_t pmap;
vm_offset_t va;
{
@@ -374,7 +374,7 @@ pmap_bootstrap(firstaddr, loadaddr)
SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
- SYSMAP(caddr_t ,mmap ,vmmap ,1 )
+ SYSMAP(caddr_t ,ptmmap ,ptvmmap ,1 )
SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
virtual_avail = va;
#endif
@@ -530,7 +530,7 @@ static inline void
vm_put_pmap(up)
struct pmaplist *up;
{
- kmem_free(kernel_map, up, ctob(1));
+ kmem_free(kernel_map, (vm_offset_t)up, ctob(1));
}
/*
@@ -851,7 +851,7 @@ pmap_remove(pmap, sva, eva)
if (pmap_is_managed(pa)) {
if ((((int) oldpte & PG_M) && (sva < USRSTACK || sva > UPT_MAX_ADDRESS))
|| (sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) {
- if (sva < pager_sva || sva >= pager_eva) {
+ if (sva < clean_sva || sva >= clean_eva) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -941,7 +941,7 @@ pmap_remove(pmap, sva, eva)
if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
|| (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
- if (va < pager_sva || va >= pager_eva) {
+ if (va < clean_sva || va >= clean_eva ) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -1006,7 +1006,7 @@ pmap_remove_all(pa)
if ( (m->flags & PG_CLEAN) &&
((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
|| (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
- if (pv->pv_va < pager_sva || pv->pv_va >= pager_eva) {
+ if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) {
m->flags &= ~PG_CLEAN;
}
}
@@ -1261,7 +1261,11 @@ validate:
if (va < UPT_MIN_ADDRESS)
(int) npte |= PG_u;
else if (va < UPT_MAX_ADDRESS)
- (int) npte |= PG_u | PG_RW;
+ (int) npte |= PG_u | PG_RW | PG_NC_PWT;
+
+/*
+ printf("mapping: pa: %x, to va: %x, with pte: %x\n", pa, va, npte);
+*/
if( *pte != npte) {
*pte = npte;
@@ -1414,7 +1418,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
- *pte = (pt_entry_t) ( (int) (pa | PG_RO | PG_V | PG_u));
+ *pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u));
}
/*
@@ -1448,16 +1452,16 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
*/
if( size > object->size / 2) {
objbytes = size;
- p = (vm_page_t) queue_first(&object->memq);
- while (!queue_end(&object->memq, (queue_entry_t) p) && objbytes != 0) {
+ p = object->memq.tqh_first;
+ while ((p != NULL) && (objbytes != 0)) {
tmpoff = p->offset;
if( tmpoff < offset) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
tmpoff -= offset;
if( tmpoff >= size) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
@@ -1469,7 +1473,7 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
vm_page_unhold(p);
pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
}
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
objbytes -= NBPG;
}
} else {
@@ -1699,13 +1703,13 @@ pmap_testbit(pa, bit)
* ptes as never modified.
*/
if (bit & PG_U ) {
- if ((pv->pv_va >= pager_sva) && (pv->pv_va < pager_eva)) {
+ if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
continue;
}
}
if (bit & PG_M ) {
if (pv->pv_va >= USRSTACK) {
- if (pv->pv_va >= pager_sva && pv->pv_va < pager_eva) {
+ if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
continue;
}
if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
@@ -1761,7 +1765,7 @@ pmap_changebit(pa, bit, setem)
* don't write protect pager mappings
*/
if (!setem && (bit == PG_RW)) {
- if (va >= pager_sva && va < pager_eva)
+ if (va >= clean_sva && va < clean_eva)
continue;
}
@@ -1869,6 +1873,10 @@ pmap_phys_address(ppn)
/*
* Miscellaneous support routines follow
*/
+/*
+ * This really just builds a table for page write enable
+ * translation.
+ */
void
i386_protection_init()
@@ -1879,12 +1887,10 @@ i386_protection_init()
for (prot = 0; prot < 8; prot++) {
switch (prot) {
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = 0;
- break;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = PG_RO;
+ *kp++ = 0;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index e8082221e99c..9634069e2e8a 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -185,6 +185,7 @@ ENTRY(outsl) /* outsl(port, addr, cnt) */
* memory moves on standard DX !!!!!
*/
+ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU))
cmpl $CPUCLASS_486,_cpu_class
@@ -656,6 +657,17 @@ ENTRY(fuword)
movl $0,PCB_ONFAULT(%ecx)
ret
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
ENTRY(fusword)
movl _curpcb,%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index e8082221e99c..9634069e2e8a 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -185,6 +185,7 @@ ENTRY(outsl) /* outsl(port, addr, cnt) */
* memory moves on standard DX !!!!!
*/
+ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU))
cmpl $CPUCLASS_486,_cpu_class
@@ -656,6 +657,17 @@ ENTRY(fuword)
movl $0,PCB_ONFAULT(%ecx)
ret
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
ENTRY(fusword)
movl _curpcb,%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 4dbc672b923e..aa8b5ba5d9a3 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -52,9 +52,9 @@
/*
* The following primitives manipulate the run queues.
* _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
+ * have processes in them. setrunqueue puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
+ * other processes are on a queue related to p->p_priority, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
@@ -72,11 +72,11 @@ _want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
- * Setrq(p)
+ * setrunqueue(p)
*
* Call should be made at spl6(), and p->p_stat should be SRUN
*/
-ENTRY(setrq)
+ENTRY(setrunqueue)
movl 4(%esp),%eax
cmpl $0,P_RLINK(%eax) /* should not be on q already */
je set1
@@ -95,7 +95,7 @@ set1:
movl %eax,P_LINK(%ecx)
ret
-set2: .asciz "setrq"
+set2: .asciz "setrunqueue"
/*
* Remrq(p)
@@ -131,10 +131,10 @@ rem2:
ret
rem3: .asciz "remrq"
-sw0: .asciz "swtch"
+sw0: .asciz "cpu_switch"
/*
- * When no processes are on the runq, swtch() branches to _idle
+ * When no processes are on the runq, cpu_switch() branches to _idle
* to wait for something to come ready.
*/
ALIGN_TEXT
@@ -146,8 +146,8 @@ _idle:
sti
/*
- * XXX callers of swtch() do a bogus splclock(). Locking should
- * be left to swtch().
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
*/
movl $SWI_AST_MASK,_cpl
testl $~SWI_AST_MASK,_ipending
@@ -169,9 +169,9 @@ badsw:
/*NOTREACHED*/
/*
- * Swtch()
+ * cpu_switch()
*/
-ENTRY(swtch)
+ENTRY(cpu_switch)
incl _cnt+V_SWTCH
/* switch to new process. first, save context as needed */
@@ -340,7 +340,7 @@ ENTRY(swtch_to_inactive)
/*
* savectx(pcb, altreturn)
* Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
*/
ENTRY(savectx)
movl 4(%esp),%ecx
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 9bb38e1e60d3..382416f06e3f 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -41,32 +41,33 @@
* 386 Trap and System call handleing
*/
-#include "isa.h"
-#include "npx.h"
-#include "ddb.h"
-#include "machine/cpu.h"
-#include "machine/psl.h"
-#include "machine/reg.h"
-#include "machine/eflags.h"
-
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "user.h"
-#include "acct.h"
-#include "kernel.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/syscall.h>
#ifdef KTRACE
-#include "ktrace.h"
+#include <sys/ktrace.h>
#endif
-#include "vm/vm_param.h"
-#include "vm/pmap.h"
-#include "vm/vm_map.h"
-#include "vm/vm_user.h"
-#include "vm/vm_page.h"
-#include "sys/vmmeter.h"
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <machine/eflags.h>
+
+#include <machine/trap.h>
-#include "machine/trap.h"
+#include "isa.h"
+#include "npx.h"
+#include "ddb.h"
#ifdef __GNUC__
@@ -84,7 +85,7 @@ void write_gs __P((/* promoted u_short */ int gs));
#endif /* __GNUC__ */
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
struct sysent sysent[];
int nsysent;
@@ -139,7 +140,7 @@ trap(frame)
{
register int i;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks = 0;
int ucode, type, code, eva, fault_type;
frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
@@ -177,10 +178,10 @@ copyfault:
return;
}
- syst = p->p_stime;
if (ISPL(frame.tf_cs) == SEL_UPL) {
type |= T_USER;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
+ sticks = p->p_sticks;
}
skiptoswitch:
@@ -210,9 +211,9 @@ skiptoswitch:
case T_ASTFLT|T_USER: /* Allow process switch */
astoff();
cnt.v_soft++;
- if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
+ if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
- p->p_flag &= ~SOWEUPC;
+ p->p_flag &= ~P_OWEUPC;
}
goto out;
@@ -284,7 +285,6 @@ skiptoswitch:
else
ftype = VM_PROT_READ;
- oldflags = p->p_flag;
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
@@ -294,7 +294,7 @@ skiptoswitch:
* Keep swapout from messing with us during this
* critical time.
*/
- p->p_flag |= SLOCK;
+ ++p->p_lock;
/*
* Grow the stack if necessary
@@ -303,8 +303,7 @@ skiptoswitch:
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
goto nogo;
}
}
@@ -332,13 +331,10 @@ skiptoswitch:
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
- if( ptepg->flags & PG_CLEAN)
- vm_page_free(ptepg);
+ vm_page_free(ptepg);
}
-
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
} else {
/*
* Since we know that kernel virtual address addresses
@@ -482,32 +478,29 @@ nogo:
out:
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
- * swtch()'ed, we might not be on the queue indicated by
+ * If that happened after we setrunqueue ourselves but before we
+ * mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -518,7 +511,7 @@ out:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
}
/*
@@ -546,14 +539,12 @@ int trapwrite(addr)
p = curproc;
vm = p->p_vmspace;
- oldflags = p->p_flag;
- p->p_flag |= SLOCK;
+ ++p->p_lock;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
return (1);
}
}
@@ -579,8 +570,7 @@ int trapwrite(addr)
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
if (rv != KERN_SUCCESS)
return 1;
@@ -603,31 +593,45 @@ syscall(frame)
register int i;
register struct sysent *callp;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks;
int error, opc;
int args[8], rval[2];
- int code;
+ u_int code;
#ifdef lint
r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
- syst = p->p_stime;
+ sticks = p->p_sticks;
if (ISPL(frame.tf_cs) != SEL_UPL)
panic("syscall");
code = frame.tf_eax;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
params = (caddr_t)frame.tf_esp + sizeof (int) ;
/*
* Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
*/
opc = frame.tf_eip - 7;
- if (code == 0) {
+ /*
+ * Need to check if this is a 32 bit or 64 bit syscall.
+ */
+ if (code == SYS_syscall) {
+ /*
+ * Code is first argument, followed by actual args.
+ */
code = fuword(params);
params += sizeof (int);
+ } else if (code == SYS___syscall) {
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+ params += sizeof(quad_t);
}
- if (code < 0 || code >= nsysent)
+
+ if (code >= nsysent)
callp = &sysent[0];
else
callp = &sysent[code];
@@ -672,32 +676,29 @@ done:
*/
p = curproc;
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
+ * If that happened after we setrunqueue ourselves but before we
* swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -708,21 +709,9 @@ done:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
-#ifdef DIAGNOSTICx
-{ extern int _udatasel, _ucodesel;
- if (frame.tf_ss != _udatasel)
- printf("ss %x call %d\n", frame.tf_ss, code);
- if ((frame.tf_cs&0xffff) != _ucodesel)
- printf("cs %x call %d\n", frame.tf_cs, code);
- if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
- printf("eip %x call %d\n", frame.tf_eip, code);
- frame.tf_eip = 0;
- }
-}
-#endif
}
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index d338cd5c5783..e40079a40bea 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index a892c29764de..a7c4e596dfa5 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -42,27 +42,21 @@
*/
#include "npx.h"
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "buf.h"
-#include "user.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/user.h>
-#include "../include/cpu.h"
+#include <machine/cpu.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
#define b_cylin b_resid
-#define MAXCLSTATS 256
-int clstats[MAXCLSTATS];
-int rqstats[MAXCLSTATS];
-
-
-#ifndef NOBOUNCE
-
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages, bpwait;
@@ -75,7 +69,8 @@ unsigned *bounceallocarray;
int bouncefree;
#define SIXTEENMEG (4096*4096)
-#define MAXBKVA 1024
+#define MAXBKVA 512
+int maxbkva=MAXBKVA*NBPG;
/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
@@ -258,6 +253,7 @@ int count;
pa = vm_bounce_page_find(1);
pmap_kenter(kva + i * NBPG, pa);
}
+ pmap_update();
return kva;
}
@@ -309,8 +305,8 @@ vm_bounce_alloc(bp)
bp->b_bufsize = bp->b_bcount;
}
- vastart = (vm_offset_t) bp->b_un.b_addr;
- vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;
+ vastart = (vm_offset_t) bp->b_data;
+ vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
@@ -369,11 +365,11 @@ vm_bounce_alloc(bp)
/*
* save the original buffer kva
*/
- bp->b_savekva = bp->b_un.b_addr;
+ bp->b_savekva = bp->b_data;
/*
* put our new kva into the buffer (offset by original offset)
*/
- bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
+ bp->b_data = (caddr_t) (((vm_offset_t) kva) |
((vm_offset_t) bp->b_savekva & (NBPG - 1)));
return;
}
@@ -403,7 +399,7 @@ vm_bounce_free(bp)
return;
origkva = (vm_offset_t) bp->b_savekva;
- bouncekva = (vm_offset_t) bp->b_un.b_addr;
+ bouncekva = (vm_offset_t) bp->b_data;
vastart = bouncekva;
vaend = bouncekva + bp->b_bufsize;
@@ -449,17 +445,15 @@ vm_bounce_free(bp)
/*
* add the old kva into the "to free" list
*/
- bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
+ bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
- bp->b_un.b_addr = bp->b_savekva;
+ bp->b_data = bp->b_savekva;
bp->b_savekva = 0;
bp->b_flags &= ~B_BOUNCE;
return;
}
-#endif /* NOBOUNCE */
-
/*
* init the bounce buffer system
*/
@@ -468,10 +462,8 @@ vm_bounce_init()
{
vm_offset_t minaddr, maxaddr;
- io_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
kvasfreecnt = 0;
-#ifndef NOBOUNCE
if (bouncepages == 0)
return;
@@ -487,11 +479,10 @@ vm_bounce_init()
bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
-#endif
-
}
+#ifdef BROKEN_IN_44
static void
cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
vm_offset_t kvanew;
@@ -827,6 +818,7 @@ nocluster:
ap->av_forw = bp;
bp->av_back = ap;
}
+#endif
/*
* quick version of vm_fault
@@ -881,7 +873,7 @@ cpu_fork(p1, p2)
offset = mvesp() - (int)kstack;
bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
(unsigned) ctob(UPAGES) - offset);
- p2->p_regs = p1->p_regs;
+ p2->p_md.md_regs = p1->p_md.md_regs;
/*
* Wire top of address space of child to it's kstack.
@@ -930,7 +922,7 @@ cpu_fork(p1, p2)
*
* Next, we assign a dummy context to be written over by swtch,
* calling it to send this process off to oblivion.
- * [The nullpcb allows us to minimize cost in swtch() by not having
+ * [The nullpcb allows us to minimize cost in mi_switch() by not having
* a special case].
*/
struct proc *swtch_to_inactive();
@@ -952,8 +944,7 @@ cpu_exit(p)
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
p->p_addr = (struct user *) &nullpcb;
- splclock();
- swtch();
+ mi_switch();
/* NOTREACHED */
}
#else
@@ -965,9 +956,8 @@ cpu_exit(p)
#if NNPX > 0
npxexit(p);
#endif /* NNPX */
- splclock();
- curproc = 0;
- swtch();
+ curproc = p;
+ mi_switch();
/*
* This is to shutup the compiler, and if swtch() failed I suppose
* this would be a good thing. This keeps gcc happy because panic
@@ -990,6 +980,21 @@ cpu_wait(p) struct proc *p; {
#endif
/*
+ * Dump the machine specific header information at the start of a core dump.
+ */
+int
+cpu_coredump(p, vp, cred)
+ struct proc *p;
+ struct vnode *vp;
+ struct ucred *cred;
+{
+
+ return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+ (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+ p));
+}
+
+/*
* Set a red zone in the kernel stack after the u. area.
*/
void
@@ -1008,6 +1013,43 @@ setredzone(pte, vaddr)
}
/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+void
+pagemove(from, to, size)
+ register caddr_t from, to;
+ int size;
+{
+ register vm_offset_t pa;
+
+ if (size & CLOFSET)
+ panic("pagemove");
+ while (size > 0) {
+ pa = pmap_kextract((vm_offset_t)from);
+ if (pa == 0)
+ panic("pagemove 2");
+ if (pmap_kextract((vm_offset_t)to) != 0)
+ panic("pagemove 3");
+ pmap_remove(kernel_pmap,
+ (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
+ pmap_kenter( (vm_offset_t)to, pa);
+ from += PAGE_SIZE;
+ to += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pmap_update();
+}
+
+/*
* Convert kernel VA to physical address
*/
u_long
@@ -1036,22 +1078,49 @@ vmapbuf(bp)
{
register int npf;
register caddr_t addr;
- register long flags = bp->b_flags;
- struct proc *p;
int off;
vm_offset_t kva;
- register vm_offset_t pa;
+ vm_offset_t pa, lastv, v;
- if ((flags & B_PHYS) == 0)
+ if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE) {
+
+/*
+ * make sure that the pde is valid and held
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ vm_fault_quick(v, VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+
+/*
+ * do the vm_fault if needed, do the copy-on-write thing when
+ * reading stuff off device into memory.
+ */
+ vm_fault_quick(addr,
+ (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+/*
+ * hold the data page
+ */
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ }
+
addr = bp->b_saveaddr = bp->b_un.b_addr;
off = (int)addr & PGOFSET;
- p = bp->b_proc;
npf = btoc(round_page(bp->b_bufsize + off));
kva = kmem_alloc_wait(phys_map, ctob(npf));
bp->b_un.b_addr = (caddr_t) (kva + off);
while (npf--) {
- pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
if (pa == 0)
panic("vmapbuf: null page frame");
pmap_kenter(kva, trunc_page(pa));
@@ -1071,7 +1140,7 @@ vunmapbuf(bp)
{
register int npf;
register caddr_t addr = bp->b_un.b_addr;
- vm_offset_t kva;
+ vm_offset_t kva,va,v,lastv,pa;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
@@ -1080,6 +1149,32 @@ vunmapbuf(bp)
kmem_free_wakeup(phys_map, kva, ctob(npf));
bp->b_un.b_addr = bp->b_saveaddr;
bp->b_saveaddr = NULL;
+
+
+/*
+ * unhold the pde, and data pages
+ */
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += NBPG) {
+
+ /*
+ * release the data page
+ */
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+
+ /*
+ * and unhold the page table
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+ }
}
/*
@@ -1104,7 +1199,7 @@ cpu_reset() {
int
grow(p, sp)
struct proc *p;
- int sp;
+ u_int sp;
{
unsigned int nss;
caddr_t v;
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index a2df0235ab2b..2216d71822c3 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -45,6 +45,7 @@
*/
#include "machine/frame.h"
#include "machine/segments.h"
+#include <machine/spl.h>
/*
* definitions of cpu-dependent requirements
@@ -53,20 +54,16 @@
#undef COPY_SIGCODE /* don't copy sigcode above user stack in exec */
#define cpu_exec(p) /* nothing */
+#define cpu_swapin(p) /* nothing */
+#define cpu_setstack(p, ap) (p)->p_md.md_regs = ap
+#define cpu_set_init_frame(p, fp) (p)->p_md.md_regs = fp
-/*
- * Arguments to hardclock, softclock and gatherstats
- * encapsulate the previous machine state in an opaque
- * clockframe; for now, use generic intrframe.
- * XXX softclock() has been fixed. It never needed a
- * whole frame, only a usermode flag, at least on this
- * machine. Fix the rest.
- */
-typedef struct intrframe clockframe;
+#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL)
+#define CLKF_INTR(framep) (0)
+#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_PC(framep) ((framep)->cf_eip)
-#define CLKF_USERMODE(framep) (ISPL((framep)->if_cs) == SEL_UPL)
-#define CLKF_BASEPRI(framep) (((framep)->if_ppl & ~SWI_AST_MASK) == 0)
-#define CLKF_PC(framep) ((framep)->if_eip)
+#define resettodr() /* no todr to set */
/*
* Preempt the current process if in interrupt from user mode,
@@ -79,7 +76,7 @@ typedef struct intrframe clockframe;
* interrupt. On tahoe, request an ast to send us through trap(),
* marking the proc as needing a profiling tick.
*/
-#define profile_tick(p, framep) { (p)->p_flag |= SOWEUPC; aston(); }
+#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
/*
* Notify the current process (p) that it has a signal pending,
@@ -100,6 +97,17 @@ struct cpu_nameclass {
int cpu_class;
};
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_MAXID 2 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+}
+
#ifdef KERNEL
extern int want_resched; /* resched was called */
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 3c2dcc9aa90d..729a5c06ccf8 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -71,145 +71,6 @@ tlbflush()
__asm __volatile("movl %%cr3, %%eax; movl %%eax, %%cr3" : : : "ax");
}
-static inline
-int
-imin(a, b)
- int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-int
-imax(a, b)
- int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned int
-min(a, b)
- unsigned int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned int
-max(a, b)
- unsigned int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-long
-lmin(a, b)
- long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-long
-lmax(a, b)
- long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned long
-ulmin(a, b)
- unsigned long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned long
-ulmax(a, b)
- unsigned long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-int
-ffs(mask)
- register long mask;
-{
- register int bit;
-
- if (!mask)
- return(0);
- for (bit = 1;; ++bit) {
- if (mask&0x01)
- return(bit);
- mask >>= 1;
- }
-}
-
-static inline
-int
-bcmp(v1, v2, len)
- void *v1, *v2;
- register unsigned len;
-{
- register u_char *s1 = v1, *s2 = v2;
-
- while (len--)
- if (*s1++ != *s2++)
- return (1);
- return (0);
-}
-
-static inline
-size_t
-strlen(s1)
- register const char *s1;
-{
- register size_t len;
-
- for (len = 0; *s1++ != '\0'; len++)
- ;
- return (len);
-}
-
-struct quehead {
- struct quehead *qh_link;
- struct quehead *qh_rlink;
-};
-
-static inline void
-insque(void *a, void *b)
-{
- register struct quehead *element = a, *head = b;
- element->qh_link = head->qh_link;
- head->qh_link = (struct quehead *)element;
- element->qh_rlink = (struct quehead *)head;
- ((struct quehead *)(element->qh_link))->qh_rlink
- = (struct quehead *)element;
-}
-
-static inline void
-remque(void *a)
-{
- register struct quehead *element = a;
- ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
- ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
- element->qh_rlink = 0;
-}
-
#else /* not __GNUC__ */
extern void insque __P((void *, void *));
extern void remque __P((void *));
diff --git a/sys/amd64/include/exec.h b/sys/amd64/include/exec.h
index eb587a4ed44d..f63ec49cc8c1 100644
--- a/sys/amd64/include/exec.h
+++ b/sys/amd64/include/exec.h
@@ -33,51 +33,96 @@
* @(#)exec.h 8.1 (Berkeley) 6/11/93
*/
-/* Size of a page in an object file. */
+#ifndef _EXEC_H_
+#define _EXEC_H_
+
#define __LDPGSZ 4096
/* Valid magic number check. */
#define N_BADMAG(ex) \
- ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \
- (ex).a_magic != ZMAGIC)
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
+#define N_ALIGN(ex,x) \
+ (N_GETMAGIC(ex) == ZMAGIC || N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC || N_GETMAGIC_NET(ex) == QMAGIC ? \
+ ((x) + __LDPGSZ - 1) & ~(__LDPGSZ - 1) : (x))
/* Address of the bottom of the text segment. */
-#define N_TXTADDR(X) 0
+#define N_TXTADDR(ex) \
+ ((N_GETMAGIC(ex) == OMAGIC || N_GETMAGIC(ex) == NMAGIC || \
+ N_GETMAGIC(ex) == ZMAGIC) ? 0 : __LDPGSZ)
/* Address of the bottom of the data segment. */
#define N_DATADDR(ex) \
- (N_TXTADDR(ex) + ((ex).a_magic == OMAGIC ? (ex).a_text \
- : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTADDR(ex) + (ex).a_text)
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_midmag & 0xffff )
+#define N_GETMID(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETMID_NET(ex) : \
+ ((ex).a_midmag >> 16) & 0x03ff )
+#define N_GETFLAG(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETFLAG_NET(ex) : \
+ ((ex).a_midmag >> 26) & 0x3f )
+#define N_SETMAGIC(ex,mag,mid,flag) \
+ ( (ex).a_midmag = (((flag) & 0x3f) <<26) | (((mid) & 0x03ff) << 16) | \
+ ((mag) & 0xffff) )
+
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_midmag) & 0xffff)
+#define N_GETMID_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 16) & 0x03ff)
+#define N_GETFLAG_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 26) & 0x3f)
+#define N_SETMAGIC_NET(ex,mag,mid,flag) \
+ ( (ex).a_midmag = htonl( (((flag)&0x3f)<<26) | (((mid)&0x03ff)<<16) | \
+ (((mag)&0xffff)) ) )
/* Text segment offset. */
#define N_TXTOFF(ex) \
- ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec))
+ (N_GETMAGIC(ex) == ZMAGIC ? __LDPGSZ : (N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC) ? 0 : sizeof(struct exec))
/* Data segment offset. */
#define N_DATOFF(ex) \
- (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \
- __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTOFF(ex) + (ex).a_text)
+
+/* Relocation table offset. */
+#define N_RELOFF(ex) \
+ N_ALIGN(ex, N_DATOFF(ex) + (ex).a_data)
/* Symbol table offset. */
#define N_SYMOFF(ex) \
- (N_TXTOFF(ex) + (ex).a_text + (ex).a_data + (ex).a_trsize + \
- (ex).a_drsize)
+ (N_RELOFF(ex) + (ex).a_trsize + (ex).a_drsize)
/* String table offset. */
#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms)
-/* Description of the object file header (a.out format). */
+/*
+ * Header prepended to each a.out file.
+ * only manipulate the a_midmag field via the
+ * N_SETMAGIC/N_GET{MAGIC,MID,FLAG} macros in a.out.h
+ */
+
struct exec {
-#define OMAGIC 0407 /* old impure format */
-#define NMAGIC 0410 /* read-only text */
-#define ZMAGIC 0413 /* demand load format */
- long a_magic; /* magic number */
-
- u_long a_text; /* text segment size */
- u_long a_data; /* initialized data size */
- u_long a_bss; /* uninitialized data size */
- u_long a_syms; /* symbol table size */
- u_long a_entry; /* entry point */
- u_long a_trsize; /* text relocation size */
- u_long a_drsize; /* data relocation size */
+unsigned long a_midmag; /* htonl(flags<<26 | mid<<16 | magic) */
+unsigned long a_text; /* text segment size */
+unsigned long a_data; /* initialized data size */
+unsigned long a_bss; /* uninitialized data size */
+unsigned long a_syms; /* symbol table size */
+unsigned long a_entry; /* entry point */
+unsigned long a_trsize; /* text relocation size */
+unsigned long a_drsize; /* data relocation size */
};
+#define a_magic a_midmag /* XXX Hack to work with current kern_execve.c */
+
+/* a_magic */
+#define OMAGIC 0407 /* old impure format */
+#define NMAGIC 0410 /* read-only text */
+#define ZMAGIC 0413 /* demand load format */
+#define QMAGIC 0314 /* "compact" demand load format */
+
+#endif /* !_EXEC_H_ */
diff --git a/sys/amd64/include/frame.h b/sys/amd64/include/frame.h
index 05bf26504620..db2993e019fb 100644
--- a/sys/amd64/include/frame.h
+++ b/sys/amd64/include/frame.h
@@ -100,6 +100,32 @@ struct intrframe {
int if_ss;
};
+/* frame of clock (same as interrupt frame) */
+
+struct clockframe {
+ int cf_vec;
+ int cf_ppl;
+ int cf_es;
+ int cf_ds;
+ int cf_edi;
+ int cf_esi;
+ int cf_ebp;
+ int :32;
+ int cf_ebx;
+ int cf_edx;
+ int cf_ecx;
+ int cf_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int cf_eip;
+ int cf_cs;
+ int cf_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int cf_esp;
+ int cf_ss;
+};
+
/*
* Signal frame
*/
diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h
index a7a29dfdbb85..990e5f90bf7c 100644
--- a/sys/amd64/include/pcb.h
+++ b/sys/amd64/include/pcb.h
@@ -79,6 +79,13 @@ struct pcb {
int pcb_cmap2; /* XXX temporary PTE - will prefault instead */
};
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the i386: ???
+ */
+struct md_coredump {
+};
+
#ifdef KERNEL
extern struct pcb *curpcb; /* our current running pcb */
#endif
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 74f002d4f391..7ddcebd0fd37 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -48,75 +48,8 @@
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
-#include "vm/vm_prot.h"
-/*
- * 386 page table entry and page table directory
- * W.Jolitz, 8/89
- */
-struct pde
-{
-unsigned int
- pd_v:1, /* valid bit */
- pd_prot:2, /* access control */
- pd_mbz1:2, /* reserved, must be zero */
- pd_u:1, /* hardware maintained 'used' bit */
- :1, /* not used */
- pd_mbz2:2, /* reserved, must be zero */
- :3, /* reserved for software */
- pd_pfnum:20; /* physical page frame number of pte's*/
-};
-
-#define PD_MASK 0xffc00000UL /* page directory address bits */
-#define PT_MASK 0x003ff000UL /* page table address bits */
-#define PD_SHIFT 22 /* page directory address shift */
-#define PG_SHIFT 12 /* page table address shift */
-
-struct pte
-{
-unsigned int
- pg_v:1, /* valid bit */
- pg_prot:2, /* access control */
- pg_mbz1:2, /* reserved, must be zero */
- pg_u:1, /* hardware maintained 'used' bit */
- pg_m:1, /* hardware maintained modified bit */
- pg_mbz2:2, /* reserved, must be zero */
- pg_w:1, /* software, wired down page */
- :1, /* software (unused) */
- pg_nc:1, /* 'uncacheable page' bit */
- pg_pfnum:20; /* physical page frame number */
-};
-
-#define PG_V 0x00000001
-#define PG_RO 0x00000000
-#define PG_RW 0x00000002
-#define PG_u 0x00000004
-#define PG_PROT 0x00000006 /* all protection bits . */
-#define PG_W 0x00000200
-#define PG_N 0x00000800 /* Non-cacheable */
-#define PG_M 0x00000040
-#define PG_U 0x00000020
-#define PG_FRAME 0xfffff000UL
-
-#define PG_NOACC 0
-#define PG_KR 0x00000000
-#define PG_KW 0x00000002
-#define PG_URKR 0x00000004
-#define PG_URKW 0x00000004
-#define PG_UW 0x00000006
-
-/* Garbage for current bastardized pager that assumes a hp300 */
-#define PG_NV 0
-#define PG_CI 0
-
-/*
- * Page Protection Exception bits
- */
-#define PGEX_P 0x01 /* Protection violation vs. not present */
-#define PGEX_W 0x02 /* during a Write cycle */
-#define PGEX_U 0x04 /* access from User mode (UPL) */
+#include <machine/pte.h>
-/* typedef struct pde pd_entry_t; */ /* page directory entry */
-/* typedef struct pte pt_entry_t; */ /* Mach page table entry */
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
@@ -129,7 +62,7 @@ typedef unsigned int *pt_entry_t;
* given to the user (NUPDE)
*/
#ifndef NKPT
-#define NKPT 15 /* actual number of kernel pte's */
+#define NKPT 24 /* actual number of kernel pte's */
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of kpte's */
@@ -159,7 +92,6 @@ typedef unsigned int *pt_entry_t;
#ifdef KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
-extern pt_entry_t *Sysmap;
extern int IdlePTD; /* physical address of "Idle" state directory */
#endif
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
index 1b9e4a2adebc..92de3af87dcf 100644
--- a/sys/amd64/include/proc.h
+++ b/sys/amd64/include/proc.h
@@ -42,9 +42,7 @@
*/
struct mdproc {
int md_flags; /* machine-dependent flags */
-#ifdef notyet
- int *p_regs; /* registers on current frame */
-#endif
+ int *md_regs; /* registers on current frame */
};
/* md_flags */
diff --git a/sys/amd64/include/reg.h b/sys/amd64/include/reg.h
index d20f8d0c85e1..2a1f06106b0c 100644
--- a/sys/amd64/include/reg.h
+++ b/sys/amd64/include/reg.h
@@ -74,23 +74,33 @@
* use whichver order, defined above, is correct, so that it
* is all invisible to the user.
*/
-struct regs {
+struct reg {
unsigned int r_es;
unsigned int r_ds;
unsigned int r_edi;
unsigned int r_esi;
unsigned int r_ebp;
+ unsigned int r_isp;
unsigned int r_ebx;
unsigned int r_edx;
unsigned int r_ecx;
unsigned int r_eax;
+ unsigned int r_trapno;
+ unsigned int r_err;
unsigned int r_eip;
unsigned int r_cs;
unsigned int r_eflags;
unsigned int r_esp;
unsigned int r_ss;
- unsigned int r_fs;
- unsigned int r_gs;
+};
+
+/*
+ * Register set accessible via /proc/$pid/fpreg
+ */
+struct fpreg {
+#if 0
+ int fpr_xxx; /* not implemented */
+#endif
};
#endif /* _MACHINE_REG_H_ */
diff --git a/sys/amd64/include/signal.h b/sys/amd64/include/signal.h
index 98793f2081b1..16cbef22265e 100644
--- a/sys/amd64/include/signal.h
+++ b/sys/amd64/include/signal.h
@@ -51,11 +51,25 @@ typedef int sig_atomic_t;
* a non-standard exit is performed.
*/
struct sigcontext {
- int sc_onstack; /* sigstack state to restore */
- int sc_mask; /* signal mask to restore */
- int sc_sp; /* sp to restore */
- int sc_fp; /* fp to restore */
- int sc_ap; /* ap to restore */
- int sc_pc; /* pc to restore */
- int sc_ps; /* psl to restore */
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore */
+ int sc_esp; /* machine state */
+ int sc_ebp;
+ int sc_isp;
+ int sc_eip;
+ int sc_efl;
+ int sc_es;
+ int sc_ds;
+ int sc_cs;
+ int sc_ss;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+# define sc_sp sc_esp
+# define sc_fp sc_ebp
+# define sc_pc sc_eip
+# define sc_ps sc_efl
};
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index df901267202a..05218ad6f781 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -174,20 +174,6 @@
#define KLSDIST 3 /* klusters advance/retard for seq. fifo */
/*
- * Paging thresholds (see vm_sched.c).
- * Strategy of 1/19/85:
- * lotsfree is 512k bytes, but at most 1/4 of memory
- * desfree is 200k bytes, but at most 1/8 of memory
- * minfree is 64k bytes, but at most 1/2 of desfree
- */
-#define LOTSFREE (512 * 1024)
-#define LOTSFREEFRACT 4
-#define DESFREE (200 * 1024)
-#define DESFREEFRACT 8
-#define MINFREE (64 * 1024)
-#define MINFREEFRACT 2
-
-/*
* There are two clock hands, initially separated by HANDSPREAD bytes
* (but at most all of user memory). The amount of time to reclaim
* a page once the pageout process examines it increases with this
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index d338cd5c5783..e40079a40bea 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/amd64/isa/isa.c b/sys/amd64/isa/isa.c
index b0d84efc01a7..32e59e754a3f 100644
--- a/sys/amd64/isa/isa.c
+++ b/sys/amd64/isa/isa.c
@@ -59,6 +59,7 @@
#include "rlist.h"
#include "machine/segments.h"
#include "vm/vm.h"
+#include <machine/spl.h>
#include "i386/isa/isa_device.h"
#include "i386/isa/isa.h"
#include "i386/isa/icu.h"
diff --git a/sys/amd64/isa/npx.c b/sys/amd64/isa/npx.c
index 00424bf3aa14..17400bdbb29a 100644
--- a/sys/amd64/isa/npx.c
+++ b/sys/amd64/isa/npx.c
@@ -438,7 +438,7 @@ npxintr(frame)
* in doreti, and the frame for that could easily be set up
* just before it is used).
*/
- curproc->p_regs = (int *)&frame.if_es;
+ curproc->p_md.md_regs = (int *)&frame.if_es;
#ifdef notyet
/*
* Encode the appropriate code for detailed information on
diff --git a/sys/conf/Makefile.i386 b/sys/conf/Makefile.i386
index db28a3406596..b755dbac1839 100644
--- a/sys/conf/Makefile.i386
+++ b/sys/conf/Makefile.i386
@@ -39,7 +39,6 @@ CWARNFLAGS=-W -Wreturn-type -Wcomment
# of material assistance.
#
COPTFLAGS=-O
-COPTFLAGS+=-D__FreeBSD__
INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
@@ -52,10 +51,10 @@ NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
- ioconf.o conf.o machdep.o
-SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
+ ioconf.o conf.o machdep.o vnode_if.o
+SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS} libkern.a
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
-SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o
+SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o libkern.a
SYSTEM_LD_TAIL= @echo rearranging symbols; symorder symbols.sort $@; \
${DBSYM} -fT ${LOAD_ADDRESS} $@; ${STRIP} -x $@; size $@; chmod 755 $@
@@ -74,6 +73,13 @@ PROFILE_C= ${CC} -S -c ${CFLAGS} $< ; \
%LOAD
+libkern.a:
+ -@if [ X${PROF} = X ]; \
+ then ln -s $S/libkern/obj/libkern.a libkern.a; \
+ else ln -s $S/libkern/obj/libkern_p.a libkern.a; \
+ fi; \
+ echo ln -s $S/libkern/obj/libkern.a libkern.a
+
clean:
rm -f eddep *386bsd tags *.o locore.i [a-uw-z]*.s \
errs linterrs makelinks genassym ,assym.s stamp-assym
@@ -140,7 +146,7 @@ genassym: Makefile
${CC} ${INCLUDES} -DKERNEL ${IDENT} ${PARAM} \
${I386}/i386/genassym.c -static -o genassym
-depend: assym.s param.c
+depend: assym.s param.c vnode_if.h
sh /usr/bin/mkdep -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${COPTS} ${CFILES} ioconf.c param.c ${I386}/i386/conf.c
sh /usr/bin/mkdep -a -p ${INCLUDES} ${IDENT} ${PARAM} ${I386}/i386/genassym.c
@@ -173,6 +179,11 @@ vers.o: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP}
sh $S/conf/newvers.sh ${KERN_IDENT} ${IDENT}
${CC} ${CFLAGS} -c vers.c
+vnode_if.c: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+vnode_if.h: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+
%RULES
# DO NOT DELETE THIS LINE -- make depend uses it
diff --git a/sys/conf/Makefile.powerpc b/sys/conf/Makefile.powerpc
index db28a3406596..b755dbac1839 100644
--- a/sys/conf/Makefile.powerpc
+++ b/sys/conf/Makefile.powerpc
@@ -39,7 +39,6 @@ CWARNFLAGS=-W -Wreturn-type -Wcomment
# of material assistance.
#
COPTFLAGS=-O
-COPTFLAGS+=-D__FreeBSD__
INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
@@ -52,10 +51,10 @@ NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
- ioconf.o conf.o machdep.o
-SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
+ ioconf.o conf.o machdep.o vnode_if.o
+SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS} libkern.a
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
-SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o
+SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o libkern.a
SYSTEM_LD_TAIL= @echo rearranging symbols; symorder symbols.sort $@; \
${DBSYM} -fT ${LOAD_ADDRESS} $@; ${STRIP} -x $@; size $@; chmod 755 $@
@@ -74,6 +73,13 @@ PROFILE_C= ${CC} -S -c ${CFLAGS} $< ; \
%LOAD
+libkern.a:
+ -@if [ X${PROF} = X ]; \
+ then ln -s $S/libkern/obj/libkern.a libkern.a; \
+ else ln -s $S/libkern/obj/libkern_p.a libkern.a; \
+ fi; \
+ echo ln -s $S/libkern/obj/libkern.a libkern.a
+
clean:
rm -f eddep *386bsd tags *.o locore.i [a-uw-z]*.s \
errs linterrs makelinks genassym ,assym.s stamp-assym
@@ -140,7 +146,7 @@ genassym: Makefile
${CC} ${INCLUDES} -DKERNEL ${IDENT} ${PARAM} \
${I386}/i386/genassym.c -static -o genassym
-depend: assym.s param.c
+depend: assym.s param.c vnode_if.h
sh /usr/bin/mkdep -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${COPTS} ${CFILES} ioconf.c param.c ${I386}/i386/conf.c
sh /usr/bin/mkdep -a -p ${INCLUDES} ${IDENT} ${PARAM} ${I386}/i386/genassym.c
@@ -173,6 +179,11 @@ vers.o: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP}
sh $S/conf/newvers.sh ${KERN_IDENT} ${IDENT}
${CC} ${CFLAGS} -c vers.c
+vnode_if.c: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+vnode_if.h: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+
%RULES
# DO NOT DELETE THIS LINE -- make depend uses it
diff --git a/sys/conf/files b/sys/conf/files
index c083f2e1deee..c62ea0fb3408 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,3 +1,19 @@
+ddb/db_access.c optional ddb
+ddb/db_aout.c optional ddb
+ddb/db_break.c optional ddb
+ddb/db_command.c optional ddb
+ddb/db_examine.c optional ddb
+ddb/db_expr.c optional ddb
+ddb/db_input.c optional ddb
+ddb/db_lex.c optional ddb
+ddb/db_output.c optional ddb
+ddb/db_print.c optional ddb
+ddb/db_run.c optional ddb
+ddb/db_sym.c optional ddb
+ddb/db_trap.c optional ddb
+ddb/db_variables.c optional ddb
+ddb/db_watch.c optional ddb
+ddb/db_write_cmd.c optional ddb
isofs/cd9660/cd9660_bmap.c optional cd9660
isofs/cd9660/cd9660_lookup.c optional cd9660
isofs/cd9660/cd9660_node.c optional cd9660
@@ -18,6 +34,8 @@ kdb/kdb_print.c optional kadb
kdb/kdb_runpcs.c optional kadb
kdb/kdb_sym.c optional kadb
kdb/kdb_trap.c optional kadb
+kern/imgact_aout.c standard
+kern/imgact_shell.c standard
kern/init_main.c standard
kern/init_sysent.c standard
kern/kern_acct.c standard
@@ -41,6 +59,7 @@ kern/kern_xxx.c standard
kern/subr_log.c standard
kern/subr_prf.c standard
kern/subr_prof.c standard
+kern/subr_rlist.c standard
kern/subr_rmap.c standard
kern/subr_xxx.c standard
kern/sys_generic.c standard
@@ -218,6 +237,15 @@ nfs/nfs_subs.c optional nfs
nfs/nfs_syscalls.c optional nfs
nfs/nfs_vfsops.c optional nfs
nfs/nfs_vnops.c optional nfs
+scsi/cd.c optional cd
+scsi/ch.c optional ch
+scsi/scsiconf.c optional scbus
+scsi/scsi_base.c optional scbus
+scsi/scsi_ioctl.c optional scbus
+scsi/sd.c optional sd
+scsi/st.c optional st
+scsi/su.c optional su
+scsi/uk.c optional uk
ufs/ffs/ffs_alloc.c optional ffs
ufs/ffs/ffs_alloc.c optional mfs
ufs/ffs/ffs_balloc.c optional ffs
@@ -254,9 +282,9 @@ ufs/ufs/ufs_lookup.c standard
ufs/ufs/ufs_quota.c standard
ufs/ufs/ufs_vfsops.c standard
ufs/ufs/ufs_vnops.c standard
-vm/device_pager.c optional devpager
+vm/device_pager.c standard
vm/kern_lock.c standard
-vm/swap_pager.c optional swappager
+vm/swap_pager.c standard
vm/vm_fault.c standard
vm/vm_glue.c standard
vm/vm_init.c standard
@@ -271,4 +299,4 @@ vm/vm_pager.c standard
vm/vm_swap.c standard
vm/vm_unix.c standard
vm/vm_user.c standard
-vm/vnode_pager.c optional vnodepager
+vm/vnode_pager.c standard
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index 7aec440ac98d..f70e799d8a33 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -14,6 +14,7 @@ i386/i386/mem.c standard
i386/i386/microtime.s standard
i386/i386/ns_cksum.c optional ns
i386/i386/pmap.c standard
+i386/i386/procfs_machdep.c optional procfs
i386/i386/sys_machdep.c standard
i386/i386/trap.c standard
i386/i386/vm_machdep.c standard
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 83a2f04ad7ac..03fdc253b61a 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -40,9 +40,9 @@ fi
touch version
v=`cat version` u=${USER-root} d=`pwd` h=`hostname` t=`date`
-echo "char ostype[] = \"4.4BSD\";" > vers.c
-echo "char osrelease[] = \"4.4BSD-Lite\";" >> vers.c
+echo "char ostype[] = \"FreeBSD\";" > vers.c
+echo "char osrelease[] = \"2.0.0 (Development)\";" >> vers.c
echo "char sccs[4] = { '@', '(', '#', ')' };" >>vers.c
-echo "char version[] = \"4.4BSD-Lite #${v}: ${t}\\n ${u}@${h}:${d}\\n\";" >>vers.c
+echo "char version[] = \"FreeBSD 2.0.0 (Development) #${v}: ${t}\\n ${u}@${h}:${d}\\n\";" >>vers.c
echo `expr ${v} + 1` > version
diff --git a/sys/conf/param.c b/sys/conf/param.c
index 9f4e2cae857c..c871594221de 100644
--- a/sys/conf/param.c
+++ b/sys/conf/param.c
@@ -75,7 +75,8 @@ int tickadj = 30000 / (60 * HZ); /* can adjust 30ms in 60s */
struct timezone tz = { TIMEZONE, DST };
#define NPROC (20 + 16 * MAXUSERS)
int maxproc = NPROC;
-#define NTEXT (80 + NPROC / 8) /* actually the object cache */
+#define NTEXT NPROC
+int vm_cache_max = NTEXT/2 + 16;
#define NVNODE (NPROC + NTEXT + 100)
int desiredvnodes = NVNODE;
int maxfiles = 3 * (NPROC + MAXUSERS) + 80;
diff --git a/sys/ddb/db_command.c b/sys/ddb/db_command.c
index 735b3c095ba0..45fd5c5be6c6 100644
--- a/sys/ddb/db_command.c
+++ b/sys/ddb/db_command.c
@@ -307,15 +307,15 @@ extern void db_listbreak_cmd();
extern void db_listwatch_cmd();
extern void db_show_regs(), db_show_one_thread(), db_show_all_threads();
extern void vm_map_print(), vm_object_print(), vm_page_print();
-extern void db_ps();
+/* extern void db_ps(); */
extern void ipc_port_print();
void db_show_help();
struct command db_show_all_cmds[] = {
#if 0
{ "threads", db_show_all_threads, 0, 0 },
-#endif
{ "procs", db_ps, 0, 0 },
+#endif
{ (char *)0 }
};
@@ -372,7 +372,9 @@ struct command db_command_table[] = {
{ "trace", db_stack_trace_cmd, 0, 0 },
{ "call", db_fncall, CS_OWN, 0 },
{ "show", 0, 0, db_show_cmds },
+#if 0
{ "ps", db_ps, 0, 0 },
+#endif
{ (char *)0, }
};
diff --git a/sys/ddb/ddb.h b/sys/ddb/ddb.h
index 4d7b206bbd7a..877436c12833 100644
--- a/sys/ddb/ddb.h
+++ b/sys/ddb/ddb.h
@@ -37,7 +37,8 @@
#ifndef __h_ddb_ddb
#define __h_ddb_ddb 1
-#include "machine/db_machdep.h" /* type definitions */
+#include <machine/db_machdep.h> /* type definitions */
+#include <vm/vm.h>
/*
* Global variables...
diff --git a/sys/dev/ed/if_ed.c b/sys/dev/ed/if_ed.c
index 84047e20441c..26e3ebdfb397 100644
--- a/sys/dev/ed/if_ed.c
+++ b/sys/dev/ed/if_ed.c
@@ -63,7 +63,7 @@
/* For backwards compatibility */
#ifndef IFF_ALTPHYS
-#define IFF_ALTPHYS IFF_LLC0
+#define IFF_ALTPHYS IFF_LINK0
#endif
/*
@@ -113,7 +113,7 @@ void edintr(int);
int ed_ioctl(struct ifnet *, int, caddr_t);
int ed_probe(struct isa_device *);
void ed_start(struct ifnet *);
-void ed_reset(int, int);
+void ed_reset(int);
void ed_watchdog(int);
static void ed_get_packet(struct ed_softc *, char *, int /*u_short*/);
@@ -1090,9 +1090,8 @@ ed_attach(isa_dev)
* Reset interface.
*/
void
-ed_reset(unit, uban)
+ed_reset(unit)
int unit;
- int uban; /* XXX */
{
int s;
@@ -1147,7 +1146,7 @@ ed_watchdog(unit)
log(LOG_ERR, "ed%d: device timeout\n", unit);
++sc->arpcom.ac_if.if_oerrors;
- ed_reset(unit, 0);
+ ed_reset(unit);
}
/*
@@ -1501,7 +1500,7 @@ outloop:
len = ed_pio_write_mbufs(sc, m, buffer);
}
- sc->txb_len[sc->txb_new] = MAX(len, ETHER_MIN_LEN);
+ sc->txb_len[sc->txb_new] = max(len, ETHER_MIN_LEN);
sc->txb_inuse++;
@@ -1652,7 +1651,7 @@ ed_rint(unit)
"ed%d: NIC memory corrupt - invalid packet length %d\n",
unit, len);
++sc->arpcom.ac_if.if_ierrors;
- ed_reset(unit, 0);
+ ed_reset(unit);
return;
}
@@ -1817,7 +1816,7 @@ edintr(unit)
/*
* Stop/reset/re-init NIC
*/
- ed_reset(unit, 0);
+ ed_reset(unit);
} else {
/*
@@ -2388,7 +2387,7 @@ ed_pio_write_mbufs(sc,m,dst)
if (!maxwait) {
log(LOG_WARNING, "ed%d: remote transmit DMA failed to complete\n",
sc->arpcom.ac_if.if_unit);
- ed_reset(sc->arpcom.ac_if.if_unit, 0);
+ ed_reset(sc->arpcom.ac_if.if_unit);
}
return(len);
diff --git a/sys/dev/ep/if_ep.c b/sys/dev/ep/if_ep.c
index e8d31129dc96..b47f829389b0 100644
--- a/sys/dev/ep/if_ep.c
+++ b/sys/dev/ep/if_ep.c
@@ -99,7 +99,7 @@ static int epioctl __P((struct ifnet * ifp, int, caddr_t));
void epinit __P((int));
void epintr __P((int));
-void epmbuffill __P((caddr_t, int));
+void epmbuffill __P((caddr_t));
void epmbufempty __P((struct ep_softc *));
void epread __P((struct ep_softc *));
void epreset __P((int));
@@ -953,9 +953,8 @@ is_eeprom_busy(is)
}
void
-epmbuffill(sp, dummy_arg)
+epmbuffill(sp)
caddr_t sp;
- int dummy_arg;
{
struct ep_softc *sc = (struct ep_softc *)sp;
int s, i;
diff --git a/sys/dev/fdc/fdc.c b/sys/dev/fdc/fdc.c
index d05c3612b67f..259d451b7b5f 100644
--- a/sys/dev/fdc/fdc.c
+++ b/sys/dev/fdc/fdc.c
@@ -199,7 +199,7 @@ int fd_debug = 1;
static void fdstart(fdcu_t);
void fdintr(fdcu_t);
-static void fd_turnoff(caddr_t, int);
+static void fd_turnoff(caddr_t);
/****************************************************************************/
/* autoconfiguration stuff */
@@ -347,7 +347,7 @@ fdattach(dev)
break;
}
- fd_turnoff((caddr_t)fdu, 0);
+ fd_turnoff((caddr_t)fdu);
hdr = 1;
}
printf("\n");
@@ -417,7 +417,7 @@ void fdstrategy(struct buf *bp)
dp = &(fdc->head);
s = splbio();
disksort(dp, bp);
- untimeout(fd_turnoff, (caddr_t)fdu); /* a good idea */
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu); /* a good idea */
fdstart(fdcu);
splx(s);
return;
@@ -463,7 +463,7 @@ set_motor(fdcu, fdu, reset)
}
static void
-fd_turnoff(caddr_t arg1, int arg2)
+fd_turnoff(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -476,7 +476,7 @@ fd_turnoff(caddr_t arg1, int arg2)
}
void
-fd_motor_on(caddr_t arg1, int arg2)
+fd_motor_on(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -502,7 +502,7 @@ fd_turnon(fdu)
{
fd_turnon1(fdu);
fd->flags |= FD_MOTOR_WAIT;
- timeout(fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
+ timeout((timeout_func_t)fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
}
}
@@ -685,7 +685,7 @@ fdstart(fdcu)
}
static void
-fd_timeout(caddr_t arg1, int arg2)
+fd_timeout(caddr_t arg1)
{
fdcu_t fdcu = (fdcu_t)arg1;
fdu_t fdu = fdc_data[fdcu].fdu;
@@ -809,8 +809,8 @@ fdstate(fdcu, fdc)
TRACE1("fd%d",fdu);
TRACE1("[%s]",fdstates[fdc->state]);
TRACE1("(0x%x)",fd->flags);
- untimeout(fd_turnoff, (caddr_t)fdu);
- timeout(fd_turnoff, (caddr_t)fdu, 4 * hz);
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu);
+ timeout((timeout_func_t)fd_turnoff, (caddr_t)fdu, 4 * hz);
switch (fdc->state)
{
case DEVIDLE:
@@ -855,12 +855,12 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,bp->b_cylin * fd->ft->steptrac);
fd->track = -2;
fdc->state = SEEKWAIT;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case SEEKWAIT:
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 50);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 50);
fdc->state = SEEKCOMPLETE;
return(0); /* will return later */
break;
@@ -925,10 +925,10 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,fd->ft->datalen); /* data length */
}
fdc->state = IOCOMPLETE;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case IOCOMPLETE: /* IO DONE, post-analyze */
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
for(i=0;i<7;i++)
{
fdc->status[i] = in_fdc(fdcu);
@@ -964,7 +964,7 @@ fdstate(fdcu, fdc)
/* ALL DONE */
fd->skip = 0;
bp->b_resid = 0;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
biodone(bp);
fdc->fd = (fd_p) 0;
fdc->fdu = -1;
@@ -991,7 +991,7 @@ fdstate(fdcu, fdc)
return(0); /* will return later */
case RECALWAIT:
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 30);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 30);
fdc->state = RECALCOMPLETE;
return(0); /* will return later */
case RECALCOMPLETE:
@@ -1079,7 +1079,7 @@ retrier(fdcu)
bp->b_flags |= B_ERROR;
bp->b_error = EIO;
bp->b_resid = bp->b_bcount - fdc->fd->skip;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
fdc->fd->skip = 0;
biodone(bp);
fdc->state = FINDWORK;
diff --git a/sys/dev/ie/if_ie.c b/sys/dev/ie/if_ie.c
index 95095bdb515b..cb6b96a6ab1b 100644
--- a/sys/dev/ie/if_ie.c
+++ b/sys/dev/ie/if_ie.c
@@ -1320,9 +1320,8 @@ iereset(unit, dummy)
* This is called if we time out.
*/
static void
-chan_attn_timeout(rock, arg2)
+chan_attn_timeout(rock)
caddr_t rock;
- int arg2;
{
*(int *)rock = 1;
}
diff --git a/sys/dev/mcd/mcd.c b/sys/dev/mcd/mcd.c
index 7309f42d4229..683b0e1798c2 100644
--- a/sys/dev/mcd/mcd.c
+++ b/sys/dev/mcd/mcd.c
@@ -387,7 +387,7 @@ static void mcd_start(int unit)
if ((bp = qp->b_actf) != 0) {
/* block found to process, dequeue */
/*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/
- qp->b_actf = bp->av_forw;
+ qp->b_actf = bp->b_actf;
splx(s);
} else {
/* nothing to do */
diff --git a/sys/dev/mse/mse.c b/sys/dev/mse/mse.c
index eebe163268ec..5f80b21cc8f8 100644
--- a/sys/dev/mse/mse.c
+++ b/sys/dev/mse/mse.c
@@ -71,7 +71,7 @@ struct isa_driver msedriver = {
struct mse_softc {
int sc_flags;
int sc_mousetype;
- pid_t sc_selp;
+ struct selinfo sc_selp;
u_int sc_port;
void (*sc_enablemouse)();
void (*sc_disablemouse)();
@@ -316,7 +316,7 @@ mseselect(dev, rw, p)
* Since this is an exclusive open device, any previous proc.
* pointer is trash now, so we can just assign it.
*/
- sc->sc_selp = p->p_pid;
+ selrecord(p, &sc->sc_selp);
splx(s);
return (0);
}
@@ -350,11 +350,7 @@ mseintr(unit)
sc->sc_flags &= ~MSESC_WANT;
wakeup((caddr_t)sc);
}
- if (sc->sc_selp) {
- p = sc->sc_selp;
- sc->sc_selp = (pid_t)0;
- selwakeup(p, 0);
- }
+ selwakeup(&sc->sc_selp);
}
}
diff --git a/sys/dev/sio/sio.c b/sys/dev/sio/sio.c
index ad09f7a3c1d4..9bdb8c410db8 100644
--- a/sys/dev/sio/sio.c
+++ b/sys/dev/sio/sio.c
@@ -36,6 +36,7 @@
#include "sio.h"
#if NSIO > 0
+#define DONT_MALLOC_TTYS
/*
* Serial driver, based on 386BSD-0.1 com driver.
* Mostly rewritten to use pseudo-DMA.
@@ -61,9 +62,11 @@
#define FAKE_DCD(unit) ((unit) == comconsole)
#define LOTS_OF_EVENTS 64 /* helps separate urgent events from input */
+#define RBSZ 1024
#define RB_I_HIGH_WATER (RBSZ - 2 * RS_IBUFSIZE)
#define RB_I_LOW_WATER ((RBSZ - 2 * RS_IBUFSIZE) * 7 / 8)
#define RS_IBUFSIZE 256
+#define RS_OBUFSIZE 256
#define TTY_BI TTY_FE /* XXX */
#define TTY_OE TTY_PE /* XXX */
@@ -221,39 +224,39 @@ struct com_s {
#define CE_INPUT_OFFSET RS_IBUFSIZE
u_char ibuf1[2 * RS_IBUFSIZE];
u_char ibuf2[2 * RS_IBUFSIZE];
+ u_char obuf[RS_OBUFSIZE];
};
/*
* The public functions in the com module ought to be declared in a com-driver
* system header.
*/
-#define Dev_t int /* promoted dev_t */
/* Interrupt handling entry points. */
void siointr __P((int unit));
void siopoll __P((void));
/* Device switch entry points. */
-int sioopen __P((Dev_t dev, int oflags, int devtype,
+int sioopen __P((dev_t dev, int oflags, int devtype,
struct proc *p));
-int sioclose __P((Dev_t dev, int fflag, int devtype,
+int sioclose __P((dev_t dev, int fflag, int devtype,
struct proc *p));
-int sioread __P((Dev_t dev, struct uio *uio, int ioflag));
-int siowrite __P((Dev_t dev, struct uio *uio, int ioflag));
-int sioioctl __P((Dev_t dev, int cmd, caddr_t data,
+int sioread __P((dev_t dev, struct uio *uio, int ioflag));
+int siowrite __P((dev_t dev, struct uio *uio, int ioflag));
+int sioioctl __P((dev_t dev, int cmd, caddr_t data,
int fflag, struct proc *p));
void siostop __P((struct tty *tp, int rw));
#define sioreset noreset
-int sioselect __P((Dev_t dev, int rw, struct proc *p));
+int sioselect __P((dev_t dev, int rw, struct proc *p));
#define siommap nommap
#define siostrategy nostrategy
/* Console device entry points. */
-int siocngetc __P((Dev_t dev));
+int siocngetc __P((dev_t dev));
struct consdev;
void siocninit __P((struct consdev *cp));
void siocnprobe __P((struct consdev *cp));
-void siocnputc __P((Dev_t dev, int c));
+void siocnputc __P((dev_t dev, int c));
static int sioattach __P((struct isa_device *dev));
static void comflush __P((struct com_s *com));
@@ -288,15 +291,9 @@ static int comconsole = -1;
static speed_t comdefaultrate = TTYDEF_SPEED;
static u_int com_events; /* input chars + weighted output completions */
static int commajor;
-#ifdef DONT_MALLOC_TTYS
-#define TB_OUT(tp) (&(tp)->t_out)
-#define TB_RAW(tp) (&(tp)->t_raw)
+#define TB_OUT(tp) (&(tp)->t_outq)
+#define TB_RAW(tp) (&(tp)->t_rawq)
struct tty sio_tty[NSIO];
-#else
-#define TB_OUT(tp) ((tp)->t_out)
-#define TB_RAW(tp) ((tp)->t_raw)
-struct tty *sio_tty[NSIO];
-#endif
extern struct tty *constty;
extern int tk_nin; /* XXX */
extern int tk_rawcc; /* XXX */
@@ -787,7 +784,7 @@ bidir_open_top:
}
out:
if (error == 0)
- error = (*linesw[tp->t_line].l_open)(dev, tp, 0);
+ error = (*linesw[tp->t_line].l_open)(dev, tp);
splx(s);
#ifdef COM_BIDIR
@@ -1129,7 +1126,7 @@ sioioctl(dev, cmd, data, flag, p)
com = com_addr(UNIT(dev));
tp = com->tp;
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return (error);
error = ttioctl(tp, cmd, data, flag);
@@ -1222,6 +1219,7 @@ sioioctl(dev, cmd, data, flag, p)
*(int *)data = com->bidir;
break;
#endif /* COM_BIDIR */
+#if 0
case TIOCMSDTRWAIT:
/* must be root since the wait applies to following logins */
error = suser(p->p_ucred, &p->p_acflag);
@@ -1240,6 +1238,7 @@ sioioctl(dev, cmd, data, flag, p)
case TIOCMGDTRWAIT:
*(int *)data = com->dtr_wait;
break;
+#endif
#ifdef TIOCTIMESTAMP
case TIOCTIMESTAMP:
com->do_timestamp = TRUE;
@@ -1259,16 +1258,14 @@ static void
comflush(com)
struct com_s *com;
{
- struct ringb *rbp;
+ struct clist *rbp;
disable_intr();
if (com->state & CS_ODONE)
com_events -= LOTS_OF_EVENTS;
com->state &= ~(CS_ODONE | CS_BUSY);
enable_intr();
- rbp = TB_OUT(com->tp);
- rbp->rb_hd += com->ocount;
- rbp->rb_hd = RB_ROLLOVER(rbp, rbp->rb_hd);
+ while( getc( TB_OUT(com->tp)) != -1);
com->ocount = 0;
com->tp->t_state &= ~TS_BUSY;
}
@@ -1343,8 +1340,8 @@ repeat:
* CS_RTS_IFLOW is on.
*/
if ((com->state & CS_RTS_IFLOW)
- && !(com->mcr_image & MCR_RTS)
- && !(tp->t_state & TS_RTS_IFLOW))
+ && !(com->mcr_image & MCR_RTS) /*
+ && !(tp->t_state & TS_RTS_IFLOW) */)
outb(com->modem_ctl_port,
com->mcr_image |= MCR_RTS);
enable_intr();
@@ -1404,16 +1401,17 @@ repeat:
if (incc <= 0 || !(tp->t_state & TS_ISOPEN))
continue;
if (com->state & CS_RTS_IFLOW
- && RB_LEN(TB_RAW(tp)) + incc >= RB_I_HIGH_WATER
- && !(tp->t_state & TS_RTS_IFLOW)
+ && TB_RAW(tp)->c_cc + incc >= RB_I_HIGH_WATER /*
+ && !(tp->t_state & TS_RTS_IFLOW) */
/*
* XXX - need RTS flow control for all line disciplines.
* Only have it in standard one now.
*/
&& linesw[tp->t_line].l_rint == ttyinput) {
- tp->t_state |= TS_RTS_IFLOW;
+/* tp->t_state |= TS_RTS_IFLOW; */
ttstart(tp);
}
+#if 0
/*
* Avoid the grotesquely inefficient lineswitch routine
* (ttyinput) in "raw" mode. It usually takes about 450
@@ -1442,6 +1440,7 @@ repeat:
ttstart(tp);
}
} else {
+#endif
do {
u_char line_status;
int recv_data;
@@ -1461,7 +1460,9 @@ repeat:
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
} while (--incc > 0);
+#if 0
}
+#endif
if (com_events == 0)
break;
}
@@ -1624,10 +1625,12 @@ comstart(tp)
com->state &= ~CS_TTGO;
else
com->state |= CS_TTGO;
+#if 0
if (tp->t_state & TS_RTS_IFLOW) {
if (com->mcr_image & MCR_RTS && com->state & CS_RTS_IFLOW)
outb(com->modem_ctl_port, com->mcr_image &= ~MCR_RTS);
} else {
+#endif
/*
* XXX don't raise MCR_RTS if CTS_RTS_IFLOW is off. Set it
* appropriately in comparam() if RTS-flow is being changed.
@@ -1635,31 +1638,29 @@ comstart(tp)
*/
if (!(com->mcr_image & MCR_RTS) && com->iptr < com->ihighwater)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
+#if 0
}
+#endif
enable_intr();
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP))
goto out;
- if (RB_LEN(TB_OUT(tp)) <= tp->t_lowat) {
+ if (TB_OUT(tp)->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)TB_OUT(tp));
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel, tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (com->ocount != 0) {
disable_intr();
siointr1(com);
enable_intr();
- } else if (RB_LEN(TB_OUT(tp)) != 0) {
+ } else if (TB_OUT(tp)->c_cc != 0) {
tp->t_state |= TS_BUSY;
- com->ocount = RB_CONTIGGET(TB_OUT(tp));
disable_intr();
- com->obufend = (com->optr = (u_char *)TB_OUT(tp)->rb_hd)
- + com->ocount;
+ com->ocount = q_to_b(TB_OUT(tp), com->obuf, sizeof com->obuf);
+ com->optr = com->obuf;
+ com->obufend = com->obuf + com->ocount;
com->state |= CS_BUSY;
siointr1(com); /* fake interrupt to start output */
enable_intr();
@@ -1728,11 +1729,11 @@ comwakeup(chan, ticks)
{
int unit;
- timeout(comwakeup, (caddr_t) NULL, hz / 100);
+ timeout((timeout_func_t)comwakeup, (caddr_t) NULL, hz / 100);
if (com_events != 0) {
#ifndef OLD_INTERRUPT_HANDLING
- int s = splsofttty();
+ int s = spltty();
#endif
siopoll();
#ifndef OLD_INTERRUPT_HANDLING
diff --git a/sys/dev/syscons/syscons.c b/sys/dev/syscons/syscons.c
index 87572956f84c..39292f93dcf3 100644
--- a/sys/dev/syscons/syscons.c
+++ b/sys/dev/syscons/syscons.c
@@ -44,6 +44,7 @@
#endif
#include "param.h"
+#include <sys/systm.h>
#include "conf.h"
#include "ioctl.h"
#include "proc.h"
@@ -51,7 +52,6 @@
#include "tty.h"
#include "uio.h"
#include "callout.h"
-#include "systm.h"
#include "kernel.h"
#include "syslog.h"
#include "errno.h"
@@ -277,14 +277,20 @@ int ttrstrt();
#endif
#if defined(__FreeBSD__)
+#if 0
#define VIRTUAL_TTY(x) (pccons[x] = ttymalloc(pccons[x]))
#define CONSOLE_TTY (pccons[NCONS] = ttymalloc(pccons[NCONS]))
+struct tty *pccons[NCONS+1];
+#else
+#define VIRTUAL_TTY(x) &pccons[x]
+#define CONSOLE_TTY &pccons[NCONS]
+struct tty pccons[NCONS+1];
+#endif
+#define timeout_t timeout_func_t
#define frametype struct trapframe
#define eflags tf_eflags
-#define timeout_t timeout_func_t
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
-struct tty *pccons[NCONS+1];
#endif
#if defined(__386BSD__) && !defined(__FreeBSD__)
@@ -456,11 +462,7 @@ int pcopen(dev_t dev, int flag, int mode, struct proc *p)
return(EBUSY);
tp->t_state |= TS_CARR_ON;
tp->t_cflag |= CLOCAL;
-#if defined(__FreeBSD__)
- return((*linesw[tp->t_line].l_open)(dev, tp, 0));
-#else
return((*linesw[tp->t_line].l_open)(dev, tp));
-#endif
}
@@ -744,12 +746,12 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return 0;
case KDENABIO: /* allow io operations */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
return 0;
case KDDISABIO: /* disallow io operations (default) */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
return 0;
@@ -960,7 +962,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
if (saved_console < 0) {
saved_console = get_scr_num();
switch_scr(minor(dev));
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
scp->status |= UNKNOWN_MODE;
scp->status |= KBD_RAW_MODE;
@@ -969,7 +971,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return EAGAIN;
case CONSOLE_X_MODE_OFF:/* just to be compatible */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
if (crtc_vga) {
load_font(0, 16, font_8x16);
@@ -1002,7 +1004,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
break;
}
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return(error);
error = ttioctl(tp, cmd, data, flag);
@@ -1028,7 +1030,7 @@ void pcxint(dev_t dev)
void pcstart(struct tty *tp)
{
-#if defined(NetBSD)
+#if defined(NetBSD) || defined(__FreeBSD__)
struct clist *rbp;
int i, s, len;
u_char buf[PCBURST];
@@ -1046,10 +1048,6 @@ void pcstart(struct tty *tp)
if (buf[i]) ansi_put(scp, buf[i]);
s = spltty();
tp->t_state &= ~TS_BUSY;
- if (rbp->c_cc) {
- tp->t_state |= TS_TIMEOUT;
- timeout((timeout_t)ttrstrt, (caddr_t)tp, 1);
- }
if (rbp->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
@@ -1060,7 +1058,7 @@ void pcstart(struct tty *tp)
}
splx(s);
-#else /* __FreeBSD__ & __386BSD__ */
+#else /* __386BSD__ */
int c, s, len, i;
scr_stat *scp = get_scr_stat(tp->t_dev);
@@ -1076,12 +1074,7 @@ void pcstart(struct tty *tp)
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)tp->t_out);
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel,
- tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (RB_LEN(tp->t_out) == 0)
break;
diff --git a/sys/fs/cd9660/cd9660_lookup.c b/sys/fs/cd9660/cd9660_lookup.c
index 62d1d3fc791e..36daffd6b2aa 100644
--- a/sys/fs/cd9660/cd9660_lookup.c
+++ b/sys/fs/cd9660/cd9660_lookup.c
@@ -89,6 +89,7 @@ struct nchstats iso_nchstats;
*
* NOTE: (LOOKUP | LOCKPARENT) currently returns the parent inode unlocked.
*/
+int
cd9660_lookup(ap)
struct vop_lookup_args /* {
struct vnode *a_dvp;
@@ -100,9 +101,9 @@ cd9660_lookup(ap)
register struct iso_node *dp; /* inode for directory being searched */
register struct iso_mnt *imp; /* file system that directory is in */
struct buf *bp; /* a buffer of directory entries */
- struct iso_directory_record *ep;/* the current directory entry */
+ struct iso_directory_record *ep = 0;/* the current directory entry */
int entryoffsetinblock; /* offset of ep in bp's buffer */
- int saveoffset; /* offset of last directory entry in dir */
+ int saveoffset = 0; /* offset of last directory entry in dir */
int numdirpasses; /* strategy for directory search */
doff_t endsearch; /* offset to end directory search */
struct iso_node *pdp; /* saved dp during symlink work */
@@ -443,6 +444,7 @@ found:
* is non-zero, fill it in with a pointer to the
* remaining space in the directory.
*/
+int
iso_blkatoff(ip, offset, bpp)
struct iso_node *ip;
doff_t offset;
diff --git a/sys/fs/cd9660/cd9660_node.c b/sys/fs/cd9660/cd9660_node.c
index d83a7a6f126a..f9641ffded7c 100644
--- a/sys/fs/cd9660/cd9660_node.c
+++ b/sys/fs/cd9660/cd9660_node.c
@@ -84,6 +84,7 @@ int prtactive; /* 1 => print out reclaim of active vnodes */
/*
* Initialize hash links for inodes and dnodes.
*/
+int
cd9660_init()
{
register int i;
@@ -102,6 +103,7 @@ cd9660_init()
dh->dh_head[1] = dh;
}
#endif
+ return (0);
}
#ifdef ISODEVMAP
@@ -163,9 +165,11 @@ iso_dunmap(dev)
* return the inode locked. Detection and handling of mount
* points must be done by the calling routine.
*/
+int
iso_iget(xp, ino, relocated, ipp, isodir)
struct iso_node *xp;
ino_t ino;
+ int relocated;
struct iso_node **ipp;
struct iso_directory_record *isodir;
{
@@ -338,6 +342,7 @@ loop:
/*
* Unlock and decrement the reference count of an inode structure.
*/
+int
iso_iput(ip)
register struct iso_node *ip;
{
@@ -346,6 +351,7 @@ iso_iput(ip)
panic("iso_iput");
ISO_IUNLOCK(ip);
vrele(ITOV(ip));
+ return (0);
}
/*
@@ -412,6 +418,7 @@ cd9660_reclaim(ap)
/*
* Lock an inode. If its already locked, set the WANT bit and sleep.
*/
+int
iso_ilock(ip)
register struct iso_node *ip;
{
@@ -426,11 +433,13 @@ iso_ilock(ip)
ip->i_spare1 = 0;
ip->i_spare0 = curproc->p_pid;
ip->i_flag |= ILOCKED;
+ return (0);
}
/*
* Unlock an inode. If WANT bit is on, wakeup.
*/
+int
iso_iunlock(ip)
register struct iso_node *ip;
{
@@ -443,6 +452,7 @@ iso_iunlock(ip)
ip->i_flag &= ~IWANT;
wakeup((caddr_t)ip);
}
+ return (0);
}
/*
diff --git a/sys/fs/cd9660/cd9660_util.c b/sys/fs/cd9660/cd9660_util.c
index f74f0515ff77..39c5fe491a47 100644
--- a/sys/fs/cd9660/cd9660_util.c
+++ b/sys/fs/cd9660/cd9660_util.c
@@ -157,7 +157,7 @@ int
isofncmp(unsigned char *fn,int fnlen,unsigned char *isofn,int isolen)
{
int i, j;
- char c;
+ unsigned char c;
while (--fnlen >= 0) {
if (--isolen < 0)
diff --git a/sys/fs/cd9660/cd9660_vfsops.c b/sys/fs/cd9660/cd9660_vfsops.c
index 02dd92af66f6..bc48367a38c6 100644
--- a/sys/fs/cd9660/cd9660_vfsops.c
+++ b/sys/fs/cd9660/cd9660_vfsops.c
@@ -82,6 +82,7 @@ struct vfsops cd9660_vfsops = {
static iso_mountfs();
+int
cd9660_mountroot()
{
register struct mount *mp;
@@ -139,6 +140,7 @@ int iso_doforce = 1;
*
* mount system call
*/
+int
cd9660_mount(mp, path, data, ndp, p)
register struct mount *mp;
char *path;
@@ -150,7 +152,7 @@ cd9660_mount(mp, path, data, ndp, p)
struct iso_args args;
u_int size;
int error;
- struct iso_mnt *imp;
+ struct iso_mnt *imp = 0;
if (error = copyin(data, (caddr_t)&args, sizeof (struct iso_args)))
return (error);
@@ -211,7 +213,8 @@ cd9660_mount(mp, path, data, ndp, p)
/*
* Common code for mount and mountroot
*/
-static iso_mountfs(devvp, mp, p, argp)
+static int
+iso_mountfs(devvp, mp, p, argp)
register struct vnode *devvp;
struct mount *mp;
struct proc *p;
@@ -381,6 +384,7 @@ out:
* Nothing to do at the moment.
*/
/* ARGSUSED */
+int
cd9660_start(mp, flags, p)
struct mount *mp;
int flags;
@@ -433,6 +437,7 @@ cd9660_unmount(mp, mntflags, p)
/*
* Return root of a filesystem
*/
+int
cd9660_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
@@ -485,6 +490,7 @@ cd9660_quotactl(mp, cmd, uid, arg, p)
/*
* Get file system statistics.
*/
+int
cd9660_statfs(mp, sbp, p)
struct mount *mp;
register struct statfs *sbp;
@@ -659,6 +665,7 @@ cd9660_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
* Vnode pointer to File handle
*/
/* ARGSUSED */
+int
cd9660_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c
index 59f5a73f5c86..7a2964bf22db 100644
--- a/sys/fs/cd9660/cd9660_vnops.c
+++ b/sys/fs/cd9660/cd9660_vnops.c
@@ -157,6 +157,7 @@ cd9660_close(ap)
* super user is granted all permissions.
*/
/* ARGSUSED */
+int
cd9660_access(ap)
struct vop_access_args /* {
struct vnode *a_vp;
@@ -168,6 +169,7 @@ cd9660_access(ap)
return (0);
}
+int
cd9660_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
@@ -217,6 +219,7 @@ extern int doclusterread;
/*
* Vnode op for reading.
*/
+int
cd9660_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
diff --git a/sys/fs/deadfs/dead_vnops.c b/sys/fs/deadfs/dead_vnops.c
index 9d04652b7fc8..cac8775810c0 100644
--- a/sys/fs/deadfs/dead_vnops.c
+++ b/sys/fs/deadfs/dead_vnops.c
@@ -156,6 +156,7 @@ dead_lookup(ap)
* Open always fails as if device did not exist.
*/
/* ARGSUSED */
+int
dead_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -172,6 +173,7 @@ dead_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
dead_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -195,6 +197,7 @@ dead_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
dead_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -213,6 +216,7 @@ dead_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
dead_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -230,6 +234,7 @@ dead_ioctl(ap)
}
/* ARGSUSED */
+int
dead_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -249,6 +254,7 @@ dead_select(ap)
/*
* Just call the device strategy routine
*/
+int
dead_strategy(ap)
struct vop_strategy_args /* {
struct buf *a_bp;
@@ -266,6 +272,7 @@ dead_strategy(ap)
/*
* Wait until the vnode has finished changing state.
*/
+int
dead_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -280,6 +287,7 @@ dead_lock(ap)
/*
* Wait until the vnode has finished changing state.
*/
+int
dead_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -299,6 +307,7 @@ dead_bmap(ap)
* Print out the contents of a dead vnode.
*/
/* ARGSUSED */
+int
dead_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -306,11 +315,13 @@ dead_print(ap)
{
printf("tag VT_NON, dead vnode\n");
+ return (0);
}
/*
* Empty vnode failed operation
*/
+int
dead_ebadf()
{
@@ -320,6 +331,7 @@ dead_ebadf()
/*
* Empty vnode bad operation
*/
+int
dead_badop()
{
@@ -330,6 +342,7 @@ dead_badop()
/*
* Empty vnode null operation
*/
+int
dead_nullop()
{
@@ -340,6 +353,7 @@ dead_nullop()
* We have to wait during times when the vnode is
* in a state of change.
*/
+int
chkvnlock(vp)
register struct vnode *vp;
{
diff --git a/sys/fs/fdescfs/fdesc_vnops.c b/sys/fs/fdescfs/fdesc_vnops.c
index 00d8675aea2f..83e665705532 100644
--- a/sys/fs/fdescfs/fdesc_vnops.c
+++ b/sys/fs/fdescfs/fdesc_vnops.c
@@ -88,6 +88,7 @@ static struct fdcache fdcache[NFDCACHE];
/*
* Initialise cache headers
*/
+int
fdesc_init()
{
struct fdcache *fc;
@@ -96,6 +97,7 @@ fdesc_init()
for (fc = fdcache; fc < fdcache + NFDCACHE; fc++)
fc->fc_forw = fc->fc_back = (struct fdescnode *) fc;
+ return (0);
}
/*
@@ -183,7 +185,7 @@ fdesc_lookup(ap)
char *pname;
struct proc *p;
int nfiles;
- unsigned fd;
+ unsigned fd = 0;
int error;
struct vnode *fvp;
char *ln;
@@ -800,6 +802,7 @@ fdesc_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
fdesc_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
diff --git a/sys/fs/fifofs/fifo_vnops.c b/sys/fs/fifofs/fifo_vnops.c
index bad33a430b62..a1ba3f4abf93 100644
--- a/sys/fs/fifofs/fifo_vnops.c
+++ b/sys/fs/fifofs/fifo_vnops.c
@@ -111,6 +111,7 @@ struct vnodeopv_desc fifo_vnodeop_opv_desc =
* Trivial lookup routine that always fails.
*/
/* ARGSUSED */
+int
fifo_lookup(ap)
struct vop_lookup_args /* {
struct vnode * a_dvp;
@@ -128,6 +129,7 @@ fifo_lookup(ap)
* to find an active instance of a fifo.
*/
/* ARGSUSED */
+int
fifo_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -218,6 +220,7 @@ fifo_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
fifo_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -257,6 +260,7 @@ fifo_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
fifo_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -286,6 +290,7 @@ fifo_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
fifo_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -308,6 +313,7 @@ fifo_ioctl(ap)
}
/* ARGSUSED */
+int
fifo_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -329,6 +335,7 @@ fifo_select(ap)
/*
* This is a noop, simply returning what one has been given.
*/
+int
fifo_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -349,6 +356,7 @@ fifo_bmap(ap)
* At the moment we do not do any locking.
*/
/* ARGSUSED */
+int
fifo_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -359,6 +367,7 @@ fifo_lock(ap)
}
/* ARGSUSED */
+int
fifo_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -372,6 +381,7 @@ fifo_unlock(ap)
* Device close routine
*/
/* ARGSUSED */
+int
fifo_close(ap)
struct vop_close_args /* {
struct vnode *a_vp;
@@ -407,6 +417,7 @@ fifo_close(ap)
/*
* Print out the contents of a fifo vnode.
*/
+int
fifo_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -416,11 +427,13 @@ fifo_print(ap)
printf("tag VT_NON");
fifo_printinfo(ap->a_vp);
printf("\n");
+ return (0);
}
/*
* Print out internal contents of a fifo vnode.
*/
+int
fifo_printinfo(vp)
struct vnode *vp;
{
@@ -428,11 +441,13 @@ fifo_printinfo(vp)
printf(", fifo with %d readers and %d writers",
fip->fi_readers, fip->fi_writers);
+ return (0);
}
/*
* Return POSIX pathconf information applicable to fifo's.
*/
+int
fifo_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -460,6 +475,7 @@ fifo_pathconf(ap)
/*
* Fifo failed operation
*/
+int
fifo_ebadf()
{
@@ -470,6 +486,7 @@ fifo_ebadf()
* Fifo advisory byte-level locks.
*/
/* ARGSUSED */
+int
fifo_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
@@ -486,6 +503,7 @@ fifo_advlock(ap)
/*
* Fifo bad operation
*/
+int
fifo_badop()
{
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index a31723fe4c22..5541fb3bf80d 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -73,6 +73,7 @@ static struct null_node_cache null_node_cache[NNULLNODECACHE];
/*
* Initialise cache headers
*/
+int
nullfs_init()
{
struct null_node_cache *ac;
@@ -82,6 +83,7 @@ nullfs_init()
for (ac = null_node_cache; ac < null_node_cache + NNULLNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct null_node *) ac;
+ return (0);
}
/*
diff --git a/sys/fs/portalfs/portal_vnops.c b/sys/fs/portalfs/portal_vnops.c
index 5e170261e71f..c19e8152099f 100644
--- a/sys/fs/portalfs/portal_vnops.c
+++ b/sys/fs/portalfs/portal_vnops.c
@@ -524,6 +524,7 @@ portal_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
portal_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
diff --git a/sys/fs/procfs/procfs_vfsops.c b/sys/fs/procfs/procfs_vfsops.c
index 3938ca123576..67dc31f78b3b 100644
--- a/sys/fs/procfs/procfs_vfsops.c
+++ b/sys/fs/procfs/procfs_vfsops.c
@@ -62,6 +62,7 @@
* mount system call
*/
/* ARGSUSED */
+int
procfs_mount(mp, path, data, ndp, p)
struct mount *mp;
char *path;
@@ -96,6 +97,7 @@ procfs_mount(mp, path, data, ndp, p)
/*
* unmount system call
*/
+int
procfs_unmount(mp, mntflags, p)
struct mount *mp;
int mntflags;
@@ -118,6 +120,7 @@ procfs_unmount(mp, mntflags, p)
return (0);
}
+int
procfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
@@ -141,6 +144,7 @@ procfs_root(mp, vpp)
/*
*/
/* ARGSUSED */
+int
procfs_start(mp, flags, p)
struct mount *mp;
int flags;
@@ -153,6 +157,7 @@ procfs_start(mp, flags, p)
/*
* Get file system statistics.
*/
+int
procfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
@@ -177,6 +182,7 @@ procfs_statfs(mp, sbp, p)
}
+int
procfs_quotactl(mp, cmds, uid, arg, p)
struct mount *mp;
int cmds;
@@ -188,6 +194,7 @@ procfs_quotactl(mp, cmds, uid, arg, p)
return (EOPNOTSUPP);
}
+int
procfs_sync(mp, waitfor)
struct mount *mp;
int waitfor;
@@ -196,6 +203,7 @@ procfs_sync(mp, waitfor)
return (0);
}
+int
procfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
@@ -205,6 +213,7 @@ procfs_vget(mp, ino, vpp)
return (EOPNOTSUPP);
}
+int
procfs_fhtovp(mp, fhp, vpp)
struct mount *mp;
struct fid *fhp;
@@ -214,6 +223,7 @@ procfs_fhtovp(mp, fhp, vpp)
return (EINVAL);
}
+int
procfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
@@ -222,6 +232,7 @@ procfs_vptofh(vp, fhp)
return EINVAL;
}
+int
procfs_init()
{
diff --git a/sys/fs/procfs/procfs_vnops.c b/sys/fs/procfs/procfs_vnops.c
index 4e1ee002bb90..b6c6e6fa572a 100644
--- a/sys/fs/procfs/procfs_vnops.c
+++ b/sys/fs/procfs/procfs_vnops.c
@@ -100,6 +100,7 @@ static pid_t atopid __P((const char *, u_int));
* is to support exclusive open on process
* memory images.
*/
+int
procfs_open(ap)
struct vop_open_args *ap;
{
@@ -134,6 +135,7 @@ procfs_open(ap)
* nothing to do for procfs other than undo
* any exclusive open flag (see _open above).
*/
+int
procfs_close(ap)
struct vop_close_args *ap;
{
@@ -153,6 +155,7 @@ procfs_close(ap)
* do an ioctl operation on pfsnode (vp).
* (vp) is not locked on entry or exit.
*/
+int
procfs_ioctl(ap)
struct vop_ioctl_args *ap;
{
@@ -170,6 +173,7 @@ procfs_ioctl(ap)
* usual no-op bmap, although returning
* (EIO) would be a reasonable alternative.
*/
+int
procfs_bmap(ap)
struct vop_bmap_args *ap;
{
@@ -197,6 +201,7 @@ procfs_bmap(ap)
*
* (vp) is not locked on entry or exit.
*/
+int
procfs_inactive(ap)
struct vop_inactive_args *ap;
{
@@ -215,6 +220,7 @@ procfs_inactive(ap)
* to free any private data and remove the node
* from any private lists.
*/
+int
procfs_reclaim(ap)
struct vop_reclaim_args *ap;
{
@@ -227,6 +233,7 @@ procfs_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
procfs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -265,6 +272,7 @@ procfs_pathconf(ap)
* just print a readable description
* of (vp).
*/
+int
procfs_print(ap)
struct vop_print_args *ap;
{
@@ -273,6 +281,7 @@ procfs_print(ap)
printf("tag VT_PROCFS, pid %d, mode %x, flags %x\n",
pfs->pfs_pid,
pfs->pfs_mode, pfs->pfs_flags);
+ return (0);
}
/*
@@ -281,6 +290,7 @@ procfs_print(ap)
* for undoing any side-effects caused by the lookup.
* this will always include freeing the pathname buffer.
*/
+int
procfs_abortop(ap)
struct vop_abortop_args *ap;
{
@@ -293,6 +303,7 @@ procfs_abortop(ap)
/*
* generic entry point for unsupported operations
*/
+int
procfs_badop()
{
@@ -308,6 +319,7 @@ procfs_badop()
*
* this is relatively minimal for procfs.
*/
+int
procfs_getattr(ap)
struct vop_getattr_args *ap;
{
@@ -423,6 +435,7 @@ procfs_getattr(ap)
return (error);
}
+int
procfs_setattr(ap)
struct vop_setattr_args *ap;
{
@@ -451,6 +464,7 @@ procfs_setattr(ap)
* but does mean that the i/o entry points need to check
* that the operation really does make sense.
*/
+int
procfs_access(ap)
struct vop_access_args *ap;
{
@@ -502,6 +516,7 @@ found:
* filesystem doesn't do any locking of its own. otherwise
* read and inwardly digest ufs_lookup().
*/
+int
procfs_lookup(ap)
struct vop_lookup_args *ap;
{
@@ -612,6 +627,7 @@ procfs_lookup(ap)
*
* this should just be done through read()
*/
+int
procfs_readdir(ap)
struct vop_readdir_args *ap;
{
diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c
index 111c517b1627..55b5dd8d433b 100644
--- a/sys/fs/specfs/spec_vnops.c
+++ b/sys/fs/specfs/spec_vnops.c
@@ -126,6 +126,7 @@ spec_lookup(ap)
* Open a special file.
*/
/* ARGSUSED */
+int
spec_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -203,6 +204,7 @@ spec_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
spec_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -285,6 +287,7 @@ spec_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
spec_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -365,6 +368,7 @@ spec_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
spec_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -399,6 +403,7 @@ spec_ioctl(ap)
}
/* ARGSUSED */
+int
spec_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -476,6 +481,7 @@ loop:
/*
* Just call the device strategy routine
*/
+int
spec_strategy(ap)
struct vop_strategy_args /* {
struct buf *a_bp;
@@ -489,6 +495,7 @@ spec_strategy(ap)
/*
* This is a noop, simply returning what one has been given.
*/
+int
spec_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -509,6 +516,7 @@ spec_bmap(ap)
* At the moment we do not do any locking.
*/
/* ARGSUSED */
+int
spec_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -519,6 +527,7 @@ spec_lock(ap)
}
/* ARGSUSED */
+int
spec_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -532,6 +541,7 @@ spec_unlock(ap)
* Device close routine
*/
/* ARGSUSED */
+int
spec_close(ap)
struct vop_close_args /* {
struct vnode *a_vp;
@@ -606,6 +616,7 @@ spec_close(ap)
/*
* Print out the contents of a special device vnode.
*/
+int
spec_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -614,11 +625,13 @@ spec_print(ap)
printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
minor(ap->a_vp->v_rdev));
+ return (0);
}
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
spec_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -656,6 +669,7 @@ spec_pathconf(ap)
* Special device advisory byte-level locks.
*/
/* ARGSUSED */
+int
spec_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
@@ -672,6 +686,7 @@ spec_advlock(ap)
/*
* Special device failed operation
*/
+int
spec_ebadf()
{
@@ -681,6 +696,7 @@ spec_ebadf()
/*
* Special device bad operation
*/
+int
spec_badop()
{
diff --git a/sys/fs/umapfs/umap_subr.c b/sys/fs/umapfs/umap_subr.c
index 6f1f077a6217..b640891a410c 100644
--- a/sys/fs/umapfs/umap_subr.c
+++ b/sys/fs/umapfs/umap_subr.c
@@ -73,6 +73,7 @@ static struct umap_node_cache umap_node_cache[NUMAPNODECACHE];
/*
* Initialise cache headers
*/
+int
umapfs_init()
{
struct umap_node_cache *ac;
@@ -82,6 +83,7 @@ umapfs_init()
for (ac = umap_node_cache; ac < umap_node_cache + NUMAPNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct umap_node *) ac;
+ return (0);
}
/*
diff --git a/sys/fs/umapfs/umap_vnops.c b/sys/fs/umapfs/umap_vnops.c
index 287804e15618..0c1955f1ed33 100644
--- a/sys/fs/umapfs/umap_vnops.c
+++ b/sys/fs/umapfs/umap_vnops.c
@@ -67,7 +67,7 @@ umap_bypass(ap)
{
extern int (**umap_vnodeop_p)(); /* not extern, really "forward" */
struct ucred **credpp = 0, *credp = 0;
- struct ucred *savecredp, *savecompcredp = 0;
+ struct ucred *savecredp = 0, *savecompcredp = 0;
struct ucred *compcredp = 0;
struct vnode **this_vp_p;
int error;
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index 77947d1dfbe1..ea4f804a24db 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -49,9 +49,7 @@
#include <sys/queue.h>
#include <miscfs/union/union.h>
-#ifdef DIAGNOSTIC
#include <sys/proc.h>
-#endif
/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32
@@ -71,6 +69,7 @@ union_init()
for (i = 0; i < NHASH; i++)
LIST_INIT(&unhead[i]);
bzero((caddr_t) unvplock, sizeof(unvplock));
+ return (0);
}
static int
@@ -223,10 +222,10 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
struct vnode *lowervp; /* may be null */
{
int error;
- struct union_node *un;
+ struct union_node *un = 0;
struct union_node **pp;
struct vnode *xlowervp = NULLVP;
- int hash;
+ int hash = 0;
int try;
if (uppervp == NULLVP && lowervp == NULLVP)
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index 9fa27460e3d4..42931d7c7bc1 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -73,7 +73,7 @@ union_mount(mp, path, data, ndp, p)
struct ucred *cred = 0;
struct ucred *scred;
struct vattr va;
- char *cp;
+ char *cp = 0;
int len;
u_int size;
diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c
index 96327b0922d4..30f223350b21 100644
--- a/sys/fs/unionfs/union_vnops.c
+++ b/sys/fs/unionfs/union_vnops.c
@@ -162,7 +162,7 @@ union_lookup(ap)
int lockparent = cnp->cn_flags & LOCKPARENT;
int rdonly = cnp->cn_flags & RDONLY;
struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
- struct ucred *saved_cred;
+ struct ucred *saved_cred = 0;
cnp->cn_flags |= LOCKPARENT;
diff --git a/sys/gnu/ext2fs/ext2_bmap.c b/sys/gnu/ext2fs/ext2_bmap.c
index bcd838d036a1..a424d31d7a85 100644
--- a/sys/gnu/ext2fs/ext2_bmap.c
+++ b/sys/gnu/ext2fs/ext2_bmap.c
@@ -112,7 +112,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct indir a[NIADDR], *xap;
daddr_t daddr;
long metalbn;
- int error, maxrun, num;
+ int error, maxrun = 0, num;
ip = VTOI(vp);
mp = vp->v_mount;
diff --git a/sys/gnu/fs/ext2fs/ext2_bmap.c b/sys/gnu/fs/ext2fs/ext2_bmap.c
index bcd838d036a1..a424d31d7a85 100644
--- a/sys/gnu/fs/ext2fs/ext2_bmap.c
+++ b/sys/gnu/fs/ext2fs/ext2_bmap.c
@@ -112,7 +112,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct indir a[NIADDR], *xap;
daddr_t daddr;
long metalbn;
- int error, maxrun, num;
+ int error, maxrun = 0, num;
ip = VTOI(vp);
mp = vp->v_mount;
diff --git a/sys/i386/conf/Makefile.i386 b/sys/i386/conf/Makefile.i386
index db28a3406596..b755dbac1839 100644
--- a/sys/i386/conf/Makefile.i386
+++ b/sys/i386/conf/Makefile.i386
@@ -39,7 +39,6 @@ CWARNFLAGS=-W -Wreturn-type -Wcomment
# of material assistance.
#
COPTFLAGS=-O
-COPTFLAGS+=-D__FreeBSD__
INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
@@ -52,10 +51,10 @@ NORMAL_S= ${CPP} -I. -DLOCORE ${COPTS} $< | ${AS} ${ASFLAGS} -o $*.o
DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
SYSTEM_OBJS=locore.o exception.o swtch.o support.o ${OBJS} param.o \
- ioconf.o conf.o machdep.o
-SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS}
+ ioconf.o conf.o machdep.o vnode_if.o
+SYSTEM_DEP=Makefile symbols.sort ${SYSTEM_OBJS} libkern.a
SYSTEM_LD_HEAD= @echo loading $@; rm -f $@
-SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o
+SYSTEM_LD= @${LD} -Bstatic -Z -T ${LOAD_ADDRESS} -o $@ -X ${SYSTEM_OBJS} vers.o libkern.a
SYSTEM_LD_TAIL= @echo rearranging symbols; symorder symbols.sort $@; \
${DBSYM} -fT ${LOAD_ADDRESS} $@; ${STRIP} -x $@; size $@; chmod 755 $@
@@ -74,6 +73,13 @@ PROFILE_C= ${CC} -S -c ${CFLAGS} $< ; \
%LOAD
+libkern.a:
+ -@if [ X${PROF} = X ]; \
+ then ln -s $S/libkern/obj/libkern.a libkern.a; \
+ else ln -s $S/libkern/obj/libkern_p.a libkern.a; \
+ fi; \
+ echo ln -s $S/libkern/obj/libkern.a libkern.a
+
clean:
rm -f eddep *386bsd tags *.o locore.i [a-uw-z]*.s \
errs linterrs makelinks genassym ,assym.s stamp-assym
@@ -140,7 +146,7 @@ genassym: Makefile
${CC} ${INCLUDES} -DKERNEL ${IDENT} ${PARAM} \
${I386}/i386/genassym.c -static -o genassym
-depend: assym.s param.c
+depend: assym.s param.c vnode_if.h
sh /usr/bin/mkdep -DLOAD_ADDRESS=0x${LOAD_ADDRESS} ${COPTS} ${CFILES} ioconf.c param.c ${I386}/i386/conf.c
sh /usr/bin/mkdep -a -p ${INCLUDES} ${IDENT} ${PARAM} ${I386}/i386/genassym.c
@@ -173,6 +179,11 @@ vers.o: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP}
sh $S/conf/newvers.sh ${KERN_IDENT} ${IDENT}
${CC} ${CFLAGS} -c vers.c
+vnode_if.c: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+vnode_if.h: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+
%RULES
# DO NOT DELETE THIS LINE -- make depend uses it
diff --git a/sys/i386/conf/files.i386 b/sys/i386/conf/files.i386
index 7aec440ac98d..f70e799d8a33 100644
--- a/sys/i386/conf/files.i386
+++ b/sys/i386/conf/files.i386
@@ -14,6 +14,7 @@ i386/i386/mem.c standard
i386/i386/microtime.s standard
i386/i386/ns_cksum.c optional ns
i386/i386/pmap.c standard
+i386/i386/procfs_machdep.c optional procfs
i386/i386/sys_machdep.c standard
i386/i386/trap.c standard
i386/i386/vm_machdep.c standard
diff --git a/sys/i386/eisa/aha1742.c b/sys/i386/eisa/aha1742.c
index 95c0aeda16aa..27bce5f7c056 100644
--- a/sys/i386/eisa/aha1742.c
+++ b/sys/i386/eisa/aha1742.c
@@ -274,7 +274,7 @@ int ahb_attach();
int ahb_init __P((int unit));
int ahbintr();
int32 ahb_scsi_cmd();
-void ahb_timeout(caddr_t, int);
+void ahb_timeout(caddr_t);
void ahb_done();
struct ecb *cheat;
void ahb_free_ecb();
@@ -993,7 +993,7 @@ ahb_scsi_cmd(xs)
if (!(flags & SCSI_NOMASK)) {
s = splbio();
ahb_send_immed(unit, xs->sc_link->target, AHB_TARG_RESET);
- timeout(ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
splx(s);
return (SUCCESSFULLY_QUEUED);
} else {
@@ -1122,7 +1122,7 @@ ahb_scsi_cmd(xs)
if (!(flags & SCSI_NOMASK)) {
s = splbio();
ahb_send_mbox(unit, OP_START_ECB, xs->sc_link->target, ecb);
- timeout(ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
splx(s);
SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_sent\n"));
return (SUCCESSFULLY_QUEUED);
@@ -1152,7 +1152,7 @@ ahb_scsi_cmd(xs)
}
void
-ahb_timeout(caddr_t arg1, int arg2)
+ahb_timeout(caddr_t arg1)
{
struct ecb * ecb = (struct ecb *)arg1;
int unit;
@@ -1199,7 +1199,7 @@ ahb_timeout(caddr_t arg1, int arg2)
printf("\n");
ahb_send_mbox(unit, OP_ABORT_ECB, ecb->xs->sc_link->target, ecb);
/* 2 secs for the abort */
- timeout(ahb_timeout, (caddr_t)ecb, 2 * hz);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, 2 * hz);
ecb->flags = ECB_ABORTED;
}
splx(s);
diff --git a/sys/i386/i386/conf.c b/sys/i386/i386/conf.c
index a52f8cd068e8..3360de618c36 100644
--- a/sys/i386/i386/conf.c
+++ b/sys/i386/i386/conf.c
@@ -44,12 +44,27 @@
* $Id: conf.c,v 1.24 1994/04/21 14:10:31 sos Exp $
*/
-#include "param.h"
-#include "systm.h"
-#include "buf.h"
-#include "ioctl.h"
-#include "tty.h"
-#include "conf.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/ioctl.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/tty.h>
+#include <sys/conf.h>
+
+typedef int d_open_t __P((dev_t, int, int, struct proc *));
+typedef int d_close_t __P((dev_t, int, int, struct proc *));
+typedef int d_strategy_t __P((struct buf *));
+typedef int d_ioctl_t __P((dev_t, int, caddr_t, int, struct proc *));
+typedef int d_dump_t __P(());
+typedef int d_psize_t __P((dev_t));
+
+typedef int d_rdwr_t __P((dev_t, struct uio *, int));
+typedef int d_stop_t __P((struct tty *, int));
+typedef int d_reset_t __P((int));
+typedef int d_select_t __P((dev_t, int, struct proc *));
+typedef int d_mmap_t __P((/* XXX */));
int nullop(), enxio(), enodev();
d_rdwr_t rawread, rawwrite;
@@ -237,7 +252,7 @@ d_close_t pcclose;
d_rdwr_t pcread, pcwrite;
d_ioctl_t pcioctl;
d_mmap_t pcmmap;
-extern struct tty *pccons;
+extern struct tty pccons[];
/* controlling TTY */
d_open_t cttyopen;
@@ -263,7 +278,7 @@ d_close_t ptcclose;
d_rdwr_t ptcread, ptcwrite;
d_select_t ptcselect;
d_ioctl_t ptyioctl;
-extern struct tty *pt_tty[];
+extern struct tty pt_tty[];
#else
#define ptsopen (d_open_t *)enxio
#define ptsclose (d_close_t *)enxio
@@ -288,7 +303,7 @@ d_rdwr_t comwrite;
d_ioctl_t comioctl;
d_select_t comselect;
#define comreset (d_reset_t *)enxio
-extern struct tty *com_tty[];
+extern struct tty com_tty[];
#else
#define comopen (d_open_t *)enxio
#define comclose (d_close_t *)enxio
@@ -463,7 +478,7 @@ d_ioctl_t sioioctl;
d_select_t sioselect;
d_stop_t siostop;
#define sioreset (d_reset_t *)enxio
-extern struct tty *sio_tty[];
+extern struct tty sio_tty[];
#else
#define sioopen (d_open_t *)enxio
#define sioclose (d_close_t *)enxio
@@ -554,7 +569,7 @@ struct cdevsw cdevsw[] =
noioc, nostop, nullreset, NULL,
seltrue, nommap, nostrat },
{ pcopen, pcclose, pcread, pcwrite, /*12*/
- pcioctl, nullstop, nullreset, &pccons, /* pc */
+ pcioctl, nullstop, nullreset, pccons, /* pc */
ttselect, pcmmap, NULL },
{ sdopen, sdclose, rawread, rawwrite, /*13*/
sdioctl, nostop, nullreset, NULL, /* sd */
@@ -634,3 +649,108 @@ int mem_no = 2; /* major device number of memory special file */
* provided as a character (raw) device.
*/
dev_t swapdev = makedev(1, 0);
+
+/*
+ * Routine that identifies /dev/mem and /dev/kmem.
+ *
+ * A minimal stub routine can always return 0.
+ */
+int
+iskmemdev(dev)
+ dev_t dev;
+{
+
+ return (major(dev) == 2 && (minor(dev) == 0 || minor(dev) == 1));
+}
+
+int
+iszerodev(dev)
+ dev_t dev;
+{
+ return (major(dev) == 2 && minor(dev) == 12);
+}
+
+/*
+ * Routine to determine if a device is a disk.
+ *
+ * A minimal stub routine can always return 0.
+ */
+int
+isdisk(dev, type)
+ dev_t dev;
+ int type;
+{
+
+ switch (major(dev)) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ case 7:
+ if (type == VBLK)
+ return (1);
+ return (0);
+ case 3:
+ case 9:
+ case 13:
+ case 15:
+ case 29:
+ if (type == VCHR)
+ return (1);
+ /* fall through */
+ default:
+ return (0);
+ }
+ /* NOTREACHED */
+}
+
+#define MAXDEV 32
+static int chrtoblktbl[MAXDEV] = {
+ /* VCHR */ /* VBLK */
+ /* 0 */ NODEV,
+ /* 1 */ NODEV,
+ /* 2 */ NODEV,
+ /* 3 */ 0,
+ /* 4 */ NODEV,
+ /* 5 */ NODEV,
+ /* 6 */ NODEV,
+ /* 7 */ NODEV,
+ /* 8 */ NODEV,
+ /* 9 */ 2,
+ /* 10 */ 3,
+ /* 11 */ NODEV,
+ /* 12 */ NODEV,
+ /* 13 */ 4,
+ /* 14 */ 5,
+ /* 15 */ 6,
+ /* 16 */ NODEV,
+ /* 17 */ NODEV,
+ /* 18 */ NODEV,
+ /* 19 */ NODEV,
+ /* 20 */ NODEV,
+ /* 21 */ NODEV,
+ /* 22 */ NODEV,
+ /* 23 */ NODEV,
+ /* 25 */ NODEV,
+ /* 26 */ NODEV,
+ /* 27 */ NODEV,
+ /* 28 */ NODEV,
+ /* 29 */ 7,
+ /* 30 */ NODEV,
+ /* 31 */ NODEV,
+};
+/*
+ * Routine to convert from character to block device number.
+ *
+ * A minimal stub routine can always return NODEV.
+ */
+int
+chrtoblk(dev)
+ dev_t dev;
+{
+ int blkmaj;
+
+ if (major(dev) >= MAXDEV || (blkmaj = chrtoblktbl[major(dev)]) == NODEV)
+ return (NODEV);
+ return (makedev(blkmaj, minor(dev)));
+}
diff --git a/sys/i386/i386/cons.c b/sys/i386/i386/cons.c
index f5fc887be1b1..ceb4b398b62c 100644
--- a/sys/i386/i386/cons.c
+++ b/sys/i386/i386/cons.c
@@ -41,9 +41,9 @@
#include "sys/param.h"
+#include <sys/systm.h>
#include "sys/proc.h"
#include "sys/user.h"
-#include "sys/systm.h"
#include "sys/buf.h"
#include "sys/ioctl.h"
#include "sys/tty.h"
@@ -122,7 +122,7 @@ cnopen(dev, flag, mode, p)
return (0);
dev = cn_tab->cn_dev;
- if ((vfinddev(dev, VCHR, &vp) == 0) && vcount(vp))
+ if (vfinddev(dev, VCHR, &vp) && vcount(vp))
return (0);
return ((*cdevsw[major(dev)].d_open)(dev, flag, mode, p));
@@ -140,7 +140,7 @@ cnclose(dev, flag, mode, p)
return (0);
dev = cn_tab->cn_dev;
- if ((vfinddev(dev, VCHR, &vp) == 0) && vcount(vp))
+ if (vfinddev(dev, VCHR, &vp) && vcount(vp))
return (0);
return ((*cdevsw[major(dev)].d_close)(dev, flag, mode, p));
diff --git a/sys/i386/i386/cons.h b/sys/i386/i386/cons.h
index 5e0f30d88b2a..2766193d03f0 100644
--- a/sys/i386/i386/cons.h
+++ b/sys/i386/i386/cons.h
@@ -69,12 +69,12 @@ extern struct tty *cn_tty;
struct proc; struct uio;
/* cdevsw[] entries */
-extern int cnopen(int /*dev_t*/, int, int, struct proc *);
-extern int cnclose(int /*dev_t*/, int, int, struct proc *);
-extern int cnread(int /*dev_t*/, struct uio *, int);
-extern int cnwrite(int /*dev_t*/, struct uio *, int);
-extern int cnioctl(int /*dev_t*/, int, caddr_t, int, struct proc *);
-extern int cnselect(int /*dev_t*/, int, struct proc *);
+extern int cnopen(dev_t, int, int, struct proc *);
+extern int cnclose(dev_t, int, int, struct proc *);
+extern int cnread(dev_t, struct uio *, int);
+extern int cnwrite(dev_t, struct uio *, int);
+extern int cnioctl(dev_t, int, caddr_t, int, struct proc *);
+extern int cnselect(dev_t, int, struct proc *);
/* other kernel entry points */
extern void cninit(void);
diff --git a/sys/i386/i386/db_interface.c b/sys/i386/i386/db_interface.c
index 5f7c9d52ec6c..e79a2ae4a7ef 100644
--- a/sys/i386/i386/db_interface.c
+++ b/sys/i386/i386/db_interface.c
@@ -36,7 +36,7 @@
#include "ddb/ddb.h"
#include <sys/reboot.h>
-#include <vm/vm_statistics.h>
+/* #include <vm/vm_statistics.h> */
#include <vm/pmap.h>
#include <setjmp.h>
diff --git a/sys/i386/i386/db_trace.c b/sys/i386/i386/db_trace.c
index c7c2cd833670..d536d942db53 100644
--- a/sys/i386/i386/db_trace.c
+++ b/sys/i386/i386/db_trace.c
@@ -30,7 +30,8 @@
#include <vm/vm_param.h>
#include <vm/lock.h>
-#include <vm/vm_statistics.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
#include <machine/pmap.h>
#include "systm.h"
#include "proc.h"
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index b7847e847bf6..a75d1f1a8557 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -37,21 +37,19 @@
* $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
*/
-#include "sys/param.h"
-#include "sys/buf.h"
-#include "sys/vmmeter.h"
-#include "sys/proc.h"
-#include "sys/user.h"
-#include "sys/mbuf.h"
-#include "sys/msgbuf.h"
-#include "sys/resourcevar.h"
-#include "machine/cpu.h"
-#include "machine/trap.h"
-#include "machine/psl.h"
-#include "sys/syscall.h"
-#include "vm/vm_param.h"
-#include "vm/vm_map.h"
-#include "machine/pmap.h"
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <sys/syscall.h>
+#include <vm/vm.h>
+#include <sys/user.h>
main()
{
@@ -70,12 +68,12 @@ main()
printf("#define\tI386_CR3PAT %d\n", I386_CR3PAT);
printf("#define\tUDOT_SZ %d\n", sizeof(struct user));
- printf("#define\tP_LINK %d\n", &p->p_link);
- printf("#define\tP_RLINK %d\n", &p->p_rlink);
+ printf("#define\tP_LINK %d\n", &p->p_forw);
+ printf("#define\tP_RLINK %d\n", &p->p_back);
printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
printf("#define\tP_ADDR %d\n", &p->p_addr);
- printf("#define\tP_PRI %d\n", &p->p_pri);
+ printf("#define\tP_PRI %d\n", &p->p_priority);
printf("#define\tP_STAT %d\n", &p->p_stat);
printf("#define\tP_WCHAN %d\n", &p->p_wchan);
printf("#define\tP_FLAG %d\n", &p->p_flag);
@@ -87,10 +85,10 @@ main()
printf("#define\tV_SYSCALL %d\n", &vm->v_syscall);
printf("#define\tV_INTR %d\n", &vm->v_intr);
printf("#define\tV_SOFT %d\n", &vm->v_soft);
- printf("#define\tV_PDMA %d\n", &vm->v_pdma);
+/* printf("#define\tV_PDMA %d\n", &vm->v_pdma); */
printf("#define\tV_FAULTS %d\n", &vm->v_faults);
- printf("#define\tV_PGREC %d\n", &vm->v_pgrec);
- printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec);
+/* printf("#define\tV_PGREC %d\n", &vm->v_pgrec); */
+/* printf("#define\tV_FASTPGREC %d\n", &vm->v_fastpgrec); */
printf("#define\tUPAGES %d\n", UPAGES);
printf("#define\tHIGHPAGES %d\n", HIGHPAGES);
printf("#define\tCLSIZE %d\n", CLSIZE);
diff --git a/sys/i386/i386/locore.s b/sys/i386/i386/locore.s
index 8da843865c7b..7aa6e6bd8f78 100644
--- a/sys/i386/i386/locore.s
+++ b/sys/i386/i386/locore.s
@@ -274,7 +274,7 @@ NON_GPROF_ENTRY(btext)
movl $0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
- movl $PG_V|PG_KW,%eax /* having these bits set, */
+ movl $PG_V|PG_KW|PG_NC_PWT,%eax /* kernel R/W, valid, cache write-through */
lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
movl %ebx,_KPTphys-KERNBASE /* save in global */
fillkpt
@@ -302,7 +302,7 @@ NON_GPROF_ENTRY(btext)
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
- orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
+ orl $PG_V|PG_KW|PG_NC_PWT,%eax /* valid, kernel read/write, cache write-though */
movl %esi,%ebx /* calculate pte offset to ptd */
shrl $PGSHIFT-2,%ebx
addl %esi,%ebx /* address of page directory */
@@ -452,10 +452,26 @@ reloc_gdt:
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
+ popl %esi
+#if 0
movl $0,_PTD
+#endif
+
+ .globl __ucodesel,__udatasel
+
+ pushl $0 /* unused */
+ pushl __udatasel /* ss */
+ pushl $0 /* esp - filled in by execve() */
+ pushl $0x3200 /* eflags (ring 3, int enab) */
+ pushl __ucodesel /* cs */
+ pushl $0 /* eip - filled in by execve() */
+ subl $(12*4),%esp /* space for rest of registers */
+
+ pushl %esp /* call main with frame pointer */
call _main /* autoconfiguration, mountroot etc */
- popl %esi
+
+ addl $(13*4),%esp /* back to a frame we can return with */
/*
* now we've run main() and determined what cpu-type we are, we can
@@ -473,69 +489,16 @@ reloc_gdt:
* set up address space and stack so that we can 'return' to user mode
*/
1:
- .globl __ucodesel,__udatasel
movl __ucodesel,%eax
movl __udatasel,%ecx
- /* build outer stack frame */
- pushl %ecx /* user ss */
- pushl $USRSTACK /* user esp */
- pushl %eax /* user cs */
- pushl $0 /* user ip */
+
movl %cx,%ds
movl %cx,%es
movl %ax,%fs /* double map cs to fs */
movl %cx,%gs /* and ds to gs */
- lret /* goto user! */
-
- pushl $lretmsg1 /* "should never get here!" */
- call _panic
-lretmsg1:
- .asciz "lret: toinit\n"
+ iret /* goto user! */
-
-#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
-/*
- * Icode is copied out to process 1 and executed in user mode:
- * execve("/sbin/init", argv, envp); exit(0);
- * If the execve fails, process 1 exits and the system panics.
- */
-NON_GPROF_ENTRY(icode)
- pushl $0 /* envp for execve() */
-
-# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
- movl $argv,%eax
- subl $_icode,%eax
- pushl %eax /* argp for execve() */
-
-# pushl $init-_icode
- movl $init,%eax
- subl $_icode,%eax
- pushl %eax /* fname for execve() */
-
- pushl %eax /* dummy return address */
-
- movl $SYS_execve,%eax
- LCALL(0x7,0x0)
-
- /* exit if something botches up in the above execve() */
- pushl %eax /* execve failed, the errno will do for an */
- /* exit code because errnos are < 128 */
- pushl %eax /* dummy return address */
- movl $SYS_exit,%eax
- LCALL(0x7,0x0)
-
-init:
- .asciz "/sbin/init"
- ALIGN_DATA
-argv:
- .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
- .long eicode-_icode /* argv[1] follows icode after copyout */
- .long 0
-eicode:
-
- .globl _szicode
-_szicode:
- .long _szicode-_icode
+#define LCALL(x,y) .byte 0x9a ; .long y ; .word x
NON_GPROF_ENTRY(sigcode)
call SIGF_HANDLER(%esp)
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index eab107598527..31bc6c294ba6 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -41,23 +41,24 @@
#include "npx.h"
#include "isa.h"
-#include <stddef.h>
-#include "param.h"
-#include "systm.h"
-#include "signalvar.h"
-#include "kernel.h"
-#include "map.h"
-#include "proc.h"
-#include "user.h"
-#include "exec.h" /* for PS_STRINGS */
-#include "buf.h"
-#include "reboot.h"
-#include "conf.h"
-#include "file.h"
-#include "callout.h"
-#include "malloc.h"
-#include "mbuf.h"
-#include "msgbuf.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/sysctl.h>
#ifdef SYSVSHM
#include "sys/shm.h"
@@ -94,7 +95,7 @@ static void identifycpu(void);
static void initcpu(void);
static int test_page(int *, int);
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
const char machine[] = "PC-Class";
const char *cpu_model;
@@ -121,6 +122,7 @@ int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
+int msgbufmapped = 0; /* set when safe to use msgbuf */
extern int freebufspace;
extern char *bouncememory;
@@ -141,6 +143,12 @@ extern cyloffset;
int cpu_class;
void dumpsys __P((void));
+vm_offset_t buffer_sva, buffer_eva;
+vm_offset_t clean_sva, clean_eva;
+vm_offset_t pager_sva, pager_eva;
+int maxbkva, pager_map_size;
+
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
void
cpu_startup()
@@ -275,18 +283,19 @@ again:
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
- /*
- * Allocate a submap for buffer space allocations.
- * XXX we are NOT using buffer_map, but due to
- * the references to it we will just allocate 1 page of
- * vm (not real memory) to make things happy...
- */
- buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- /* bufpages * */NBPG, TRUE);
+ clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
+ (nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);
+
+ io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
+ pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
+ pager_map_size, TRUE);
+
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf * MAXBSIZE), TRUE);
/*
* Allocate a submap for physio
*/
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
/*
@@ -296,7 +305,7 @@ again:
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
/*
* Initialize callouts
@@ -305,7 +314,7 @@ again:
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
- printf("avail memory = %d (%d pages)\n", ptoa(vm_page_free_count), vm_page_free_count);
+ printf("avail memory = %d (%d pages)\n", ptoa(cnt.v_free_count), cnt.v_free_count);
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@@ -437,11 +446,11 @@ sendsig(catcher, sig, mask, code)
register struct proc *p = curproc;
register int *regs;
register struct sigframe *fp;
- struct sigacts *ps = p->p_sigacts;
+ struct sigacts *psp = p->p_sigacts;
int oonstack, frmtrap;
- regs = p->p_regs;
- oonstack = ps->ps_onstack;
+ regs = p->p_md.md_regs;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
/*
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
@@ -449,10 +458,12 @@ sendsig(catcher, sig, mask, code)
* will fail if the process has not already allocated
* the space with a `brk'.
*/
- if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
- fp = (struct sigframe *)(ps->ps_sigsp
- - sizeof(struct sigframe));
- ps->ps_onstack = 1;
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+ psp->ps_sigstk.ss_size - sizeof(struct sigframe));
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
} else {
fp = (struct sigframe *)(regs[tESP]
- sizeof(struct sigframe));
@@ -540,7 +551,7 @@ sigreturn(p, uap, retval)
{
register struct sigcontext *scp;
register struct sigframe *fp;
- register int *regs = p->p_regs;
+ register int *regs = p->p_md.md_regs;
int eflags;
/*
@@ -614,7 +625,10 @@ sigreturn(p, uap, retval)
if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
return(EINVAL);
- p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
p->p_sigmask = scp->sc_mask &~
(sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
regs[tEBP] = scp->sc_fp;
@@ -651,7 +665,7 @@ boot(arghowto)
for(;;);
}
howto = arghowto;
- if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
+ if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
register struct buf *bp;
int iter, nbusy;
@@ -818,13 +832,13 @@ setregs(p, entry, stack)
u_long entry;
u_long stack;
{
- p->p_regs[tEBP] = 0; /* bottom of the fp chain */
- p->p_regs[tEIP] = entry;
- p->p_regs[tESP] = stack;
- p->p_regs[tSS] = _udatasel;
- p->p_regs[tDS] = _udatasel;
- p->p_regs[tES] = _udatasel;
- p->p_regs[tCS] = _ucodesel;
+ p->p_md.md_regs[tEBP] = 0; /* bottom of the fp chain */
+ p->p_md.md_regs[tEIP] = entry;
+ p->p_md.md_regs[tESP] = stack;
+ p->p_md.md_regs[tSS] = _udatasel;
+ p->p_md.md_regs[tDS] = _udatasel;
+ p->p_md.md_regs[tES] = _udatasel;
+ p->p_md.md_regs[tCS] = _ucodesel;
p->p_addr->u_pcb.pcb_flags = 0; /* no fp at all */
load_cr0(rcr0() | CR0_TS); /* start emulating */
@@ -834,6 +848,34 @@ setregs(p, entry, stack)
}
/*
+ * machine dependent system variables.
+ */
+int
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+
+ /* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ case CPU_CONSDEV:
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
+ sizeof cn_tty->t_dev));
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+/*
* Initialize 386 and configure to run kernel
*/
@@ -1105,9 +1147,11 @@ init386(first)
r_gdt.rd_limit = sizeof(gdt) - 1;
r_gdt.rd_base = (int) gdt;
lgdt(&r_gdt);
+
r_idt.rd_limit = sizeof(idt) - 1;
r_idt.rd_base = (int) idt;
lidt(&r_idt);
+
_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
lldt(_default_ldt);
currentldt = _default_ldt;
@@ -1339,7 +1383,7 @@ _remque(element)
* The registers are in the frame; the frame is in the user area of
* the process in question; when the process is active, the registers
* are in "the kernel stack"; when it's not, they're still there, but
- * things get flipped around. So, since p->p_regs is the whole address
+ * things get flipped around. So, since p->p_md.md_regs is the whole address
* of the register set, take its offset from the kernel stack, and
* index into the user block. Don't you just *love* virtual memory?
* (I'm starting to think seymour is right...)
@@ -1348,7 +1392,7 @@ _remque(element)
int
ptrace_set_pc (struct proc *p, unsigned int addr) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eip = addr;
return 0;
@@ -1357,7 +1401,7 @@ ptrace_set_pc (struct proc *p, unsigned int addr) {
int
ptrace_single_step (struct proc *p) {
void *regs = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
((struct trapframe *)regs)->tf_eflags |= PSL_T;
return 0;
@@ -1370,7 +1414,7 @@ ptrace_single_step (struct proc *p) {
int
ptrace_getregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = fill_regs (p, &regs))
return error;
@@ -1381,7 +1425,7 @@ ptrace_getregs (struct proc *p, unsigned int *addr) {
int
ptrace_setregs (struct proc *p, unsigned int *addr) {
int error;
- struct regs regs = {0};
+ struct reg regs = {0};
if (error = copyin (addr, &regs, sizeof(regs)))
return error;
@@ -1390,11 +1434,11 @@ ptrace_setregs (struct proc *p, unsigned int *addr) {
}
int
-fill_regs(struct proc *p, struct regs *regs) {
+fill_regs(struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
regs->r_es = tp->tf_es;
@@ -1415,11 +1459,11 @@ fill_regs(struct proc *p, struct regs *regs) {
}
int
-set_regs (struct proc *p, struct regs *regs) {
+set_regs (struct proc *p, struct reg *regs) {
int error;
struct trapframe *tp;
void *ptr = (char*)p->p_addr +
- ((char*) p->p_regs - (char*) kstack);
+ ((char*) p->p_md.md_regs - (char*) kstack);
tp = ptr;
tp->tf_es = regs->r_es;
@@ -1444,6 +1488,69 @@ set_regs (struct proc *p, struct regs *regs) {
void
Debugger(const char *msg)
{
- printf("Debugger(\"%s\") called.", msg);
+ printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
+
+#include <sys/disklabel.h>
+#define b_cylin b_resid
+#define dkpart(dev) (minor(dev) & 7)
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
+{
+ struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
+ int labelsect = lp->d_partitions[0].p_offset;
+ int maxsz = p->p_size,
+ sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
+
+ /* overwriting disk label ? */
+ /* XXX should also protect bootstrap in first 8K */
+ if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
+#if LABELSECTOR != 0
+ bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
+#endif
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+
+#if defined(DOSBBSECTOR) && defined(notyet)
+ /* overwriting master boot record? */
+ if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
+ (bp->b_flags & B_READ) == 0 && wlabel == 0) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+#endif
+
+ /* beyond partition? */
+ if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
+ /* if exactly at end of disk, return an EOF */
+ if (bp->b_blkno == maxsz) {
+ bp->b_resid = bp->b_bcount;
+ return(0);
+ }
+ /* or truncate if part of it fits */
+ sz = maxsz - bp->b_blkno;
+ if (sz <= 0) {
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ bp->b_bcount = sz << DEV_BSHIFT;
+ }
+
+ /* calculate cylinder for disksort to order transfers with */
+ bp->b_pblkno = bp->b_blkno + p->p_offset;
+ bp->b_cylin = bp->b_pblkno / lp->d_secpercyl;
+ return(1);
+
+bad:
+ bp->b_flags |= B_ERROR;
+ return(-1);
+}
+
diff --git a/sys/i386/i386/math_emulate.c b/sys/i386/i386/math_emulate.c
index 1b15e6113ba3..5013123f3294 100644
--- a/sys/i386/i386/math_emulate.c
+++ b/sys/i386/i386/math_emulate.c
@@ -35,20 +35,21 @@
* hide most of the 387-specific things here.
*/
-#include "machine/cpu.h"
-#include "machine/psl.h"
-#include "machine/reg.h"
-
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "user.h"
-#include "acct.h"
-#include "kernel.h"
-#include "signal.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/signal.h>
#define __ALIGNED_TEMP_REAL 1
-#include "i386/i386/math_emu.h"
+#include "math_emu.h"
#define bswapw(x) __asm__("xchgb %%al,%%ah":"=a" (x):"0" ((short)x))
#define ST(x) (*__st((x)))
@@ -568,7 +569,7 @@ static int __regoffset[] = {
tEAX, tECX, tEDX, tEBX, tESP, tEBP, tESI, tEDI
};
-#define REG(x) (curproc->p_regs[__regoffset[(x)]])
+#define REG(x) (curproc->p_md.md_regs[__regoffset[(x)]])
static char * sib(struct trapframe * info, int mod)
{
diff --git a/sys/i386/i386/mem.c b/sys/i386/i386/mem.c
index c3899a17c74f..1b8f18747a93 100644
--- a/sys/i386/i386/mem.c
+++ b/sys/i386/i386/mem.c
@@ -45,24 +45,23 @@
* Memory special file
*/
-#include "param.h"
-#include "conf.h"
-#include "buf.h"
-#include "systm.h"
-#include "uio.h"
-#include "malloc.h"
-#include "proc.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
-#include "machine/cpu.h"
-#include "machine/psl.h"
+#include <machine/cpu.h>
+#include <machine/psl.h>
-#include "vm/vm_param.h"
-#include "vm/lock.h"
-#include "vm/vm_statistics.h"
-#include "vm/vm_prot.h"
-#include "vm/pmap.h"
+#include <vm/vm_param.h>
+#include <vm/lock.h>
+#include <vm/vm_prot.h>
+#include <vm/pmap.h>
-extern char *vmmap; /* poor name! */
+extern char *ptvmmap; /* poor name! */
/*ARGSUSED*/
int
mmclose(dev, uio, flags)
@@ -74,7 +73,7 @@ mmclose(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags &= ~PSL_IOPL;
break;
default:
@@ -93,7 +92,7 @@ mmopen(dev, uio, flags)
switch (minor(dev)) {
case 14:
- fp = (struct trapframe *)curproc->p_regs;
+ fp = (struct trapframe *)curproc->p_md.md_regs;
fp->tf_eflags |= PSL_IOPL;
break;
default:
@@ -128,25 +127,25 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
v = uio->uio_offset;
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, v,
+ pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
TRUE);
o = (int)uio->uio_offset & PGOFSET;
c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
- c = MIN(c, (u_int)(NBPG - o));
- c = MIN(c, (u_int)iov->iov_len);
- error = uiomove((caddr_t)&vmmap[o], (int)c, uio);
- pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
- (vm_offset_t)&vmmap[NBPG]);
+ c = min(c, (u_int)(NBPG - o));
+ c = min(c, (u_int)iov->iov_len);
+ error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
+ pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
+ (vm_offset_t)&ptvmmap[NBPG]);
continue;
/* minor device 1 is kernel memory */
case 1:
c = iov->iov_len;
- if (!kernacc((caddr_t)uio->uio_offset, c,
+ if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return(EFAULT);
- error = uiomove((caddr_t)uio->uio_offset, (int)c, uio);
+ error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
/* minor device 2 is EOF/RATHOLE */
@@ -167,7 +166,7 @@ mmrw(dev, uio, flags)
malloc(CLBYTES, M_TEMP, M_WAITOK);
bzero(zbuf, CLBYTES);
}
- c = MIN(iov->iov_len, CLBYTES);
+ c = min(iov->iov_len, CLBYTES);
error = uiomove(zbuf, (int)c, uio);
continue;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d5b556fff2f1..88db9dd4ed62 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -85,19 +85,19 @@
* and to when physical maps must be made correct.
*/
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "user.h"
-#include "i386/include/cpufunc.h"
-#include "i386/include/cputypes.h"
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
-#include "vm/vm_page.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
-#include "i386/isa/isa.h"
+#include <i386/include/cpufunc.h>
+#include <i386/include/cputypes.h>
+
+#include <i386/isa/isa.h>
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
@@ -149,12 +149,12 @@ static inline void *vm_get_pmap();
static inline void vm_put_pmap();
inline void pmap_use_pt();
inline void pmap_unuse_pt();
-inline pt_entry_t * const pmap_pte();
+inline pt_entry_t * pmap_pte();
static inline pv_entry_t get_pv_entry();
void pmap_alloc_pv_entry();
void pmap_clear_modify();
void i386_protection_init();
-extern vm_offset_t pager_sva, pager_eva;
+extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;
#if BSDVM_COMPAT
@@ -163,8 +163,8 @@ extern int cpu_class;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-pt_entry_t *CMAP1, *CMAP2, *mmap;
-caddr_t CADDR1, CADDR2, vmmap;
+pt_entry_t *CMAP1, *CMAP2, *ptmmap;
+caddr_t CADDR1, CADDR2, ptvmmap;
pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;
#endif
@@ -180,8 +180,8 @@ void init_pv_entries(int) ;
*/
inline pt_entry_t *
-const pmap_pte(pmap, va)
- register pmap_t pmap;
+pmap_pte(pmap, va)
+ pmap_t pmap;
vm_offset_t va;
{
@@ -374,7 +374,7 @@ pmap_bootstrap(firstaddr, loadaddr)
SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
- SYSMAP(caddr_t ,mmap ,vmmap ,1 )
+ SYSMAP(caddr_t ,ptmmap ,ptvmmap ,1 )
SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
virtual_avail = va;
#endif
@@ -530,7 +530,7 @@ static inline void
vm_put_pmap(up)
struct pmaplist *up;
{
- kmem_free(kernel_map, up, ctob(1));
+ kmem_free(kernel_map, (vm_offset_t)up, ctob(1));
}
/*
@@ -851,7 +851,7 @@ pmap_remove(pmap, sva, eva)
if (pmap_is_managed(pa)) {
if ((((int) oldpte & PG_M) && (sva < USRSTACK || sva > UPT_MAX_ADDRESS))
|| (sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) {
- if (sva < pager_sva || sva >= pager_eva) {
+ if (sva < clean_sva || sva >= clean_eva) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -941,7 +941,7 @@ pmap_remove(pmap, sva, eva)
if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
|| (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
- if (va < pager_sva || va >= pager_eva) {
+ if (va < clean_sva || va >= clean_eva ) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
}
@@ -1006,7 +1006,7 @@ pmap_remove_all(pa)
if ( (m->flags & PG_CLEAN) &&
((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
|| (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
- if (pv->pv_va < pager_sva || pv->pv_va >= pager_eva) {
+ if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) {
m->flags &= ~PG_CLEAN;
}
}
@@ -1261,7 +1261,11 @@ validate:
if (va < UPT_MIN_ADDRESS)
(int) npte |= PG_u;
else if (va < UPT_MAX_ADDRESS)
- (int) npte |= PG_u | PG_RW;
+ (int) npte |= PG_u | PG_RW | PG_NC_PWT;
+
+/*
+ printf("mapping: pa: %x, to va: %x, with pte: %x\n", pa, va, npte);
+*/
if( *pte != npte) {
*pte = npte;
@@ -1414,7 +1418,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
- *pte = (pt_entry_t) ( (int) (pa | PG_RO | PG_V | PG_u));
+ *pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u));
}
/*
@@ -1448,16 +1452,16 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
*/
if( size > object->size / 2) {
objbytes = size;
- p = (vm_page_t) queue_first(&object->memq);
- while (!queue_end(&object->memq, (queue_entry_t) p) && objbytes != 0) {
+ p = object->memq.tqh_first;
+ while ((p != NULL) && (objbytes != 0)) {
tmpoff = p->offset;
if( tmpoff < offset) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
tmpoff -= offset;
if( tmpoff >= size) {
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
continue;
}
@@ -1469,7 +1473,7 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
vm_page_unhold(p);
pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
}
- p = (vm_page_t) queue_next(&p->listq);
+ p = p->listq.tqe_next;
objbytes -= NBPG;
}
} else {
@@ -1699,13 +1703,13 @@ pmap_testbit(pa, bit)
* ptes as never modified.
*/
if (bit & PG_U ) {
- if ((pv->pv_va >= pager_sva) && (pv->pv_va < pager_eva)) {
+ if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
continue;
}
}
if (bit & PG_M ) {
if (pv->pv_va >= USRSTACK) {
- if (pv->pv_va >= pager_sva && pv->pv_va < pager_eva) {
+ if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
continue;
}
if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
@@ -1761,7 +1765,7 @@ pmap_changebit(pa, bit, setem)
* don't write protect pager mappings
*/
if (!setem && (bit == PG_RW)) {
- if (va >= pager_sva && va < pager_eva)
+ if (va >= clean_sva && va < clean_eva)
continue;
}
@@ -1869,6 +1873,10 @@ pmap_phys_address(ppn)
/*
* Miscellaneous support routines follow
*/
+/*
+ * This really just builds a table for page write enable
+ * translation.
+ */
void
i386_protection_init()
@@ -1879,12 +1887,10 @@ i386_protection_init()
for (prot = 0; prot < 8; prot++) {
switch (prot) {
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = 0;
- break;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = PG_RO;
+ *kp++ = 0;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
diff --git a/sys/i386/i386/procfs_machdep.c b/sys/i386/i386/procfs_machdep.c
index b3ad300db59e..d122fc6f3f09 100644
--- a/sys/i386/i386/procfs_machdep.c
+++ b/sys/i386/i386/procfs_machdep.c
@@ -73,7 +73,7 @@
#include <sys/vnode.h>
#include <machine/psl.h>
#include <machine/reg.h>
-/*#include <machine/frame.h>*/
+#include <machine/frame.h>
#include <miscfs/procfs/procfs.h>
int
@@ -81,15 +81,13 @@ procfs_read_regs(p, regs)
struct proc *p;
struct reg *regs;
{
- struct frame *f;
+ struct trapframe *f;
if ((p->p_flag & P_INMEM) == 0)
return (EIO);
- f = (struct frame *) p->p_md.md_regs;
- bcopy((void *) f->f_regs, (void *) regs->r_regs, sizeof(regs->r_regs));
- regs->r_pc = f->f_pc;
- regs->r_sr = f->f_sr;
+ f = (struct trapframe *) p->p_md.md_regs;
+ bcopy((void *) f, (void *) regs, sizeof(*regs));
return (0);
}
@@ -105,15 +103,13 @@ procfs_write_regs(p, regs)
struct proc *p;
struct reg *regs;
{
- struct frame *f;
+ struct trapframe *f;
if ((p->p_flag & P_INMEM) == 0)
return (EIO);
- f = (struct frame *) p->p_md.md_regs;
- bcopy((void *) regs->r_regs, (void *) f->f_regs, sizeof(f->f_regs));
- f->f_pc = regs->r_pc;
- f->f_sr = regs->r_sr;
+ f = (struct trapframe *) p->p_md.md_regs;
+ bcopy((void *) regs, (void *) f, sizeof(*regs));
return (0);
}
@@ -146,7 +142,7 @@ procfs_sstep(p)
error = procfs_read_regs(p, &r);
if (error == 0) {
- r.r_sr |= PSL_T;
+ r.r_eflags |= PSL_T;
error = procfs_write_regs(p, &r);
}
diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s
index e8082221e99c..9634069e2e8a 100644
--- a/sys/i386/i386/support.s
+++ b/sys/i386/i386/support.s
@@ -185,6 +185,7 @@ ENTRY(outsl) /* outsl(port, addr, cnt) */
* memory moves on standard DX !!!!!
*/
+ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU))
cmpl $CPUCLASS_486,_cpu_class
@@ -656,6 +657,17 @@ ENTRY(fuword)
movl $0,PCB_ONFAULT(%ecx)
ret
+/*
+ * These two routines are called from the profiling code, potentially
+ * at interrupt time. If they fail, that's okay, good things will
+ * happen later. Fail all the time for now - until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movl $-1,%eax
+ ret
+
ENTRY(fusword)
movl _curpcb,%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 4dbc672b923e..aa8b5ba5d9a3 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -52,9 +52,9 @@
/*
* The following primitives manipulate the run queues.
* _whichqs tells which of the 32 queues _qs
- * have processes in them. Setrq puts processes into queues, Remrq
+ * have processes in them. setrunqueue puts processes into queues, Remrq
* removes them from queues. The running process is on no queue,
- * other processes are on a queue related to p->p_pri, divided by 4
+ * other processes are on a queue related to p->p_priority, divided by 4
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
@@ -72,11 +72,11 @@ _want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
- * Setrq(p)
+ * setrunqueue(p)
*
* Call should be made at spl6(), and p->p_stat should be SRUN
*/
-ENTRY(setrq)
+ENTRY(setrunqueue)
movl 4(%esp),%eax
cmpl $0,P_RLINK(%eax) /* should not be on q already */
je set1
@@ -95,7 +95,7 @@ set1:
movl %eax,P_LINK(%ecx)
ret
-set2: .asciz "setrq"
+set2: .asciz "setrunqueue"
/*
* Remrq(p)
@@ -131,10 +131,10 @@ rem2:
ret
rem3: .asciz "remrq"
-sw0: .asciz "swtch"
+sw0: .asciz "cpu_switch"
/*
- * When no processes are on the runq, swtch() branches to _idle
+ * When no processes are on the runq, cpu_switch() branches to _idle
* to wait for something to come ready.
*/
ALIGN_TEXT
@@ -146,8 +146,8 @@ _idle:
sti
/*
- * XXX callers of swtch() do a bogus splclock(). Locking should
- * be left to swtch().
+ * XXX callers of cpu_switch() do a bogus splclock(). Locking should
+ * be left to cpu_switch().
*/
movl $SWI_AST_MASK,_cpl
testl $~SWI_AST_MASK,_ipending
@@ -169,9 +169,9 @@ badsw:
/*NOTREACHED*/
/*
- * Swtch()
+ * cpu_switch()
*/
-ENTRY(swtch)
+ENTRY(cpu_switch)
incl _cnt+V_SWTCH
/* switch to new process. first, save context as needed */
@@ -340,7 +340,7 @@ ENTRY(swtch_to_inactive)
/*
* savectx(pcb, altreturn)
* Update pcb, saving current processor state and arranging
- * for alternate return ala longjmp in swtch if altreturn is true.
+ * for alternate return ala longjmp in cpu_switch if altreturn is true.
*/
ENTRY(savectx)
movl 4(%esp),%ecx
diff --git a/sys/i386/i386/symbols.raw b/sys/i386/i386/symbols.raw
index 0f5bafd83885..084f9b6eb27d 100644
--- a/sys/i386/i386/symbols.raw
+++ b/sys/i386/i386/symbols.raw
@@ -39,9 +39,9 @@
_swapmap
#vmstat
_cp_time
- _rate
+# _rate
_total
- _sum
+# _sum
# _rectime
# _pgintime
_dk_xfer
@@ -61,13 +61,13 @@
_rawcb
_Sysmap
_ifnet
- _rthost
- _rtnet
+# _rthost
+# _rtnet
_icmpstat
_filehead
_nfiles
- _rthashsize
- _radix_node_head
+# _rthashsize
+# _radix_node_head
#routed
_ifnet
#rwho
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index 9bb38e1e60d3..382416f06e3f 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -41,32 +41,33 @@
* 386 Trap and System call handleing
*/
-#include "isa.h"
-#include "npx.h"
-#include "ddb.h"
-#include "machine/cpu.h"
-#include "machine/psl.h"
-#include "machine/reg.h"
-#include "machine/eflags.h"
-
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "user.h"
-#include "acct.h"
-#include "kernel.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/syscall.h>
#ifdef KTRACE
-#include "ktrace.h"
+#include <sys/ktrace.h>
#endif
-#include "vm/vm_param.h"
-#include "vm/pmap.h"
-#include "vm/vm_map.h"
-#include "vm/vm_user.h"
-#include "vm/vm_page.h"
-#include "sys/vmmeter.h"
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <machine/eflags.h>
+
+#include <machine/trap.h>
-#include "machine/trap.h"
+#include "isa.h"
+#include "npx.h"
+#include "ddb.h"
#ifdef __GNUC__
@@ -84,7 +85,7 @@ void write_gs __P((/* promoted u_short */ int gs));
#endif /* __GNUC__ */
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
struct sysent sysent[];
int nsysent;
@@ -139,7 +140,7 @@ trap(frame)
{
register int i;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks = 0;
int ucode, type, code, eva, fault_type;
frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
@@ -177,10 +178,10 @@ copyfault:
return;
}
- syst = p->p_stime;
if (ISPL(frame.tf_cs) == SEL_UPL) {
type |= T_USER;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
+ sticks = p->p_sticks;
}
skiptoswitch:
@@ -210,9 +211,9 @@ skiptoswitch:
case T_ASTFLT|T_USER: /* Allow process switch */
astoff();
cnt.v_soft++;
- if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
+ if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
- p->p_flag &= ~SOWEUPC;
+ p->p_flag &= ~P_OWEUPC;
}
goto out;
@@ -284,7 +285,6 @@ skiptoswitch:
else
ftype = VM_PROT_READ;
- oldflags = p->p_flag;
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
@@ -294,7 +294,7 @@ skiptoswitch:
* Keep swapout from messing with us during this
* critical time.
*/
- p->p_flag |= SLOCK;
+ ++p->p_lock;
/*
* Grow the stack if necessary
@@ -303,8 +303,7 @@ skiptoswitch:
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
goto nogo;
}
}
@@ -332,13 +331,10 @@ skiptoswitch:
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
- if( ptepg->flags & PG_CLEAN)
- vm_page_free(ptepg);
+ vm_page_free(ptepg);
}
-
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
} else {
/*
* Since we know that kernel virtual address addresses
@@ -482,32 +478,29 @@ nogo:
out:
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
- * swtch()'ed, we might not be on the queue indicated by
+ * If that happened after we setrunqueue ourselves but before we
+ * mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -518,7 +511,7 @@ out:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
}
/*
@@ -546,14 +539,12 @@ int trapwrite(addr)
p = curproc;
vm = p->p_vmspace;
- oldflags = p->p_flag;
- p->p_flag |= SLOCK;
+ ++p->p_lock;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
return (1);
}
}
@@ -579,8 +570,7 @@ int trapwrite(addr)
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
if (rv != KERN_SUCCESS)
return 1;
@@ -603,31 +593,45 @@ syscall(frame)
register int i;
register struct sysent *callp;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks;
int error, opc;
int args[8], rval[2];
- int code;
+ u_int code;
#ifdef lint
r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
- syst = p->p_stime;
+ sticks = p->p_sticks;
if (ISPL(frame.tf_cs) != SEL_UPL)
panic("syscall");
code = frame.tf_eax;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
params = (caddr_t)frame.tf_esp + sizeof (int) ;
/*
* Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
*/
opc = frame.tf_eip - 7;
- if (code == 0) {
+ /*
+ * Need to check if this is a 32 bit or 64 bit syscall.
+ */
+ if (code == SYS_syscall) {
+ /*
+ * Code is first argument, followed by actual args.
+ */
code = fuword(params);
params += sizeof (int);
+ } else if (code == SYS___syscall) {
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+ params += sizeof(quad_t);
}
- if (code < 0 || code >= nsysent)
+
+ if (code >= nsysent)
callp = &sysent[0];
else
callp = &sysent[code];
@@ -672,32 +676,29 @@ done:
*/
p = curproc;
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
+ * If that happened after we setrunqueue ourselves but before we
* swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -708,21 +709,9 @@ done:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
-#ifdef DIAGNOSTICx
-{ extern int _udatasel, _ucodesel;
- if (frame.tf_ss != _udatasel)
- printf("ss %x call %d\n", frame.tf_ss, code);
- if ((frame.tf_cs&0xffff) != _ucodesel)
- printf("cs %x call %d\n", frame.tf_cs, code);
- if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
- printf("eip %x call %d\n", frame.tf_eip, code);
- frame.tf_eip = 0;
- }
-}
-#endif
}
diff --git a/sys/i386/i386/tsc.c b/sys/i386/i386/tsc.c
index d338cd5c5783..e40079a40bea 100644
--- a/sys/i386/i386/tsc.c
+++ b/sys/i386/i386/tsc.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index a892c29764de..a7c4e596dfa5 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -42,27 +42,21 @@
*/
#include "npx.h"
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "malloc.h"
-#include "buf.h"
-#include "user.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/user.h>
-#include "../include/cpu.h"
+#include <machine/cpu.h>
-#include "vm/vm.h"
-#include "vm/vm_kern.h"
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
#define b_cylin b_resid
-#define MAXCLSTATS 256
-int clstats[MAXCLSTATS];
-int rqstats[MAXCLSTATS];
-
-
-#ifndef NOBOUNCE
-
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
int bouncepages, bpwait;
@@ -75,7 +69,8 @@ unsigned *bounceallocarray;
int bouncefree;
#define SIXTEENMEG (4096*4096)
-#define MAXBKVA 1024
+#define MAXBKVA 512
+int maxbkva=MAXBKVA*NBPG;
/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
@@ -258,6 +253,7 @@ int count;
pa = vm_bounce_page_find(1);
pmap_kenter(kva + i * NBPG, pa);
}
+ pmap_update();
return kva;
}
@@ -309,8 +305,8 @@ vm_bounce_alloc(bp)
bp->b_bufsize = bp->b_bcount;
}
- vastart = (vm_offset_t) bp->b_un.b_addr;
- vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;
+ vastart = (vm_offset_t) bp->b_data;
+ vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;
vapstart = i386_trunc_page(vastart);
vapend = i386_round_page(vaend);
@@ -369,11 +365,11 @@ vm_bounce_alloc(bp)
/*
* save the original buffer kva
*/
- bp->b_savekva = bp->b_un.b_addr;
+ bp->b_savekva = bp->b_data;
/*
* put our new kva into the buffer (offset by original offset)
*/
- bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
+ bp->b_data = (caddr_t) (((vm_offset_t) kva) |
((vm_offset_t) bp->b_savekva & (NBPG - 1)));
return;
}
@@ -403,7 +399,7 @@ vm_bounce_free(bp)
return;
origkva = (vm_offset_t) bp->b_savekva;
- bouncekva = (vm_offset_t) bp->b_un.b_addr;
+ bouncekva = (vm_offset_t) bp->b_data;
vastart = bouncekva;
vaend = bouncekva + bp->b_bufsize;
@@ -449,17 +445,15 @@ vm_bounce_free(bp)
/*
* add the old kva into the "to free" list
*/
- bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
+ bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
- bp->b_un.b_addr = bp->b_savekva;
+ bp->b_data = bp->b_savekva;
bp->b_savekva = 0;
bp->b_flags &= ~B_BOUNCE;
return;
}
-#endif /* NOBOUNCE */
-
/*
* init the bounce buffer system
*/
@@ -468,10 +462,8 @@ vm_bounce_init()
{
vm_offset_t minaddr, maxaddr;
- io_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
kvasfreecnt = 0;
-#ifndef NOBOUNCE
if (bouncepages == 0)
return;
@@ -487,11 +479,10 @@ vm_bounce_init()
bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
-#endif
-
}
+#ifdef BROKEN_IN_44
static void
cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
vm_offset_t kvanew;
@@ -827,6 +818,7 @@ nocluster:
ap->av_forw = bp;
bp->av_back = ap;
}
+#endif
/*
* quick version of vm_fault
@@ -881,7 +873,7 @@ cpu_fork(p1, p2)
offset = mvesp() - (int)kstack;
bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
(unsigned) ctob(UPAGES) - offset);
- p2->p_regs = p1->p_regs;
+ p2->p_md.md_regs = p1->p_md.md_regs;
/*
* Wire top of address space of child to it's kstack.
@@ -930,7 +922,7 @@ cpu_fork(p1, p2)
*
* Next, we assign a dummy context to be written over by swtch,
* calling it to send this process off to oblivion.
- * [The nullpcb allows us to minimize cost in swtch() by not having
+ * [The nullpcb allows us to minimize cost in mi_switch() by not having
* a special case].
*/
struct proc *swtch_to_inactive();
@@ -952,8 +944,7 @@ cpu_exit(p)
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
p->p_addr = (struct user *) &nullpcb;
- splclock();
- swtch();
+ mi_switch();
/* NOTREACHED */
}
#else
@@ -965,9 +956,8 @@ cpu_exit(p)
#if NNPX > 0
npxexit(p);
#endif /* NNPX */
- splclock();
- curproc = 0;
- swtch();
+ curproc = p;
+ mi_switch();
/*
* This is to shutup the compiler, and if swtch() failed I suppose
* this would be a good thing. This keeps gcc happy because panic
@@ -990,6 +980,21 @@ cpu_wait(p) struct proc *p; {
#endif
/*
+ * Dump the machine specific header information at the start of a core dump.
+ */
+int
+cpu_coredump(p, vp, cred)
+ struct proc *p;
+ struct vnode *vp;
+ struct ucred *cred;
+{
+
+ return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+ (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+ p));
+}
+
+/*
* Set a red zone in the kernel stack after the u. area.
*/
void
@@ -1008,6 +1013,43 @@ setredzone(pte, vaddr)
}
/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+
+void
+pagemove(from, to, size)
+ register caddr_t from, to;
+ int size;
+{
+ register vm_offset_t pa;
+
+ if (size & CLOFSET)
+ panic("pagemove");
+ while (size > 0) {
+ pa = pmap_kextract((vm_offset_t)from);
+ if (pa == 0)
+ panic("pagemove 2");
+ if (pmap_kextract((vm_offset_t)to) != 0)
+ panic("pagemove 3");
+ pmap_remove(kernel_pmap,
+ (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
+ pmap_kenter( (vm_offset_t)to, pa);
+ from += PAGE_SIZE;
+ to += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pmap_update();
+}
+
+/*
* Convert kernel VA to physical address
*/
u_long
@@ -1036,22 +1078,49 @@ vmapbuf(bp)
{
register int npf;
register caddr_t addr;
- register long flags = bp->b_flags;
- struct proc *p;
int off;
vm_offset_t kva;
- register vm_offset_t pa;
+ vm_offset_t pa, lastv, v;
- if ((flags & B_PHYS) == 0)
+ if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE) {
+
+/*
+ * make sure that the pde is valid and held
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ vm_fault_quick(v, VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+
+/*
+ * do the vm_fault if needed, do the copy-on-write thing when
+ * reading stuff off device into memory.
+ */
+ vm_fault_quick(addr,
+ (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+/*
+ * hold the data page
+ */
+ vm_page_hold(PHYS_TO_VM_PAGE(pa));
+ }
+
addr = bp->b_saveaddr = bp->b_un.b_addr;
off = (int)addr & PGOFSET;
- p = bp->b_proc;
npf = btoc(round_page(bp->b_bufsize + off));
kva = kmem_alloc_wait(phys_map, ctob(npf));
bp->b_un.b_addr = (caddr_t) (kva + off);
while (npf--) {
- pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
if (pa == 0)
panic("vmapbuf: null page frame");
pmap_kenter(kva, trunc_page(pa));
@@ -1071,7 +1140,7 @@ vunmapbuf(bp)
{
register int npf;
register caddr_t addr = bp->b_un.b_addr;
- vm_offset_t kva;
+ vm_offset_t kva,va,v,lastv,pa;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
@@ -1080,6 +1149,32 @@ vunmapbuf(bp)
kmem_free_wakeup(phys_map, kva, ctob(npf));
bp->b_un.b_addr = bp->b_saveaddr;
bp->b_saveaddr = NULL;
+
+
+/*
+ * unhold the pde, and data pages
+ */
+ lastv = 0;
+ for (addr = (caddr_t)trunc_page(bp->b_data);
+ addr < bp->b_data + bp->b_bufsize;
+ addr += NBPG) {
+
+ /*
+ * release the data page
+ */
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+
+ /*
+ * and unhold the page table
+ */
+ v = trunc_page(((vm_offset_t)vtopte(addr)));
+ if (v != lastv) {
+ pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
+ vm_page_unhold(PHYS_TO_VM_PAGE(pa));
+ lastv = v;
+ }
+ }
}
/*
@@ -1104,7 +1199,7 @@ cpu_reset() {
int
grow(p, sp)
struct proc *p;
- int sp;
+ u_int sp;
{
unsigned int nss;
caddr_t v;
diff --git a/sys/i386/include/_limits.h b/sys/i386/include/_limits.h
index 5aed8709f850..e507313a046b 100644
--- a/sys/i386/include/_limits.h
+++ b/sys/i386/include/_limits.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1988 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,40 +30,60 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
- * $Id: limits.h,v 1.5 1994/02/26 00:56:02 ache Exp $
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
*/
#ifndef _MACHINE_LIMITS_H_
#define _MACHINE_LIMITS_H_ 1
#define CHAR_BIT 8 /* number of bits in a char */
-#define MB_LEN_MAX 6 /* allow 21-bit UTF2 */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
-#define SCHAR_MIN (-0x7f-1) /* max value for a signed char */
-#define SCHAR_MAX 0x7f /* min value for a signed char */
-#define UCHAR_MAX 0xff /* max value for an unsigned char */
-#define CHAR_MAX 0x7f /* max value for a char */
-#define CHAR_MIN (-0x7f-1) /* min value for a char */
+#define CLK_TCK 128 /* ticks per second */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 127 /* min value for a signed char */
+#define SCHAR_MIN (-128) /* max value for a signed char */
+
+#define UCHAR_MAX 255 /* max value for an unsigned char */
+#define CHAR_MAX 127 /* max value for a char */
+#define CHAR_MIN (-128) /* min value for a char */
-#define USHRT_MAX 0xffff /* max value for an unsigned short */
-#define SHRT_MAX 0x7fff /* max value for a short */
-#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+#define USHRT_MAX 65535 /* max value for an unsigned short */
+#define SHRT_MAX 32767 /* max value for a short */
+#define SHRT_MIN (-32768) /* min value for a short */
#define UINT_MAX 0xffffffff /* max value for an unsigned int */
-#define INT_MAX 0x7fffffff /* max value for an int */
-#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+#define INT_MAX 2147483647 /* max value for an int */
+#define INT_MIN (-2147483647-1) /* min value for an int */
#define ULONG_MAX 0xffffffff /* max value for an unsigned long */
-#define LONG_MAX 0x7fffffff /* max value for a long */
-#define LONG_MIN (-0x7fffffff-1) /* min value for a long */
+#define LONG_MAX 2147483647 /* max value for a long */
+#define LONG_MIN (-2147483647-1) /* min value for a long */
-#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
-#define CLK_TCK 128 /* ticks per second */
-#define UQUAD_MAX 0xffffffffffffffffLL /* max unsigned quad */
-#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */
-#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */
-#endif
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ANSI_SOURCE */
#endif /* _MACHINE_LIMITS_H_ */
diff --git a/sys/i386/include/ansi.h b/sys/i386/include/ansi.h
index 1665aadef1dc..0ef060a3b5bb 100644
--- a/sys/i386/include/ansi.h
+++ b/sys/i386/include/ansi.h
@@ -1,6 +1,6 @@
/*-
- * Copyright (c) 1990 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,8 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)ansi.h 7.1 (Berkeley) 3/9/91
- * $Id: ansi.h,v 1.2 1993/10/16 14:39:05 rgrimes Exp $
+ * @(#)ansi.h 8.2 (Berkeley) 1/4/94
*/
#ifndef _ANSI_H_
@@ -41,18 +40,17 @@
* Types which are fundamental to the implementation and may appear in
* more than one standard header are defined here. Standard headers
* then use:
- * #ifdef _SIZE_T_
- * typedef _SIZE_T_ size_t;
- * #undef _SIZE_T_
+ * #ifdef _BSD_SIZE_T_
+ * typedef _BSD_SIZE_T_ size_t;
+ * #undef _BSD_SIZE_T_
* #endif
- *
- * Thanks, ANSI!
*/
-#define _CLOCK_T_ unsigned long /* clock() */
-#define _PTRDIFF_T_ int /* ptr1 - ptr2 */
-#define _SIZE_T_ unsigned int /* sizeof() */
-#define _TIME_T_ long /* time() */
-#define _VA_LIST_ char * /* va_list */
+#define _BSD_CLOCK_T_ unsigned long /* clock() */
+#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned int /* sizeof() */
+#define _BSD_SSIZE_T_ int /* byte count or error */
+#define _BSD_TIME_T_ long /* time() */
+#define _BSD_VA_LIST_ char * /* va_list */
/*
* Runes (wchar_t) is declared to be an ``int'' instead of the more natural
@@ -63,13 +61,12 @@
* chosen over a long is that the is*() and to*() routines take ints (says
* ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you
* lose a bit of ANSI conformance, but your programs will still work.
- *
+ *
* Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t
* and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains
* defined for ctype.h.
*/
-#define _BSD_WCHAR_T_ int /* wchar_t */
-#define _BSD_RUNE_T_ int /* rune_t */
-
+#define _BSD_WCHAR_T_ int /* wchar_t */
+#define _BSD_RUNE_T_ int /* rune_t */
#endif /* _ANSI_H_ */
diff --git a/sys/i386/include/cpu.h b/sys/i386/include/cpu.h
index a2df0235ab2b..2216d71822c3 100644
--- a/sys/i386/include/cpu.h
+++ b/sys/i386/include/cpu.h
@@ -45,6 +45,7 @@
*/
#include "machine/frame.h"
#include "machine/segments.h"
+#include <machine/spl.h>
/*
* definitions of cpu-dependent requirements
@@ -53,20 +54,16 @@
#undef COPY_SIGCODE /* don't copy sigcode above user stack in exec */
#define cpu_exec(p) /* nothing */
+#define cpu_swapin(p) /* nothing */
+#define cpu_setstack(p, ap) (p)->p_md.md_regs = ap
+#define cpu_set_init_frame(p, fp) (p)->p_md.md_regs = fp
-/*
- * Arguments to hardclock, softclock and gatherstats
- * encapsulate the previous machine state in an opaque
- * clockframe; for now, use generic intrframe.
- * XXX softclock() has been fixed. It never needed a
- * whole frame, only a usermode flag, at least on this
- * machine. Fix the rest.
- */
-typedef struct intrframe clockframe;
+#define CLKF_USERMODE(framep) (ISPL((framep)->cf_cs) == SEL_UPL)
+#define CLKF_INTR(framep) (0)
+#define CLKF_BASEPRI(framep) (((framep)->cf_ppl & ~SWI_AST_MASK) == 0)
+#define CLKF_PC(framep) ((framep)->cf_eip)
-#define CLKF_USERMODE(framep) (ISPL((framep)->if_cs) == SEL_UPL)
-#define CLKF_BASEPRI(framep) (((framep)->if_ppl & ~SWI_AST_MASK) == 0)
-#define CLKF_PC(framep) ((framep)->if_eip)
+#define resettodr() /* no todr to set */
/*
* Preempt the current process if in interrupt from user mode,
@@ -79,7 +76,7 @@ typedef struct intrframe clockframe;
* interrupt. On tahoe, request an ast to send us through trap(),
* marking the proc as needing a profiling tick.
*/
-#define profile_tick(p, framep) { (p)->p_flag |= SOWEUPC; aston(); }
+#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
/*
* Notify the current process (p) that it has a signal pending,
@@ -100,6 +97,17 @@ struct cpu_nameclass {
int cpu_class;
};
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_CONSDEV 1 /* dev_t: console terminal device */
+#define CPU_MAXID 2 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+}
+
#ifdef KERNEL
extern int want_resched; /* resched was called */
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 3c2dcc9aa90d..729a5c06ccf8 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -71,145 +71,6 @@ tlbflush()
__asm __volatile("movl %%cr3, %%eax; movl %%eax, %%cr3" : : : "ax");
}
-static inline
-int
-imin(a, b)
- int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-int
-imax(a, b)
- int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned int
-min(a, b)
- unsigned int a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned int
-max(a, b)
- unsigned int a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-long
-lmin(a, b)
- long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-long
-lmax(a, b)
- long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-unsigned long
-ulmin(a, b)
- unsigned long a, b;
-{
-
- return (a < b ? a : b);
-}
-
-static inline
-unsigned long
-ulmax(a, b)
- unsigned long a, b;
-{
-
- return (a > b ? a : b);
-}
-
-static inline
-int
-ffs(mask)
- register long mask;
-{
- register int bit;
-
- if (!mask)
- return(0);
- for (bit = 1;; ++bit) {
- if (mask&0x01)
- return(bit);
- mask >>= 1;
- }
-}
-
-static inline
-int
-bcmp(v1, v2, len)
- void *v1, *v2;
- register unsigned len;
-{
- register u_char *s1 = v1, *s2 = v2;
-
- while (len--)
- if (*s1++ != *s2++)
- return (1);
- return (0);
-}
-
-static inline
-size_t
-strlen(s1)
- register const char *s1;
-{
- register size_t len;
-
- for (len = 0; *s1++ != '\0'; len++)
- ;
- return (len);
-}
-
-struct quehead {
- struct quehead *qh_link;
- struct quehead *qh_rlink;
-};
-
-static inline void
-insque(void *a, void *b)
-{
- register struct quehead *element = a, *head = b;
- element->qh_link = head->qh_link;
- head->qh_link = (struct quehead *)element;
- element->qh_rlink = (struct quehead *)head;
- ((struct quehead *)(element->qh_link))->qh_rlink
- = (struct quehead *)element;
-}
-
-static inline void
-remque(void *a)
-{
- register struct quehead *element = a;
- ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
- ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
- element->qh_rlink = 0;
-}
-
#else /* not __GNUC__ */
extern void insque __P((void *, void *));
extern void remque __P((void *));
diff --git a/sys/i386/include/exec.h b/sys/i386/include/exec.h
index eb587a4ed44d..f63ec49cc8c1 100644
--- a/sys/i386/include/exec.h
+++ b/sys/i386/include/exec.h
@@ -33,51 +33,96 @@
* @(#)exec.h 8.1 (Berkeley) 6/11/93
*/
-/* Size of a page in an object file. */
+#ifndef _EXEC_H_
+#define _EXEC_H_
+
#define __LDPGSZ 4096
/* Valid magic number check. */
#define N_BADMAG(ex) \
- ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \
- (ex).a_magic != ZMAGIC)
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
+#define N_ALIGN(ex,x) \
+ (N_GETMAGIC(ex) == ZMAGIC || N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC || N_GETMAGIC_NET(ex) == QMAGIC ? \
+ ((x) + __LDPGSZ - 1) & ~(__LDPGSZ - 1) : (x))
/* Address of the bottom of the text segment. */
-#define N_TXTADDR(X) 0
+#define N_TXTADDR(ex) \
+ ((N_GETMAGIC(ex) == OMAGIC || N_GETMAGIC(ex) == NMAGIC || \
+ N_GETMAGIC(ex) == ZMAGIC) ? 0 : __LDPGSZ)
/* Address of the bottom of the data segment. */
#define N_DATADDR(ex) \
- (N_TXTADDR(ex) + ((ex).a_magic == OMAGIC ? (ex).a_text \
- : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTADDR(ex) + (ex).a_text)
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_midmag & 0xffff )
+#define N_GETMID(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETMID_NET(ex) : \
+ ((ex).a_midmag >> 16) & 0x03ff )
+#define N_GETFLAG(ex) \
+ ( (N_GETMAGIC_NET(ex) == ZMAGIC) ? N_GETFLAG_NET(ex) : \
+ ((ex).a_midmag >> 26) & 0x3f )
+#define N_SETMAGIC(ex,mag,mid,flag) \
+ ( (ex).a_midmag = (((flag) & 0x3f) <<26) | (((mid) & 0x03ff) << 16) | \
+ ((mag) & 0xffff) )
+
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_midmag) & 0xffff)
+#define N_GETMID_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 16) & 0x03ff)
+#define N_GETFLAG_NET(ex) \
+ ((ntohl((ex).a_midmag) >> 26) & 0x3f)
+#define N_SETMAGIC_NET(ex,mag,mid,flag) \
+ ( (ex).a_midmag = htonl( (((flag)&0x3f)<<26) | (((mid)&0x03ff)<<16) | \
+ (((mag)&0xffff)) ) )
/* Text segment offset. */
#define N_TXTOFF(ex) \
- ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec))
+ (N_GETMAGIC(ex) == ZMAGIC ? __LDPGSZ : (N_GETMAGIC(ex) == QMAGIC || \
+ N_GETMAGIC_NET(ex) == ZMAGIC) ? 0 : sizeof(struct exec))
/* Data segment offset. */
#define N_DATOFF(ex) \
- (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \
- __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1))))
+ N_ALIGN(ex, N_TXTOFF(ex) + (ex).a_text)
+
+/* Relocation table offset. */
+#define N_RELOFF(ex) \
+ N_ALIGN(ex, N_DATOFF(ex) + (ex).a_data)
/* Symbol table offset. */
#define N_SYMOFF(ex) \
- (N_TXTOFF(ex) + (ex).a_text + (ex).a_data + (ex).a_trsize + \
- (ex).a_drsize)
+ (N_RELOFF(ex) + (ex).a_trsize + (ex).a_drsize)
/* String table offset. */
#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms)
-/* Description of the object file header (a.out format). */
+/*
+ * Header prepended to each a.out file.
+ * only manipulate the a_midmag field via the
+ * N_SETMAGIC/N_GET{MAGIC,MID,FLAG} macros in a.out.h
+ */
+
struct exec {
-#define OMAGIC 0407 /* old impure format */
-#define NMAGIC 0410 /* read-only text */
-#define ZMAGIC 0413 /* demand load format */
- long a_magic; /* magic number */
-
- u_long a_text; /* text segment size */
- u_long a_data; /* initialized data size */
- u_long a_bss; /* uninitialized data size */
- u_long a_syms; /* symbol table size */
- u_long a_entry; /* entry point */
- u_long a_trsize; /* text relocation size */
- u_long a_drsize; /* data relocation size */
+unsigned long a_midmag; /* htonl(flags<<26 | mid<<16 | magic) */
+unsigned long a_text; /* text segment size */
+unsigned long a_data; /* initialized data size */
+unsigned long a_bss; /* uninitialized data size */
+unsigned long a_syms; /* symbol table size */
+unsigned long a_entry; /* entry point */
+unsigned long a_trsize; /* text relocation size */
+unsigned long a_drsize; /* data relocation size */
};
+#define a_magic a_midmag /* XXX Hack to work with current kern_execve.c */
+
+/* a_magic */
+#define OMAGIC 0407 /* old impure format */
+#define NMAGIC 0410 /* read-only text */
+#define ZMAGIC 0413 /* demand load format */
+#define QMAGIC 0314 /* "compact" demand load format */
+
+#endif /* !_EXEC_H_ */
diff --git a/sys/i386/include/frame.h b/sys/i386/include/frame.h
index 05bf26504620..db2993e019fb 100644
--- a/sys/i386/include/frame.h
+++ b/sys/i386/include/frame.h
@@ -100,6 +100,32 @@ struct intrframe {
int if_ss;
};
+/* frame of clock (same as interrupt frame) */
+
+struct clockframe {
+ int cf_vec;
+ int cf_ppl;
+ int cf_es;
+ int cf_ds;
+ int cf_edi;
+ int cf_esi;
+ int cf_ebp;
+ int :32;
+ int cf_ebx;
+ int cf_edx;
+ int cf_ecx;
+ int cf_eax;
+ int :32; /* for compat with trap frame - trapno */
+ int :32; /* for compat with trap frame - err */
+ /* below portion defined in 386 hardware */
+ int cf_eip;
+ int cf_cs;
+ int cf_eflags;
+ /* below only when transitting rings (e.g. user to kernel) */
+ int cf_esp;
+ int cf_ss;
+};
+
/*
* Signal frame
*/
diff --git a/sys/i386/include/limits.h b/sys/i386/include/limits.h
index 5aed8709f850..e507313a046b 100644
--- a/sys/i386/include/limits.h
+++ b/sys/i386/include/limits.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1988 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,40 +30,60 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
- * $Id: limits.h,v 1.5 1994/02/26 00:56:02 ache Exp $
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
*/
#ifndef _MACHINE_LIMITS_H_
#define _MACHINE_LIMITS_H_ 1
#define CHAR_BIT 8 /* number of bits in a char */
-#define MB_LEN_MAX 6 /* allow 21-bit UTF2 */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
-#define SCHAR_MIN (-0x7f-1) /* max value for a signed char */
-#define SCHAR_MAX 0x7f /* min value for a signed char */
-#define UCHAR_MAX 0xff /* max value for an unsigned char */
-#define CHAR_MAX 0x7f /* max value for a char */
-#define CHAR_MIN (-0x7f-1) /* min value for a char */
+#define CLK_TCK 128 /* ticks per second */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 127 /* min value for a signed char */
+#define SCHAR_MIN (-128) /* max value for a signed char */
+
+#define UCHAR_MAX 255 /* max value for an unsigned char */
+#define CHAR_MAX 127 /* max value for a char */
+#define CHAR_MIN (-128) /* min value for a char */
-#define USHRT_MAX 0xffff /* max value for an unsigned short */
-#define SHRT_MAX 0x7fff /* max value for a short */
-#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+#define USHRT_MAX 65535 /* max value for an unsigned short */
+#define SHRT_MAX 32767 /* max value for a short */
+#define SHRT_MIN (-32768) /* min value for a short */
#define UINT_MAX 0xffffffff /* max value for an unsigned int */
-#define INT_MAX 0x7fffffff /* max value for an int */
-#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+#define INT_MAX 2147483647 /* max value for an int */
+#define INT_MIN (-2147483647-1) /* min value for an int */
#define ULONG_MAX 0xffffffff /* max value for an unsigned long */
-#define LONG_MAX 0x7fffffff /* max value for a long */
-#define LONG_MIN (-0x7fffffff-1) /* min value for a long */
+#define LONG_MAX 2147483647 /* max value for a long */
+#define LONG_MIN (-2147483647-1) /* min value for a long */
-#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
-#define CLK_TCK 128 /* ticks per second */
-#define UQUAD_MAX 0xffffffffffffffffLL /* max unsigned quad */
-#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */
-#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */
-#endif
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ANSI_SOURCE */
#endif /* _MACHINE_LIMITS_H_ */
diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h
index b847afc93f0a..ab8ad9c230ff 100644
--- a/sys/i386/include/param.h
+++ b/sys/i386/include/param.h
@@ -152,6 +152,7 @@
*/
#define trunc_page(x) ((unsigned)(x) & ~(NBPG-1))
#define round_page(x) ((((unsigned)(x)) + NBPG - 1) & ~(NBPG-1))
+
#define atop(x) ((unsigned)(x) >> PG_SHIFT)
#define ptoa(x) ((unsigned)(x) << PG_SHIFT)
diff --git a/sys/i386/include/pcb.h b/sys/i386/include/pcb.h
index a7a29dfdbb85..990e5f90bf7c 100644
--- a/sys/i386/include/pcb.h
+++ b/sys/i386/include/pcb.h
@@ -79,6 +79,13 @@ struct pcb {
int pcb_cmap2; /* XXX temporary PTE - will prefault instead */
};
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the i386: ???
+ */
+struct md_coredump {
+};
+
#ifdef KERNEL
extern struct pcb *curpcb; /* our current running pcb */
#endif
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 74f002d4f391..7ddcebd0fd37 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -48,75 +48,8 @@
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
-#include "vm/vm_prot.h"
-/*
- * 386 page table entry and page table directory
- * W.Jolitz, 8/89
- */
-struct pde
-{
-unsigned int
- pd_v:1, /* valid bit */
- pd_prot:2, /* access control */
- pd_mbz1:2, /* reserved, must be zero */
- pd_u:1, /* hardware maintained 'used' bit */
- :1, /* not used */
- pd_mbz2:2, /* reserved, must be zero */
- :3, /* reserved for software */
- pd_pfnum:20; /* physical page frame number of pte's*/
-};
-
-#define PD_MASK 0xffc00000UL /* page directory address bits */
-#define PT_MASK 0x003ff000UL /* page table address bits */
-#define PD_SHIFT 22 /* page directory address shift */
-#define PG_SHIFT 12 /* page table address shift */
-
-struct pte
-{
-unsigned int
- pg_v:1, /* valid bit */
- pg_prot:2, /* access control */
- pg_mbz1:2, /* reserved, must be zero */
- pg_u:1, /* hardware maintained 'used' bit */
- pg_m:1, /* hardware maintained modified bit */
- pg_mbz2:2, /* reserved, must be zero */
- pg_w:1, /* software, wired down page */
- :1, /* software (unused) */
- pg_nc:1, /* 'uncacheable page' bit */
- pg_pfnum:20; /* physical page frame number */
-};
-
-#define PG_V 0x00000001
-#define PG_RO 0x00000000
-#define PG_RW 0x00000002
-#define PG_u 0x00000004
-#define PG_PROT 0x00000006 /* all protection bits . */
-#define PG_W 0x00000200
-#define PG_N 0x00000800 /* Non-cacheable */
-#define PG_M 0x00000040
-#define PG_U 0x00000020
-#define PG_FRAME 0xfffff000UL
-
-#define PG_NOACC 0
-#define PG_KR 0x00000000
-#define PG_KW 0x00000002
-#define PG_URKR 0x00000004
-#define PG_URKW 0x00000004
-#define PG_UW 0x00000006
-
-/* Garbage for current bastardized pager that assumes a hp300 */
-#define PG_NV 0
-#define PG_CI 0
-
-/*
- * Page Protection Exception bits
- */
-#define PGEX_P 0x01 /* Protection violation vs. not present */
-#define PGEX_W 0x02 /* during a Write cycle */
-#define PGEX_U 0x04 /* access from User mode (UPL) */
+#include <machine/pte.h>
-/* typedef struct pde pd_entry_t; */ /* page directory entry */
-/* typedef struct pte pt_entry_t; */ /* Mach page table entry */
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
@@ -129,7 +62,7 @@ typedef unsigned int *pt_entry_t;
* given to the user (NUPDE)
*/
#ifndef NKPT
-#define NKPT 15 /* actual number of kernel pte's */
+#define NKPT 24 /* actual number of kernel pte's */
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of kpte's */
@@ -159,7 +92,6 @@ typedef unsigned int *pt_entry_t;
#ifdef KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
-extern pt_entry_t *Sysmap;
extern int IdlePTD; /* physical address of "Idle" state directory */
#endif
diff --git a/sys/i386/include/proc.h b/sys/i386/include/proc.h
index 1b9e4a2adebc..92de3af87dcf 100644
--- a/sys/i386/include/proc.h
+++ b/sys/i386/include/proc.h
@@ -42,9 +42,7 @@
*/
struct mdproc {
int md_flags; /* machine-dependent flags */
-#ifdef notyet
- int *p_regs; /* registers on current frame */
-#endif
+ int *md_regs; /* registers on current frame */
};
/* md_flags */
diff --git a/sys/i386/include/pte.h b/sys/i386/include/pte.h
index 227a8aeb5dda..feb74a4d3f99 100644
--- a/sys/i386/include/pte.h
+++ b/sys/i386/include/pte.h
@@ -82,14 +82,19 @@ unsigned int
#define PD_MASK 0xffc00000 /* page directory address bits */
#define PD_SHIFT 22 /* page directory address bits */
+#define PT_MASK 0x003ff000UL /* page table address bits */
+#define PG_SHIFT 12 /* page table address shift */
#define PG_V 0x00000001
+#define PG_RW 0x00000002
+#define PG_u 0x00000004
#define PG_PROT 0x00000006 /* all protection bits . */
#define PG_NC_PWT 0x00000008 /* page cache write through */
#define PG_NC_PCD 0x00000010 /* page cache disable */
#define PG_N 0x00000018 /* Non-cacheable */
#define PG_U 0x00000020 /* page was accessed */
#define PG_M 0x00000040 /* page was modified */
+#define PG_W 0x00000200
#define PG_FRAME 0xfffff000
#define PG_NOACC 0
diff --git a/sys/i386/include/reg.h b/sys/i386/include/reg.h
index d20f8d0c85e1..2a1f06106b0c 100644
--- a/sys/i386/include/reg.h
+++ b/sys/i386/include/reg.h
@@ -74,23 +74,33 @@
* use whichver order, defined above, is correct, so that it
* is all invisible to the user.
*/
-struct regs {
+struct reg {
unsigned int r_es;
unsigned int r_ds;
unsigned int r_edi;
unsigned int r_esi;
unsigned int r_ebp;
+ unsigned int r_isp;
unsigned int r_ebx;
unsigned int r_edx;
unsigned int r_ecx;
unsigned int r_eax;
+ unsigned int r_trapno;
+ unsigned int r_err;
unsigned int r_eip;
unsigned int r_cs;
unsigned int r_eflags;
unsigned int r_esp;
unsigned int r_ss;
- unsigned int r_fs;
- unsigned int r_gs;
+};
+
+/*
+ * Register set accessible via /proc/$pid/fpreg
+ */
+struct fpreg {
+#if 0
+ int fpr_xxx; /* not implemented */
+#endif
};
#endif /* _MACHINE_REG_H_ */
diff --git a/sys/i386/include/signal.h b/sys/i386/include/signal.h
index 98793f2081b1..16cbef22265e 100644
--- a/sys/i386/include/signal.h
+++ b/sys/i386/include/signal.h
@@ -51,11 +51,25 @@ typedef int sig_atomic_t;
* a non-standard exit is performed.
*/
struct sigcontext {
- int sc_onstack; /* sigstack state to restore */
- int sc_mask; /* signal mask to restore */
- int sc_sp; /* sp to restore */
- int sc_fp; /* fp to restore */
- int sc_ap; /* ap to restore */
- int sc_pc; /* pc to restore */
- int sc_ps; /* psl to restore */
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore */
+ int sc_esp; /* machine state */
+ int sc_ebp;
+ int sc_isp;
+ int sc_eip;
+ int sc_efl;
+ int sc_es;
+ int sc_ds;
+ int sc_cs;
+ int sc_ss;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+# define sc_sp sc_esp
+# define sc_fp sc_ebp
+# define sc_pc sc_eip
+# define sc_ps sc_efl
};
diff --git a/sys/i386/include/spl.h b/sys/i386/include/spl.h
index 0be93644a463..c916df36da25 100644
--- a/sys/i386/include/spl.h
+++ b/sys/i386/include/spl.h
@@ -1,7 +1,7 @@
#ifndef _MACHINE_IPL_H_
#define _MACHINE_IPL_H_
-#include "machine/../isa/ipl.h" /* XXX "machine" means cpu for i386 */
+#include <machine/ipl.h> /* XXX "machine" means cpu for i386 */
/*
* Software interrupt bit numbers in priority order. The priority only
@@ -75,12 +75,15 @@ static __inline int name(void) \
GENSPL(splbio, cpl |= bio_imask)
GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splhigh, cpl = HWI_MASK | SWI_MASK)
+GENSPL(splstatclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splimp, cpl |= net_imask)
GENSPL(splnet, cpl |= SWI_NET_MASK)
GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
GENSPL(spltty, cpl |= tty_imask)
+#define splnone() spl0()
+
static __inline void
spl0(void)
{
diff --git a/sys/i386/include/stdarg.h b/sys/i386/include/stdarg.h
index 91dab8bbfabb..1110b667083b 100644
--- a/sys/i386/include/stdarg.h
+++ b/sys/i386/include/stdarg.h
@@ -1,6 +1,6 @@
/*-
- * Copyright (c) 1991 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,15 +30,20 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)stdarg.h 7.2 (Berkeley) 5/4/91
- * $Id: stdarg.h,v 1.2 1993/10/16 14:39:34 rgrimes Exp $
+ * @(#)stdarg.h 8.1 (Berkeley) 6/10/93
*/
-#ifndef _MACHINE_STDARG_H_
-#define _MACHINE_STDARG_H_ 1
+#ifndef _STDARG_H_
+#define _STDARG_H_
typedef char *va_list;
+#define __va_promote(type) \
+ (((sizeof(type) + sizeof(int) - 1) / sizeof(int)) * sizeof(int))
+
+#define va_start(ap, last) \
+ (ap = ((char *)&(last) + __va_promote(last)))
+
#ifdef KERNEL
#define va_arg(ap, type) \
((type *)(ap += sizeof(type)))[-1]
@@ -50,9 +55,4 @@ typedef char *va_list;
#define va_end(ap)
-#define __va_promote(type) \
- (((sizeof(type) + sizeof(int) - 1) / sizeof(int)) * sizeof(int))
-
-#define va_start(ap, last) \
- (ap = ((char *)&(last) + __va_promote(last)))
-#endif /* _MACHINE_STDARG_H_ */
+#endif /* !_STDARG_H_ */
diff --git a/sys/i386/include/types.h b/sys/i386/include/types.h
index 118290cc23c0..dcbb29c08fe9 100644
--- a/sys/i386/include/types.h
+++ b/sys/i386/include/types.h
@@ -1,6 +1,6 @@
/*-
- * Copyright (c) 1990 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,13 +30,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)types.h 7.5 (Berkeley) 3/9/91
- * $Id$
+ * @(#)types.h 8.3 (Berkeley) 1/5/94
*/
#ifndef _MACHTYPES_H_
#define _MACHTYPES_H_
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
typedef struct _physadr {
int r[1];
} *physadr;
@@ -44,8 +44,22 @@ typedef struct _physadr {
typedef struct label_t {
int val[6];
} label_t;
+#endif
-typedef u_long vm_offset_t;
-typedef u_long vm_size_t;
+typedef unsigned long vm_offset_t;
+typedef unsigned long vm_size_t;
+
+/*
+ * Basic integral types. Omit the typedef if
+ * not possible for a machine/compiler combination.
+ */
+typedef __signed char int8_t;
+typedef unsigned char u_int8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+typedef long long int64_t;
+typedef unsigned long long u_int64_t;
#endif /* _MACHTYPES_H_ */
diff --git a/sys/i386/include/vmparam.h b/sys/i386/include/vmparam.h
index df901267202a..05218ad6f781 100644
--- a/sys/i386/include/vmparam.h
+++ b/sys/i386/include/vmparam.h
@@ -174,20 +174,6 @@
#define KLSDIST 3 /* klusters advance/retard for seq. fifo */
/*
- * Paging thresholds (see vm_sched.c).
- * Strategy of 1/19/85:
- * lotsfree is 512k bytes, but at most 1/4 of memory
- * desfree is 200k bytes, but at most 1/8 of memory
- * minfree is 64k bytes, but at most 1/2 of desfree
- */
-#define LOTSFREE (512 * 1024)
-#define LOTSFREEFRACT 4
-#define DESFREE (200 * 1024)
-#define DESFREEFRACT 8
-#define MINFREE (64 * 1024)
-#define MINFREEFRACT 2
-
-/*
* There are two clock hands, initially separated by HANDSPREAD bytes
* (but at most all of user memory). The amount of time to reclaim
* a page once the pageout process examines it increases with this
diff --git a/sys/i386/isa/aha1542.c b/sys/i386/isa/aha1542.c
index 84c5e57a965a..670f212352ea 100644
--- a/sys/i386/isa/aha1542.c
+++ b/sys/i386/isa/aha1542.c
@@ -43,6 +43,10 @@
#define NAHA 1
#endif /*KERNEL */
+#ifndef NetBSD
+typedef timeout_func_t timeout_t;
+#endif
+
/************************** board definitions *******************************/
/*
@@ -310,7 +314,7 @@ void aha_done();
int ahaattach();
int ahaintr();
int32 aha_scsi_cmd();
-void aha_timeout(caddr_t, int);
+void aha_timeout(caddr_t);
void ahaminphys();
u_int32 aha_adapter_info();
@@ -684,7 +688,7 @@ ahaintr(unit)
#endif /*AHADEBUG */
}
if (ccb) {
- untimeout(aha_timeout, (caddr_t)ccb);
+ untimeout((timeout_t)aha_timeout, (caddr_t)ccb);
aha_done(unit, ccb);
}
aha->aha_mbx.mbi[i].stat = AHA_MBI_FREE;
@@ -1213,7 +1217,7 @@ aha_scsi_cmd(xs)
bcopy(xs->cmd, &ccb->scsi_cmd, ccb->scsi_cmd_length);
if (!(flags & SCSI_NOMASK)) {
s = splbio(); /* stop instant timeouts */
- timeout(aha_timeout, (caddr_t)ccb, (xs->timeout * hz) / 1000);
+ timeout((timeout_t)aha_timeout, (caddr_t)ccb, (xs->timeout * hz) / 1000);
aha_startmbx(ccb->mbx);
/*
* Usually return SUCCESSFULLY QUEUED
@@ -1268,13 +1272,13 @@ aha_poll(unit, xs, ccb)
* clock is not running yet by taking out the
* clock queue entry it makes
*/
- aha_timeout((caddr_t)ccb, 0);
+ aha_timeout((caddr_t)ccb);
/*
* because we are polling,
* take out the timeout entry aha_timeout made
*/
- untimeout(aha_timeout, (caddr_t)ccb);
+ untimeout((timeout_t)aha_timeout, (caddr_t)ccb);
count = 2000;
while (count) {
/*
@@ -1296,7 +1300,7 @@ aha_poll(unit, xs, ccb)
* Notice that this time there is no
* clock queue entry to remove
*/
- aha_timeout((caddr_t)ccb, 0);
+ aha_timeout((caddr_t)ccb);
}
}
if (xs->error)
@@ -1432,7 +1436,7 @@ aha_bus_speed_check(unit, speed)
#endif /*TUNE_1542*/
void
-aha_timeout(caddr_t arg1, int arg2)
+aha_timeout(caddr_t arg1)
{
struct aha_ccb * ccb = (struct aha_ccb *)arg1;
int unit;
@@ -1468,7 +1472,7 @@ aha_timeout(caddr_t arg1, int arg2)
printf("\n");
aha_abortmbx(ccb->mbx);
/* 4 secs for the abort */
- timeout(aha_timeout, (caddr_t)ccb, 4 * hz);
+ timeout((timeout_t)aha_timeout, (caddr_t)ccb, 4 * hz);
ccb->flags = CCB_ABORTED;
} splx(s);
}
diff --git a/sys/i386/isa/aha1742.c b/sys/i386/isa/aha1742.c
index 95c0aeda16aa..27bce5f7c056 100644
--- a/sys/i386/isa/aha1742.c
+++ b/sys/i386/isa/aha1742.c
@@ -274,7 +274,7 @@ int ahb_attach();
int ahb_init __P((int unit));
int ahbintr();
int32 ahb_scsi_cmd();
-void ahb_timeout(caddr_t, int);
+void ahb_timeout(caddr_t);
void ahb_done();
struct ecb *cheat;
void ahb_free_ecb();
@@ -993,7 +993,7 @@ ahb_scsi_cmd(xs)
if (!(flags & SCSI_NOMASK)) {
s = splbio();
ahb_send_immed(unit, xs->sc_link->target, AHB_TARG_RESET);
- timeout(ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
splx(s);
return (SUCCESSFULLY_QUEUED);
} else {
@@ -1122,7 +1122,7 @@ ahb_scsi_cmd(xs)
if (!(flags & SCSI_NOMASK)) {
s = splbio();
ahb_send_mbox(unit, OP_START_ECB, xs->sc_link->target, ecb);
- timeout(ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, (xs->timeout * hz) / 1000);
splx(s);
SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_sent\n"));
return (SUCCESSFULLY_QUEUED);
@@ -1152,7 +1152,7 @@ ahb_scsi_cmd(xs)
}
void
-ahb_timeout(caddr_t arg1, int arg2)
+ahb_timeout(caddr_t arg1)
{
struct ecb * ecb = (struct ecb *)arg1;
int unit;
@@ -1199,7 +1199,7 @@ ahb_timeout(caddr_t arg1, int arg2)
printf("\n");
ahb_send_mbox(unit, OP_ABORT_ECB, ecb->xs->sc_link->target, ecb);
/* 2 secs for the abort */
- timeout(ahb_timeout, (caddr_t)ecb, 2 * hz);
+ timeout((timeout_t)ahb_timeout, (caddr_t)ecb, 2 * hz);
ecb->flags = ECB_ABORTED;
}
splx(s);
diff --git a/sys/i386/isa/bt742a.c b/sys/i386/isa/bt742a.c
index ac24e813a06d..58b53f91b51b 100644
--- a/sys/i386/isa/bt742a.c
+++ b/sys/i386/isa/bt742a.c
@@ -342,7 +342,7 @@ int btprobe();
int btattach();
int btintr();
int32 bt_scsi_cmd();
-void bt_timeout(caddr_t, int);
+void bt_timeout(caddr_t);
void bt_inquire_setup_information();
void bt_done();
void btminphys();
@@ -1440,7 +1440,7 @@ bt_poll(unit, xs, ccb)
}
void
-bt_timeout(caddr_t arg1, int arg2)
+bt_timeout(caddr_t arg1)
{
struct bt_ccb * ccb = (struct bt_ccb *)arg1;
int unit;
diff --git a/sys/i386/isa/clock.c b/sys/i386/isa/clock.c
index d338cd5c5783..e40079a40bea 100644
--- a/sys/i386/isa/clock.c
+++ b/sys/i386/isa/clock.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/i386/isa/fd.c b/sys/i386/isa/fd.c
index d05c3612b67f..259d451b7b5f 100644
--- a/sys/i386/isa/fd.c
+++ b/sys/i386/isa/fd.c
@@ -199,7 +199,7 @@ int fd_debug = 1;
static void fdstart(fdcu_t);
void fdintr(fdcu_t);
-static void fd_turnoff(caddr_t, int);
+static void fd_turnoff(caddr_t);
/****************************************************************************/
/* autoconfiguration stuff */
@@ -347,7 +347,7 @@ fdattach(dev)
break;
}
- fd_turnoff((caddr_t)fdu, 0);
+ fd_turnoff((caddr_t)fdu);
hdr = 1;
}
printf("\n");
@@ -417,7 +417,7 @@ void fdstrategy(struct buf *bp)
dp = &(fdc->head);
s = splbio();
disksort(dp, bp);
- untimeout(fd_turnoff, (caddr_t)fdu); /* a good idea */
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu); /* a good idea */
fdstart(fdcu);
splx(s);
return;
@@ -463,7 +463,7 @@ set_motor(fdcu, fdu, reset)
}
static void
-fd_turnoff(caddr_t arg1, int arg2)
+fd_turnoff(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -476,7 +476,7 @@ fd_turnoff(caddr_t arg1, int arg2)
}
void
-fd_motor_on(caddr_t arg1, int arg2)
+fd_motor_on(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -502,7 +502,7 @@ fd_turnon(fdu)
{
fd_turnon1(fdu);
fd->flags |= FD_MOTOR_WAIT;
- timeout(fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
+ timeout((timeout_func_t)fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
}
}
@@ -685,7 +685,7 @@ fdstart(fdcu)
}
static void
-fd_timeout(caddr_t arg1, int arg2)
+fd_timeout(caddr_t arg1)
{
fdcu_t fdcu = (fdcu_t)arg1;
fdu_t fdu = fdc_data[fdcu].fdu;
@@ -809,8 +809,8 @@ fdstate(fdcu, fdc)
TRACE1("fd%d",fdu);
TRACE1("[%s]",fdstates[fdc->state]);
TRACE1("(0x%x)",fd->flags);
- untimeout(fd_turnoff, (caddr_t)fdu);
- timeout(fd_turnoff, (caddr_t)fdu, 4 * hz);
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu);
+ timeout((timeout_func_t)fd_turnoff, (caddr_t)fdu, 4 * hz);
switch (fdc->state)
{
case DEVIDLE:
@@ -855,12 +855,12 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,bp->b_cylin * fd->ft->steptrac);
fd->track = -2;
fdc->state = SEEKWAIT;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case SEEKWAIT:
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 50);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 50);
fdc->state = SEEKCOMPLETE;
return(0); /* will return later */
break;
@@ -925,10 +925,10 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,fd->ft->datalen); /* data length */
}
fdc->state = IOCOMPLETE;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case IOCOMPLETE: /* IO DONE, post-analyze */
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
for(i=0;i<7;i++)
{
fdc->status[i] = in_fdc(fdcu);
@@ -964,7 +964,7 @@ fdstate(fdcu, fdc)
/* ALL DONE */
fd->skip = 0;
bp->b_resid = 0;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
biodone(bp);
fdc->fd = (fd_p) 0;
fdc->fdu = -1;
@@ -991,7 +991,7 @@ fdstate(fdcu, fdc)
return(0); /* will return later */
case RECALWAIT:
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 30);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 30);
fdc->state = RECALCOMPLETE;
return(0); /* will return later */
case RECALCOMPLETE:
@@ -1079,7 +1079,7 @@ retrier(fdcu)
bp->b_flags |= B_ERROR;
bp->b_error = EIO;
bp->b_resid = bp->b_bcount - fdc->fd->skip;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
fdc->fd->skip = 0;
biodone(bp);
fdc->state = FINDWORK;
diff --git a/sys/i386/isa/ft.c b/sys/i386/isa/ft.c
index 4022b20e8263..5f4f62c33d03 100644
--- a/sys/i386/isa/ft.c
+++ b/sys/i386/isa/ft.c
@@ -261,7 +261,7 @@ void ftstrategy(struct buf *);
int ftioctl(dev_t, int, caddr_t, int, struct proc *);
int ftdump(dev_t);
int ftsize(dev_t);
-static void ft_timeout(caddr_t arg1, int arg2);
+static void ft_timeout(caddr_t arg1);
void async_cmd(ftu_t);
void async_req(ftu_t, int);
void async_read(ftu_t, int);
@@ -1150,7 +1150,7 @@ huh_what:
/*
* Interrupt timeout routine.
*/
-static void ft_timeout(caddr_t arg1, int arg2)
+static void ft_timeout(caddr_t arg1)
{
int s;
ftu_t ftu = (ftu_t)arg1;
diff --git a/sys/i386/isa/icu.s b/sys/i386/isa/icu.s
index b8bf1a823755..c71380607c67 100644
--- a/sys/i386/isa/icu.s
+++ b/sys/i386/isa/icu.s
@@ -296,24 +296,18 @@ swi_clock:
ALIGN_TEXT
swi_net:
MCOUNT
-#if 0
- DONET(NETISR_RAW, _rawintr,netisr_raw)
-#endif
#ifdef INET
+ DONET(NETISR_ARP, _arpintr,netisr_ip)
DONET(NETISR_IP, _ipintr,netisr_ip)
#endif
-#ifdef IMP
- DONET(NETISR_IMP, _impintr,netisr_imp)
-#endif
#ifdef NS
DONET(NETISR_NS, _nsintr,netisr_ns)
#endif
#ifdef ISO
DONET(NETISR_ISO, _clnlintr,netisr_iso)
#endif
-#ifdef CCITT
- DONET(NETISR_X25, _pkintr, 29)
- DONET(NETISR_HDLC, _hdintr, 30)
+#ifdef CCITT
+ DONET(NETISR_CCITT, _ccittintr, 29)
#endif
ret
diff --git a/sys/i386/isa/if_ed.c b/sys/i386/isa/if_ed.c
index 84047e20441c..26e3ebdfb397 100644
--- a/sys/i386/isa/if_ed.c
+++ b/sys/i386/isa/if_ed.c
@@ -63,7 +63,7 @@
/* For backwards compatibility */
#ifndef IFF_ALTPHYS
-#define IFF_ALTPHYS IFF_LLC0
+#define IFF_ALTPHYS IFF_LINK0
#endif
/*
@@ -113,7 +113,7 @@ void edintr(int);
int ed_ioctl(struct ifnet *, int, caddr_t);
int ed_probe(struct isa_device *);
void ed_start(struct ifnet *);
-void ed_reset(int, int);
+void ed_reset(int);
void ed_watchdog(int);
static void ed_get_packet(struct ed_softc *, char *, int /*u_short*/);
@@ -1090,9 +1090,8 @@ ed_attach(isa_dev)
* Reset interface.
*/
void
-ed_reset(unit, uban)
+ed_reset(unit)
int unit;
- int uban; /* XXX */
{
int s;
@@ -1147,7 +1146,7 @@ ed_watchdog(unit)
log(LOG_ERR, "ed%d: device timeout\n", unit);
++sc->arpcom.ac_if.if_oerrors;
- ed_reset(unit, 0);
+ ed_reset(unit);
}
/*
@@ -1501,7 +1500,7 @@ outloop:
len = ed_pio_write_mbufs(sc, m, buffer);
}
- sc->txb_len[sc->txb_new] = MAX(len, ETHER_MIN_LEN);
+ sc->txb_len[sc->txb_new] = max(len, ETHER_MIN_LEN);
sc->txb_inuse++;
@@ -1652,7 +1651,7 @@ ed_rint(unit)
"ed%d: NIC memory corrupt - invalid packet length %d\n",
unit, len);
++sc->arpcom.ac_if.if_ierrors;
- ed_reset(unit, 0);
+ ed_reset(unit);
return;
}
@@ -1817,7 +1816,7 @@ edintr(unit)
/*
* Stop/reset/re-init NIC
*/
- ed_reset(unit, 0);
+ ed_reset(unit);
} else {
/*
@@ -2388,7 +2387,7 @@ ed_pio_write_mbufs(sc,m,dst)
if (!maxwait) {
log(LOG_WARNING, "ed%d: remote transmit DMA failed to complete\n",
sc->arpcom.ac_if.if_unit);
- ed_reset(sc->arpcom.ac_if.if_unit, 0);
+ ed_reset(sc->arpcom.ac_if.if_unit);
}
return(len);
diff --git a/sys/i386/isa/if_ep.c b/sys/i386/isa/if_ep.c
index e8d31129dc96..b47f829389b0 100644
--- a/sys/i386/isa/if_ep.c
+++ b/sys/i386/isa/if_ep.c
@@ -99,7 +99,7 @@ static int epioctl __P((struct ifnet * ifp, int, caddr_t));
void epinit __P((int));
void epintr __P((int));
-void epmbuffill __P((caddr_t, int));
+void epmbuffill __P((caddr_t));
void epmbufempty __P((struct ep_softc *));
void epread __P((struct ep_softc *));
void epreset __P((int));
@@ -953,9 +953,8 @@ is_eeprom_busy(is)
}
void
-epmbuffill(sp, dummy_arg)
+epmbuffill(sp)
caddr_t sp;
- int dummy_arg;
{
struct ep_softc *sc = (struct ep_softc *)sp;
int s, i;
diff --git a/sys/i386/isa/if_ie.c b/sys/i386/isa/if_ie.c
index 95095bdb515b..cb6b96a6ab1b 100644
--- a/sys/i386/isa/if_ie.c
+++ b/sys/i386/isa/if_ie.c
@@ -1320,9 +1320,8 @@ iereset(unit, dummy)
* This is called if we time out.
*/
static void
-chan_attn_timeout(rock, arg2)
+chan_attn_timeout(rock)
caddr_t rock;
- int arg2;
{
*(int *)rock = 1;
}
diff --git a/sys/i386/isa/isa.c b/sys/i386/isa/isa.c
index b0d84efc01a7..32e59e754a3f 100644
--- a/sys/i386/isa/isa.c
+++ b/sys/i386/isa/isa.c
@@ -59,6 +59,7 @@
#include "rlist.h"
#include "machine/segments.h"
#include "vm/vm.h"
+#include <machine/spl.h>
#include "i386/isa/isa_device.h"
#include "i386/isa/isa.h"
#include "i386/isa/icu.h"
diff --git a/sys/i386/isa/mcd.c b/sys/i386/isa/mcd.c
index 7309f42d4229..683b0e1798c2 100644
--- a/sys/i386/isa/mcd.c
+++ b/sys/i386/isa/mcd.c
@@ -387,7 +387,7 @@ static void mcd_start(int unit)
if ((bp = qp->b_actf) != 0) {
/* block found to process, dequeue */
/*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/
- qp->b_actf = bp->av_forw;
+ qp->b_actf = bp->b_actf;
splx(s);
} else {
/* nothing to do */
diff --git a/sys/i386/isa/mse.c b/sys/i386/isa/mse.c
index eebe163268ec..5f80b21cc8f8 100644
--- a/sys/i386/isa/mse.c
+++ b/sys/i386/isa/mse.c
@@ -71,7 +71,7 @@ struct isa_driver msedriver = {
struct mse_softc {
int sc_flags;
int sc_mousetype;
- pid_t sc_selp;
+ struct selinfo sc_selp;
u_int sc_port;
void (*sc_enablemouse)();
void (*sc_disablemouse)();
@@ -316,7 +316,7 @@ mseselect(dev, rw, p)
* Since this is an exclusive open device, any previous proc.
* pointer is trash now, so we can just assign it.
*/
- sc->sc_selp = p->p_pid;
+ selrecord(p, &sc->sc_selp);
splx(s);
return (0);
}
@@ -350,11 +350,7 @@ mseintr(unit)
sc->sc_flags &= ~MSESC_WANT;
wakeup((caddr_t)sc);
}
- if (sc->sc_selp) {
- p = sc->sc_selp;
- sc->sc_selp = (pid_t)0;
- selwakeup(p, 0);
- }
+ selwakeup(&sc->sc_selp);
}
}
diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c
index 00424bf3aa14..17400bdbb29a 100644
--- a/sys/i386/isa/npx.c
+++ b/sys/i386/isa/npx.c
@@ -438,7 +438,7 @@ npxintr(frame)
* in doreti, and the frame for that could easily be set up
* just before it is used).
*/
- curproc->p_regs = (int *)&frame.if_es;
+ curproc->p_md.md_regs = (int *)&frame.if_es;
#ifdef notyet
/*
* Encode the appropriate code for detailed information on
diff --git a/sys/i386/isa/sio.c b/sys/i386/isa/sio.c
index ad09f7a3c1d4..9bdb8c410db8 100644
--- a/sys/i386/isa/sio.c
+++ b/sys/i386/isa/sio.c
@@ -36,6 +36,7 @@
#include "sio.h"
#if NSIO > 0
+#define DONT_MALLOC_TTYS
/*
* Serial driver, based on 386BSD-0.1 com driver.
* Mostly rewritten to use pseudo-DMA.
@@ -61,9 +62,11 @@
#define FAKE_DCD(unit) ((unit) == comconsole)
#define LOTS_OF_EVENTS 64 /* helps separate urgent events from input */
+#define RBSZ 1024
#define RB_I_HIGH_WATER (RBSZ - 2 * RS_IBUFSIZE)
#define RB_I_LOW_WATER ((RBSZ - 2 * RS_IBUFSIZE) * 7 / 8)
#define RS_IBUFSIZE 256
+#define RS_OBUFSIZE 256
#define TTY_BI TTY_FE /* XXX */
#define TTY_OE TTY_PE /* XXX */
@@ -221,39 +224,39 @@ struct com_s {
#define CE_INPUT_OFFSET RS_IBUFSIZE
u_char ibuf1[2 * RS_IBUFSIZE];
u_char ibuf2[2 * RS_IBUFSIZE];
+ u_char obuf[RS_OBUFSIZE];
};
/*
* The public functions in the com module ought to be declared in a com-driver
* system header.
*/
-#define Dev_t int /* promoted dev_t */
/* Interrupt handling entry points. */
void siointr __P((int unit));
void siopoll __P((void));
/* Device switch entry points. */
-int sioopen __P((Dev_t dev, int oflags, int devtype,
+int sioopen __P((dev_t dev, int oflags, int devtype,
struct proc *p));
-int sioclose __P((Dev_t dev, int fflag, int devtype,
+int sioclose __P((dev_t dev, int fflag, int devtype,
struct proc *p));
-int sioread __P((Dev_t dev, struct uio *uio, int ioflag));
-int siowrite __P((Dev_t dev, struct uio *uio, int ioflag));
-int sioioctl __P((Dev_t dev, int cmd, caddr_t data,
+int sioread __P((dev_t dev, struct uio *uio, int ioflag));
+int siowrite __P((dev_t dev, struct uio *uio, int ioflag));
+int sioioctl __P((dev_t dev, int cmd, caddr_t data,
int fflag, struct proc *p));
void siostop __P((struct tty *tp, int rw));
#define sioreset noreset
-int sioselect __P((Dev_t dev, int rw, struct proc *p));
+int sioselect __P((dev_t dev, int rw, struct proc *p));
#define siommap nommap
#define siostrategy nostrategy
/* Console device entry points. */
-int siocngetc __P((Dev_t dev));
+int siocngetc __P((dev_t dev));
struct consdev;
void siocninit __P((struct consdev *cp));
void siocnprobe __P((struct consdev *cp));
-void siocnputc __P((Dev_t dev, int c));
+void siocnputc __P((dev_t dev, int c));
static int sioattach __P((struct isa_device *dev));
static void comflush __P((struct com_s *com));
@@ -288,15 +291,9 @@ static int comconsole = -1;
static speed_t comdefaultrate = TTYDEF_SPEED;
static u_int com_events; /* input chars + weighted output completions */
static int commajor;
-#ifdef DONT_MALLOC_TTYS
-#define TB_OUT(tp) (&(tp)->t_out)
-#define TB_RAW(tp) (&(tp)->t_raw)
+#define TB_OUT(tp) (&(tp)->t_outq)
+#define TB_RAW(tp) (&(tp)->t_rawq)
struct tty sio_tty[NSIO];
-#else
-#define TB_OUT(tp) ((tp)->t_out)
-#define TB_RAW(tp) ((tp)->t_raw)
-struct tty *sio_tty[NSIO];
-#endif
extern struct tty *constty;
extern int tk_nin; /* XXX */
extern int tk_rawcc; /* XXX */
@@ -787,7 +784,7 @@ bidir_open_top:
}
out:
if (error == 0)
- error = (*linesw[tp->t_line].l_open)(dev, tp, 0);
+ error = (*linesw[tp->t_line].l_open)(dev, tp);
splx(s);
#ifdef COM_BIDIR
@@ -1129,7 +1126,7 @@ sioioctl(dev, cmd, data, flag, p)
com = com_addr(UNIT(dev));
tp = com->tp;
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return (error);
error = ttioctl(tp, cmd, data, flag);
@@ -1222,6 +1219,7 @@ sioioctl(dev, cmd, data, flag, p)
*(int *)data = com->bidir;
break;
#endif /* COM_BIDIR */
+#if 0
case TIOCMSDTRWAIT:
/* must be root since the wait applies to following logins */
error = suser(p->p_ucred, &p->p_acflag);
@@ -1240,6 +1238,7 @@ sioioctl(dev, cmd, data, flag, p)
case TIOCMGDTRWAIT:
*(int *)data = com->dtr_wait;
break;
+#endif
#ifdef TIOCTIMESTAMP
case TIOCTIMESTAMP:
com->do_timestamp = TRUE;
@@ -1259,16 +1258,14 @@ static void
comflush(com)
struct com_s *com;
{
- struct ringb *rbp;
+ struct clist *rbp;
disable_intr();
if (com->state & CS_ODONE)
com_events -= LOTS_OF_EVENTS;
com->state &= ~(CS_ODONE | CS_BUSY);
enable_intr();
- rbp = TB_OUT(com->tp);
- rbp->rb_hd += com->ocount;
- rbp->rb_hd = RB_ROLLOVER(rbp, rbp->rb_hd);
+ while( getc( TB_OUT(com->tp)) != -1);
com->ocount = 0;
com->tp->t_state &= ~TS_BUSY;
}
@@ -1343,8 +1340,8 @@ repeat:
* CS_RTS_IFLOW is on.
*/
if ((com->state & CS_RTS_IFLOW)
- && !(com->mcr_image & MCR_RTS)
- && !(tp->t_state & TS_RTS_IFLOW))
+ && !(com->mcr_image & MCR_RTS) /*
+ && !(tp->t_state & TS_RTS_IFLOW) */)
outb(com->modem_ctl_port,
com->mcr_image |= MCR_RTS);
enable_intr();
@@ -1404,16 +1401,17 @@ repeat:
if (incc <= 0 || !(tp->t_state & TS_ISOPEN))
continue;
if (com->state & CS_RTS_IFLOW
- && RB_LEN(TB_RAW(tp)) + incc >= RB_I_HIGH_WATER
- && !(tp->t_state & TS_RTS_IFLOW)
+ && TB_RAW(tp)->c_cc + incc >= RB_I_HIGH_WATER /*
+ && !(tp->t_state & TS_RTS_IFLOW) */
/*
* XXX - need RTS flow control for all line disciplines.
* Only have it in standard one now.
*/
&& linesw[tp->t_line].l_rint == ttyinput) {
- tp->t_state |= TS_RTS_IFLOW;
+/* tp->t_state |= TS_RTS_IFLOW; */
ttstart(tp);
}
+#if 0
/*
* Avoid the grotesquely inefficient lineswitch routine
* (ttyinput) in "raw" mode. It usually takes about 450
@@ -1442,6 +1440,7 @@ repeat:
ttstart(tp);
}
} else {
+#endif
do {
u_char line_status;
int recv_data;
@@ -1461,7 +1460,9 @@ repeat:
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
} while (--incc > 0);
+#if 0
}
+#endif
if (com_events == 0)
break;
}
@@ -1624,10 +1625,12 @@ comstart(tp)
com->state &= ~CS_TTGO;
else
com->state |= CS_TTGO;
+#if 0
if (tp->t_state & TS_RTS_IFLOW) {
if (com->mcr_image & MCR_RTS && com->state & CS_RTS_IFLOW)
outb(com->modem_ctl_port, com->mcr_image &= ~MCR_RTS);
} else {
+#endif
/*
* XXX don't raise MCR_RTS if CTS_RTS_IFLOW is off. Set it
* appropriately in comparam() if RTS-flow is being changed.
@@ -1635,31 +1638,29 @@ comstart(tp)
*/
if (!(com->mcr_image & MCR_RTS) && com->iptr < com->ihighwater)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
+#if 0
}
+#endif
enable_intr();
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP))
goto out;
- if (RB_LEN(TB_OUT(tp)) <= tp->t_lowat) {
+ if (TB_OUT(tp)->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)TB_OUT(tp));
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel, tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (com->ocount != 0) {
disable_intr();
siointr1(com);
enable_intr();
- } else if (RB_LEN(TB_OUT(tp)) != 0) {
+ } else if (TB_OUT(tp)->c_cc != 0) {
tp->t_state |= TS_BUSY;
- com->ocount = RB_CONTIGGET(TB_OUT(tp));
disable_intr();
- com->obufend = (com->optr = (u_char *)TB_OUT(tp)->rb_hd)
- + com->ocount;
+ com->ocount = q_to_b(TB_OUT(tp), com->obuf, sizeof com->obuf);
+ com->optr = com->obuf;
+ com->obufend = com->obuf + com->ocount;
com->state |= CS_BUSY;
siointr1(com); /* fake interrupt to start output */
enable_intr();
@@ -1728,11 +1729,11 @@ comwakeup(chan, ticks)
{
int unit;
- timeout(comwakeup, (caddr_t) NULL, hz / 100);
+ timeout((timeout_func_t)comwakeup, (caddr_t) NULL, hz / 100);
if (com_events != 0) {
#ifndef OLD_INTERRUPT_HANDLING
- int s = splsofttty();
+ int s = spltty();
#endif
siopoll();
#ifndef OLD_INTERRUPT_HANDLING
diff --git a/sys/i386/isa/sound/os.h b/sys/i386/isa/sound/os.h
index c6b688ac6aa9..fea169b70813 100644
--- a/sys/i386/isa/sound/os.h
+++ b/sys/i386/isa/sound/os.h
@@ -186,7 +186,7 @@ struct snd_wait {
* 1 or 0 could be returned (1 should be better than 0).
* I'm not sure if the following is correct for FreeBSD.
*/
-#define PROCESS_ABORTING(q, f) (f.aborting | curproc->p_sig)
+#define PROCESS_ABORTING(q, f) (f.aborting | curproc->p_siglist)
/*
* The following macro calls sleep. It should be implemented such that
diff --git a/sys/i386/isa/syscons.c b/sys/i386/isa/syscons.c
index 87572956f84c..39292f93dcf3 100644
--- a/sys/i386/isa/syscons.c
+++ b/sys/i386/isa/syscons.c
@@ -44,6 +44,7 @@
#endif
#include "param.h"
+#include <sys/systm.h>
#include "conf.h"
#include "ioctl.h"
#include "proc.h"
@@ -51,7 +52,6 @@
#include "tty.h"
#include "uio.h"
#include "callout.h"
-#include "systm.h"
#include "kernel.h"
#include "syslog.h"
#include "errno.h"
@@ -277,14 +277,20 @@ int ttrstrt();
#endif
#if defined(__FreeBSD__)
+#if 0
#define VIRTUAL_TTY(x) (pccons[x] = ttymalloc(pccons[x]))
#define CONSOLE_TTY (pccons[NCONS] = ttymalloc(pccons[NCONS]))
+struct tty *pccons[NCONS+1];
+#else
+#define VIRTUAL_TTY(x) &pccons[x]
+#define CONSOLE_TTY &pccons[NCONS]
+struct tty pccons[NCONS+1];
+#endif
+#define timeout_t timeout_func_t
#define frametype struct trapframe
#define eflags tf_eflags
-#define timeout_t timeout_func_t
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
-struct tty *pccons[NCONS+1];
#endif
#if defined(__386BSD__) && !defined(__FreeBSD__)
@@ -456,11 +462,7 @@ int pcopen(dev_t dev, int flag, int mode, struct proc *p)
return(EBUSY);
tp->t_state |= TS_CARR_ON;
tp->t_cflag |= CLOCAL;
-#if defined(__FreeBSD__)
- return((*linesw[tp->t_line].l_open)(dev, tp, 0));
-#else
return((*linesw[tp->t_line].l_open)(dev, tp));
-#endif
}
@@ -744,12 +746,12 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return 0;
case KDENABIO: /* allow io operations */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
return 0;
case KDDISABIO: /* disallow io operations (default) */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
return 0;
@@ -960,7 +962,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
if (saved_console < 0) {
saved_console = get_scr_num();
switch_scr(minor(dev));
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
scp->status |= UNKNOWN_MODE;
scp->status |= KBD_RAW_MODE;
@@ -969,7 +971,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return EAGAIN;
case CONSOLE_X_MODE_OFF:/* just to be compatible */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
if (crtc_vga) {
load_font(0, 16, font_8x16);
@@ -1002,7 +1004,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
break;
}
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return(error);
error = ttioctl(tp, cmd, data, flag);
@@ -1028,7 +1030,7 @@ void pcxint(dev_t dev)
void pcstart(struct tty *tp)
{
-#if defined(NetBSD)
+#if defined(NetBSD) || defined(__FreeBSD__)
struct clist *rbp;
int i, s, len;
u_char buf[PCBURST];
@@ -1046,10 +1048,6 @@ void pcstart(struct tty *tp)
if (buf[i]) ansi_put(scp, buf[i]);
s = spltty();
tp->t_state &= ~TS_BUSY;
- if (rbp->c_cc) {
- tp->t_state |= TS_TIMEOUT;
- timeout((timeout_t)ttrstrt, (caddr_t)tp, 1);
- }
if (rbp->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
@@ -1060,7 +1058,7 @@ void pcstart(struct tty *tp)
}
splx(s);
-#else /* __FreeBSD__ & __386BSD__ */
+#else /* __386BSD__ */
int c, s, len, i;
scr_stat *scp = get_scr_stat(tp->t_dev);
@@ -1076,12 +1074,7 @@ void pcstart(struct tty *tp)
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)tp->t_out);
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel,
- tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (RB_LEN(tp->t_out) == 0)
break;
diff --git a/sys/i386/isa/ultra14f.c b/sys/i386/isa/ultra14f.c
index c1849040b75e..b9017444d9c1 100644
--- a/sys/i386/isa/ultra14f.c
+++ b/sys/i386/isa/ultra14f.c
@@ -238,7 +238,7 @@ int uhaprobe();
int uha_attach();
int uhaintr();
int32 uha_scsi_cmd();
-void uha_timeout(caddr_t, int);
+void uha_timeout(caddr_t);
void uha_free_mscp();
int uha_abort();
void uhaminphys();
@@ -1077,7 +1077,7 @@ uha_scsi_cmd(xs)
}
void
-uha_timeout(caddr_t arg1, int arg2)
+uha_timeout(caddr_t arg1)
{
struct mscp *mscp = (struct mscp *)arg1;
int unit;
diff --git a/sys/i386/isa/wd.c b/sys/i386/isa/wd.c
index b8cf448d3bfa..2794ecda6ba1 100644
--- a/sys/i386/isa/wd.c
+++ b/sys/i386/isa/wd.c
@@ -176,7 +176,7 @@ static void wderror(struct buf *bp, struct disk *du, char *mesg);
static void wdflushirq(struct disk *du, int old_ipl);
static int wdreset(struct disk *du);
static void wdsleep(int ctrlr, char *wmesg);
-static void wdtimeout(caddr_t cdu, int ticks);
+static void wdtimeout(caddr_t cdu);
static int wdunwedge(struct disk *du);
static int wdwait(struct disk *du, u_char bits_wanted, int timeout);
@@ -184,6 +184,8 @@ struct isa_driver wdcdriver = {
wdprobe, wdattach, "wdc",
};
+extern char *readdisklabel();
+
/*
* Probe for controller.
*/
@@ -321,7 +323,7 @@ wdattach(struct isa_device *dvp)
* Start timeout routine for this drive.
* XXX timeout should be per controller.
*/
- wdtimeout((caddr_t)du, 0);
+ wdtimeout((caddr_t)du);
} else {
free(du, M_TEMP);
wddrives[lunit] = NULL;
@@ -397,7 +399,8 @@ wdstrategy(register struct buf *bp)
dp = &wdutab[lunit];
s = splbio();
- cldisksort(dp, bp, 254*DEV_BSIZE);
+ /* cldisksort(dp, bp, 254*DEV_BSIZE); */
+ disksort(dp, bp);
if (dp->b_active == 0)
wdustart(du); /* start drive */
@@ -440,13 +443,15 @@ wdustart(register struct disk *du)
if (bp == NULL)
return;
+ dp->b_actf = bp->b_actf;
+ bp->b_actf = NULL;
/* link onto controller queue */
- dp->b_forw = NULL;
- if (wdtab[ctrlr].b_actf == NULL)
- wdtab[ctrlr].b_actf = dp;
- else
- wdtab[ctrlr].b_actl->b_forw = dp;
- wdtab[ctrlr].b_actl = dp;
+ if (wdtab[ctrlr].b_actf == NULL) {
+ wdtab[ctrlr].b_actf = bp;
+ } else {
+ *wdtab[ctrlr].b_actb = bp;
+ }
+ wdtab[ctrlr].b_actb = &bp->b_actf;
/* mark the drive unit as busy */
dp->b_active = 1;
@@ -474,20 +479,10 @@ wdstart(int ctrlr)
loop:
/* is there a drive for the controller to do a transfer with? */
- dp = wdtab[ctrlr].b_actf;
- if (dp == NULL)
+ bp = wdtab[ctrlr].b_actf;
+ if (bp == NULL)
return;
- /*
- * Is there a transfer to this drive? If so, link it on the
- * controller's queue.
- */
- bp = dp->b_actf;
- if (bp == NULL) {
- wdtab[ctrlr].b_actf = dp->b_forw;
- goto loop;
- }
-
/* obtain controller and drive information */
lunit = wdunit(bp->b_dev);
du = wddrives[lunit];
@@ -671,9 +666,10 @@ wdintr(int unit)
return;
}
- dp = wdtab[unit].b_actf;
- bp = dp->b_actf;
+ bp = wdtab[unit].b_actf;
du = wddrives[wdunit(bp->b_dev)];
+ dp = &wdutab[du->dk_lunit];
+
du->dk_timeout = 0;
if (wdwait(du, 0, TIMEOUT) < 0) {
@@ -783,13 +779,12 @@ outt:
done: ;
/* done with this transfer, with or without error */
du->dk_flags &= ~DKFL_SINGLE;
- wdtab[unit].b_actf = dp->b_forw;
+ wdtab[unit].b_actf = bp->b_actf;
wdtab[unit].b_errcnt = 0;
bp->b_resid = bp->b_bcount - du->dk_skip * DEV_BSIZE;
- du->dk_skip = 0;
dp->b_active = 0;
- dp->b_actf = bp->av_forw;
dp->b_errcnt = 0;
+ du->dk_skip = 0;
biodone(bp);
}
@@ -797,8 +792,7 @@ done: ;
wdtab[unit].b_active = 0;
/* anything more on drive queue? */
- if (dp->b_actf)
- wdustart(du);
+ wdustart(du);
/* anything more for controller to do? */
if (wdtab[unit].b_actf)
wdstart(unit);
@@ -871,11 +865,16 @@ wdopen(dev_t dev, int flags, int fmt, struct proc *p)
* to the driver by resetting the state machine.
*/
save_label = du->dk_dd;
+ du->dk_dd.d_partitions[WDRAW].p_offset = 0;
+ du->dk_dd.d_partitions[WDRAW].p_size = 0x7fffffff;/* XXX */
#define WDSTRATEGY ((int (*)(struct buf *)) wdstrategy) /* XXX */
msg = readdisklabel(makewddev(major(dev), lunit, WDRAW),
- (d_strategy_t *) WDSTRATEGY, &du->dk_dd,
- du->dk_dospartitions, &du->dk_bad,
- (struct buf **)NULL);
+ WDSTRATEGY, &du->dk_dd,
+ du->dk_dospartitions, &du->dk_bad);
+/*
+ msg = readdisklabel(makewddev(major(dev), lunit, WDRAW),
+ WDSTRATEGY, &du->dk_dd);
+*/
du->dk_flags &= ~DKFL_LABELLING;
if (msg != NULL) {
du->dk_dd = save_label;
@@ -1347,7 +1346,7 @@ wdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
du->dk_openpart |= (1 << 0); /* XXX */
wlab = du->dk_wlabel;
du->dk_wlabel = 1;
- error = writedisklabel(dev, (d_strategy_t *) WDSTRATEGY,
+ error = writedisklabel(dev, WDSTRATEGY,
&du->dk_dd, du->dk_dospartitions);
du->dk_openpart = du->dk_copenpart | du->dk_bopenpart;
du->dk_wlabel = wlab;
@@ -1406,19 +1405,24 @@ wdsize(dev_t dev)
{
int lunit = wdunit(dev), part = wdpart(dev), val;
struct disk *du;
+ int size;
- if (lunit >= NWD || wddospart(dev) || (du = wddrives[lunit]) == NULL)
+ if (lunit >= NWD || wddospart(dev) || (du = wddrives[lunit]) == NULL) {
return (-1);
+ }
val = 0;
- if (du->dk_state == CLOSED)
+ if (du->dk_state == CLOSED) {
val = wdopen(makewddev(major(dev), lunit, WDRAW),
FREAD, S_IFBLK, 0);
- if (val != 0 || du->dk_flags & DKFL_WRITEPROT)
+ }
+ if (val != 0 || du->dk_flags & DKFL_WRITEPROT) {
return (-1);
- return ((int)du->dk_dd.d_partitions[part].p_size);
+ }
+ size = ((int)du->dk_dd.d_partitions[part].p_size);
+ return size;
}
-extern char *vmmap; /* poor name! */
+extern char *ptvmmap; /* poor name! */
/*
* Dump core after a system crash.
@@ -1580,7 +1584,7 @@ out:
return (EIO);
}
while (blkcnt != 0) {
- pmap_enter(kernel_pmap, CADDR1, trunc_page(addr),
+ pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, trunc_page(addr),
VM_PROT_READ, TRUE);
/* Ready to send data? */
@@ -1685,7 +1689,7 @@ wdsleep(int ctrlr, char *wmesg)
}
static void
-wdtimeout(caddr_t cdu, int ticks)
+wdtimeout(caddr_t cdu)
{
struct disk *du;
int x;
@@ -1700,7 +1704,7 @@ wdtimeout(caddr_t cdu, int ticks)
du->dk_flags |= DKFL_SINGLE;
wdstart(du->dk_ctrlr);
}
- timeout(wdtimeout, cdu, hz);
+ timeout((timeout_func_t)wdtimeout, cdu, hz);
splx(x);
}
diff --git a/sys/i386/isa/wt.c b/sys/i386/isa/wt.c
index 1e9755301efd..5ed78a2c5e8d 100644
--- a/sys/i386/isa/wt.c
+++ b/sys/i386/isa/wt.c
@@ -156,7 +156,7 @@ static int wtwait (wtinfo_t *t, int catch, char *msg);
static int wtcmd (wtinfo_t *t, int cmd);
static int wtstart (wtinfo_t *t, unsigned mode, void *vaddr, unsigned len);
static void wtdma (wtinfo_t *t);
-static void wtimer (caddr_t, int);
+static void wtimer (caddr_t);
static void wtclock (wtinfo_t *t);
static int wtreset (wtinfo_t *t);
static int wtsense (wtinfo_t *t, int verb, int ignor);
@@ -782,7 +782,7 @@ static void wtclock (wtinfo_t *t)
* This is necessary in case interrupts get eaten due to
* multiple devices on a single IRQ line.
*/
-static void wtimer (caddr_t xt, int dummy)
+static void wtimer (caddr_t xt)
{
wtinfo_t *t = (wtinfo_t *)xt;
int s;
diff --git a/sys/isa/atrtc.c b/sys/isa/atrtc.c
index d338cd5c5783..e40079a40bea 100644
--- a/sys/isa/atrtc.c
+++ b/sys/isa/atrtc.c
@@ -50,6 +50,7 @@
#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
#include "i386/isa/timerreg.h"
+#include <machine/cpu.h>
/* X-tals being what they are, it's nice to be able to fudge this one... */
/* Note, the name changed here from XTALSPEED to TIMER_FREQ rgrimes 4/26/93 */
@@ -71,15 +72,23 @@ static u_int hardclock_divisor;
void
-timerintr(struct intrframe frame)
+clkintr(frame)
+ struct clockframe frame;
{
- timer_func(frame);
+ hardclock(&frame);
+}
+
+#if 0
+void
+timerintr(struct clockframe frame)
+{
+ timer_func(&frame);
switch (timer0_state) {
case 0:
break;
case 1:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
timer0_prescale = 0;
}
break;
@@ -96,7 +105,7 @@ timerintr(struct intrframe frame)
break;
case 3:
if ((timer0_prescale+=timer0_divisor) >= hardclock_divisor) {
- hardclock(frame);
+ hardclock(&frame);
disable_intr();
outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT);
outb(TIMER_CNTR0, TIMER_DIV(hz)%256);
@@ -111,6 +120,7 @@ timerintr(struct intrframe frame)
}
}
+#endif
int
acquire_timer0(int rate, void (*function)() )
@@ -395,16 +405,6 @@ test_inittodr(time_t base)
}
#endif
-
-/*
- * Restart the clock.
- */
-void
-resettodr()
-{
-}
-
-
/*
* Wire clock interrupt in.
*/
@@ -428,3 +428,15 @@ spinwait(int millisecs)
{
DELAY(1000 * millisecs);
}
+
+void
+cpu_initclocks()
+{
+ startrtclock();
+ enablertclock();
+}
+
+void
+setstatclockrate(int newhz)
+{
+}
diff --git a/sys/isa/fd.c b/sys/isa/fd.c
index d05c3612b67f..259d451b7b5f 100644
--- a/sys/isa/fd.c
+++ b/sys/isa/fd.c
@@ -199,7 +199,7 @@ int fd_debug = 1;
static void fdstart(fdcu_t);
void fdintr(fdcu_t);
-static void fd_turnoff(caddr_t, int);
+static void fd_turnoff(caddr_t);
/****************************************************************************/
/* autoconfiguration stuff */
@@ -347,7 +347,7 @@ fdattach(dev)
break;
}
- fd_turnoff((caddr_t)fdu, 0);
+ fd_turnoff((caddr_t)fdu);
hdr = 1;
}
printf("\n");
@@ -417,7 +417,7 @@ void fdstrategy(struct buf *bp)
dp = &(fdc->head);
s = splbio();
disksort(dp, bp);
- untimeout(fd_turnoff, (caddr_t)fdu); /* a good idea */
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu); /* a good idea */
fdstart(fdcu);
splx(s);
return;
@@ -463,7 +463,7 @@ set_motor(fdcu, fdu, reset)
}
static void
-fd_turnoff(caddr_t arg1, int arg2)
+fd_turnoff(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -476,7 +476,7 @@ fd_turnoff(caddr_t arg1, int arg2)
}
void
-fd_motor_on(caddr_t arg1, int arg2)
+fd_motor_on(caddr_t arg1)
{
fdu_t fdu = (fdu_t)arg1;
int s;
@@ -502,7 +502,7 @@ fd_turnon(fdu)
{
fd_turnon1(fdu);
fd->flags |= FD_MOTOR_WAIT;
- timeout(fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
+ timeout((timeout_func_t)fd_motor_on, (caddr_t)fdu, hz); /* in 1 sec its ok */
}
}
@@ -685,7 +685,7 @@ fdstart(fdcu)
}
static void
-fd_timeout(caddr_t arg1, int arg2)
+fd_timeout(caddr_t arg1)
{
fdcu_t fdcu = (fdcu_t)arg1;
fdu_t fdu = fdc_data[fdcu].fdu;
@@ -809,8 +809,8 @@ fdstate(fdcu, fdc)
TRACE1("fd%d",fdu);
TRACE1("[%s]",fdstates[fdc->state]);
TRACE1("(0x%x)",fd->flags);
- untimeout(fd_turnoff, (caddr_t)fdu);
- timeout(fd_turnoff, (caddr_t)fdu, 4 * hz);
+ untimeout((timeout_func_t)fd_turnoff, (caddr_t)fdu);
+ timeout((timeout_func_t)fd_turnoff, (caddr_t)fdu, 4 * hz);
switch (fdc->state)
{
case DEVIDLE:
@@ -855,12 +855,12 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,bp->b_cylin * fd->ft->steptrac);
fd->track = -2;
fdc->state = SEEKWAIT;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case SEEKWAIT:
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 50);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 50);
fdc->state = SEEKCOMPLETE;
return(0); /* will return later */
break;
@@ -925,10 +925,10 @@ fdstate(fdcu, fdc)
out_fdc(fdcu,fd->ft->datalen); /* data length */
}
fdc->state = IOCOMPLETE;
- timeout(fd_timeout, (caddr_t)fdcu, 2 * hz);
+ timeout((timeout_func_t)fd_timeout, (caddr_t)fdcu, 2 * hz);
return(0); /* will return later */
case IOCOMPLETE: /* IO DONE, post-analyze */
- untimeout(fd_timeout, (caddr_t)fdcu);
+ untimeout((timeout_func_t)fd_timeout, (caddr_t)fdcu);
for(i=0;i<7;i++)
{
fdc->status[i] = in_fdc(fdcu);
@@ -964,7 +964,7 @@ fdstate(fdcu, fdc)
/* ALL DONE */
fd->skip = 0;
bp->b_resid = 0;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
biodone(bp);
fdc->fd = (fd_p) 0;
fdc->fdu = -1;
@@ -991,7 +991,7 @@ fdstate(fdcu, fdc)
return(0); /* will return later */
case RECALWAIT:
/* allow heads to settle */
- timeout(fd_pseudointr, (caddr_t)fdcu, hz / 30);
+ timeout((timeout_func_t)fd_pseudointr, (caddr_t)fdcu, hz / 30);
fdc->state = RECALCOMPLETE;
return(0); /* will return later */
case RECALCOMPLETE:
@@ -1079,7 +1079,7 @@ retrier(fdcu)
bp->b_flags |= B_ERROR;
bp->b_error = EIO;
bp->b_resid = bp->b_bcount - fdc->fd->skip;
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
fdc->fd->skip = 0;
biodone(bp);
fdc->state = FINDWORK;
diff --git a/sys/isa/sio.c b/sys/isa/sio.c
index ad09f7a3c1d4..9bdb8c410db8 100644
--- a/sys/isa/sio.c
+++ b/sys/isa/sio.c
@@ -36,6 +36,7 @@
#include "sio.h"
#if NSIO > 0
+#define DONT_MALLOC_TTYS
/*
* Serial driver, based on 386BSD-0.1 com driver.
* Mostly rewritten to use pseudo-DMA.
@@ -61,9 +62,11 @@
#define FAKE_DCD(unit) ((unit) == comconsole)
#define LOTS_OF_EVENTS 64 /* helps separate urgent events from input */
+#define RBSZ 1024
#define RB_I_HIGH_WATER (RBSZ - 2 * RS_IBUFSIZE)
#define RB_I_LOW_WATER ((RBSZ - 2 * RS_IBUFSIZE) * 7 / 8)
#define RS_IBUFSIZE 256
+#define RS_OBUFSIZE 256
#define TTY_BI TTY_FE /* XXX */
#define TTY_OE TTY_PE /* XXX */
@@ -221,39 +224,39 @@ struct com_s {
#define CE_INPUT_OFFSET RS_IBUFSIZE
u_char ibuf1[2 * RS_IBUFSIZE];
u_char ibuf2[2 * RS_IBUFSIZE];
+ u_char obuf[RS_OBUFSIZE];
};
/*
* The public functions in the com module ought to be declared in a com-driver
* system header.
*/
-#define Dev_t int /* promoted dev_t */
/* Interrupt handling entry points. */
void siointr __P((int unit));
void siopoll __P((void));
/* Device switch entry points. */
-int sioopen __P((Dev_t dev, int oflags, int devtype,
+int sioopen __P((dev_t dev, int oflags, int devtype,
struct proc *p));
-int sioclose __P((Dev_t dev, int fflag, int devtype,
+int sioclose __P((dev_t dev, int fflag, int devtype,
struct proc *p));
-int sioread __P((Dev_t dev, struct uio *uio, int ioflag));
-int siowrite __P((Dev_t dev, struct uio *uio, int ioflag));
-int sioioctl __P((Dev_t dev, int cmd, caddr_t data,
+int sioread __P((dev_t dev, struct uio *uio, int ioflag));
+int siowrite __P((dev_t dev, struct uio *uio, int ioflag));
+int sioioctl __P((dev_t dev, int cmd, caddr_t data,
int fflag, struct proc *p));
void siostop __P((struct tty *tp, int rw));
#define sioreset noreset
-int sioselect __P((Dev_t dev, int rw, struct proc *p));
+int sioselect __P((dev_t dev, int rw, struct proc *p));
#define siommap nommap
#define siostrategy nostrategy
/* Console device entry points. */
-int siocngetc __P((Dev_t dev));
+int siocngetc __P((dev_t dev));
struct consdev;
void siocninit __P((struct consdev *cp));
void siocnprobe __P((struct consdev *cp));
-void siocnputc __P((Dev_t dev, int c));
+void siocnputc __P((dev_t dev, int c));
static int sioattach __P((struct isa_device *dev));
static void comflush __P((struct com_s *com));
@@ -288,15 +291,9 @@ static int comconsole = -1;
static speed_t comdefaultrate = TTYDEF_SPEED;
static u_int com_events; /* input chars + weighted output completions */
static int commajor;
-#ifdef DONT_MALLOC_TTYS
-#define TB_OUT(tp) (&(tp)->t_out)
-#define TB_RAW(tp) (&(tp)->t_raw)
+#define TB_OUT(tp) (&(tp)->t_outq)
+#define TB_RAW(tp) (&(tp)->t_rawq)
struct tty sio_tty[NSIO];
-#else
-#define TB_OUT(tp) ((tp)->t_out)
-#define TB_RAW(tp) ((tp)->t_raw)
-struct tty *sio_tty[NSIO];
-#endif
extern struct tty *constty;
extern int tk_nin; /* XXX */
extern int tk_rawcc; /* XXX */
@@ -787,7 +784,7 @@ bidir_open_top:
}
out:
if (error == 0)
- error = (*linesw[tp->t_line].l_open)(dev, tp, 0);
+ error = (*linesw[tp->t_line].l_open)(dev, tp);
splx(s);
#ifdef COM_BIDIR
@@ -1129,7 +1126,7 @@ sioioctl(dev, cmd, data, flag, p)
com = com_addr(UNIT(dev));
tp = com->tp;
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return (error);
error = ttioctl(tp, cmd, data, flag);
@@ -1222,6 +1219,7 @@ sioioctl(dev, cmd, data, flag, p)
*(int *)data = com->bidir;
break;
#endif /* COM_BIDIR */
+#if 0
case TIOCMSDTRWAIT:
/* must be root since the wait applies to following logins */
error = suser(p->p_ucred, &p->p_acflag);
@@ -1240,6 +1238,7 @@ sioioctl(dev, cmd, data, flag, p)
case TIOCMGDTRWAIT:
*(int *)data = com->dtr_wait;
break;
+#endif
#ifdef TIOCTIMESTAMP
case TIOCTIMESTAMP:
com->do_timestamp = TRUE;
@@ -1259,16 +1258,14 @@ static void
comflush(com)
struct com_s *com;
{
- struct ringb *rbp;
+ struct clist *rbp;
disable_intr();
if (com->state & CS_ODONE)
com_events -= LOTS_OF_EVENTS;
com->state &= ~(CS_ODONE | CS_BUSY);
enable_intr();
- rbp = TB_OUT(com->tp);
- rbp->rb_hd += com->ocount;
- rbp->rb_hd = RB_ROLLOVER(rbp, rbp->rb_hd);
+ while( getc( TB_OUT(com->tp)) != -1);
com->ocount = 0;
com->tp->t_state &= ~TS_BUSY;
}
@@ -1343,8 +1340,8 @@ repeat:
* CS_RTS_IFLOW is on.
*/
if ((com->state & CS_RTS_IFLOW)
- && !(com->mcr_image & MCR_RTS)
- && !(tp->t_state & TS_RTS_IFLOW))
+ && !(com->mcr_image & MCR_RTS) /*
+ && !(tp->t_state & TS_RTS_IFLOW) */)
outb(com->modem_ctl_port,
com->mcr_image |= MCR_RTS);
enable_intr();
@@ -1404,16 +1401,17 @@ repeat:
if (incc <= 0 || !(tp->t_state & TS_ISOPEN))
continue;
if (com->state & CS_RTS_IFLOW
- && RB_LEN(TB_RAW(tp)) + incc >= RB_I_HIGH_WATER
- && !(tp->t_state & TS_RTS_IFLOW)
+ && TB_RAW(tp)->c_cc + incc >= RB_I_HIGH_WATER /*
+ && !(tp->t_state & TS_RTS_IFLOW) */
/*
* XXX - need RTS flow control for all line disciplines.
* Only have it in standard one now.
*/
&& linesw[tp->t_line].l_rint == ttyinput) {
- tp->t_state |= TS_RTS_IFLOW;
+/* tp->t_state |= TS_RTS_IFLOW; */
ttstart(tp);
}
+#if 0
/*
* Avoid the grotesquely inefficient lineswitch routine
* (ttyinput) in "raw" mode. It usually takes about 450
@@ -1442,6 +1440,7 @@ repeat:
ttstart(tp);
}
} else {
+#endif
do {
u_char line_status;
int recv_data;
@@ -1461,7 +1460,9 @@ repeat:
}
(*linesw[tp->t_line].l_rint)(recv_data, tp);
} while (--incc > 0);
+#if 0
}
+#endif
if (com_events == 0)
break;
}
@@ -1624,10 +1625,12 @@ comstart(tp)
com->state &= ~CS_TTGO;
else
com->state |= CS_TTGO;
+#if 0
if (tp->t_state & TS_RTS_IFLOW) {
if (com->mcr_image & MCR_RTS && com->state & CS_RTS_IFLOW)
outb(com->modem_ctl_port, com->mcr_image &= ~MCR_RTS);
} else {
+#endif
/*
* XXX don't raise MCR_RTS if CTS_RTS_IFLOW is off. Set it
* appropriately in comparam() if RTS-flow is being changed.
@@ -1635,31 +1638,29 @@ comstart(tp)
*/
if (!(com->mcr_image & MCR_RTS) && com->iptr < com->ihighwater)
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
+#if 0
}
+#endif
enable_intr();
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP))
goto out;
- if (RB_LEN(TB_OUT(tp)) <= tp->t_lowat) {
+ if (TB_OUT(tp)->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)TB_OUT(tp));
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel, tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (com->ocount != 0) {
disable_intr();
siointr1(com);
enable_intr();
- } else if (RB_LEN(TB_OUT(tp)) != 0) {
+ } else if (TB_OUT(tp)->c_cc != 0) {
tp->t_state |= TS_BUSY;
- com->ocount = RB_CONTIGGET(TB_OUT(tp));
disable_intr();
- com->obufend = (com->optr = (u_char *)TB_OUT(tp)->rb_hd)
- + com->ocount;
+ com->ocount = q_to_b(TB_OUT(tp), com->obuf, sizeof com->obuf);
+ com->optr = com->obuf;
+ com->obufend = com->obuf + com->ocount;
com->state |= CS_BUSY;
siointr1(com); /* fake interrupt to start output */
enable_intr();
@@ -1728,11 +1729,11 @@ comwakeup(chan, ticks)
{
int unit;
- timeout(comwakeup, (caddr_t) NULL, hz / 100);
+ timeout((timeout_func_t)comwakeup, (caddr_t) NULL, hz / 100);
if (com_events != 0) {
#ifndef OLD_INTERRUPT_HANDLING
- int s = splsofttty();
+ int s = spltty();
#endif
siopoll();
#ifndef OLD_INTERRUPT_HANDLING
diff --git a/sys/isa/syscons.c b/sys/isa/syscons.c
index 87572956f84c..39292f93dcf3 100644
--- a/sys/isa/syscons.c
+++ b/sys/isa/syscons.c
@@ -44,6 +44,7 @@
#endif
#include "param.h"
+#include <sys/systm.h>
#include "conf.h"
#include "ioctl.h"
#include "proc.h"
@@ -51,7 +52,6 @@
#include "tty.h"
#include "uio.h"
#include "callout.h"
-#include "systm.h"
#include "kernel.h"
#include "syslog.h"
#include "errno.h"
@@ -277,14 +277,20 @@ int ttrstrt();
#endif
#if defined(__FreeBSD__)
+#if 0
#define VIRTUAL_TTY(x) (pccons[x] = ttymalloc(pccons[x]))
#define CONSOLE_TTY (pccons[NCONS] = ttymalloc(pccons[NCONS]))
+struct tty *pccons[NCONS+1];
+#else
+#define VIRTUAL_TTY(x) &pccons[x]
+#define CONSOLE_TTY &pccons[NCONS]
+struct tty pccons[NCONS+1];
+#endif
+#define timeout_t timeout_func_t
#define frametype struct trapframe
#define eflags tf_eflags
-#define timeout_t timeout_func_t
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
-struct tty *pccons[NCONS+1];
#endif
#if defined(__386BSD__) && !defined(__FreeBSD__)
@@ -456,11 +462,7 @@ int pcopen(dev_t dev, int flag, int mode, struct proc *p)
return(EBUSY);
tp->t_state |= TS_CARR_ON;
tp->t_cflag |= CLOCAL;
-#if defined(__FreeBSD__)
- return((*linesw[tp->t_line].l_open)(dev, tp, 0));
-#else
return((*linesw[tp->t_line].l_open)(dev, tp));
-#endif
}
@@ -744,12 +746,12 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return 0;
case KDENABIO: /* allow io operations */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
return 0;
case KDDISABIO: /* disallow io operations (default) */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
return 0;
@@ -960,7 +962,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
if (saved_console < 0) {
saved_console = get_scr_num();
switch_scr(minor(dev));
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags |= PSL_IOPL;
scp->status |= UNKNOWN_MODE;
scp->status |= KBD_RAW_MODE;
@@ -969,7 +971,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
return EAGAIN;
case CONSOLE_X_MODE_OFF:/* just to be compatible */
- fp = (frametype *)p->p_regs;
+ fp = (frametype *)p->p_md.md_regs;
fp->eflags &= ~PSL_IOPL;
if (crtc_vga) {
load_font(0, 16, font_8x16);
@@ -1002,7 +1004,7 @@ int pcioctl(dev_t dev, int cmd, caddr_t data, int flag, struct proc *p)
break;
}
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error >= 0)
return(error);
error = ttioctl(tp, cmd, data, flag);
@@ -1028,7 +1030,7 @@ void pcxint(dev_t dev)
void pcstart(struct tty *tp)
{
-#if defined(NetBSD)
+#if defined(NetBSD) || defined(__FreeBSD__)
struct clist *rbp;
int i, s, len;
u_char buf[PCBURST];
@@ -1046,10 +1048,6 @@ void pcstart(struct tty *tp)
if (buf[i]) ansi_put(scp, buf[i]);
s = spltty();
tp->t_state &= ~TS_BUSY;
- if (rbp->c_cc) {
- tp->t_state |= TS_TIMEOUT;
- timeout((timeout_t)ttrstrt, (caddr_t)tp, 1);
- }
if (rbp->c_cc <= tp->t_lowat) {
if (tp->t_state & TS_ASLEEP) {
tp->t_state &= ~TS_ASLEEP;
@@ -1060,7 +1058,7 @@ void pcstart(struct tty *tp)
}
splx(s);
-#else /* __FreeBSD__ & __386BSD__ */
+#else /* __386BSD__ */
int c, s, len, i;
scr_stat *scp = get_scr_stat(tp->t_dev);
@@ -1076,12 +1074,7 @@ void pcstart(struct tty *tp)
tp->t_state &= ~TS_ASLEEP;
wakeup((caddr_t)tp->t_out);
}
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel,
- tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
+ selwakeup(&tp->t_wsel);
}
if (RB_LEN(tp->t_out) == 0)
break;
diff --git a/sys/isofs/cd9660/cd9660_lookup.c b/sys/isofs/cd9660/cd9660_lookup.c
index 62d1d3fc791e..36daffd6b2aa 100644
--- a/sys/isofs/cd9660/cd9660_lookup.c
+++ b/sys/isofs/cd9660/cd9660_lookup.c
@@ -89,6 +89,7 @@ struct nchstats iso_nchstats;
*
* NOTE: (LOOKUP | LOCKPARENT) currently returns the parent inode unlocked.
*/
+int
cd9660_lookup(ap)
struct vop_lookup_args /* {
struct vnode *a_dvp;
@@ -100,9 +101,9 @@ cd9660_lookup(ap)
register struct iso_node *dp; /* inode for directory being searched */
register struct iso_mnt *imp; /* file system that directory is in */
struct buf *bp; /* a buffer of directory entries */
- struct iso_directory_record *ep;/* the current directory entry */
+ struct iso_directory_record *ep = 0;/* the current directory entry */
int entryoffsetinblock; /* offset of ep in bp's buffer */
- int saveoffset; /* offset of last directory entry in dir */
+ int saveoffset = 0; /* offset of last directory entry in dir */
int numdirpasses; /* strategy for directory search */
doff_t endsearch; /* offset to end directory search */
struct iso_node *pdp; /* saved dp during symlink work */
@@ -443,6 +444,7 @@ found:
* is non-zero, fill it in with a pointer to the
* remaining space in the directory.
*/
+int
iso_blkatoff(ip, offset, bpp)
struct iso_node *ip;
doff_t offset;
diff --git a/sys/isofs/cd9660/cd9660_node.c b/sys/isofs/cd9660/cd9660_node.c
index d83a7a6f126a..f9641ffded7c 100644
--- a/sys/isofs/cd9660/cd9660_node.c
+++ b/sys/isofs/cd9660/cd9660_node.c
@@ -84,6 +84,7 @@ int prtactive; /* 1 => print out reclaim of active vnodes */
/*
* Initialize hash links for inodes and dnodes.
*/
+int
cd9660_init()
{
register int i;
@@ -102,6 +103,7 @@ cd9660_init()
dh->dh_head[1] = dh;
}
#endif
+ return (0);
}
#ifdef ISODEVMAP
@@ -163,9 +165,11 @@ iso_dunmap(dev)
* return the inode locked. Detection and handling of mount
* points must be done by the calling routine.
*/
+int
iso_iget(xp, ino, relocated, ipp, isodir)
struct iso_node *xp;
ino_t ino;
+ int relocated;
struct iso_node **ipp;
struct iso_directory_record *isodir;
{
@@ -338,6 +342,7 @@ loop:
/*
* Unlock and decrement the reference count of an inode structure.
*/
+int
iso_iput(ip)
register struct iso_node *ip;
{
@@ -346,6 +351,7 @@ iso_iput(ip)
panic("iso_iput");
ISO_IUNLOCK(ip);
vrele(ITOV(ip));
+ return (0);
}
/*
@@ -412,6 +418,7 @@ cd9660_reclaim(ap)
/*
* Lock an inode. If its already locked, set the WANT bit and sleep.
*/
+int
iso_ilock(ip)
register struct iso_node *ip;
{
@@ -426,11 +433,13 @@ iso_ilock(ip)
ip->i_spare1 = 0;
ip->i_spare0 = curproc->p_pid;
ip->i_flag |= ILOCKED;
+ return (0);
}
/*
* Unlock an inode. If WANT bit is on, wakeup.
*/
+int
iso_iunlock(ip)
register struct iso_node *ip;
{
@@ -443,6 +452,7 @@ iso_iunlock(ip)
ip->i_flag &= ~IWANT;
wakeup((caddr_t)ip);
}
+ return (0);
}
/*
diff --git a/sys/isofs/cd9660/cd9660_util.c b/sys/isofs/cd9660/cd9660_util.c
index f74f0515ff77..39c5fe491a47 100644
--- a/sys/isofs/cd9660/cd9660_util.c
+++ b/sys/isofs/cd9660/cd9660_util.c
@@ -157,7 +157,7 @@ int
isofncmp(unsigned char *fn,int fnlen,unsigned char *isofn,int isolen)
{
int i, j;
- char c;
+ unsigned char c;
while (--fnlen >= 0) {
if (--isolen < 0)
diff --git a/sys/isofs/cd9660/cd9660_vfsops.c b/sys/isofs/cd9660/cd9660_vfsops.c
index 02dd92af66f6..bc48367a38c6 100644
--- a/sys/isofs/cd9660/cd9660_vfsops.c
+++ b/sys/isofs/cd9660/cd9660_vfsops.c
@@ -82,6 +82,7 @@ struct vfsops cd9660_vfsops = {
static iso_mountfs();
+int
cd9660_mountroot()
{
register struct mount *mp;
@@ -139,6 +140,7 @@ int iso_doforce = 1;
*
* mount system call
*/
+int
cd9660_mount(mp, path, data, ndp, p)
register struct mount *mp;
char *path;
@@ -150,7 +152,7 @@ cd9660_mount(mp, path, data, ndp, p)
struct iso_args args;
u_int size;
int error;
- struct iso_mnt *imp;
+ struct iso_mnt *imp = 0;
if (error = copyin(data, (caddr_t)&args, sizeof (struct iso_args)))
return (error);
@@ -211,7 +213,8 @@ cd9660_mount(mp, path, data, ndp, p)
/*
* Common code for mount and mountroot
*/
-static iso_mountfs(devvp, mp, p, argp)
+static int
+iso_mountfs(devvp, mp, p, argp)
register struct vnode *devvp;
struct mount *mp;
struct proc *p;
@@ -381,6 +384,7 @@ out:
* Nothing to do at the moment.
*/
/* ARGSUSED */
+int
cd9660_start(mp, flags, p)
struct mount *mp;
int flags;
@@ -433,6 +437,7 @@ cd9660_unmount(mp, mntflags, p)
/*
* Return root of a filesystem
*/
+int
cd9660_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
@@ -485,6 +490,7 @@ cd9660_quotactl(mp, cmd, uid, arg, p)
/*
* Get file system statistics.
*/
+int
cd9660_statfs(mp, sbp, p)
struct mount *mp;
register struct statfs *sbp;
@@ -659,6 +665,7 @@ cd9660_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
* Vnode pointer to File handle
*/
/* ARGSUSED */
+int
cd9660_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
diff --git a/sys/isofs/cd9660/cd9660_vnops.c b/sys/isofs/cd9660/cd9660_vnops.c
index 59f5a73f5c86..7a2964bf22db 100644
--- a/sys/isofs/cd9660/cd9660_vnops.c
+++ b/sys/isofs/cd9660/cd9660_vnops.c
@@ -157,6 +157,7 @@ cd9660_close(ap)
* super user is granted all permissions.
*/
/* ARGSUSED */
+int
cd9660_access(ap)
struct vop_access_args /* {
struct vnode *a_vp;
@@ -168,6 +169,7 @@ cd9660_access(ap)
return (0);
}
+int
cd9660_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
@@ -217,6 +219,7 @@ extern int doclusterread;
/*
* Vnode op for reading.
*/
+int
cd9660_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 5d69e097007f..4be4e5041f4a 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -31,15 +31,15 @@
* $Id: imgact_aout.c,v 1.3 1993/12/30 01:39:29 davidg Exp $
*/
-#include "param.h"
-#include "systm.h"
-#include "resourcevar.h"
-#include "exec.h"
-#include "mman.h"
-#include "imgact.h"
-#include "kernel.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/resourcevar.h>
+#include <sys/exec.h>
+#include <sys/mman.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
-#include "vm/vm.h"
+#include <vm/vm.h>
int
exec_aout_imgact(iparams)
@@ -135,8 +135,8 @@ exec_aout_imgact(iparams)
a_out->a_text, /* size */
VM_PROT_READ | VM_PROT_EXECUTE, /* protection */
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE, /* max protection */
- MAP_FILE | MAP_PRIVATE | MAP_FIXED, /* flags */
- iparams->vnodep, /* vnode */
+ MAP_PRIVATE | MAP_FIXED, /* flags */
+ (caddr_t)iparams->vnodep, /* vnode */
file_offset); /* offset */
if (error)
return (error);
@@ -151,7 +151,8 @@ exec_aout_imgact(iparams)
&vmaddr,
a_out->a_data,
VM_PROT_READ | VM_PROT_WRITE | (a_out->a_text ? 0 : VM_PROT_EXECUTE),
- VM_PROT_ALL, MAP_FILE | MAP_PRIVATE | MAP_FIXED, iparams->vnodep,
+ VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED,
+ (caddr_t) iparams->vnodep,
file_offset + a_out->a_text);
if (error)
return (error);
diff --git a/sys/kern/imgact_shell.c b/sys/kern/imgact_shell.c
index 42b329759b54..e4f4d953c651 100644
--- a/sys/kern/imgact_shell.c
+++ b/sys/kern/imgact_shell.c
@@ -34,6 +34,7 @@
#include "param.h"
#include "systm.h"
#include "resourcevar.h"
+#include <sys/exec.h>
#include "imgact.h"
#include "kernel.h"
#include "machine/endian.h"
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index c6497153a695..f1fcc44c4b96 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -81,7 +81,7 @@ struct filedesc0 filedesc0;
struct plimit limit0;
struct vmspace vmspace0;
struct proc *curproc = &proc0;
-struct proc *initproc, *pageproc;
+struct proc *initproc, *pageproc, *pagescanproc, *updateproc;
int cmask = CMASK;
extern struct user *proc0paddr;
@@ -93,12 +93,26 @@ struct timeval runtime;
static void start_init __P((struct proc *p, void *framep));
+#if __GNUC__ >= 2
+void __main() {}
+#endif
+
+/*
+ * This table is filled in by the linker with functions that need to be
+ * called to initialize various pseudo-devices and whatnot.
+ */
+typedef void (*pseudo_func_t)(void);
+extern const struct linker_set pseudo_set;
+static const pseudo_func_t *pseudos =
+ (const pseudo_func_t *)&pseudo_set.ls_items[0];
+
/*
* System startup; initialize the world, create process 0, mount root
* filesystem, and fork to create init and pagedaemon. Most of the
* hard work is done in the lower-level initialization routines including
* startup(), which does memory initialization and autoconfiguration.
*/
+void
main(framep)
void *framep;
{
@@ -178,7 +192,7 @@ main(framep)
p->p_vmspace = &vmspace0;
vmspace0.vm_refcnt = 1;
pmap_pinit(&vmspace0.vm_pmap);
- vm_map_init(&p->p_vmspace->vm_map, round_page(VM_MIN_ADDRESS),
+ vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAX_ADDRESS), TRUE);
vmspace0.vm_map.pmap = &vmspace0.vm_pmap;
p->p_addr = proc0paddr; /* XXX */
@@ -214,14 +228,12 @@ main(framep)
/* Initialize clists. */
clist_init();
-#ifdef SYSVSHM
- /* Initialize System V style shared memory. */
- shminit();
-#endif
-
- /* Attach pseudo-devices. */
- for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
- (*pdev->pdev_attach)(pdev->pdev_count);
+ /*
+ * Attach pseudo-devices.
+ */
+ while(*pseudos) {
+ (**pseudos++)();
+ }
/*
* Initialize protocols. Block reception of incoming packets
@@ -287,6 +299,37 @@ main(framep)
vm_pageout();
/* NOTREACHED */
}
+#if 1
+ /*
+ * Start page scanner daemon (process 3).
+ */
+ if (fork(p, (void *) NULL, rval))
+ panic("failed fork page scanner daemon");
+ if (rval[1]) {
+ p = curproc;
+ pagescanproc = p;
+ p->p_flag |= P_INMEM | P_SYSTEM;
+ bcopy("pagescan", p->p_comm, sizeof("pagescan"));
+ vm_pagescan();
+ /*NOTREACHED*/
+ }
+#endif
+
+ /*
+ * Start update daemon (process 4).
+ */
+#ifndef LAPTOP
+ if (fork(p, (void *) NULL, rval))
+ panic("failed fork update daemon");
+ if (rval[1]) {
+ p = curproc;
+ updateproc = p;
+ p->p_flag |= P_INMEM | P_SYSTEM;
+ bcopy("update", p->p_comm, sizeof("update"));
+ vfs_update();
+ /*NOTREACHED*/
+ }
+#endif
/* The scheduler is an infinite loop. */
scheduler();
@@ -331,10 +374,11 @@ start_init(p, framep)
/*
* Need just enough stack to hold the faked-up "execve()" arguments.
*/
- addr = trunc_page(VM_MAX_ADDRESS - PAGE_SIZE);
+ addr = trunc_page(VM_MAXUSER_ADDRESS - PAGE_SIZE);
if (vm_allocate(&p->p_vmspace->vm_map, &addr, PAGE_SIZE, FALSE) != 0)
panic("init: couldn't allocate argument space");
p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
+ p->p_vmspace->vm_ssize = 1;
for (pathp = &initpaths[0]; (path = *pathp) != NULL; pathp++) {
/*
@@ -377,8 +421,8 @@ start_init(p, framep)
* Point at the arguments.
*/
args.fname = arg0;
- args.argp = uap;
- args.envp = NULL;
+ args.argv = uap;
+ args.envv = NULL;
/*
* Now try to exec the program. If can't for any reason
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index 4b25c0695cfe..80cab9051dbe 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -138,6 +138,10 @@ int fstatfs();
int getfh();
#else
#endif
+int getdomainname();
+int setdomainname();
+int uname();
+int sysarch();
#ifdef SYSVSHM
int shmsys();
#else
@@ -308,7 +312,7 @@ struct sysent sysent[] = {
{ 0, nosys }, /* 68 = obsolete vwrite */
{ 1, sbrk }, /* 69 = sbrk */
{ 1, sstk }, /* 70 = sstk */
- { compat(7,mmap) }, /* 71 = old mmap */
+ { compat(6,mmap) }, /* 71 = old mmap */
{ 1, ovadvise }, /* 72 = vadvise */
{ 2, munmap }, /* 73 = munmap */
{ 3, mprotect }, /* 74 = mprotect */
@@ -415,10 +419,10 @@ struct sysent sysent[] = {
#else
{ 0, nosys }, /* 161 = nosys */
#endif
- { 0, nosys }, /* 162 = nosys */
- { 0, nosys }, /* 163 = nosys */
- { 0, nosys }, /* 164 = nosys */
- { 0, nosys }, /* 165 = nosys */
+ { 2, getdomainname }, /* 162 = getdomainname */
+ { 2, setdomainname }, /* 163 = setdomainname */
+ { 1, uname }, /* 164 = uname */
+ { 2, sysarch }, /* 165 = sysarch */
{ 0, nosys }, /* 166 = nosys */
{ 0, nosys }, /* 167 = nosys */
{ 0, nosys }, /* 168 = nosys */
diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c
index b752279d120a..3cbde510f84b 100644
--- a/sys/kern/kern_acct.c
+++ b/sys/kern/kern_acct.c
@@ -49,6 +49,7 @@
struct acct_args {
char *fname;
};
+int
acct(a1, a2, a3)
struct proc *a1;
struct acct_args *a2;
@@ -60,6 +61,7 @@ acct(a1, a2, a3)
return (ENOSYS);
}
+void
acct_process(a1)
struct proc *a1;
{
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index f42900cb75d2..4017e966ee69 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -511,6 +511,7 @@ statclock(frame)
/*
* Return information about system clocks.
*/
+int
sysctl_clockrate(where, sizep)
register char *where;
size_t *sizep;
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 543946d3f8fd..9258e811f1be 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -68,6 +68,7 @@ struct getdtablesize_args {
int dummy;
};
/* ARGSUSED */
+int
getdtablesize(p, uap, retval)
struct proc *p;
struct getdtablesize_args *uap;
@@ -85,6 +86,7 @@ struct dup_args {
u_int fd;
};
/* ARGSUSED */
+int
dup(p, uap, retval)
struct proc *p;
struct dup_args *uap;
@@ -116,6 +118,7 @@ struct dup2_args {
u_int to;
};
/* ARGSUSED */
+int
dup2(p, uap, retval)
struct proc *p;
struct dup2_args *uap;
@@ -159,6 +162,7 @@ struct fcntl_args {
int arg;
};
/* ARGSUSED */
+int
fcntl(p, uap, retval)
struct proc *p;
register struct fcntl_args *uap;
@@ -324,6 +328,7 @@ struct close_args {
int fd;
};
/* ARGSUSED */
+int
close(p, uap, retval)
struct proc *p;
struct close_args *uap;
@@ -358,6 +363,7 @@ struct ofstat_args {
struct ostat *sb;
};
/* ARGSUSED */
+int
ofstat(p, uap, retval)
struct proc *p;
register struct ofstat_args *uap;
@@ -401,6 +407,7 @@ struct fstat_args {
struct stat *sb;
};
/* ARGSUSED */
+int
fstat(p, uap, retval)
struct proc *p;
register struct fstat_args *uap;
@@ -441,6 +448,7 @@ struct fpathconf_args {
int name;
};
/* ARGSUSED */
+int
fpathconf(p, uap, retval)
struct proc *p;
register struct fpathconf_args *uap;
@@ -476,6 +484,7 @@ fpathconf(p, uap, retval)
*/
int fdexpand;
+int
fdalloc(p, want, result)
struct proc *p;
int want;
@@ -538,12 +547,14 @@ fdalloc(p, want, result)
fdp->fd_nfiles = nfiles;
fdexpand++;
}
+ return (0);
}
/*
* Check to see whether n user file descriptors
* are available to the process p.
*/
+int
fdavail(p, n)
struct proc *p;
register int n;
@@ -566,6 +577,7 @@ fdavail(p, n)
* Create a new open file structure and allocate
* a file decriptor for the process that refers to it.
*/
+int
falloc(p, resultfp, resultfd)
register struct proc *p;
struct file **resultfp;
@@ -612,6 +624,7 @@ falloc(p, resultfp, resultfd)
/*
* Free a file descriptor.
*/
+void
ffree(fp)
register struct file *fp;
{
@@ -709,11 +722,40 @@ fdfree(p)
}
/*
+ * Close any files on exec?
+ */
+void
+fdcloseexec(p)
+ struct proc *p;
+{
+ struct filedesc *fdp = p->p_fd;
+ struct file **fpp;
+ char *fdfp;
+ register int i;
+
+ fpp = fdp->fd_ofiles;
+ fdfp = fdp->fd_ofileflags;
+ for (i = 0; i <= fdp->fd_lastfile; i++, fpp++, fdfp++)
+ if (*fpp != NULL && (*fdfp & UF_EXCLOSE)) {
+ if (*fdfp & UF_MAPPED)
+ (void) munmapfd(i);
+ (void) closef(*fpp, p);
+ *fpp = NULL;
+ *fdfp = 0;
+ if (i < fdp->fd_freefile)
+ fdp->fd_freefile = i;
+ }
+ while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
+ fdp->fd_lastfile--;
+}
+
+/*
* Internal form of close.
* Decrement reference count on file structure.
* Note: p may be NULL when closing a file
* that was being passed in a message.
*/
+int
closef(fp, p)
register struct file *fp;
register struct proc *p;
@@ -771,6 +813,7 @@ struct flock_args {
int how;
};
/* ARGSUSED */
+int
flock(p, uap, retval)
struct proc *p;
register struct flock_args *uap;
@@ -816,6 +859,7 @@ flock(p, uap, retval)
* references to this file will be direct to the other driver.
*/
/* ARGSUSED */
+int
fdopen(dev, mode, type, p)
dev_t dev;
int mode, type;
@@ -837,6 +881,7 @@ fdopen(dev, mode, type, p)
/*
* Duplicate the specified descriptor to a free descriptor.
*/
+int
dupfdopen(fdp, indx, dfd, mode, error)
register struct filedesc *fdp;
register int indx, dfd;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index fbb4444d52bd..6717e4e53b6b 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1,11 +1,6 @@
-/*-
- * Copyright (c) 1982, 1986, 1991, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (c) 1993, David Greenman
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -17,16 +12,14 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * This product includes software developed by David Greenman
+ * 4. The name of the developer may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -35,30 +28,502 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)kern_exec.c 8.1 (Berkeley) 6/10/93
+ * $Id: kern_execve.c,v 1.20 1994/03/26 12:24:27 davidg Exp $
*/
#include <sys/param.h>
-#include <sys/errno.h>
-#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/resourcevar.h>
+#include <sys/kernel.h>
+#include <sys/mount.h>
+#include <sys/file.h>
+#include <sys/acct.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/malloc.h>
+#include <sys/syslog.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+
+#include <machine/reg.h>
+
+int exec_extract_strings __P((struct image_params *));
+int *exec_copyout_strings __P((struct image_params *));
+
+/*
+ * execsw_set is constructed for us by the linker. Each of the items
+ * is a pointer to a `const struct execsw', hence the double pointer here.
+ */
+extern const struct linker_set execsw_set;
+const struct execsw **execsw = (const struct execsw **)&execsw_set.ls_items[0];
+
+/*
+ * execve() system call.
+ */
+int
+execve(p, uap, retval)
+ struct proc *p;
+ register struct execve_args *uap;
+ int *retval;
+{
+ struct nameidata nd, *ndp;
+ char *stringbase, *stringp;
+ int *stack_base;
+ int error, resid, len, i;
+ struct image_params image_params, *iparams;
+ struct vnode *vnodep;
+ struct vattr attr;
+ char *image_header;
+
+ iparams = &image_params;
+ bzero((caddr_t)iparams, sizeof(struct image_params));
+ image_header = (char *)0;
+
+ /*
+ * Initialize a few constants in the common area
+ */
+ iparams->proc = p;
+ iparams->uap = uap;
+ iparams->attr = &attr;
+
+ /*
+ * Allocate temporary demand zeroed space for argument and
+ * environment strings
+ */
+ error = vm_allocate(kernel_map, (vm_offset_t *)&iparams->stringbase,
+ ARG_MAX, TRUE);
+ if (error) {
+ log(LOG_WARNING, "execve: failed to allocate string space\n");
+ return (error);
+ }
+
+ if (!iparams->stringbase) {
+ error = ENOMEM;
+ goto exec_fail;
+ }
+ iparams->stringp = iparams->stringbase;
+ iparams->stringspace = ARG_MAX;
+
+ /*
+ * Translate the file name. namei() returns a vnode pointer
+ * in ni_vp amoung other things.
+ */
+ ndp = &nd;
+ ndp->ni_cnd.cn_nameiop = LOOKUP;
+ ndp->ni_cnd.cn_flags = LOCKLEAF | FOLLOW | SAVENAME;
+ ndp->ni_cnd.cn_proc = curproc;
+ ndp->ni_cnd.cn_cred = curproc->p_cred->pc_ucred;
+ ndp->ni_segflg = UIO_USERSPACE;
+ ndp->ni_dirp = uap->fname;
+
+interpret:
+
+ error = namei(ndp);
+ if (error) {
+ vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase,
+ ARG_MAX);
+ goto exec_fail;
+ }
+
+ iparams->vnodep = vnodep = ndp->ni_vp;
+
+ if (vnodep == NULL) {
+ error = ENOEXEC;
+ goto exec_fail_dealloc;
+ }
+
+ /*
+ * Check file permissions (also 'opens' file)
+ */
+ error = exec_check_permissions(iparams);
+ if (error)
+ goto exec_fail_dealloc;
+
+ /*
+ * Map the image header (first page) of the file into
+ * kernel address space
+ */
+ error = vm_mmap(kernel_map, /* map */
+ (vm_offset_t *)&image_header, /* address */
+ PAGE_SIZE, /* size */
+ VM_PROT_READ, /* protection */
+ VM_PROT_READ, /* max protection */
+ 0, /* flags */
+ (caddr_t)vnodep, /* vnode */
+ 0); /* offset */
+ if (error) {
+ uprintf("mmap failed: %d\n",error);
+ goto exec_fail_dealloc;
+ }
+ iparams->image_header = image_header;
+
+ /*
+ * Loop through list of image activators, calling each one.
+ * If there is no match, the activator returns -1. If there
+ * is a match, but there was an error during the activation,
+ * the error is returned. Otherwise 0 means success. If the
+ * image is interpreted, loop back up and try activating
+ * the interpreter.
+ */
+ for (i = 0; execsw[i]; ++i) {
+ if (execsw[i]->ex_imgact)
+ error = (*execsw[i]->ex_imgact)(iparams);
+ else
+ continue;
+
+ if (error == -1)
+ continue;
+ if (error)
+ goto exec_fail_dealloc;
+ if (iparams->interpreted) {
+ /* free old vnode and name buffer */
+ vput(ndp->ni_vp);
+ FREE(ndp->ni_cnd.cn_pnbuf, M_NAMEI);
+ if (vm_deallocate(kernel_map,
+ (vm_offset_t)image_header, PAGE_SIZE))
+ panic("execve: header dealloc failed (1)");
+
+ /* set new name to that of the interpreter */
+ ndp->ni_segflg = UIO_SYSSPACE;
+ ndp->ni_dirp = iparams->interpreter_name;
+ ndp->ni_cnd.cn_nameiop = LOOKUP;
+ ndp->ni_cnd.cn_flags = LOCKLEAF | FOLLOW | SAVENAME;
+ ndp->ni_cnd.cn_proc = curproc;
+ ndp->ni_cnd.cn_cred = curproc->p_cred->pc_ucred;
+ goto interpret;
+ }
+ break;
+ }
+ /* If we made it through all the activators and none matched, exit. */
+ if (error == -1) {
+ error = ENOEXEC;
+ goto exec_fail_dealloc;
+ }
+
+ /*
+ * Copy out strings (args and env) and initialize stack base
+ */
+ stack_base = exec_copyout_strings(iparams);
+ p->p_vmspace->vm_minsaddr = (char *)stack_base;
+
+ /*
+ * Stuff argument count as first item on stack
+ */
+ *(--stack_base) = iparams->argc;
+
+ /* close files on exec */
+ fdcloseexec(p);
+
+ /* reset caught signals */
+ execsigs(p);
+
+ /* name this process - nameiexec(p, ndp) */
+ len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
+ bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
+ p->p_comm[len] = 0;
+
+ /*
+ * mark as executable, wakeup any process that was vforked and tell
+ * it that it now has its own resources back
+ */
+ p->p_flag |= P_EXEC;
+ if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
+ p->p_flag &= ~P_PPWAIT;
+ wakeup((caddr_t)p->p_pptr);
+ }
+
+ /* implement set userid/groupid */
+ p->p_flag &= ~P_SUGID;
+
+ /*
+ * Turn off kernel tracing for set-id programs, except for
+ * root.
+ */
+ if (p->p_tracep && (attr.va_mode & (VSUID | VSGID)) &&
+ suser(p->p_ucred, &p->p_acflag)) {
+ p->p_traceflag = 0;
+ vrele(p->p_tracep);
+ p->p_tracep = 0;
+ }
+ if ((attr.va_mode & VSUID) && (p->p_flag & P_TRACED) == 0) {
+ p->p_ucred = crcopy(p->p_ucred);
+ p->p_ucred->cr_uid = attr.va_uid;
+ p->p_flag |= P_SUGID;
+ }
+ if ((attr.va_mode & VSGID) && (p->p_flag & P_TRACED) == 0) {
+ p->p_ucred = crcopy(p->p_ucred);
+ p->p_ucred->cr_groups[0] = attr.va_gid;
+ p->p_flag |= P_SUGID;
+ }
+
+ /*
+ * Implement correct POSIX saved uid behavior.
+ */
+ p->p_cred->p_svuid = p->p_ucred->cr_uid;
+ p->p_cred->p_svgid = p->p_ucred->cr_gid;
+
+ /* mark vnode pure text */
+ ndp->ni_vp->v_flag |= VTEXT;
+
+ /*
+ * If tracing the process, trap to debugger so breakpoints
+ * can be set before the program executes.
+ */
+ if (p->p_flag & P_TRACED)
+ psignal(p, SIGTRAP);
+
+ /* clear "fork but no exec" flag, as we _are_ execing */
+ p->p_acflag &= ~AFORK;
+
+ /* Set entry address */
+ setregs(p, iparams->entry_addr, stack_base);
+
+ /*
+ * free various allocated resources
+ */
+ if (vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase, ARG_MAX))
+ panic("execve: string buffer dealloc failed (1)");
+ if (vm_deallocate(kernel_map, (vm_offset_t)image_header, PAGE_SIZE))
+ panic("execve: header dealloc failed (2)");
+ vput(ndp->ni_vp);
+ FREE(ndp->ni_cnd.cn_pnbuf, M_NAMEI);
+
+ return (0);
+
+exec_fail_dealloc:
+ if (iparams->stringbase && iparams->stringbase != (char *)-1)
+ if (vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase,
+ ARG_MAX))
+ panic("execve: string buffer dealloc failed (2)");
+ if (iparams->image_header && iparams->image_header != (char *)-1)
+ if (vm_deallocate(kernel_map,
+ (vm_offset_t)iparams->image_header, PAGE_SIZE))
+ panic("execve: header dealloc failed (3)");
+ vput(ndp->ni_vp);
+ FREE(ndp->ni_cnd.cn_pnbuf, M_NAMEI);
+
+exec_fail:
+ if (iparams->vmspace_destroyed) {
+ /* sorry, no more process anymore. exit gracefully */
+#if 0 /* XXX */
+ vm_deallocate(&vs->vm_map, USRSTACK - MAXSSIZ, MAXSSIZ);
+#endif
+ exit1(p, W_EXITCODE(0, SIGABRT));
+ /* NOT REACHED */
+ return(0);
+ } else {
+ return(error);
+ }
+}
+
+/*
+ * Destroy old address space, and allocate a new stack
+ * The new stack is only SGROWSIZ large because it is grown
+ * automatically in trap.c.
+ */
+int
+exec_new_vmspace(iparams)
+ struct image_params *iparams;
+{
+ int error;
+ struct vmspace *vmspace = iparams->proc->p_vmspace;
+ caddr_t stack_addr = (caddr_t) (USRSTACK - SGROWSIZ);
+
+ iparams->vmspace_destroyed = 1;
+
+ /* Blow away entire process VM */
+ vm_deallocate(&vmspace->vm_map, 0, USRSTACK);
+
+ /* Allocate a new stack */
+ error = vm_allocate(&vmspace->vm_map, (vm_offset_t *)&stack_addr,
+ SGROWSIZ, FALSE);
+ if (error)
+ return(error);
+
+ vmspace->vm_ssize = SGROWSIZ >> PAGE_SHIFT;
+
+ /* Initialize maximum stack address */
+ vmspace->vm_maxsaddr = (char *)USRSTACK - MAXSSIZ;
+
+ return(0);
+}
+
+/*
+ * Copy out argument and environment strings from the old process
+ * address space into the temporary string buffer.
+ */
+int
+exec_extract_strings(iparams)
+ struct image_params *iparams;
+{
+ char **argv, **envv;
+ char *argp, *envp;
+ int length;
+
+ /*
+ * extract arguments first
+ */
+
+ argv = iparams->uap->argv;
+
+ if (argv)
+ while (argp = (caddr_t) fuword(argv++)) {
+ if (argp == (caddr_t) -1)
+ return (EFAULT);
+ if (copyinstr(argp, iparams->stringp, iparams->stringspace,
+ &length) == ENAMETOOLONG)
+ return(E2BIG);
+ iparams->stringspace -= length;
+ iparams->stringp += length;
+ iparams->argc++;
+ }
+
+ /*
+ * extract environment strings
+ */
+
+ envv = iparams->uap->envv;
+
+ if (envv)
+ while (envp = (caddr_t) fuword(envv++)) {
+ if (envp == (caddr_t) -1)
+ return (EFAULT);
+ if (copyinstr(envp, iparams->stringp, iparams->stringspace,
+ &length) == ENAMETOOLONG)
+ return(E2BIG);
+ iparams->stringspace -= length;
+ iparams->stringp += length;
+ iparams->envc++;
+ }
+
+ return (0);
+}
+
+/*
+ * Copy strings out to the new process address space, constructing
+ * new arg and env vector tables. Return a pointer to the base
+ * so that it can be used as the initial stack pointer.
+ */
+int *
+exec_copyout_strings(iparams)
+ struct image_params *iparams;
+{
+ int argc, envc;
+ char **vectp;
+ char *stringp, *destp;
+ int *stack_base;
+ int vect_table_size, string_table_size;
+
+ /*
+ * Calculate string base and vector table pointers.
+ */
+ destp = (caddr_t) ((caddr_t)USRSTACK -
+ roundup((ARG_MAX - iparams->stringspace), sizeof(char *)));
+ /*
+ * The '+ 2' is for the null pointers at the end of each of the
+ * arg and env vector sets
+ */
+ vectp = (char **) (destp -
+ (iparams->argc + iparams->envc + 2) * sizeof(char *));
+
+ /*
+ * vectp also becomes our initial stack base
+ */
+ stack_base = (int *)vectp;
+
+ stringp = iparams->stringbase;
+ argc = iparams->argc;
+ envc = iparams->envc;
+
+ for (; argc > 0; --argc) {
+ *(vectp++) = destp;
+ while (*destp++ = *stringp++);
+ }
+
+ /* a null vector table pointer separates the argp's from the envp's */
+ *(vectp++) = NULL;
+
+ for (; envc > 0; --envc) {
+ *(vectp++) = destp;
+ while (*destp++ = *stringp++);
+ }
+
+ /* end of vector table is a null pointer */
+ *vectp = NULL;
+
+ return (stack_base);
+}
/*
- * exec system call
+ * Check permissions of file to execute.
+ * Return 0 for success or error code on failure.
*/
-struct execve_args {
- char *fname;
- char **argp;
- char **envp;
-};
-/* ARGSUSED */
-execve(a1, a2, a3)
- struct proc *a1;
- struct execve_args *a2;
- int *a3;
+int
+exec_check_permissions(iparams)
+ struct image_params *iparams;
{
+ struct proc *p = iparams->proc;
+ struct vnode *vnodep = iparams->vnodep;
+ struct vattr *attr = iparams->attr;
+ int error;
+
+ /*
+ * Check number of open-for-writes on the file and deny execution
+ * if there are any.
+ */
+ if (vnodep->v_writecount) {
+ return (ETXTBSY);
+ }
+
+ /* Get file attributes */
+ error = VOP_GETATTR(vnodep, attr, p->p_ucred, p);
+ if (error)
+ return (error);
+
+ /*
+ * 1) Check if file execution is disabled for the filesystem that this
+ * file resides on.
+ * 2) Insure that at least one execute bit is on - otherwise root
+ * will always succeed, and we don't want that to happen unless the
+ * file really is executable.
+ * 3) Insure that the file is a regular file.
+ */
+ if ((vnodep->v_mount->mnt_flag & MNT_NOEXEC) ||
+ ((attr->va_mode & 0111) == 0) ||
+ (attr->va_type != VREG)) {
+ return (EACCES);
+ }
/*
- * Body deleted.
+ * Zero length files can't be exec'd
*/
- return (ENOSYS);
+ if (attr->va_size == 0)
+ return (ENOEXEC);
+
+ /*
+ * Disable setuid/setgid if the filesystem prohibits it or if
+ * the process is being traced.
+ */
+ if ((vnodep->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
+ attr->va_mode &= ~(VSUID | VSGID);
+
+ /*
+ * Check for execute permission to file based on current credentials.
+ * Then call filesystem specific open routine (which does nothing
+ * in the general case).
+ */
+ error = VOP_ACCESS(vnodep, VEXEC, p->p_ucred, p);
+ if (error)
+ return (error);
+
+ error = VOP_OPEN(vnodep, FREAD, p->p_ucred, p);
+ if (error)
+ return (error);
+
+ return (0);
}
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 03353c72d1d1..7db9830abb67 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -84,6 +84,7 @@ exit(p, uap, retval)
exit1(p, W_EXITCODE(uap->rval, 0));
/* NOTREACHED */
+ while (1);
}
/*
@@ -293,6 +294,7 @@ struct wait_args {
#define GETPS(rp) (rp)[PS]
#endif
+int
owait(p, uap, retval)
struct proc *p;
register struct wait_args *uap;
@@ -317,6 +319,7 @@ owait(p, uap, retval)
return (wait1(p, uap, retval));
}
+int
wait4(p, uap, retval)
struct proc *p;
struct wait_args *uap;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 8bec2fa5d5fe..c285017013f4 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -55,6 +55,7 @@ struct fork_args {
int dummy;
};
/* ARGSUSED */
+int
fork(p, uap, retval)
struct proc *p;
struct fork_args *uap;
@@ -65,6 +66,7 @@ fork(p, uap, retval)
}
/* ARGSUSED */
+int
vfork(p, uap, retval)
struct proc *p;
struct fork_args *uap;
@@ -76,6 +78,7 @@ vfork(p, uap, retval)
int nprocs = 1; /* process 0 */
+int
fork1(p1, isvfork, retval)
register struct proc *p1;
int isvfork, retval[];
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 763cfb257ffb..4b6f721c3a26 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -44,6 +44,8 @@
#include <sys/malloc.h>
#include <sys/syslog.h>
+void ktrwrite __P((struct vnode *, struct ktr_header *));
+
struct ktr_header *
ktrgetheader(type)
int type;
@@ -60,6 +62,7 @@ ktrgetheader(type)
return (kth);
}
+void
ktrsyscall(vp, code, narg, args)
struct vnode *vp;
int code, narg, args[];
@@ -86,6 +89,7 @@ ktrsyscall(vp, code, narg, args)
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
+void
ktrsysret(vp, code, error, retval)
struct vnode *vp;
int code, error, retval;
@@ -108,6 +112,7 @@ ktrsysret(vp, code, error, retval)
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
+void
ktrnamei(vp, path)
struct vnode *vp;
char *path;
@@ -125,6 +130,7 @@ ktrnamei(vp, path)
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
+void
ktrgenio(vp, fd, rw, iov, len, error)
struct vnode *vp;
int fd;
@@ -166,6 +172,7 @@ done:
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
+void
ktrpsig(vp, sig, action, mask, code)
struct vnode *vp;
int sig;
@@ -190,6 +197,7 @@ ktrpsig(vp, sig, action, mask, code)
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
+void
ktrcsw(vp, out, user)
struct vnode *vp;
int out, user;
@@ -222,6 +230,7 @@ struct ktrace_args {
int pid;
};
/* ARGSUSED */
+int
ktrace(curp, uap, retval)
struct proc *curp;
register struct ktrace_args *uap;
@@ -357,6 +366,7 @@ ktrops(curp, p, ops, facs, vp)
return (1);
}
+int
ktrsetchildren(curp, top, ops, facs, vp)
struct proc *curp, *top;
int ops, facs;
@@ -392,6 +402,7 @@ ktrsetchildren(curp, top, ops, facs, vp)
/*NOTREACHED*/
}
+void
ktrwrite(vp, kth)
struct vnode *vp;
register struct ktr_header *kth;
@@ -446,6 +457,7 @@ ktrwrite(vp, kth)
*
* TODO: check groups. use caller effective gid.
*/
+int
ktrcanset(callp, targetp)
struct proc *callp, *targetp;
{
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index c6276bc73cf4..3da06d9c04a8 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
@@ -348,6 +349,7 @@ free(addr, type)
/*
* Initialize the kernel memory allocator
*/
+void
kmeminit()
{
register long indx;
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 1eaae3599dee..9e0405fc6abb 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -1,41 +1,20 @@
-/*-
- * Copyright (c) 1982, 1986, 1990, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice immediately at the beginning of the file, without modification,
+ * this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: @(#)kern_physio.c 8.1 (Berkeley) 6/10/93
+ * 3. Absolutely no warranty of function or purpose is made by the author
+ * John S. Dyson.
+ * 4. Modifications may be freely made to this file if the above conditions
+ * are met.
*/
#include <sys/param.h>
@@ -43,51 +22,150 @@
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
+#include <vm/vm.h>
-physio(a1, a2, a3, a4, a5, a6)
- int (*a1)();
- struct buf *a2;
- dev_t a3;
- int a4;
- u_int (*a5)();
- struct uio *a6;
+static void physwakeup();
+
+int
+physio(strategy, bp, dev, rw, minp, uio)
+ int (*strategy)();
+ struct buf *bp;
+ dev_t dev;
+ int rw;
+ u_int (*minp)();
+ struct uio *uio;
{
+ int i;
+ int bp_alloc = (bp == 0);
+ int bufflags = rw?B_READ:0;
+ int error;
+ int spl;
+
+/*
+ * keep the process from being swapped
+ */
+ curproc->p_flag |= P_PHYSIO;
+
+ /* create and build a buffer header for a transfer */
+
+ if (bp_alloc) {
+ bp = (struct buf *)getpbuf();
+ } else {
+ spl = splbio();
+ while (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep((caddr_t)bp, PRIBIO, "physbw", 0);
+ }
+ bp->b_flags |= B_BUSY;
+ splx(spl);
+ }
+
+ bp->b_proc = curproc;
+ bp->b_dev = dev;
+ error = bp->b_error = 0;
+
+ for(i=0;i<uio->uio_iovcnt;i++) {
+ while( uio->uio_iov[i].iov_len) {
+ vm_offset_t v, lastv, pa;
+ caddr_t adr;
+
+ bp->b_bcount = uio->uio_iov[i].iov_len;
+ bp->b_bufsize = bp->b_bcount;
+ bp->b_flags = B_BUSY | B_PHYS | B_CALL | bufflags;
+ bp->b_iodone = physwakeup;
+ bp->b_data = uio->uio_iov[i].iov_base;
+ bp->b_blkno = btodb(uio->uio_offset);
+
+
+ if (rw && !useracc(bp->b_data, bp->b_bufsize, B_WRITE)) {
+ error = EFAULT;
+ goto doerror;
+ }
+ if (!rw && !useracc(bp->b_data, bp->b_bufsize, B_READ)) {
+ error = EFAULT;
+ goto doerror;
+ }
+
+ vmapbuf(bp);
+
+ /* perform transfer */
+ (*strategy)(bp);
+
+ spl = splbio();
+ while ((bp->b_flags & B_DONE) == 0)
+ tsleep((caddr_t)bp, PRIBIO, "physstr", 0);
+ splx(spl);
- /*
- * Body deleted.
- */
- return (EIO);
+ vunmapbuf(bp);
+
+ /*
+ * update the uio data
+ */
+ {
+ int iolen = bp->b_bcount - bp->b_resid;
+ uio->uio_iov[i].iov_len -= iolen;
+ uio->uio_iov[i].iov_base += iolen;
+ uio->uio_resid -= iolen;
+ uio->uio_offset += iolen;
+ }
+
+ /*
+ * check for an error
+ */
+ if( bp->b_flags & B_ERROR) {
+ error = bp->b_error;
+ goto doerror;
+ }
+ }
+ }
+
+
+doerror:
+ if (bp_alloc) {
+ relpbuf(bp);
+ } else {
+ bp->b_flags &= ~(B_BUSY|B_PHYS);
+ if( bp->b_flags & B_WANTED) {
+ bp->b_flags &= ~B_WANTED;
+ wakeup((caddr_t)bp);
+ }
+ }
+/*
+ * allow the process to be swapped
+ */
+ curproc->p_flag &= ~P_PHYSIO;
+
+ return (error);
}
u_int
-minphys(a1)
- struct buf *a1;
+minphys(struct buf *bp)
{
- /*
- * Body deleted.
- */
- return (0);
+ if( bp->b_bcount > MAXBSIZE) {
+ bp->b_bcount = MAXBSIZE;
+ }
+ return bp->b_bcount;
}
-/*
- * Do a read on a device for a user process.
- */
-rawread(dev, uio)
- dev_t dev;
- struct uio *uio;
+int
+rawread(dev_t dev, struct uio *uio)
{
return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
- dev, B_READ, minphys, uio));
+ dev, 1, minphys, uio));
}
-/*
- * Do a write on a device for a user process.
- */
-rawwrite(dev, uio)
- dev_t dev;
- struct uio *uio;
+int
+rawwrite(dev_t dev, struct uio *uio)
{
return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
- dev, B_WRITE, minphys, uio));
+ dev, 0, minphys, uio));
+}
+
+static void
+physwakeup(bp)
+ struct buf *bp;
+{
+ wakeup((caddr_t) bp);
+ bp->b_flags &= ~B_CALL;
}
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 91d9e212d388..63a22c966c5a 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -49,6 +49,9 @@
#include <sys/ioctl.h>
#include <sys/tty.h>
+void pgdelete __P((struct pgrp *));
+void fixjobc __P((struct proc *, struct pgrp *, int));
+
/*
* Structure associated with user cacheing.
*/
@@ -64,6 +67,7 @@ u_long uihash; /* size of hash table - 1 */
/*
* Allocate a hash table.
*/
+void
usrinfoinit()
{
@@ -116,6 +120,7 @@ chgproccnt(uid, diff)
/*
* Is p an inferior of the current process?
*/
+int
inferior(p)
register struct proc *p;
{
@@ -160,6 +165,7 @@ pgfind(pgid)
/*
* Move p to a new or existing process group (and session)
*/
+int
enterpgrp(p, pgid, mksess)
register struct proc *p;
pid_t pgid;
@@ -259,6 +265,7 @@ enterpgrp(p, pgid, mksess)
/*
* remove process from process group
*/
+int
leavepgrp(p)
register struct proc *p;
{
@@ -283,6 +290,7 @@ leavepgrp(p)
/*
* delete a process group
*/
+void
pgdelete(pgrp)
register struct pgrp *pgrp;
{
@@ -318,6 +326,7 @@ static void orphanpg();
* entering == 0 => p is leaving specified group.
* entering == 1 => p is entering specified group.
*/
+void
fixjobc(p, pgrp, entering)
register struct proc *p;
register struct pgrp *pgrp;
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index ef400770e20a..1e205eecd5ae 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -56,6 +56,7 @@ struct args {
};
/* ARGSUSED */
+int
getpid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -70,6 +71,7 @@ getpid(p, uap, retval)
}
/* ARGSUSED */
+int
getppid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -81,6 +83,7 @@ getppid(p, uap, retval)
}
/* Get process group ID; note that POSIX getpgrp takes no parameter */
+int
getpgrp(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -92,6 +95,7 @@ getpgrp(p, uap, retval)
}
/* ARGSUSED */
+int
getuid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -106,6 +110,7 @@ getuid(p, uap, retval)
}
/* ARGSUSED */
+int
geteuid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -117,6 +122,7 @@ geteuid(p, uap, retval)
}
/* ARGSUSED */
+int
getgid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -136,6 +142,7 @@ getgid(p, uap, retval)
* correctly in a library function.
*/
/* ARGSUSED */
+int
getegid(p, uap, retval)
struct proc *p;
struct args *uap;
@@ -150,6 +157,7 @@ struct getgroups_args {
u_int gidsetsize;
gid_t *gidset;
};
+int
getgroups(p, uap, retval)
struct proc *p;
register struct getgroups_args *uap;
@@ -174,6 +182,7 @@ getgroups(p, uap, retval)
}
/* ARGSUSED */
+int
setsid(p, uap, retval)
register struct proc *p;
struct args *uap;
@@ -207,6 +216,7 @@ struct setpgid_args {
int pgid; /* target pgrp id */
};
/* ARGSUSED */
+int
setpgid(curp, uap, retval)
struct proc *curp;
register struct setpgid_args *uap;
@@ -239,6 +249,7 @@ struct setuid_args {
uid_t uid;
};
/* ARGSUSED */
+int
setuid(p, uap, retval)
struct proc *p;
struct setuid_args *uap;
@@ -271,6 +282,7 @@ struct seteuid_args {
uid_t euid;
};
/* ARGSUSED */
+int
seteuid(p, uap, retval)
struct proc *p;
struct seteuid_args *uap;
@@ -298,6 +310,7 @@ struct setgid_args {
gid_t gid;
};
/* ARGSUSED */
+int
setgid(p, uap, retval)
struct proc *p;
struct setgid_args *uap;
@@ -322,6 +335,7 @@ struct setegid_args {
gid_t egid;
};
/* ARGSUSED */
+int
setegid(p, uap, retval)
struct proc *p;
struct setegid_args *uap;
@@ -346,6 +360,7 @@ struct setgroups_args {
gid_t *gidset;
};
/* ARGSUSED */
+int
setgroups(p, uap, retval)
struct proc *p;
struct setgroups_args *uap;
@@ -374,6 +389,7 @@ struct setreuid_args {
int euid;
};
/* ARGSUSED */
+int
osetreuid(p, uap, retval)
register struct proc *p;
struct setreuid_args *uap;
@@ -401,6 +417,7 @@ struct setregid_args {
int egid;
};
/* ARGSUSED */
+int
osetregid(p, uap, retval)
register struct proc *p;
struct setregid_args *uap;
@@ -427,6 +444,7 @@ osetregid(p, uap, retval)
/*
* Check if gid is a member of the group set.
*/
+int
groupmember(gid, cred)
gid_t gid;
register struct ucred *cred;
@@ -447,6 +465,7 @@ groupmember(gid, cred)
* indicating use of super-powers.
* Returns 0 or error.
*/
+int
suser(cred, acflag)
struct ucred *cred;
short *acflag;
@@ -477,6 +496,7 @@ crget()
* Free a cred structure.
* Throws away space when ref count gets to 0.
*/
+void
crfree(cr)
struct ucred *cr;
{
@@ -529,6 +549,7 @@ struct getlogin_args {
u_int namelen;
};
/* ARGSUSED */
+int
getlogin(p, uap, retval)
struct proc *p;
struct getlogin_args *uap;
@@ -548,6 +569,7 @@ struct setlogin_args {
char *namebuf;
};
/* ARGSUSED */
+int
setlogin(p, uap, retval)
struct proc *p;
struct setlogin_args *uap;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 68e9dfbc86de..e38471a41e5b 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -39,6 +39,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
@@ -55,6 +56,7 @@ struct getpriority_args {
int which;
int who;
};
+int
getpriority(curp, uap, retval)
struct proc *curp;
register struct getpriority_args *uap;
@@ -114,6 +116,7 @@ struct setpriority_args {
int prio;
};
/* ARGSUSED */
+int
setpriority(curp, uap, retval)
struct proc *curp;
register struct setpriority_args *uap;
@@ -167,6 +170,7 @@ setpriority(curp, uap, retval)
return (error);
}
+int
donice(curp, chgp, n)
register struct proc *curp, *chgp;
register int n;
@@ -194,6 +198,7 @@ struct setrlimit_args {
struct orlimit *lim;
};
/* ARGSUSED */
+int
osetrlimit(p, uap, retval)
struct proc *p;
register struct setrlimit_args *uap;
@@ -216,6 +221,7 @@ struct getrlimit_args {
struct orlimit *rlp;
};
/* ARGSUSED */
+int
ogetrlimit(p, uap, retval)
struct proc *p;
register struct getrlimit_args *uap;
@@ -240,6 +246,7 @@ struct __setrlimit_args {
struct rlimit *lim;
};
/* ARGSUSED */
+int
setrlimit(p, uap, retval)
struct proc *p;
register struct __setrlimit_args *uap;
@@ -254,13 +261,13 @@ setrlimit(p, uap, retval)
return (dosetrlimit(p, uap->which, &alim));
}
+int
dosetrlimit(p, which, limp)
struct proc *p;
u_int which;
struct rlimit *limp;
{
register struct rlimit *alimp;
- extern unsigned maxdmap;
int error;
if (which >= RLIM_NLIMITS)
@@ -282,17 +289,17 @@ dosetrlimit(p, which, limp)
switch (which) {
case RLIMIT_DATA:
- if (limp->rlim_cur > maxdmap)
- limp->rlim_cur = maxdmap;
- if (limp->rlim_max > maxdmap)
- limp->rlim_max = maxdmap;
+ if (limp->rlim_cur > MAXDSIZ)
+ limp->rlim_cur = MAXDSIZ;
+ if (limp->rlim_max > MAXDSIZ)
+ limp->rlim_max = MAXDSIZ;
break;
case RLIMIT_STACK:
- if (limp->rlim_cur > maxdmap)
- limp->rlim_cur = maxdmap;
- if (limp->rlim_max > maxdmap)
- limp->rlim_max = maxdmap;
+ if (limp->rlim_cur > MAXSSIZ)
+ limp->rlim_cur = MAXSSIZ;
+ if (limp->rlim_max > MAXSSIZ)
+ limp->rlim_max = MAXSSIZ;
/*
* Stack is allocated to the max at exec time with only
* "rlim_cur" bytes accessible. If stack limit is going
@@ -342,6 +349,7 @@ struct __getrlimit_args {
struct rlimit *rlp;
};
/* ARGSUSED */
+int
getrlimit(p, uap, retval)
struct proc *p;
register struct __getrlimit_args *uap;
@@ -358,6 +366,7 @@ getrlimit(p, uap, retval)
* Transform the running time and tick information in proc p into user,
* system, and interrupt time usage.
*/
+void
calcru(p, up, sp, ip)
register struct proc *p;
register struct timeval *up;
@@ -415,6 +424,7 @@ struct getrusage_args {
struct rusage *rusage;
};
/* ARGSUSED */
+int
getrusage(p, uap, retval)
register struct proc *p;
register struct getrusage_args *uap;
@@ -440,6 +450,7 @@ getrusage(p, uap, retval)
sizeof (struct rusage)));
}
+void
ruadd(ru, ru2)
register struct rusage *ru, *ru2;
{
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 3dcff922c399..f778c364d670 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -62,6 +62,10 @@
#include <vm/vm.h>
#include <sys/user.h> /* for coredump */
+void setsigvec __P((struct proc *, int, struct sigaction *));
+void stop __P((struct proc *));
+void sigexit __P((struct proc *, int));
+
/*
* Can process p, with pcred pc, send the signal signum to process q?
*/
@@ -79,6 +83,7 @@ struct sigaction_args {
struct sigaction *osa;
};
/* ARGSUSED */
+int
sigaction(p, uap, retval)
struct proc *p;
register struct sigaction_args *uap;
@@ -119,6 +124,7 @@ sigaction(p, uap, retval)
return (0);
}
+void
setsigvec(p, signum, sa)
register struct proc *p;
int signum;
@@ -237,6 +243,7 @@ struct sigprocmask_args {
int how;
sigset_t mask;
};
+int
sigprocmask(p, uap, retval)
register struct proc *p;
struct sigprocmask_args *uap;
@@ -272,6 +279,7 @@ struct sigpending_args {
int dummy;
};
/* ARGSUSED */
+int
sigpending(p, uap, retval)
struct proc *p;
struct sigpending_args *uap;
@@ -292,6 +300,7 @@ struct osigvec_args {
struct sigvec *osv;
};
/* ARGSUSED */
+int
osigvec(p, uap, retval)
struct proc *p;
register struct osigvec_args *uap;
@@ -348,6 +357,7 @@ osigvec(p, uap, retval)
struct osigblock_args {
int mask;
};
+int
osigblock(p, uap, retval)
register struct proc *p;
struct osigblock_args *uap;
@@ -364,6 +374,7 @@ osigblock(p, uap, retval)
struct osigsetmask_args {
int mask;
};
+int
osigsetmask(p, uap, retval)
struct proc *p;
struct osigsetmask_args *uap;
@@ -387,6 +398,7 @@ struct sigsuspend_args {
sigset_t mask;
};
/* ARGSUSED */
+int
sigsuspend(p, uap, retval)
register struct proc *p;
struct sigsuspend_args *uap;
@@ -416,6 +428,7 @@ struct osigstack_args {
struct sigstack *oss;
};
/* ARGSUSED */
+int
osigstack(p, uap, retval)
struct proc *p;
register struct osigstack_args *uap;
@@ -447,6 +460,7 @@ struct sigaltstack_args {
struct sigaltstack *oss;
};
/* ARGSUSED */
+int
sigaltstack(p, uap, retval)
struct proc *p;
register struct sigaltstack_args *uap;
@@ -485,6 +499,7 @@ struct kill_args {
int signum;
};
/* ARGSUSED */
+int
kill(cp, uap, retval)
register struct proc *cp;
register struct kill_args *uap;
@@ -522,6 +537,7 @@ struct okillpg_args {
int signum;
};
/* ARGSUSED */
+int
okillpg(p, uap, retval)
struct proc *p;
register struct okillpg_args *uap;
@@ -538,6 +554,7 @@ okillpg(p, uap, retval)
* Common code for kill process group/broadcast kill.
* cp is calling process.
*/
+int
killpg1(cp, signum, pgid, all)
register struct proc *cp;
int signum, pgid, all;
@@ -864,6 +881,7 @@ out:
* while (signum = CURSIG(curproc))
* postsig(signum);
*/
+int
issignal(p)
register struct proc *p;
{
@@ -1004,6 +1022,7 @@ issignal(p)
* via wakeup. Signals are handled elsewhere. The process must not be
* on the run queue.
*/
+void
stop(p)
register struct proc *p;
{
@@ -1085,6 +1104,7 @@ postsig(signum)
/*
* Kill the current process for stated reason.
*/
+void
killproc(p, why)
struct proc *p;
char *why;
@@ -1103,6 +1123,7 @@ killproc(p, why)
* If dumping core, save the signal number for the debugger. Calls exit and
* does not return.
*/
+void
sigexit(p, signum)
register struct proc *p;
int signum;
@@ -1122,6 +1143,7 @@ sigexit(p, signum)
* Dump core, into a file named "progname.core", unless the process was
* setuid/setgid.
*/
+int
coredump(p)
register struct proc *p;
{
@@ -1186,6 +1208,7 @@ struct nosys_args {
int dummy;
};
/* ARGSUSED */
+int
nosys(p, args, retval)
struct proc *p;
struct nosys_args *args;
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 5c12afcba33b..8eb4dee65a7c 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -44,6 +44,7 @@
#include <sys/malloc.h>
#include <sys/queue.h>
+int
uiomove(cp, n, uio)
register caddr_t cp;
register int n;
@@ -101,6 +102,7 @@ uiomove(cp, n, uio)
/*
* Give next character to user as result of read.
*/
+int
ureadc(c, uio)
register int c;
register struct uio *uio;
@@ -143,6 +145,7 @@ again:
/*
* Get next character written in by user from uio.
*/
+int
uwritec(uio)
struct uio *uio;
{
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 1c2a578f3036..a299dbaf7aaa 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -600,6 +600,7 @@ mi_switch()
* Initialize the (doubly-linked) run queues
* to be empty.
*/
+void
rqinit()
{
register int i;
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index ae16decff813..7e5f196dfb09 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -89,7 +89,7 @@ __sysctl(p, uap, retval)
int *retval;
{
int error, dolock = 1;
- u_int savelen, oldlen = 0;
+ u_int savelen = 0, oldlen = 0;
sysctlfn *fn;
int name[CTL_MAXNAME];
@@ -181,6 +181,7 @@ int securelevel;
/*
* kernel related system variables.
*/
+int
kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
@@ -271,6 +272,7 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
/*
* hardware related system variables.
*/
+int
hw_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
@@ -356,6 +358,7 @@ debug_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
* Validate parameters and get old / set new parameters
* for an integer-valued sysctl function.
*/
+int
sysctl_int(oldp, oldlenp, newp, newlen, valp)
void *oldp;
size_t *oldlenp;
@@ -380,6 +383,7 @@ sysctl_int(oldp, oldlenp, newp, newlen, valp)
/*
* As above, but read-only.
*/
+int
sysctl_rdint(oldp, oldlenp, newp, val)
void *oldp;
size_t *oldlenp;
@@ -402,6 +406,7 @@ sysctl_rdint(oldp, oldlenp, newp, val)
* Validate parameters and get old / set new parameters
* for a string-valued sysctl function.
*/
+int
sysctl_string(oldp, oldlenp, newp, newlen, str, maxlen)
void *oldp;
size_t *oldlenp;
@@ -431,6 +436,7 @@ sysctl_string(oldp, oldlenp, newp, newlen, str, maxlen)
/*
* As above, but read-only.
*/
+int
sysctl_rdstring(oldp, oldlenp, newp, str)
void *oldp;
size_t *oldlenp;
@@ -454,6 +460,7 @@ sysctl_rdstring(oldp, oldlenp, newp, str)
* Validate parameters and get old / set new parameters
* for a structure oriented sysctl function.
*/
+int
sysctl_struct(oldp, oldlenp, newp, newlen, sp, len)
void *oldp;
size_t *oldlenp;
@@ -481,6 +488,7 @@ sysctl_struct(oldp, oldlenp, newp, newlen, sp, len)
* Validate parameters and get old parameters
* for a structure oriented sysctl function.
*/
+int
sysctl_rdstruct(oldp, oldlenp, newp, sp, len)
void *oldp;
size_t *oldlenp;
@@ -502,6 +510,7 @@ sysctl_rdstruct(oldp, oldlenp, newp, sp, len)
/*
* Get file structures.
*/
+int
sysctl_file(where, sizep)
char *where;
size_t *sizep;
@@ -553,6 +562,7 @@ sysctl_file(where, sizep)
*/
#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
+int
sysctl_doproc(name, namelen, where, sizep)
int *name;
u_int namelen;
@@ -718,6 +728,7 @@ struct getkerninfo_args {
int arg;
};
+int
ogetkerninfo(p, uap, retval)
struct proc *p;
register struct getkerninfo_args *uap;
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
index f42900cb75d2..4017e966ee69 100644
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -511,6 +511,7 @@ statclock(frame)
/*
* Return information about system clocks.
*/
+int
sysctl_clockrate(where, sizep)
register char *where;
size_t *sizep;
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 4dadcb8e0b9d..2e86376de468 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -42,6 +42,10 @@
#include <machine/cpu.h>
+void timevaladd __P((struct timeval *, struct timeval *));
+void timevalsub __P((struct timeval *, struct timeval *));
+void timevalfix __P((struct timeval *));
+
/*
* Time of day and interval timer support.
*
@@ -57,6 +61,7 @@ struct gettimeofday_args {
struct timezone *tzp;
};
/* ARGSUSED */
+int
gettimeofday(p, uap, retval)
struct proc *p;
register struct gettimeofday_args *uap;
@@ -82,6 +87,7 @@ struct settimeofday_args {
struct timezone *tzp;
};
/* ARGSUSED */
+int
settimeofday(p, uap, retval)
struct proc *p;
struct settimeofday_args *uap;
@@ -131,6 +137,7 @@ struct adjtime_args {
struct timeval *olddelta;
};
/* ARGSUSED */
+int
adjtime(p, uap, retval)
struct proc *p;
register struct adjtime_args *uap;
@@ -209,6 +216,7 @@ struct getitimer_args {
struct itimerval *itv;
};
/* ARGSUSED */
+int
getitimer(p, uap, retval)
struct proc *p;
register struct getitimer_args *uap;
@@ -246,6 +254,7 @@ struct setitimer_args {
struct itimerval *itv, *oitv;
};
/* ARGSUSED */
+int
setitimer(p, uap, retval)
struct proc *p;
register struct setitimer_args *uap;
@@ -322,6 +331,7 @@ realitexpire(arg)
* fix it to have at least minimal value (i.e. if it is less
* than the resolution of the clock, round it up.)
*/
+int
itimerfix(tv)
struct timeval *tv;
{
@@ -344,6 +354,7 @@ itimerfix(tv)
* that it is called in a context where the timers
* on which it is operating cannot change in value.
*/
+int
itimerdecr(itp, usec)
register struct itimerval *itp;
int usec;
@@ -383,6 +394,7 @@ expire:
* it just gets very confused in this case.
* Caveat emptor.
*/
+void
timevaladd(t1, t2)
struct timeval *t1, *t2;
{
@@ -392,6 +404,7 @@ timevaladd(t1, t2)
timevalfix(t1);
}
+void
timevalsub(t1, t2)
struct timeval *t1, *t2;
{
@@ -401,6 +414,7 @@ timevalsub(t1, t2)
timevalfix(t1);
}
+void
timevalfix(t1)
struct timeval *t1;
{
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index f42900cb75d2..4017e966ee69 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -511,6 +511,7 @@ statclock(frame)
/*
* Return information about system clocks.
*/
+int
sysctl_clockrate(where, sizep)
register char *where;
size_t *sizep;
diff --git a/sys/kern/kern_xxx.c b/sys/kern/kern_xxx.c
index 64fac9105d7f..656430d5b104 100644
--- a/sys/kern/kern_xxx.c
+++ b/sys/kern/kern_xxx.c
@@ -40,11 +40,16 @@
#include <sys/reboot.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
+#include <sys/utsname.h>
+
+char domainname[MAXHOSTNAMELEN];
+int domainnamelen;
struct reboot_args {
int opt;
};
/* ARGSUSED */
+int
reboot(p, uap, retval)
struct proc *p;
struct reboot_args *uap;
@@ -65,6 +70,7 @@ struct gethostname_args {
u_int len;
};
/* ARGSUSED */
+int
ogethostname(p, uap, retval)
struct proc *p;
struct gethostname_args *uap;
@@ -81,6 +87,7 @@ struct sethostname_args {
u_int len;
};
/* ARGSUSED */
+int
osethostname(p, uap, retval)
struct proc *p;
register struct sethostname_args *uap;
@@ -101,6 +108,7 @@ struct gethostid_args {
int dummy;
};
/* ARGSUSED */
+int
ogethostid(p, uap, retval)
struct proc *p;
struct gethostid_args *uap;
@@ -117,6 +125,7 @@ struct sethostid_args {
long hostid;
};
/* ARGSUSED */
+int
osethostid(p, uap, retval)
struct proc *p;
struct sethostid_args *uap;
@@ -130,9 +139,130 @@ osethostid(p, uap, retval)
return (0);
}
+int
oquota()
{
return (ENOSYS);
}
#endif /* COMPAT_43 */
+
+void
+shutdown_nice(void)
+{
+ register struct proc *p;
+
+ /* Send a signal to init(8) and have it shutdown the world */
+ p = pfind(1);
+ psignal(p, SIGINT);
+
+ return;
+}
+
+
+struct uname_args {
+ struct utsname *name;
+};
+
+/* ARGSUSED */
+int
+uname(p, uap, retval)
+ struct proc *p;
+ struct uname_args *uap;
+ int *retval;
+{
+ int name;
+ int len;
+ int rtval;
+ char *s, *us;
+
+ name = KERN_OSTYPE;
+ len = sizeof uap->name->sysname;
+ rtval = kern_sysctl(&name, 1, uap->name->sysname, &len, 0, 0, p);
+ if( rtval) return rtval;
+ subyte( uap->name->sysname + sizeof(uap->name->sysname) - 1, 0);
+
+ name = KERN_HOSTNAME;
+ len = sizeof uap->name->nodename;
+ rtval = kern_sysctl(&name, 1, uap->name->nodename, &len, 0, 0, p);
+ if( rtval) return rtval;
+ subyte( uap->name->nodename + sizeof(uap->name->nodename) - 1, 0);
+
+ name = KERN_OSRELEASE;
+ len = sizeof uap->name->release;
+ rtval = kern_sysctl(&name, 1, uap->name->release, &len, 0, 0, p);
+ if( rtval) return rtval;
+ subyte( uap->name->release + sizeof(uap->name->release) - 1, 0);
+
+/*
+ name = KERN_VERSION;
+ len = sizeof uap->name->version;
+ rtval = kern_sysctl(&name, 1, uap->name->version, &len, 0, 0, p);
+ if( rtval) return rtval;
+ subyte( uap->name->version + sizeof(uap->name->version) - 1, 0);
+*/
+
+/*
+ * this stupid hackery to make the version field look like FreeBSD 1.1
+ */
+ for(s = version; *s && *s != '#'; s++);
+
+ for(us = uap->name->version; *s && *s != ':'; s++) {
+ rtval = subyte( us++, *s);
+ if( rtval)
+ return rtval;
+ }
+ rtval = subyte( us++, 0);
+ if( rtval)
+ return rtval;
+
+ name = HW_MACHINE;
+ len = sizeof uap->name->machine;
+ rtval = hw_sysctl(&name, 1, uap->name->machine, &len, 0, 0, p);
+ if( rtval) return rtval;
+ subyte( uap->name->machine + sizeof(uap->name->machine) - 1, 0);
+
+ return 0;
+}
+
+struct getdomainname_args {
+ char *domainname;
+ u_int len;
+};
+
+/* ARGSUSED */
+int
+getdomainname(p, uap, retval)
+ struct proc *p;
+ struct getdomainname_args *uap;
+ int *retval;
+{
+ if (uap->len > domainnamelen + 1)
+ uap->len = domainnamelen + 1;
+ return (copyout((caddr_t)domainname, (caddr_t)uap->domainname, uap->len));
+}
+
+struct setdomainname_args {
+ char *domainname;
+ u_int len;
+};
+
+/* ARGSUSED */
+int
+setdomainname(p, uap, retval)
+ struct proc *p;
+ struct setdomainname_args *uap;
+ int *retval;
+{
+ int error;
+
+ if (error = suser(p->p_ucred, &p->p_acflag))
+ return (error);
+ if (uap->len > sizeof (domainname) - 1)
+ return EINVAL;
+ domainnamelen = uap->len;
+ error = copyin((caddr_t)uap->domainname, domainname, uap->len);
+ domainname[domainnamelen] = 0;
+ return (error);
+}
+
diff --git a/sys/kern/subr_clist.c b/sys/kern/subr_clist.c
index fe8f000f87d5..acbb7f11cf13 100644
--- a/sys/kern/subr_clist.c
+++ b/sys/kern/subr_clist.c
@@ -1,159 +1,387 @@
-/*-
- * Copyright (c) 1982, 1986, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (C) 1994, David Greenman. This software may be used, modified,
+ * copied, distributed, and sold, in both source and binary form provided
+ * that the above copyright and these terms are retained. Under no
+ * circumstances is the author responsible for the proper functioning
+ * of this software, nor does the author assume any responsibility
+ * for damages incurred with its use.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: @(#)tty_subr.c 8.2 (Berkeley) 9/5/93
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
+#include <sys/clist.h>
+#include <sys/malloc.h>
char cwaiting;
-struct cblock *cfree, *cfreelist;
-int cfreecount, nclist;
+struct cblock *cfreelist = 0;
+int cfreecount, nclist = 256;
void
clist_init()
{
+ int i;
+ struct cblock *tmp;
- /*
- * Body deleted.
- */
+ for (i = 0; i < nclist; ++i) {
+ tmp = malloc(sizeof(struct cblock), M_TTYS, M_NOWAIT);
+ if (!tmp)
+ panic("clist_init: could not allocate cblock");
+ bzero((char *)tmp, sizeof(struct cblock));
+ tmp->c_next = cfreelist;
+ cfreelist = tmp;
+ cfreecount += CBSIZE;
+ }
return;
}
-getc(a1)
- struct clist *a1;
+/*
+ * Get a character from head of clist.
+ */
+int
+getc(clistp)
+ struct clist *clistp;
{
+ int chr = -1;
+ int s;
+ struct cblock *cblockp;
- /*
- * Body deleted.
- */
- return ((char)0);
-}
+ s = spltty();
-q_to_b(a1, a2, a3)
- struct clist *a1;
- char *a2;
- int a3;
-{
+ /* If there are characters in the list, get one */
+ if (clistp->c_cc) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ chr = (u_char)*clistp->c_cf;
+#if 0
+ /*
+ * If this char is quoted, set the flag.
+ */
+ if (isset(cblockp->c_quote, clistp->c_cf - (char *)cblockp->c_info))
+ chr |= TTY_QUOTE;
+#endif
+ clistp->c_cf++;
+ clistp->c_cc--;
+ if ((clistp->c_cf >= (char *)(cblockp+1)) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
- /*
- * Body deleted.
- */
- return (0);
+ splx(s);
+ return (chr);
}
-ndqb(a1, a2)
- struct clist *a1;
- int a2;
+/*
+ * Copy 'amount' of chars, beginning at head of clist 'clistp' to
+ * destination linear buffer 'dest'.
+ */
+int
+q_to_b(clistp, dest, amount)
+ struct clist *clistp;
+ char *dest;
+ int amount;
{
+ struct cblock *cblockp;
+ struct cblock *cblockn;
+ char *dest_orig = dest;
+ int numc;
+ int s;
- /*
- * Body deleted.
- */
- return (0);
+ s = spltty();
+
+ while (clistp && amount && (clistp->c_cc > 0)) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ cblockn = cblockp + 1; /* pointer arithmetic! */
+ numc = min(amount, (char *)cblockn - clistp->c_cf);
+ numc = min(numc, clistp->c_cc);
+ bcopy(clistp->c_cf, dest, numc);
+ amount -= numc;
+ clistp->c_cf += numc;
+ clistp->c_cc -= numc;
+ dest += numc;
+ if ((clistp->c_cf >= (char *)cblockn) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
+
+ splx(s);
+ return (dest - dest_orig);
}
+/*
+ * Flush 'amount' of chars, beginning at head of clist 'clistp'.
+ */
void
-ndflush(a1, a2)
- struct clist *a1;
- int a2;
+ndflush(clistp, amount)
+ struct clist *clistp;
+ int amount;
{
+ struct cblock *cblockp;
+ struct cblock *cblockn;
+ int numc;
+ int s;
- /*
- * Body deleted.
- */
+ s = spltty();
+
+ while (amount && (clistp->c_cc > 0)) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ cblockn = cblockp + 1; /* pointer arithmetic! */
+ numc = min(amount, (char *)cblockn - clistp->c_cf);
+ numc = min(numc, clistp->c_cc);
+ amount -= numc;
+ clistp->c_cf += numc;
+ clistp->c_cc -= numc;
+ if ((clistp->c_cf >= (char *)cblockn) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
+
+ splx(s);
return;
}
-putc(a1, a2)
- char a1;
- struct clist *a2;
+int
+putc(chr, clistp)
+ int chr;
+ struct clist *clistp;
{
+ struct cblock *cblockp;
+ struct cblock *bclockn;
+ int s;
- /*
- * Body deleted.
- */
+ s = spltty();
+
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+
+ if (clistp->c_cl == NULL) {
+ if (cfreelist) {
+ cblockp = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cf = clistp->c_cl = cblockp->c_info;
+ clistp->c_cc = 0;
+ } else {
+ splx(s);
+ return (-1);
+ }
+ } else {
+ if (((long)clistp->c_cl & CROUND) == 0) {
+ if (cfreelist) {
+ cblockp = (cblockp-1)->c_next = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cl = cblockp->c_info;
+ } else {
+ splx(s);
+ return (-1);
+ }
+ }
+ }
+
+#if 0
+ if (chr & TTY_QUOTE)
+ setbit(cblockp->c_quote, clistp->c_cl - (char *)cblockp->c_info);
+#endif
+ *clistp->c_cl++ = chr;
+ clistp->c_cc++;
+
+ splx(s);
return (0);
}
-b_to_q(a1, a2, a3)
- char *a1;
- int a2;
- struct clist *a3;
+/*
+ * Copy data from linear buffer to clist chain.
+ */
+int
+b_to_q(src, amount, clistp)
+ char *src;
+ int amount;
+ struct clist *clistp;
{
+ struct cblock *cblockp;
+ struct cblock *bclockn;
+ int s;
+ int numc;
+
+ s = spltty();
/*
- * Body deleted.
+ * If there are no cblocks assigned to this clist yet,
+ * then get one.
*/
- return (0);
+ if (clistp->c_cl == NULL) {
+ if (cfreelist) {
+ cblockp = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cf = clistp->c_cl = cblockp->c_info;
+ clistp->c_cc = 0;
+ } else {
+ splx(s);
+ return (amount);
+ }
+ } else {
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+ }
+
+ while (amount) {
+ /*
+ * Get another cblock if needed.
+ */
+ if (((long)clistp->c_cl & CROUND) == 0) {
+ if (cfreelist) {
+ cblockp = (cblockp-1)->c_next = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cl = cblockp->c_info;
+ } else {
+ splx(s);
+ return (amount);
+ }
+ }
+
+ /*
+ * Copy a chunk of the linear buffer up to the end
+ * of this cblock.
+ */
+ cblockp += 1;
+ numc = min(amount, (char *)(cblockp) - clistp->c_cl);
+ bcopy(src, clistp->c_cl, numc);
+
+ /*
+ * Clear quote bits.
+ */
+
+ /*
+ * ...and update pointer for the next chunk.
+ */
+ src += numc;
+ clistp->c_cl += numc;
+ clistp->c_cc += numc;
+ amount -= numc;
+ }
+
+ splx(s);
+ return (amount);
}
char *
-nextc(a1, a2, a3)
- struct clist *a1;
- char *a2;
- int *a3;
+nextc(clistp, cp, dst)
+ struct clist *clistp;
+ char *cp;
+ int *dst;
{
+ struct cblock *cblockp;
- /*
- * Body deleted.
- */
- return ((char *)0);
+ ++cp;
+ if (clistp->c_cc && (cp != clistp->c_cl)) {
+ if (((long)cp & CROUND) == 0)
+ cp = ((struct cblock *)cp - 1)->c_next->c_info;
+ cblockp = (struct cblock *)((long)cp & ~CROUND);
+#if 0
+ *dst = *cp | (isset(cblockp->c_quote, cp - (char *)cblockp->c_info) ? TTY_QUOTE : 0);
+#endif
+ *dst = (u_char)*cp;
+ return (cp);
+ }
+
+ return (NULL);
}
-unputc(a1)
- struct clist *a1;
+int
+unputc(clistp)
+ struct clist *clistp;
{
+ struct cblock *cblockp = 0, *cbp = 0;
+ int s;
+ int chr = -1;
+
+
+ s = spltty();
+
+ if (clistp->c_cc) {
+ --clistp->c_cc;
+ chr = (u_char)*--clistp->c_cl;
+ /*
+ * Get the quote flag and 'unput' it, too.
+ */
+
+ /* XXX write me! */
+
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+
+ /*
+ * If all of the characters have been unput in this
+ * cblock, the find the previous one and free this
+ * one.
+ */
+ if (clistp->c_cc && (clistp->c_cl <= (char *)cblockp->c_info)) {
+ cbp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+
+ while (cbp->c_next != cblockp)
+ cbp = cbp->c_next;
+
+ clistp->c_cl = (char *)(cbp+1);
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ cbp->c_next = NULL;
+ }
+ }
/*
- * Body deleted.
+ * If there are no more characters on the list, then
+ * free the last cblock.
*/
- return ((char)0);
+ if ((clistp->c_cc == 0) && clistp->c_cl) {
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+
+ splx(s);
+ return (chr);
}
void
-catq(a1, a2)
- struct clist *a1, *a2;
+catq(src_clistp, dest_clistp)
+ struct clist *src_clistp, *dest_clistp;
{
+ char buffer[CBSIZE*2];
+ int amount;
+
+ while (src_clistp->c_cc) {
+ amount = q_to_b(src_clistp, buffer, sizeof(buffer));
+ b_to_q(buffer, amount, dest_clistp);
+ }
- /*
- * Body deleted.
- */
return;
}
diff --git a/sys/kern/subr_disklabel.c b/sys/kern/subr_disklabel.c
index 78dede4da773..cc0f28d37f15 100644
--- a/sys/kern/subr_disklabel.c
+++ b/sys/kern/subr_disklabel.c
@@ -43,6 +43,7 @@
#include <sys/buf.h>
#include <sys/disklabel.h>
#include <sys/syslog.h>
+#include <sys/dkbad.h>
/*
* Seek sort for disks. We depend on the driver which calls us using b_resid
@@ -153,14 +154,19 @@ insert: bp->b_actf = bq->b_actf;
* string on failure.
*/
char *
-readdisklabel(dev, strat, lp)
+readdisklabel(dev, strat, lp, dp, bdp)
dev_t dev;
int (*strat)();
register struct disklabel *lp;
+ struct dos_partition *dp;
+ struct dkbad *bdp;
{
register struct buf *bp;
struct disklabel *dlp;
char *msg = NULL;
+ int dospartoff;
+ int i;
+ int cyl;
if (lp->d_secperunit == 0)
lp->d_secperunit = 0x1fffffff;
@@ -170,11 +176,61 @@ readdisklabel(dev, strat, lp)
lp->d_partitions[0].p_offset = 0;
bp = geteblk((int)lp->d_secsize);
- bp->b_dev = dev;
- bp->b_blkno = LABELSECTOR;
+ /* do dos partitions in the process of getting disklabel? */
+ dospartoff = 0;
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ struct dos_partition *ap;
+
+ /* read master boot record */
+ bp->b_dev = dev;
+ bp->b_blkno = DOSBBSECTOR;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylinder = DOSBBSECTOR / lp->d_secpercyl;
+ (*strat)(bp);
+
+ /* if successful, wander through dos partition table */
+ if (biowait(bp)) {
+ msg = "dos partition I/O error";
+ goto done;
+ } else {
+ /* XXX how do we check veracity/bounds of this? */
+ bcopy(bp->b_un.b_addr + DOSPARTOFF, dp,
+ NDOSPART * sizeof(*dp));
+ for (i = 0; i < NDOSPART; i++, dp++)
+ /* is this ours? */
+ if (dp->dp_size &&
+ dp->dp_typ == DOSPTYP_386BSD
+ && dospartoff == 0) {
+
+ /* need sector address for SCSI/IDE,
+ cylinder for ESDI/ST506/RLL */
+ dospartoff = dp->dp_start;
+ cyl = DPCYL(dp->dp_scyl, dp->dp_ssect);
+
+ /* update disklabel with details */
+ lp->d_partitions[0].p_size =
+ dp->dp_size;
+ lp->d_partitions[0].p_offset =
+ dp->dp_start;
+ lp->d_ntracks = dp->dp_ehd + 1;
+ lp->d_nsectors = DPSECT(dp->dp_esect);
+ lp->d_subtype |= (lp->d_subtype & 3)
+ + i | DSTYPE_INDOSPART;
+ lp->d_secpercyl = lp->d_ntracks *
+ lp->d_nsectors;
+ }
+ }
+
+ }
+
+ /* next, dig out disk label */
+ bp->b_blkno = dospartoff + LABELSECTOR;
+ bp->b_dev = dev;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
- bp->b_cylinder = LABELSECTOR / lp->d_secpercyl;
+ bp->b_cylinder = cyl;
(*strat)(bp);
if (biowait(bp))
msg = "I/O error";
@@ -194,6 +250,46 @@ readdisklabel(dev, strat, lp)
break;
}
}
+ if (msg)
+ goto done;
+
+ /* obtain bad sector table if requested and present */
+ if (bdp && (lp->d_flags & D_BADSECT)) {
+ struct dkbad *db;
+
+ printf("d_secsize: %d\n", lp->d_secsize);
+ i = 0;
+ do {
+ /* read a bad sector table */
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
+ if (lp->d_secsize > DEV_BSIZE)
+ bp->b_blkno *= lp->d_secsize / DEV_BSIZE;
+ else
+ bp->b_blkno /= DEV_BSIZE / lp->d_secsize;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_cylinder = lp->d_ncylinders - 1;
+ (*strat)(bp);
+
+ /* if successful, validate, otherwise try another */
+ if (biowait(bp)) {
+ msg = "bad sector table I/O error";
+ } else {
+ db = (struct dkbad *)(bp->b_un.b_addr);
+#define DKBAD_MAGIC 0x4321
+ if (db->bt_mbz == 0
+ && db->bt_flag == DKBAD_MAGIC) {
+ msg = NULL;
+ *bdp = *db;
+ break;
+ } else
+ msg = "bad sector table corrupted";
+ }
+ } while ((bp->b_flags & B_ERROR) && (i += 2) < 10 &&
+ i < lp->d_nsectors);
+ }
+
+done:
bp->b_flags = B_INVAL | B_AGE;
brelse(bp);
return (msg);
@@ -294,6 +390,7 @@ done:
/*
* Compute checksum for disk label.
*/
+int
dkcksum(lp)
register struct disklabel *lp;
{
diff --git a/sys/kern/subr_log.c b/sys/kern/subr_log.c
index f065761d756e..92e4543f38db 100644
--- a/sys/kern/subr_log.c
+++ b/sys/kern/subr_log.c
@@ -59,6 +59,7 @@ struct logsoftc {
int log_open; /* also used in log() */
/*ARGSUSED*/
+int
logopen(dev, flags, mode, p)
dev_t dev;
int flags, mode;
@@ -87,6 +88,7 @@ logopen(dev, flags, mode, p)
}
/*ARGSUSED*/
+int
logclose(dev, flag, mode, p)
dev_t dev;
int flag, mode;
@@ -99,6 +101,7 @@ logclose(dev, flag, mode, p)
}
/*ARGSUSED*/
+int
logread(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -144,6 +147,7 @@ logread(dev, uio, flag)
}
/*ARGSUSED*/
+int
logselect(dev, rw, p)
dev_t dev;
int rw;
@@ -165,6 +169,7 @@ logselect(dev, rw, p)
return (0);
}
+void
logwakeup()
{
struct proc *p;
@@ -185,6 +190,7 @@ logwakeup()
}
/*ARGSUSED*/
+int
logioctl(dev, com, data, flag, p)
dev_t dev;
int com;
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 9f4e2cae857c..c871594221de 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -75,7 +75,8 @@ int tickadj = 30000 / (60 * HZ); /* can adjust 30ms in 60s */
struct timezone tz = { TIMEZONE, DST };
#define NPROC (20 + 16 * MAXUSERS)
int maxproc = NPROC;
-#define NTEXT (80 + NPROC / 8) /* actually the object cache */
+#define NTEXT NPROC
+int vm_cache_max = NTEXT/2 + 16;
#define NVNODE (NPROC + NTEXT + 100)
int desiredvnodes = NVNODE;
int maxfiles = 3 * (NPROC + MAXUSERS) + 80;
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 2adb7793a3c5..5ef4925856f2 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -122,6 +122,10 @@ panic(fmt, va_alist)
if (boothowto & RB_KDB)
kdbpanic();
#endif
+#include "ddb.h"
+#if NDDB > 0
+ Debugger ("panic");
+#endif
boot(bootopt);
}
@@ -508,8 +512,10 @@ putchar(c, flags, tp)
* Scaled down version of sprintf(3).
*/
#ifdef __STDC__
+int
sprintf(char *buf, const char *cfmt, ...)
#else
+int
sprintf(buf, cfmt, va_alist)
char *buf, *cfmt;
#endif
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 4fb81d823cac..efe56b098d23 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -146,6 +146,7 @@ struct profil_args {
u_int scale;
};
/* ARGSUSED */
+int
profil(p, uap, retval)
struct proc *p;
register struct profil_args *uap;
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 9bb38e1e60d3..382416f06e3f 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -41,32 +41,33 @@
* 386 Trap and System call handleing
*/
-#include "isa.h"
-#include "npx.h"
-#include "ddb.h"
-#include "machine/cpu.h"
-#include "machine/psl.h"
-#include "machine/reg.h"
-#include "machine/eflags.h"
-
-#include "param.h"
-#include "systm.h"
-#include "proc.h"
-#include "user.h"
-#include "acct.h"
-#include "kernel.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/kernel.h>
+#include <sys/syscall.h>
#ifdef KTRACE
-#include "ktrace.h"
+#include <sys/ktrace.h>
#endif
-#include "vm/vm_param.h"
-#include "vm/pmap.h"
-#include "vm/vm_map.h"
-#include "vm/vm_user.h"
-#include "vm/vm_page.h"
-#include "sys/vmmeter.h"
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
+#include <machine/cpu.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#include <machine/eflags.h>
+
+#include <machine/trap.h>
-#include "machine/trap.h"
+#include "isa.h"
+#include "npx.h"
+#include "ddb.h"
#ifdef __GNUC__
@@ -84,7 +85,7 @@ void write_gs __P((/* promoted u_short */ int gs));
#endif /* __GNUC__ */
-extern int grow(struct proc *,int);
+extern int grow(struct proc *,u_int);
struct sysent sysent[];
int nsysent;
@@ -139,7 +140,7 @@ trap(frame)
{
register int i;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks = 0;
int ucode, type, code, eva, fault_type;
frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
@@ -177,10 +178,10 @@ copyfault:
return;
}
- syst = p->p_stime;
if (ISPL(frame.tf_cs) == SEL_UPL) {
type |= T_USER;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
+ sticks = p->p_sticks;
}
skiptoswitch:
@@ -210,9 +211,9 @@ skiptoswitch:
case T_ASTFLT|T_USER: /* Allow process switch */
astoff();
cnt.v_soft++;
- if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
+ if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
- p->p_flag &= ~SOWEUPC;
+ p->p_flag &= ~P_OWEUPC;
}
goto out;
@@ -284,7 +285,6 @@ skiptoswitch:
else
ftype = VM_PROT_READ;
- oldflags = p->p_flag;
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
@@ -294,7 +294,7 @@ skiptoswitch:
* Keep swapout from messing with us during this
* critical time.
*/
- p->p_flag |= SLOCK;
+ ++p->p_lock;
/*
* Grow the stack if necessary
@@ -303,8 +303,7 @@ skiptoswitch:
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
goto nogo;
}
}
@@ -332,13 +331,10 @@ skiptoswitch:
if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
VM_PROT_NONE);
- if( ptepg->flags & PG_CLEAN)
- vm_page_free(ptepg);
+ vm_page_free(ptepg);
}
-
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
} else {
/*
* Since we know that kernel virtual address addresses
@@ -482,32 +478,29 @@ nogo:
out:
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
- * swtch()'ed, we might not be on the queue indicated by
+ * If that happened after we setrunqueue ourselves but before we
+ * mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -518,7 +511,7 @@ out:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
}
/*
@@ -546,14 +539,12 @@ int trapwrite(addr)
p = curproc;
vm = p->p_vmspace;
- oldflags = p->p_flag;
- p->p_flag |= SLOCK;
+ ++p->p_lock;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
if (!grow(p, va)) {
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
return (1);
}
}
@@ -579,8 +570,7 @@ int trapwrite(addr)
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
- p->p_flag &= ~SLOCK;
- p->p_flag |= (oldflags & SLOCK);
+ --p->p_lock;
if (rv != KERN_SUCCESS)
return 1;
@@ -603,31 +593,45 @@ syscall(frame)
register int i;
register struct sysent *callp;
register struct proc *p = curproc;
- struct timeval syst;
+ u_quad_t sticks;
int error, opc;
int args[8], rval[2];
- int code;
+ u_int code;
#ifdef lint
r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
- syst = p->p_stime;
+ sticks = p->p_sticks;
if (ISPL(frame.tf_cs) != SEL_UPL)
panic("syscall");
code = frame.tf_eax;
- p->p_regs = (int *)&frame;
+ p->p_md.md_regs = (int *)&frame;
params = (caddr_t)frame.tf_esp + sizeof (int) ;
/*
* Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
*/
opc = frame.tf_eip - 7;
- if (code == 0) {
+ /*
+ * Need to check if this is a 32 bit or 64 bit syscall.
+ */
+ if (code == SYS_syscall) {
+ /*
+ * Code is first argument, followed by actual args.
+ */
code = fuword(params);
params += sizeof (int);
+ } else if (code == SYS___syscall) {
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+ params += sizeof(quad_t);
}
- if (code < 0 || code >= nsysent)
+
+ if (code >= nsysent)
callp = &sysent[0];
else
callp = &sysent[code];
@@ -672,32 +676,29 @@ done:
*/
p = curproc;
while (i = CURSIG(p))
- psig(i);
- p->p_pri = p->p_usrpri;
+ postsig(i);
+ p->p_priority = p->p_usrpri;
if (want_resched) {
int s;
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
- * If that happened after we setrq ourselves but before we
+ * If that happened after we setrunqueue ourselves but before we
* swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
s = splclock();
- setrq(p);
+ setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- swtch();
+ mi_switch();
splx(s);
while (i = CURSIG(p))
- psig(i);
+ postsig(i);
}
if (p->p_stats->p_prof.pr_scale) {
- int ticks;
- struct timeval *tv = &p->p_stime;
+ u_quad_t ticks = p->p_sticks - sticks;
- ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
- (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
if (ticks) {
#ifdef PROFTIMER
extern int profscale;
@@ -708,21 +709,9 @@ done:
#endif
}
}
- curpri = p->p_pri;
+ curpriority = p->p_priority;
#ifdef KTRACE
if (KTRPOINT(p, KTR_SYSRET))
ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
-#ifdef DIAGNOSTICx
-{ extern int _udatasel, _ucodesel;
- if (frame.tf_ss != _udatasel)
- printf("ss %x call %d\n", frame.tf_ss, code);
- if ((frame.tf_cs&0xffff) != _ucodesel)
- printf("cs %x call %d\n", frame.tf_cs, code);
- if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
- printf("eip %x call %d\n", frame.tf_eip, code);
- frame.tf_eip = 0;
- }
-}
-#endif
}
diff --git a/sys/kern/subr_xxx.c b/sys/kern/subr_xxx.c
index c692ec11a3bd..3304d57da6e8 100644
--- a/sys/kern/subr_xxx.c
+++ b/sys/kern/subr_xxx.c
@@ -45,6 +45,7 @@
/*
* Unsupported device function (e.g. writing to read-only device).
*/
+int
enodev()
{
@@ -54,6 +55,7 @@ enodev()
/*
* Unconfigured device function; driver not configured.
*/
+int
enxio()
{
@@ -63,6 +65,7 @@ enxio()
/*
* Unsupported ioctl function.
*/
+int
enoioctl()
{
@@ -74,6 +77,7 @@ enoioctl()
* This is used for an otherwise-reasonable operation
* that is not supported by the current system binary.
*/
+int
enosys()
{
@@ -84,6 +88,7 @@ enosys()
* Return error for operation not supported
* on a specific object or file type.
*/
+int
eopnotsupp()
{
@@ -93,6 +98,7 @@ eopnotsupp()
/*
* Generic null operation, always returns success.
*/
+int
nullop()
{
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index a121209f9fef..919c8664c5d9 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -62,6 +62,7 @@ struct read_args {
u_int nbyte;
};
/* ARGSUSED */
+int
read(p, uap, retval)
struct proc *p;
register struct read_args *uap;
@@ -117,6 +118,7 @@ struct readv_args {
struct iovec *iovp;
u_int iovcnt;
};
+int
readv(p, uap, retval)
struct proc *p;
register struct readv_args *uap;
@@ -158,10 +160,6 @@ readv(p, uap, retval)
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
- if (iov->iov_len < 0) {
- error = EINVAL;
- goto done;
- }
auio.uio_resid += iov->iov_len;
if (auio.uio_resid < 0) {
error = EINVAL;
@@ -207,6 +205,7 @@ struct write_args {
char *buf;
u_int nbyte;
};
+int
write(p, uap, retval)
struct proc *p;
register struct write_args *uap;
@@ -217,6 +216,7 @@ write(p, uap, retval)
struct uio auio;
struct iovec aiov;
long cnt, error = 0;
+ int i;
#ifdef KTRACE
struct iovec ktriov;
#endif
@@ -266,6 +266,7 @@ struct writev_args {
struct iovec *iovp;
u_int iovcnt;
};
+int
writev(p, uap, retval)
struct proc *p;
register struct writev_args *uap;
@@ -307,10 +308,6 @@ writev(p, uap, retval)
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
- if (iov->iov_len < 0) {
- error = EINVAL;
- goto done;
- }
auio.uio_resid += iov->iov_len;
if (auio.uio_resid < 0) {
error = EINVAL;
@@ -360,6 +357,7 @@ struct ioctl_args {
caddr_t data;
};
/* ARGSUSED */
+int
ioctl(p, uap, retval)
struct proc *p;
register struct ioctl_args *uap;
@@ -497,6 +495,7 @@ struct select_args {
fd_set *in, *ou, *ex;
struct timeval *tv;
};
+int
select(p, uap, retval)
register struct proc *p;
register struct select_args *uap;
@@ -588,6 +587,7 @@ done:
return (error);
}
+int
selscan(p, ibits, obits, nfd, retval)
struct proc *p;
fd_set *ibits, *obits;
@@ -620,6 +620,7 @@ selscan(p, ibits, obits, nfd, retval)
}
/*ARGSUSED*/
+int
seltrue(dev, flag, p)
dev_t dev;
int flag;
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 4cc40baf5821..527371df71bb 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -51,6 +51,7 @@ struct ptrace_args {
caddr_t addr;
int data;
};
+int
ptrace(a1, a2, a3)
struct proc *a1;
struct ptrace_args *a2;
@@ -63,6 +64,7 @@ ptrace(a1, a2, a3)
return (ENOSYS);
}
+int
trace_req(a1)
struct proc *a1;
{
diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c
index a93ae86df853..63f529841170 100644
--- a/sys/kern/sys_socket.c
+++ b/sys/kern/sys_socket.c
@@ -51,6 +51,7 @@ struct fileops socketops =
{ soo_read, soo_write, soo_ioctl, soo_select, soo_close };
/* ARGSUSED */
+int
soo_read(fp, uio, cred)
struct file *fp;
struct uio *uio;
@@ -62,6 +63,7 @@ soo_read(fp, uio, cred)
}
/* ARGSUSED */
+int
soo_write(fp, uio, cred)
struct file *fp;
struct uio *uio;
@@ -72,6 +74,7 @@ soo_write(fp, uio, cred)
uio, (struct mbuf *)0, (struct mbuf *)0, 0));
}
+int
soo_ioctl(fp, cmd, data, p)
struct file *fp;
int cmd;
@@ -130,6 +133,7 @@ soo_ioctl(fp, cmd, data, p)
(struct mbuf *)cmd, (struct mbuf *)data, (struct mbuf *)0));
}
+int
soo_select(fp, which, p)
struct file *fp;
int which;
@@ -171,6 +175,7 @@ soo_select(fp, which, p)
return (0);
}
+int
soo_stat(so, ub)
register struct socket *so;
register struct stat *ub;
@@ -184,6 +189,7 @@ soo_stat(so, ub)
}
/* ARGSUSED */
+int
soo_close(fp, p)
struct file *fp;
struct proc *p;
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index 1809905a4f6a..339b9979a1fa 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -188,10 +188,10 @@ char *syscallnames[] = {
#else
"#161", /* 161 = nosys */
#endif
- "#162", /* 162 = nosys */
- "#163", /* 163 = nosys */
- "#164", /* 164 = nosys */
- "#165", /* 165 = nosys */
+ "getdomainname", /* 162 = getdomainname */
+ "setdomainname", /* 163 = setdomainname */
+ "uname", /* 164 = uname */
+ "sysarch", /* 165 = sysarch */
"#166", /* 166 = nosys */
"#167", /* 167 = nosys */
"#168", /* 168 = nosys */
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 1b8de145fba7..4ba7df2ba2f0 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -100,7 +100,7 @@
68 OBSOL 0 vwrite
69 STD 1 sbrk
70 STD 1 sstk
-71 COMPAT 7 mmap
+71 COMPAT 6 mmap
72 STD 1 ovadvise vadvise
73 STD 2 munmap
74 STD 3 mprotect
@@ -212,10 +212,10 @@
#else
161 UNIMPL 0 nosys
#endif
-162 UNIMPL 0 nosys
-163 UNIMPL 0 nosys
-164 UNIMPL 0 nosys
-165 UNIMPL 0 nosys
+162 STD 2 getdomainname
+163 STD 2 setdomainname
+164 STD 1 uname
+165 STD 2 sysarch
166 UNIMPL 0 nosys
167 UNIMPL 0 nosys
168 UNIMPL 0 nosys
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 6cc7be23700f..23309a3a7f6b 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -1355,7 +1355,7 @@ ttwrite(tp, uio, flag)
register struct uio *uio;
int flag;
{
- register char *cp;
+ register char *cp = 0;
register int cc, ce;
register struct proc *p;
int i, hiwat, cnt, error, s;
diff --git a/sys/kern/tty_compat.c b/sys/kern/tty_compat.c
index a6a39d9d7bf3..7047230844d9 100644
--- a/sys/kern/tty_compat.c
+++ b/sys/kern/tty_compat.c
@@ -49,6 +49,9 @@
#include <sys/kernel.h>
#include <sys/syslog.h>
+void ttcompatsetflags __P((struct tty *, struct termios *));
+void ttcompatsetlflags __P((struct tty *, struct termios *));
+
int ttydebug = 0;
static struct speedtab compatspeeds[] = {
@@ -76,6 +79,7 @@ static int compatspcodes[16] = {
};
/*ARGSUSED*/
+int
ttcompat(tp, com, data, flag)
register struct tty *tp;
int com;
@@ -222,6 +226,7 @@ ttcompat(tp, com, data, flag)
return (0);
}
+int
ttcompatgetflags(tp)
register struct tty *tp;
{
@@ -279,6 +284,7 @@ if (ttydebug)
return (flags);
}
+void
ttcompatsetflags(tp, t)
register struct tty *tp;
register struct termios *t;
@@ -350,6 +356,7 @@ ttcompatsetflags(tp, t)
t->c_cflag = cflag;
}
+void
ttcompatsetlflags(tp, t)
register struct tty *tp;
register struct termios *t;
diff --git a/sys/kern/tty_conf.c b/sys/kern/tty_conf.c
index b53edb429756..f517a37f0ed0 100644
--- a/sys/kern/tty_conf.c
+++ b/sys/kern/tty_conf.c
@@ -84,8 +84,13 @@ struct linesw linesw[] =
{ ttynodisc, ttyerrclose, ttyerrio, ttyerrio, nullioctl,
ttyerrinput, ttyerrstart, nullmodem }, /* 1- defunct */
+#ifdef COMPAT_43
+ { ttyopen, ttylclose, ttread, ttwrite, nullioctl,
+ ttyinput, ttstart, ttymodem }, /* 2- NTTYDISC */
+#else
{ ttynodisc, ttyerrclose, ttyerrio, ttyerrio, nullioctl,
- ttyerrinput, ttyerrstart, nullmodem }, /* 2- defunct */
+ ttyerrinput, ttyerrstart, nullmodem },
+#endif
#if NTB > 0
{ tbopen, tbclose, tbread, enodev, tbioctl,
@@ -111,6 +116,7 @@ int nlinesw = sizeof (linesw) / sizeof (linesw[0]);
* discipline specific ioctl command.
*/
/*ARGSUSED*/
+int
nullioctl(tp, cmd, data, flags, p)
struct tty *tp;
int cmd;
diff --git a/sys/kern/tty_cons.c b/sys/kern/tty_cons.c
index f5fc887be1b1..ceb4b398b62c 100644
--- a/sys/kern/tty_cons.c
+++ b/sys/kern/tty_cons.c
@@ -41,9 +41,9 @@
#include "sys/param.h"
+#include <sys/systm.h>
#include "sys/proc.h"
#include "sys/user.h"
-#include "sys/systm.h"
#include "sys/buf.h"
#include "sys/ioctl.h"
#include "sys/tty.h"
@@ -122,7 +122,7 @@ cnopen(dev, flag, mode, p)
return (0);
dev = cn_tab->cn_dev;
- if ((vfinddev(dev, VCHR, &vp) == 0) && vcount(vp))
+ if (vfinddev(dev, VCHR, &vp) && vcount(vp))
return (0);
return ((*cdevsw[major(dev)].d_open)(dev, flag, mode, p));
@@ -140,7 +140,7 @@ cnclose(dev, flag, mode, p)
return (0);
dev = cn_tab->cn_dev;
- if ((vfinddev(dev, VCHR, &vp) == 0) && vcount(vp))
+ if (vfinddev(dev, VCHR, &vp) && vcount(vp))
return (0);
return ((*cdevsw[major(dev)].d_close)(dev, flag, mode, p));
diff --git a/sys/kern/tty_pty.c b/sys/kern/tty_pty.c
index 0e6911b63e19..50f0581f7612 100644
--- a/sys/kern/tty_pty.c
+++ b/sys/kern/tty_pty.c
@@ -76,7 +76,8 @@ int npty = NPTY; /* for pstat -t */
#define PF_NOSTOP 0x40
#define PF_UCNTL 0x80 /* user control mode */
-void ptsstop __P((struct tty *, int));
+void ptsstop __P((struct tty *, int));
+void ptcwakeup __P((struct tty *, int));
/*
* Establish n (or default if n is 1) ptys in the system.
@@ -106,6 +107,7 @@ ptyattach(n)
}
/*ARGSUSED*/
+int
ptsopen(dev, flag, devtype, p)
dev_t dev;
int flag, devtype;
@@ -143,6 +145,7 @@ ptsopen(dev, flag, devtype, p)
return (error);
}
+int
ptsclose(dev, flag, mode, p)
dev_t dev;
int flag, mode;
@@ -158,6 +161,7 @@ ptsclose(dev, flag, mode, p)
return (err);
}
+int
ptsread(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -210,6 +214,7 @@ again:
* Wakeups of controlling tty will happen
* indirectly, when tty driver calls ptsstart.
*/
+int
ptswrite(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -242,6 +247,7 @@ ptsstart(tp)
ptcwakeup(tp, FREAD);
}
+void
ptcwakeup(tp, flag)
struct tty *tp;
int flag;
@@ -260,8 +266,10 @@ ptcwakeup(tp, flag)
/*ARGSUSED*/
#ifdef __STDC__
+int
ptcopen(dev_t dev, int flag, int devtype, struct proc *p)
#else
+int
ptcopen(dev, flag, devtype, p)
dev_t dev;
int flag, devtype;
@@ -289,6 +297,7 @@ ptcopen(dev, flag, devtype, p)
return (0);
}
+int
ptcclose(dev)
dev_t dev;
{
@@ -302,6 +311,7 @@ ptcclose(dev)
return (0);
}
+int
ptcread(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -392,6 +402,7 @@ ptsstop(tp, flush)
ptcwakeup(tp, flag);
}
+int
ptcselect(dev, rw, p)
dev_t dev;
int rw;
@@ -446,13 +457,14 @@ ptcselect(dev, rw, p)
return (0);
}
+int
ptcwrite(dev, uio, flag)
dev_t dev;
register struct uio *uio;
int flag;
{
register struct tty *tp = &pt_tty[minor(dev)];
- register u_char *cp;
+ register u_char *cp = 0;
register int cc = 0;
u_char locbuf[BUFSIZ];
int cnt = 0;
@@ -534,6 +546,7 @@ block:
}
/*ARGSUSED*/
+int
ptyioctl(dev, cmd, data, flag, p)
dev_t dev;
int cmd;
diff --git a/sys/kern/tty_subr.c b/sys/kern/tty_subr.c
index fe8f000f87d5..acbb7f11cf13 100644
--- a/sys/kern/tty_subr.c
+++ b/sys/kern/tty_subr.c
@@ -1,159 +1,387 @@
-/*-
- * Copyright (c) 1982, 1986, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (C) 1994, David Greenman. This software may be used, modified,
+ * copied, distributed, and sold, in both source and binary form provided
+ * that the above copyright and these terms are retained. Under no
+ * circumstances is the author responsible for the proper functioning
+ * of this software, nor does the author assume any responsibility
+ * for damages incurred with its use.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: @(#)tty_subr.c 8.2 (Berkeley) 9/5/93
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
+#include <sys/clist.h>
+#include <sys/malloc.h>
char cwaiting;
-struct cblock *cfree, *cfreelist;
-int cfreecount, nclist;
+struct cblock *cfreelist = 0;
+int cfreecount, nclist = 256;
void
clist_init()
{
+ int i;
+ struct cblock *tmp;
- /*
- * Body deleted.
- */
+ for (i = 0; i < nclist; ++i) {
+ tmp = malloc(sizeof(struct cblock), M_TTYS, M_NOWAIT);
+ if (!tmp)
+ panic("clist_init: could not allocate cblock");
+ bzero((char *)tmp, sizeof(struct cblock));
+ tmp->c_next = cfreelist;
+ cfreelist = tmp;
+ cfreecount += CBSIZE;
+ }
return;
}
-getc(a1)
- struct clist *a1;
+/*
+ * Get a character from head of clist.
+ */
+int
+getc(clistp)
+ struct clist *clistp;
{
+ int chr = -1;
+ int s;
+ struct cblock *cblockp;
- /*
- * Body deleted.
- */
- return ((char)0);
-}
+ s = spltty();
-q_to_b(a1, a2, a3)
- struct clist *a1;
- char *a2;
- int a3;
-{
+ /* If there are characters in the list, get one */
+ if (clistp->c_cc) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ chr = (u_char)*clistp->c_cf;
+#if 0
+ /*
+ * If this char is quoted, set the flag.
+ */
+ if (isset(cblockp->c_quote, clistp->c_cf - (char *)cblockp->c_info))
+ chr |= TTY_QUOTE;
+#endif
+ clistp->c_cf++;
+ clistp->c_cc--;
+ if ((clistp->c_cf >= (char *)(cblockp+1)) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
- /*
- * Body deleted.
- */
- return (0);
+ splx(s);
+ return (chr);
}
-ndqb(a1, a2)
- struct clist *a1;
- int a2;
+/*
+ * Copy 'amount' of chars, beginning at head of clist 'clistp' to
+ * destination linear buffer 'dest'.
+ */
+int
+q_to_b(clistp, dest, amount)
+ struct clist *clistp;
+ char *dest;
+ int amount;
{
+ struct cblock *cblockp;
+ struct cblock *cblockn;
+ char *dest_orig = dest;
+ int numc;
+ int s;
- /*
- * Body deleted.
- */
- return (0);
+ s = spltty();
+
+ while (clistp && amount && (clistp->c_cc > 0)) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ cblockn = cblockp + 1; /* pointer arithmetic! */
+ numc = min(amount, (char *)cblockn - clistp->c_cf);
+ numc = min(numc, clistp->c_cc);
+ bcopy(clistp->c_cf, dest, numc);
+ amount -= numc;
+ clistp->c_cf += numc;
+ clistp->c_cc -= numc;
+ dest += numc;
+ if ((clistp->c_cf >= (char *)cblockn) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
+
+ splx(s);
+ return (dest - dest_orig);
}
+/*
+ * Flush 'amount' of chars, beginning at head of clist 'clistp'.
+ */
void
-ndflush(a1, a2)
- struct clist *a1;
- int a2;
+ndflush(clistp, amount)
+ struct clist *clistp;
+ int amount;
{
+ struct cblock *cblockp;
+ struct cblock *cblockn;
+ int numc;
+ int s;
- /*
- * Body deleted.
- */
+ s = spltty();
+
+ while (amount && (clistp->c_cc > 0)) {
+ cblockp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+ cblockn = cblockp + 1; /* pointer arithmetic! */
+ numc = min(amount, (char *)cblockn - clistp->c_cf);
+ numc = min(numc, clistp->c_cc);
+ amount -= numc;
+ clistp->c_cf += numc;
+ clistp->c_cc -= numc;
+ if ((clistp->c_cf >= (char *)cblockn) || (clistp->c_cc == 0)) {
+ if (clistp->c_cc > 0) {
+ clistp->c_cf = cblockp->c_next->c_info;
+ } else {
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ }
+ }
+
+ splx(s);
return;
}
-putc(a1, a2)
- char a1;
- struct clist *a2;
+int
+putc(chr, clistp)
+ int chr;
+ struct clist *clistp;
{
+ struct cblock *cblockp;
+ struct cblock *bclockn;
+ int s;
- /*
- * Body deleted.
- */
+ s = spltty();
+
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+
+ if (clistp->c_cl == NULL) {
+ if (cfreelist) {
+ cblockp = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cf = clistp->c_cl = cblockp->c_info;
+ clistp->c_cc = 0;
+ } else {
+ splx(s);
+ return (-1);
+ }
+ } else {
+ if (((long)clistp->c_cl & CROUND) == 0) {
+ if (cfreelist) {
+ cblockp = (cblockp-1)->c_next = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cl = cblockp->c_info;
+ } else {
+ splx(s);
+ return (-1);
+ }
+ }
+ }
+
+#if 0
+ if (chr & TTY_QUOTE)
+ setbit(cblockp->c_quote, clistp->c_cl - (char *)cblockp->c_info);
+#endif
+ *clistp->c_cl++ = chr;
+ clistp->c_cc++;
+
+ splx(s);
return (0);
}
-b_to_q(a1, a2, a3)
- char *a1;
- int a2;
- struct clist *a3;
+/*
+ * Copy data from linear buffer to clist chain.
+ */
+int
+b_to_q(src, amount, clistp)
+ char *src;
+ int amount;
+ struct clist *clistp;
{
+ struct cblock *cblockp;
+ struct cblock *bclockn;
+ int s;
+ int numc;
+
+ s = spltty();
/*
- * Body deleted.
+ * If there are no cblocks assigned to this clist yet,
+ * then get one.
*/
- return (0);
+ if (clistp->c_cl == NULL) {
+ if (cfreelist) {
+ cblockp = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cf = clistp->c_cl = cblockp->c_info;
+ clistp->c_cc = 0;
+ } else {
+ splx(s);
+ return (amount);
+ }
+ } else {
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+ }
+
+ while (amount) {
+ /*
+ * Get another cblock if needed.
+ */
+ if (((long)clistp->c_cl & CROUND) == 0) {
+ if (cfreelist) {
+ cblockp = (cblockp-1)->c_next = cfreelist;
+ cfreelist = cfreelist->c_next;
+ cfreecount -= CBSIZE;
+ cblockp->c_next = NULL;
+ clistp->c_cl = cblockp->c_info;
+ } else {
+ splx(s);
+ return (amount);
+ }
+ }
+
+ /*
+ * Copy a chunk of the linear buffer up to the end
+ * of this cblock.
+ */
+ cblockp += 1;
+ numc = min(amount, (char *)(cblockp) - clistp->c_cl);
+ bcopy(src, clistp->c_cl, numc);
+
+ /*
+ * Clear quote bits.
+ */
+
+ /*
+ * ...and update pointer for the next chunk.
+ */
+ src += numc;
+ clistp->c_cl += numc;
+ clistp->c_cc += numc;
+ amount -= numc;
+ }
+
+ splx(s);
+ return (amount);
}
char *
-nextc(a1, a2, a3)
- struct clist *a1;
- char *a2;
- int *a3;
+nextc(clistp, cp, dst)
+ struct clist *clistp;
+ char *cp;
+ int *dst;
{
+ struct cblock *cblockp;
- /*
- * Body deleted.
- */
- return ((char *)0);
+ ++cp;
+ if (clistp->c_cc && (cp != clistp->c_cl)) {
+ if (((long)cp & CROUND) == 0)
+ cp = ((struct cblock *)cp - 1)->c_next->c_info;
+ cblockp = (struct cblock *)((long)cp & ~CROUND);
+#if 0
+ *dst = *cp | (isset(cblockp->c_quote, cp - (char *)cblockp->c_info) ? TTY_QUOTE : 0);
+#endif
+ *dst = (u_char)*cp;
+ return (cp);
+ }
+
+ return (NULL);
}
-unputc(a1)
- struct clist *a1;
+int
+unputc(clistp)
+ struct clist *clistp;
{
+ struct cblock *cblockp = 0, *cbp = 0;
+ int s;
+ int chr = -1;
+
+
+ s = spltty();
+
+ if (clistp->c_cc) {
+ --clistp->c_cc;
+ chr = (u_char)*--clistp->c_cl;
+ /*
+ * Get the quote flag and 'unput' it, too.
+ */
+
+ /* XXX write me! */
+
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+
+ /*
+ * If all of the characters have been unput in this
+ * cblock, the find the previous one and free this
+ * one.
+ */
+ if (clistp->c_cc && (clistp->c_cl <= (char *)cblockp->c_info)) {
+ cbp = (struct cblock *)((long)clistp->c_cf & ~CROUND);
+
+ while (cbp->c_next != cblockp)
+ cbp = cbp->c_next;
+
+ clistp->c_cl = (char *)(cbp+1);
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ cbp->c_next = NULL;
+ }
+ }
/*
- * Body deleted.
+ * If there are no more characters on the list, then
+ * free the last cblock.
*/
- return ((char)0);
+ if ((clistp->c_cc == 0) && clistp->c_cl) {
+ cblockp = (struct cblock *)((long)clistp->c_cl & ~CROUND);
+ cblockp->c_next = cfreelist;
+ cfreelist = cblockp;
+ cfreecount += CBSIZE;
+ clistp->c_cf = clistp->c_cl = NULL;
+ }
+
+ splx(s);
+ return (chr);
}
void
-catq(a1, a2)
- struct clist *a1, *a2;
+catq(src_clistp, dest_clistp)
+ struct clist *src_clistp, *dest_clistp;
{
+ char buffer[CBSIZE*2];
+ int amount;
+
+ while (src_clistp->c_cc) {
+ amount = q_to_b(src_clistp, buffer, sizeof(buffer));
+ b_to_q(buffer, amount, dest_clistp);
+ }
- /*
- * Body deleted.
- */
return;
}
diff --git a/sys/kern/tty_tty.c b/sys/kern/tty_tty.c
index 964fc6f6d5ed..6baba9932400 100644
--- a/sys/kern/tty_tty.c
+++ b/sys/kern/tty_tty.c
@@ -48,6 +48,7 @@
#define cttyvp(p) ((p)->p_flag & P_CONTROLT ? (p)->p_session->s_ttyvp : NULL)
/*ARGSUSED*/
+int
cttyopen(dev, flag, mode, p)
dev_t dev;
int flag, mode;
@@ -78,6 +79,7 @@ cttyopen(dev, flag, mode, p)
}
/*ARGSUSED*/
+int
cttyread(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -95,6 +97,7 @@ cttyread(dev, uio, flag)
}
/*ARGSUSED*/
+int
cttywrite(dev, uio, flag)
dev_t dev;
struct uio *uio;
@@ -112,6 +115,7 @@ cttywrite(dev, uio, flag)
}
/*ARGSUSED*/
+int
cttyioctl(dev, cmd, addr, flag, p)
dev_t dev;
int cmd;
@@ -134,6 +138,7 @@ cttyioctl(dev, cmd, addr, flag, p)
}
/*ARGSUSED*/
+int
cttyselect(dev, flag, p)
dev_t dev;
int flag;
diff --git a/sys/kern/uipc_domain.c b/sys/kern/uipc_domain.c
index 8834dbf44427..db082c73660e 100644
--- a/sys/kern/uipc_domain.c
+++ b/sys/kern/uipc_domain.c
@@ -54,6 +54,7 @@ void pfslowtimo __P((void *));
domains = &__CONCAT(x,domain); \
}
+void
domaininit()
{
register struct domain *dp;
@@ -141,6 +142,7 @@ found:
return (maybe);
}
+int
net_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
@@ -178,6 +180,7 @@ found:
return (ENOPROTOOPT);
}
+void
pfctlinput(cmd, sa)
int cmd;
struct sockaddr *sa;
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index b71c6345e361..5569bea97dbf 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -47,10 +47,13 @@
#include <vm/vm.h>
+void m_reclaim __P(());
+
extern vm_map_t mb_map;
struct mbuf *mbutl;
char *mclrefcnt;
+void
mbinit()
{
int s;
@@ -75,6 +78,7 @@ bad:
* Must be called at splimp.
*/
/* ARGSUSED */
+int
m_clalloc(ncl, nowait)
register int ncl;
int nowait;
@@ -137,6 +141,7 @@ m_retryhdr(i, t)
return (m);
}
+void
m_reclaim()
{
register struct domain *dp;
@@ -323,6 +328,7 @@ nospace:
* Copy data from an mbuf chain starting "off" bytes from the beginning,
* continuing for "len" bytes, into the indicated buffer.
*/
+void
m_copydata(m, off, len, cp)
register struct mbuf *m;
register int off;
@@ -358,6 +364,7 @@ m_copydata(m, off, len, cp)
* Both chains must be of the same type (e.g. MT_DATA).
* Any m_pkthdr is not updated.
*/
+void
m_cat(m, n)
register struct mbuf *m, *n;
{
@@ -378,6 +385,7 @@ m_cat(m, n)
}
}
+void
m_adj(mp, req_len)
struct mbuf *mp;
int req_len;
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index d4af592d79b5..dc153bd0cae3 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -44,6 +44,14 @@
#include <sys/socket.h>
#include <sys/socketvar.h>
+void soqinsque __P((struct socket *, struct socket *, int));
+void sowakeup __P((struct socket *, struct sockbuf *));
+void sbrelease __P((struct sockbuf *));
+void sbappendrecord __P((struct sockbuf *, struct mbuf *));
+void sbcompress __P((struct sockbuf *, struct mbuf *, struct mbuf *));
+void sbflush __P((struct sockbuf *));
+void sbdrop __P((struct sockbuf *, int));
+
/*
* Primitive routines for operating on sockets and socket buffers
*/
@@ -85,6 +93,7 @@ u_long sb_max = SB_MAX; /* patchable */
* cause software-interrupt process scheduling.
*/
+void
soisconnecting(so)
register struct socket *so;
{
@@ -93,6 +102,7 @@ soisconnecting(so)
so->so_state |= SS_ISCONNECTING;
}
+void
soisconnected(so)
register struct socket *so;
{
@@ -111,6 +121,7 @@ soisconnected(so)
}
}
+void
soisdisconnecting(so)
register struct socket *so;
{
@@ -122,6 +133,7 @@ soisdisconnecting(so)
sorwakeup(so);
}
+void
soisdisconnected(so)
register struct socket *so;
{
@@ -181,6 +193,7 @@ sonewconn1(head, connstatus)
return (so);
}
+void
soqinsque(head, so, q)
register struct socket *head, *so;
int q;
@@ -202,6 +215,7 @@ soqinsque(head, so, q)
*prev = so;
}
+int
soqremque(so, q)
register struct socket *so;
int q;
@@ -240,6 +254,7 @@ soqremque(so, q)
* Data queued for reading in the socket may yet be read.
*/
+void
socantsendmore(so)
struct socket *so;
{
@@ -248,6 +263,7 @@ socantsendmore(so)
sowwakeup(so);
}
+void
socantrcvmore(so)
struct socket *so;
{
@@ -259,6 +275,7 @@ socantrcvmore(so)
/*
* Wait for data to arrive at/drain from a socket buffer.
*/
+int
sbwait(sb)
struct sockbuf *sb;
{
@@ -273,6 +290,7 @@ sbwait(sb)
* Lock a sockbuf already known to be locked;
* return any error returned from sleep (EINTR).
*/
+int
sb_lock(sb)
register struct sockbuf *sb;
{
@@ -294,6 +312,7 @@ sb_lock(sb)
* Do asynchronous notification via SIGIO
* if the socket has the SS_ASYNC flag set.
*/
+void
sowakeup(so, sb)
register struct socket *so;
register struct sockbuf *sb;
@@ -346,6 +365,7 @@ sowakeup(so, sb)
* should be released by calling sbrelease() when the socket is destroyed.
*/
+int
soreserve(so, sndcc, rcvcc)
register struct socket *so;
u_long sndcc, rcvcc;
@@ -373,6 +393,7 @@ bad:
* Attempt to scale mbmax so that mbcnt doesn't become limiting
* if buffering efficiency is near the normal case.
*/
+int
sbreserve(sb, cc)
struct sockbuf *sb;
u_long cc;
@@ -390,6 +411,7 @@ sbreserve(sb, cc)
/*
* Free mbufs held by a socket, and reserved mbuf space.
*/
+void
sbrelease(sb)
struct sockbuf *sb;
{
@@ -429,6 +451,7 @@ sbrelease(sb)
* the mbuf chain is recorded in sb. Empty mbufs are
* discarded and mbufs are compacted where possible.
*/
+void
sbappend(sb, m)
struct sockbuf *sb;
struct mbuf *m;
@@ -451,6 +474,7 @@ sbappend(sb, m)
}
#ifdef SOCKBUF_DEBUG
+void
sbcheck(sb)
register struct sockbuf *sb;
{
@@ -477,6 +501,7 @@ sbcheck(sb)
* As above, except the mbuf chain
* begins a new record.
*/
+void
sbappendrecord(sb, m0)
register struct sockbuf *sb;
register struct mbuf *m0;
@@ -511,6 +536,7 @@ sbappendrecord(sb, m0)
* is inserted at the beginning of the sockbuf,
* but after any other OOB data.
*/
+void
sbinsertoob(sb, m0)
register struct sockbuf *sb;
register struct mbuf *m0;
@@ -555,6 +581,7 @@ sbinsertoob(sb, m0)
* m0 must include a packet header with total length.
* Returns 0 if no space in sockbuf or insufficient mbufs.
*/
+int
sbappendaddr(sb, asa, m0, control)
register struct sockbuf *sb;
struct sockaddr *asa;
@@ -597,6 +624,7 @@ panic("sbappendaddr");
return (1);
}
+int
sbappendcontrol(sb, m0, control)
struct sockbuf *sb;
struct mbuf *control, *m0;
@@ -633,6 +661,7 @@ sbappendcontrol(sb, m0, control)
* buffer sb following mbuf n. If n
* is null, the buffer is presumed empty.
*/
+void
sbcompress(sb, m, n)
register struct sockbuf *sb;
register struct mbuf *m, *n;
@@ -681,6 +710,7 @@ sbcompress(sb, m, n)
* Free all mbufs in a sockbuf.
* Check that all resources are reclaimed.
*/
+void
sbflush(sb)
register struct sockbuf *sb;
{
@@ -696,6 +726,7 @@ sbflush(sb)
/*
* Drop data from (the front of) a sockbuf.
*/
+void
sbdrop(sb, len)
register struct sockbuf *sb;
register int len;
@@ -739,6 +770,7 @@ sbdrop(sb, len)
* Drop a record off the front of a sockbuf
* and move the next record to the front.
*/
+void
sbdroprecord(sb)
register struct sockbuf *sb;
{
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index ed09ee63b9f4..ede6c08cf49c 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -46,6 +46,9 @@
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
+void sofree __P((struct socket *));
+void sorflush __P((struct socket *));
+
/*
* Socket operation routines.
* These routines are called by the routines in
@@ -54,6 +57,7 @@
* switching out to the protocol specific routines.
*/
/*ARGSUSED*/
+int
socreate(dom, aso, type, proto)
int dom;
struct socket **aso;
@@ -91,6 +95,7 @@ socreate(dom, aso, type, proto)
return (0);
}
+int
sobind(so, nam)
struct socket *so;
struct mbuf *nam;
@@ -105,6 +110,7 @@ sobind(so, nam)
return (error);
}
+int
solisten(so, backlog)
register struct socket *so;
int backlog;
@@ -127,6 +133,7 @@ solisten(so, backlog)
return (0);
}
+void
sofree(so)
register struct socket *so;
{
@@ -148,6 +155,7 @@ sofree(so)
* Initiate disconnect if connected.
* Free socket when disconnect complete.
*/
+int
soclose(so)
register struct socket *so;
{
@@ -198,6 +206,7 @@ discard:
/*
* Must be called at splnet...
*/
+int
soabort(so)
struct socket *so;
{
@@ -207,6 +216,7 @@ soabort(so)
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}
+int
soaccept(so, nam)
register struct socket *so;
struct mbuf *nam;
@@ -223,6 +233,7 @@ soaccept(so, nam)
return (error);
}
+int
soconnect(so, nam)
register struct socket *so;
struct mbuf *nam;
@@ -250,6 +261,7 @@ soconnect(so, nam)
return (error);
}
+int
soconnect2(so1, so2)
register struct socket *so1;
struct socket *so2;
@@ -263,6 +275,7 @@ soconnect2(so1, so2)
return (error);
}
+int
sodisconnect(so)
register struct socket *so;
{
@@ -302,6 +315,7 @@ bad:
* must check for short counts if EINTR/ERESTART are returned.
* Data and control buffers are freed on return.
*/
+int
sosend(so, addr, uio, top, control, flags)
register struct socket *so;
struct mbuf *addr;
@@ -477,6 +491,7 @@ out:
* an mbuf **mp0 for use in returning the chain. The uio is then used
* only for the count in uio_resid.
*/
+int
soreceive(so, paddr, uio, mp0, controlp, flagsp)
register struct socket *so;
struct mbuf **paddr;
@@ -489,7 +504,7 @@ soreceive(so, paddr, uio, mp0, controlp, flagsp)
register int flags, len, error, s, offset;
struct protosw *pr = so->so_proto;
struct mbuf *nextrecord;
- int moff, type;
+ int moff, type = 0;
int orig_resid = uio->uio_resid;
mp = mp0;
@@ -775,6 +790,7 @@ release:
return (error);
}
+int
soshutdown(so, how)
register struct socket *so;
register int how;
@@ -790,6 +806,7 @@ soshutdown(so, how)
return (0);
}
+void
sorflush(so)
register struct socket *so;
{
@@ -811,6 +828,7 @@ sorflush(so)
sbrelease(&asb);
}
+int
sosetopt(so, level, optname, m0)
register struct socket *so;
int level, optname;
@@ -927,6 +945,7 @@ bad:
return (error);
}
+int
sogetopt(so, level, optname, mp)
register struct socket *so;
int level, optname;
@@ -1011,6 +1030,7 @@ sogetopt(so, level, optname, mp)
}
}
+void
sohasoutofband(so)
register struct socket *so;
{
diff --git a/sys/kern/uipc_socket2.c b/sys/kern/uipc_socket2.c
index d4af592d79b5..dc153bd0cae3 100644
--- a/sys/kern/uipc_socket2.c
+++ b/sys/kern/uipc_socket2.c
@@ -44,6 +44,14 @@
#include <sys/socket.h>
#include <sys/socketvar.h>
+void soqinsque __P((struct socket *, struct socket *, int));
+void sowakeup __P((struct socket *, struct sockbuf *));
+void sbrelease __P((struct sockbuf *));
+void sbappendrecord __P((struct sockbuf *, struct mbuf *));
+void sbcompress __P((struct sockbuf *, struct mbuf *, struct mbuf *));
+void sbflush __P((struct sockbuf *));
+void sbdrop __P((struct sockbuf *, int));
+
/*
* Primitive routines for operating on sockets and socket buffers
*/
@@ -85,6 +93,7 @@ u_long sb_max = SB_MAX; /* patchable */
* cause software-interrupt process scheduling.
*/
+void
soisconnecting(so)
register struct socket *so;
{
@@ -93,6 +102,7 @@ soisconnecting(so)
so->so_state |= SS_ISCONNECTING;
}
+void
soisconnected(so)
register struct socket *so;
{
@@ -111,6 +121,7 @@ soisconnected(so)
}
}
+void
soisdisconnecting(so)
register struct socket *so;
{
@@ -122,6 +133,7 @@ soisdisconnecting(so)
sorwakeup(so);
}
+void
soisdisconnected(so)
register struct socket *so;
{
@@ -181,6 +193,7 @@ sonewconn1(head, connstatus)
return (so);
}
+void
soqinsque(head, so, q)
register struct socket *head, *so;
int q;
@@ -202,6 +215,7 @@ soqinsque(head, so, q)
*prev = so;
}
+int
soqremque(so, q)
register struct socket *so;
int q;
@@ -240,6 +254,7 @@ soqremque(so, q)
* Data queued for reading in the socket may yet be read.
*/
+void
socantsendmore(so)
struct socket *so;
{
@@ -248,6 +263,7 @@ socantsendmore(so)
sowwakeup(so);
}
+void
socantrcvmore(so)
struct socket *so;
{
@@ -259,6 +275,7 @@ socantrcvmore(so)
/*
* Wait for data to arrive at/drain from a socket buffer.
*/
+int
sbwait(sb)
struct sockbuf *sb;
{
@@ -273,6 +290,7 @@ sbwait(sb)
* Lock a sockbuf already known to be locked;
* return any error returned from sleep (EINTR).
*/
+int
sb_lock(sb)
register struct sockbuf *sb;
{
@@ -294,6 +312,7 @@ sb_lock(sb)
* Do asynchronous notification via SIGIO
* if the socket has the SS_ASYNC flag set.
*/
+void
sowakeup(so, sb)
register struct socket *so;
register struct sockbuf *sb;
@@ -346,6 +365,7 @@ sowakeup(so, sb)
* should be released by calling sbrelease() when the socket is destroyed.
*/
+int
soreserve(so, sndcc, rcvcc)
register struct socket *so;
u_long sndcc, rcvcc;
@@ -373,6 +393,7 @@ bad:
* Attempt to scale mbmax so that mbcnt doesn't become limiting
* if buffering efficiency is near the normal case.
*/
+int
sbreserve(sb, cc)
struct sockbuf *sb;
u_long cc;
@@ -390,6 +411,7 @@ sbreserve(sb, cc)
/*
* Free mbufs held by a socket, and reserved mbuf space.
*/
+void
sbrelease(sb)
struct sockbuf *sb;
{
@@ -429,6 +451,7 @@ sbrelease(sb)
* the mbuf chain is recorded in sb. Empty mbufs are
* discarded and mbufs are compacted where possible.
*/
+void
sbappend(sb, m)
struct sockbuf *sb;
struct mbuf *m;
@@ -451,6 +474,7 @@ sbappend(sb, m)
}
#ifdef SOCKBUF_DEBUG
+void
sbcheck(sb)
register struct sockbuf *sb;
{
@@ -477,6 +501,7 @@ sbcheck(sb)
* As above, except the mbuf chain
* begins a new record.
*/
+void
sbappendrecord(sb, m0)
register struct sockbuf *sb;
register struct mbuf *m0;
@@ -511,6 +536,7 @@ sbappendrecord(sb, m0)
* is inserted at the beginning of the sockbuf,
* but after any other OOB data.
*/
+void
sbinsertoob(sb, m0)
register struct sockbuf *sb;
register struct mbuf *m0;
@@ -555,6 +581,7 @@ sbinsertoob(sb, m0)
* m0 must include a packet header with total length.
* Returns 0 if no space in sockbuf or insufficient mbufs.
*/
+int
sbappendaddr(sb, asa, m0, control)
register struct sockbuf *sb;
struct sockaddr *asa;
@@ -597,6 +624,7 @@ panic("sbappendaddr");
return (1);
}
+int
sbappendcontrol(sb, m0, control)
struct sockbuf *sb;
struct mbuf *control, *m0;
@@ -633,6 +661,7 @@ sbappendcontrol(sb, m0, control)
* buffer sb following mbuf n. If n
* is null, the buffer is presumed empty.
*/
+void
sbcompress(sb, m, n)
register struct sockbuf *sb;
register struct mbuf *m, *n;
@@ -681,6 +710,7 @@ sbcompress(sb, m, n)
* Free all mbufs in a sockbuf.
* Check that all resources are reclaimed.
*/
+void
sbflush(sb)
register struct sockbuf *sb;
{
@@ -696,6 +726,7 @@ sbflush(sb)
/*
* Drop data from (the front of) a sockbuf.
*/
+void
sbdrop(sb, len)
register struct sockbuf *sb;
register int len;
@@ -739,6 +770,7 @@ sbdrop(sb, len)
* Drop a record off the front of a sockbuf
* and move the next record to the front.
*/
+void
sbdroprecord(sb)
register struct sockbuf *sb;
{
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 89b7ffdf1960..5113f78f4ee7 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/file.h>
@@ -61,6 +62,7 @@ struct socket_args {
int type;
int protocol;
};
+int
socket(p, uap, retval)
struct proc *p;
register struct socket_args *uap;
@@ -92,6 +94,7 @@ struct bind_args {
int namelen;
};
/* ARGSUSED */
+int
bind(p, uap, retval)
struct proc *p;
register struct bind_args *uap;
@@ -115,6 +118,7 @@ struct listen_args {
int backlog;
};
/* ARGSUSED */
+int
listen(p, uap, retval)
struct proc *p;
register struct listen_args *uap;
@@ -138,6 +142,7 @@ struct accept_args {
};
#ifdef COMPAT_OLDSOCK
+int
accept(p, uap, retval)
struct proc *p;
struct accept_args *uap;
@@ -148,6 +153,7 @@ accept(p, uap, retval)
return (accept1(p, uap, retval));
}
+int
oaccept(p, uap, retval)
struct proc *p;
struct accept_args *uap;
@@ -162,6 +168,7 @@ oaccept(p, uap, retval)
#define accept1 accept
#endif
+int
accept1(p, uap, retval)
struct proc *p;
register struct accept_args *uap;
@@ -244,6 +251,7 @@ struct connect_args {
int namelen;
};
/* ARGSUSED */
+int
connect(p, uap, retval)
struct proc *p;
register struct connect_args *uap;
@@ -292,6 +300,7 @@ struct socketpair_args {
int protocol;
int *rsv;
};
+int
socketpair(p, uap, retval)
struct proc *p;
register struct socketpair_args *uap;
@@ -354,6 +363,7 @@ struct sendto_args {
caddr_t to;
int tolen;
};
+int
sendto(p, uap, retval)
struct proc *p;
register struct sendto_args *uap;
@@ -382,6 +392,7 @@ struct osend_args {
int len;
int flags;
};
+int
osend(p, uap, retval)
struct proc *p;
register struct osend_args *uap;
@@ -407,6 +418,7 @@ struct osendmsg_args {
caddr_t msg;
int flags;
};
+int
osendmsg(p, uap, retval)
struct proc *p;
register struct osendmsg_args *uap;
@@ -444,6 +456,7 @@ struct sendmsg_args {
caddr_t msg;
int flags;
};
+int
sendmsg(p, uap, retval)
struct proc *p;
register struct sendmsg_args *uap;
@@ -478,6 +491,7 @@ done:
return (error);
}
+int
sendit(p, s, mp, flags, retsize)
register struct proc *p;
int s;
@@ -505,8 +519,6 @@ sendit(p, s, mp, flags, retsize)
auio.uio_resid = 0;
iov = mp->msg_iov;
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
- if (iov->iov_len < 0)
- return (EINVAL);
if ((auio.uio_resid += iov->iov_len) < 0)
return (EINVAL);
}
@@ -589,6 +601,7 @@ struct recvfrom_args {
};
#ifdef COMPAT_OLDSOCK
+int
orecvfrom(p, uap, retval)
struct proc *p;
struct recvfrom_args *uap;
@@ -600,6 +613,7 @@ orecvfrom(p, uap, retval)
}
#endif
+int
recvfrom(p, uap, retval)
struct proc *p;
register struct recvfrom_args *uap;
@@ -632,6 +646,7 @@ struct orecv_args {
int len;
int flags;
};
+int
orecv(p, uap, retval)
struct proc *p;
register struct orecv_args *uap;
@@ -661,6 +676,7 @@ struct orecvmsg_args {
struct omsghdr *msg;
int flags;
};
+int
orecvmsg(p, uap, retval)
struct proc *p;
register struct orecvmsg_args *uap;
@@ -703,6 +719,7 @@ struct recvmsg_args {
struct msghdr *msg;
int flags;
};
+int
recvmsg(p, uap, retval)
struct proc *p;
register struct recvmsg_args *uap;
@@ -742,6 +759,7 @@ done:
return (error);
}
+int
recvit(p, s, mp, namelenp, retsize)
register struct proc *p;
int s;
@@ -770,8 +788,6 @@ recvit(p, s, mp, namelenp, retsize)
auio.uio_resid = 0;
iov = mp->msg_iov;
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
- if (iov->iov_len < 0)
- return (EINVAL);
if ((auio.uio_resid += iov->iov_len) < 0)
return (EINVAL);
}
@@ -877,6 +893,7 @@ struct shutdown_args {
int how;
};
/* ARGSUSED */
+int
shutdown(p, uap, retval)
struct proc *p;
register struct shutdown_args *uap;
@@ -898,6 +915,7 @@ struct setsockopt_args {
int valsize;
};
/* ARGSUSED */
+int
setsockopt(p, uap, retval)
struct proc *p;
register struct setsockopt_args *uap;
@@ -934,6 +952,7 @@ struct getsockopt_args {
int *avalsize;
};
/* ARGSUSED */
+int
getsockopt(p, uap, retval)
struct proc *p;
register struct getsockopt_args *uap;
@@ -969,6 +988,7 @@ struct pipe_args {
int dummy;
};
/* ARGSUSED */
+int
pipe(p, uap, retval)
struct proc *p;
struct pipe_args *uap;
@@ -1025,6 +1045,7 @@ struct getsockname_args {
#endif
};
#ifdef COMPAT_OLDSOCK
+int
getsockname(p, uap, retval)
struct proc *p;
struct getsockname_args *uap;
@@ -1035,6 +1056,7 @@ getsockname(p, uap, retval)
return (getsockname1(p, uap, retval));
}
+int
ogetsockname(p, uap, retval)
struct proc *p;
struct getsockname_args *uap;
@@ -1050,6 +1072,7 @@ ogetsockname(p, uap, retval)
#endif
/* ARGSUSED */
+int
getsockname1(p, uap, retval)
struct proc *p;
register struct getsockname_args *uap;
@@ -1099,6 +1122,7 @@ struct getpeername_args {
};
#ifdef COMPAT_OLDSOCK
+int
getpeername(p, uap, retval)
struct proc *p;
struct getpeername_args *uap;
@@ -1109,6 +1133,7 @@ getpeername(p, uap, retval)
return (getpeername1(p, uap, retval));
}
+int
ogetpeername(p, uap, retval)
struct proc *p;
struct getpeername_args *uap;
@@ -1124,6 +1149,7 @@ ogetpeername(p, uap, retval)
#endif
/* ARGSUSED */
+int
getpeername1(p, uap, retval)
struct proc *p;
register struct getpeername_args *uap;
@@ -1161,6 +1187,7 @@ bad:
return (error);
}
+int
sockargs(mp, buf, buflen, type)
struct mbuf **mp;
caddr_t buf;
@@ -1200,6 +1227,7 @@ sockargs(mp, buf, buflen, type)
return (error);
}
+int
getsock(fdp, fdes, fpp)
struct filedesc *fdp;
int fdes;
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 94bf8f744c86..b61e9f4a2372 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -49,6 +49,15 @@
#include <sys/stat.h>
#include <sys/mbuf.h>
+void unp_detach __P((struct unpcb *));
+void unp_disconnect __P((struct unpcb *));
+void unp_shutdown __P((struct unpcb *));
+void unp_drop __P((struct unpcb *, int));
+void unp_gc __P((void));
+void unp_scan __P((struct mbuf *, void (*)(struct file *)));
+void unp_mark __P((struct file *));
+void unp_discard __P((struct file *));
+
/*
* Unix communications domain.
*
@@ -61,6 +70,7 @@ struct sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };
ino_t unp_ino; /* prototype for fake inode numbers */
/*ARGSUSED*/
+int
uipc_usrreq(so, req, m, nam, control)
struct socket *so;
int req;
@@ -313,6 +323,7 @@ u_long unpdg_recvspace = 4*1024;
int unp_rights; /* file descriptors in flight */
+int
unp_attach(so)
struct socket *so;
{
@@ -346,6 +357,7 @@ unp_attach(so)
return (0);
}
+void
unp_detach(unp)
register struct unpcb *unp;
{
@@ -376,6 +388,7 @@ unp_detach(unp)
}
}
+int
unp_bind(unp, nam, p)
struct unpcb *unp;
struct mbuf *nam;
@@ -423,6 +436,7 @@ unp_bind(unp, nam, p)
return (0);
}
+int
unp_connect(so, nam, p)
struct socket *so;
struct mbuf *nam;
@@ -478,6 +492,7 @@ bad:
return (error);
}
+int
unp_connect2(so, so2)
register struct socket *so;
register struct socket *so2;
@@ -509,6 +524,7 @@ unp_connect2(so, so2)
return (0);
}
+void
unp_disconnect(unp)
struct unpcb *unp;
{
@@ -546,6 +562,7 @@ unp_disconnect(unp)
}
#ifdef notdef
+void
unp_abort(unp)
struct unpcb *unp;
{
@@ -554,6 +571,7 @@ unp_abort(unp)
}
#endif
+void
unp_shutdown(unp)
struct unpcb *unp;
{
@@ -564,6 +582,7 @@ unp_shutdown(unp)
socantrcvmore(so);
}
+void
unp_drop(unp, errno)
struct unpcb *unp;
int errno;
@@ -581,12 +600,14 @@ unp_drop(unp, errno)
}
#ifdef notdef
+void
unp_drain()
{
}
#endif
+int
unp_externalize(rights)
struct mbuf *rights;
{
@@ -618,6 +639,7 @@ unp_externalize(rights)
return (0);
}
+int
unp_internalize(control, p)
struct mbuf *control;
struct proc *p;
@@ -652,9 +674,9 @@ unp_internalize(control, p)
}
int unp_defer, unp_gcing;
-int unp_mark();
extern struct domain unixdomain;
+void
unp_gc()
{
register struct file *fp, *nextfp;
@@ -765,18 +787,18 @@ unp_gc()
unp_gcing = 0;
}
+void
unp_dispose(m)
struct mbuf *m;
{
- int unp_discard();
-
if (m)
unp_scan(m, unp_discard);
}
+void
unp_scan(m0, op)
register struct mbuf *m0;
- int (*op)();
+ void (*op)(struct file *);
{
register struct mbuf *m;
register struct file **rp;
@@ -803,6 +825,7 @@ unp_scan(m0, op)
}
}
+void
unp_mark(fp)
struct file *fp;
{
@@ -813,6 +836,7 @@ unp_mark(fp)
fp->f_flag |= (FMARK|FDEFER);
}
+void
unp_discard(fp)
struct file *fp;
{
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index ec5c962f7dfd..6c12d7f7fe9f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,181 +1,260 @@
-/*-
- * Copyright (c) 1982, 1986, 1989, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice immediately at the beginning of the file, without modification,
+ * this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
+ * 3. Absolutely no warranty of function or purpose is made by the author
+ * John S. Dyson.
+ * 4. Modifications may be freely made to this file if the above conditions
+ * are met.
*/
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kernel.h>
#include <sys/proc.h>
-#include <sys/buf.h>
#include <sys/vnode.h>
+#include <sys/buf.h>
#include <sys/mount.h>
-#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
-
+#include <vm/vm.h>
+#include <vm/vm_pageout.h>
+
+#include <miscfs/specfs/specdev.h>
+
+struct buf *buf; /* the buffer pool itself */
+int nbuf; /* number of buffer headers */
+int bufpages; /* number of memory pages in the buffer pool */
+struct buf *swbuf; /* swap I/O headers */
+int nswbuf;
+#define BUFHSZ 512
+int bufhash = BUFHSZ - 1;
+
+struct buf *getnewbuf(int,int);
+extern vm_map_t buffer_map, io_map;
+void vm_hold_free_pages(vm_offset_t from, vm_offset_t to);
+void vm_hold_load_pages(vm_offset_t from, vm_offset_t to);
/*
* Definitions for the buffer hash lists.
*/
#define BUFHASH(dvp, lbn) \
(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
-LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
-u_long bufhash;
-
-/*
- * Insq/Remq for the buffer hash lists.
- */
-#define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
-#define bremhash(bp) LIST_REMOVE(bp, b_hash)
/*
* Definitions for the buffer free lists.
*/
-#define BQUEUES 4 /* number of free buffer queues */
-
-#define BQ_LOCKED 0 /* super-blocks &c */
-#define BQ_LRU 1 /* lru, useful buffers */
-#define BQ_AGE 2 /* rubbish */
-#define BQ_EMPTY 3 /* buffer headers with no memory */
+#define BQUEUES 5 /* number of free buffer queues */
+LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
-int needbuffer;
+
+#define BQ_NONE 0 /* on no queue */
+#define BQ_LOCKED 1 /* locked buffers */
+#define BQ_LRU 2 /* useful buffers */
+#define BQ_AGE 3 /* less useful buffers */
+#define BQ_EMPTY 4 /* empty buffer headers*/
+
+int needsbuffer;
/*
- * Insq/Remq for the buffer free lists.
+ * Internal update daemon, process 3
+ * The variable vfs_update_wakeup allows for internal syncs.
*/
-#define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist)
-#define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
-
-void
-bremfree(bp)
- struct buf *bp;
-{
- struct bqueues *dp = NULL;
-
- /*
- * We only calculate the head of the freelist when removing
- * the last element of the list as that is the only time that
- * it is needed (e.g. to reset the tail pointer).
- *
- * NB: This makes an assumption about how tailq's are implemented.
- */
- if (bp->b_freelist.tqe_next == NULL) {
- for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
- if (dp->tqh_last == &bp->b_freelist.tqe_next)
- break;
- if (dp == &bufqueues[BQUEUES])
- panic("bremfree: lost tail");
- }
- TAILQ_REMOVE(dp, bp, b_freelist);
-}
+int vfs_update_wakeup;
/*
- * Initialize buffers and hash links for buffers.
+ * Initialize buffer headers and related structures.
*/
-void
-bufinit()
+void bufinit()
{
- register struct buf *bp;
- struct bqueues *dp;
- register int i;
- int base, residual;
-
- for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
- TAILQ_INIT(dp);
- bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
- base = bufpages / nbuf;
- residual = bufpages % nbuf;
- for (i = 0; i < nbuf; i++) {
+ struct buf *bp;
+ int i;
+
+ TAILQ_INIT(&bswlist);
+ LIST_INIT(&invalhash);
+
+ /* first, make a null hash table */
+ for(i=0;i<BUFHSZ;i++)
+ LIST_INIT(&bufhashtbl[i]);
+
+ /* next, make a null set of free lists */
+ for(i=0;i<BQUEUES;i++)
+ TAILQ_INIT(&bufqueues[i]);
+
+ /* finally, initialize each buffer header and stick on empty q */
+ for(i=0;i<nbuf;i++) {
bp = &buf[i];
- bzero((char *)bp, sizeof *bp);
+ bzero(bp, sizeof *bp);
+ bp->b_flags = B_INVAL; /* we're just an empty header */
bp->b_dev = NODEV;
+ bp->b_vp = NULL;
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
+ bp->b_qindex = BQ_EMPTY;
bp->b_vnbufs.le_next = NOLIST;
- bp->b_data = buffers + i * MAXBSIZE;
- if (i < residual)
- bp->b_bufsize = (base + 1) * CLBYTES;
- else
- bp->b_bufsize = base * CLBYTES;
- bp->b_flags = B_INVAL;
- dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
- binsheadfree(bp, dp);
- binshash(bp, &invalhash);
+ bp->b_data = (caddr_t)kmem_alloc_pageable(buffer_map, MAXBSIZE);
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_EMPTY], bp, b_freelist);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
}
}
-bread(a1, a2, a3, a4, a5)
- struct vnode *a1;
- daddr_t a2;
- int a3;
- struct ucred *a4;
- struct buf **a5;
+/*
+ * remove the buffer from the appropriate free list
+ */
+void
+bremfree(struct buf *bp)
{
+ int s = splbio();
+ if( bp->b_qindex != BQ_NONE) {
+ TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
+ bp->b_qindex = BQ_NONE;
+ } else {
+ panic("bremfree: removing a buffer when not on a queue");
+ }
+ splx(s);
+}
- /*
- * Body deleted.
- */
- return (EIO);
+/*
+ * Get a buffer with the specified data. Look in the cache first.
+ */
+int
+bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
+ struct buf **bpp)
+{
+ struct buf *bp;
+
+ bp = getblk (vp, blkno, size, 0, 0);
+ *bpp = bp;
+
+ /* if not found in cache, do some I/O */
+ if ((bp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats) /* count block I/O */
+ curproc->p_stats->p_ru.ru_inblock++;
+ bp->b_flags |= B_READ;
+ bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( bp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ bp->b_rcred = cred;
+ }
+ VOP_STRATEGY(bp);
+ return( biowait (bp));
+ }
+
+ return (0);
}
-breadn(a1, a2, a3, a4, a5, a6, a7, a8)
- struct vnode *a1;
- daddr_t a2; int a3;
- daddr_t a4[]; int a5[];
- int a6;
- struct ucred *a7;
- struct buf **a8;
+/*
+ * Operates like bread, but also starts asynchronous I/O on
+ * read-ahead blocks.
+ */
+int
+breadn(struct vnode *vp, daddr_t blkno, int size,
+ daddr_t *rablkno, int *rabsize,
+ int cnt, struct ucred *cred, struct buf **bpp)
{
+ struct buf *bp, *rabp;
+ int i;
+ int rv = 0, readwait = 0;
+
+ *bpp = bp = getblk (vp, blkno, size, 0, 0);
+
+ /* if not found in cache, do some I/O */
+ if ((bp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats) /* count block I/O */
+ curproc->p_stats->p_ru.ru_inblock++;
+ bp->b_flags |= B_READ;
+ bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( bp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ bp->b_rcred = cred;
+ }
+ VOP_STRATEGY(bp);
+ ++readwait;
+ }
+
+ for(i=0;i<cnt;i++, rablkno++, rabsize++) {
+ if( incore(vp, *rablkno)) {
+ continue;
+ }
+ rabp = getblk (vp, *rablkno, *rabsize, 0, 0);
+
+ if ((rabp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats)
+ curproc->p_stats->p_ru.ru_inblock++;
+ rabp->b_flags |= B_READ | B_ASYNC;
+ rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( rabp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ rabp->b_rcred = cred;
+ }
+ VOP_STRATEGY(rabp);
+ } else {
+ brelse(rabp);
+ }
+ }
- /*
- * Body deleted.
- */
- return (EIO);
+ if( readwait) {
+ rv = biowait (bp);
+ }
+
+ return (rv);
}
-bwrite(a1)
- struct buf *a1;
+/*
+ * Write, release buffer on completion. (Done by iodone
+ * if async.)
+ */
+int
+bwrite(struct buf *bp)
{
+ int oldflags = bp->b_flags;
+
+ if(bp->b_flags & B_INVAL) {
+ brelse(bp);
+ return (0);
+ }
- /*
- * Body deleted.
- */
- return (EIO);
+ if(!(bp->b_flags & B_BUSY))
+ panic("bwrite: buffer is not busy???");
+
+ bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
+ bp->b_flags |= B_WRITEINPROG;
+
+ if (oldflags & B_ASYNC) {
+ if (oldflags & B_DELWRI) {
+ reassignbuf(bp, bp->b_vp);
+ } else if( curproc) {
+ ++curproc->p_stats->p_ru.ru_oublock;
+ }
+ }
+
+ bp->b_vp->v_numoutput++;
+ VOP_STRATEGY(bp);
+
+ if( (oldflags & B_ASYNC) == 0) {
+ int rtval = biowait(bp);
+ if (oldflags & B_DELWRI) {
+ reassignbuf(bp, bp->b_vp);
+ } else if( curproc) {
+ ++curproc->p_stats->p_ru.ru_oublock;
+ }
+ brelse(bp);
+ return (rtval);
+ }
+
+ return(0);
}
int
@@ -185,155 +264,469 @@ vn_bwrite(ap)
return (bwrite(ap->a_bp));
}
-bdwrite(a1)
- struct buf *a1;
+/*
+ * Delayed write. (Buffer is marked dirty).
+ */
+void
+bdwrite(struct buf *bp)
{
- /*
- * Body deleted.
- */
+ if((bp->b_flags & B_BUSY) == 0) {
+ panic("bdwrite: buffer is not busy");
+ }
+
+ if(bp->b_flags & B_INVAL) {
+ brelse(bp);
+ return;
+ }
+
+ if(bp->b_flags & B_TAPE) {
+ bawrite(bp);
+ return;
+ }
+
+ bp->b_flags &= ~B_READ;
+ if( (bp->b_flags & B_DELWRI) == 0) {
+ if( curproc)
+ ++curproc->p_stats->p_ru.ru_oublock;
+ bp->b_flags |= B_DONE|B_DELWRI;
+ reassignbuf(bp, bp->b_vp);
+ }
+ brelse(bp);
return;
}
-bawrite(a1)
- struct buf *a1;
+/*
+ * Asynchronous write.
+ * Start output on a buffer, but do not wait for it to complete.
+ * The buffer is released when the output completes.
+ */
+void
+bawrite(struct buf *bp)
{
-
- /*
- * Body deleted.
- */
- return;
+ bp->b_flags |= B_ASYNC;
+ (void) bwrite(bp);
}
-brelse(a1)
- struct buf *a1;
+/*
+ * Release a buffer.
+ */
+void
+brelse(struct buf *bp)
{
+ int x;
- /*
- * Body deleted.
- */
- return;
+ /* anyone need a "free" block? */
+ x=splbio();
+ if (needsbuffer) {
+ needsbuffer = 0;
+ wakeup((caddr_t)&needsbuffer);
+ }
+ /* anyone need this very block? */
+ if (bp->b_flags & B_WANTED) {
+ bp->b_flags &= ~(B_WANTED|B_AGE);
+ wakeup((caddr_t)bp);
+ }
+
+ if (bp->b_flags & B_LOCKED)
+ bp->b_flags &= ~B_ERROR;
+
+ if ((bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR)) ||
+ (bp->b_bufsize <= 0)) {
+ bp->b_flags |= B_INVAL;
+ bp->b_flags &= ~(B_DELWRI|B_CACHE);
+ if(bp->b_vp)
+ brelvp(bp);
+ }
+
+ if( bp->b_qindex != BQ_NONE)
+ panic("brelse: free buffer onto another queue???");
+
+ /* enqueue */
+ /* buffers with junk contents */
+ if(bp->b_bufsize == 0) {
+ bp->b_qindex = BQ_EMPTY;
+ TAILQ_INSERT_HEAD(&bufqueues[BQ_EMPTY], bp, b_freelist);
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ bp->b_dev = NODEV;
+ } else if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE)) {
+ bp->b_qindex = BQ_AGE;
+ TAILQ_INSERT_HEAD(&bufqueues[BQ_AGE], bp, b_freelist);
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ bp->b_dev = NODEV;
+ /* buffers that are locked */
+ } else if(bp->b_flags & B_LOCKED) {
+ bp->b_qindex = BQ_LOCKED;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
+ /* buffers with stale but valid contents */
+ } else if(bp->b_flags & B_AGE) {
+ bp->b_qindex = BQ_AGE;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
+ /* buffers with valid and quite potentially reuseable contents */
+ } else {
+ bp->b_qindex = BQ_LRU;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
+ }
+
+ /* unlock */
+ bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_NOCACHE|B_AGE);
+ splx(x);
}
+int freebufspace;
+int allocbufspace;
+
+/*
+ * Find a buffer header which is available for use.
+ */
struct buf *
-incore(a1, a2)
- struct vnode *a1;
- daddr_t a2;
+getnewbuf(int slpflag, int slptimeo)
{
+ struct buf *bp;
+ int x;
+ x = splbio();
+start:
+ /* can we constitute a new buffer? */
+ if (bp = bufqueues[BQ_EMPTY].tqh_first) {
+ if( bp->b_qindex != BQ_EMPTY)
+ panic("getnewbuf: inconsistent EMPTY queue");
+ bremfree(bp);
+ goto fillbuf;
+ }
- /*
- * Body deleted.
- */
- return (0);
+tryfree:
+ if (bp = bufqueues[BQ_AGE].tqh_first) {
+ if( bp->b_qindex != BQ_AGE)
+ panic("getnewbuf: inconsistent AGE queue");
+ bremfree(bp);
+ } else if (bp = bufqueues[BQ_LRU].tqh_first) {
+ if( bp->b_qindex != BQ_LRU)
+ panic("getnewbuf: inconsistent LRU queue");
+ bremfree(bp);
+ } else {
+ /* wait for a free buffer of any kind */
+ needsbuffer = 1;
+ tsleep((caddr_t)&needsbuffer, PRIBIO, "newbuf", 0);
+ splx(x);
+ return (0);
+ }
+
+
+ /* if we are a delayed write, convert to an async write */
+ if (bp->b_flags & B_DELWRI) {
+ bp->b_flags |= B_BUSY;
+ bawrite (bp);
+ goto start;
+ }
+
+ if(bp->b_vp)
+ brelvp(bp);
+
+ /* we are not free, nor do we contain interesting data */
+ if (bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if (bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+fillbuf:
+ bp->b_flags = B_BUSY;
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ splx(x);
+ bp->b_dev = NODEV;
+ bp->b_vp = NULL;
+ bp->b_blkno = bp->b_lblkno = 0;
+ bp->b_iodone = 0;
+ bp->b_error = 0;
+ bp->b_resid = 0;
+ bp->b_bcount = 0;
+ bp->b_wcred = bp->b_rcred = NOCRED;
+ bp->b_dirtyoff = bp->b_dirtyend = 0;
+ bp->b_validoff = bp->b_validend = 0;
+ return (bp);
}
+/*
+ * Check to see if a block is currently memory resident.
+ */
struct buf *
-getblk(a1, a2, a3, a4, a5)
- struct vnode *a1;
- daddr_t a2;
- int a3, a4, a5;
+incore(struct vnode *vp, daddr_t blkno)
{
+ struct buf *bp;
+ struct bufhashhdr *bh;
+
+ int s = splbio();
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ bh = BUFHASH(vp, blkno);
+ bp = bh->lh_first;
+
+ /* Search hash chain */
+ while (bp) {
+ if( (bp < buf) || (bp >= buf + nbuf)) {
+ printf("incore: buf out of range: %lx, hash: %d\n",
+ bp, bh - bufhashtbl);
+ panic("incore: buf fault");
+ }
+ /* hit */
+ if (bp->b_lblkno == blkno && bp->b_vp == vp
+ && (bp->b_flags & B_INVAL) == 0)
+ return (bp);
+ bp = bp->b_hash.le_next;
+ }
+ splx(s);
+
+ return(0);
}
+/*
+ * Get a block given a specified block and offset into a file/device.
+ */
struct buf *
-geteblk(a1)
- int a1;
+getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
-
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ struct buf *bp;
+ int x;
+ struct bufhashhdr *bh;
+
+ x = splbio();
+loop:
+ if (bp = incore(vp, blkno)) {
+ if (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
+ goto loop;
+ }
+ bp->b_flags |= B_BUSY | B_CACHE;
+ bremfree(bp);
+ /*
+ * check for size inconsistancies
+ */
+ if (bp->b_bcount != size) {
+ printf("getblk: invalid buffer size: %d\n", bp->b_bcount);
+ bp->b_flags |= B_INVAL;
+ bwrite(bp);
+ goto loop;
+ }
+ } else {
+
+ if ((bp = getnewbuf(0, 0)) == 0)
+ goto loop;
+ allocbuf(bp, size);
+ /*
+ * have to check again, because of a possible
+ * race condition.
+ */
+ if (incore( vp, blkno)) {
+ allocbuf(bp, 0);
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ goto loop;
+ }
+ bp->b_blkno = bp->b_lblkno = blkno;
+ bgetvp(vp, bp);
+ LIST_REMOVE(bp, b_hash);
+ bh = BUFHASH(vp, blkno);
+ LIST_INSERT_HEAD(bh, bp, b_hash);
+ }
+ splx(x);
+ return (bp);
}
-allocbuf(a1, a2)
- struct buf *a1;
- int a2;
+/*
+ * Get an empty, disassociated buffer of given size.
+ */
+struct buf *
+geteblk(int size)
{
-
- /*
- * Body deleted.
- */
- return (0);
+ struct buf *bp;
+ while ((bp = getnewbuf(0, 0)) == 0)
+ ;
+ allocbuf(bp, size);
+ bp->b_flags |= B_INVAL;
+ return (bp);
}
-struct buf *
-getnewbuf(a1, a2)
- int a1, a2;
+/*
+ * Modify the length of a buffer's underlying buffer storage without
+ * destroying information (unless, of course the buffer is shrinking).
+ */
+void
+allocbuf(struct buf *bp, int size)
{
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ int newbsize = round_page(size);
+
+ if( newbsize == bp->b_bufsize) {
+ bp->b_bcount = size;
+ return;
+ } else if( newbsize < bp->b_bufsize) {
+ vm_hold_free_pages(
+ (vm_offset_t) bp->b_data + newbsize,
+ (vm_offset_t) bp->b_data + bp->b_bufsize);
+ } else if( newbsize > bp->b_bufsize) {
+ vm_hold_load_pages(
+ (vm_offset_t) bp->b_data + bp->b_bufsize,
+ (vm_offset_t) bp->b_data + newbsize);
+ }
+
+ /* adjust buffer cache's idea of memory allocated to buffer contents */
+ freebufspace -= newbsize - bp->b_bufsize;
+ allocbufspace += newbsize - bp->b_bufsize;
+
+ bp->b_bufsize = newbsize;
+ bp->b_bcount = size;
}
-biowait(a1)
- struct buf *a1;
+/*
+ * Wait for buffer I/O completion, returning error status.
+ */
+int
+biowait(register struct buf *bp)
{
-
- /*
- * Body deleted.
- */
- return (EIO);
+ int x;
+
+ x = splbio();
+ while ((bp->b_flags & B_DONE) == 0)
+ tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
+ if((bp->b_flags & B_ERROR) || bp->b_error) {
+ if ((bp->b_flags & B_INVAL) == 0) {
+ bp->b_flags |= B_INVAL;
+ bp->b_dev = NODEV;
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ }
+ if (!bp->b_error)
+ bp->b_error = EIO;
+ else
+ bp->b_flags |= B_ERROR;
+ splx(x);
+ return (bp->b_error);
+ } else {
+ splx(x);
+ return (0);
+ }
}
+/*
+ * Finish I/O on a buffer, calling an optional function.
+ * This is usually called from interrupt level, so process blocking
+ * is not *a good idea*.
+ */
void
-biodone(a1)
- struct buf *a1;
+biodone(register struct buf *bp)
{
+ int s;
+ s = splbio();
+ bp->b_flags |= B_DONE;
- /*
- * Body deleted.
- */
- return;
+ if ((bp->b_flags & B_READ) == 0) {
+ vwakeup(bp);
+ }
+
+ /* call optional completion function if requested */
+ if (bp->b_flags & B_CALL) {
+ bp->b_flags &= ~B_CALL;
+ (*bp->b_iodone)(bp);
+ splx(s);
+ return;
+ }
+
+/*
+ * For asynchronous completions, release the buffer now. The brelse
+ * checks for B_WANTED and will do the wakeup there if necessary -
+ * so no need to do a wakeup here in the async case.
+ */
+
+ if (bp->b_flags & B_ASYNC) {
+ brelse(bp);
+ } else {
+ bp->b_flags &= ~B_WANTED;
+ wakeup((caddr_t) bp);
+ }
+ splx(s);
}
int
count_lock_queue()
{
+ int count;
+ struct buf *bp;
+
+ count = 0;
+ for(bp = bufqueues[BQ_LOCKED].tqh_first;
+ bp != NULL;
+ bp = bp->b_freelist.tqe_next)
+ count++;
+ return(count);
+}
- /*
- * Body deleted.
- */
- return (0);
+#ifndef UPDATE_INTERVAL
+int vfs_update_interval = 30;
+#else
+int vfs_update_interval = UPDATE_INTERVAL;
+#endif
+
+void
+vfs_update() {
+ (void) spl0();
+ while(1) {
+ tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
+ hz * vfs_update_interval);
+ vfs_update_wakeup = 0;
+ sync(curproc, NULL, NULL);
+ }
}
-#ifdef DIAGNOSTIC
/*
- * Print out statistics on the current allocation of the buffer pool.
- * Can be enabled to print out on every ``sync'' by setting "syncprt"
- * in vfs_syscalls.c using sysctl.
+ * these routines are not in the correct place (yet)
+ * also they work *ONLY* for kernel_pmap!!!
*/
void
-vfs_bufstats()
-{
- int s, i, j, count;
- register struct buf *bp;
- register struct bqueues *dp;
- int counts[MAXBSIZE/CLBYTES+1];
- static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
-
- for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
- count = 0;
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
- counts[j] = 0;
- s = splbio();
- for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
- counts[bp->b_bufsize/CLBYTES]++;
- count++;
+vm_hold_load_pages(vm_offset_t froma, vm_offset_t toa) {
+ vm_offset_t pg;
+ vm_page_t p;
+ vm_offset_t from = round_page(froma);
+ vm_offset_t to = round_page(toa);
+
+ for(pg = from ; pg < to ; pg += PAGE_SIZE) {
+ vm_offset_t pa;
+
+ tryagain:
+ p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS);
+ if( !p) {
+ VM_WAIT;
+ goto tryagain;
}
- splx(s);
- printf("%s: total-%d", bname[i], count);
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
- if (counts[j] != 0)
- printf(", %d-%d", j * CLBYTES, counts[j]);
- printf("\n");
+
+ vm_page_wire(p);
+ pmap_enter(kernel_pmap, pg, VM_PAGE_TO_PHYS(p),
+ VM_PROT_READ|VM_PROT_WRITE, 1);
}
}
-#endif /* DIAGNOSTIC */
+
+void
+vm_hold_free_pages(vm_offset_t froma, vm_offset_t toa) {
+ vm_offset_t pg;
+ vm_page_t p;
+ vm_offset_t from = round_page(froma);
+ vm_offset_t to = round_page(toa);
+
+ for(pg = from ; pg < to ; pg += PAGE_SIZE) {
+ vm_offset_t pa;
+ pa = pmap_kextract(pg);
+ if( !pa) {
+ printf("No pa for va: %x\n", pg);
+ } else {
+ p = PHYS_TO_VM_PAGE( pa);
+ pmap_remove(kernel_pmap, pg, pg + PAGE_SIZE);
+ vm_page_free(p);
+ }
+ }
+}
+
+void
+bufstats()
+{
+}
+
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 4ccfd7289a04..2ddf644c98d9 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -186,6 +186,7 @@ cache_lookup(dvp, vpp, cnp)
/*
* Add an entry to the cache
*/
+void
cache_enter(dvp, vp, cnp)
struct vnode *dvp;
struct vnode *vp;
@@ -252,6 +253,7 @@ cache_enter(dvp, vp, cnp)
/*
* Name cache initialization, from vfs_init() when we are booting
*/
+void
nchinit()
{
@@ -263,6 +265,7 @@ nchinit()
* Cache flush, a particular vnode; called when a vnode is renamed to
* hide entries that would now be invalid
*/
+void
cache_purge(vp)
struct vnode *vp;
{
@@ -288,6 +291,7 @@ cache_purge(vp)
* if the cache lru chain is modified while we are dumping the
* inode. This makes the algorithm O(n^2), but do you think I care?
*/
+void
cache_purgevfs(mp)
struct mount *mp;
{
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index c34fbc34a679..40fa3be52f93 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
@@ -41,7 +42,6 @@
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
-#include <libkern/libkern.h>
#ifdef DEBUG
#include <vm/vm.h>
@@ -106,6 +106,7 @@ int doclusterraz = 0;
* rbp is the read-ahead block.
* If either is NULL, then you don't have to do the I/O.
*/
+int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
struct vnode *vp;
u_quad_t filesize;
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 9891fe61c198..9535b8a7231e 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -62,6 +62,8 @@
#include <miscfs/specfs/specdev.h>
+void insmntque __P((struct vnode *, struct mount *));
+
enum vtype iftovt_tab[16] = {
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
@@ -86,6 +88,7 @@ struct mntlist mountlist; /* mounted filesystem list */
/*
* Initialize the vnode management data structures.
*/
+void
vntblinit()
{
@@ -97,6 +100,7 @@ vntblinit()
* Lock a filesystem.
* Used to prevent access to it while mounting and unmounting.
*/
+int
vfs_lock(mp)
register struct mount *mp;
{
@@ -131,6 +135,7 @@ vfs_unlock(mp)
* Mark a mount point as busy.
* Used to synchronize access and to delay unmounting.
*/
+int
vfs_busy(mp)
register struct mount *mp;
{
@@ -149,6 +154,7 @@ vfs_busy(mp)
* Free a busy filesystem.
* Panic if filesystem is not busy.
*/
+void
vfs_unbusy(mp)
register struct mount *mp;
{
@@ -209,12 +215,14 @@ static u_short xxxfs_mntid;
/*
* Set vnode attributes to VNOVAL
*/
-void vattr_null(vap)
+void
+vattr_null(vap)
register struct vattr *vap;
{
vap->va_type = VNON;
- vap->va_size = vap->va_bytes = VNOVAL;
+ vap->va_size = VNOVAL;
+ vap->va_bytes = VNOVAL;
vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
vap->va_fsid = vap->va_fileid =
vap->va_blocksize = vap->va_rdev =
@@ -236,6 +244,7 @@ extern struct vattr va_null;
/*
* Return the next vnode from the free list.
*/
+int
getnewvnode(tag, mp, vops, vpp)
enum vtagtype tag;
struct mount *mp;
@@ -298,6 +307,7 @@ getnewvnode(tag, mp, vops, vpp)
/*
* Move a vnode from one mount queue to another.
*/
+void
insmntque(vp, mp)
register struct vnode *vp;
register struct mount *mp;
@@ -319,6 +329,7 @@ insmntque(vp, mp)
/*
* Update outstanding I/O count and do wakeup if requested.
*/
+void
vwakeup(bp)
register struct buf *bp;
{
@@ -411,6 +422,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Associate a buffer with a vnode.
*/
+void
bgetvp(vp, bp)
register struct vnode *vp;
register struct buf *bp;
@@ -433,6 +445,7 @@ bgetvp(vp, bp)
/*
* Disassociate a buffer from a vnode.
*/
+void
brelvp(bp)
register struct buf *bp;
{
@@ -455,6 +468,7 @@ brelvp(bp)
* Used to assign file specific control information
* (indirect blocks) to the vnode to which they belong.
*/
+void
reassignbuf(bp, newvp)
register struct buf *bp;
register struct vnode *newvp;
@@ -486,6 +500,7 @@ reassignbuf(bp, newvp)
* Used for root filesystem, argdev, and swap areas.
* Also used for memory file system special devices.
*/
+int
bdevvp(dev, vpp)
dev_t dev;
struct vnode **vpp;
@@ -579,6 +594,7 @@ loop:
* indicate that the vnode is no longer usable (possibly having
* been changed to a new file system type).
*/
+int
vget(vp, lockflag)
register struct vnode *vp;
int lockflag;
@@ -612,7 +628,8 @@ vget(vp, lockflag)
/*
* Vnode reference, just increment the count
*/
-void vref(vp)
+void
+vref(vp)
struct vnode *vp;
{
@@ -624,7 +641,8 @@ void vref(vp)
/*
* vput(), just unlock and vrele()
*/
-void vput(vp)
+void
+vput(vp)
register struct vnode *vp;
{
@@ -636,7 +654,8 @@ void vput(vp)
* Vnode release.
* If count drops to zero, call inactive routine and return to freelist.
*/
-void vrele(vp)
+void
+vrele(vp)
register struct vnode *vp;
{
@@ -663,7 +682,8 @@ void vrele(vp)
/*
* Page or buffer structure gets a reference.
*/
-void vhold(vp)
+void
+vhold(vp)
register struct vnode *vp;
{
@@ -673,7 +693,8 @@ void vhold(vp)
/*
* Page or buffer structure frees a reference.
*/
-void holdrele(vp)
+void
+holdrele(vp)
register struct vnode *vp;
{
@@ -695,6 +716,7 @@ int busyprt = 0; /* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif
+int
vflush(mp, skipvp, flags)
struct mount *mp;
struct vnode *skipvp;
@@ -837,7 +859,8 @@ vclean(vp, flags)
* Eliminate all activity associated with the requested vnode
* and with all vnodes aliased to the requested vnode.
*/
-void vgoneall(vp)
+void
+vgoneall(vp)
register struct vnode *vp;
{
register struct vnode *vq;
@@ -880,7 +903,8 @@ void vgoneall(vp)
* Eliminate all activity associated with a vnode
* in preparation for reuse.
*/
-void vgone(vp)
+void
+vgone(vp)
register struct vnode *vp;
{
register struct vnode *vq;
@@ -966,6 +990,7 @@ void vgone(vp)
/*
* Lookup a vnode by device number.
*/
+int
vfinddev(dev, type, vpp)
dev_t dev;
enum vtype type;
@@ -985,6 +1010,7 @@ vfinddev(dev, type, vpp)
/*
* Calculate the total number of references to a special device.
*/
+int
vcount(vp)
register struct vnode *vp;
{
@@ -1016,6 +1042,7 @@ loop:
static char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
+void
vprint(label, vp)
char *label;
register struct vnode *vp;
@@ -1057,6 +1084,7 @@ vprint(label, vp)
* List all of the locked vnodes in the system.
* Called when debugging the kernel.
*/
+void
printlockedvnodes()
{
register struct mount *mp;
@@ -1081,6 +1109,7 @@ int kinfo_vgetfailed;
* Copyout address of vnode followed by vnode.
*/
/* ARGSUSED */
+int
sysctl_vnode(where, sizep)
char *where;
size_t *sizep;
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 345c7a79bf20..f5c3d7835314 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -55,7 +55,8 @@
#include <vm/vm.h>
#include <sys/sysctl.h>
-static int change_dir __P((struct nameidata *ndp, struct proc *p));
+void cvtstat __P((struct stat *, struct ostat *));
+static int change_dir __P((struct nameidata *ndp, struct proc *p));
/*
* Virtual File System System Calls
@@ -71,6 +72,7 @@ struct mount_args {
caddr_t data;
};
/* ARGSUSED */
+int
mount(p, uap, retval)
struct proc *p;
register struct mount_args *uap;
@@ -78,7 +80,7 @@ mount(p, uap, retval)
{
register struct vnode *vp;
register struct mount *mp;
- int error, flag;
+ int error, flag = 0;
struct nameidata nd;
/*
@@ -200,6 +202,7 @@ struct unmount_args {
int flags;
};
/* ARGSUSED */
+int
unmount(p, uap, retval)
struct proc *p;
register struct unmount_args *uap;
@@ -240,6 +243,7 @@ unmount(p, uap, retval)
/*
* Do the actual file system unmount.
*/
+int
dounmount(mp, flags, p)
register struct mount *mp;
int flags;
@@ -289,6 +293,7 @@ struct sync_args {
int dummy;
};
/* ARGSUSED */
+int
sync(p, uap, retval)
struct proc *p;
struct sync_args *uap;
@@ -330,6 +335,7 @@ struct quotactl_args {
caddr_t arg;
};
/* ARGSUSED */
+int
quotactl(p, uap, retval)
struct proc *p;
register struct quotactl_args *uap;
@@ -355,6 +361,7 @@ struct statfs_args {
struct statfs *buf;
};
/* ARGSUSED */
+int
statfs(p, uap, retval)
struct proc *p;
register struct statfs_args *uap;
@@ -385,6 +392,7 @@ struct fstatfs_args {
struct statfs *buf;
};
/* ARGSUSED */
+int
fstatfs(p, uap, retval)
struct proc *p;
register struct fstatfs_args *uap;
@@ -413,6 +421,7 @@ struct getfsstat_args {
long bufsize;
int flags;
};
+int
getfsstat(p, uap, retval)
struct proc *p;
register struct getfsstat_args *uap;
@@ -459,6 +468,7 @@ struct fchdir_args {
int fd;
};
/* ARGSUSED */
+int
fchdir(p, uap, retval)
struct proc *p;
struct fchdir_args *uap;
@@ -493,6 +503,7 @@ struct chdir_args {
char *path;
};
/* ARGSUSED */
+int
chdir(p, uap, retval)
struct proc *p;
struct chdir_args *uap;
@@ -517,6 +528,7 @@ struct chroot_args {
char *path;
};
/* ARGSUSED */
+int
chroot(p, uap, retval)
struct proc *p;
struct chroot_args *uap;
@@ -570,6 +582,7 @@ struct open_args {
int flags;
int mode;
};
+int
open(p, uap, retval)
struct proc *p;
register struct open_args *uap;
@@ -646,6 +659,7 @@ struct ocreat_args {
char *path;
int mode;
};
+int
ocreat(p, uap, retval)
struct proc *p;
register struct ocreat_args *uap;
@@ -669,6 +683,7 @@ struct mknod_args {
int dev;
};
/* ARGSUSED */
+int
mknod(p, uap, retval)
struct proc *p;
register struct mknod_args *uap;
@@ -730,6 +745,7 @@ struct mkfifo_args {
int mode;
};
/* ARGSUSED */
+int
mkfifo(p, uap, retval)
struct proc *p;
register struct mkfifo_args *uap;
@@ -770,6 +786,7 @@ struct link_args {
char *link;
};
/* ARGSUSED */
+int
link(p, uap, retval)
struct proc *p;
register struct link_args *uap;
@@ -820,6 +837,7 @@ struct symlink_args {
char *link;
};
/* ARGSUSED */
+int
symlink(p, uap, retval)
struct proc *p;
register struct symlink_args *uap;
@@ -862,6 +880,7 @@ struct unlink_args {
char *path;
};
/* ARGSUSED */
+int
unlink(p, uap, retval)
struct proc *p;
struct unlink_args *uap;
@@ -912,6 +931,7 @@ struct lseek_args {
off_t offset;
int whence;
};
+int
lseek(p, uap, retval)
struct proc *p;
register struct lseek_args *uap;
@@ -957,6 +977,7 @@ struct olseek_args {
long offset;
int whence;
};
+int
olseek(p, uap, retval)
struct proc *p;
register struct olseek_args *uap;
@@ -982,6 +1003,7 @@ struct access_args {
char *path;
int flags;
};
+int
access(p, uap, retval)
struct proc *p;
register struct access_args *uap;
@@ -1029,6 +1051,7 @@ struct ostat_args {
struct ostat *ub;
};
/* ARGSUSED */
+int
ostat(p, uap, retval)
struct proc *p;
register struct ostat_args *uap;
@@ -1059,6 +1082,7 @@ struct olstat_args {
struct ostat *ub;
};
/* ARGSUSED */
+int
olstat(p, uap, retval)
struct proc *p;
register struct olstat_args *uap;
@@ -1084,6 +1108,7 @@ olstat(p, uap, retval)
/*
* Convert from an old to a new stat structure.
*/
+void
cvtstat(st, ost)
struct stat *st;
struct ostat *ost;
@@ -1118,6 +1143,7 @@ struct stat_args {
struct stat *ub;
};
/* ARGSUSED */
+int
stat(p, uap, retval)
struct proc *p;
register struct stat_args *uap;
@@ -1146,6 +1172,7 @@ struct lstat_args {
struct stat *ub;
};
/* ARGSUSED */
+int
lstat(p, uap, retval)
struct proc *p;
register struct lstat_args *uap;
@@ -1204,6 +1231,7 @@ struct pathconf_args {
int name;
};
/* ARGSUSED */
+int
pathconf(p, uap, retval)
struct proc *p;
register struct pathconf_args *uap;
@@ -1229,6 +1257,7 @@ struct readlink_args {
int count;
};
/* ARGSUSED */
+int
readlink(p, uap, retval)
struct proc *p;
register struct readlink_args *uap;
@@ -1271,6 +1300,7 @@ struct chflags_args {
int flags;
};
/* ARGSUSED */
+int
chflags(p, uap, retval)
struct proc *p;
register struct chflags_args *uap;
@@ -1306,6 +1336,7 @@ struct fchflags_args {
int flags;
};
/* ARGSUSED */
+int
fchflags(p, uap, retval)
struct proc *p;
register struct fchflags_args *uap;
@@ -1340,6 +1371,7 @@ struct chmod_args {
int mode;
};
/* ARGSUSED */
+int
chmod(p, uap, retval)
struct proc *p;
register struct chmod_args *uap;
@@ -1375,6 +1407,7 @@ struct fchmod_args {
int mode;
};
/* ARGSUSED */
+int
fchmod(p, uap, retval)
struct proc *p;
register struct fchmod_args *uap;
@@ -1410,6 +1443,7 @@ struct chown_args {
int gid;
};
/* ARGSUSED */
+int
chown(p, uap, retval)
struct proc *p;
register struct chown_args *uap;
@@ -1447,6 +1481,7 @@ struct fchown_args {
int gid;
};
/* ARGSUSED */
+int
fchown(p, uap, retval)
struct proc *p;
register struct fchown_args *uap;
@@ -1482,6 +1517,7 @@ struct utimes_args {
struct timeval *tptr;
};
/* ARGSUSED */
+int
utimes(p, uap, retval)
struct proc *p;
register struct utimes_args *uap;
@@ -1528,6 +1564,7 @@ struct truncate_args {
off_t length;
};
/* ARGSUSED */
+int
truncate(p, uap, retval)
struct proc *p;
register struct truncate_args *uap;
@@ -1565,6 +1602,7 @@ struct ftruncate_args {
off_t length;
};
/* ARGSUSED */
+int
ftruncate(p, uap, retval)
struct proc *p;
register struct ftruncate_args *uap;
@@ -1602,6 +1640,7 @@ struct otruncate_args {
long length;
};
/* ARGSUSED */
+int
otruncate(p, uap, retval)
struct proc *p;
register struct otruncate_args *uap;
@@ -1622,6 +1661,7 @@ struct oftruncate_args {
long length;
};
/* ARGSUSED */
+int
oftruncate(p, uap, retval)
struct proc *p;
register struct oftruncate_args *uap;
@@ -1642,6 +1682,7 @@ struct fsync_args {
int fd;
};
/* ARGSUSED */
+int
fsync(p, uap, retval)
struct proc *p;
struct fsync_args *uap;
@@ -1669,6 +1710,7 @@ struct rename_args {
char *to;
};
/* ARGSUSED */
+int
rename(p, uap, retval)
struct proc *p;
register struct rename_args *uap;
@@ -1754,6 +1796,7 @@ struct mkdir_args {
int mode;
};
/* ARGSUSED */
+int
mkdir(p, uap, retval)
struct proc *p;
register struct mkdir_args *uap;
@@ -1794,6 +1837,7 @@ struct rmdir_args {
char *path;
};
/* ARGSUSED */
+int
rmdir(p, uap, retval)
struct proc *p;
struct rmdir_args *uap;
@@ -1849,6 +1893,7 @@ struct ogetdirentries_args {
u_int count;
long *basep;
};
+int
ogetdirentries(p, uap, retval)
struct proc *p;
register struct ogetdirentries_args *uap;
@@ -1947,6 +1992,7 @@ struct getdirentries_args {
u_int count;
long *basep;
};
+int
getdirentries(p, uap, retval)
struct proc *p;
register struct getdirentries_args *uap;
@@ -2057,6 +2103,7 @@ struct revoke_args {
char *path;
};
/* ARGSUSED */
+int
revoke(p, uap, retval)
struct proc *p;
register struct revoke_args *uap;
@@ -2090,6 +2137,7 @@ out:
/*
* Convert a user file descriptor to a kernel file entry.
*/
+int
getvnode(fdp, fd, fpp)
struct filedesc *fdp;
struct file **fpp;
diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c
index 1ce7347bdc86..3ab520d652e8 100644
--- a/sys/kern/vfs_init.c
+++ b/sys/kern/vfs_init.c
@@ -217,6 +217,7 @@ struct vattr va_null;
/*
* Initialize the vnode structures and initialize each file system type.
*/
+void
vfsinit()
{
struct vfsops **vfsp;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 9891fe61c198..9535b8a7231e 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -62,6 +62,8 @@
#include <miscfs/specfs/specdev.h>
+void insmntque __P((struct vnode *, struct mount *));
+
enum vtype iftovt_tab[16] = {
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
@@ -86,6 +88,7 @@ struct mntlist mountlist; /* mounted filesystem list */
/*
* Initialize the vnode management data structures.
*/
+void
vntblinit()
{
@@ -97,6 +100,7 @@ vntblinit()
* Lock a filesystem.
* Used to prevent access to it while mounting and unmounting.
*/
+int
vfs_lock(mp)
register struct mount *mp;
{
@@ -131,6 +135,7 @@ vfs_unlock(mp)
* Mark a mount point as busy.
* Used to synchronize access and to delay unmounting.
*/
+int
vfs_busy(mp)
register struct mount *mp;
{
@@ -149,6 +154,7 @@ vfs_busy(mp)
* Free a busy filesystem.
* Panic if filesystem is not busy.
*/
+void
vfs_unbusy(mp)
register struct mount *mp;
{
@@ -209,12 +215,14 @@ static u_short xxxfs_mntid;
/*
* Set vnode attributes to VNOVAL
*/
-void vattr_null(vap)
+void
+vattr_null(vap)
register struct vattr *vap;
{
vap->va_type = VNON;
- vap->va_size = vap->va_bytes = VNOVAL;
+ vap->va_size = VNOVAL;
+ vap->va_bytes = VNOVAL;
vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
vap->va_fsid = vap->va_fileid =
vap->va_blocksize = vap->va_rdev =
@@ -236,6 +244,7 @@ extern struct vattr va_null;
/*
* Return the next vnode from the free list.
*/
+int
getnewvnode(tag, mp, vops, vpp)
enum vtagtype tag;
struct mount *mp;
@@ -298,6 +307,7 @@ getnewvnode(tag, mp, vops, vpp)
/*
* Move a vnode from one mount queue to another.
*/
+void
insmntque(vp, mp)
register struct vnode *vp;
register struct mount *mp;
@@ -319,6 +329,7 @@ insmntque(vp, mp)
/*
* Update outstanding I/O count and do wakeup if requested.
*/
+void
vwakeup(bp)
register struct buf *bp;
{
@@ -411,6 +422,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
/*
* Associate a buffer with a vnode.
*/
+void
bgetvp(vp, bp)
register struct vnode *vp;
register struct buf *bp;
@@ -433,6 +445,7 @@ bgetvp(vp, bp)
/*
* Disassociate a buffer from a vnode.
*/
+void
brelvp(bp)
register struct buf *bp;
{
@@ -455,6 +468,7 @@ brelvp(bp)
* Used to assign file specific control information
* (indirect blocks) to the vnode to which they belong.
*/
+void
reassignbuf(bp, newvp)
register struct buf *bp;
register struct vnode *newvp;
@@ -486,6 +500,7 @@ reassignbuf(bp, newvp)
* Used for root filesystem, argdev, and swap areas.
* Also used for memory file system special devices.
*/
+int
bdevvp(dev, vpp)
dev_t dev;
struct vnode **vpp;
@@ -579,6 +594,7 @@ loop:
* indicate that the vnode is no longer usable (possibly having
* been changed to a new file system type).
*/
+int
vget(vp, lockflag)
register struct vnode *vp;
int lockflag;
@@ -612,7 +628,8 @@ vget(vp, lockflag)
/*
* Vnode reference, just increment the count
*/
-void vref(vp)
+void
+vref(vp)
struct vnode *vp;
{
@@ -624,7 +641,8 @@ void vref(vp)
/*
* vput(), just unlock and vrele()
*/
-void vput(vp)
+void
+vput(vp)
register struct vnode *vp;
{
@@ -636,7 +654,8 @@ void vput(vp)
* Vnode release.
* If count drops to zero, call inactive routine and return to freelist.
*/
-void vrele(vp)
+void
+vrele(vp)
register struct vnode *vp;
{
@@ -663,7 +682,8 @@ void vrele(vp)
/*
* Page or buffer structure gets a reference.
*/
-void vhold(vp)
+void
+vhold(vp)
register struct vnode *vp;
{
@@ -673,7 +693,8 @@ void vhold(vp)
/*
* Page or buffer structure frees a reference.
*/
-void holdrele(vp)
+void
+holdrele(vp)
register struct vnode *vp;
{
@@ -695,6 +716,7 @@ int busyprt = 0; /* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif
+int
vflush(mp, skipvp, flags)
struct mount *mp;
struct vnode *skipvp;
@@ -837,7 +859,8 @@ vclean(vp, flags)
* Eliminate all activity associated with the requested vnode
* and with all vnodes aliased to the requested vnode.
*/
-void vgoneall(vp)
+void
+vgoneall(vp)
register struct vnode *vp;
{
register struct vnode *vq;
@@ -880,7 +903,8 @@ void vgoneall(vp)
* Eliminate all activity associated with a vnode
* in preparation for reuse.
*/
-void vgone(vp)
+void
+vgone(vp)
register struct vnode *vp;
{
register struct vnode *vq;
@@ -966,6 +990,7 @@ void vgone(vp)
/*
* Lookup a vnode by device number.
*/
+int
vfinddev(dev, type, vpp)
dev_t dev;
enum vtype type;
@@ -985,6 +1010,7 @@ vfinddev(dev, type, vpp)
/*
* Calculate the total number of references to a special device.
*/
+int
vcount(vp)
register struct vnode *vp;
{
@@ -1016,6 +1042,7 @@ loop:
static char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
+void
vprint(label, vp)
char *label;
register struct vnode *vp;
@@ -1057,6 +1084,7 @@ vprint(label, vp)
* List all of the locked vnodes in the system.
* Called when debugging the kernel.
*/
+void
printlockedvnodes()
{
register struct mount *mp;
@@ -1081,6 +1109,7 @@ int kinfo_vgetfailed;
* Copyout address of vnode followed by vnode.
*/
/* ARGSUSED */
+int
sysctl_vnode(where, sizep)
char *where;
size_t *sizep;
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 345c7a79bf20..f5c3d7835314 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -55,7 +55,8 @@
#include <vm/vm.h>
#include <sys/sysctl.h>
-static int change_dir __P((struct nameidata *ndp, struct proc *p));
+void cvtstat __P((struct stat *, struct ostat *));
+static int change_dir __P((struct nameidata *ndp, struct proc *p));
/*
* Virtual File System System Calls
@@ -71,6 +72,7 @@ struct mount_args {
caddr_t data;
};
/* ARGSUSED */
+int
mount(p, uap, retval)
struct proc *p;
register struct mount_args *uap;
@@ -78,7 +80,7 @@ mount(p, uap, retval)
{
register struct vnode *vp;
register struct mount *mp;
- int error, flag;
+ int error, flag = 0;
struct nameidata nd;
/*
@@ -200,6 +202,7 @@ struct unmount_args {
int flags;
};
/* ARGSUSED */
+int
unmount(p, uap, retval)
struct proc *p;
register struct unmount_args *uap;
@@ -240,6 +243,7 @@ unmount(p, uap, retval)
/*
* Do the actual file system unmount.
*/
+int
dounmount(mp, flags, p)
register struct mount *mp;
int flags;
@@ -289,6 +293,7 @@ struct sync_args {
int dummy;
};
/* ARGSUSED */
+int
sync(p, uap, retval)
struct proc *p;
struct sync_args *uap;
@@ -330,6 +335,7 @@ struct quotactl_args {
caddr_t arg;
};
/* ARGSUSED */
+int
quotactl(p, uap, retval)
struct proc *p;
register struct quotactl_args *uap;
@@ -355,6 +361,7 @@ struct statfs_args {
struct statfs *buf;
};
/* ARGSUSED */
+int
statfs(p, uap, retval)
struct proc *p;
register struct statfs_args *uap;
@@ -385,6 +392,7 @@ struct fstatfs_args {
struct statfs *buf;
};
/* ARGSUSED */
+int
fstatfs(p, uap, retval)
struct proc *p;
register struct fstatfs_args *uap;
@@ -413,6 +421,7 @@ struct getfsstat_args {
long bufsize;
int flags;
};
+int
getfsstat(p, uap, retval)
struct proc *p;
register struct getfsstat_args *uap;
@@ -459,6 +468,7 @@ struct fchdir_args {
int fd;
};
/* ARGSUSED */
+int
fchdir(p, uap, retval)
struct proc *p;
struct fchdir_args *uap;
@@ -493,6 +503,7 @@ struct chdir_args {
char *path;
};
/* ARGSUSED */
+int
chdir(p, uap, retval)
struct proc *p;
struct chdir_args *uap;
@@ -517,6 +528,7 @@ struct chroot_args {
char *path;
};
/* ARGSUSED */
+int
chroot(p, uap, retval)
struct proc *p;
struct chroot_args *uap;
@@ -570,6 +582,7 @@ struct open_args {
int flags;
int mode;
};
+int
open(p, uap, retval)
struct proc *p;
register struct open_args *uap;
@@ -646,6 +659,7 @@ struct ocreat_args {
char *path;
int mode;
};
+int
ocreat(p, uap, retval)
struct proc *p;
register struct ocreat_args *uap;
@@ -669,6 +683,7 @@ struct mknod_args {
int dev;
};
/* ARGSUSED */
+int
mknod(p, uap, retval)
struct proc *p;
register struct mknod_args *uap;
@@ -730,6 +745,7 @@ struct mkfifo_args {
int mode;
};
/* ARGSUSED */
+int
mkfifo(p, uap, retval)
struct proc *p;
register struct mkfifo_args *uap;
@@ -770,6 +786,7 @@ struct link_args {
char *link;
};
/* ARGSUSED */
+int
link(p, uap, retval)
struct proc *p;
register struct link_args *uap;
@@ -820,6 +837,7 @@ struct symlink_args {
char *link;
};
/* ARGSUSED */
+int
symlink(p, uap, retval)
struct proc *p;
register struct symlink_args *uap;
@@ -862,6 +880,7 @@ struct unlink_args {
char *path;
};
/* ARGSUSED */
+int
unlink(p, uap, retval)
struct proc *p;
struct unlink_args *uap;
@@ -912,6 +931,7 @@ struct lseek_args {
off_t offset;
int whence;
};
+int
lseek(p, uap, retval)
struct proc *p;
register struct lseek_args *uap;
@@ -957,6 +977,7 @@ struct olseek_args {
long offset;
int whence;
};
+int
olseek(p, uap, retval)
struct proc *p;
register struct olseek_args *uap;
@@ -982,6 +1003,7 @@ struct access_args {
char *path;
int flags;
};
+int
access(p, uap, retval)
struct proc *p;
register struct access_args *uap;
@@ -1029,6 +1051,7 @@ struct ostat_args {
struct ostat *ub;
};
/* ARGSUSED */
+int
ostat(p, uap, retval)
struct proc *p;
register struct ostat_args *uap;
@@ -1059,6 +1082,7 @@ struct olstat_args {
struct ostat *ub;
};
/* ARGSUSED */
+int
olstat(p, uap, retval)
struct proc *p;
register struct olstat_args *uap;
@@ -1084,6 +1108,7 @@ olstat(p, uap, retval)
/*
* Convert from an old to a new stat structure.
*/
+void
cvtstat(st, ost)
struct stat *st;
struct ostat *ost;
@@ -1118,6 +1143,7 @@ struct stat_args {
struct stat *ub;
};
/* ARGSUSED */
+int
stat(p, uap, retval)
struct proc *p;
register struct stat_args *uap;
@@ -1146,6 +1172,7 @@ struct lstat_args {
struct stat *ub;
};
/* ARGSUSED */
+int
lstat(p, uap, retval)
struct proc *p;
register struct lstat_args *uap;
@@ -1204,6 +1231,7 @@ struct pathconf_args {
int name;
};
/* ARGSUSED */
+int
pathconf(p, uap, retval)
struct proc *p;
register struct pathconf_args *uap;
@@ -1229,6 +1257,7 @@ struct readlink_args {
int count;
};
/* ARGSUSED */
+int
readlink(p, uap, retval)
struct proc *p;
register struct readlink_args *uap;
@@ -1271,6 +1300,7 @@ struct chflags_args {
int flags;
};
/* ARGSUSED */
+int
chflags(p, uap, retval)
struct proc *p;
register struct chflags_args *uap;
@@ -1306,6 +1336,7 @@ struct fchflags_args {
int flags;
};
/* ARGSUSED */
+int
fchflags(p, uap, retval)
struct proc *p;
register struct fchflags_args *uap;
@@ -1340,6 +1371,7 @@ struct chmod_args {
int mode;
};
/* ARGSUSED */
+int
chmod(p, uap, retval)
struct proc *p;
register struct chmod_args *uap;
@@ -1375,6 +1407,7 @@ struct fchmod_args {
int mode;
};
/* ARGSUSED */
+int
fchmod(p, uap, retval)
struct proc *p;
register struct fchmod_args *uap;
@@ -1410,6 +1443,7 @@ struct chown_args {
int gid;
};
/* ARGSUSED */
+int
chown(p, uap, retval)
struct proc *p;
register struct chown_args *uap;
@@ -1447,6 +1481,7 @@ struct fchown_args {
int gid;
};
/* ARGSUSED */
+int
fchown(p, uap, retval)
struct proc *p;
register struct fchown_args *uap;
@@ -1482,6 +1517,7 @@ struct utimes_args {
struct timeval *tptr;
};
/* ARGSUSED */
+int
utimes(p, uap, retval)
struct proc *p;
register struct utimes_args *uap;
@@ -1528,6 +1564,7 @@ struct truncate_args {
off_t length;
};
/* ARGSUSED */
+int
truncate(p, uap, retval)
struct proc *p;
register struct truncate_args *uap;
@@ -1565,6 +1602,7 @@ struct ftruncate_args {
off_t length;
};
/* ARGSUSED */
+int
ftruncate(p, uap, retval)
struct proc *p;
register struct ftruncate_args *uap;
@@ -1602,6 +1640,7 @@ struct otruncate_args {
long length;
};
/* ARGSUSED */
+int
otruncate(p, uap, retval)
struct proc *p;
register struct otruncate_args *uap;
@@ -1622,6 +1661,7 @@ struct oftruncate_args {
long length;
};
/* ARGSUSED */
+int
oftruncate(p, uap, retval)
struct proc *p;
register struct oftruncate_args *uap;
@@ -1642,6 +1682,7 @@ struct fsync_args {
int fd;
};
/* ARGSUSED */
+int
fsync(p, uap, retval)
struct proc *p;
struct fsync_args *uap;
@@ -1669,6 +1710,7 @@ struct rename_args {
char *to;
};
/* ARGSUSED */
+int
rename(p, uap, retval)
struct proc *p;
register struct rename_args *uap;
@@ -1754,6 +1796,7 @@ struct mkdir_args {
int mode;
};
/* ARGSUSED */
+int
mkdir(p, uap, retval)
struct proc *p;
register struct mkdir_args *uap;
@@ -1794,6 +1837,7 @@ struct rmdir_args {
char *path;
};
/* ARGSUSED */
+int
rmdir(p, uap, retval)
struct proc *p;
struct rmdir_args *uap;
@@ -1849,6 +1893,7 @@ struct ogetdirentries_args {
u_int count;
long *basep;
};
+int
ogetdirentries(p, uap, retval)
struct proc *p;
register struct ogetdirentries_args *uap;
@@ -1947,6 +1992,7 @@ struct getdirentries_args {
u_int count;
long *basep;
};
+int
getdirentries(p, uap, retval)
struct proc *p;
register struct getdirentries_args *uap;
@@ -2057,6 +2103,7 @@ struct revoke_args {
char *path;
};
/* ARGSUSED */
+int
revoke(p, uap, retval)
struct proc *p;
register struct revoke_args *uap;
@@ -2090,6 +2137,7 @@ out:
/*
* Convert a user file descriptor to a kernel file entry.
*/
+int
getvnode(fdp, fd, fpp)
struct filedesc *fdp;
struct file **fpp;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index d104bb9de773..d63a39c158bc 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -60,6 +60,7 @@ struct fileops vnops =
* Common code for vnode open operations.
* Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
*/
+int
vn_open(ndp, fmode, cmode)
register struct nameidata *ndp;
int fmode, cmode;
@@ -152,6 +153,7 @@ bad:
* The read-only status of the file system is checked.
* Also, prototype text segments cannot be written.
*/
+int
vn_writechk(vp)
register struct vnode *vp;
{
@@ -180,6 +182,7 @@ vn_writechk(vp)
/*
* Vnode close call
*/
+int
vn_close(vp, flags, cred, p)
register struct vnode *vp;
int flags;
@@ -198,6 +201,7 @@ vn_close(vp, flags, cred, p)
/*
* Package up an I/O request on a vnode into a uio and do it.
*/
+int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
enum uio_rw rw;
struct vnode *vp;
@@ -243,6 +247,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
/*
* File table vnode read routine.
*/
+int
vn_read(fp, uio, cred)
struct file *fp;
struct uio *uio;
@@ -265,6 +270,7 @@ vn_read(fp, uio, cred)
/*
* File table vnode write routine.
*/
+int
vn_write(fp, uio, cred)
struct file *fp;
struct uio *uio;
@@ -293,6 +299,7 @@ vn_write(fp, uio, cred)
/*
* File table vnode stat routine.
*/
+int
vn_stat(vp, sb, p)
struct vnode *vp;
register struct stat *sb;
@@ -357,6 +364,7 @@ vn_stat(vp, sb, p)
/*
* File table vnode ioctl routine.
*/
+int
vn_ioctl(fp, com, data, p)
struct file *fp;
int com;
@@ -399,6 +407,7 @@ vn_ioctl(fp, com, data, p)
/*
* File table vnode select routine.
*/
+int
vn_select(fp, which, p)
struct file *fp;
int which;
@@ -412,6 +421,7 @@ vn_select(fp, which, p)
/*
* File table vnode close routine.
*/
+int
vn_closefile(fp, p)
struct file *fp;
struct proc *p;
diff --git a/sys/miscfs/deadfs/dead_vnops.c b/sys/miscfs/deadfs/dead_vnops.c
index 9d04652b7fc8..cac8775810c0 100644
--- a/sys/miscfs/deadfs/dead_vnops.c
+++ b/sys/miscfs/deadfs/dead_vnops.c
@@ -156,6 +156,7 @@ dead_lookup(ap)
* Open always fails as if device did not exist.
*/
/* ARGSUSED */
+int
dead_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -172,6 +173,7 @@ dead_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
dead_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -195,6 +197,7 @@ dead_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
dead_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -213,6 +216,7 @@ dead_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
dead_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -230,6 +234,7 @@ dead_ioctl(ap)
}
/* ARGSUSED */
+int
dead_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -249,6 +254,7 @@ dead_select(ap)
/*
* Just call the device strategy routine
*/
+int
dead_strategy(ap)
struct vop_strategy_args /* {
struct buf *a_bp;
@@ -266,6 +272,7 @@ dead_strategy(ap)
/*
* Wait until the vnode has finished changing state.
*/
+int
dead_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -280,6 +287,7 @@ dead_lock(ap)
/*
* Wait until the vnode has finished changing state.
*/
+int
dead_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -299,6 +307,7 @@ dead_bmap(ap)
* Print out the contents of a dead vnode.
*/
/* ARGSUSED */
+int
dead_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -306,11 +315,13 @@ dead_print(ap)
{
printf("tag VT_NON, dead vnode\n");
+ return (0);
}
/*
* Empty vnode failed operation
*/
+int
dead_ebadf()
{
@@ -320,6 +331,7 @@ dead_ebadf()
/*
* Empty vnode bad operation
*/
+int
dead_badop()
{
@@ -330,6 +342,7 @@ dead_badop()
/*
* Empty vnode null operation
*/
+int
dead_nullop()
{
@@ -340,6 +353,7 @@ dead_nullop()
* We have to wait during times when the vnode is
* in a state of change.
*/
+int
chkvnlock(vp)
register struct vnode *vp;
{
diff --git a/sys/miscfs/fdesc/fdesc_vnops.c b/sys/miscfs/fdesc/fdesc_vnops.c
index 00d8675aea2f..83e665705532 100644
--- a/sys/miscfs/fdesc/fdesc_vnops.c
+++ b/sys/miscfs/fdesc/fdesc_vnops.c
@@ -88,6 +88,7 @@ static struct fdcache fdcache[NFDCACHE];
/*
* Initialise cache headers
*/
+int
fdesc_init()
{
struct fdcache *fc;
@@ -96,6 +97,7 @@ fdesc_init()
for (fc = fdcache; fc < fdcache + NFDCACHE; fc++)
fc->fc_forw = fc->fc_back = (struct fdescnode *) fc;
+ return (0);
}
/*
@@ -183,7 +185,7 @@ fdesc_lookup(ap)
char *pname;
struct proc *p;
int nfiles;
- unsigned fd;
+ unsigned fd = 0;
int error;
struct vnode *fvp;
char *ln;
@@ -800,6 +802,7 @@ fdesc_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
fdesc_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
diff --git a/sys/miscfs/fifofs/fifo_vnops.c b/sys/miscfs/fifofs/fifo_vnops.c
index bad33a430b62..a1ba3f4abf93 100644
--- a/sys/miscfs/fifofs/fifo_vnops.c
+++ b/sys/miscfs/fifofs/fifo_vnops.c
@@ -111,6 +111,7 @@ struct vnodeopv_desc fifo_vnodeop_opv_desc =
* Trivial lookup routine that always fails.
*/
/* ARGSUSED */
+int
fifo_lookup(ap)
struct vop_lookup_args /* {
struct vnode * a_dvp;
@@ -128,6 +129,7 @@ fifo_lookup(ap)
* to find an active instance of a fifo.
*/
/* ARGSUSED */
+int
fifo_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -218,6 +220,7 @@ fifo_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
fifo_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -257,6 +260,7 @@ fifo_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
fifo_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -286,6 +290,7 @@ fifo_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
fifo_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -308,6 +313,7 @@ fifo_ioctl(ap)
}
/* ARGSUSED */
+int
fifo_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -329,6 +335,7 @@ fifo_select(ap)
/*
* This is a noop, simply returning what one has been given.
*/
+int
fifo_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -349,6 +356,7 @@ fifo_bmap(ap)
* At the moment we do not do any locking.
*/
/* ARGSUSED */
+int
fifo_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -359,6 +367,7 @@ fifo_lock(ap)
}
/* ARGSUSED */
+int
fifo_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -372,6 +381,7 @@ fifo_unlock(ap)
* Device close routine
*/
/* ARGSUSED */
+int
fifo_close(ap)
struct vop_close_args /* {
struct vnode *a_vp;
@@ -407,6 +417,7 @@ fifo_close(ap)
/*
* Print out the contents of a fifo vnode.
*/
+int
fifo_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -416,11 +427,13 @@ fifo_print(ap)
printf("tag VT_NON");
fifo_printinfo(ap->a_vp);
printf("\n");
+ return (0);
}
/*
* Print out internal contents of a fifo vnode.
*/
+int
fifo_printinfo(vp)
struct vnode *vp;
{
@@ -428,11 +441,13 @@ fifo_printinfo(vp)
printf(", fifo with %d readers and %d writers",
fip->fi_readers, fip->fi_writers);
+ return (0);
}
/*
* Return POSIX pathconf information applicable to fifo's.
*/
+int
fifo_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -460,6 +475,7 @@ fifo_pathconf(ap)
/*
* Fifo failed operation
*/
+int
fifo_ebadf()
{
@@ -470,6 +486,7 @@ fifo_ebadf()
* Fifo advisory byte-level locks.
*/
/* ARGSUSED */
+int
fifo_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
@@ -486,6 +503,7 @@ fifo_advlock(ap)
/*
* Fifo bad operation
*/
+int
fifo_badop()
{
diff --git a/sys/miscfs/kernfs/kernfs_vfsops.c b/sys/miscfs/kernfs/kernfs_vfsops.c
index b68d76eaddfd..a3fa803b3e04 100644
--- a/sys/miscfs/kernfs/kernfs_vfsops.c
+++ b/sys/miscfs/kernfs/kernfs_vfsops.c
@@ -84,6 +84,7 @@ cdevvp(dev, vpp)
return (0);
}
+int
kernfs_init()
{
int cmaj;
@@ -107,11 +108,13 @@ kernfs_init()
printf("kernfs: no raw boot device\n");
rrootvp = 0;
}
+ return (0);
}
/*
* Mount the Kernel params filesystem
*/
+int
kernfs_mount(mp, path, data, ndp, p)
struct mount *mp;
char *path;
@@ -160,6 +163,7 @@ kernfs_mount(mp, path, data, ndp, p)
return (0);
}
+int
kernfs_start(mp, flags, p)
struct mount *mp;
int flags;
@@ -168,6 +172,7 @@ kernfs_start(mp, flags, p)
return (0);
}
+int
kernfs_unmount(mp, mntflags, p)
struct mount *mp;
int mntflags;
@@ -221,6 +226,7 @@ kernfs_unmount(mp, mntflags, p)
return 0;
}
+int
kernfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
@@ -241,6 +247,7 @@ kernfs_root(mp, vpp)
return (0);
}
+int
kernfs_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
@@ -251,6 +258,7 @@ kernfs_quotactl(mp, cmd, uid, arg, p)
return (EOPNOTSUPP);
}
+int
kernfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
@@ -277,6 +285,7 @@ kernfs_statfs(mp, sbp, p)
return (0);
}
+int
kernfs_sync(mp, waitfor)
struct mount *mp;
int waitfor;
@@ -288,6 +297,7 @@ kernfs_sync(mp, waitfor)
* Kernfs flat namespace lookup.
* Currently unsupported.
*/
+int
kernfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
@@ -298,6 +308,7 @@ kernfs_vget(mp, ino, vpp)
}
+int
kernfs_fhtovp(mp, fhp, setgen, vpp)
struct mount *mp;
struct fid *fhp;
@@ -307,6 +318,7 @@ kernfs_fhtovp(mp, fhp, setgen, vpp)
return (EOPNOTSUPP);
}
+int
kernfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
diff --git a/sys/miscfs/kernfs/kernfs_vnops.c b/sys/miscfs/kernfs/kernfs_vnops.c
index 10b7d7c0a64c..db7377dfe3b8 100644
--- a/sys/miscfs/kernfs/kernfs_vnops.c
+++ b/sys/miscfs/kernfs/kernfs_vnops.c
@@ -186,6 +186,7 @@ kernfs_xwrite(kt, buf, len)
* vp is the current namei directory
* ndp is the name to locate in that directory...
*/
+int
kernfs_lookup(ap)
struct vop_lookup_args /* {
struct vnode * a_dvp;
@@ -289,6 +290,7 @@ bad:;
return (error);
}
+int
kernfs_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -352,6 +354,7 @@ kernfs_access(ap)
}
+int
kernfs_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
@@ -411,6 +414,7 @@ kernfs_getattr(ap)
return (error);
}
+int
kernfs_setattr(ap)
struct vop_setattr_args /* {
struct vnode *a_vp;
@@ -501,6 +505,7 @@ kernfs_write(ap)
}
+int
kernfs_readdir(ap)
struct vop_readdir_args /* {
struct vnode *a_vp;
@@ -551,6 +556,7 @@ kernfs_readdir(ap)
return (error);
}
+int
kernfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
@@ -569,6 +575,7 @@ kernfs_inactive(ap)
return (0);
}
+int
kernfs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
@@ -588,6 +595,7 @@ kernfs_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
kernfs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -625,6 +633,7 @@ kernfs_pathconf(ap)
* Print out the contents of a /dev/fd vnode.
*/
/* ARGSUSED */
+int
kernfs_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -636,6 +645,7 @@ kernfs_print(ap)
}
/*void*/
+int
kernfs_vfree(ap)
struct vop_vfree_args /* {
struct vnode *a_pvp;
@@ -650,6 +660,7 @@ kernfs_vfree(ap)
/*
* /dev/fd vnode unsupported operation
*/
+int
kernfs_enotsupp()
{
@@ -659,6 +670,7 @@ kernfs_enotsupp()
/*
* /dev/fd "should never get here" operation
*/
+int
kernfs_badop()
{
@@ -669,6 +681,7 @@ kernfs_badop()
/*
* kernfs vnode null operation
*/
+int
kernfs_nullop()
{
diff --git a/sys/miscfs/nullfs/null_subr.c b/sys/miscfs/nullfs/null_subr.c
index a31723fe4c22..5541fb3bf80d 100644
--- a/sys/miscfs/nullfs/null_subr.c
+++ b/sys/miscfs/nullfs/null_subr.c
@@ -73,6 +73,7 @@ static struct null_node_cache null_node_cache[NNULLNODECACHE];
/*
* Initialise cache headers
*/
+int
nullfs_init()
{
struct null_node_cache *ac;
@@ -82,6 +83,7 @@ nullfs_init()
for (ac = null_node_cache; ac < null_node_cache + NNULLNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct null_node *) ac;
+ return (0);
}
/*
diff --git a/sys/miscfs/portal/portal_vnops.c b/sys/miscfs/portal/portal_vnops.c
index 5e170261e71f..c19e8152099f 100644
--- a/sys/miscfs/portal/portal_vnops.c
+++ b/sys/miscfs/portal/portal_vnops.c
@@ -524,6 +524,7 @@ portal_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
portal_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
diff --git a/sys/miscfs/procfs/procfs_vfsops.c b/sys/miscfs/procfs/procfs_vfsops.c
index 3938ca123576..67dc31f78b3b 100644
--- a/sys/miscfs/procfs/procfs_vfsops.c
+++ b/sys/miscfs/procfs/procfs_vfsops.c
@@ -62,6 +62,7 @@
* mount system call
*/
/* ARGSUSED */
+int
procfs_mount(mp, path, data, ndp, p)
struct mount *mp;
char *path;
@@ -96,6 +97,7 @@ procfs_mount(mp, path, data, ndp, p)
/*
* unmount system call
*/
+int
procfs_unmount(mp, mntflags, p)
struct mount *mp;
int mntflags;
@@ -118,6 +120,7 @@ procfs_unmount(mp, mntflags, p)
return (0);
}
+int
procfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
@@ -141,6 +144,7 @@ procfs_root(mp, vpp)
/*
*/
/* ARGSUSED */
+int
procfs_start(mp, flags, p)
struct mount *mp;
int flags;
@@ -153,6 +157,7 @@ procfs_start(mp, flags, p)
/*
* Get file system statistics.
*/
+int
procfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
@@ -177,6 +182,7 @@ procfs_statfs(mp, sbp, p)
}
+int
procfs_quotactl(mp, cmds, uid, arg, p)
struct mount *mp;
int cmds;
@@ -188,6 +194,7 @@ procfs_quotactl(mp, cmds, uid, arg, p)
return (EOPNOTSUPP);
}
+int
procfs_sync(mp, waitfor)
struct mount *mp;
int waitfor;
@@ -196,6 +203,7 @@ procfs_sync(mp, waitfor)
return (0);
}
+int
procfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
@@ -205,6 +213,7 @@ procfs_vget(mp, ino, vpp)
return (EOPNOTSUPP);
}
+int
procfs_fhtovp(mp, fhp, vpp)
struct mount *mp;
struct fid *fhp;
@@ -214,6 +223,7 @@ procfs_fhtovp(mp, fhp, vpp)
return (EINVAL);
}
+int
procfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
@@ -222,6 +232,7 @@ procfs_vptofh(vp, fhp)
return EINVAL;
}
+int
procfs_init()
{
diff --git a/sys/miscfs/procfs/procfs_vnops.c b/sys/miscfs/procfs/procfs_vnops.c
index 4e1ee002bb90..b6c6e6fa572a 100644
--- a/sys/miscfs/procfs/procfs_vnops.c
+++ b/sys/miscfs/procfs/procfs_vnops.c
@@ -100,6 +100,7 @@ static pid_t atopid __P((const char *, u_int));
* is to support exclusive open on process
* memory images.
*/
+int
procfs_open(ap)
struct vop_open_args *ap;
{
@@ -134,6 +135,7 @@ procfs_open(ap)
* nothing to do for procfs other than undo
* any exclusive open flag (see _open above).
*/
+int
procfs_close(ap)
struct vop_close_args *ap;
{
@@ -153,6 +155,7 @@ procfs_close(ap)
* do an ioctl operation on pfsnode (vp).
* (vp) is not locked on entry or exit.
*/
+int
procfs_ioctl(ap)
struct vop_ioctl_args *ap;
{
@@ -170,6 +173,7 @@ procfs_ioctl(ap)
* usual no-op bmap, although returning
* (EIO) would be a reasonable alternative.
*/
+int
procfs_bmap(ap)
struct vop_bmap_args *ap;
{
@@ -197,6 +201,7 @@ procfs_bmap(ap)
*
* (vp) is not locked on entry or exit.
*/
+int
procfs_inactive(ap)
struct vop_inactive_args *ap;
{
@@ -215,6 +220,7 @@ procfs_inactive(ap)
* to free any private data and remove the node
* from any private lists.
*/
+int
procfs_reclaim(ap)
struct vop_reclaim_args *ap;
{
@@ -227,6 +233,7 @@ procfs_reclaim(ap)
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
procfs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -265,6 +272,7 @@ procfs_pathconf(ap)
* just print a readable description
* of (vp).
*/
+int
procfs_print(ap)
struct vop_print_args *ap;
{
@@ -273,6 +281,7 @@ procfs_print(ap)
printf("tag VT_PROCFS, pid %d, mode %x, flags %x\n",
pfs->pfs_pid,
pfs->pfs_mode, pfs->pfs_flags);
+ return (0);
}
/*
@@ -281,6 +290,7 @@ procfs_print(ap)
* for undoing any side-effects caused by the lookup.
* this will always include freeing the pathname buffer.
*/
+int
procfs_abortop(ap)
struct vop_abortop_args *ap;
{
@@ -293,6 +303,7 @@ procfs_abortop(ap)
/*
* generic entry point for unsupported operations
*/
+int
procfs_badop()
{
@@ -308,6 +319,7 @@ procfs_badop()
*
* this is relatively minimal for procfs.
*/
+int
procfs_getattr(ap)
struct vop_getattr_args *ap;
{
@@ -423,6 +435,7 @@ procfs_getattr(ap)
return (error);
}
+int
procfs_setattr(ap)
struct vop_setattr_args *ap;
{
@@ -451,6 +464,7 @@ procfs_setattr(ap)
* but does mean that the i/o entry points need to check
* that the operation really does make sense.
*/
+int
procfs_access(ap)
struct vop_access_args *ap;
{
@@ -502,6 +516,7 @@ found:
* filesystem doesn't do any locking of its own. otherwise
* read and inwardly digest ufs_lookup().
*/
+int
procfs_lookup(ap)
struct vop_lookup_args *ap;
{
@@ -612,6 +627,7 @@ procfs_lookup(ap)
*
* this should just be done through read()
*/
+int
procfs_readdir(ap)
struct vop_readdir_args *ap;
{
diff --git a/sys/miscfs/specfs/spec_vnops.c b/sys/miscfs/specfs/spec_vnops.c
index 111c517b1627..55b5dd8d433b 100644
--- a/sys/miscfs/specfs/spec_vnops.c
+++ b/sys/miscfs/specfs/spec_vnops.c
@@ -126,6 +126,7 @@ spec_lookup(ap)
* Open a special file.
*/
/* ARGSUSED */
+int
spec_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
@@ -203,6 +204,7 @@ spec_open(ap)
* Vnode op for read
*/
/* ARGSUSED */
+int
spec_read(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -285,6 +287,7 @@ spec_read(ap)
* Vnode op for write
*/
/* ARGSUSED */
+int
spec_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -365,6 +368,7 @@ spec_write(ap)
* Device ioctl operation.
*/
/* ARGSUSED */
+int
spec_ioctl(ap)
struct vop_ioctl_args /* {
struct vnode *a_vp;
@@ -399,6 +403,7 @@ spec_ioctl(ap)
}
/* ARGSUSED */
+int
spec_select(ap)
struct vop_select_args /* {
struct vnode *a_vp;
@@ -476,6 +481,7 @@ loop:
/*
* Just call the device strategy routine
*/
+int
spec_strategy(ap)
struct vop_strategy_args /* {
struct buf *a_bp;
@@ -489,6 +495,7 @@ spec_strategy(ap)
/*
* This is a noop, simply returning what one has been given.
*/
+int
spec_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
@@ -509,6 +516,7 @@ spec_bmap(ap)
* At the moment we do not do any locking.
*/
/* ARGSUSED */
+int
spec_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -519,6 +527,7 @@ spec_lock(ap)
}
/* ARGSUSED */
+int
spec_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -532,6 +541,7 @@ spec_unlock(ap)
* Device close routine
*/
/* ARGSUSED */
+int
spec_close(ap)
struct vop_close_args /* {
struct vnode *a_vp;
@@ -606,6 +616,7 @@ spec_close(ap)
/*
* Print out the contents of a special device vnode.
*/
+int
spec_print(ap)
struct vop_print_args /* {
struct vnode *a_vp;
@@ -614,11 +625,13 @@ spec_print(ap)
printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
minor(ap->a_vp->v_rdev));
+ return (0);
}
/*
* Return POSIX pathconf information applicable to special devices.
*/
+int
spec_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -656,6 +669,7 @@ spec_pathconf(ap)
* Special device advisory byte-level locks.
*/
/* ARGSUSED */
+int
spec_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
@@ -672,6 +686,7 @@ spec_advlock(ap)
/*
* Special device failed operation
*/
+int
spec_ebadf()
{
@@ -681,6 +696,7 @@ spec_ebadf()
/*
* Special device bad operation
*/
+int
spec_badop()
{
diff --git a/sys/miscfs/umapfs/umap_subr.c b/sys/miscfs/umapfs/umap_subr.c
index 6f1f077a6217..b640891a410c 100644
--- a/sys/miscfs/umapfs/umap_subr.c
+++ b/sys/miscfs/umapfs/umap_subr.c
@@ -73,6 +73,7 @@ static struct umap_node_cache umap_node_cache[NUMAPNODECACHE];
/*
* Initialise cache headers
*/
+int
umapfs_init()
{
struct umap_node_cache *ac;
@@ -82,6 +83,7 @@ umapfs_init()
for (ac = umap_node_cache; ac < umap_node_cache + NUMAPNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct umap_node *) ac;
+ return (0);
}
/*
diff --git a/sys/miscfs/umapfs/umap_vnops.c b/sys/miscfs/umapfs/umap_vnops.c
index 287804e15618..0c1955f1ed33 100644
--- a/sys/miscfs/umapfs/umap_vnops.c
+++ b/sys/miscfs/umapfs/umap_vnops.c
@@ -67,7 +67,7 @@ umap_bypass(ap)
{
extern int (**umap_vnodeop_p)(); /* not extern, really "forward" */
struct ucred **credpp = 0, *credp = 0;
- struct ucred *savecredp, *savecompcredp = 0;
+ struct ucred *savecredp = 0, *savecompcredp = 0;
struct ucred *compcredp = 0;
struct vnode **this_vp_p;
int error;
diff --git a/sys/miscfs/union/union_subr.c b/sys/miscfs/union/union_subr.c
index 77947d1dfbe1..ea4f804a24db 100644
--- a/sys/miscfs/union/union_subr.c
+++ b/sys/miscfs/union/union_subr.c
@@ -49,9 +49,7 @@
#include <sys/queue.h>
#include <miscfs/union/union.h>
-#ifdef DIAGNOSTIC
#include <sys/proc.h>
-#endif
/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32
@@ -71,6 +69,7 @@ union_init()
for (i = 0; i < NHASH; i++)
LIST_INIT(&unhead[i]);
bzero((caddr_t) unvplock, sizeof(unvplock));
+ return (0);
}
static int
@@ -223,10 +222,10 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
struct vnode *lowervp; /* may be null */
{
int error;
- struct union_node *un;
+ struct union_node *un = 0;
struct union_node **pp;
struct vnode *xlowervp = NULLVP;
- int hash;
+ int hash = 0;
int try;
if (uppervp == NULLVP && lowervp == NULLVP)
diff --git a/sys/miscfs/union/union_vfsops.c b/sys/miscfs/union/union_vfsops.c
index 9fa27460e3d4..42931d7c7bc1 100644
--- a/sys/miscfs/union/union_vfsops.c
+++ b/sys/miscfs/union/union_vfsops.c
@@ -73,7 +73,7 @@ union_mount(mp, path, data, ndp, p)
struct ucred *cred = 0;
struct ucred *scred;
struct vattr va;
- char *cp;
+ char *cp = 0;
int len;
u_int size;
diff --git a/sys/miscfs/union/union_vnops.c b/sys/miscfs/union/union_vnops.c
index 96327b0922d4..30f223350b21 100644
--- a/sys/miscfs/union/union_vnops.c
+++ b/sys/miscfs/union/union_vnops.c
@@ -162,7 +162,7 @@ union_lookup(ap)
int lockparent = cnp->cn_flags & LOCKPARENT;
int rdonly = cnp->cn_flags & RDONLY;
struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
- struct ucred *saved_cred;
+ struct ucred *saved_cred = 0;
cnp->cn_flags |= LOCKPARENT;
diff --git a/sys/net/bpf_filter.c b/sys/net/bpf_filter.c
index 6a30a6657542..cfae7769fc41 100644
--- a/sys/net/bpf_filter.c
+++ b/sys/net/bpf_filter.c
@@ -174,7 +174,7 @@ bpf_filter(pc, p, wirelen, buflen)
u_int wirelen;
register u_int buflen;
{
- register u_long A, X;
+ register u_long A = 0, X = 0;
register int k;
long mem[BPF_MEMWORDS];
@@ -183,10 +183,7 @@ bpf_filter(pc, p, wirelen, buflen)
* No filter means accept all.
*/
return (u_int)-1;
-#ifdef lint
- A = 0;
- X = 0;
-#endif
+
--pc;
while (1) {
++pc;
diff --git a/sys/net/bpfdesc.h b/sys/net/bpfdesc.h
index a13320e86a6a..36e3d2618242 100644
--- a/sys/net/bpfdesc.h
+++ b/sys/net/bpfdesc.h
@@ -40,6 +40,8 @@
* @(#) $Header: bpfdesc.h,v 1.9 91/10/27 21:22:38 mccanne Exp $ (LBL)
*/
+#include <sys/select.h>
+
/*
* Descriptor associated with each open bpf file.
*/
diff --git a/sys/net/if.h b/sys/net/if.h
index c27c4f9cf632..e0bf7958b73c 100644
--- a/sys/net/if.h
+++ b/sys/net/if.h
@@ -114,20 +114,20 @@ struct ifnet {
struct timeval ifi_lastchange;/* last updated */
} if_data;
/* procedure handles */
- int (*if_init) /* init routine */
+ void (*if_init) /* init routine */
__P((int));
int (*if_output) /* output routine (enqueue) */
__P((struct ifnet *, struct mbuf *, struct sockaddr *,
struct rtentry *));
- int (*if_start) /* initiate output routine */
+ void (*if_start) /* initiate output routine */
__P((struct ifnet *));
int (*if_done) /* output complete routine */
__P((struct ifnet *)); /* (XXX not used; fake prototype) */
int (*if_ioctl) /* ioctl routine */
__P((struct ifnet *, int, caddr_t));
- int (*if_reset)
+ void (*if_reset)
__P((int)); /* new autoconfig will permit removal */
- int (*if_watchdog) /* timer routine */
+ void (*if_watchdog) /* timer routine */
__P((int));
struct ifqueue {
struct mbuf *ifq_head;
@@ -356,7 +356,7 @@ void ifafree __P((struct ifaddr *));
void link_rtrequest __P((int, struct rtentry *, struct sockaddr *));
int loioctl __P((struct ifnet *, int, caddr_t));
-void loopattach __P((int));
+void loopattach __P((void));
int looutput __P((struct ifnet *,
struct mbuf *, struct sockaddr *, struct rtentry *));
void lortrequest __P((int, struct rtentry *, struct sockaddr *));
diff --git a/sys/net/if_loop.c b/sys/net/if_loop.c
index f09295e34be8..3b9ee74346fc 100644
--- a/sys/net/if_loop.c
+++ b/sys/net/if_loop.c
@@ -78,14 +78,10 @@ struct ifnet loif;
/* ARGSUSED */
void
-loopattach(n)
- int n;
+loopattach(void)
{
register struct ifnet *ifp = &loif;
-#ifdef lint
- n = n; /* Highlander: there can only be one... */
-#endif
ifp->if_name = "lo";
ifp->if_mtu = LOMTU;
ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
@@ -100,6 +96,8 @@ loopattach(n)
#endif
}
+TEXT_SET(pseudo_set, loopattach);
+
int
looutput(ifp, m, dst, rt)
struct ifnet *ifp;
diff --git a/sys/net/if_sl.c b/sys/net/if_sl.c
index 56ce96f4b9d3..46ae9adf809d 100644
--- a/sys/net/if_sl.c
+++ b/sys/net/if_sl.c
@@ -209,6 +209,8 @@ slattach()
}
}
+TEXT_SET(pseudo_set, slattach);
+
static int
slinit(sc)
register struct sl_softc *sc;
@@ -397,7 +399,7 @@ slstart(tp)
struct mbuf *m2;
#if NBPFILTER > 0
u_char bpfbuf[SLMTU + SLIP_HDRLEN];
- register int len;
+ register int len = 0;
#endif
extern int cfreecount;
@@ -479,6 +481,7 @@ slstart(tp)
#endif
sc->sc_if.if_lastchange = time;
+#if 0
/*
* If system is getting low on clists, just flush our
* output queue (if the stuff was important, it'll get
@@ -489,6 +492,7 @@ slstart(tp)
sc->sc_if.if_collisions++;
continue;
}
+#endif
/*
* The extra FRAME_END will start up a new packet, and thus
* will flush any accumulated garbage. We do this whenever
diff --git a/sys/net/netisr.h b/sys/net/netisr.h
index e2e465379d3f..03c5288d8d2d 100644
--- a/sys/net/netisr.h
+++ b/sys/net/netisr.h
@@ -61,27 +61,8 @@
#define schednetisr(anisr) { netisr |= 1<<(anisr); setsoftnet(); }
-#ifdef i386
-/* XXX Temporary -- soon to vanish - wfj */
-#define NETISR_SCLK 11 /* softclock */
-#define NETISR_AST 12 /* ast -- resched */
-
-#undef schednetisr
-#define schednetisr(anisr) {\
- if(netisr == 0) { \
- softem++; \
- } \
- netisr |= 1<<(anisr); \
-}
-#ifndef LOCORE
-#ifdef KERNEL
-int softem;
-#endif
-#endif
-#endif /* i386 */
-
#ifndef LOCORE
#ifdef KERNEL
-int netisr; /* scheduling bits for network */
+volatile unsigned int netisr; /* scheduling bits for network */
#endif
#endif
diff --git a/sys/net/radix.c b/sys/net/radix.c
index f182eb77abfa..fb34adb9e0a7 100644
--- a/sys/net/radix.c
+++ b/sys/net/radix.c
@@ -383,7 +383,7 @@ rn_addroute(v_arg, n_arg, head, treenodes)
struct radix_node treenodes[2];
{
caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
- register struct radix_node *t, *x, *tt;
+ register struct radix_node *t, *x = 0, *tt;
struct radix_node *saved_tt, *top = head->rnh_treetop;
short b = 0, b_leaf;
int mlen, keyduplicated;
diff --git a/sys/net/route.c b/sys/net/route.c
index 96902dace19e..f37e84c6f6e4 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -175,7 +175,7 @@ ifafree(ifa)
* N.B.: must be called at splnet
*
*/
-int
+void
rtredirect(dst, gateway, netmask, flags, src, rtp)
struct sockaddr *dst, *gateway, *netmask, *src;
int flags;
diff --git a/sys/net/route.h b/sys/net/route.h
index 2fbed9ea0a14..92f672c19ecb 100644
--- a/sys/net/route.h
+++ b/sys/net/route.h
@@ -254,7 +254,7 @@ struct rtentry *
void rtfree __P((struct rtentry *));
int rtinit __P((struct ifaddr *, int, int));
int rtioctl __P((int, caddr_t, struct proc *));
-int rtredirect __P((struct sockaddr *, struct sockaddr *,
+void rtredirect __P((struct sockaddr *, struct sockaddr *,
struct sockaddr *, int, struct sockaddr *, struct rtentry **));
int rtrequest __P((int, struct sockaddr *,
struct sockaddr *, struct sockaddr *, int, struct rtentry **));
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
index d128121708d9..db0192f4d91f 100644
--- a/sys/net/rtsock.c
+++ b/sys/net/rtsock.c
@@ -618,9 +618,9 @@ rt_newaddrmsg(cmd, ifa, error, rt)
register struct rtentry *rt;
{
struct rt_addrinfo info;
- struct sockaddr *sa;
+ struct sockaddr *sa = 0;
int pass;
- struct mbuf *m;
+ struct mbuf *m = 0;
struct ifnet *ifp = ifa->ifa_ifp;
if (route_cb.any_count == 0)
diff --git a/sys/netinet/igmp.c b/sys/netinet/igmp.c
index 78b426c49eaf..cc240eb806f8 100644
--- a/sys/netinet/igmp.c
+++ b/sys/netinet/igmp.c
@@ -41,6 +41,7 @@
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index e8b481b4005c..bcf34e816931 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
@@ -91,6 +92,7 @@ int subnetsarelocal = SUBNETSARELOCAL;
* is true, this includes other subnets of the local net.
* Otherwise, it includes only the directly-connected (sub)nets.
*/
+int
in_localaddr(in)
struct in_addr in;
{
@@ -114,6 +116,7 @@ in_localaddr(in)
* that may not be forwarded, or whether datagrams to that destination
* may be forwarded.
*/
+int
in_canforward(in)
struct in_addr in;
{
@@ -156,6 +159,7 @@ extern struct ifnet loif;
* Ifp is 0 if not an interface-specific ioctl.
*/
/* ARGSUSED */
+int
in_control(so, cmd, data, ifp)
struct socket *so;
int cmd;
@@ -395,6 +399,7 @@ in_ifscrub(ifp, ia)
* Initialize an interface's internet address
* and routing table entry.
*/
+int
in_ifinit(ifp, ia, sin, scrub)
register struct ifnet *ifp;
register struct in_ifaddr *ia;
@@ -483,6 +488,7 @@ in_ifinit(ifp, ia, sin, scrub)
/*
* Return 1 if the address might be a local broadcast address.
*/
+int
in_broadcast(in, ifp)
struct in_addr in;
struct ifnet *ifp;
@@ -584,7 +590,7 @@ in_addmulti(ap, ifp)
/*
* Delete a multicast address record.
*/
-int
+void
in_delmulti(inm)
register struct in_multi *inm;
{
diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c
index 01b6b17961c3..a8b1c6f8cbf4 100644
--- a/sys/netinet/in_pcb.c
+++ b/sys/netinet/in_pcb.c
@@ -162,7 +162,7 @@ in_pcbconnect(inp, nam)
struct mbuf *nam;
{
struct in_ifaddr *ia;
- struct sockaddr_in *ifaddr;
+ struct sockaddr_in *ifaddr = 0;
register struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
if (nam->m_len != sizeof (*sin))
@@ -274,7 +274,7 @@ in_pcbconnect(inp, nam)
return (0);
}
-int
+void
in_pcbdisconnect(inp)
struct inpcb *inp;
{
@@ -285,7 +285,7 @@ in_pcbdisconnect(inp)
in_pcbdetach(inp);
}
-int
+void
in_pcbdetach(inp)
struct inpcb *inp;
{
@@ -302,7 +302,7 @@ in_pcbdetach(inp)
FREE(inp, M_PCB);
}
-int
+void
in_setsockaddr(inp, nam)
register struct inpcb *inp;
struct mbuf *nam;
@@ -318,7 +318,7 @@ in_setsockaddr(inp, nam)
sin->sin_addr = inp->inp_laddr;
}
-int
+void
in_setpeeraddr(inp, nam)
struct inpcb *inp;
struct mbuf *nam;
@@ -345,7 +345,7 @@ in_setpeeraddr(inp, nam)
*
* Must be called at splnet.
*/
-int
+void
in_pcbnotify(head, dst, fport_arg, laddr, lport_arg, cmd, notify)
struct inpcb *head;
struct sockaddr *dst;
@@ -403,7 +403,7 @@ in_pcbnotify(head, dst, fport_arg, laddr, lport_arg, cmd, notify)
* routing information. If the route was created dynamically
* (by a redirect), time to try a default gateway again.
*/
-int
+void
in_losing(inp)
struct inpcb *inp;
{
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index c85324702a7f..baa8be30a8be 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -71,18 +71,18 @@ struct inpcb {
#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb)
#ifdef KERNEL
-int in_losing __P((struct inpcb *));
+void in_losing __P((struct inpcb *));
int in_pcballoc __P((struct socket *, struct inpcb *));
int in_pcbbind __P((struct inpcb *, struct mbuf *));
int in_pcbconnect __P((struct inpcb *, struct mbuf *));
-int in_pcbdetach __P((struct inpcb *));
-int in_pcbdisconnect __P((struct inpcb *));
+void in_pcbdetach __P((struct inpcb *));
+void in_pcbdisconnect __P((struct inpcb *));
struct inpcb *
in_pcblookup __P((struct inpcb *,
struct in_addr, u_int, struct in_addr, u_int, int));
-int in_pcbnotify __P((struct inpcb *, struct sockaddr *,
+void in_pcbnotify __P((struct inpcb *, struct sockaddr *,
u_int, struct in_addr, u_int, int, void (*)(struct inpcb *, int)));
void in_rtchange __P((struct inpcb *, int));
-int in_setpeeraddr __P((struct inpcb *, struct mbuf *));
-int in_setsockaddr __P((struct inpcb *, struct mbuf *));
+void in_setpeeraddr __P((struct inpcb *, struct mbuf *));
+void in_setsockaddr __P((struct inpcb *, struct mbuf *));
#endif
diff --git a/sys/netinet/in_proto.c b/sys/netinet/in_proto.c
index 00916b4ce1a1..9927c5acb595 100644
--- a/sys/netinet/in_proto.c
+++ b/sys/netinet/in_proto.c
@@ -162,6 +162,7 @@ struct domain impdomain =
impsw, &impsw[sizeof (impsw)/sizeof(impsw[0])] };
#endif
+#if 0
#include "hy.h"
#if NHY > 0
/*
@@ -181,3 +182,4 @@ struct protosw hysw[] = {
struct domain hydomain =
{ AF_HYLINK, "hy", 0, 0, 0, hysw, &hysw[sizeof (hysw)/sizeof(hysw[0])] };
#endif
+#endif
diff --git a/sys/netinet/in_var.h b/sys/netinet/in_var.h
index 8218f0b74a38..d3d4c258d473 100644
--- a/sys/netinet/in_var.h
+++ b/sys/netinet/in_var.h
@@ -194,7 +194,7 @@ struct in_multistep {
int in_ifinit __P((struct ifnet *,
struct in_ifaddr *, struct sockaddr_in *, int));
struct in_multi *in_addmulti __P((struct in_addr *, struct ifnet *));
-int in_delmulti __P((struct in_multi *));
+void in_delmulti __P((struct in_multi *));
void in_ifscrub __P((struct ifnet *, struct in_ifaddr *));
int in_control __P((struct socket *, int, caddr_t, struct ifnet *));
#endif
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index d3bfeac4b19e..c720ff47f4c4 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -998,7 +998,7 @@ ip_forward(m, srcrt)
register struct ip *ip = mtod(m, struct ip *);
register struct sockaddr_in *sin;
register struct rtentry *rt;
- int error, type = 0, code;
+ int error, type = 0, code = 0;
struct mbuf *mcopy;
n_long dest;
struct ifnet *destifp;
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index 1744ec17fb65..bb26c874182e 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -53,6 +53,7 @@ int ip_mrtproto; /* for netstat only */
#else
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
index 4c22a5e53ec3..1fb1481c3110 100644
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
@@ -483,7 +484,7 @@ ip_ctloutput(op, so, level, optname, mp)
{
register struct inpcb *inp = sotoinpcb(so);
register struct mbuf *m = *mp;
- register int optval;
+ register int optval = 0;
int error = 0;
if (level != IPPROTO_IP) {
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 2dd1d749c409..3af81406fd33 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -217,13 +217,13 @@ tcp_input(m, iphlen)
register struct tcpiphdr *ti;
register struct inpcb *inp;
caddr_t optp = NULL;
- int optlen;
+ int optlen = 0;
int len, tlen, off;
register struct tcpcb *tp = 0;
register int tiflags;
- struct socket *so;
+ struct socket *so = 0;
int todrop, acked, ourfinisacked, needoutput = 0;
- short ostate;
+ short ostate = 0;
struct in_addr laddr;
int dropsocket = 0;
int iss = 0;
@@ -1173,7 +1173,7 @@ step6:
* but if two URG's are pending at once, some out-of-band
* data may creep in... ick.
*/
- if (ti->ti_urp <= ti->ti_len
+ if (ti->ti_urp <= (u_long)ti->ti_len
#ifdef SO_OOBINLINE
&& (so->so_options & SO_OOBINLINE) == 0
#endif
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index 2dd1d749c409..3af81406fd33 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -217,13 +217,13 @@ tcp_input(m, iphlen)
register struct tcpiphdr *ti;
register struct inpcb *inp;
caddr_t optp = NULL;
- int optlen;
+ int optlen = 0;
int len, tlen, off;
register struct tcpcb *tp = 0;
register int tiflags;
- struct socket *so;
+ struct socket *so = 0;
int todrop, acked, ourfinisacked, needoutput = 0;
- short ostate;
+ short ostate = 0;
struct in_addr laddr;
int dropsocket = 0;
int iss = 0;
@@ -1173,7 +1173,7 @@ step6:
* but if two URG's are pending at once, some out-of-band
* data may creep in... ick.
*/
- if (ti->ti_urp <= ti->ti_len
+ if (ti->ti_urp <= (u_long)ti->ti_len
#ifdef SO_OOBINLINE
&& (so->so_options & SO_OOBINLINE) == 0
#endif
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 8edb853bedea..dd5940bbf4ac 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -292,7 +292,7 @@ tcp_close(tp)
if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
(rt = inp->inp_route.ro_rt) &&
((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
- register u_long i;
+ register u_long i = 0;
if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
i = tp->t_srtt *
diff --git a/sys/netinet/tcp_timer.h b/sys/netinet/tcp_timer.h
index 301a10f4034c..7c31125e40f0 100644
--- a/sys/netinet/tcp_timer.h
+++ b/sys/netinet/tcp_timer.h
@@ -113,9 +113,9 @@ char *tcptimers[] =
*/
#define TCPT_RANGESET(tv, value, tvmin, tvmax) { \
(tv) = (value); \
- if ((tv) < (tvmin)) \
+ if ((u_long)(tv) < (u_long)(tvmin)) \
(tv) = (tvmin); \
- else if ((tv) > (tvmax)) \
+ else if ((u_long)(tv) > (u_long)(tvmax)) \
(tv) = (tvmax); \
}
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index 8edb853bedea..dd5940bbf4ac 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -292,7 +292,7 @@ tcp_close(tp)
if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
(rt = inp->inp_route.ro_rt) &&
((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
- register u_long i;
+ register u_long i = 0;
if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
i = tp->t_srtt *
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index 38a08d6d0c2e..42c45a43a9a0 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -77,7 +77,7 @@ tcp_usrreq(so, req, m, nam, control)
struct mbuf *m, *nam, *control;
{
register struct inpcb *inp;
- register struct tcpcb *tp;
+ register struct tcpcb *tp = 0;
int s;
int error = 0;
int ostate;
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index 95b1895ac0a2..9ed53e8bef69 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
@@ -382,7 +383,7 @@ udp_output(inp, m, addr, control)
register struct udpiphdr *ui;
register int len = m->m_pkthdr.len;
struct in_addr laddr;
- int s, error = 0;
+ int s = 0, error = 0;
if (control)
m_freem(control); /* XXX */
@@ -618,6 +619,7 @@ udp_detach(inp)
/*
* Sysctl for udp variables.
*/
+int
udp_sysctl(name, namelen, oldp, oldlenp, newp, newlen)
int *name;
u_int namelen;
diff --git a/sys/nfs/nfs_bio.c b/sys/nfs/nfs_bio.c
index 177a278b6310..9ef81fe71999 100644
--- a/sys/nfs/nfs_bio.c
+++ b/sys/nfs/nfs_bio.c
@@ -63,6 +63,7 @@ extern int nfs_numasync;
* Vnode op for read using bio
* Any similarity to readip() is purely coincidental
*/
+int
nfs_bioread(vp, uio, ioflag, cred)
register struct vnode *vp;
register struct uio *uio;
@@ -71,13 +72,13 @@ nfs_bioread(vp, uio, ioflag, cred)
{
register struct nfsnode *np = VTONFS(vp);
register int biosize, diff;
- struct buf *bp, *rabp;
+ struct buf *bp = 0, *rabp;
struct vattr vattr;
struct proc *p;
struct nfsmount *nmp;
daddr_t lbn, bn, rabn;
caddr_t baddr;
- int got_buf, nra, error = 0, n, on, not_readin;
+ int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
#ifdef lint
ioflag = ioflag;
@@ -346,6 +347,7 @@ again:
/*
* Vnode op for write using bio
*/
+int
nfs_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -566,6 +568,7 @@ nfs_getcacheblk(vp, bn, size, p)
* Flush and invalidate all dirty buffers. If another process is already
* doing the flush, just wait for completion.
*/
+int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
struct vnode *vp;
int flags;
@@ -626,6 +629,7 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg)
* This is mainly to avoid queueing async I/O requests when the nfsiods
* are all hung on a dead server.
*/
+int
nfs_asyncio(bp, cred)
register struct buf *bp;
struct ucred *cred;
@@ -670,7 +674,7 @@ nfs_doio(bp, cr, p)
register struct vnode *vp;
struct nfsnode *np;
struct nfsmount *nmp;
- int error, diff, len;
+ int error = 0, diff, len;
struct uio uio;
struct iovec io;
diff --git a/sys/nfs/nfs_common.c b/sys/nfs/nfs_common.c
index 5778f7d7f01a..d17cde79f60e 100644
--- a/sys/nfs/nfs_common.c
+++ b/sys/nfs/nfs_common.c
@@ -262,6 +262,7 @@ nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
/*
* copies mbuf chain to the uio scatter/gather list
*/
+int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
struct mbuf **mrep;
register struct uio *uiop;
@@ -336,6 +337,7 @@ nfsm_mbuftouio(mrep, uiop, siz, dpos)
/*
* copies a uio scatter/gather list to an mbuf chain...
*/
+int
nfsm_uiotombuf(uiop, mq, siz, bpos)
register struct uio *uiop;
struct mbuf **mq;
@@ -423,6 +425,7 @@ nfsm_uiotombuf(uiop, mq, siz, bpos)
* This is used by the macros nfsm_dissect and nfsm_dissecton for tough
* cases. (The macros use the vars. dpos and dpos2)
*/
+int
nfsm_disct(mdp, dposp, siz, left, cp2)
struct mbuf **mdp;
caddr_t *dposp;
@@ -485,6 +488,7 @@ nfsm_disct(mdp, dposp, siz, left, cp2)
/*
* Advance the position in the mbuf chain.
*/
+int
nfs_adv(mdp, dposp, offs, left)
struct mbuf **mdp;
caddr_t *dposp;
@@ -511,13 +515,14 @@ nfs_adv(mdp, dposp, offs, left)
/*
* Copy a string into mbufs for the hard cases...
*/
+int
nfsm_strtmbuf(mb, bpos, cp, siz)
struct mbuf **mb;
char **bpos;
char *cp;
long siz;
{
- register struct mbuf *m1, *m2;
+ register struct mbuf *m1 = 0, *m2;
long left, xfer, len, tlen;
u_long *tl;
int putsize;
@@ -576,6 +581,7 @@ nfsm_strtmbuf(mb, bpos, cp, siz)
/*
* Called once to initialize data structures...
*/
+int
nfs_init()
{
register int i;
@@ -626,6 +632,8 @@ nfs_init()
*/
nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
nfs_timer();
+
+ return (0);
}
/*
@@ -642,6 +650,7 @@ nfs_init()
* Iff vap not NULL
* copy the attributes to *vaper
*/
+int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
struct vnode **vpp;
struct mbuf **mdp;
@@ -806,6 +815,7 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper)
* If the cache is valid, copy contents to *vap and return 0
* otherwise return an error
*/
+int
nfs_getattrcache(vp, vaper)
register struct vnode *vp;
struct vattr *vaper;
@@ -862,6 +872,7 @@ nfs_getattrcache(vp, vaper)
/*
* Set up nameidata for a lookup() call and do it
*/
+int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
register struct nameidata *ndp;
fhandle_t *fhp;
@@ -1035,6 +1046,7 @@ nfsm_adj(mp, len, nul)
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
* - if not lockflag unlock it with VOP_UNLOCK()
*/
+int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
fhandle_t *fhp;
int lockflag;
@@ -1094,6 +1106,7 @@ nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
* The AF_INET family is handled as a special case so that address mbufs
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
*/
+int
netaddr_match(family, haddr, nam)
int family;
union nethostaddr *haddr;
diff --git a/sys/nfs/nfs_node.c b/sys/nfs/nfs_node.c
index 032bdef0d5ab..9c70c535820d 100644
--- a/sys/nfs/nfs_node.c
+++ b/sys/nfs/nfs_node.c
@@ -63,6 +63,7 @@ u_long nheadhash;
* Initialize hash links for nfsnodes
* and build nfsnode free list.
*/
+void
nfs_nhinit()
{
@@ -97,6 +98,7 @@ nfs_hash(fhp)
* In all cases, a pointer to a
* nfsnode structure is returned.
*/
+int
nfs_nget(mntp, fhp, npp)
struct mount *mntp;
register nfsv2fh_t *fhp;
@@ -153,6 +155,7 @@ loop:
return (0);
}
+int
nfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
@@ -188,6 +191,7 @@ nfs_inactive(ap)
/*
* Reclaim an nfsnode so that it can be used for other purposes.
*/
+int
nfs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
@@ -230,6 +234,7 @@ nfs_reclaim(ap)
/*
* Lock an nfsnode
*/
+int
nfs_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -254,6 +259,7 @@ nfs_lock(ap)
/*
* Unlock an nfsnode
*/
+int
nfs_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -266,6 +272,7 @@ nfs_unlock(ap)
/*
* Check for a locked nfsnode
*/
+int
nfs_islocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
diff --git a/sys/nfs/nfs_nqlease.c b/sys/nfs/nfs_nqlease.c
index 965f46132a6c..7ba69e5b8591 100644
--- a/sys/nfs/nfs_nqlease.c
+++ b/sys/nfs/nfs_nqlease.c
@@ -157,6 +157,7 @@ extern struct nfsreq nfsreqh;
* is when a new lease is being allocated, since it is not in the timer
* queue yet. (Ditto for the splsoftclock() and splx(s) calls)
*/
+int
nqsrv_getlease(vp, duration, flags, nd, nam, cachablep, frev, cred)
struct vnode *vp;
u_long *duration;
@@ -167,8 +168,8 @@ nqsrv_getlease(vp, duration, flags, nd, nam, cachablep, frev, cred)
u_quad_t *frev;
struct ucred *cred;
{
- register struct nqlease *lp, *lq, **lpp;
- register struct nqhost *lph;
+ register struct nqlease *lp, *lq, **lpp = 0;
+ register struct nqhost *lph = 0;
struct nqlease *tlp;
struct nqm **lphp;
struct vattr vattr;
@@ -398,6 +399,7 @@ nqsrv_instimeq(lp, duration)
* This is somewhat messy due to the union in the nqhost structure.
* The local host is indicated by the special value of NQLOCALSLP for slp.
*/
+int
nqsrv_cmpnam(slp, nam, lph)
register struct nfssvc_sock *slp;
struct mbuf *nam;
@@ -679,6 +681,7 @@ nqnfs_serverd()
* Do the from/to xdr translation and call nqsrv_getlease() to
* do the real work.
*/
+int
nqnfsrv_getlease(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -731,6 +734,7 @@ nqnfsrv_getlease(nfsd, mrep, md, dpos, cred, nam, mrq)
* Called from nfssvc_nfsd() when a "vacated" message is received from a
* client. Find the entry and expire it.
*/
+int
nqnfsrv_vacated(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -802,6 +806,7 @@ nfsmout:
/*
* Client get lease rpc function.
*/
+int
nqnfs_getlease(vp, rwflag, cred, p)
register struct vnode *vp;
int rwflag;
@@ -846,6 +851,7 @@ nqnfs_getlease(vp, rwflag, cred, p)
/*
* Client vacated message function.
*/
+int
nqnfs_vacated(vp, cred)
register struct vnode *vp;
struct ucred *cred;
@@ -891,6 +897,7 @@ nqnfs_vacated(vp, cred)
/*
* Called for client side callbacks
*/
+int
nqnfs_callback(nmp, mrep, md, dpos)
struct nfsmount *nmp;
struct mbuf *mrep, *md;
@@ -952,6 +959,7 @@ nqnfs_callback(nmp, mrep, md, dpos)
* "sleep" since nfs_reclaim() called from vclean() can pull a node off
* the list asynchronously.
*/
+int
nqnfs_clientd(nmp, cred, ncd, flag, argp, p)
register struct nfsmount *nmp;
struct ucred *cred;
@@ -963,7 +971,7 @@ nqnfs_clientd(nmp, cred, ncd, flag, argp, p)
register struct nfsnode *np;
struct vnode *vp;
struct nfsreq myrep;
- int error, vpid;
+ int error = 0, vpid;
/*
* First initialize some variables
diff --git a/sys/nfs/nfs_serv.c b/sys/nfs/nfs_serv.c
index f31b96e02edc..32b0da20dd2b 100644
--- a/sys/nfs/nfs_serv.c
+++ b/sys/nfs/nfs_serv.c
@@ -88,6 +88,7 @@ nfstype nfs_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
/*
* nqnfs access service
*/
+int
nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -101,7 +102,7 @@ nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
register u_long *tl;
register long t1;
caddr_t bpos;
- int error = 0, rdonly, cache, mode = 0;
+ int error = 0, rdonly, cache = 0, mode = 0;
char *cp2;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -126,6 +127,7 @@ nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs getattr service
*/
+int
nfsrv_getattr(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -163,6 +165,7 @@ nfsrv_getattr(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs setattr service
*/
+int
nfsrv_setattr(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -263,6 +266,7 @@ out:
/*
* nfs lookup rpc
*/
+int
nfsrv_lookup(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -336,6 +340,7 @@ nfsrv_lookup(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs readlink service
*/
+int
nfsrv_readlink(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -351,7 +356,7 @@ nfsrv_readlink(nfsd, mrep, md, dpos, cred, nam, mrq)
caddr_t bpos;
int error = 0, rdonly, cache, i, tlen, len;
char *cp2;
- struct mbuf *mb, *mb2, *mp2, *mp3, *mreq;
+ struct mbuf *mb, *mb2, *mp2 = 0, *mp3 = 0, *mreq;
struct vnode *vp;
nfsv2fh_t nfh;
fhandle_t *fhp;
@@ -418,6 +423,7 @@ out:
/*
* nfs read service
*/
+int
nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -537,6 +543,7 @@ nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs write service
*/
+int
nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -666,6 +673,7 @@ nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
* nfs create service
* now does a truncate to 0 length via. setattr if it already exists
*/
+int
nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -826,11 +834,13 @@ out:
vrele(nd.ni_startdir);
free(nd.ni_cnd.cn_pnbuf, M_NAMEI);
nfsm_reply(0);
+ return (0);
}
/*
* nfs remove service
*/
+int
nfsrv_remove(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -892,6 +902,7 @@ out:
/*
* nfs rename service
*/
+int
nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -906,7 +917,7 @@ nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
char *cp2;
struct mbuf *mb, *mreq;
struct nameidata fromnd, tond;
- struct vnode *fvp, *tvp, *tdvp;
+ struct vnode *fvp = 0, *tvp, *tdvp;
nfsv2fh_t fnfh, tnfh;
fhandle_t *ffhp, *tfhp;
u_quad_t frev;
@@ -1024,6 +1035,7 @@ nfsmout:
/*
* nfs link service
*/
+int
nfsrv_link(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1089,6 +1101,7 @@ out1:
/*
* nfs symbolic link service
*/
+int
nfsrv_symlink(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1171,6 +1184,7 @@ nfsmout:
/*
* nfs mkdir service
*/
+int
nfsrv_mkdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1249,6 +1263,7 @@ nfsmout:
/*
* nfs rmdir service
*/
+int
nfsrv_rmdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1348,6 +1363,7 @@ struct flrep {
u_long fl_fattr[NFSX_NQFATTR / sizeof (u_long)];
};
+int
nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1532,6 +1548,7 @@ again:
nfsm_srvdone;
}
+int
nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1772,6 +1789,7 @@ invalid:
/*
* nfs statfs service
*/
+int
nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1784,7 +1802,7 @@ nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
register u_long *tl;
register long t1;
caddr_t bpos;
- int error = 0, rdonly, cache, isnq;
+ int error = 0, rdonly, cache = 0, isnq;
char *cp2;
struct mbuf *mb, *mb2, *mreq;
struct vnode *vp;
@@ -1819,6 +1837,7 @@ nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
* Null operation, used by clients to ping server
*/
/* ARGSUSED */
+int
nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1827,7 +1846,7 @@ nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
struct mbuf *nam, **mrq;
{
caddr_t bpos;
- int error = VNOVAL, cache;
+ int error = VNOVAL, cache = 0;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -1839,6 +1858,7 @@ nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
* No operation, used for obsolete procedures
*/
/* ARGSUSED */
+int
nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1847,7 +1867,7 @@ nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
struct mbuf *nam, **mrq;
{
caddr_t bpos;
- int error, cache;
+ int error, cache = 0;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -1869,6 +1889,7 @@ nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
* this because it opens a security hole, but since the nfs server opens
* a security hole the size of a barn door anyhow, what the heck.
*/
+int
nfsrv_access(vp, flags, cred, rdonly, p)
register struct vnode *vp;
int flags;
diff --git a/sys/nfs/nfs_socket.c b/sys/nfs/nfs_socket.c
index cf88ed33d92d..c00f7d06cc73 100644
--- a/sys/nfs/nfs_socket.c
+++ b/sys/nfs/nfs_socket.c
@@ -213,6 +213,7 @@ struct nfsreq nfsreqh;
* Initialize sockets and congestion for a new NFS connection.
* We do not free the sockaddr if error.
*/
+int
nfs_connect(nmp, rep)
register struct nfsmount *nmp;
struct nfsreq *rep;
@@ -351,6 +352,7 @@ bad:
* If this fails the mount point is DEAD!
* nb: Must be called with the nfs_sndlock() set on the mount point.
*/
+int
nfs_reconnect(rep)
register struct nfsreq *rep;
{
@@ -408,6 +410,7 @@ nfs_disconnect(nmp)
* - return EPIPE if a connection is lost for connection based sockets (TCP...)
* - do any cleanup required by recoverable socket errors (???)
*/
+int
nfs_send(so, nam, top, rep)
register struct socket *so;
struct mbuf *nam;
@@ -475,6 +478,7 @@ nfs_send(so, nam, top, rep)
* For SOCK_STREAM we must be very careful to read an entire record once
* we have read any of it, even if the system call has been interrupted.
*/
+int
nfs_receive(rep, aname, mp)
register struct nfsreq *rep;
struct mbuf **aname;
@@ -681,6 +685,7 @@ errout:
* with outstanding requests using the xid, until ours is found.
*/
/* ARGSUSED */
+int
nfs_reply(myrep)
struct nfsreq *myrep;
{
@@ -847,6 +852,7 @@ nfsmout:
* by mrep or error
* nb: always frees up mreq mbuf list
*/
+int
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
struct vnode *vp;
struct mbuf *mrest;
@@ -1122,6 +1128,7 @@ nfsmout:
* Generate the rpc reply header
* siz arg. is used to decide if adding a cluster is worthwhile
*/
+int
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
int siz;
struct nfsd *nd;
@@ -1341,6 +1348,7 @@ nfs_timer(arg)
* Test for a termination condition pending on the process.
* This is used for NFSMNT_INT mounts.
*/
+int
nfs_sigintr(nmp, rep, p)
struct nfsmount *nmp;
struct nfsreq *rep;
@@ -1364,6 +1372,7 @@ nfs_sigintr(nmp, rep, p)
* and also to avoid race conditions between the processes with nfs requests
* in progress when a reconnect is necessary.
*/
+int
nfs_sndlock(flagp, rep)
register int *flagp;
struct nfsreq *rep;
@@ -1409,6 +1418,7 @@ nfs_sndunlock(flagp)
}
}
+int
nfs_rcvlock(rep)
register struct nfsreq *rep;
{
@@ -1659,6 +1669,7 @@ dorecs:
* stream socket. The "waitflag" argument indicates whether or not it
* can sleep.
*/
+int
nfsrv_getstream(slp, waitflag)
register struct nfssvc_sock *slp;
int waitflag;
@@ -1666,7 +1677,7 @@ nfsrv_getstream(slp, waitflag)
register struct mbuf *m;
register char *cp1, *cp2;
register int len;
- struct mbuf *om, *m2, *recm;
+ struct mbuf *om, *m2, *recm = 0;
u_long recmark;
if (slp->ns_flag & SLP_GETSTREAM)
@@ -1763,6 +1774,7 @@ nfsrv_getstream(slp, waitflag)
/*
* Parse an RPC header.
*/
+int
nfsrv_dorec(slp, nd)
register struct nfssvc_sock *slp;
register struct nfsd *nd;
@@ -1798,6 +1810,7 @@ nfsrv_dorec(slp, nd)
* - verify it
* - fill in the cred struct.
*/
+int
nfs_getreq(nd, has_header)
register struct nfsd *nd;
int has_header;
@@ -1975,6 +1988,7 @@ nfsrv_wakenfsd(slp)
nfsd_head.nd_flag |= NFSD_CHECKSLP;
}
+int
nfs_msg(p, server, msg)
struct proc *p;
char *server, *msg;
@@ -1987,4 +2001,5 @@ nfs_msg(p, server, msg)
tpr = NULL;
tprintf(tpr, "nfs server %s: %s\n", server, msg);
tprintf_close(tpr);
+ return (0);
}
diff --git a/sys/nfs/nfs_srvcache.c b/sys/nfs/nfs_srvcache.c
index 63d8bb72d82f..45bfe1bb042f 100644
--- a/sys/nfs/nfs_srvcache.c
+++ b/sys/nfs/nfs_srvcache.c
@@ -135,6 +135,7 @@ static int repliesstatus[NFS_NPROCS] = {
/*
* Initialize the server request cache list
*/
+void
nfsrv_initcache()
{
@@ -155,6 +156,7 @@ nfsrv_initcache()
* return DOIT
* Update/add new request at end of lru list
*/
+int
nfsrv_getcache(nam, nd, repp)
struct mbuf *nam;
register struct nfsd *nd;
diff --git a/sys/nfs/nfs_subs.c b/sys/nfs/nfs_subs.c
index 5778f7d7f01a..d17cde79f60e 100644
--- a/sys/nfs/nfs_subs.c
+++ b/sys/nfs/nfs_subs.c
@@ -262,6 +262,7 @@ nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
/*
* copies mbuf chain to the uio scatter/gather list
*/
+int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
struct mbuf **mrep;
register struct uio *uiop;
@@ -336,6 +337,7 @@ nfsm_mbuftouio(mrep, uiop, siz, dpos)
/*
* copies a uio scatter/gather list to an mbuf chain...
*/
+int
nfsm_uiotombuf(uiop, mq, siz, bpos)
register struct uio *uiop;
struct mbuf **mq;
@@ -423,6 +425,7 @@ nfsm_uiotombuf(uiop, mq, siz, bpos)
* This is used by the macros nfsm_dissect and nfsm_dissecton for tough
* cases. (The macros use the vars. dpos and dpos2)
*/
+int
nfsm_disct(mdp, dposp, siz, left, cp2)
struct mbuf **mdp;
caddr_t *dposp;
@@ -485,6 +488,7 @@ nfsm_disct(mdp, dposp, siz, left, cp2)
/*
* Advance the position in the mbuf chain.
*/
+int
nfs_adv(mdp, dposp, offs, left)
struct mbuf **mdp;
caddr_t *dposp;
@@ -511,13 +515,14 @@ nfs_adv(mdp, dposp, offs, left)
/*
* Copy a string into mbufs for the hard cases...
*/
+int
nfsm_strtmbuf(mb, bpos, cp, siz)
struct mbuf **mb;
char **bpos;
char *cp;
long siz;
{
- register struct mbuf *m1, *m2;
+ register struct mbuf *m1 = 0, *m2;
long left, xfer, len, tlen;
u_long *tl;
int putsize;
@@ -576,6 +581,7 @@ nfsm_strtmbuf(mb, bpos, cp, siz)
/*
* Called once to initialize data structures...
*/
+int
nfs_init()
{
register int i;
@@ -626,6 +632,8 @@ nfs_init()
*/
nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
nfs_timer();
+
+ return (0);
}
/*
@@ -642,6 +650,7 @@ nfs_init()
* Iff vap not NULL
* copy the attributes to *vaper
*/
+int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
struct vnode **vpp;
struct mbuf **mdp;
@@ -806,6 +815,7 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper)
* If the cache is valid, copy contents to *vap and return 0
* otherwise return an error
*/
+int
nfs_getattrcache(vp, vaper)
register struct vnode *vp;
struct vattr *vaper;
@@ -862,6 +872,7 @@ nfs_getattrcache(vp, vaper)
/*
* Set up nameidata for a lookup() call and do it
*/
+int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
register struct nameidata *ndp;
fhandle_t *fhp;
@@ -1035,6 +1046,7 @@ nfsm_adj(mp, len, nul)
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
* - if not lockflag unlock it with VOP_UNLOCK()
*/
+int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
fhandle_t *fhp;
int lockflag;
@@ -1094,6 +1106,7 @@ nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
* The AF_INET family is handled as a special case so that address mbufs
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
*/
+int
netaddr_match(family, haddr, nam)
int family;
union nethostaddr *haddr;
diff --git a/sys/nfs/nfs_syscalls.c b/sys/nfs/nfs_syscalls.c
index 5d86b42ee20a..b00a225de340 100644
--- a/sys/nfs/nfs_syscalls.c
+++ b/sys/nfs/nfs_syscalls.c
@@ -69,6 +69,8 @@
#include <nfs/nqnfs.h>
#include <nfs/nfsrtt.h>
+void nfsrv_zapsock __P((struct nfssvc_sock *));
+
/* Global defs. */
extern u_long nfs_prog, nfs_vers;
extern int (*nfsrv_procs[NFS_NPROCS])();
@@ -106,6 +108,7 @@ struct getfh_args {
char *fname;
fhandle_t *fhp;
};
+int
getfh(p, uap, retval)
struct proc *p;
register struct getfh_args *uap;
@@ -148,6 +151,7 @@ struct nfssvc_args {
int flag;
caddr_t argp;
};
+int
nfssvc(p, uap, retval)
struct proc *p;
register struct nfssvc_args *uap;
@@ -278,6 +282,7 @@ nfssvc(p, uap, retval)
/*
* Adds a socket to the list for servicing by nfsds.
*/
+int
nfssvc_addsock(fp, mynam)
struct file *fp;
struct mbuf *mynam;
@@ -369,6 +374,7 @@ nfssvc_addsock(fp, mynam)
* Called by nfssvc() for nfsds. Just loops around servicing rpc requests
* until it is killed by a signal.
*/
+int
nfssvc_nfsd(nsd, argp, p)
struct nfsd_srvargs *nsd;
caddr_t argp;
@@ -383,7 +389,7 @@ nfssvc_nfsd(nsd, argp, p)
struct mbuf *mreq, *nam;
struct timeval starttime;
struct nfsuid *uidp;
- int error, cacherep, s;
+ int error = 0, cacherep, s;
int sotype;
s = splnet();
@@ -631,6 +637,7 @@ done:
* They do read-ahead and write-behind operations on the block I/O cache.
* Never returns unless it fails or gets killed.
*/
+int
nfssvc_iod(p)
struct proc *p;
{
@@ -683,6 +690,7 @@ nfssvc_iod(p)
* will stop using it and clear ns_flag at the end so that it will not be
* reassigned during cleanup.
*/
+void
nfsrv_zapsock(slp)
register struct nfssvc_sock *slp;
{
@@ -719,6 +727,7 @@ nfsrv_zapsock(slp)
* Get an authorization string for the uid by having the mount_nfs sitting
* on this mount point porpous out of the kernel and do it.
*/
+int
nfs_getauth(nmp, rep, cred, auth_type, auth_str, auth_len)
register struct nfsmount *nmp;
struct nfsreq *rep;
diff --git a/sys/nfs/nfs_vnops.c b/sys/nfs/nfs_vnops.c
index a909b48dc67d..9e76ded98434 100644
--- a/sys/nfs/nfs_vnops.c
+++ b/sys/nfs/nfs_vnops.c
@@ -480,7 +480,7 @@ nfs_setattr(ap)
register struct vnode *vp = ap->a_vp;
register struct nfsnode *np = VTONFS(vp);
register struct vattr *vap = ap->a_vap;
- u_quad_t frev, tsize;
+ u_quad_t frev, tsize = 0;
if (vap->va_size != VNOVAL || vap->va_mtime.ts_sec != VNOVAL ||
vap->va_atime.ts_sec != VNOVAL) {
@@ -572,14 +572,14 @@ nfs_lookup(ap)
register long t1, t2;
struct nfsmount *nmp;
caddr_t bpos, dpos, cp2;
- time_t reqtime;
+ time_t reqtime = 0;
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
struct vnode *newvp;
long len;
nfsv2fh_t *fhp;
struct nfsnode *np;
int lockparent, wantparent, error = 0;
- int nqlflag, cachable;
+ int nqlflag = 0, cachable = 0;
u_quad_t frev;
*vpp = NULL;
@@ -953,7 +953,7 @@ nfs_mknod(ap)
register u_long *tl;
register caddr_t cp;
register long t1, t2;
- struct vnode *newvp;
+ struct vnode *newvp = 0;
struct vattr vattr;
char *cp2;
caddr_t bpos, dpos;
@@ -1589,11 +1589,11 @@ nfs_readdirrpc(vp, uiop, cred)
struct ucred *cred;
{
register long len;
- register struct dirent *dp;
+ register struct dirent *dp = 0;
register u_long *tl;
register caddr_t cp;
register long t1;
- long tlen, lastlen;
+ long tlen, lastlen = 0;
caddr_t bpos, dpos, cp2;
int error = 0;
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
@@ -1601,8 +1601,8 @@ nfs_readdirrpc(vp, uiop, cred)
caddr_t dpos2;
int siz;
int more_dirs = 1;
- u_long off, savoff;
- struct dirent *savdp;
+ u_long off, savoff = 0;
+ struct dirent *savdp = 0;
struct nfsmount *nmp;
struct nfsnode *np = VTONFS(vp);
long tresid;
@@ -1732,7 +1732,7 @@ nfs_readdirlookrpc(vp, uiop, cred)
struct ucred *cred;
{
register int len;
- register struct dirent *dp;
+ register struct dirent *dp = 0;
register u_long *tl;
register caddr_t cp;
register long t1;
@@ -1740,15 +1740,15 @@ nfs_readdirlookrpc(vp, uiop, cred)
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
struct nameidata nami, *ndp = &nami;
struct componentname *cnp = &ndp->ni_cnd;
- u_long off, endoff, fileno;
- time_t reqtime, ltime;
+ u_long off, endoff = 0, fileno;
+ time_t reqtime, ltime = 0;
struct nfsmount *nmp;
struct nfsnode *np;
struct vnode *newvp;
nfsv2fh_t *fhp;
u_quad_t frev;
int error = 0, tlen, more_dirs = 1, tresid, doit, bigenough, i;
- int cachable;
+ int cachable = 0;
if (uiop->uio_iovcnt != 1)
panic("nfs rdirlook");
@@ -2177,6 +2177,7 @@ loop:
* information from the remote server.
*/
/* ARGSUSED */
+int
nfs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -2225,6 +2226,7 @@ nfs_print(ap)
fifo_printinfo(vp);
#endif /* FIFO */
printf("\n");
+ return (0);
}
/*
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 177a278b6310..9ef81fe71999 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -63,6 +63,7 @@ extern int nfs_numasync;
* Vnode op for read using bio
* Any similarity to readip() is purely coincidental
*/
+int
nfs_bioread(vp, uio, ioflag, cred)
register struct vnode *vp;
register struct uio *uio;
@@ -71,13 +72,13 @@ nfs_bioread(vp, uio, ioflag, cred)
{
register struct nfsnode *np = VTONFS(vp);
register int biosize, diff;
- struct buf *bp, *rabp;
+ struct buf *bp = 0, *rabp;
struct vattr vattr;
struct proc *p;
struct nfsmount *nmp;
daddr_t lbn, bn, rabn;
caddr_t baddr;
- int got_buf, nra, error = 0, n, on, not_readin;
+ int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
#ifdef lint
ioflag = ioflag;
@@ -346,6 +347,7 @@ again:
/*
* Vnode op for write using bio
*/
+int
nfs_write(ap)
struct vop_write_args /* {
struct vnode *a_vp;
@@ -566,6 +568,7 @@ nfs_getcacheblk(vp, bn, size, p)
* Flush and invalidate all dirty buffers. If another process is already
* doing the flush, just wait for completion.
*/
+int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
struct vnode *vp;
int flags;
@@ -626,6 +629,7 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg)
* This is mainly to avoid queueing async I/O requests when the nfsiods
* are all hung on a dead server.
*/
+int
nfs_asyncio(bp, cred)
register struct buf *bp;
struct ucred *cred;
@@ -670,7 +674,7 @@ nfs_doio(bp, cr, p)
register struct vnode *vp;
struct nfsnode *np;
struct nfsmount *nmp;
- int error, diff, len;
+ int error = 0, diff, len;
struct uio uio;
struct iovec io;
diff --git a/sys/nfsclient/nfs_nfsiod.c b/sys/nfsclient/nfs_nfsiod.c
index 5d86b42ee20a..b00a225de340 100644
--- a/sys/nfsclient/nfs_nfsiod.c
+++ b/sys/nfsclient/nfs_nfsiod.c
@@ -69,6 +69,8 @@
#include <nfs/nqnfs.h>
#include <nfs/nfsrtt.h>
+void nfsrv_zapsock __P((struct nfssvc_sock *));
+
/* Global defs. */
extern u_long nfs_prog, nfs_vers;
extern int (*nfsrv_procs[NFS_NPROCS])();
@@ -106,6 +108,7 @@ struct getfh_args {
char *fname;
fhandle_t *fhp;
};
+int
getfh(p, uap, retval)
struct proc *p;
register struct getfh_args *uap;
@@ -148,6 +151,7 @@ struct nfssvc_args {
int flag;
caddr_t argp;
};
+int
nfssvc(p, uap, retval)
struct proc *p;
register struct nfssvc_args *uap;
@@ -278,6 +282,7 @@ nfssvc(p, uap, retval)
/*
* Adds a socket to the list for servicing by nfsds.
*/
+int
nfssvc_addsock(fp, mynam)
struct file *fp;
struct mbuf *mynam;
@@ -369,6 +374,7 @@ nfssvc_addsock(fp, mynam)
* Called by nfssvc() for nfsds. Just loops around servicing rpc requests
* until it is killed by a signal.
*/
+int
nfssvc_nfsd(nsd, argp, p)
struct nfsd_srvargs *nsd;
caddr_t argp;
@@ -383,7 +389,7 @@ nfssvc_nfsd(nsd, argp, p)
struct mbuf *mreq, *nam;
struct timeval starttime;
struct nfsuid *uidp;
- int error, cacherep, s;
+ int error = 0, cacherep, s;
int sotype;
s = splnet();
@@ -631,6 +637,7 @@ done:
* They do read-ahead and write-behind operations on the block I/O cache.
* Never returns unless it fails or gets killed.
*/
+int
nfssvc_iod(p)
struct proc *p;
{
@@ -683,6 +690,7 @@ nfssvc_iod(p)
* will stop using it and clear ns_flag at the end so that it will not be
* reassigned during cleanup.
*/
+void
nfsrv_zapsock(slp)
register struct nfssvc_sock *slp;
{
@@ -719,6 +727,7 @@ nfsrv_zapsock(slp)
* Get an authorization string for the uid by having the mount_nfs sitting
* on this mount point porpous out of the kernel and do it.
*/
+int
nfs_getauth(nmp, rep, cred, auth_type, auth_str, auth_len)
register struct nfsmount *nmp;
struct nfsreq *rep;
diff --git a/sys/nfsclient/nfs_node.c b/sys/nfsclient/nfs_node.c
index 032bdef0d5ab..9c70c535820d 100644
--- a/sys/nfsclient/nfs_node.c
+++ b/sys/nfsclient/nfs_node.c
@@ -63,6 +63,7 @@ u_long nheadhash;
* Initialize hash links for nfsnodes
* and build nfsnode free list.
*/
+void
nfs_nhinit()
{
@@ -97,6 +98,7 @@ nfs_hash(fhp)
* In all cases, a pointer to a
* nfsnode structure is returned.
*/
+int
nfs_nget(mntp, fhp, npp)
struct mount *mntp;
register nfsv2fh_t *fhp;
@@ -153,6 +155,7 @@ loop:
return (0);
}
+int
nfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
@@ -188,6 +191,7 @@ nfs_inactive(ap)
/*
* Reclaim an nfsnode so that it can be used for other purposes.
*/
+int
nfs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
@@ -230,6 +234,7 @@ nfs_reclaim(ap)
/*
* Lock an nfsnode
*/
+int
nfs_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -254,6 +259,7 @@ nfs_lock(ap)
/*
* Unlock an nfsnode
*/
+int
nfs_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
@@ -266,6 +272,7 @@ nfs_unlock(ap)
/*
* Check for a locked nfsnode
*/
+int
nfs_islocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
diff --git a/sys/nfsclient/nfs_socket.c b/sys/nfsclient/nfs_socket.c
index cf88ed33d92d..c00f7d06cc73 100644
--- a/sys/nfsclient/nfs_socket.c
+++ b/sys/nfsclient/nfs_socket.c
@@ -213,6 +213,7 @@ struct nfsreq nfsreqh;
* Initialize sockets and congestion for a new NFS connection.
* We do not free the sockaddr if error.
*/
+int
nfs_connect(nmp, rep)
register struct nfsmount *nmp;
struct nfsreq *rep;
@@ -351,6 +352,7 @@ bad:
* If this fails the mount point is DEAD!
* nb: Must be called with the nfs_sndlock() set on the mount point.
*/
+int
nfs_reconnect(rep)
register struct nfsreq *rep;
{
@@ -408,6 +410,7 @@ nfs_disconnect(nmp)
* - return EPIPE if a connection is lost for connection based sockets (TCP...)
* - do any cleanup required by recoverable socket errors (???)
*/
+int
nfs_send(so, nam, top, rep)
register struct socket *so;
struct mbuf *nam;
@@ -475,6 +478,7 @@ nfs_send(so, nam, top, rep)
* For SOCK_STREAM we must be very careful to read an entire record once
* we have read any of it, even if the system call has been interrupted.
*/
+int
nfs_receive(rep, aname, mp)
register struct nfsreq *rep;
struct mbuf **aname;
@@ -681,6 +685,7 @@ errout:
* with outstanding requests using the xid, until ours is found.
*/
/* ARGSUSED */
+int
nfs_reply(myrep)
struct nfsreq *myrep;
{
@@ -847,6 +852,7 @@ nfsmout:
* by mrep or error
* nb: always frees up mreq mbuf list
*/
+int
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
struct vnode *vp;
struct mbuf *mrest;
@@ -1122,6 +1128,7 @@ nfsmout:
* Generate the rpc reply header
* siz arg. is used to decide if adding a cluster is worthwhile
*/
+int
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
int siz;
struct nfsd *nd;
@@ -1341,6 +1348,7 @@ nfs_timer(arg)
* Test for a termination condition pending on the process.
* This is used for NFSMNT_INT mounts.
*/
+int
nfs_sigintr(nmp, rep, p)
struct nfsmount *nmp;
struct nfsreq *rep;
@@ -1364,6 +1372,7 @@ nfs_sigintr(nmp, rep, p)
* and also to avoid race conditions between the processes with nfs requests
* in progress when a reconnect is necessary.
*/
+int
nfs_sndlock(flagp, rep)
register int *flagp;
struct nfsreq *rep;
@@ -1409,6 +1418,7 @@ nfs_sndunlock(flagp)
}
}
+int
nfs_rcvlock(rep)
register struct nfsreq *rep;
{
@@ -1659,6 +1669,7 @@ dorecs:
* stream socket. The "waitflag" argument indicates whether or not it
* can sleep.
*/
+int
nfsrv_getstream(slp, waitflag)
register struct nfssvc_sock *slp;
int waitflag;
@@ -1666,7 +1677,7 @@ nfsrv_getstream(slp, waitflag)
register struct mbuf *m;
register char *cp1, *cp2;
register int len;
- struct mbuf *om, *m2, *recm;
+ struct mbuf *om, *m2, *recm = 0;
u_long recmark;
if (slp->ns_flag & SLP_GETSTREAM)
@@ -1763,6 +1774,7 @@ nfsrv_getstream(slp, waitflag)
/*
* Parse an RPC header.
*/
+int
nfsrv_dorec(slp, nd)
register struct nfssvc_sock *slp;
register struct nfsd *nd;
@@ -1798,6 +1810,7 @@ nfsrv_dorec(slp, nd)
* - verify it
* - fill in the cred struct.
*/
+int
nfs_getreq(nd, has_header)
register struct nfsd *nd;
int has_header;
@@ -1975,6 +1988,7 @@ nfsrv_wakenfsd(slp)
nfsd_head.nd_flag |= NFSD_CHECKSLP;
}
+int
nfs_msg(p, server, msg)
struct proc *p;
char *server, *msg;
@@ -1987,4 +2001,5 @@ nfs_msg(p, server, msg)
tpr = NULL;
tprintf(tpr, "nfs server %s: %s\n", server, msg);
tprintf_close(tpr);
+ return (0);
}
diff --git a/sys/nfsclient/nfs_subs.c b/sys/nfsclient/nfs_subs.c
index 5778f7d7f01a..d17cde79f60e 100644
--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -262,6 +262,7 @@ nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
/*
* copies mbuf chain to the uio scatter/gather list
*/
+int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
struct mbuf **mrep;
register struct uio *uiop;
@@ -336,6 +337,7 @@ nfsm_mbuftouio(mrep, uiop, siz, dpos)
/*
* copies a uio scatter/gather list to an mbuf chain...
*/
+int
nfsm_uiotombuf(uiop, mq, siz, bpos)
register struct uio *uiop;
struct mbuf **mq;
@@ -423,6 +425,7 @@ nfsm_uiotombuf(uiop, mq, siz, bpos)
* This is used by the macros nfsm_dissect and nfsm_dissecton for tough
* cases. (The macros use the vars. dpos and dpos2)
*/
+int
nfsm_disct(mdp, dposp, siz, left, cp2)
struct mbuf **mdp;
caddr_t *dposp;
@@ -485,6 +488,7 @@ nfsm_disct(mdp, dposp, siz, left, cp2)
/*
* Advance the position in the mbuf chain.
*/
+int
nfs_adv(mdp, dposp, offs, left)
struct mbuf **mdp;
caddr_t *dposp;
@@ -511,13 +515,14 @@ nfs_adv(mdp, dposp, offs, left)
/*
* Copy a string into mbufs for the hard cases...
*/
+int
nfsm_strtmbuf(mb, bpos, cp, siz)
struct mbuf **mb;
char **bpos;
char *cp;
long siz;
{
- register struct mbuf *m1, *m2;
+ register struct mbuf *m1 = 0, *m2;
long left, xfer, len, tlen;
u_long *tl;
int putsize;
@@ -576,6 +581,7 @@ nfsm_strtmbuf(mb, bpos, cp, siz)
/*
* Called once to initialize data structures...
*/
+int
nfs_init()
{
register int i;
@@ -626,6 +632,8 @@ nfs_init()
*/
nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
nfs_timer();
+
+ return (0);
}
/*
@@ -642,6 +650,7 @@ nfs_init()
* Iff vap not NULL
* copy the attributes to *vaper
*/
+int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
struct vnode **vpp;
struct mbuf **mdp;
@@ -806,6 +815,7 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper)
* If the cache is valid, copy contents to *vap and return 0
* otherwise return an error
*/
+int
nfs_getattrcache(vp, vaper)
register struct vnode *vp;
struct vattr *vaper;
@@ -862,6 +872,7 @@ nfs_getattrcache(vp, vaper)
/*
* Set up nameidata for a lookup() call and do it
*/
+int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
register struct nameidata *ndp;
fhandle_t *fhp;
@@ -1035,6 +1046,7 @@ nfsm_adj(mp, len, nul)
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
* - if not lockflag unlock it with VOP_UNLOCK()
*/
+int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
fhandle_t *fhp;
int lockflag;
@@ -1094,6 +1106,7 @@ nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
* The AF_INET family is handled as a special case so that address mbufs
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
*/
+int
netaddr_match(family, haddr, nam)
int family;
union nethostaddr *haddr;
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index a909b48dc67d..9e76ded98434 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -480,7 +480,7 @@ nfs_setattr(ap)
register struct vnode *vp = ap->a_vp;
register struct nfsnode *np = VTONFS(vp);
register struct vattr *vap = ap->a_vap;
- u_quad_t frev, tsize;
+ u_quad_t frev, tsize = 0;
if (vap->va_size != VNOVAL || vap->va_mtime.ts_sec != VNOVAL ||
vap->va_atime.ts_sec != VNOVAL) {
@@ -572,14 +572,14 @@ nfs_lookup(ap)
register long t1, t2;
struct nfsmount *nmp;
caddr_t bpos, dpos, cp2;
- time_t reqtime;
+ time_t reqtime = 0;
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
struct vnode *newvp;
long len;
nfsv2fh_t *fhp;
struct nfsnode *np;
int lockparent, wantparent, error = 0;
- int nqlflag, cachable;
+ int nqlflag = 0, cachable = 0;
u_quad_t frev;
*vpp = NULL;
@@ -953,7 +953,7 @@ nfs_mknod(ap)
register u_long *tl;
register caddr_t cp;
register long t1, t2;
- struct vnode *newvp;
+ struct vnode *newvp = 0;
struct vattr vattr;
char *cp2;
caddr_t bpos, dpos;
@@ -1589,11 +1589,11 @@ nfs_readdirrpc(vp, uiop, cred)
struct ucred *cred;
{
register long len;
- register struct dirent *dp;
+ register struct dirent *dp = 0;
register u_long *tl;
register caddr_t cp;
register long t1;
- long tlen, lastlen;
+ long tlen, lastlen = 0;
caddr_t bpos, dpos, cp2;
int error = 0;
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
@@ -1601,8 +1601,8 @@ nfs_readdirrpc(vp, uiop, cred)
caddr_t dpos2;
int siz;
int more_dirs = 1;
- u_long off, savoff;
- struct dirent *savdp;
+ u_long off, savoff = 0;
+ struct dirent *savdp = 0;
struct nfsmount *nmp;
struct nfsnode *np = VTONFS(vp);
long tresid;
@@ -1732,7 +1732,7 @@ nfs_readdirlookrpc(vp, uiop, cred)
struct ucred *cred;
{
register int len;
- register struct dirent *dp;
+ register struct dirent *dp = 0;
register u_long *tl;
register caddr_t cp;
register long t1;
@@ -1740,15 +1740,15 @@ nfs_readdirlookrpc(vp, uiop, cred)
struct mbuf *mreq, *mrep, *md, *mb, *mb2;
struct nameidata nami, *ndp = &nami;
struct componentname *cnp = &ndp->ni_cnd;
- u_long off, endoff, fileno;
- time_t reqtime, ltime;
+ u_long off, endoff = 0, fileno;
+ time_t reqtime, ltime = 0;
struct nfsmount *nmp;
struct nfsnode *np;
struct vnode *newvp;
nfsv2fh_t *fhp;
u_quad_t frev;
int error = 0, tlen, more_dirs = 1, tresid, doit, bigenough, i;
- int cachable;
+ int cachable = 0;
if (uiop->uio_iovcnt != 1)
panic("nfs rdirlook");
@@ -2177,6 +2177,7 @@ loop:
* information from the remote server.
*/
/* ARGSUSED */
+int
nfs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
@@ -2225,6 +2226,7 @@ nfs_print(ap)
fifo_printinfo(vp);
#endif /* FIFO */
printf("\n");
+ return (0);
}
/*
diff --git a/sys/nfsserver/nfs_serv.c b/sys/nfsserver/nfs_serv.c
index f31b96e02edc..32b0da20dd2b 100644
--- a/sys/nfsserver/nfs_serv.c
+++ b/sys/nfsserver/nfs_serv.c
@@ -88,6 +88,7 @@ nfstype nfs_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
/*
* nqnfs access service
*/
+int
nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -101,7 +102,7 @@ nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
register u_long *tl;
register long t1;
caddr_t bpos;
- int error = 0, rdonly, cache, mode = 0;
+ int error = 0, rdonly, cache = 0, mode = 0;
char *cp2;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -126,6 +127,7 @@ nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs getattr service
*/
+int
nfsrv_getattr(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -163,6 +165,7 @@ nfsrv_getattr(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs setattr service
*/
+int
nfsrv_setattr(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -263,6 +266,7 @@ out:
/*
* nfs lookup rpc
*/
+int
nfsrv_lookup(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -336,6 +340,7 @@ nfsrv_lookup(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs readlink service
*/
+int
nfsrv_readlink(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -351,7 +356,7 @@ nfsrv_readlink(nfsd, mrep, md, dpos, cred, nam, mrq)
caddr_t bpos;
int error = 0, rdonly, cache, i, tlen, len;
char *cp2;
- struct mbuf *mb, *mb2, *mp2, *mp3, *mreq;
+ struct mbuf *mb, *mb2, *mp2 = 0, *mp3 = 0, *mreq;
struct vnode *vp;
nfsv2fh_t nfh;
fhandle_t *fhp;
@@ -418,6 +423,7 @@ out:
/*
* nfs read service
*/
+int
nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -537,6 +543,7 @@ nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* nfs write service
*/
+int
nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -666,6 +673,7 @@ nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
* nfs create service
* now does a truncate to 0 length via. setattr if it already exists
*/
+int
nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -826,11 +834,13 @@ out:
vrele(nd.ni_startdir);
free(nd.ni_cnd.cn_pnbuf, M_NAMEI);
nfsm_reply(0);
+ return (0);
}
/*
* nfs remove service
*/
+int
nfsrv_remove(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -892,6 +902,7 @@ out:
/*
* nfs rename service
*/
+int
nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -906,7 +917,7 @@ nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
char *cp2;
struct mbuf *mb, *mreq;
struct nameidata fromnd, tond;
- struct vnode *fvp, *tvp, *tdvp;
+ struct vnode *fvp = 0, *tvp, *tdvp;
nfsv2fh_t fnfh, tnfh;
fhandle_t *ffhp, *tfhp;
u_quad_t frev;
@@ -1024,6 +1035,7 @@ nfsmout:
/*
* nfs link service
*/
+int
nfsrv_link(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1089,6 +1101,7 @@ out1:
/*
* nfs symbolic link service
*/
+int
nfsrv_symlink(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1171,6 +1184,7 @@ nfsmout:
/*
* nfs mkdir service
*/
+int
nfsrv_mkdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1249,6 +1263,7 @@ nfsmout:
/*
* nfs rmdir service
*/
+int
nfsrv_rmdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1348,6 +1363,7 @@ struct flrep {
u_long fl_fattr[NFSX_NQFATTR / sizeof (u_long)];
};
+int
nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1532,6 +1548,7 @@ again:
nfsm_srvdone;
}
+int
nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1772,6 +1789,7 @@ invalid:
/*
* nfs statfs service
*/
+int
nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1784,7 +1802,7 @@ nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
register u_long *tl;
register long t1;
caddr_t bpos;
- int error = 0, rdonly, cache, isnq;
+ int error = 0, rdonly, cache = 0, isnq;
char *cp2;
struct mbuf *mb, *mb2, *mreq;
struct vnode *vp;
@@ -1819,6 +1837,7 @@ nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
* Null operation, used by clients to ping server
*/
/* ARGSUSED */
+int
nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1827,7 +1846,7 @@ nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
struct mbuf *nam, **mrq;
{
caddr_t bpos;
- int error = VNOVAL, cache;
+ int error = VNOVAL, cache = 0;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -1839,6 +1858,7 @@ nfsrv_null(nfsd, mrep, md, dpos, cred, nam, mrq)
* No operation, used for obsolete procedures
*/
/* ARGSUSED */
+int
nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
struct nfsd *nfsd;
struct mbuf *mrep, *md;
@@ -1847,7 +1867,7 @@ nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
struct mbuf *nam, **mrq;
{
caddr_t bpos;
- int error, cache;
+ int error, cache = 0;
struct mbuf *mb, *mreq;
u_quad_t frev;
@@ -1869,6 +1889,7 @@ nfsrv_noop(nfsd, mrep, md, dpos, cred, nam, mrq)
* this because it opens a security hole, but since the nfs server opens
* a security hole the size of a barn door anyhow, what the heck.
*/
+int
nfsrv_access(vp, flags, cred, rdonly, p)
register struct vnode *vp;
int flags;
diff --git a/sys/nfsserver/nfs_srvcache.c b/sys/nfsserver/nfs_srvcache.c
index 63d8bb72d82f..45bfe1bb042f 100644
--- a/sys/nfsserver/nfs_srvcache.c
+++ b/sys/nfsserver/nfs_srvcache.c
@@ -135,6 +135,7 @@ static int repliesstatus[NFS_NPROCS] = {
/*
* Initialize the server request cache list
*/
+void
nfsrv_initcache()
{
@@ -155,6 +156,7 @@ nfsrv_initcache()
* return DOIT
* Update/add new request at end of lru list
*/
+int
nfsrv_getcache(nam, nd, repp)
struct mbuf *nam;
register struct nfsd *nd;
diff --git a/sys/nfsserver/nfs_srvsock.c b/sys/nfsserver/nfs_srvsock.c
index cf88ed33d92d..c00f7d06cc73 100644
--- a/sys/nfsserver/nfs_srvsock.c
+++ b/sys/nfsserver/nfs_srvsock.c
@@ -213,6 +213,7 @@ struct nfsreq nfsreqh;
* Initialize sockets and congestion for a new NFS connection.
* We do not free the sockaddr if error.
*/
+int
nfs_connect(nmp, rep)
register struct nfsmount *nmp;
struct nfsreq *rep;
@@ -351,6 +352,7 @@ bad:
* If this fails the mount point is DEAD!
* nb: Must be called with the nfs_sndlock() set on the mount point.
*/
+int
nfs_reconnect(rep)
register struct nfsreq *rep;
{
@@ -408,6 +410,7 @@ nfs_disconnect(nmp)
* - return EPIPE if a connection is lost for connection based sockets (TCP...)
* - do any cleanup required by recoverable socket errors (???)
*/
+int
nfs_send(so, nam, top, rep)
register struct socket *so;
struct mbuf *nam;
@@ -475,6 +478,7 @@ nfs_send(so, nam, top, rep)
* For SOCK_STREAM we must be very careful to read an entire record once
* we have read any of it, even if the system call has been interrupted.
*/
+int
nfs_receive(rep, aname, mp)
register struct nfsreq *rep;
struct mbuf **aname;
@@ -681,6 +685,7 @@ errout:
* with outstanding requests using the xid, until ours is found.
*/
/* ARGSUSED */
+int
nfs_reply(myrep)
struct nfsreq *myrep;
{
@@ -847,6 +852,7 @@ nfsmout:
* by mrep or error
* nb: always frees up mreq mbuf list
*/
+int
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
struct vnode *vp;
struct mbuf *mrest;
@@ -1122,6 +1128,7 @@ nfsmout:
* Generate the rpc reply header
* siz arg. is used to decide if adding a cluster is worthwhile
*/
+int
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
int siz;
struct nfsd *nd;
@@ -1341,6 +1348,7 @@ nfs_timer(arg)
* Test for a termination condition pending on the process.
* This is used for NFSMNT_INT mounts.
*/
+int
nfs_sigintr(nmp, rep, p)
struct nfsmount *nmp;
struct nfsreq *rep;
@@ -1364,6 +1372,7 @@ nfs_sigintr(nmp, rep, p)
* and also to avoid race conditions between the processes with nfs requests
* in progress when a reconnect is necessary.
*/
+int
nfs_sndlock(flagp, rep)
register int *flagp;
struct nfsreq *rep;
@@ -1409,6 +1418,7 @@ nfs_sndunlock(flagp)
}
}
+int
nfs_rcvlock(rep)
register struct nfsreq *rep;
{
@@ -1659,6 +1669,7 @@ dorecs:
* stream socket. The "waitflag" argument indicates whether or not it
* can sleep.
*/
+int
nfsrv_getstream(slp, waitflag)
register struct nfssvc_sock *slp;
int waitflag;
@@ -1666,7 +1677,7 @@ nfsrv_getstream(slp, waitflag)
register struct mbuf *m;
register char *cp1, *cp2;
register int len;
- struct mbuf *om, *m2, *recm;
+ struct mbuf *om, *m2, *recm = 0;
u_long recmark;
if (slp->ns_flag & SLP_GETSTREAM)
@@ -1763,6 +1774,7 @@ nfsrv_getstream(slp, waitflag)
/*
* Parse an RPC header.
*/
+int
nfsrv_dorec(slp, nd)
register struct nfssvc_sock *slp;
register struct nfsd *nd;
@@ -1798,6 +1810,7 @@ nfsrv_dorec(slp, nd)
* - verify it
* - fill in the cred struct.
*/
+int
nfs_getreq(nd, has_header)
register struct nfsd *nd;
int has_header;
@@ -1975,6 +1988,7 @@ nfsrv_wakenfsd(slp)
nfsd_head.nd_flag |= NFSD_CHECKSLP;
}
+int
nfs_msg(p, server, msg)
struct proc *p;
char *server, *msg;
@@ -1987,4 +2001,5 @@ nfs_msg(p, server, msg)
tpr = NULL;
tprintf(tpr, "nfs server %s: %s\n", server, msg);
tprintf_close(tpr);
+ return (0);
}
diff --git a/sys/nfsserver/nfs_srvsubs.c b/sys/nfsserver/nfs_srvsubs.c
index 5778f7d7f01a..d17cde79f60e 100644
--- a/sys/nfsserver/nfs_srvsubs.c
+++ b/sys/nfsserver/nfs_srvsubs.c
@@ -262,6 +262,7 @@ nfsm_rpchead(cr, nqnfs, procid, auth_type, auth_len, auth_str, mrest,
/*
* copies mbuf chain to the uio scatter/gather list
*/
+int
nfsm_mbuftouio(mrep, uiop, siz, dpos)
struct mbuf **mrep;
register struct uio *uiop;
@@ -336,6 +337,7 @@ nfsm_mbuftouio(mrep, uiop, siz, dpos)
/*
* copies a uio scatter/gather list to an mbuf chain...
*/
+int
nfsm_uiotombuf(uiop, mq, siz, bpos)
register struct uio *uiop;
struct mbuf **mq;
@@ -423,6 +425,7 @@ nfsm_uiotombuf(uiop, mq, siz, bpos)
* This is used by the macros nfsm_dissect and nfsm_dissecton for tough
* cases. (The macros use the vars. dpos and dpos2)
*/
+int
nfsm_disct(mdp, dposp, siz, left, cp2)
struct mbuf **mdp;
caddr_t *dposp;
@@ -485,6 +488,7 @@ nfsm_disct(mdp, dposp, siz, left, cp2)
/*
* Advance the position in the mbuf chain.
*/
+int
nfs_adv(mdp, dposp, offs, left)
struct mbuf **mdp;
caddr_t *dposp;
@@ -511,13 +515,14 @@ nfs_adv(mdp, dposp, offs, left)
/*
* Copy a string into mbufs for the hard cases...
*/
+int
nfsm_strtmbuf(mb, bpos, cp, siz)
struct mbuf **mb;
char **bpos;
char *cp;
long siz;
{
- register struct mbuf *m1, *m2;
+ register struct mbuf *m1 = 0, *m2;
long left, xfer, len, tlen;
u_long *tl;
int putsize;
@@ -576,6 +581,7 @@ nfsm_strtmbuf(mb, bpos, cp, siz)
/*
* Called once to initialize data structures...
*/
+int
nfs_init()
{
register int i;
@@ -626,6 +632,8 @@ nfs_init()
*/
nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
nfs_timer();
+
+ return (0);
}
/*
@@ -642,6 +650,7 @@ nfs_init()
* Iff vap not NULL
* copy the attributes to *vaper
*/
+int
nfs_loadattrcache(vpp, mdp, dposp, vaper)
struct vnode **vpp;
struct mbuf **mdp;
@@ -806,6 +815,7 @@ nfs_loadattrcache(vpp, mdp, dposp, vaper)
* If the cache is valid, copy contents to *vap and return 0
* otherwise return an error
*/
+int
nfs_getattrcache(vp, vaper)
register struct vnode *vp;
struct vattr *vaper;
@@ -862,6 +872,7 @@ nfs_getattrcache(vp, vaper)
/*
* Set up nameidata for a lookup() call and do it
*/
+int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
register struct nameidata *ndp;
fhandle_t *fhp;
@@ -1035,6 +1046,7 @@ nfsm_adj(mp, len, nul)
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
* - if not lockflag unlock it with VOP_UNLOCK()
*/
+int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
fhandle_t *fhp;
int lockflag;
@@ -1094,6 +1106,7 @@ nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
* The AF_INET family is handled as a special case so that address mbufs
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
*/
+int
netaddr_match(family, haddr, nam)
int family;
union nethostaddr *haddr;
diff --git a/sys/nfsserver/nfs_syscalls.c b/sys/nfsserver/nfs_syscalls.c
index 5d86b42ee20a..b00a225de340 100644
--- a/sys/nfsserver/nfs_syscalls.c
+++ b/sys/nfsserver/nfs_syscalls.c
@@ -69,6 +69,8 @@
#include <nfs/nqnfs.h>
#include <nfs/nfsrtt.h>
+void nfsrv_zapsock __P((struct nfssvc_sock *));
+
/* Global defs. */
extern u_long nfs_prog, nfs_vers;
extern int (*nfsrv_procs[NFS_NPROCS])();
@@ -106,6 +108,7 @@ struct getfh_args {
char *fname;
fhandle_t *fhp;
};
+int
getfh(p, uap, retval)
struct proc *p;
register struct getfh_args *uap;
@@ -148,6 +151,7 @@ struct nfssvc_args {
int flag;
caddr_t argp;
};
+int
nfssvc(p, uap, retval)
struct proc *p;
register struct nfssvc_args *uap;
@@ -278,6 +282,7 @@ nfssvc(p, uap, retval)
/*
* Adds a socket to the list for servicing by nfsds.
*/
+int
nfssvc_addsock(fp, mynam)
struct file *fp;
struct mbuf *mynam;
@@ -369,6 +374,7 @@ nfssvc_addsock(fp, mynam)
* Called by nfssvc() for nfsds. Just loops around servicing rpc requests
* until it is killed by a signal.
*/
+int
nfssvc_nfsd(nsd, argp, p)
struct nfsd_srvargs *nsd;
caddr_t argp;
@@ -383,7 +389,7 @@ nfssvc_nfsd(nsd, argp, p)
struct mbuf *mreq, *nam;
struct timeval starttime;
struct nfsuid *uidp;
- int error, cacherep, s;
+ int error = 0, cacherep, s;
int sotype;
s = splnet();
@@ -631,6 +637,7 @@ done:
* They do read-ahead and write-behind operations on the block I/O cache.
* Never returns unless it fails or gets killed.
*/
+int
nfssvc_iod(p)
struct proc *p;
{
@@ -683,6 +690,7 @@ nfssvc_iod(p)
* will stop using it and clear ns_flag at the end so that it will not be
* reassigned during cleanup.
*/
+void
nfsrv_zapsock(slp)
register struct nfssvc_sock *slp;
{
@@ -719,6 +727,7 @@ nfsrv_zapsock(slp)
* Get an authorization string for the uid by having the mount_nfs sitting
* on this mount point porpous out of the kernel and do it.
*/
+int
nfs_getauth(nmp, rep, cred, auth_type, auth_str, auth_len)
register struct nfsmount *nmp;
struct nfsreq *rep;
diff --git a/sys/powerpc/include/_limits.h b/sys/powerpc/include/_limits.h
index 5aed8709f850..e507313a046b 100644
--- a/sys/powerpc/include/_limits.h
+++ b/sys/powerpc/include/_limits.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1988 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,40 +30,60 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
- * $Id: limits.h,v 1.5 1994/02/26 00:56:02 ache Exp $
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
*/
#ifndef _MACHINE_LIMITS_H_
#define _MACHINE_LIMITS_H_ 1
#define CHAR_BIT 8 /* number of bits in a char */
-#define MB_LEN_MAX 6 /* allow 21-bit UTF2 */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
-#define SCHAR_MIN (-0x7f-1) /* max value for a signed char */
-#define SCHAR_MAX 0x7f /* min value for a signed char */
-#define UCHAR_MAX 0xff /* max value for an unsigned char */
-#define CHAR_MAX 0x7f /* max value for a char */
-#define CHAR_MIN (-0x7f-1) /* min value for a char */
+#define CLK_TCK 128 /* ticks per second */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 127 /* min value for a signed char */
+#define SCHAR_MIN (-128) /* max value for a signed char */
+
+#define UCHAR_MAX 255 /* max value for an unsigned char */
+#define CHAR_MAX 127 /* max value for a char */
+#define CHAR_MIN (-128) /* min value for a char */
-#define USHRT_MAX 0xffff /* max value for an unsigned short */
-#define SHRT_MAX 0x7fff /* max value for a short */
-#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+#define USHRT_MAX 65535 /* max value for an unsigned short */
+#define SHRT_MAX 32767 /* max value for a short */
+#define SHRT_MIN (-32768) /* min value for a short */
#define UINT_MAX 0xffffffff /* max value for an unsigned int */
-#define INT_MAX 0x7fffffff /* max value for an int */
-#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+#define INT_MAX 2147483647 /* max value for an int */
+#define INT_MIN (-2147483647-1) /* min value for an int */
#define ULONG_MAX 0xffffffff /* max value for an unsigned long */
-#define LONG_MAX 0x7fffffff /* max value for a long */
-#define LONG_MIN (-0x7fffffff-1) /* min value for a long */
+#define LONG_MAX 2147483647 /* max value for a long */
+#define LONG_MIN (-2147483647-1) /* min value for a long */
-#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
-#define CLK_TCK 128 /* ticks per second */
-#define UQUAD_MAX 0xffffffffffffffffLL /* max unsigned quad */
-#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */
-#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */
-#endif
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ANSI_SOURCE */
#endif /* _MACHINE_LIMITS_H_ */
diff --git a/sys/powerpc/include/limits.h b/sys/powerpc/include/limits.h
index 5aed8709f850..e507313a046b 100644
--- a/sys/powerpc/include/limits.h
+++ b/sys/powerpc/include/limits.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1988 The Regents of the University of California.
- * All rights reserved.
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,40 +30,60 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
- * $Id: limits.h,v 1.5 1994/02/26 00:56:02 ache Exp $
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
*/
#ifndef _MACHINE_LIMITS_H_
#define _MACHINE_LIMITS_H_ 1
#define CHAR_BIT 8 /* number of bits in a char */
-#define MB_LEN_MAX 6 /* allow 21-bit UTF2 */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
-#define SCHAR_MIN (-0x7f-1) /* max value for a signed char */
-#define SCHAR_MAX 0x7f /* min value for a signed char */
-#define UCHAR_MAX 0xff /* max value for an unsigned char */
-#define CHAR_MAX 0x7f /* max value for a char */
-#define CHAR_MIN (-0x7f-1) /* min value for a char */
+#define CLK_TCK 128 /* ticks per second */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 127 /* min value for a signed char */
+#define SCHAR_MIN (-128) /* max value for a signed char */
+
+#define UCHAR_MAX 255 /* max value for an unsigned char */
+#define CHAR_MAX 127 /* max value for a char */
+#define CHAR_MIN (-128) /* min value for a char */
-#define USHRT_MAX 0xffff /* max value for an unsigned short */
-#define SHRT_MAX 0x7fff /* max value for a short */
-#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+#define USHRT_MAX 65535 /* max value for an unsigned short */
+#define SHRT_MAX 32767 /* max value for a short */
+#define SHRT_MIN (-32768) /* min value for a short */
#define UINT_MAX 0xffffffff /* max value for an unsigned int */
-#define INT_MAX 0x7fffffff /* max value for an int */
-#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+#define INT_MAX 2147483647 /* max value for an int */
+#define INT_MIN (-2147483647-1) /* min value for an int */
#define ULONG_MAX 0xffffffff /* max value for an unsigned long */
-#define LONG_MAX 0x7fffffff /* max value for a long */
-#define LONG_MIN (-0x7fffffff-1) /* min value for a long */
+#define LONG_MAX 2147483647 /* max value for a long */
+#define LONG_MIN (-2147483647-1) /* min value for a long */
-#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
-#define CLK_TCK 128 /* ticks per second */
-#define UQUAD_MAX 0xffffffffffffffffLL /* max unsigned quad */
-#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */
-#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */
-#endif
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ANSI_SOURCE */
#endif /* _MACHINE_LIMITS_H_ */
diff --git a/sys/scsi/cd.c b/sys/scsi/cd.c
index 0d64665856c2..899db937f71a 100644
--- a/sys/scsi/cd.c
+++ b/sys/scsi/cd.c
@@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
*
- * $Id: cd.c,v 1.17 1994/03/23 09:15:51 davidg Exp $
+ * $Id: cd.c,v 1.18 1994/04/20 07:06:51 davidg Exp $
*/
#define SPLCD splbio
@@ -496,7 +496,7 @@ cdstart(unit)
}
dp = &cd->buf_queue;
if ((bp = dp->b_actf) != NULL) { /* yes, an assign */
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
} else {
return;
}
@@ -689,7 +689,7 @@ cdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
args->data_format, args->track, &data, len)) {
break;
}
- len = MIN(len, ((data.header.data_len[0] << 8) + data.header.data_len[1] +
+ len = min(len, ((data.header.data_len[0] << 8) + data.header.data_len[1] +
sizeof(struct cd_sub_channel_header)));
if (copyout(&data, args->data, len) != 0) {
error = EFAULT;
@@ -728,7 +728,7 @@ cdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
(struct cd_toc_entry *)&data,
len + sizeof(struct ioc_toc_header)))
break;
- len = MIN(len, ((((th->len & 0xff) << 8) + ((th->len >> 8))) - (sizeof(th->starting_track) + sizeof(th->ending_track))));
+ len = min(len, ((((th->len & 0xff) << 8) + ((th->len >> 8))) - (sizeof(th->starting_track) + sizeof(th->ending_track))));
if (copyout(data.entries, te->data, len) != 0) {
error = EFAULT;
}
diff --git a/sys/scsi/scsi_base.c b/sys/scsi/scsi_base.c
index 02af5f210723..cb7f009a9fba 100644
--- a/sys/scsi/scsi_base.c
+++ b/sys/scsi/scsi_base.c
@@ -8,24 +8,19 @@
* file.
*
* Written by Julian Elischer (julian@dialix.oz.au)
- * $Id: scsi_base.c,v 1.7 1994/04/20 07:06:54 davidg Exp $
+ * $Id: scsi_base.c,v 1.8 1994/05/19 22:21:05 jkh Exp $
*/
#define SPLSD splbio
#define ESUCCESS 0
#include <sys/types.h>
#include <sys/param.h>
-#include <machine/param.h>
-#include <vm/vm_statistics.h>
-#include <vm/vm_param.h>
-#include <vm/lock.h>
-#include <machine/pmap.h>
-#include <machine/vmparam.h>
-#include "systm.h"
+#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/errno.h>
+#include <vm/vm.h>
#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>
diff --git a/sys/scsi/scsi_ioctl.c b/sys/scsi/scsi_ioctl.c
index a52b3a5eebd6..d35ad2de53d6 100644
--- a/sys/scsi/scsi_ioctl.c
+++ b/sys/scsi/scsi_ioctl.c
@@ -6,21 +6,15 @@
*
*
*/
-#include <sys/types.h>
#include <sys/param.h>
-#include <machine/param.h>
-#include <vm/vm_statistics.h>
-#include <vm/vm_param.h>
-#include <vm/lock.h>
-#include <machine/pmap.h>
-#include <machine/vmparam.h>
-#include "systm.h"
+#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#define b_screq b_driver1 /* a patch in buf.h */
#define b_sc_link b_driver2 /* a patch in buf.h */
#include <sys/proc.h>
+#include <vm/vm.h>
#include "scbus.h"
#include <scsi/scsi_all.h>
@@ -227,7 +221,7 @@ errval scsi_do_ioctl(struct scsi_link *sc_link, int cmd, caddr_t addr, int f)
SC_DEBUG(sc_link,SDEV_DB2,("scsi_do_ioctl(0x%x)\n",cmd));
switch(cmd)
{
-#ifndef NetBSD
+#if 0
case SCIOCCOMMAND:
{
/*
diff --git a/sys/scsi/sd.c b/sys/scsi/sd.c
index 783de9935ab6..691430e42646 100644
--- a/sys/scsi/sd.c
+++ b/sys/scsi/sd.c
@@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
*
- * $Id: sd.c,v 1.22 1994/04/05 03:23:32 davidg Exp $
+ * $Id: sd.c,v 1.23 1994/04/20 07:06:57 davidg Exp $
*/
#define SPLSD splbio
@@ -33,6 +33,7 @@
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/errno.h>
+#include <sys/dkstat.h>
#include <sys/disklabel.h>
#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
@@ -72,6 +73,7 @@ int Debugger();
#define WHOLE_DISK(unit) ( (unit << UNITSHIFT) + RAW_PART )
+extern char *readdisklabel();
errval sdgetdisklabel __P((unsigned char unit));
errval sd_get_parms __P((int unit, int flags));
void sdstrategy __P((struct buf *));
@@ -425,15 +427,24 @@ sdstrategy(bp)
/*
* Use a bounce buffer if necessary
*/
+/*
#ifndef NOBOUNCE
if (sd->sc_link->flags & SDEV_BOUNCE)
vm_bounce_alloc(bp);
#endif
+*/
/*
* Place it in the queue of disk activities for this disk
*/
+/*
cldisksort(dp, bp, 64*1024);
+*/
+if ((bp->b_blkno < 0) || (bp->b_bcount > 3000000) /* || (bp->b_flags & B_WRITE) */) {
+ printf("blkno=%d bcount=%d flags=0x%x\n", bp->b_blkno, bp->b_bcount, bp->b_flags);
+ Debugger("");
+}
+ disksort(dp, bp);
/*
* Tell the device to get going on the transfer if it's
@@ -504,7 +515,7 @@ sdstart(unit)
if ((bp = dp->b_actf) == NULL) { /* yes, an assign */
return;
}
- dp->b_actf = bp->av_forw;
+ dp->b_actf = bp->b_actf;
/*
* If the device has become invalid, abort all the
@@ -610,11 +621,13 @@ sdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
else
error = setdisklabel(&sd->disklabel,
(struct disklabel *)addr,
- /*(sd->flags & DKFL_BSDLABEL) ? sd->openparts : */ 0,
+ /*(sd->flags & DKFL_BSDLABEL) ? sd->openparts : */ 0
#ifdef NetBSD
- &sd->cpudisklabel
+ ,&sd->cpudisklabel
#else
- sd->dosparts
+#if 0
+ ,sd->dosparts
+#endif
#endif
);
if (error == 0) {
@@ -637,11 +650,13 @@ sdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
else {
error = setdisklabel(&sd->disklabel,
(struct disklabel *)addr,
- /*(sd->flags & SDHAVELABEL) ? sd->openparts : */ 0,
+ /*(sd->flags & SDHAVELABEL) ? sd->openparts : */ 0
#ifdef NetBSD
- &sd->cpudisklabel
+ ,&sd->cpudisklabel
#else
- sd->dosparts
+#if 0
+ ,sd->dosparts
+#endif
#endif
);
if (!error) {
@@ -655,11 +670,13 @@ sdioctl(dev_t dev, int cmd, caddr_t addr, int flag)
wlab = sd->wlabel;
sd->wlabel = 1;
error = writedisklabel(dev, sdstrategy,
- &sd->disklabel,
+ &sd->disklabel
#ifdef NetBSD
- &sd->cpudisklabel
+ ,&sd->cpudisklabel
#else
- sd->dosparts
+#if 0
+ ,sd->dosparts
+#endif
#endif
);
sd->wlabel = wlab;
@@ -685,7 +702,9 @@ sdgetdisklabel(unsigned char unit)
{
char *errstring;
struct sd_data *sd = sd_data[unit];
+ dev_t dev;
+ dev = makedev(0, (unit << UNITSHIFT) + 3);
/*
* If the inflo is already loaded, use it
*/
@@ -714,15 +733,13 @@ sdgetdisklabel(unsigned char unit)
/*
* Call the generic disklabel extraction routine
*/
- if (errstring = readdisklabel(makedev(0, (unit << UNITSHIFT) + 3),
+ if (errstring = readdisklabel(makedev(0, (unit << UNITSHIFT) + 3),
sdstrategy,
- &sd->disklabel,
+ &sd->disklabel
#ifdef NetBSD
- &sd->cpudisklabel
+ ,&sd->cpu_disklabel,
#else
- sd->dosparts,
- 0,
- 0
+ ,sd->dosparts, 0
#endif
)) {
printf("sd%d: %s\n", unit, errstring);
diff --git a/sys/sys/bio.h b/sys/sys/bio.h
index e6c329f239da..ac35dae9b8b1 100644
--- a/sys/sys/bio.h
+++ b/sys/sys/bio.h
@@ -54,6 +54,7 @@ struct buf {
struct buf *b_actf, **b_actb; /* Device driver queue when active. */
struct proc *b_proc; /* Associated proc; NULL if kernel. */
volatile long b_flags; /* B_* flags. */
+ int b_qindex; /* buffer queue index */
int b_error; /* Errno value. */
long b_bufsize; /* Allocated buffer size. */
long b_bcount; /* Valid bytes in buffer. */
@@ -75,6 +76,13 @@ struct buf {
struct ucred *b_wcred; /* Write credentials reference. */
int b_validoff; /* Offset in buffer of valid region. */
int b_validend; /* Offset of end of valid region. */
+ daddr_t b_pblkno; /* physical block number */
+ caddr_t b_savekva; /* saved kva for transfer while bouncing */
+ TAILQ_HEAD(b_clusterhd,buf) b_cluster; /* low level clustering */
+ void *b_driver1; /* for private use by the driver */
+ void *b_driver2; /* for private use by the driver */
+ void *b_spc;
+
};
/* Device driver compatibility definitions. */
@@ -114,6 +122,8 @@ struct buf {
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_WRITEINPROG 0x01000000 /* Write in progress. */
#define B_XXX 0x02000000 /* Debugging flag. */
+#define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
+#define B_BOUNCE 0x80000000 /* bounce buffer flag */
/*
* This structure describes a clustered I/O. It is stored in the b_saveaddr
@@ -148,20 +158,20 @@ char *buffers; /* The buffer contents. */
int bufpages; /* Number of memory pages in the buffer pool. */
struct buf *swbuf; /* Swap I/O buffer headers. */
int nswbuf; /* Number of swap I/O buffer headers. */
-struct buf bswlist; /* Head of swap I/O buffer headers free list. */
+TAILQ_HEAD(swqueue, buf) bswlist;
struct buf *bclnlist; /* Head of cleaned page list. */
__BEGIN_DECLS
-int allocbuf __P((struct buf *, int));
-int bawrite __P((struct buf *));
-int bdwrite __P((struct buf *));
+void allocbuf __P((struct buf *, int));
+void bawrite __P((struct buf *));
+void bdwrite __P((struct buf *));
void biodone __P((struct buf *));
int biowait __P((struct buf *));
int bread __P((struct vnode *, daddr_t, int,
struct ucred *, struct buf **));
int breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int,
struct ucred *, struct buf **));
-int brelse __P((struct buf *));
+void brelse __P((struct buf *));
void bufinit __P((void));
int bwrite __P((struct buf *));
void cluster_callback __P((struct buf *));
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index e6c329f239da..ac35dae9b8b1 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -54,6 +54,7 @@ struct buf {
struct buf *b_actf, **b_actb; /* Device driver queue when active. */
struct proc *b_proc; /* Associated proc; NULL if kernel. */
volatile long b_flags; /* B_* flags. */
+ int b_qindex; /* buffer queue index */
int b_error; /* Errno value. */
long b_bufsize; /* Allocated buffer size. */
long b_bcount; /* Valid bytes in buffer. */
@@ -75,6 +76,13 @@ struct buf {
struct ucred *b_wcred; /* Write credentials reference. */
int b_validoff; /* Offset in buffer of valid region. */
int b_validend; /* Offset of end of valid region. */
+ daddr_t b_pblkno; /* physical block number */
+ caddr_t b_savekva; /* saved kva for transfer while bouncing */
+ TAILQ_HEAD(b_clusterhd,buf) b_cluster; /* low level clustering */
+ void *b_driver1; /* for private use by the driver */
+ void *b_driver2; /* for private use by the driver */
+ void *b_spc;
+
};
/* Device driver compatibility definitions. */
@@ -114,6 +122,8 @@ struct buf {
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_WRITEINPROG 0x01000000 /* Write in progress. */
#define B_XXX 0x02000000 /* Debugging flag. */
+#define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
+#define B_BOUNCE 0x80000000 /* bounce buffer flag */
/*
* This structure describes a clustered I/O. It is stored in the b_saveaddr
@@ -148,20 +158,20 @@ char *buffers; /* The buffer contents. */
int bufpages; /* Number of memory pages in the buffer pool. */
struct buf *swbuf; /* Swap I/O buffer headers. */
int nswbuf; /* Number of swap I/O buffer headers. */
-struct buf bswlist; /* Head of swap I/O buffer headers free list. */
+TAILQ_HEAD(swqueue, buf) bswlist;
struct buf *bclnlist; /* Head of cleaned page list. */
__BEGIN_DECLS
-int allocbuf __P((struct buf *, int));
-int bawrite __P((struct buf *));
-int bdwrite __P((struct buf *));
+void allocbuf __P((struct buf *, int));
+void bawrite __P((struct buf *));
+void bdwrite __P((struct buf *));
void biodone __P((struct buf *));
int biowait __P((struct buf *));
int bread __P((struct vnode *, daddr_t, int,
struct ucred *, struct buf **));
int breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int,
struct ucred *, struct buf **));
-int brelse __P((struct buf *));
+void brelse __P((struct buf *));
void bufinit __P((void));
int bwrite __P((struct buf *));
void cluster_callback __P((struct buf *));
diff --git a/sys/sys/cdefs.h b/sys/sys/cdefs.h
index c104b9e964dd..3c50711f5303 100644
--- a/sys/sys/cdefs.h
+++ b/sys/sys/cdefs.h
@@ -105,7 +105,7 @@
* these work for GNU C++ (modulo a slight glitch in the C++ grammar
* in the distribution version of 2.5.5).
*/
-#if !defined(__GNUC__) || __GNUC__ < 2 || __GNUC_MINOR__ < 5
+#if !defined(__GNUC__) || __GNUC__ < 2
#define __attribute__(x) /* delete __attribute__ if non-gcc or gcc1 */
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
#define __dead __volatile
diff --git a/sys/sys/clist.h b/sys/sys/clist.h
index bad26477015d..3e05cf6b7700 100644
--- a/sys/sys/clist.h
+++ b/sys/sys/clist.h
@@ -34,9 +34,9 @@
*/
struct cblock {
- struct cblock *c_next; /* next cblock in queue */
- char c_quote[CBQSIZE]; /* quoted characters */
- char c_info[CBSIZE]; /* characters */
+ struct cblock *c_next; /* next cblock in queue */
+ unsigned char c_quote[CBQSIZE]; /* quoted characters */
+ unsigned char c_info[CBSIZE]; /* characters */
};
#ifdef KERNEL
diff --git a/sys/sys/cons.h b/sys/sys/cons.h
index 5e0f30d88b2a..2766193d03f0 100644
--- a/sys/sys/cons.h
+++ b/sys/sys/cons.h
@@ -69,12 +69,12 @@ extern struct tty *cn_tty;
struct proc; struct uio;
/* cdevsw[] entries */
-extern int cnopen(int /*dev_t*/, int, int, struct proc *);
-extern int cnclose(int /*dev_t*/, int, int, struct proc *);
-extern int cnread(int /*dev_t*/, struct uio *, int);
-extern int cnwrite(int /*dev_t*/, struct uio *, int);
-extern int cnioctl(int /*dev_t*/, int, caddr_t, int, struct proc *);
-extern int cnselect(int /*dev_t*/, int, struct proc *);
+extern int cnopen(dev_t, int, int, struct proc *);
+extern int cnclose(dev_t, int, int, struct proc *);
+extern int cnread(dev_t, struct uio *, int);
+extern int cnwrite(dev_t, struct uio *, int);
+extern int cnioctl(dev_t, int, caddr_t, int, struct proc *);
+extern int cnselect(dev_t, int, struct proc *);
/* other kernel entry points */
extern void cninit(void);
diff --git a/sys/sys/disklabel.h b/sys/sys/disklabel.h
index a25ee29e363d..60b8f8f8aaf9 100644
--- a/sys/sys/disklabel.h
+++ b/sys/sys/disklabel.h
@@ -164,6 +164,8 @@ struct disklabel {
#define p_sgs __partition_u1.sgs
} d_partitions[MAXPARTITIONS]; /* actually may be more */
};
+struct cpu_disklabel {
+};
#else /* LOCORE */
/*
* offsets for asm boot files.
@@ -188,6 +190,11 @@ struct disklabel {
#define DTYPE_HPFL 8 /* HP Fiber-link */
#define DTYPE_FLOPPY 10 /* floppy */
+/* d_subtype values: */
+#define DSTYPE_INDOSPART 0x8 /* is inside dos partition */
+#define DSTYPE_DOSPART(s) ((s) & 3) /* dos partition number */
+#define DSTYPE_GEOMETRY 0x10 /* drive params in label */
+
#ifdef DKTYPENAMES
static char *dktypenames[] = {
"unknown",
@@ -300,6 +307,31 @@ struct partinfo {
struct partition *part;
};
+/* DOS partition table -- located in boot block */
+
+#define DOSBBSECTOR 0 /* DOS boot block relative sector number */
+#define DOSPARTOFF 446
+#define NDOSPART 4
+
+struct dos_partition {
+ unsigned char dp_flag; /* bootstrap flags */
+ unsigned char dp_shd; /* starting head */
+ unsigned char dp_ssect; /* starting sector */
+ unsigned char dp_scyl; /* starting cylinder */
+ unsigned char dp_typ; /* partition type */
+#define DOSPTYP_386BSD 0xa5 /* 386BSD partition type */
+ unsigned char dp_ehd; /* end head */
+ unsigned char dp_esect; /* end sector */
+ unsigned char dp_ecyl; /* end cylinder */
+ unsigned long dp_start; /* absolute starting sector number */
+ unsigned long dp_size; /* partition size in sectors */
+};
+
+extern struct dos_partition dos_partitions[NDOSPART];
+
+#define DPSECT(s) ((s) & 0x3f) /* isolate relevant bits of sector */
+#define DPCYL(c, s) ((c) + (((s) & 0xc0)<<2)) /* and those that are cylinder */
+
/*
* Disk-specific ioctls.
*/
diff --git a/sys/sys/diskmbr.h b/sys/sys/diskmbr.h
index a25ee29e363d..60b8f8f8aaf9 100644
--- a/sys/sys/diskmbr.h
+++ b/sys/sys/diskmbr.h
@@ -164,6 +164,8 @@ struct disklabel {
#define p_sgs __partition_u1.sgs
} d_partitions[MAXPARTITIONS]; /* actually may be more */
};
+struct cpu_disklabel {
+};
#else /* LOCORE */
/*
* offsets for asm boot files.
@@ -188,6 +190,11 @@ struct disklabel {
#define DTYPE_HPFL 8 /* HP Fiber-link */
#define DTYPE_FLOPPY 10 /* floppy */
+/* d_subtype values: */
+#define DSTYPE_INDOSPART 0x8 /* is inside dos partition */
+#define DSTYPE_DOSPART(s) ((s) & 3) /* dos partition number */
+#define DSTYPE_GEOMETRY 0x10 /* drive params in label */
+
#ifdef DKTYPENAMES
static char *dktypenames[] = {
"unknown",
@@ -300,6 +307,31 @@ struct partinfo {
struct partition *part;
};
+/* DOS partition table -- located in boot block */
+
+#define DOSBBSECTOR 0 /* DOS boot block relative sector number */
+#define DOSPARTOFF 446
+#define NDOSPART 4
+
+struct dos_partition {
+ unsigned char dp_flag; /* bootstrap flags */
+ unsigned char dp_shd; /* starting head */
+ unsigned char dp_ssect; /* starting sector */
+ unsigned char dp_scyl; /* starting cylinder */
+ unsigned char dp_typ; /* partition type */
+#define DOSPTYP_386BSD 0xa5 /* 386BSD partition type */
+ unsigned char dp_ehd; /* end head */
+ unsigned char dp_esect; /* end sector */
+ unsigned char dp_ecyl; /* end cylinder */
+ unsigned long dp_start; /* absolute starting sector number */
+ unsigned long dp_size; /* partition size in sectors */
+};
+
+extern struct dos_partition dos_partitions[NDOSPART];
+
+#define DPSECT(s) ((s) & 0x3f) /* isolate relevant bits of sector */
+#define DPCYL(c, s) ((c) + (((s) & 0xc0)<<2)) /* and those that are cylinder */
+
/*
* Disk-specific ioctls.
*/
diff --git a/sys/sys/diskpc98.h b/sys/sys/diskpc98.h
index a25ee29e363d..60b8f8f8aaf9 100644
--- a/sys/sys/diskpc98.h
+++ b/sys/sys/diskpc98.h
@@ -164,6 +164,8 @@ struct disklabel {
#define p_sgs __partition_u1.sgs
} d_partitions[MAXPARTITIONS]; /* actually may be more */
};
+struct cpu_disklabel {
+};
#else /* LOCORE */
/*
* offsets for asm boot files.
@@ -188,6 +190,11 @@ struct disklabel {
#define DTYPE_HPFL 8 /* HP Fiber-link */
#define DTYPE_FLOPPY 10 /* floppy */
+/* d_subtype values: */
+#define DSTYPE_INDOSPART 0x8 /* is inside dos partition */
+#define DSTYPE_DOSPART(s) ((s) & 3) /* dos partition number */
+#define DSTYPE_GEOMETRY 0x10 /* drive params in label */
+
#ifdef DKTYPENAMES
static char *dktypenames[] = {
"unknown",
@@ -300,6 +307,31 @@ struct partinfo {
struct partition *part;
};
+/* DOS partition table -- located in boot block */
+
+#define DOSBBSECTOR 0 /* DOS boot block relative sector number */
+#define DOSPARTOFF 446
+#define NDOSPART 4
+
+struct dos_partition {
+ unsigned char dp_flag; /* bootstrap flags */
+ unsigned char dp_shd; /* starting head */
+ unsigned char dp_ssect; /* starting sector */
+ unsigned char dp_scyl; /* starting cylinder */
+ unsigned char dp_typ; /* partition type */
+#define DOSPTYP_386BSD 0xa5 /* 386BSD partition type */
+ unsigned char dp_ehd; /* end head */
+ unsigned char dp_esect; /* end sector */
+ unsigned char dp_ecyl; /* end cylinder */
+ unsigned long dp_start; /* absolute starting sector number */
+ unsigned long dp_size; /* partition size in sectors */
+};
+
+extern struct dos_partition dos_partitions[NDOSPART];
+
+#define DPSECT(s) ((s) & 0x3f) /* isolate relevant bits of sector */
+#define DPCYL(c, s) ((c) + (((s) & 0xc0)<<2)) /* and those that are cylinder */
+
/*
* Disk-specific ioctls.
*/
diff --git a/sys/sys/exec.h b/sys/sys/exec.h
index 443e14434148..be8cd7194dd6 100644
--- a/sys/sys/exec.h
+++ b/sys/sys/exec.h
@@ -66,6 +66,6 @@ struct ps_strings {
*/
struct execve_args {
char *fname;
- char **argp;
- char **envp;
+ char **argv;
+ char **envv;
};
diff --git a/sys/sys/imgact.h b/sys/sys/imgact.h
index 8cba997e6c10..6681b7272738 100644
--- a/sys/sys/imgact.h
+++ b/sys/sys/imgact.h
@@ -40,12 +40,6 @@
#include "namei.h"
#include "vnode.h"
-struct execve_args {
- char *fname; /* file name */
- char **argv; /* pointer to table of argument pointers */
- char **envv; /* pointer to table of environment pointers */
-};
-
struct image_params {
struct proc *proc; /* our process struct */
struct execve_args *uap; /* syscall arguments */
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
index 682e6c8c1940..aafa8430eaaa 100644
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -44,6 +44,9 @@
extern long hostid;
extern char hostname[MAXHOSTNAMELEN];
extern int hostnamelen;
+extern char domainname[MAXHOSTNAMELEN];
+extern int domainnamelen;
+
/* 1.2 */
extern volatile struct timeval mono_time;
@@ -57,3 +60,24 @@ extern int hz; /* system clock's frequency */
extern int stathz; /* statistics clock's frequency */
extern int profhz; /* profiling clock's frequency */
extern int lbolt; /* once a second sleep address */
+
+/*
+ * The following macros are used to declare global sets of objects, which
+ * are collected by the linker into a `struct linker_set' as defined below.
+ *
+ * NB: the constants defined below must match those defined in
+ * ld/ld.h. Since their calculation requires arithmetic, we
+ * can't name them symbolically (e.g., 23 is N_SETT | N_EXT).
+ */
+#define MAKE_SET(set, sym, type) \
+ asm(".stabs \"_" #set "\", " #type ", 0, 0, _" #sym)
+#define TEXT_SET(set, sym) MAKE_SET(set, sym, 23)
+#define DATA_SET(set, sym) MAKE_SET(set, sym, 25)
+#define BSS_SET(set, sym) MAKE_SET(set, sym, 27)
+#define ABS_SET(set, sym) MAKE_SET(set, sym, 21)
+
+struct linker_set {
+ int ls_length;
+ caddr_t ls_items[1]; /* really ls_length of them, trailing NULL */
+};
+
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index ba67bda1f5a4..aa10965fedac 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -107,7 +107,8 @@
#define M_ISOFSMNT 57 /* ISOFS mount structure */
#define M_ISOFSNODE 58 /* ISOFS vnode private part */
#define M_TEMP 74 /* misc temporary data buffers */
-#define M_LAST 75 /* Must be last type + 1 */
+#define M_TTYS 75 /* tty data structures */
+#define M_LAST 76 /* Must be last type + 1 */
#define INITKMEMNAMES { \
"free", /* 0 M_FREE */ \
@@ -173,6 +174,7 @@
NULL, NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, NULL, NULL, \
"temp", /* 74 M_TEMP */ \
+ "ttys", /* 75 M_TTYS */ \
}
struct kmemstats {
diff --git a/sys/sys/mtio.h b/sys/sys/mtio.h
index 7b4ef0c017cb..e5e696574547 100644
--- a/sys/sys/mtio.h
+++ b/sys/sys/mtio.h
@@ -33,6 +33,9 @@
* @(#)mtio.h 8.1 (Berkeley) 6/2/93
*/
+#ifndef _SYS_MTIO_H_
+#define _SYS_MTIO_H_ 1
+
/*
* Structures and definitions for mag tape io control commands
*/
@@ -55,6 +58,21 @@ struct mtop {
#define MTCACHE 8 /* enable controller cache */
#define MTNOCACHE 9 /* disable controller cache */
+#if defined(__FreeBSD__)
+/* Set block size for device. If device is a variable size dev */
+/* a non zero parameter will change the device to a fixed block size */
+/* device with block size set to that of the parameter passed in. */
+/* Resetting the block size to 0 will restore the device to a variable */
+/* block size device. */
+
+#define MTSETBSIZ 10
+
+/* Set density values for device. Thye aredefined in the SCSI II spec */
+/* and range from 0 to 0x17. Sets the value for the openned mode only */
+
+#define MTSETDNSTY 11
+#endif
+
/* structure for MTIOCGET - mag tape get status command */
struct mtget {
@@ -64,6 +82,18 @@ struct mtget {
short mt_erreg; /* ``error'' register */
/* end device-dependent registers */
short mt_resid; /* residual count */
+#if defined (__FreeBSD__)
+ daddr_t mt_blksiz; /* presently operatin blocksize */
+ daddr_t mt_density; /* presently operatin density */
+ daddr_t mt_blksiz0; /* blocksize for mode 0 */
+ daddr_t mt_blksiz1; /* blocksize for mode 1 */
+ daddr_t mt_blksiz2; /* blocksize for mode 2 */
+ daddr_t mt_blksiz3; /* blocksize for mode 3 */
+ daddr_t mt_density0; /* density for mode 0 */
+ daddr_t mt_density1; /* density for mode 1 */
+ daddr_t mt_density2; /* density for mode 2 */
+ daddr_t mt_density3; /* density for mode 3 */
+#endif
/* the following two are not yet implemented */
daddr_t mt_fileno; /* file number of current position */
daddr_t mt_blkno; /* block number of current position */
@@ -102,7 +132,7 @@ struct mtget {
#define MTIOCEEOT _IO('m', 4) /* enable EOT error */
#ifndef KERNEL
-#define DEFTAPE "/dev/rmt12"
+#define DEFTAPE "/dev/nrst0"
#endif
#ifdef KERNEL
@@ -118,3 +148,4 @@ struct mtget {
#define T_6250BPI 020 /* select 6250 bpi */
#define T_BADBPI 030 /* undefined selection */
#endif
+#endif /* _SYS_MTIO_H_ */
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 91bdfd8facc5..661af88f8eb0 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -134,7 +134,7 @@
#define clrnd(i) (((i) + (CLSIZE-1)) &~ (CLSIZE-1))
#endif
-#define CBLOCK 64 /* Clist block size, must be a power of 2. */
+#define CBLOCK 128 /* Clist block size, must be a power of 2. */
#define CBQSIZE (CBLOCK/NBBY) /* Quote bytes/cblock - can do better. */
/* Data chars/clist. */
#define CBSIZE (CBLOCK - sizeof(struct cblock *) - CBQSIZE)
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index bbe60cddcac7..883227e829a3 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -96,7 +96,8 @@ struct proc {
int p_flag; /* P_* flags. */
char p_stat; /* S* process status. */
- char p_pad1[3];
+ char p_lock; /* process lock count */
+ char p_pad1[2];
pid_t p_pid; /* Process identifier. */
struct proc *p_hash; /* Hashed based on p_pid for kill+exit+... */
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index 8df8eb4fc51f..98e85dd021fd 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -160,6 +160,10 @@
#define SYS_statfs 157
#define SYS_fstatfs 158
#define SYS_getfh 161
+#define SYS_getdomainname 162
+#define SYS_setdomainname 163
+#define SYS_uname 164
+#define SYS_sysarch 165
#define SYS_shmsys 171
#define SYS_setgid 181
#define SYS_setegid 182
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 91cb64bd5fa7..ba2f12b69f6e 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -38,6 +38,8 @@
* @(#)systm.h 8.4 (Berkeley) 2/23/94
*/
+#include <machine/cpufunc.h>
+
/*
* The `securelevel' variable controls the security level of the system.
* It can only be decreased by process 1 (/sbin/init).
@@ -147,6 +149,7 @@ int suword __P((void *base, int word));
int suiword __P((void *base, int word));
int hzto __P((struct timeval *tv));
+typedef void (*timeout_func_t)(void *);
void timeout __P((void (*func)(void *), void *arg, int ticks));
void untimeout __P((void (*func)(void *), void *arg));
void realitexpire __P((void *));
diff --git a/sys/sys/termios.h b/sys/sys/termios.h
index 4ad04a10fb19..09c34a10466b 100644
--- a/sys/sys/termios.h
+++ b/sys/sys/termios.h
@@ -131,8 +131,9 @@
#define CLOCAL 0x00008000 /* ignore modem status lines */
#ifndef _POSIX_SOURCE
#define CCTS_OFLOW 0x00010000 /* CTS flow control of output */
-#define CRTSCTS CCTS_OFLOW /* ??? */
+#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW)
#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */
+#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */
#define MDMBUF 0x00100000 /* flow control output via Carrier */
#endif
diff --git a/sys/sys/ttycom.h b/sys/sys/ttycom.h
index a12d8d00354c..3dade47daad4 100644
--- a/sys/sys/ttycom.h
+++ b/sys/sys/ttycom.h
@@ -120,9 +120,17 @@ struct winsize {
#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */
#define TIOCSIG _IO('t', 95) /* pty: generate signal */
#define TIOCDRAIN _IO('t', 94) /* wait till output drained */
+#define TIOCMSBIDIR _IOW('t', 93, int) /* modem: set bidir cap. */
+#define TIOCMGBIDIR _IOR('t', 92, int) /* modem: get bidir cap. */
+#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */
+#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */
+#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* get timestamp of
+ last interrupt for xntp. */
#define TTYDISC 0 /* termios tty line discipline */
#define TABLDISC 3 /* tablet discipline */
#define SLIPDISC 4 /* serial IP discipline */
+#define PPPDISC 5 /* PPP discipline */
+
#endif /* !_SYS_TTYCOM_H_ */
diff --git a/sys/sys/un.h b/sys/sys/un.h
index 3e214a26bb5d..dcbf800c113f 100644
--- a/sys/sys/un.h
+++ b/sys/sys/un.h
@@ -42,10 +42,7 @@ struct sockaddr_un {
char sun_path[104]; /* path name (gag) */
};
-#ifdef KERNEL
-int unp_discard();
-#else
-
+#ifndef KERNEL
/* actual length of an initialized sockaddr_un */
#define SUN_LEN(su) \
(sizeof(*(su)) - sizeof((su)->sun_path) + strlen((su)->sun_path))
diff --git a/sys/sys/utsname.h b/sys/sys/utsname.h
index aa0f2c75ab62..60e6091067ba 100644
--- a/sys/sys/utsname.h
+++ b/sys/sys/utsname.h
@@ -39,18 +39,30 @@
#ifndef _SYS_UTSNAME_H
#define _SYS_UTSNAME_H
+#define SYS_NMLN 32
+
struct utsname {
- char sysname[256]; /* Name of this OS. */
- char nodename[256]; /* Name of this network node. */
- char release[256]; /* Release level. */
- char version[256]; /* Version level. */
- char machine[256]; /* Hardware type. */
+ char sysname[SYS_NMLN]; /* Name of this OS. */
+ char nodename[SYS_NMLN]; /* Name of this network node. */
+ char release[SYS_NMLN]; /* Release level. */
+ char version[SYS_NMLN]; /* Version level. */
+ char machine[SYS_NMLN]; /* Hardware type. */
};
+
#include <sys/cdefs.h>
+
+#ifndef KERNEL
+#ifdef __STDC__
__BEGIN_DECLS
int uname __P((struct utsname *));
__END_DECLS
+#else
+extern int uname();
+#endif
+#else
+extern struct utsname utsname;
+#endif /* KERNEL */
#endif /* !_SYS_UTSNAME_H */
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index f0b3d57f3363..ef890c295f1d 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -74,6 +74,8 @@ struct vmmeter {
*/
unsigned v_page_size; /* page size in bytes */
unsigned v_kernel_pages;/* number of pages in use by kernel */
+ unsigned v_page_count; /* total number of pages in system */
+ unsigned v_free_reserved; /* number of pages reserved for deadlock */
unsigned v_free_target; /* number of pages desired free */
unsigned v_free_min; /* minimum number of pages desired free */
unsigned v_free_count; /* number of pages free */
diff --git a/sys/ufs/ffs/ffs_alloc.c b/sys/ufs/ffs/ffs_alloc.c
index cdd2e4b2b35c..15e871836f2f 100644
--- a/sys/ufs/ffs/ffs_alloc.c
+++ b/sys/ufs/ffs/ffs_alloc.c
@@ -63,6 +63,8 @@ static u_long ffs_hashalloc
static ino_t ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));
+void ffs_clusteracct __P((struct fs *, struct cg *, daddr_t, int));
+
/*
* Allocate a block in the file system.
*
@@ -82,6 +84,7 @@ static daddr_t ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));
* 2) quadradically rehash into other cylinder groups, until an
* available block is located.
*/
+int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
register struct inode *ip;
daddr_t lbn, bpref;
@@ -146,6 +149,7 @@ nospace:
* the original block. Failing that, the regular block allocator is
* invoked to get an appropriate block.
*/
+int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
register struct inode *ip;
daddr_t lbprev;
@@ -304,7 +308,9 @@ nospace:
*/
#include <sys/sysctl.h>
int doasyncfree = 1;
+#ifdef DEBUG
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
+#endif
int
ffs_reallocblks(ap)
struct vop_reallocblks_args /* {
@@ -316,7 +322,7 @@ ffs_reallocblks(ap)
struct inode *ip;
struct vnode *vp;
struct buf *sbp, *ebp;
- daddr_t *bap, *sbap, *ebap;
+ daddr_t *bap, *sbap, *ebap = 0;
struct cluster_save *buflist;
daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
@@ -466,6 +472,7 @@ fail:
* 2) quadradically rehash into other cylinder groups, until an
* available inode is located.
*/
+int
ffs_valloc(ap)
struct vop_valloc_args /* {
struct vnode *a_pvp;
@@ -1150,6 +1157,7 @@ gotit:
* free map. If a fragment is deallocated, a possible
* block reassembly is checked.
*/
+void
ffs_blkfree(ip, bno, size)
register struct inode *ip;
daddr_t bno;
@@ -1380,6 +1388,7 @@ ffs_mapsearch(fs, cgp, bpref, allocsiz)
*
* Cnt == 1 means free; cnt == -1 means allocating.
*/
+void
ffs_clusteracct(fs, cgp, blkno, cnt)
struct fs *fs;
struct cg *cgp;
diff --git a/sys/ufs/ffs/ffs_balloc.c b/sys/ufs/ffs/ffs_balloc.c
index 752feec9947b..2addf95b6c6f 100644
--- a/sys/ufs/ffs/ffs_balloc.c
+++ b/sys/ufs/ffs/ffs_balloc.c
@@ -54,6 +54,7 @@
* by allocating the physical blocks on a device given
* the inode and the logical block number in a file.
*/
+int
ffs_balloc(ip, bn, size, cred, bpp, flags)
register struct inode *ip;
register daddr_t bn;
diff --git a/sys/ufs/ffs/ffs_extern.h b/sys/ufs/ffs/ffs_extern.h
index ab467a272a94..6c30389f49a5 100644
--- a/sys/ufs/ffs/ffs_extern.h
+++ b/sys/ufs/ffs/ffs_extern.h
@@ -53,7 +53,7 @@ int ffs_alloc __P((struct inode *,
int ffs_balloc __P((struct inode *,
daddr_t, int, struct ucred *, struct buf **, int));
int ffs_blkatoff __P((struct vop_blkatoff_args *));
-int ffs_blkfree __P((struct inode *, daddr_t, long));
+void ffs_blkfree __P((struct inode *, daddr_t, long));
daddr_t ffs_blkpref __P((struct inode *, daddr_t, int, daddr_t *));
int ffs_bmap __P((struct vop_bmap_args *));
void ffs_clrblock __P((struct fs *, u_char *, daddr_t));
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index b45aee53552f..cf4fae3fa413 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -137,6 +137,7 @@ ffs_update(ap)
* Truncate the inode oip to at most length size, freeing the
* disk blocks.
*/
+int
ffs_truncate(ap)
struct vop_truncate_args /* {
struct vnode *a_vp;
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 505dd5db8cbf..41f4a0d2e85e 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -84,6 +84,7 @@ extern u_long nextgennumber;
*/
#define ROOTNAME "root_device"
+int
ffs_mountroot()
{
extern struct vnode *rootvp;
@@ -146,7 +147,7 @@ ffs_mount(mp, path, data, ndp, p)
{
struct vnode *devvp;
struct ufs_args args;
- struct ufsmount *ump;
+ struct ufsmount *ump = 0;
register struct fs *fs;
u_int size;
int error, flags;
@@ -238,6 +239,7 @@ ffs_mount(mp, path, data, ndp, p)
* 5) invalidate all cached file data.
* 6) re-read inode data for all active vnodes.
*/
+int
ffs_reload(mountp, cred, p)
register struct mount *mountp;
struct ucred *cred;
@@ -447,6 +449,7 @@ out:
*
* XXX - goes away some day.
*/
+int
ffs_oldfscompat(fs)
struct fs *fs;
{
@@ -509,6 +512,7 @@ ffs_unmount(mp, mntflags, p)
/*
* Flush out all the files in a filesystem.
*/
+int
ffs_flushfiles(mp, flags, p)
register struct mount *mp;
int flags;
@@ -786,6 +790,7 @@ ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
* Vnode pointer to File handle
*/
/* ARGSUSED */
+int
ffs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
diff --git a/sys/ufs/lfs/lfs_bio.c b/sys/ufs/lfs/lfs_bio.c
index 0f021f172088..d2254271d325 100644
--- a/sys/ufs/lfs/lfs_bio.c
+++ b/sys/ufs/lfs/lfs_bio.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
diff --git a/sys/ufs/lfs/lfs_segment.c b/sys/ufs/lfs/lfs_segment.c
index 249d59ddda50..65cab73f51b7 100644
--- a/sys/ufs/lfs/lfs_segment.c
+++ b/sys/ufs/lfs/lfs_segment.c
@@ -1085,6 +1085,7 @@ lfs_shellsort(bp_array, lb_array, nmemb)
/*
* Check VXLOCK. Return 1 if the vnode is locked. Otherwise, vget it.
*/
+int
lfs_vref(vp)
register struct vnode *vp;
{
diff --git a/sys/ufs/lfs/lfs_subr.c b/sys/ufs/lfs/lfs_subr.c
index afcd8c29b3f1..79e36f4f49b5 100644
--- a/sys/ufs/lfs/lfs_subr.c
+++ b/sys/ufs/lfs/lfs_subr.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
diff --git a/sys/ufs/lfs/lfs_syscalls.c b/sys/ufs/lfs/lfs_syscalls.c
index 666595e6b595..a4b1501179b0 100644
--- a/sys/ufs/lfs/lfs_syscalls.c
+++ b/sys/ufs/lfs/lfs_syscalls.c
@@ -34,6 +34,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
@@ -92,7 +93,7 @@ lfs_markv(p, uap, retval)
BLOCK_INFO *blkp;
IFILE *ifp;
struct buf *bp, **bpp;
- struct inode *ip;
+ struct inode *ip = 0;
struct lfs *fs;
struct mount *mntp;
struct vnode *vp;
diff --git a/sys/ufs/lfs/lfs_vfsops.c b/sys/ufs/lfs/lfs_vfsops.c
index 0c8186e2322a..f5dd1c6a809e 100644
--- a/sys/ufs/lfs/lfs_vfsops.c
+++ b/sys/ufs/lfs/lfs_vfsops.c
@@ -86,6 +86,7 @@ lfs_mountroot()
*
* mount system call
*/
+int
lfs_mount(mp, path, data, ndp, p)
register struct mount *mp;
char *path;
@@ -95,7 +96,7 @@ lfs_mount(mp, path, data, ndp, p)
{
struct vnode *devvp;
struct ufs_args args;
- struct ufsmount *ump;
+ struct ufsmount *ump = 0;
register struct lfs *fs; /* LFS */
u_int size;
int error;
@@ -312,6 +313,7 @@ out:
/*
* unmount system call
*/
+int
lfs_unmount(mp, mntflags, p)
struct mount *mp;
int mntflags;
@@ -371,6 +373,7 @@ lfs_unmount(mp, mntflags, p)
/*
* Get file system statistics.
*/
+int
lfs_statfs(mp, sbp, p)
struct mount *mp;
register struct statfs *sbp;
@@ -409,6 +412,7 @@ lfs_statfs(mp, sbp, p)
*
* Note: we are always called with the filesystem marked `MPBUSY'.
*/
+int
lfs_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
@@ -557,6 +561,7 @@ lfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
* Vnode pointer to File handle
*/
/* ARGSUSED */
+int
lfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
diff --git a/sys/ufs/lfs/lfs_vnops.c b/sys/ufs/lfs/lfs_vnops.c
index fc6bd480d22a..e5da7fccf0be 100644
--- a/sys/ufs/lfs/lfs_vnops.c
+++ b/sys/ufs/lfs/lfs_vnops.c
@@ -216,6 +216,7 @@ struct vnodeopv_desc lfs_fifoop_opv_desc =
* Synch an open file.
*/
/* ARGSUSED */
+int
lfs_fsync(ap)
struct vop_fsync_args /* {
struct vnode *a_vp;
diff --git a/sys/ufs/mfs/mfs_vfsops.c b/sys/ufs/mfs/mfs_vfsops.c
index 3fcbdf379284..fd8777107e26 100644
--- a/sys/ufs/mfs/mfs_vfsops.c
+++ b/sys/ufs/mfs/mfs_vfsops.c
@@ -86,6 +86,7 @@ struct vfsops mfs_vfsops = {
*/
#define ROOTNAME "mfs_root"
+int
mfs_mountroot()
{
extern struct vnode *rootvp;
@@ -149,6 +150,7 @@ mfs_mountroot()
* This is called early in boot to set the base address and size
* of the mini-root.
*/
+int
mfs_initminiroot(base)
caddr_t base;
{
@@ -294,6 +296,7 @@ mfs_start(mp, flags, p)
/*
* Get file system statistics.
*/
+int
mfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
diff --git a/sys/ufs/mfs/mfs_vnops.c b/sys/ufs/mfs/mfs_vnops.c
index 71adf069b1db..c70657b4f336 100644
--- a/sys/ufs/mfs/mfs_vnops.c
+++ b/sys/ufs/mfs/mfs_vnops.c
@@ -423,10 +423,12 @@ mfs_badop()
/*
* Memory based filesystem initialization.
*/
+int
mfs_init()
{
#if !defined(hp300) && !defined(i386) && !defined(mips) && !defined(sparc) && !defined(luna68k)
rminit(mfsmap, (long)MFS_MAPREG, (long)1, "mfs mapreg", MFS_MAPSIZE);
#endif
+ return (0);
}
diff --git a/sys/ufs/ufs/ufs_bmap.c b/sys/ufs/ufs/ufs_bmap.c
index bcd838d036a1..a424d31d7a85 100644
--- a/sys/ufs/ufs/ufs_bmap.c
+++ b/sys/ufs/ufs/ufs_bmap.c
@@ -112,7 +112,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct indir a[NIADDR], *xap;
daddr_t daddr;
long metalbn;
- int error, maxrun, num;
+ int error, maxrun = 0, num;
ip = VTOI(vp);
mp = vp->v_mount;
diff --git a/sys/ufs/ufs/ufs_disksubr.c b/sys/ufs/ufs/ufs_disksubr.c
index 78dede4da773..cc0f28d37f15 100644
--- a/sys/ufs/ufs/ufs_disksubr.c
+++ b/sys/ufs/ufs/ufs_disksubr.c
@@ -43,6 +43,7 @@
#include <sys/buf.h>
#include <sys/disklabel.h>
#include <sys/syslog.h>
+#include <sys/dkbad.h>
/*
* Seek sort for disks. We depend on the driver which calls us using b_resid
@@ -153,14 +154,19 @@ insert: bp->b_actf = bq->b_actf;
* string on failure.
*/
char *
-readdisklabel(dev, strat, lp)
+readdisklabel(dev, strat, lp, dp, bdp)
dev_t dev;
int (*strat)();
register struct disklabel *lp;
+ struct dos_partition *dp;
+ struct dkbad *bdp;
{
register struct buf *bp;
struct disklabel *dlp;
char *msg = NULL;
+ int dospartoff;
+ int i;
+ int cyl;
if (lp->d_secperunit == 0)
lp->d_secperunit = 0x1fffffff;
@@ -170,11 +176,61 @@ readdisklabel(dev, strat, lp)
lp->d_partitions[0].p_offset = 0;
bp = geteblk((int)lp->d_secsize);
- bp->b_dev = dev;
- bp->b_blkno = LABELSECTOR;
+ /* do dos partitions in the process of getting disklabel? */
+ dospartoff = 0;
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ struct dos_partition *ap;
+
+ /* read master boot record */
+ bp->b_dev = dev;
+ bp->b_blkno = DOSBBSECTOR;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylinder = DOSBBSECTOR / lp->d_secpercyl;
+ (*strat)(bp);
+
+ /* if successful, wander through dos partition table */
+ if (biowait(bp)) {
+ msg = "dos partition I/O error";
+ goto done;
+ } else {
+ /* XXX how do we check veracity/bounds of this? */
+ bcopy(bp->b_un.b_addr + DOSPARTOFF, dp,
+ NDOSPART * sizeof(*dp));
+ for (i = 0; i < NDOSPART; i++, dp++)
+ /* is this ours? */
+ if (dp->dp_size &&
+ dp->dp_typ == DOSPTYP_386BSD
+ && dospartoff == 0) {
+
+ /* need sector address for SCSI/IDE,
+ cylinder for ESDI/ST506/RLL */
+ dospartoff = dp->dp_start;
+ cyl = DPCYL(dp->dp_scyl, dp->dp_ssect);
+
+ /* update disklabel with details */
+ lp->d_partitions[0].p_size =
+ dp->dp_size;
+ lp->d_partitions[0].p_offset =
+ dp->dp_start;
+ lp->d_ntracks = dp->dp_ehd + 1;
+ lp->d_nsectors = DPSECT(dp->dp_esect);
+ lp->d_subtype |= (lp->d_subtype & 3)
+ + i | DSTYPE_INDOSPART;
+ lp->d_secpercyl = lp->d_ntracks *
+ lp->d_nsectors;
+ }
+ }
+
+ }
+
+ /* next, dig out disk label */
+ bp->b_blkno = dospartoff + LABELSECTOR;
+ bp->b_dev = dev;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
- bp->b_cylinder = LABELSECTOR / lp->d_secpercyl;
+ bp->b_cylinder = cyl;
(*strat)(bp);
if (biowait(bp))
msg = "I/O error";
@@ -194,6 +250,46 @@ readdisklabel(dev, strat, lp)
break;
}
}
+ if (msg)
+ goto done;
+
+ /* obtain bad sector table if requested and present */
+ if (bdp && (lp->d_flags & D_BADSECT)) {
+ struct dkbad *db;
+
+ printf("d_secsize: %d\n", lp->d_secsize);
+ i = 0;
+ do {
+ /* read a bad sector table */
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
+ if (lp->d_secsize > DEV_BSIZE)
+ bp->b_blkno *= lp->d_secsize / DEV_BSIZE;
+ else
+ bp->b_blkno /= DEV_BSIZE / lp->d_secsize;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_cylinder = lp->d_ncylinders - 1;
+ (*strat)(bp);
+
+ /* if successful, validate, otherwise try another */
+ if (biowait(bp)) {
+ msg = "bad sector table I/O error";
+ } else {
+ db = (struct dkbad *)(bp->b_un.b_addr);
+#define DKBAD_MAGIC 0x4321
+ if (db->bt_mbz == 0
+ && db->bt_flag == DKBAD_MAGIC) {
+ msg = NULL;
+ *bdp = *db;
+ break;
+ } else
+ msg = "bad sector table corrupted";
+ }
+ } while ((bp->b_flags & B_ERROR) && (i += 2) < 10 &&
+ i < lp->d_nsectors);
+ }
+
+done:
bp->b_flags = B_INVAL | B_AGE;
brelse(bp);
return (msg);
@@ -294,6 +390,7 @@ done:
/*
* Compute checksum for disk label.
*/
+int
dkcksum(lp)
register struct disklabel *lp;
{
diff --git a/sys/ufs/ufs/ufs_readwrite.c b/sys/ufs/ufs/ufs_readwrite.c
index 5ead2c1a9adf..212210c583ac 100644
--- a/sys/ufs/ufs/ufs_readwrite.c
+++ b/sys/ufs/ufs/ufs_readwrite.c
@@ -57,6 +57,7 @@
* Vnode op for reading.
*/
/* ARGSUSED */
+int
READ(ap)
struct vop_read_args /* {
struct vnode *a_vp;
@@ -159,6 +160,7 @@ READ(ap)
/*
* Vnode op for writing.
*/
+int
WRITE(ap)
struct vop_write_args /* {
struct vnode *a_vp;
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 7b7c88376b95..83bef926a824 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -1580,7 +1580,8 @@ ufs_readlink(ap)
int isize;
isize = ip->i_size;
- if (isize < vp->v_mount->mnt_maxsymlinklen) {
+ if ((isize < vp->v_mount->mnt_maxsymlinklen) ||
+ (ip->i_din.di_blocks == 0)) { /* XXX - for old fastlink support */
uiomove((char *)ip->i_shortlink, isize, ap->a_uio);
return (0);
}
@@ -1876,6 +1877,7 @@ ufsfifo_write(ap)
*
* Update the times on the inode then do device close.
*/
+int
ufsfifo_close(ap)
struct vop_close_args /* {
struct vnode *a_vp;
@@ -1896,6 +1898,7 @@ ufsfifo_close(ap)
/*
* Return POSIX pathconf information applicable to ufs filesystems.
*/
+int
ufs_pathconf(ap)
struct vop_pathconf_args /* {
struct vnode *a_vp;
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 235c917a0c67..b8083df16273 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)device_pager.c 8.5 (Berkeley) 1/12/94
+ * @(#)device_pager.c 8.1 (Berkeley) 6/11/93
*/
/*
@@ -53,8 +53,8 @@
#include <vm/vm_page.h>
#include <vm/device_pager.h>
-struct pagerlst dev_pager_list; /* list of managed devices */
-struct pglist dev_pager_fakelist; /* list of available vm_page_t's */
+struct pagerlst dev_pager_list; /* list of managed devices */
+struct pglist dev_pager_fakelist; /* list of available vm_page_t's */
#ifdef DEBUG
int dpagerdebug = 0;
@@ -68,11 +68,11 @@ static vm_pager_t dev_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void dev_pager_dealloc __P((vm_pager_t));
static int dev_pager_getpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t, boolean_t));
static boolean_t dev_pager_haspage __P((vm_pager_t, vm_offset_t));
static void dev_pager_init __P((void));
static int dev_pager_putpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t, boolean_t));
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));
@@ -81,9 +81,10 @@ struct pagerops devicepagerops = {
dev_pager_alloc,
dev_pager_dealloc,
dev_pager_getpage,
+ 0,
dev_pager_putpage,
- dev_pager_haspage,
- vm_pager_clusternull
+ 0,
+ dev_pager_haspage
};
static void
@@ -109,7 +110,7 @@ dev_pager_alloc(handle, size, prot, foff)
int (*mapfunc)();
vm_object_t object;
dev_pager_t devp;
- int npages, off;
+ unsigned int npages, off;
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
@@ -127,7 +128,7 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Make sure this device can be mapped.
*/
- dev = (dev_t)handle;
+ dev = (dev_t)(u_long)handle;
mapfunc = cdevsw[major(dev)].d_mmap;
if (mapfunc == NULL || mapfunc == enodev || mapfunc == nullop)
return(NULL);
@@ -135,7 +136,7 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Offset should be page aligned.
*/
- if (foff & PAGE_MASK)
+ if (foff & (PAGE_SIZE-1))
return(NULL);
/*
@@ -169,15 +170,15 @@ top:
pager->pg_handle = handle;
pager->pg_ops = &devicepagerops;
pager->pg_type = PG_DEVICE;
+ pager->pg_data = (caddr_t)devp;
pager->pg_flags = 0;
- pager->pg_data = devp;
TAILQ_INIT(&devp->devp_pglist);
/*
* Allocate object and associate it with the pager.
*/
object = devp->devp_object = vm_object_allocate(0);
vm_object_enter(object, pager);
- vm_object_setpager(object, pager, (vm_offset_t)0, FALSE);
+ vm_object_setpager(object, pager, (vm_offset_t)foff, FALSE);
/*
* Finally, put it on the managed list so other can find it.
* First we re-lookup in case someone else beat us to this
@@ -239,7 +240,7 @@ dev_pager_dealloc(pager)
/*
* Free up our fake pages.
*/
- while ((m = devp->devp_pglist.tqh_first) != NULL) {
+ while (m=devp->devp_pglist.tqh_first) {
TAILQ_REMOVE(&devp->devp_pglist, m, pageq);
dev_pager_putfake(m);
}
@@ -248,39 +249,33 @@ dev_pager_dealloc(pager)
}
static int
-dev_pager_getpage(pager, mlist, npages, sync)
+dev_pager_getpage(pager, m, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t m;
boolean_t sync;
{
register vm_object_t object;
vm_offset_t offset, paddr;
vm_page_t page;
dev_t dev;
+ int s;
int (*mapfunc)(), prot;
- vm_page_t m;
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
- printf("dev_pager_getpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
+ printf("dev_pager_getpage(%x, %x)\n", pager, m);
#endif
- if (npages != 1)
- panic("dev_pager_getpage: cannot handle multiple pages");
- m = *mlist;
-
object = m->object;
- dev = (dev_t)pager->pg_handle;
+ dev = (dev_t)(u_long)pager->pg_handle;
offset = m->offset + object->paging_offset;
prot = PROT_READ; /* XXX should pass in? */
mapfunc = cdevsw[major(dev)].d_mmap;
-#ifdef DIAGNOSTIC
+
if (mapfunc == NULL || mapfunc == enodev || mapfunc == nullop)
panic("dev_pager_getpage: no map function");
-#endif
- paddr = pmap_phys_address((*mapfunc)(dev, (int)offset, prot));
+
+ paddr = pmap_phys_address((*mapfunc)((dev_t)dev, (int)offset, prot));
#ifdef DIAGNOSTIC
if (paddr == -1)
panic("dev_pager_getpage: map function returns error");
@@ -290,13 +285,15 @@ dev_pager_getpage(pager, mlist, npages, sync)
* up the original.
*/
page = dev_pager_getfake(paddr);
- TAILQ_INSERT_TAIL(&((dev_pager_t)pager->pg_data)->devp_pglist, page,
- pageq);
+ TAILQ_INSERT_TAIL(&((dev_pager_t)pager->pg_data)->devp_pglist,
+ page, pageq);
vm_object_lock(object);
vm_page_lock_queues();
vm_page_free(m);
- vm_page_insert(page, object, offset);
vm_page_unlock_queues();
+ s = splhigh();
+ vm_page_insert(page, object, offset);
+ splx(s);
PAGE_WAKEUP(m);
if (offset + PAGE_SIZE > object->size)
object->size = offset + PAGE_SIZE; /* XXX anal */
@@ -306,19 +303,17 @@ dev_pager_getpage(pager, mlist, npages, sync)
}
static int
-dev_pager_putpage(pager, mlist, npages, sync)
+dev_pager_putpage(pager, m, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t m;
boolean_t sync;
{
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
- printf("dev_pager_putpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
+ printf("dev_pager_putpage(%x, %x)\n", pager, m);
#endif
if (pager == NULL)
- return;
+ return 0;
panic("dev_pager_putpage called");
}
@@ -350,9 +345,12 @@ dev_pager_getfake(paddr)
}
m = dev_pager_fakelist.tqh_first;
TAILQ_REMOVE(&dev_pager_fakelist, m, pageq);
+
m->flags = PG_BUSY | PG_CLEAN | PG_FAKE | PG_FICTITIOUS;
- m->phys_addr = paddr;
+
m->wire_count = 1;
+ m->phys_addr = paddr;
+
return(m);
}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 899a6cf41a00..5a1efaef3951 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 1994 John S. Dyson
* Copyright (c) 1990 University of Utah.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
@@ -51,179 +52,145 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
-#include <sys/map.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <miscfs/specfs/specdev.h>
+#include <sys/rlist.h>
#include <vm/vm.h>
+#include <vm/vm_pager.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
-#define NSWSIZES 16 /* size of swtab */
-#define MAXDADDRS 64 /* max # of disk addrs for fixed allocations */
#ifndef NPENDINGIO
-#define NPENDINGIO 64 /* max # of pending cleans */
+#define NPENDINGIO 16
#endif
-#ifdef DEBUG
-int swpagerdebug = 0x100;
-#define SDB_FOLLOW 0x001
-#define SDB_INIT 0x002
-#define SDB_ALLOC 0x004
-#define SDB_IO 0x008
-#define SDB_WRITE 0x010
-#define SDB_FAIL 0x020
-#define SDB_ALLOCBLK 0x040
-#define SDB_FULL 0x080
-#define SDB_ANOM 0x100
-#define SDB_ANOMPANIC 0x200
-#define SDB_CLUSTER 0x400
-#define SDB_PARANOIA 0x800
-#endif
+extern int nswbuf;
+int nswiodone;
+extern int vm_pageout_rate_limit;
+static int cleandone;
+extern int hz;
+int swap_pager_full;
+extern vm_map_t pager_map;
+extern int vm_pageout_pages_needed;
+extern int vm_swap_size;
+extern struct vnode *swapdev_vp;
+
+#define MAX_PAGEOUT_CLUSTER 8
TAILQ_HEAD(swpclean, swpagerclean);
+typedef struct swpagerclean *swp_clean_t;
+
struct swpagerclean {
TAILQ_ENTRY(swpagerclean) spc_list;
int spc_flags;
struct buf *spc_bp;
sw_pager_t spc_swp;
vm_offset_t spc_kva;
- vm_page_t spc_m;
- int spc_npages;
-} swcleanlist[NPENDINGIO];
-typedef struct swpagerclean *swp_clean_t;
-
-/* spc_flags values */
-#define SPC_FREE 0x00
-#define SPC_BUSY 0x01
-#define SPC_DONE 0x02
-#define SPC_ERROR 0x04
-
-struct swtab {
- vm_size_t st_osize; /* size of object (bytes) */
- int st_bsize; /* vs. size of swap block (DEV_BSIZE units) */
-#ifdef DEBUG
- u_long st_inuse; /* number in this range in use */
- u_long st_usecnt; /* total used of this size */
-#endif
-} swtab[NSWSIZES+1];
+ vm_offset_t spc_altkva;
+ int spc_count;
+ vm_page_t spc_m[MAX_PAGEOUT_CLUSTER];
+} swcleanlist [NPENDINGIO] ;
-#ifdef DEBUG
-int swap_pager_poip; /* pageouts in progress */
-int swap_pager_piip; /* pageins in progress */
-#endif
-int swap_pager_maxcluster; /* maximum cluster size */
-int swap_pager_npendingio; /* number of pager clean structs */
+extern vm_map_t kernel_map;
-struct swpclean swap_pager_inuse; /* list of pending page cleans */
-struct swpclean swap_pager_free; /* list of free pager clean structs */
-struct pagerlst swap_pager_list; /* list of "named" anon regions */
+/* spc_flags values */
+#define SPC_ERROR 0x01
+
+#define SWB_EMPTY (-1)
+
+void swap_pager_init(void);
+vm_pager_t swap_pager_alloc(caddr_t, vm_size_t, vm_prot_t, vm_offset_t);
+void swap_pager_dealloc(vm_pager_t);
+boolean_t swap_pager_getpage(vm_pager_t, vm_page_t, boolean_t);
+boolean_t swap_pager_putpage(vm_pager_t, vm_page_t, boolean_t);
+boolean_t swap_pager_getmulti(vm_pager_t, vm_page_t *, int, int, boolean_t);
+boolean_t swap_pager_haspage(vm_pager_t, vm_offset_t);
+int swap_pager_io(sw_pager_t, vm_page_t *, int, int, int);
+void swap_pager_iodone(struct buf *);
+boolean_t swap_pager_clean();
+
+extern struct pagerops swappagerops;
+
+struct swpclean swap_pager_done; /* list of compileted page cleans */
+struct swpclean swap_pager_inuse; /* list of pending page cleans */
+struct swpclean swap_pager_free; /* list of free pager clean structs */
+struct pagerlst swap_pager_list; /* list of "named" anon regions */
+struct pagerlst swap_pager_un_list; /* list of "unnamed" anon pagers */
+
+#define SWAP_FREE_NEEDED 0x1 /* need a swap block */
+int swap_pager_needflags;
+struct rlist *swapfrag;
+
+struct pagerlst *swp_qs[]={
+ &swap_pager_list, &swap_pager_un_list, (struct pagerlst *) 0
+};
-static void swap_pager_init __P((void));
-static vm_pager_t swap_pager_alloc
- __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
-static void swap_pager_clean __P((int));
-#ifdef DEBUG
-static void swap_pager_clean_check __P((vm_page_t *, int, int));
-#endif
-static void swap_pager_cluster
- __P((vm_pager_t, vm_offset_t,
- vm_offset_t *, vm_offset_t *));
-static void swap_pager_dealloc __P((vm_pager_t));
-static int swap_pager_getpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
-static boolean_t swap_pager_haspage __P((vm_pager_t, vm_offset_t));
-static int swap_pager_io __P((sw_pager_t, vm_page_t *, int, int));
-static void swap_pager_iodone __P((struct buf *));
-static int swap_pager_putpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+int swap_pager_putmulti();
struct pagerops swappagerops = {
swap_pager_init,
swap_pager_alloc,
swap_pager_dealloc,
swap_pager_getpage,
+ swap_pager_getmulti,
swap_pager_putpage,
- swap_pager_haspage,
- swap_pager_cluster
+ swap_pager_putmulti,
+ swap_pager_haspage
};
-static void
+extern int nswbuf;
+
+int npendingio = NPENDINGIO;
+int pendingiowait;
+int require_swap_init;
+void swap_pager_finish();
+int dmmin, dmmax;
+extern int vm_page_count;
+
+struct buf * getpbuf() ;
+void relpbuf(struct buf *bp) ;
+
+static inline void swapsizecheck() {
+ if( vm_swap_size < 128*btodb(PAGE_SIZE)) {
+ if( swap_pager_full)
+ printf("swap_pager: out of space\n");
+ swap_pager_full = 1;
+ } else if( vm_swap_size > 192*btodb(PAGE_SIZE))
+ swap_pager_full = 0;
+}
+
+void
swap_pager_init()
{
- register swp_clean_t spc;
- register int i, bsize;
extern int dmmin, dmmax;
- int maxbsize;
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FOLLOW|SDB_INIT))
- printf("swpg_init()\n");
-#endif
dfltpagerops = &swappagerops;
- TAILQ_INIT(&swap_pager_list);
- /*
- * Allocate async IO structures.
- *
- * XXX it would be nice if we could do this dynamically based on
- * the value of nswbuf (since we are ultimately limited by that)
- * but neither nswbuf or malloc has been initialized yet. So the
- * structs are statically allocated above.
- */
- swap_pager_npendingio = NPENDINGIO;
+ TAILQ_INIT(&swap_pager_list);
+ TAILQ_INIT(&swap_pager_un_list);
/*
* Initialize clean lists
*/
TAILQ_INIT(&swap_pager_inuse);
+ TAILQ_INIT(&swap_pager_done);
TAILQ_INIT(&swap_pager_free);
- for (i = 0, spc = swcleanlist; i < swap_pager_npendingio; i++, spc++) {
- TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
- spc->spc_flags = SPC_FREE;
- }
+
+ require_swap_init = 1;
/*
* Calculate the swap allocation constants.
*/
- if (dmmin == 0) {
- dmmin = DMMIN;
- if (dmmin < CLBYTES/DEV_BSIZE)
- dmmin = CLBYTES/DEV_BSIZE;
- }
- if (dmmax == 0)
- dmmax = DMMAX;
-
- /*
- * Fill in our table of object size vs. allocation size
- */
- bsize = btodb(PAGE_SIZE);
- if (bsize < dmmin)
- bsize = dmmin;
- maxbsize = btodb(sizeof(sw_bm_t) * NBBY * PAGE_SIZE);
- if (maxbsize > dmmax)
- maxbsize = dmmax;
- for (i = 0; i < NSWSIZES; i++) {
- swtab[i].st_osize = (vm_size_t) (MAXDADDRS * dbtob(bsize));
- swtab[i].st_bsize = bsize;
- if (bsize <= btodb(MAXPHYS))
- swap_pager_maxcluster = dbtob(bsize);
-#ifdef DEBUG
- if (swpagerdebug & SDB_INIT)
- printf("swpg_init: ix %d, size %x, bsize %x\n",
- i, swtab[i].st_osize, swtab[i].st_bsize);
-#endif
- if (bsize >= maxbsize)
- break;
- bsize *= 2;
- }
- swtab[i].st_osize = 0;
- swtab[i].st_bsize = bsize;
+
+ dmmin = CLBYTES/DEV_BSIZE;
+ dmmax = btodb(SWB_NPAGES*PAGE_SIZE)*2;
+
}
/*
@@ -231,22 +198,43 @@ swap_pager_init()
* Note that if we are called from the pageout daemon (handle == NULL)
* we should not wait for memory as it could resulting in deadlock.
*/
-static vm_pager_t
-swap_pager_alloc(handle, size, prot, foff)
+vm_pager_t
+swap_pager_alloc(handle, size, prot, offset)
caddr_t handle;
register vm_size_t size;
vm_prot_t prot;
- vm_offset_t foff;
+ vm_offset_t offset;
{
register vm_pager_t pager;
register sw_pager_t swp;
- struct swtab *swt;
int waitok;
-
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
- printf("swpg_alloc(%x, %x, %x)\n", handle, size, prot);
-#endif
+ int i,j;
+
+ if (require_swap_init) {
+ swp_clean_t spc;
+ struct buf *bp;
+ /*
+ * kva's are allocated here so that we dont need to keep
+ * doing kmem_alloc pageables at runtime
+ */
+ for (i = 0, spc = swcleanlist; i < npendingio ; i++, spc++) {
+ spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE);
+ if (!spc->spc_kva) {
+ break;
+ }
+ spc->spc_bp = malloc( sizeof( *bp), M_TEMP, M_NOWAIT);
+ if (!spc->spc_bp) {
+ kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE);
+ break;
+ }
+ spc->spc_flags = 0;
+ TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
+ }
+ require_swap_init = 0;
+ if( size == 0)
+ return(NULL);
+ }
+
/*
* If this is a "named" anonymous region, look it up and
* return the appropriate pager if it exists.
@@ -264,50 +252,43 @@ swap_pager_alloc(handle, size, prot, foff)
return(pager);
}
}
+
+ if (swap_pager_full) {
+ return(NULL);
+ }
+
/*
* Pager doesn't exist, allocate swap management resources
* and initialize.
*/
- waitok = handle ? M_WAITOK : M_NOWAIT;
+ waitok = handle ? M_WAITOK : M_NOWAIT;
pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, waitok);
if (pager == NULL)
return(NULL);
swp = (sw_pager_t)malloc(sizeof *swp, M_VMPGDATA, waitok);
if (swp == NULL) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_FAIL)
- printf("swpg_alloc: swpager malloc failed\n");
-#endif
free((caddr_t)pager, M_VMPAGER);
return(NULL);
}
size = round_page(size);
- for (swt = swtab; swt->st_osize; swt++)
- if (size <= swt->st_osize)
- break;
-#ifdef DEBUG
- swt->st_inuse++;
- swt->st_usecnt++;
-#endif
swp->sw_osize = size;
- swp->sw_bsize = swt->st_bsize;
- swp->sw_nblocks = (btodb(size) + swp->sw_bsize - 1) / swp->sw_bsize;
+ swp->sw_nblocks = (btodb(size) + btodb(SWB_NPAGES * PAGE_SIZE) - 1) / btodb(SWB_NPAGES*PAGE_SIZE);
swp->sw_blocks = (sw_blk_t)
malloc(swp->sw_nblocks*sizeof(*swp->sw_blocks),
- M_VMPGDATA, M_NOWAIT);
+ M_VMPGDATA, waitok);
if (swp->sw_blocks == NULL) {
free((caddr_t)swp, M_VMPGDATA);
free((caddr_t)pager, M_VMPAGER);
-#ifdef DEBUG
- if (swpagerdebug & SDB_FAIL)
- printf("swpg_alloc: sw_blocks malloc failed\n");
- swt->st_inuse--;
- swt->st_usecnt--;
-#endif
- return(FALSE);
+ return(NULL);
+ }
+
+ for (i = 0; i < swp->sw_nblocks; i++) {
+ swp->sw_blocks[i].swb_valid = 0;
+ swp->sw_blocks[i].swb_locked = 0;
+ for (j = 0; j < SWB_NPAGES; j++)
+ swp->sw_blocks[i].swb_block[j] = SWB_EMPTY;
}
- bzero((caddr_t)swp->sw_blocks,
- swp->sw_nblocks * sizeof(*swp->sw_blocks));
+
swp->sw_poip = 0;
if (handle) {
vm_object_t object;
@@ -324,686 +305,1530 @@ swap_pager_alloc(handle, size, prot, foff)
vm_object_setpager(object, pager, 0, FALSE);
} else {
swp->sw_flags = 0;
- pager->pg_list.tqe_next = NULL;
- pager->pg_list.tqe_prev = NULL;
+ TAILQ_INSERT_TAIL(&swap_pager_un_list, pager, pg_list);
}
pager->pg_handle = handle;
pager->pg_ops = &swappagerops;
pager->pg_type = PG_SWAP;
- pager->pg_flags = PG_CLUSTERPUT;
- pager->pg_data = swp;
+ pager->pg_data = (caddr_t)swp;
-#ifdef DEBUG
- if (swpagerdebug & SDB_ALLOC)
- printf("swpg_alloc: pg_data %x, %x of %x at %x\n",
- swp, swp->sw_nblocks, swp->sw_bsize, swp->sw_blocks);
-#endif
return(pager);
}
+/*
+ * returns disk block associated with pager and offset
+ * additionally, as a side effect returns a flag indicating
+ * if the block has been written
+ */
+
+static int *
+swap_pager_diskaddr(swp, offset, valid)
+ sw_pager_t swp;
+ vm_offset_t offset;
+ int *valid;
+{
+ register sw_blk_t swb;
+ int ix;
+
+ if (valid)
+ *valid = 0;
+ ix = offset / (SWB_NPAGES*PAGE_SIZE);
+ if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
+ return(FALSE);
+ }
+ swb = &swp->sw_blocks[ix];
+ ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE;
+ if (valid)
+ *valid = swb->swb_valid & (1<<ix);
+ return &swb->swb_block[ix];
+}
+
+/*
+ * Utility routine to set the valid (written) bit for
+ * a block associated with a pager and offset
+ */
static void
+swap_pager_setvalid(swp, offset, valid)
+ sw_pager_t swp;
+ vm_offset_t offset;
+ int valid;
+{
+ register sw_blk_t swb;
+ int ix;
+
+ ix = offset / (SWB_NPAGES*PAGE_SIZE);
+ if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks)
+ return;
+
+ swb = &swp->sw_blocks[ix];
+ ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE;
+ if (valid)
+ swb->swb_valid |= (1 << ix);
+ else
+ swb->swb_valid &= ~(1 << ix);
+ return;
+}
+
+/*
+ * this routine allocates swap space with a fragmentation
+ * minimization policy.
+ */
+int
+swap_pager_getswapspace( unsigned amount, unsigned *rtval) {
+ unsigned tmpalloc;
+ unsigned nblocksfrag = btodb(SWB_NPAGES*PAGE_SIZE);
+ if( amount < nblocksfrag) {
+ if( rlist_alloc(&swapfrag, amount, rtval))
+ return 1;
+ if( !rlist_alloc(&swapmap, nblocksfrag, &tmpalloc))
+ return 0;
+ rlist_free( &swapfrag, tmpalloc+amount, tmpalloc + nblocksfrag - 1);
+ *rtval = tmpalloc;
+ return 1;
+ }
+ if( !rlist_alloc(&swapmap, amount, rtval))
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * this routine frees swap space with a fragmentation
+ * minimization policy.
+ */
+void
+swap_pager_freeswapspace( unsigned from, unsigned to) {
+ unsigned nblocksfrag = btodb(SWB_NPAGES*PAGE_SIZE);
+ unsigned tmpalloc;
+ if( ((to + 1) - from) >= nblocksfrag) {
+ while( (from + nblocksfrag) <= to + 1) {
+ rlist_free(&swapmap, from, from + nblocksfrag - 1);
+ from += nblocksfrag;
+ }
+ }
+ if( from >= to)
+ return;
+ rlist_free(&swapfrag, from, to);
+ while( rlist_alloc(&swapfrag, nblocksfrag, &tmpalloc)) {
+ rlist_free(&swapmap, tmpalloc, tmpalloc + nblocksfrag-1);
+ }
+}
+/*
+ * this routine frees swap blocks from a specified pager
+ */
+void
+_swap_pager_freespace(swp, start, size)
+ sw_pager_t swp;
+ vm_offset_t start;
+ vm_offset_t size;
+{
+ vm_offset_t i;
+ int s;
+
+ s = splbio();
+ for (i = start; i < round_page(start + size - 1); i += PAGE_SIZE) {
+ int valid;
+ int *addr = swap_pager_diskaddr(swp, i, &valid);
+ if (addr && *addr != SWB_EMPTY) {
+ swap_pager_freeswapspace(*addr, *addr+btodb(PAGE_SIZE) - 1);
+ if( valid) {
+ vm_swap_size += btodb(PAGE_SIZE);
+ swap_pager_setvalid(swp, i, 0);
+ }
+ *addr = SWB_EMPTY;
+ }
+ }
+ swapsizecheck();
+ splx(s);
+}
+
+void
+swap_pager_freespace(pager, start, size)
+ vm_pager_t pager;
+ vm_offset_t start;
+ vm_offset_t size;
+{
+ _swap_pager_freespace((sw_pager_t) pager->pg_data, start, size);
+}
+
+/*
+ * swap_pager_reclaim frees up over-allocated space from all pagers
+ * this eliminates internal fragmentation due to allocation of space
+ * for segments that are never swapped to. It has been written so that
+ * it does not block until the rlist_free operation occurs; it keeps
+ * the queues consistant.
+ */
+
+/*
+ * Maximum number of blocks (pages) to reclaim per pass
+ */
+#define MAXRECLAIM 256
+
+void
+swap_pager_reclaim()
+{
+ vm_pager_t p;
+ sw_pager_t swp;
+ int i, j, k;
+ int s;
+ int reclaimcount;
+ static int reclaims[MAXRECLAIM];
+ static int in_reclaim;
+
+/*
+ * allow only one process to be in the swap_pager_reclaim subroutine
+ */
+ s = splbio();
+ if (in_reclaim) {
+ tsleep((caddr_t) &in_reclaim, PSWP, "swrclm", 0);
+ splx(s);
+ return;
+ }
+ in_reclaim = 1;
+ reclaimcount = 0;
+
+ /* for each pager queue */
+ for (k = 0; swp_qs[k]; k++) {
+
+ p = swp_qs[k]->tqh_first;
+ while (p && (reclaimcount < MAXRECLAIM)) {
+
+ /*
+ * see if any blocks associated with a pager has been
+ * allocated but not used (written)
+ */
+ swp = (sw_pager_t) p->pg_data;
+ for (i = 0; i < swp->sw_nblocks; i++) {
+ sw_blk_t swb = &swp->sw_blocks[i];
+ if( swb->swb_locked)
+ continue;
+ for (j = 0; j < SWB_NPAGES; j++) {
+ if (swb->swb_block[j] != SWB_EMPTY &&
+ (swb->swb_valid & (1 << j)) == 0) {
+ reclaims[reclaimcount++] = swb->swb_block[j];
+ swb->swb_block[j] = SWB_EMPTY;
+ if (reclaimcount >= MAXRECLAIM)
+ goto rfinished;
+ }
+ }
+ }
+ p = p->pg_list.tqe_next;
+ }
+ }
+
+rfinished:
+
+/*
+ * free the blocks that have been added to the reclaim list
+ */
+ for (i = 0; i < reclaimcount; i++) {
+ swap_pager_freeswapspace(reclaims[i], reclaims[i]+btodb(PAGE_SIZE) - 1);
+ swapsizecheck();
+ wakeup((caddr_t) &in_reclaim);
+ }
+
+ splx(s);
+ in_reclaim = 0;
+ wakeup((caddr_t) &in_reclaim);
+}
+
+
+/*
+ * swap_pager_copy copies blocks from one pager to another and
+ * destroys the source pager
+ */
+
+void
+swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
+ vm_pager_t srcpager;
+ vm_offset_t srcoffset;
+ vm_pager_t dstpager;
+ vm_offset_t dstoffset;
+ vm_offset_t offset;
+{
+ sw_pager_t srcswp, dstswp;
+ vm_offset_t i;
+ int s;
+
+ srcswp = (sw_pager_t) srcpager->pg_data;
+ dstswp = (sw_pager_t) dstpager->pg_data;
+
+/*
+ * remove the source pager from the swap_pager internal queue
+ */
+ s = splbio();
+ if (srcswp->sw_flags & SW_NAMED) {
+ TAILQ_REMOVE(&swap_pager_list, srcpager, pg_list);
+ srcswp->sw_flags &= ~SW_NAMED;
+ } else {
+ TAILQ_REMOVE(&swap_pager_un_list, srcpager, pg_list);
+ }
+
+ while (srcswp->sw_poip) {
+ tsleep((caddr_t)srcswp, PVM, "spgout", 0);
+ }
+ splx(s);
+
+/*
+ * clean all of the pages that are currently active and finished
+ */
+ (void) swap_pager_clean();
+
+ s = splbio();
+/*
+ * clear source block before destination object
+ * (release allocated space)
+ */
+ for (i = 0; i < offset + srcoffset; i += PAGE_SIZE) {
+ int valid;
+ int *addr = swap_pager_diskaddr(srcswp, i, &valid);
+ if (addr && *addr != SWB_EMPTY) {
+ swap_pager_freeswapspace(*addr, *addr+btodb(PAGE_SIZE) - 1);
+ if( valid)
+ vm_swap_size += btodb(PAGE_SIZE);
+ swapsizecheck();
+ *addr = SWB_EMPTY;
+ }
+ }
+/*
+ * transfer source to destination
+ */
+ for (i = 0; i < dstswp->sw_osize; i += PAGE_SIZE) {
+ int srcvalid, dstvalid;
+ int *srcaddrp = swap_pager_diskaddr(srcswp, i + offset + srcoffset,
+ &srcvalid);
+ int *dstaddrp;
+ /*
+ * see if the source has space allocated
+ */
+ if (srcaddrp && *srcaddrp != SWB_EMPTY) {
+ /*
+ * if the source is valid and the dest has no space, then
+ * copy the allocation from the srouce to the dest.
+ */
+ if (srcvalid) {
+ dstaddrp = swap_pager_diskaddr(dstswp, i + dstoffset, &dstvalid);
+ /*
+ * if the dest already has a valid block, deallocate the
+ * source block without copying.
+ */
+ if (!dstvalid && dstaddrp && *dstaddrp != SWB_EMPTY) {
+ swap_pager_freeswapspace(*dstaddrp, *dstaddrp+btodb(PAGE_SIZE) - 1);
+ *dstaddrp = SWB_EMPTY;
+ }
+ if (dstaddrp && *dstaddrp == SWB_EMPTY) {
+ *dstaddrp = *srcaddrp;
+ *srcaddrp = SWB_EMPTY;
+ swap_pager_setvalid(dstswp, i + dstoffset, 1);
+ vm_swap_size -= btodb(PAGE_SIZE);
+ }
+ }
+ /*
+ * if the source is not empty at this point, then deallocate the space.
+ */
+ if (*srcaddrp != SWB_EMPTY) {
+ swap_pager_freeswapspace(*srcaddrp, *srcaddrp+btodb(PAGE_SIZE) - 1);
+ if( srcvalid)
+ vm_swap_size += btodb(PAGE_SIZE);
+ *srcaddrp = SWB_EMPTY;
+ }
+ }
+ }
+
+/*
+ * deallocate the rest of the source object
+ */
+ for (i = dstswp->sw_osize + offset + srcoffset; i < srcswp->sw_osize; i += PAGE_SIZE) {
+ int valid;
+ int *srcaddrp = swap_pager_diskaddr(srcswp, i, &valid);
+ if (srcaddrp && *srcaddrp != SWB_EMPTY) {
+ swap_pager_freeswapspace(*srcaddrp, *srcaddrp+btodb(PAGE_SIZE) - 1);
+ if( valid)
+ vm_swap_size += btodb(PAGE_SIZE);
+ *srcaddrp = SWB_EMPTY;
+ }
+ }
+
+ swapsizecheck();
+ splx(s);
+
+ free((caddr_t)srcswp->sw_blocks, M_VMPGDATA);
+ srcswp->sw_blocks = 0;
+ free((caddr_t)srcswp, M_VMPGDATA);
+ srcpager->pg_data = 0;
+ free((caddr_t)srcpager, M_VMPAGER);
+
+ return;
+}
+
+
+void
swap_pager_dealloc(pager)
vm_pager_t pager;
{
- register int i;
+ register int i,j;
register sw_blk_t bp;
register sw_pager_t swp;
- struct swtab *swt;
int s;
-#ifdef DEBUG
- /* save panic time state */
- if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
- return;
- if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
- printf("swpg_dealloc(%x)\n", pager);
-#endif
/*
* Remove from list right away so lookups will fail if we
* block for pageout completion.
*/
+ s = splbio();
swp = (sw_pager_t) pager->pg_data;
if (swp->sw_flags & SW_NAMED) {
TAILQ_REMOVE(&swap_pager_list, pager, pg_list);
swp->sw_flags &= ~SW_NAMED;
+ } else {
+ TAILQ_REMOVE(&swap_pager_un_list, pager, pg_list);
}
-#ifdef DEBUG
- for (swt = swtab; swt->st_osize; swt++)
- if (swp->sw_osize <= swt->st_osize)
- break;
- swt->st_inuse--;
-#endif
-
/*
* Wait for all pageouts to finish and remove
* all entries from cleaning list.
*/
- s = splbio();
+
while (swp->sw_poip) {
- swp->sw_flags |= SW_WANTED;
- (void) tsleep(swp, PVM, "swpgdealloc", 0);
+ tsleep((caddr_t)swp, PVM, "swpout", 0);
}
splx(s);
- swap_pager_clean(B_WRITE);
+
+
+ (void) swap_pager_clean();
/*
* Free left over swap blocks
*/
- for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++)
- if (bp->swb_block) {
-#ifdef DEBUG
- if (swpagerdebug & (SDB_ALLOCBLK|SDB_FULL))
- printf("swpg_dealloc: blk %x\n",
- bp->swb_block);
-#endif
- rmfree(swapmap, swp->sw_bsize, bp->swb_block);
+ s = splbio();
+ for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++) {
+ for (j = 0; j < SWB_NPAGES; j++)
+ if (bp->swb_block[j] != SWB_EMPTY) {
+ swap_pager_freeswapspace((unsigned)bp->swb_block[j],
+ (unsigned)bp->swb_block[j] + btodb(PAGE_SIZE) - 1);
+ if( bp->swb_valid & (1<<j))
+ vm_swap_size += btodb(PAGE_SIZE);
+ bp->swb_block[j] = SWB_EMPTY;
}
+ }
+ splx(s);
+ swapsizecheck();
+
/*
* Free swap management resources
*/
free((caddr_t)swp->sw_blocks, M_VMPGDATA);
+ swp->sw_blocks = 0;
free((caddr_t)swp, M_VMPGDATA);
+ pager->pg_data = 0;
free((caddr_t)pager, M_VMPAGER);
}
-static int
-swap_pager_getpage(pager, mlist, npages, sync)
+/*
+ * swap_pager_getmulti can get multiple pages.
+ */
+int
+swap_pager_getmulti(pager, m, count, reqpage, sync)
+ vm_pager_t pager;
+ vm_page_t *m;
+ int count;
+ int reqpage;
+ boolean_t sync;
+{
+ if( reqpage >= count)
+ panic("swap_pager_getmulti: reqpage >= count\n");
+ return swap_pager_input((sw_pager_t) pager->pg_data, m, count, reqpage);
+}
+
+/*
+ * swap_pager_getpage gets individual pages
+ */
+int
+swap_pager_getpage(pager, m, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t m;
boolean_t sync;
{
-#ifdef DEBUG
- if (swpagerdebug & SDB_FOLLOW)
- printf("swpg_getpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
-#endif
- return(swap_pager_io((sw_pager_t)pager->pg_data,
- mlist, npages, B_READ));
+ vm_page_t marray[1];
+
+ marray[0] = m;
+ return swap_pager_input((sw_pager_t)pager->pg_data, marray, 1, 0);
}
-static int
-swap_pager_putpage(pager, mlist, npages, sync)
+int
+swap_pager_putmulti(pager, m, c, sync, rtvals)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t *m;
+ int c;
boolean_t sync;
+ int *rtvals;
{
int flags;
-#ifdef DEBUG
- if (swpagerdebug & SDB_FOLLOW)
- printf("swpg_putpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
-#endif
if (pager == NULL) {
- swap_pager_clean(B_WRITE);
- return (VM_PAGER_OK); /* ??? */
+ (void) swap_pager_clean();
+ return VM_PAGER_OK;
}
+
flags = B_WRITE;
if (!sync)
flags |= B_ASYNC;
- return(swap_pager_io((sw_pager_t)pager->pg_data,
- mlist, npages, flags));
+
+ return swap_pager_output((sw_pager_t)pager->pg_data, m, c, flags, rtvals);
}
-static boolean_t
-swap_pager_haspage(pager, offset)
+/*
+ * swap_pager_putpage writes individual pages
+ */
+int
+swap_pager_putpage(pager, m, sync)
vm_pager_t pager;
+ vm_page_t m;
+ boolean_t sync;
+{
+ int flags;
+ vm_page_t marray[1];
+ int rtvals[1];
+
+
+ if (pager == NULL) {
+ (void) swap_pager_clean();
+ return VM_PAGER_OK;
+ }
+
+ marray[0] = m;
+ flags = B_WRITE;
+ if (!sync)
+ flags |= B_ASYNC;
+
+ swap_pager_output((sw_pager_t)pager->pg_data, marray, 1, flags, rtvals);
+
+ return rtvals[0];
+}
+
+static inline int
+const swap_pager_block_index(swp, offset)
+ sw_pager_t swp;
+ vm_offset_t offset;
+{
+ return (offset / (SWB_NPAGES*PAGE_SIZE));
+}
+
+static inline int
+const swap_pager_block_offset(swp, offset)
+ sw_pager_t swp;
+ vm_offset_t offset;
+{
+ return ((offset % (PAGE_SIZE*SWB_NPAGES)) / PAGE_SIZE);
+}
+
+/*
+ * _swap_pager_haspage returns TRUE if the pager has data that has
+ * been written out.
+ */
+static boolean_t
+_swap_pager_haspage(swp, offset)
+ sw_pager_t swp;
vm_offset_t offset;
{
- register sw_pager_t swp;
register sw_blk_t swb;
int ix;
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
- printf("swpg_haspage(%x, %x) ", pager, offset);
-#endif
- swp = (sw_pager_t) pager->pg_data;
- ix = offset / dbtob(swp->sw_bsize);
+ ix = offset / (SWB_NPAGES*PAGE_SIZE);
if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FAIL|SDB_FOLLOW|SDB_ALLOCBLK))
- printf("swpg_haspage: %x bad offset %x, ix %x\n",
- swp->sw_blocks, offset, ix);
-#endif
return(FALSE);
}
swb = &swp->sw_blocks[ix];
- if (swb->swb_block)
- ix = atop(offset % dbtob(swp->sw_bsize));
-#ifdef DEBUG
- if (swpagerdebug & SDB_ALLOCBLK)
- printf("%x blk %x+%x ", swp->sw_blocks, swb->swb_block, ix);
- if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
- printf("-> %c\n",
- "FT"[swb->swb_block && (swb->swb_mask & (1 << ix))]);
-#endif
- if (swb->swb_block && (swb->swb_mask & (1 << ix)))
- return(TRUE);
+ ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE;
+ if (swb->swb_block[ix] != SWB_EMPTY) {
+ if (swb->swb_valid & (1 << ix))
+ return TRUE;
+ }
+
return(FALSE);
}
+/*
+ * swap_pager_haspage is the externally accessible version of
+ * _swap_pager_haspage above. this routine takes a vm_pager_t
+ * for an argument instead of sw_pager_t.
+ */
+boolean_t
+swap_pager_haspage(pager, offset)
+ vm_pager_t pager;
+ vm_offset_t offset;
+{
+ return _swap_pager_haspage((sw_pager_t) pager->pg_data, offset);
+}
+
+/*
+ * swap_pager_freepage is a convienience routine that clears the busy
+ * bit and deallocates a page.
+ */
static void
-swap_pager_cluster(pager, offset, loffset, hoffset)
- vm_pager_t pager;
- vm_offset_t offset;
- vm_offset_t *loffset;
- vm_offset_t *hoffset;
+swap_pager_freepage(m)
+ vm_page_t m;
{
- sw_pager_t swp;
- register int bsize;
- vm_offset_t loff, hoff;
+ PAGE_WAKEUP(m);
+ vm_page_free(m);
+}
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FOLLOW|SDB_CLUSTER))
- printf("swpg_cluster(%x, %x) ", pager, offset);
-#endif
- swp = (sw_pager_t) pager->pg_data;
- bsize = dbtob(swp->sw_bsize);
- if (bsize > swap_pager_maxcluster)
- bsize = swap_pager_maxcluster;
-
- loff = offset - (offset % bsize);
- if (loff >= swp->sw_osize)
- panic("swap_pager_cluster: bad offset");
-
- hoff = loff + bsize;
- if (hoff > swp->sw_osize)
- hoff = swp->sw_osize;
-
- *loffset = loff;
- *hoffset = hoff;
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FOLLOW|SDB_CLUSTER))
- printf("returns [%x-%x]\n", loff, hoff);
-#endif
+/*
+ * swap_pager_ridpages is a convienience routine that deallocates all
+ * but the required page. this is usually used in error returns that
+ * need to invalidate the "extra" readahead pages.
+ */
+static void
+swap_pager_ridpages(m, count, reqpage)
+ vm_page_t *m;
+ int count;
+ int reqpage;
+{
+ int i;
+ for (i = 0; i < count; i++)
+ if (i != reqpage)
+ swap_pager_freepage(m[i]);
}
+int swapwritecount=0;
+
/*
- * Scaled down version of swap().
- * Assumes that PAGE_SIZE < MAXPHYS; i.e. only one operation needed.
- * BOGUS: lower level IO routines expect a KVA so we have to map our
- * provided physical page into the KVA to keep them happy.
+ * swap_pager_iodone1 is the completion routine for both reads and async writes
*/
-static int
-swap_pager_io(swp, mlist, npages, flags)
+void
+swap_pager_iodone1(bp)
+ struct buf *bp;
+{
+ bp->b_flags |= B_DONE;
+ bp->b_flags &= ~B_ASYNC;
+ wakeup((caddr_t)bp);
+/*
+ if ((bp->b_flags & B_READ) == 0)
+ vwakeup(bp);
+*/
+}
+
+
+int
+swap_pager_input(swp, m, count, reqpage)
register sw_pager_t swp;
- vm_page_t *mlist;
- int npages;
- int flags;
+ vm_page_t *m;
+ int count, reqpage;
{
register struct buf *bp;
- register sw_blk_t swb;
+ sw_blk_t swb[count];
register int s;
- int ix, mask;
+ int i;
boolean_t rv;
- vm_offset_t kva, off;
+ vm_offset_t kva, off[count];
swp_clean_t spc;
- vm_page_t m;
+ vm_offset_t paging_offset;
+ vm_object_t object;
+ int reqaddr[count];
-#ifdef DEBUG
- /* save panic time state */
- if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
- return (VM_PAGER_FAIL); /* XXX: correct return? */
- if (swpagerdebug & (SDB_FOLLOW|SDB_IO))
- printf("swpg_io(%x, %x, %x, %x)\n", swp, mlist, npages, flags);
- if (flags & B_READ) {
- if (flags & B_ASYNC)
- panic("swap_pager_io: cannot do ASYNC reads");
- if (npages != 1)
- panic("swap_pager_io: cannot do clustered reads");
- }
-#endif
+ int first, last;
+ int failed;
+ int reqdskregion;
+ object = m[reqpage]->object;
+ paging_offset = object->paging_offset;
/*
* First determine if the page exists in the pager if this is
* a sync read. This quickly handles cases where we are
* following shadow chains looking for the top level object
* with the page.
*/
- m = *mlist;
- off = m->offset + m->object->paging_offset;
- ix = off / dbtob(swp->sw_bsize);
- if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
-#ifdef DEBUG
- if ((flags & B_READ) == 0 && (swpagerdebug & SDB_ANOM)) {
- printf("swap_pager_io: no swap block on write\n");
- return(VM_PAGER_BAD);
- }
-#endif
+ if (swp->sw_blocks == NULL) {
+ swap_pager_ridpages(m, count, reqpage);
return(VM_PAGER_FAIL);
}
- swb = &swp->sw_blocks[ix];
- off = off % dbtob(swp->sw_bsize);
- if ((flags & B_READ) &&
- (swb->swb_block == 0 || (swb->swb_mask & (1 << atop(off))) == 0))
+
+ for(i = 0; i < count; i++) {
+ vm_offset_t foff = m[i]->offset + paging_offset;
+ int ix = swap_pager_block_index(swp, foff);
+ if (ix >= swp->sw_nblocks) {
+ int j;
+ if( i <= reqpage) {
+ swap_pager_ridpages(m, count, reqpage);
+ return(VM_PAGER_FAIL);
+ }
+ for(j = i; j < count; j++) {
+ swap_pager_freepage(m[j]);
+ }
+ count = i;
+ break;
+ }
+
+ swb[i] = &swp->sw_blocks[ix];
+ off[i] = swap_pager_block_offset(swp, foff);
+ reqaddr[i] = swb[i]->swb_block[off[i]];
+ }
+
+ /* make sure that our required input request is existant */
+
+ if (reqaddr[reqpage] == SWB_EMPTY ||
+ (swb[reqpage]->swb_valid & (1 << off[reqpage])) == 0) {
+ swap_pager_ridpages(m, count, reqpage);
return(VM_PAGER_FAIL);
+ }
+
+
+ reqdskregion = reqaddr[reqpage] / dmmax;
/*
- * For reads (pageins) and synchronous writes, we clean up
- * all completed async pageouts.
+ * search backwards for the first contiguous page to transfer
*/
- if ((flags & B_ASYNC) == 0) {
- s = splbio();
- swap_pager_clean(flags&B_READ);
-#ifdef DEBUG
- if (swpagerdebug & SDB_PARANOIA)
- swap_pager_clean_check(mlist, npages, flags&B_READ);
-#endif
- splx(s);
+ failed = 0;
+ first = 0;
+ for (i = reqpage - 1; i >= 0; --i) {
+ if ( failed || (reqaddr[i] == SWB_EMPTY) ||
+ (swb[i]->swb_valid & (1 << off[i])) == 0 ||
+ (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
+ ((reqaddr[i] / dmmax) != reqdskregion)) {
+ failed = 1;
+ swap_pager_freepage(m[i]);
+ if (first == 0)
+ first = i + 1;
+ }
}
/*
- * For async writes (pageouts), we cleanup completed pageouts so
- * that all available resources are freed. Also tells us if this
- * page is already being cleaned. If it is, or no resources
- * are available, we try again later.
+ * search forwards for the last contiguous page to transfer
*/
- else {
- swap_pager_clean(B_WRITE);
-#ifdef DEBUG
- if (swpagerdebug & SDB_PARANOIA)
- swap_pager_clean_check(mlist, npages, B_WRITE);
-#endif
- if (swap_pager_free.tqh_first == NULL) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_FAIL)
- printf("%s: no available io headers\n",
- "swap_pager_io");
-#endif
- return(VM_PAGER_AGAIN);
+ failed = 0;
+ last = count;
+ for (i = reqpage + 1; i < count; i++) {
+ if ( failed || (reqaddr[i] == SWB_EMPTY) ||
+ (swb[i]->swb_valid & (1 << off[i])) == 0 ||
+ (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
+ ((reqaddr[i] / dmmax) != reqdskregion)) {
+ failed = 1;
+ swap_pager_freepage(m[i]);
+ if (last == count)
+ last = i;
+ }
+ }
+
+ count = last;
+ if (first != 0) {
+ for (i = first; i < count; i++) {
+ m[i-first] = m[i];
+ reqaddr[i-first] = reqaddr[i];
+ off[i-first] = off[i];
}
+ count -= first;
+ reqpage -= first;
}
+ ++swb[reqpage]->swb_locked;
+
/*
- * Allocate a swap block if necessary.
+ * at this point:
+ * "m" is a pointer to the array of vm_page_t for paging I/O
+ * "count" is the number of vm_page_t entries represented by "m"
+ * "object" is the vm_object_t for I/O
+ * "reqpage" is the index into "m" for the page actually faulted
*/
- if (swb->swb_block == 0) {
- swb->swb_block = rmalloc(swapmap, swp->sw_bsize);
- if (swb->swb_block == 0) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_FAIL)
- printf("swpg_io: rmalloc of %x failed\n",
- swp->sw_bsize);
-#endif
- /*
- * XXX this is technically a resource shortage that
- * should return AGAIN, but the situation isn't likely
- * to be remedied just by delaying a little while and
- * trying again (the pageout daemon's current response
- * to AGAIN) so we just return FAIL.
- */
- return(VM_PAGER_FAIL);
+
+ spc = NULL; /* we might not use an spc data structure */
+ kva = 0;
+
+ /*
+ * we allocate a new kva for transfers > 1 page
+ * but for transfers == 1 page, the swap_pager_free list contains
+ * entries that have pre-allocated kva's (for efficiency).
+ */
+ if (count > 1) {
+ kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE);
+ }
+
+
+ if (!kva) {
+ /*
+ * if a kva has not been allocated, we can only do a one page transfer,
+ * so we free the other pages that might have been allocated by
+ * vm_fault.
+ */
+ swap_pager_ridpages(m, count, reqpage);
+ m[0] = m[reqpage];
+ reqaddr[0] = reqaddr[reqpage];
+
+ count = 1;
+ reqpage = 0;
+ /*
+ * get a swap pager clean data structure, block until we get it
+ */
+ if (swap_pager_free.tqh_first == NULL) {
+ s = splbio();
+ if( curproc == pageproc)
+ (void) swap_pager_clean();
+ else
+ wakeup((caddr_t) &vm_pages_needed);
+ while (swap_pager_free.tqh_first == NULL) {
+ swap_pager_needflags |= SWAP_FREE_NEEDED;
+ tsleep((caddr_t)&swap_pager_free,
+ PVM, "swpfre", 0);
+ if( curproc == pageproc)
+ (void) swap_pager_clean();
+ else
+ wakeup((caddr_t) &vm_pages_needed);
+ }
+ splx(s);
}
-#ifdef DEBUG
- if (swpagerdebug & (SDB_FULL|SDB_ALLOCBLK))
- printf("swpg_io: %x alloc blk %x at ix %x\n",
- swp->sw_blocks, swb->swb_block, ix);
-#endif
+ spc = swap_pager_free.tqh_first;
+ TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
+ kva = spc->spc_kva;
}
+
/*
- * Allocate a kernel virtual address and initialize so that PTE
- * is available for lower level IO drivers.
+ * map our page(s) into kva for input
*/
- kva = vm_pager_map_pages(mlist, npages, !(flags & B_ASYNC));
- if (kva == NULL) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_FAIL)
- printf("%s: no KVA space to map pages\n",
- "swap_pager_io");
-#endif
- return(VM_PAGER_AGAIN);
+ for (i = 0; i < count; i++) {
+ pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
}
+ pmap_update();
+
/*
- * Get a swap buffer header and initialize it.
+ * Get a swap buffer header and perform the IO
*/
- s = splbio();
- while (bswlist.b_actf == NULL) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_ANOM)
- printf("swap_pager_io: wait on swbuf for %x (%d)\n",
- m, flags);
-#endif
- bswlist.b_flags |= B_WANTED;
- tsleep((caddr_t)&bswlist, PSWP+1, "swpgiobuf", 0);
+ if( spc) {
+ bp = spc->spc_bp;
+ bzero(bp, sizeof *bp);
+ bp->b_spc = spc;
+ } else {
+ bp = getpbuf();
}
- bp = bswlist.b_actf;
- bswlist.b_actf = bp->b_actf;
- splx(s);
- bp->b_flags = B_BUSY | (flags & B_READ);
+
+ s = splbio();
+ bp->b_flags = B_BUSY | B_READ | B_CALL;
+ bp->b_iodone = swap_pager_iodone1;
bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */
- bp->b_data = (caddr_t)kva;
- bp->b_blkno = swb->swb_block + btodb(off);
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ crhold(bp->b_rcred);
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva;
+ bp->b_blkno = reqaddr[0];
+ bp->b_bcount = PAGE_SIZE*count;
+ bp->b_bufsize = PAGE_SIZE*count;
+
+/*
VHOLD(swapdev_vp);
bp->b_vp = swapdev_vp;
if (swapdev_vp->v_type == VBLK)
bp->b_dev = swapdev_vp->v_rdev;
- bp->b_bcount = npages * PAGE_SIZE;
+*/
+ bgetvp( swapdev_vp, bp);
+
+ swp->sw_piip++;
/*
- * For writes we set up additional buffer fields, record a pageout
- * in progress and mark that these swap blocks are now allocated.
+ * perform the I/O
*/
- if ((bp->b_flags & B_READ) == 0) {
- bp->b_dirtyoff = 0;
- bp->b_dirtyend = npages * PAGE_SIZE;
- swapdev_vp->v_numoutput++;
+ VOP_STRATEGY(bp);
+
+ /*
+ * wait for the sync I/O to complete
+ */
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "swread", 0);
+ }
+ rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK;
+ bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_CALL|B_DONE);
+
+ --swp->sw_piip;
+ if (swp->sw_piip == 0)
+ wakeup((caddr_t) swp);
+
+ /*
+ * relpbuf does this, but we maintain our own buffer
+ * list also...
+ */
+ if (bp->b_vp)
+ brelvp(bp);
+
+ splx(s);
+ --swb[reqpage]->swb_locked;
+
+ /*
+ * remove the mapping for kernel virtual
+ */
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + count * PAGE_SIZE);
+
+ if (spc) {
+ /*
+ * if we have used an spc, we need to free it.
+ */
+ if( bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+ TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
+ if (swap_pager_needflags & SWAP_FREE_NEEDED) {
+ swap_pager_needflags &= ~SWAP_FREE_NEEDED;
+ wakeup((caddr_t)&swap_pager_free);
+ }
+ } else {
+ /*
+ * free the kernel virtual addresses
+ */
+ kmem_free_wakeup(pager_map, kva, count * PAGE_SIZE);
+ /*
+ * release the physical I/O buffer
+ */
+ relpbuf(bp);
+ /*
+ * finish up input if everything is ok
+ */
+ if( rv == VM_PAGER_OK) {
+ for (i = 0; i < count; i++) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ m[i]->flags |= PG_CLEAN;
+ m[i]->flags &= ~PG_LAUNDRY;
+ if (i != reqpage) {
+ /*
+ * whether or not to leave the page activated
+ * is up in the air, but we should put the page
+ * on a page queue somewhere. (it already is in
+ * the object).
+ * After some emperical results, it is best
+ * to deactivate the readahead pages.
+ */
+ vm_page_deactivate(m[i]);
+ m[i]->act_count = 2;
+
+ /*
+ * just in case someone was asking for this
+ * page we now tell them that it is ok to use
+ */
+ m[i]->flags &= ~PG_FAKE;
+ PAGE_WAKEUP(m[i]);
+ }
+ }
+ if( swap_pager_full) {
+ _swap_pager_freespace( swp, m[0]->offset+paging_offset, count*PAGE_SIZE);
+ }
+ } else {
+ swap_pager_ridpages(m, count, reqpage);
+ }
+ }
+ return(rv);
+}
+
+int
+swap_pager_output(swp, m, count, flags, rtvals)
+ register sw_pager_t swp;
+ vm_page_t *m;
+ int count;
+ int flags;
+ int *rtvals;
+{
+ register struct buf *bp;
+ sw_blk_t swb[count];
+ register int s;
+ int i, j, ix;
+ boolean_t rv;
+ vm_offset_t kva, off, foff;
+ swp_clean_t spc;
+ vm_offset_t paging_offset;
+ vm_object_t object;
+ int reqaddr[count];
+ int failed;
+
+/*
+ if( count > 1)
+ printf("off: 0x%x, count: %d\n", m[0]->offset, count);
+*/
+ spc = NULL;
+
+ object = m[0]->object;
+ paging_offset = object->paging_offset;
+
+ failed = 0;
+ for(j=0;j<count;j++) {
+ foff = m[j]->offset + paging_offset;
+ ix = swap_pager_block_index(swp, foff);
+ swb[j] = 0;
+ if( swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
+ rtvals[j] = VM_PAGER_FAIL;
+ failed = 1;
+ continue;
+ } else {
+ rtvals[j] = VM_PAGER_OK;
+ }
+ swb[j] = &swp->sw_blocks[ix];
+ ++swb[j]->swb_locked;
+ if( failed) {
+ rtvals[j] = VM_PAGER_FAIL;
+ continue;
+ }
+ off = swap_pager_block_offset(swp, foff);
+ reqaddr[j] = swb[j]->swb_block[off];
+ if( reqaddr[j] == SWB_EMPTY) {
+ int blk;
+ int tries;
+ int ntoget;
+ tries = 0;
+ s = splbio();
+
+ /*
+ * if any other pages have been allocated in this block, we
+ * only try to get one page.
+ */
+ for (i = 0; i < SWB_NPAGES; i++) {
+ if (swb[j]->swb_block[i] != SWB_EMPTY)
+ break;
+ }
+
+
+ ntoget = (i == SWB_NPAGES) ? SWB_NPAGES : 1;
+ /*
+ * this code is alittle conservative, but works
+ * (the intent of this code is to allocate small chunks
+ * for small objects)
+ */
+ if( (m[j]->offset == 0) && (ntoget*PAGE_SIZE > object->size)) {
+ ntoget = (object->size + (PAGE_SIZE-1))/PAGE_SIZE;
+ }
+
+retrygetspace:
+ if (!swap_pager_full && ntoget > 1 &&
+ swap_pager_getswapspace(ntoget * btodb(PAGE_SIZE), &blk)) {
+
+ for (i = 0; i < ntoget; i++) {
+ swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i;
+ swb[j]->swb_valid = 0;
+ }
+
+ reqaddr[j] = swb[j]->swb_block[off];
+ } else if (!swap_pager_getswapspace(btodb(PAGE_SIZE),
+ &swb[j]->swb_block[off])) {
+ /*
+ * if the allocation has failed, we try to reclaim space and
+ * retry.
+ */
+ if (++tries == 1) {
+ swap_pager_reclaim();
+ goto retrygetspace;
+ }
+ rtvals[j] = VM_PAGER_AGAIN;
+ failed = 1;
+ } else {
+ reqaddr[j] = swb[j]->swb_block[off];
+ swb[j]->swb_valid &= ~(1<<off);
+ }
+ splx(s);
+ }
+ }
+
+ /*
+ * search forwards for the last contiguous page to transfer
+ */
+ failed = 0;
+ for (i = 0; i < count; i++) {
+ if( failed || (reqaddr[i] != reqaddr[0] + i*btodb(PAGE_SIZE)) ||
+ (reqaddr[i] / dmmax) != (reqaddr[0] / dmmax) ||
+ (rtvals[i] != VM_PAGER_OK)) {
+ failed = 1;
+ if( rtvals[i] == VM_PAGER_OK)
+ rtvals[i] = VM_PAGER_AGAIN;
+ }
+ }
+
+ for(i = 0; i < count; i++) {
+ if( rtvals[i] != VM_PAGER_OK) {
+ if( swb[i])
+ --swb[i]->swb_locked;
+ }
+ }
+
+ for(i = 0; i < count; i++)
+ if( rtvals[i] != VM_PAGER_OK)
+ break;
+
+ if( i == 0) {
+ return VM_PAGER_AGAIN;
+ }
+
+ count = i;
+ for(i=0;i<count;i++) {
+ if( reqaddr[i] == SWB_EMPTY)
+ printf("I/O to empty block????\n");
+ }
+
+ /*
+ */
+
+ /*
+ * For synchronous writes, we clean up
+ * all completed async pageouts.
+ */
+ if ((flags & B_ASYNC) == 0) {
+ swap_pager_clean();
+ }
+
+ kva = 0;
+
+ /*
+ * we allocate a new kva for transfers > 1 page
+ * but for transfers == 1 page, the swap_pager_free list contains
+ * entries that have pre-allocated kva's (for efficiency).
+ */
+ if ( count > 1) {
+ kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE);
+ if( !kva) {
+ for (i = 0; i < count; i++) {
+ if( swb[i])
+ --swb[i]->swb_locked;
+ rtvals[i] = VM_PAGER_AGAIN;
+ }
+ return VM_PAGER_AGAIN;
+ }
+ }
+
+ /*
+ * get a swap pager clean data structure, block until we get it
+ */
+ if (swap_pager_free.tqh_first == NULL) {
+/*
+ if (flags & B_ASYNC) {
+ for(i=0;i<count;i++) {
+ rtvals[i] = VM_PAGER_AGAIN;
+ if( swb[i])
+ --swb[i]->swb_locked;
+ }
+ return VM_PAGER_AGAIN;
+ }
+*/
+
s = splbio();
- swp->sw_poip++;
+ if( curproc == pageproc)
+ (void) swap_pager_clean();
+ else
+ wakeup((caddr_t) &vm_pages_needed);
+ while (swap_pager_free.tqh_first == NULL) {
+ swap_pager_needflags |= SWAP_FREE_NEEDED;
+ tsleep((caddr_t)&swap_pager_free,
+ PVM, "swpfre", 0);
+ if( curproc == pageproc)
+ (void) swap_pager_clean();
+ else
+ wakeup((caddr_t) &vm_pages_needed);
+ }
splx(s);
- mask = (~(~0 << npages)) << atop(off);
-#ifdef DEBUG
- swap_pager_poip++;
- if (swpagerdebug & SDB_WRITE)
- printf("swpg_io: write: bp=%x swp=%x poip=%d\n",
- bp, swp, swp->sw_poip);
- if ((swpagerdebug & SDB_ALLOCBLK) &&
- (swb->swb_mask & mask) != mask)
- printf("swpg_io: %x write %d pages at %x+%x\n",
- swp->sw_blocks, npages, swb->swb_block,
- atop(off));
- if (swpagerdebug & SDB_CLUSTER)
- printf("swpg_io: off=%x, npg=%x, mask=%x, bmask=%x\n",
- off, npages, mask, swb->swb_mask);
-#endif
- swb->swb_mask |= mask;
}
+
+ spc = swap_pager_free.tqh_first;
+ TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
+ if( !kva) {
+ kva = spc->spc_kva;
+ spc->spc_altkva = 0;
+ } else {
+ spc->spc_altkva = kva;
+ }
+
+ /*
+ * map our page(s) into kva for I/O
+ */
+ for (i = 0; i < count; i++) {
+ pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
+ }
+ pmap_update();
+
/*
- * If this is an async write we set up still more buffer fields
+ * get the base I/O offset into the swap file
+ */
+ for(i=0;i<count;i++) {
+ foff = m[i]->offset + paging_offset;
+ off = swap_pager_block_offset(swp, foff);
+ /*
+ * if we are setting the valid bit anew,
+ * then diminish the swap free space
+ */
+ if( (swb[i]->swb_valid & (1 << off)) == 0)
+ vm_swap_size -= btodb(PAGE_SIZE);
+
+ /*
+ * set the valid bit
+ */
+ swb[i]->swb_valid |= (1 << off);
+ /*
+ * and unlock the data structure
+ */
+ --swb[i]->swb_locked;
+ }
+
+ s = splbio();
+ /*
+ * Get a swap buffer header and perform the IO
+ */
+ bp = spc->spc_bp;
+ bzero(bp, sizeof *bp);
+ bp->b_spc = spc;
+
+ bp->b_flags = B_BUSY;
+ bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ crhold(bp->b_rcred);
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva;
+ bp->b_blkno = reqaddr[0];
+ bgetvp( swapdev_vp, bp);
+/*
+ VHOLD(swapdev_vp);
+ bp->b_vp = swapdev_vp;
+ if (swapdev_vp->v_type == VBLK)
+ bp->b_dev = swapdev_vp->v_rdev;
+*/
+ bp->b_bcount = PAGE_SIZE*count;
+ bp->b_bufsize = PAGE_SIZE*count;
+ swapdev_vp->v_numoutput++;
+
+ /*
+ * If this is an async write we set up additional buffer fields
* and place a "cleaning" entry on the inuse queue.
*/
- if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
-#ifdef DEBUG
- if (swap_pager_free.tqh_first == NULL)
- panic("swpg_io: lost spc");
-#endif
- spc = swap_pager_free.tqh_first;
- TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
-#ifdef DEBUG
- if (spc->spc_flags != SPC_FREE)
- panic("swpg_io: bad free spc");
-#endif
- spc->spc_flags = SPC_BUSY;
- spc->spc_bp = bp;
+ if ( flags & B_ASYNC ) {
+ spc->spc_flags = 0;
spc->spc_swp = swp;
- spc->spc_kva = kva;
+ for(i=0;i<count;i++)
+ spc->spc_m[i] = m[i];
+ spc->spc_count = count;
/*
- * Record the first page. This allows swap_pager_clean
- * to efficiently handle the common case of a single page.
- * For clusters, it allows us to locate the object easily
- * and we then reconstruct the rest of the mlist from spc_kva.
+ * the completion routine for async writes
*/
- spc->spc_m = m;
- spc->spc_npages = npages;
bp->b_flags |= B_CALL;
bp->b_iodone = swap_pager_iodone;
- s = splbio();
+ bp->b_dirtyoff = 0;
+ bp->b_dirtyend = bp->b_bcount;
+ swp->sw_poip++;
TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list);
+ } else {
+ swp->sw_poip++;
+ bp->b_flags |= B_CALL;
+ bp->b_iodone = swap_pager_iodone1;
+ }
+ /*
+ * perform the I/O
+ */
+ VOP_STRATEGY(bp);
+ if ((flags & (B_READ|B_ASYNC)) == B_ASYNC ) {
+ if ((bp->b_flags & B_DONE) == B_DONE) {
+ swap_pager_clean();
+ }
splx(s);
+ for(i=0;i<count;i++) {
+ rtvals[i] = VM_PAGER_PEND;
+ }
+ return VM_PAGER_PEND;
}
/*
- * Finally, start the IO operation.
- * If it is async we are all done, otherwise we must wait for
- * completion and cleanup afterwards.
+ * wait for the sync I/O to complete
*/
-#ifdef DEBUG
- if (swpagerdebug & SDB_IO)
- printf("swpg_io: IO start: bp %x, db %x, va %x, pa %x\n",
- bp, swb->swb_block+btodb(off), kva, VM_PAGE_TO_PHYS(m));
-#endif
- VOP_STRATEGY(bp);
- if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
-#ifdef DEBUG
- if (swpagerdebug & SDB_IO)
- printf("swpg_io: IO started: bp %x\n", bp);
-#endif
- return(VM_PAGER_PEND);
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "swwrt", 0);
}
- s = splbio();
-#ifdef DEBUG
- if (flags & B_READ)
- swap_pager_piip++;
- else
- swap_pager_poip++;
-#endif
- while ((bp->b_flags & B_DONE) == 0)
- (void) tsleep(bp, PVM, "swpgio", 0);
- if ((flags & B_READ) == 0)
- --swp->sw_poip;
-#ifdef DEBUG
- if (flags & B_READ)
- --swap_pager_piip;
- else
- --swap_pager_poip;
-#endif
- rv = (bp->b_flags & B_ERROR) ? VM_PAGER_ERROR : VM_PAGER_OK;
- bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
- bp->b_actf = bswlist.b_actf;
- bswlist.b_actf = bp;
+ rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK;
+ bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_CALL|B_DONE);
+
+ --swp->sw_poip;
+ if (swp->sw_poip == 0)
+ wakeup((caddr_t) swp);
+
if (bp->b_vp)
brelvp(bp);
- if (bswlist.b_flags & B_WANTED) {
- bswlist.b_flags &= ~B_WANTED;
- wakeup(&bswlist);
+
+ splx(s);
+
+ /*
+ * remove the mapping for kernel virtual
+ */
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + count * PAGE_SIZE);
+
+ /*
+ * if we have written the page, then indicate that the page
+ * is clean.
+ */
+ if (rv == VM_PAGER_OK) {
+ for(i=0;i<count;i++) {
+ if( rtvals[i] == VM_PAGER_OK) {
+ m[i]->flags |= PG_CLEAN;
+ m[i]->flags &= ~PG_LAUNDRY;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ /*
+ * optimization, if a page has been read during the
+ * pageout process, we activate it.
+ */
+ if ( (m[i]->flags & PG_ACTIVE) == 0 &&
+ pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))
+ vm_page_activate(m[i]);
+ }
+ }
+ } else {
+ for(i=0;i<count;i++) {
+ rtvals[i] = rv;
+ m[i]->flags |= PG_LAUNDRY;
+ }
}
- if ((flags & B_READ) == 0 && rv == VM_PAGER_OK) {
- m->flags |= PG_CLEAN;
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+
+ if( spc->spc_altkva)
+ kmem_free_wakeup(pager_map, kva, count * PAGE_SIZE);
+
+ if( bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+ TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
+ if (swap_pager_needflags & SWAP_FREE_NEEDED) {
+ swap_pager_needflags &= ~SWAP_FREE_NEEDED;
+ wakeup((caddr_t)&swap_pager_free);
}
- splx(s);
-#ifdef DEBUG
- if (swpagerdebug & SDB_IO)
- printf("swpg_io: IO done: bp %x, rv %d\n", bp, rv);
- if ((swpagerdebug & SDB_FAIL) && rv == VM_PAGER_ERROR)
- printf("swpg_io: IO error\n");
-#endif
- vm_pager_unmap_pages(kva, npages);
+
return(rv);
}
-static void
-swap_pager_clean(rw)
- int rw;
+boolean_t
+swap_pager_clean()
{
- register swp_clean_t spc;
- register int s, i;
- vm_object_t object;
- vm_page_t m;
-
-#ifdef DEBUG
- /* save panic time state */
- if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
- return;
- if (swpagerdebug & SDB_FOLLOW)
- printf("swpg_clean(%x)\n", rw);
-#endif
+ register swp_clean_t spc, tspc;
+ register int s;
+ tspc = NULL;
+ if (swap_pager_done.tqh_first == NULL)
+ return FALSE;
for (;;) {
+ s = splbio();
/*
- * Look up and removal from inuse list must be done
+ * Look up and removal from done list must be done
* at splbio() to avoid conflicts with swap_pager_iodone.
*/
- s = splbio();
- for (spc = swap_pager_inuse.tqh_first;
- spc != NULL;
- spc = spc->spc_list.tqe_next) {
- /*
- * If the operation is done, remove it from the
- * list and process it.
- *
- * XXX if we can't get the object lock we also
- * leave it on the list and try again later.
- * Is there something better we could do?
- */
- if ((spc->spc_flags & SPC_DONE) &&
- vm_object_lock_try(spc->spc_m->object)) {
- TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
- break;
+ while (spc = swap_pager_done.tqh_first) {
+ if( spc->spc_altkva) {
+ pmap_remove(vm_map_pmap(pager_map), spc->spc_altkva, spc->spc_altkva + spc->spc_count * PAGE_SIZE);
+ kmem_free_wakeup(pager_map, spc->spc_altkva, spc->spc_count * PAGE_SIZE);
+ spc->spc_altkva = 0;
+ } else {
+ pmap_remove(vm_map_pmap(pager_map), spc->spc_kva, spc->spc_kva + PAGE_SIZE);
}
+ swap_pager_finish(spc);
+ TAILQ_REMOVE(&swap_pager_done, spc, spc_list);
+ goto doclean;
}
- splx(s);
/*
* No operations done, thats all we can do for now.
*/
- if (spc == NULL)
- break;
- /*
- * Found a completed operation so finish it off.
- * Note: no longer at splbio since entry is off the list.
- */
- m = spc->spc_m;
- object = m->object;
+ splx(s);
+ break;
/*
- * Process each page in the cluster.
- * The first page is explicitly kept in the cleaning
- * entry, others must be reconstructed from the KVA.
+ * The desired page was found to be busy earlier in
+ * the scan but has since completed.
*/
- for (i = 0; i < spc->spc_npages; i++) {
- if (i)
- m = vm_pager_atop(spc->spc_kva + ptoa(i));
- /*
- * If no error mark as clean and inform the pmap
- * system. If there was an error, mark as dirty
- * so we will try again.
- *
- * XXX could get stuck doing this, should give up
- * after awhile.
- */
- if (spc->spc_flags & SPC_ERROR) {
- printf("%s: clean of page %x failed\n",
- "swap_pager_clean",
- VM_PAGE_TO_PHYS(m));
- m->flags |= PG_LAUNDRY;
- } else {
- m->flags |= PG_CLEAN;
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
- }
- m->flags &= ~PG_BUSY;
- PAGE_WAKEUP(m);
+doclean:
+ if (tspc && tspc == spc) {
+ tspc = NULL;
}
+ spc->spc_flags = 0;
+ TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
+ if (swap_pager_needflags & SWAP_FREE_NEEDED) {
+ swap_pager_needflags &= ~SWAP_FREE_NEEDED;
+ wakeup((caddr_t)&swap_pager_free);
+ }
+ ++cleandone;
+ splx(s);
+ }
- /*
- * Done with the object, decrement the paging count
- * and unlock it.
- */
- if (--object->paging_in_progress == 0)
- wakeup(object);
- vm_object_unlock(object);
+ return(tspc ? TRUE : FALSE);
+}
+void
+swap_pager_finish(spc)
+ register swp_clean_t spc;
+{
+ vm_object_t object = spc->spc_m[0]->object;
+ int i;
+
+ if ((object->paging_in_progress -= spc->spc_count) == 0)
+ thread_wakeup((int) object);
+
+ /*
+ * If no error mark as clean and inform the pmap system.
+ * If error, mark as dirty so we will try again.
+ * (XXX could get stuck doing this, should give up after awhile)
+ */
+ if (spc->spc_flags & SPC_ERROR) {
+ for(i=0;i<spc->spc_count;i++) {
+ printf("swap_pager_finish: clean of page %x failed\n",
+ VM_PAGE_TO_PHYS(spc->spc_m[i]));
+ spc->spc_m[i]->flags |= PG_LAUNDRY;
+ }
+ } else {
+ for(i=0;i<spc->spc_count;i++) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
+ spc->spc_m[i]->flags |= PG_CLEAN;
+ }
+ }
+
+
+ for(i=0;i<spc->spc_count;i++) {
/*
- * Free up KVM used and put the entry back on the list.
+ * we wakeup any processes that are waiting on
+ * these pages.
*/
- vm_pager_unmap_pages(spc->spc_kva, spc->spc_npages);
- spc->spc_flags = SPC_FREE;
- TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
-#ifdef DEBUG
- if (swpagerdebug & SDB_WRITE)
- printf("swpg_clean: free spc %x\n", spc);
-#endif
+ PAGE_WAKEUP(spc->spc_m[i]);
}
+ nswiodone -= spc->spc_count;
+
+ return;
}
-#ifdef DEBUG
-static void
-swap_pager_clean_check(mlist, npages, rw)
- vm_page_t *mlist;
- int npages;
- int rw;
+/*
+ * swap_pager_iodone
+ */
+void
+swap_pager_iodone(bp)
+ register struct buf *bp;
{
register swp_clean_t spc;
- boolean_t bad;
- int i, j, s;
- vm_page_t m;
+ int s;
- if (panicstr)
- return;
+ s = splbio();
+ spc = (swp_clean_t) bp->b_spc;
+ TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
+ TAILQ_INSERT_TAIL(&swap_pager_done, spc, spc_list);
+ if (bp->b_flags & B_ERROR) {
+ spc->spc_flags |= SPC_ERROR;
+ printf("error %d blkno %d sz %d ",
+ bp->b_error, bp->b_blkno, bp->b_bcount);
+ }
+
+/*
+ if ((bp->b_flags & B_READ) == 0)
+ vwakeup(bp);
+*/
+
+ bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_ASYNC);
+ if (bp->b_vp) {
+ brelvp(bp);
+ }
+ if( bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+
+ nswiodone += spc->spc_count;
+ if (--spc->spc_swp->sw_poip == 0) {
+ wakeup((caddr_t)spc->spc_swp);
+ }
+
+ if ((swap_pager_needflags & SWAP_FREE_NEEDED) ||
+ swap_pager_inuse.tqh_first == 0) {
+ swap_pager_needflags &= ~SWAP_FREE_NEEDED;
+ wakeup((caddr_t)&swap_pager_free);
+ wakeup((caddr_t)&vm_pages_needed);
+ }
+
+ if (vm_pageout_pages_needed) {
+ wakeup((caddr_t)&vm_pageout_pages_needed);
+ }
+
+ if ((swap_pager_inuse.tqh_first == NULL) ||
+ (cnt.v_free_count < cnt.v_free_min &&
+ nswiodone + cnt.v_free_count >= cnt.v_free_min) ) {
+ wakeup((caddr_t)&vm_pages_needed);
+ }
+ splx(s);
+}
+
+int bswneeded;
+/* TAILQ_HEAD(swqueue, buf) bswlist; */
+/*
+ * allocate a physical buffer
+ */
+struct buf *
+getpbuf() {
+ int s;
+ struct buf *bp;
- bad = FALSE;
s = splbio();
- for (spc = swap_pager_inuse.tqh_first;
- spc != NULL;
- spc = spc->spc_list.tqe_next) {
- for (j = 0; j < spc->spc_npages; j++) {
- m = vm_pager_atop(spc->spc_kva + ptoa(j));
- for (i = 0; i < npages; i++)
- if (m == mlist[i]) {
- if (swpagerdebug & SDB_ANOM)
- printf(
- "swpg_clean_check: %s: page %x on list, flags %x\n",
- rw == B_WRITE ? "write" : "read", mlist[i], spc->spc_flags);
- bad = TRUE;
- }
- }
+ /* get a bp from the swap buffer header pool */
+ while ((bp = bswlist.tqh_first) == NULL) {
+ bswneeded = 1;
+ tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
}
+ TAILQ_REMOVE(&bswlist, bp, b_freelist);
+
splx(s);
- if (bad)
- panic("swpg_clean_check");
+
+ bzero(bp, sizeof *bp);
+ bp->b_rcred = NOCRED;
+ bp->b_wcred = NOCRED;
+ return bp;
}
-#endif
-static void
-swap_pager_iodone(bp)
- register struct buf *bp;
-{
- register swp_clean_t spc;
- daddr_t blk;
+/*
+ * allocate a physical buffer, if one is available
+ */
+struct buf *
+trypbuf() {
int s;
+ struct buf *bp;
-#ifdef DEBUG
- /* save panic time state */
- if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
- return;
- if (swpagerdebug & SDB_FOLLOW)
- printf("swpg_iodone(%x)\n", bp);
-#endif
s = splbio();
- for (spc = swap_pager_inuse.tqh_first;
- spc != NULL;
- spc = spc->spc_list.tqe_next)
- if (spc->spc_bp == bp)
- break;
-#ifdef DEBUG
- if (spc == NULL)
- panic("swap_pager_iodone: bp not found");
-#endif
+ if ((bp = bswlist.tqh_first) == NULL) {
+ splx(s);
+ return NULL;
+ }
+ TAILQ_REMOVE(&bswlist, bp, b_freelist);
+ splx(s);
- spc->spc_flags &= ~SPC_BUSY;
- spc->spc_flags |= SPC_DONE;
- if (bp->b_flags & B_ERROR)
- spc->spc_flags |= SPC_ERROR;
- spc->spc_bp = NULL;
- blk = bp->b_blkno;
-
-#ifdef DEBUG
- --swap_pager_poip;
- if (swpagerdebug & SDB_WRITE)
- printf("swpg_iodone: bp=%x swp=%x flags=%x spc=%x poip=%x\n",
- bp, spc->spc_swp, spc->spc_swp->sw_flags,
- spc, spc->spc_swp->sw_poip);
-#endif
+ bzero(bp, sizeof *bp);
+ bp->b_rcred = NOCRED;
+ bp->b_wcred = NOCRED;
+ return bp;
+}
+
+/*
+ * release a physical buffer
+ */
+void
+relpbuf(bp)
+ struct buf *bp;
+{
+ int s;
- spc->spc_swp->sw_poip--;
- if (spc->spc_swp->sw_flags & SW_WANTED) {
- spc->spc_swp->sw_flags &= ~SW_WANTED;
- wakeup(spc->spc_swp);
+ s = splbio();
+
+ if (bp->b_rcred != NOCRED) {
+ crfree(bp->b_rcred);
+ bp->b_rcred = NOCRED;
}
-
- bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
- bp->b_actf = bswlist.b_actf;
- bswlist.b_actf = bp;
+ if (bp->b_wcred != NOCRED) {
+ crfree(bp->b_wcred);
+ bp->b_wcred = NOCRED;
+ }
+
if (bp->b_vp)
brelvp(bp);
- if (bswlist.b_flags & B_WANTED) {
- bswlist.b_flags &= ~B_WANTED;
- wakeup(&bswlist);
+
+ TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
+
+ if (bswneeded) {
+ bswneeded = 0;
+ wakeup((caddr_t)&bswlist);
}
- wakeup(&vm_pages_needed);
splx(s);
}
+
+/*
+ * return true if any swap control structures can be allocated
+ */
+int
+swap_pager_ready() {
+ if( swap_pager_free.tqh_first)
+ return 1;
+ else
+ return 0;
+}
diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
index 497d92a39386..853edd5d1b16 100644
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -1,7 +1,7 @@
/*
* Copyright (c) 1990 University of Utah.
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
@@ -35,39 +35,31 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)swap_pager.h 8.1 (Berkeley) 6/11/93
+ * from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90
+ * $Id: swap_pager.h,v 1.9 1994/03/14 21:54:23 davidg Exp $
+ */
+
+/*
+ * Modifications to the block allocation data structure by John S. Dyson
+ * 18 Dec 93.
*/
#ifndef _SWAP_PAGER_
#define _SWAP_PAGER_ 1
/*
- * In the swap pager, the backing store for an object is organized as an
- * array of some number of "swap blocks". A swap block consists of a bitmask
- * and some number of contiguous DEV_BSIZE disk blocks. The minimum size
- * of a swap block is:
- *
- * max(PAGE_SIZE, dmmin*DEV_BSIZE) [ 32k currently ]
- *
- * bytes (since the pager interface is page oriented), the maximum size is:
- *
- * min(#bits(swb_mask)*PAGE_SIZE, dmmax*DEV_BSIZE) [ 128k currently ]
- *
- * where dmmin and dmmax are left over from the old VM interface. The bitmask
- * (swb_mask) is used by swap_pager_haspage() to determine if a particular
- * page has actually been written; i.e. the pager copy of the page is valid.
- * All swap blocks in the backing store of an object will be the same size.
- *
- * The reason for variable sized swap blocks is to reduce fragmentation of
- * swap resources. Whenever possible we allocate smaller swap blocks to
- * smaller objects. The swap block size is determined from a table of
- * object-size vs. swap-block-size computed at boot time.
+ * SWB_NPAGES can be set to any value from 1 to 16 pages per allocation,
+ * however, due to the allocation spilling into non-swap pager backed memory,
+ * suggest keeping SWB_NPAGES small (1-4). If high performance is manditory
+ * perhaps up to 8 pages might be in order????
+ * Above problem has been fixed, now we support 16 pages per block. Unused
+ * space is recovered by the swap pager now...
*/
-typedef int sw_bm_t; /* pager bitmask */
-
+#define SWB_NPAGES 8
struct swblock {
- sw_bm_t swb_mask; /* bitmask of valid pages in this block */
- daddr_t swb_block; /* starting disk block for this block */
+ unsigned short swb_valid; /* bitmask for valid pages */
+ unsigned short swb_locked; /* block locked */
+ int swb_block[SWB_NPAGES]; /* unfortunately int instead of daddr_t */
};
typedef struct swblock *sw_blk_t;
@@ -76,15 +68,32 @@ typedef struct swblock *sw_blk_t;
*/
struct swpager {
vm_size_t sw_osize; /* size of object we are backing (bytes) */
- int sw_bsize; /* size of swap blocks (DEV_BSIZE units) */
int sw_nblocks;/* number of blocks in list (sw_blk_t units) */
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */
+ short sw_piip; /* pageins in progress */
};
typedef struct swpager *sw_pager_t;
#define SW_WANTED 0x01
#define SW_NAMED 0x02
+#ifdef KERNEL
+
+void swap_pager_init(void);
+vm_pager_t swap_pager_alloc(caddr_t, vm_size_t, vm_prot_t, vm_offset_t);
+void swap_pager_dealloc(vm_pager_t);
+boolean_t swap_pager_getpage(vm_pager_t, vm_page_t, boolean_t);
+boolean_t swap_pager_putpage(vm_pager_t, vm_page_t, boolean_t);
+boolean_t swap_pager_getmulti(vm_pager_t, vm_page_t *, int, int, boolean_t);
+boolean_t swap_pager_haspage(vm_pager_t, vm_offset_t);
+int swap_pager_io(sw_pager_t, vm_page_t *, int, int, int);
+void swap_pager_iodone(struct buf *);
+boolean_t swap_pager_clean();
+
+extern struct pagerops swappagerops;
+
+#endif
+
#endif /* _SWAP_PAGER_ */
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 85f892f29beb..bc18dd26b202 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -36,7 +36,7 @@
#ifndef VM_H
#define VM_H
-typedef int vm_inherit_t; /* XXX: inheritance codes */
+typedef char vm_inherit_t; /* XXX: inheritance codes */
union vm_map_object;
typedef union vm_map_object vm_map_object_t;
@@ -58,6 +58,7 @@ typedef struct pager_struct *vm_pager_t;
#include <sys/vmmeter.h>
#include <sys/queue.h>
+#include <machine/cpufunc.h>
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm_prot.h>
@@ -87,5 +88,6 @@ struct vmspace {
caddr_t vm_taddr; /* user virtual address of text XXX */
caddr_t vm_daddr; /* user virtual address of data XXX */
caddr_t vm_maxsaddr; /* user VA at max stack growth */
+ caddr_t vm_minsaddr; /* user VA at max stack growth */
};
#endif /* VM_H */
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index bae5f005273d..bc62e4253d49 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -45,6 +45,16 @@ struct vnode;
void chgkprot __P((caddr_t, int, int));
#endif
+/*
+ * Try to get semi-meaningful wait messages into thread_sleep...
+ */
+extern void thread_sleep_(int, simple_lock_t, char *);
+#if __GNUC__ >= 2
+#define thread_sleep(a,b,c) thread_sleep_((a), (b), __FUNCTION__)
+#else
+#define thread_sleep(a,b,c) thread_sleep_((a), (b), "vmslp")
+#endif
+
#ifdef KERNEL
#ifdef TYPEDEF_FOR_UAP
int getpagesize __P((struct proc *p, void *, int *));
@@ -88,7 +98,7 @@ void swapout __P((struct proc *));
void swapout_threads __P((void));
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
-void thread_block __P((void));
+void thread_block __P((char *));
void thread_sleep __P((int, simple_lock_t, boolean_t));
void thread_wakeup __P((int));
int useracc __P((caddr_t, int, int));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f60abf2b5f3a..3ce2d6e452b7 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,6 +1,11 @@
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ *
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@@ -68,11 +73,21 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+
+#define VM_FAULT_READ_AHEAD 4
+#define VM_FAULT_READ_AHEAD_MIN 1
+#define VM_FAULT_READ_BEHIND 3
+#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
+extern int swap_pager_full;
+extern int vm_pageout_proc_limit;
+
/*
* vm_fault:
*
@@ -103,7 +118,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_map_entry_t entry;
register vm_object_t object;
register vm_offset_t offset;
- register vm_page_t m;
+ vm_page_t m;
vm_page_t first_m;
vm_prot_t prot;
int result;
@@ -113,6 +128,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
boolean_t page_exists;
vm_page_t old_m;
vm_object_t next_object;
+ vm_page_t marray[VM_FAULT_READ];
+ int reqpage;
+ int spl;
+ int hardfault=0;
cnt.v_faults++; /* needs lock XXX */
/*
@@ -141,11 +160,15 @@ vm_fault(map, vaddr, fault_type, change_wiring)
#define UNLOCK_THINGS { \
object->paging_in_progress--; \
+ if (object->paging_in_progress == 0) \
+ wakeup((caddr_t)object); \
vm_object_unlock(object); \
if (object != first_object) { \
vm_object_lock(first_object); \
FREE_PAGE(first_m); \
first_object->paging_in_progress--; \
+ if (first_object->paging_in_progress == 0) \
+ wakeup((caddr_t)first_object); \
vm_object_unlock(first_object); \
} \
UNLOCK_MAP; \
@@ -156,6 +179,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_object_deallocate(first_object); \
}
+
RetryFault: ;
/*
@@ -164,8 +188,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
- &first_object, &first_offset,
- &prot, &wired, &su)) != KERN_SUCCESS) {
+ &first_object, &first_offset,
+ &prot, &wired, &su)) != KERN_SUCCESS) {
return(result);
}
lookup_still_valid = TRUE;
@@ -241,25 +265,13 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* wait for it and then retry.
*/
if (m->flags & PG_BUSY) {
-#ifdef DOTHREADS
- int wait_result;
-
- PAGE_ASSERT_WAIT(m, !change_wiring);
- UNLOCK_THINGS;
- thread_block();
- wait_result = current_thread()->wait_result;
- vm_object_deallocate(first_object);
- if (wait_result != THREAD_AWAKENED)
- return(KERN_SUCCESS);
- goto RetryFault;
-#else
- PAGE_ASSERT_WAIT(m, !change_wiring);
UNLOCK_THINGS;
- cnt.v_intrans++;
- thread_block();
+ if (m->flags & PG_BUSY) {
+ m->flags |= PG_WANTED;
+ tsleep((caddr_t)m,PSWP,"vmpfw",0);
+ }
vm_object_deallocate(first_object);
goto RetryFault;
-#endif
}
/*
@@ -268,6 +280,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
vm_page_lock_queues();
+ spl = splimp();
if (m->flags & PG_INACTIVE) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
m->flags &= ~PG_INACTIVE;
@@ -280,6 +293,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
m->flags &= ~PG_ACTIVE;
cnt.v_active_count--;
}
+ splx(spl);
vm_page_unlock_queues();
/*
@@ -290,9 +304,31 @@ vm_fault(map, vaddr, fault_type, change_wiring)
}
if (((object->pager != NULL) &&
- (!change_wiring || wired))
+ (!change_wiring || wired))
|| (object == first_object)) {
+#if 0
+ if (curproc && (vaddr < VM_MAXUSER_ADDRESS) &&
+ (curproc->p_rlimit[RLIMIT_RSS].rlim_max <
+ curproc->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG)) {
+ UNLOCK_AND_DEALLOCATE;
+ vm_fault_free_pages(curproc);
+ goto RetryFault;
+ }
+#endif
+
+ if (swap_pager_full && !object->shadow && (!object->pager ||
+ (object->pager && object->pager->pg_type == PG_SWAP &&
+ !vm_pager_has_page(object->pager, offset+object->paging_offset)))) {
+ if (vaddr < VM_MAXUSER_ADDRESS && curproc && curproc->p_pid >= 48) /* XXX */ {
+ printf("Process %d killed by vm_fault -- out of swap\n", curproc->p_pid);
+ psignal(curproc, SIGKILL);
+ curproc->p_estcpu = 0;
+ curproc->p_nice = PRIO_MIN;
+ setpriority(curproc);
+ }
+ }
+
/*
* Allocate a new page for this object/offset
* pair.
@@ -309,33 +345,46 @@ vm_fault(map, vaddr, fault_type, change_wiring)
if (object->pager != NULL && (!change_wiring || wired)) {
int rv;
+ int faultcount;
+ int reqpage;
/*
* Now that we have a busy page, we can
* release the object lock.
*/
vm_object_unlock(object);
-
/*
- * Call the pager to retrieve the data, if any,
- * after releasing the lock on the map.
+ * now we find out if any other pages should
+ * be paged in at this time
+ * this routine checks to see if the pages surrounding this fault
+ * reside in the same object as the page for this fault. If
+ * they do, then they are faulted in also into the
+ * object. The array "marray" returned contains an array of
+ * vm_page_t structs where one of them is the vm_page_t passed to
+ * the routine. The reqpage return value is the index into the
+ * marray for the vm_page_t passed to the routine.
*/
- UNLOCK_MAP;
cnt.v_pageins++;
- rv = vm_pager_get(object->pager, m, TRUE);
+ faultcount = vm_fault_additional_pages(first_object, first_offset,
+ m, VM_FAULT_READ_BEHIND, VM_FAULT_READ_AHEAD, marray, &reqpage);
/*
- * Reaquire the object lock to preserve our
- * invariant.
+ * Call the pager to retrieve the data, if any,
+ * after releasing the lock on the map.
*/
- vm_object_lock(object);
+ UNLOCK_MAP;
- /*
- * Found the page.
- * Leave it busy while we play with it.
- */
+ rv = faultcount ?
+ vm_pager_get_pages(object->pager,
+ marray, faultcount, reqpage, TRUE): VM_PAGER_FAIL;
if (rv == VM_PAGER_OK) {
/*
+ * Found the page.
+ * Leave it busy while we play with it.
+ */
+ vm_object_lock(object);
+
+ /*
* Relookup in case pager changed page.
* Pager is responsible for disposition
* of old page if moved.
@@ -344,36 +393,42 @@ vm_fault(map, vaddr, fault_type, change_wiring)
cnt.v_pgpgin++;
m->flags &= ~PG_FAKE;
- m->flags |= PG_CLEAN;
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ hardfault++;
break;
}
/*
- * IO error or page outside the range of the pager:
- * cleanup and return an error.
+ * Remove the bogus page (which does not
+ * exist at this object/offset); before
+ * doing so, we must get back our object
+ * lock to preserve our invariant.
+ *
+ * Also wake up any other thread that may want
+ * to bring in this page.
+ *
+ * If this is the top-level object, we must
+ * leave the busy page to prevent another
+ * thread from rushing past us, and inserting
+ * the page in that object at the same time
+ * that we are.
*/
- if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
+
+ vm_object_lock(object);
+ /*
+ * Data outside the range of the pager; an error
+ */
+ if ((rv == VM_PAGER_ERROR) || (rv == VM_PAGER_BAD)) {
FREE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
return(KERN_PROTECTION_FAILURE); /* XXX */
}
- /*
- * rv == VM_PAGER_FAIL:
- *
- * Page does not exist at this object/offset.
- * Free the bogus page (waking up anyone waiting
- * for it) and continue on to the next object.
- *
- * If this is the top-level object, we must
- * leave the busy page to prevent another
- * thread from rushing past us, and inserting
- * the page in that object at the same time
- * that we are.
- */
if (object != first_object) {
FREE_PAGE(m);
- /* note that `m' is not used after this */
+ /*
+ * XXX - we cannot just fall out at this
+ * point, m has been freed and is invalid!
+ */
}
}
@@ -398,6 +453,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
if (object != first_object) {
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_unlock(object);
object = first_object;
@@ -414,16 +471,20 @@ vm_fault(map, vaddr, fault_type, change_wiring)
}
else {
vm_object_lock(next_object);
- if (object != first_object)
+ if (object != first_object) {
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
+ }
vm_object_unlock(object);
object = next_object;
object->paging_in_progress++;
}
}
- if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
- panic("vm_fault: active, inactive or !busy after main loop");
+	if (((m->flags & (PG_ACTIVE|PG_INACTIVE)) != 0) ||
+		(m->flags & PG_BUSY) == 0)
+		panic("vm_fault: absent or active or inactive or not busy after main loop");
/*
* PAGE HAS BEEN FOUND.
@@ -486,9 +547,11 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
vm_page_lock_queues();
+
vm_page_activate(m);
- vm_page_deactivate(m);
pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+ if ((m->flags & PG_CLEAN) == 0)
+ m->flags |= PG_LAUNDRY;
vm_page_unlock_queues();
/*
@@ -496,6 +559,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
PAGE_WAKEUP(m);
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_unlock(object);
/*
@@ -517,6 +582,8 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* paging_in_progress to do that...
*/
object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
vm_object_collapse(object);
object->paging_in_progress++;
}
@@ -572,38 +639,18 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_m = vm_page_lookup(copy_object, copy_offset);
if (page_exists = (copy_m != NULL)) {
if (copy_m->flags & PG_BUSY) {
-#ifdef DOTHREADS
- int wait_result;
-
- /*
- * If the page is being brought
- * in, wait for it and then retry.
- */
- PAGE_ASSERT_WAIT(copy_m, !change_wiring);
- RELEASE_PAGE(m);
- copy_object->ref_count--;
- vm_object_unlock(copy_object);
- UNLOCK_THINGS;
- thread_block();
- wait_result = current_thread()->wait_result;
- vm_object_deallocate(first_object);
- if (wait_result != THREAD_AWAKENED)
- return(KERN_SUCCESS);
- goto RetryFault;
-#else
/*
* If the page is being brought
* in, wait for it and then retry.
*/
- PAGE_ASSERT_WAIT(copy_m, !change_wiring);
+ PAGE_ASSERT_WAIT(copy_m, !change_wiring);
RELEASE_PAGE(m);
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
+ thread_block("fltcpy");
vm_object_deallocate(first_object);
goto RetryFault;
-#endif
}
}
@@ -625,8 +672,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* found that the copy_object's pager
* doesn't have the page...
*/
- copy_m = vm_page_alloc(copy_object,
- copy_offset);
+ copy_m = vm_page_alloc(copy_object, copy_offset);
if (copy_m == NULL) {
/*
* Wait for a page, then retry.
@@ -700,10 +746,16 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* pmaps use it.)
*/
vm_page_lock_queues();
+
+ vm_page_activate(old_m);
+
+
pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
VM_PROT_NONE);
+ if ((old_m->flags & PG_CLEAN) == 0)
+ old_m->flags |= PG_LAUNDRY;
copy_m->flags &= ~PG_CLEAN;
- vm_page_activate(copy_m); /* XXX */
+ vm_page_activate(copy_m);
vm_page_unlock_queues();
PAGE_WAKEUP(copy_m);
@@ -832,8 +884,18 @@ vm_fault(map, vaddr, fault_type, change_wiring)
else
vm_page_unwire(m);
}
- else
+ else {
vm_page_activate(m);
+ }
+
+ if( curproc && curproc->p_stats) {
+ if (hardfault) {
+ curproc->p_stats->p_ru.ru_majflt++;
+ } else {
+ curproc->p_stats->p_ru.ru_minflt++;
+ }
+ }
+
vm_page_unlock_queues();
/*
@@ -857,9 +919,10 @@ vm_fault_wire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
+
register vm_offset_t va;
register pmap_t pmap;
- int rv;
+ int rv;
pmap = vm_map_pmap(map);
@@ -893,7 +956,8 @@ vm_fault_wire(map, start, end)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, start, end)
+void
+vm_fault_unwire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
@@ -942,13 +1006,13 @@ void vm_fault_unwire(map, start, end)
* entry corresponding to a main map entry that is wired down).
*/
-void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
+void
+vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
vm_map_t dst_map;
vm_map_t src_map;
vm_map_entry_t dst_entry;
vm_map_entry_t src_entry;
{
-
vm_object_t dst_object;
vm_object_t src_object;
vm_offset_t dst_offset;
@@ -960,7 +1024,7 @@ void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
#ifdef lint
src_map++;
-#endif
+#endif lint
src_object = src_entry->object.vm_object;
src_offset = src_entry->offset;
@@ -1031,5 +1095,211 @@ void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
PAGE_WAKEUP(dst_m);
vm_object_unlock(dst_object);
}
+}
+
+
+/*
+ * looks page up in shadow chain
+ */
+
+int
+vm_fault_page_lookup(object, offset, rtobject, rtoffset, rtm)
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_object_t *rtobject;
+ vm_offset_t *rtoffset;
+ vm_page_t *rtm;
+{
+ vm_page_t m;
+ vm_object_t first_object = object;
+
+ *rtm = 0;
+ *rtobject = 0;
+ *rtoffset = 0;
+
+
+ while (!(m=vm_page_lookup(object, offset))) {
+ if (object->pager) {
+ if (vm_pager_has_page(object->pager, object->paging_offset+offset)) {
+ *rtobject = object;
+ *rtoffset = offset;
+ return 1;
+ }
+ }
+
+ if (!object->shadow)
+ return 0;
+ else {
+ offset += object->shadow_offset;
+ object = object->shadow;
+ }
+ }
+ *rtobject = object;
+ *rtoffset = offset;
+ *rtm = m;
+ return 1;
+}
+
+/*
+ * This routine checks around the requested page for other pages that
+ * might be able to be faulted in.
+ *
+ * Inputs:
+ * first_object, first_offset, m, rbehind, rahead
+ *
+ * Outputs:
+ * marray (array of vm_page_t), reqpage (index of requested page)
+ *
+ * Return value:
+ * number of pages in marray
+ */
+int
+vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marray, reqpage)
+ vm_object_t first_object;
+ vm_offset_t first_offset;
+ vm_page_t m;
+ int rbehind;
+ int raheada;
+ vm_page_t *marray;
+ int *reqpage;
+{
+ int i;
+ vm_page_t tmpm;
+ vm_object_t object;
+ vm_offset_t offset, startoffset, endoffset, toffset, size;
+ vm_object_t rtobject;
+ vm_page_t rtm;
+ vm_offset_t rtoffset;
+ vm_offset_t offsetdiff;
+ int rahead;
+ int treqpage;
+
+ object = m->object;
+ offset = m->offset;
+
+ offsetdiff = offset - first_offset;
+
+ /*
+ * if the requested page is not available, then give up now
+ */
+
+ if (!vm_pager_has_page(object->pager, object->paging_offset+offset))
+ return 0;
+
+ /*
+ * if there is no getmulti routine for this pager, then just allow
+ * one page to be read.
+ */
+/*
+ if (!object->pager->pg_ops->pgo_getpages) {
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+*/
+
+ /*
+ * try to do any readahead that we might have free pages for.
+ */
+ rahead = raheada;
+ if (rahead > (cnt.v_free_count - cnt.v_free_reserved)) {
+ rahead = cnt.v_free_count - cnt.v_free_reserved;
+ rbehind = 0;
+ }
+
+ if (cnt.v_free_count < cnt.v_free_min) {
+ if (rahead > VM_FAULT_READ_AHEAD_MIN)
+ rahead = VM_FAULT_READ_AHEAD_MIN;
+ rbehind = 0;
+ }
+
+ /*
+ * if we don't have any free pages, then just read one page.
+ */
+ if (rahead <= 0) {
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+
+ /*
+ * scan backward for the read behind pages --
+ * in memory or on disk not in same object
+ */
+	toffset = offset - NBPG;	/* XXX wraps (unsigned) when offset == 0 -- verify scan bounds */
+ if( rbehind*NBPG > offset)
+ rbehind = offset / NBPG;
+ startoffset = offset - rbehind*NBPG;
+ while (toffset >= startoffset) {
+ if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
+ rtm != 0 || rtobject != object) {
+ startoffset = toffset + NBPG;
+ break;
+ }
+ if( toffset == 0)
+ break;
+ toffset -= NBPG;
+ }
+
+ /*
+ * scan forward for the read ahead pages --
+ * in memory or on disk not in same object
+ */
+ toffset = offset + NBPG;
+ endoffset = offset + (rahead+1)*NBPG;
+ while (toffset < object->size && toffset < endoffset) {
+ if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
+ rtm != 0 || rtobject != object) {
+ break;
+ }
+ toffset += NBPG;
+ }
+ endoffset = toffset;
+ /* calculate number of bytes of pages */
+ size = (endoffset - startoffset) / NBPG;
+
+ /* calculate the page offset of the required page */
+ treqpage = (offset - startoffset) / NBPG;
+
+ /* see if we have space (again) */
+ if (cnt.v_free_count >= cnt.v_free_reserved + size) {
+ bzero(marray, (rahead + rbehind + 1) * sizeof(vm_page_t));
+ /*
+ * get our pages and don't block for them
+ */
+ for (i = 0; i < size; i++) {
+ if (i != treqpage)
+ rtm = vm_page_alloc(object, startoffset + i * NBPG);
+ else
+ rtm = m;
+ marray[i] = rtm;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (marray[i] == 0)
+ break;
+ }
+
+ /*
+ * if we could not get our block of pages, then
+ * free the readahead/readbehind pages.
+ */
+ if (i < size) {
+ for (i = 0; i < size; i++) {
+ if (i != treqpage && marray[i])
+ FREE_PAGE(marray[i]);
+ }
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
+ }
+
+ *reqpage = treqpage;
+ return size;
+ }
+ *reqpage = 0;
+ marray[0] = m;
+ return 1;
}
+
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5676ff3f7cc2..f181ab034f1d 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -67,16 +67,22 @@
#include <sys/buf.h>
#include <sys/user.h>
+#include <sys/kernel.h>
+#include <sys/dkstat.h>
+
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
-#include <machine/cpu.h>
+#include <machine/stdarg.h>
+extern char kstack[];
int avefree = 0; /* XXX */
-unsigned maxdmap = MAXDSIZ; /* XXX */
int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */
+/* vm_map_t upages_map; */
+void swapout(struct proc *p);
int
kernacc(addr, len, rw)
caddr_t addr;
@@ -89,18 +95,6 @@ kernacc(addr, len, rw)
saddr = trunc_page(addr);
eaddr = round_page(addr+len);
rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
- /*
- * XXX there are still some things (e.g. the buffer cache) that
- * are managed behind the VM system's back so even though an
- * address is accessible in the mind of the VM system, there may
- * not be physical pages where the VM thinks there is. This can
- * lead to bogus allocation of pages in the kernel address space
- * or worse, inconsistencies at the pmap level. We only worry
- * about the buffer cache for now.
- */
- if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
- saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
- rv = FALSE;
return(rv == TRUE);
}
@@ -112,6 +106,23 @@ useracc(addr, len, rw)
boolean_t rv;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+ /*
+ * XXX - specially disallow access to user page tables - they are
+ * in the map.
+ *
+ * XXX - don't specially disallow access to the user area - treat
+ * it as incorrectly as elsewhere.
+ *
+ * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was
+ * only used (as an end address) in trap.c. Use it as an end
+ * address here too.
+ */
+ if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS
+ || (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS
+ || (vm_offset_t) addr + len <= (vm_offset_t) addr) {
+ return (FALSE);
+ }
+
rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
trunc_page(addr), round_page(addr+len), prot);
return(rv == TRUE);
@@ -121,40 +132,18 @@ useracc(addr, len, rw)
/*
* Change protections on kernel pages from addr to addr+len
* (presumably so debugger can plant a breakpoint).
- *
- * We force the protection change at the pmap level. If we were
- * to use vm_map_protect a change to allow writing would be lazily-
- * applied meaning we would still take a protection fault, something
- * we really don't want to do. It would also fragment the kernel
- * map unnecessarily. We cannot use pmap_protect since it also won't
- * enforce a write-enable request. Using pmap_enter is the only way
- * we can ensure the change takes place properly.
+ * All addresses are assumed to reside in the Sysmap,
*/
-void
chgkprot(addr, len, rw)
register caddr_t addr;
int len, rw;
{
- vm_prot_t prot;
- vm_offset_t pa, sva, eva;
-
- prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
- eva = round_page(addr + len);
- for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
- /*
- * Extract physical address for the page.
- * We use a cheezy hack to differentiate physical
- * page 0 from an invalid mapping, not that it
- * really matters...
- */
- pa = pmap_extract(kernel_pmap, sva|1);
- if (pa == 0)
- panic("chgkprot: invalid page");
- pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
- }
+ vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+
+ vm_map_protect(kernel_map, trunc_page(addr),
+ round_page(addr+len), prot, FALSE);
}
#endif
-
void
vslock(addr, len)
caddr_t addr;
@@ -172,8 +161,8 @@ vsunlock(addr, len, dirtied)
{
#ifdef lint
dirtied++;
-#endif
- vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
+#endif lint
+ vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr+len), TRUE);
}
@@ -194,16 +183,19 @@ vm_fork(p1, p2, isvfork)
int isvfork;
{
register struct user *up;
- vm_offset_t addr;
+ vm_offset_t addr, ptaddr;
+ int i;
+ struct vm_map *vp;
+
+ while( cnt.v_free_count < cnt.v_free_min)
+ VM_WAIT;
-#ifdef i386
/*
* avoid copying any of the parent's pagetables or other per-process
* objects that reside in the map by marking all of them non-inheritable
*/
(void)vm_map_inherit(&p1->p_vmspace->vm_map,
- UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
-#endif
+ UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
#ifdef SYSVSHM
@@ -211,23 +203,40 @@ vm_fork(p1, p2, isvfork)
shmfork(p1, p2, isvfork);
#endif
-#ifndef i386
/*
* Allocate a wired-down (for now) pcb and kernel stack for the process
*/
- addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
- if (addr == 0)
- panic("vm_fork: no more kernel virtual memory");
- vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
-#else
-/* XXX somehow, on 386, ocassionally pageout removes active, wired down kstack,
-and pagetables, WITHOUT going thru vm_page_unwire! Why this appears to work is
-not yet clear, yet it does... */
- addr = kmem_alloc(kernel_map, ctob(UPAGES));
- if (addr == 0)
- panic("vm_fork: no more kernel virtual memory");
-#endif
- up = (struct user *)addr;
+
+ addr = (vm_offset_t) kstack;
+
+ vp = &p2->p_vmspace->vm_map;
+
+ /* ream out old pagetables and kernel stack */
+ (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
+
+ /* get new pagetables and kernel stack */
+ (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
+
+ /* force in the page table encompassing the UPAGES */
+ ptaddr = trunc_page((u_int)vtopte(addr));
+ vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);
+
+ /* and force in (demand-zero) the UPAGES */
+ vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);
+
+ /* get a kernel virtual address for the UPAGES for this proc */
+ up = (struct user *)kmem_alloc_pageable(kernel_map, UPAGES * NBPG);
+
+ /* and force-map the upages into the kernel pmap */
+ for (i = 0; i < UPAGES; i++)
+ pmap_enter(vm_map_pmap(kernel_map),
+ ((vm_offset_t) up) + NBPG * i,
+ pmap_extract(vp->pmap, addr + NBPG * i),
+ VM_PROT_READ|VM_PROT_WRITE, 1);
+
+ /* and allow the UPAGES page table entry to be paged (at the vm system level) */
+ vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);
+
p2->p_addr = up;
/*
@@ -246,15 +255,7 @@ not yet clear, yet it does... */
((caddr_t)&up->u_stats.pstat_endcopy -
(caddr_t)&up->u_stats.pstat_startcopy));
-#ifdef i386
- { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;
-
- vp = &p2->p_vmspace->vm_map;
- (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
- (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
- (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
- }
-#endif
+
/*
* cpu_fork will copy and update the kernel stack and pcb,
* and make the child ready to run. It marks the child
@@ -273,6 +274,7 @@ void
vm_init_limits(p)
register struct proc *p;
{
+ int tmp;
/*
* Set up the initial limits on process VM.
@@ -285,11 +287,13 @@ vm_init_limits(p)
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
- p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
+ tmp = ((2 * cnt.v_free_count) / 3) - 32;
+ if (cnt.v_free_count < 512)
+ tmp = cnt.v_free_count;
+ p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(tmp);
+ p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
-#include <vm/vm_pageout.h>
-
#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
@@ -298,12 +302,67 @@ int swapdebug = 0;
#define SDB_SWAPOUT 4
#endif
+void
+faultin(p)
+struct proc *p;
+{
+ vm_offset_t i;
+ vm_offset_t vaddr, ptaddr;
+ vm_offset_t v, v1;
+ struct user *up;
+ int s;
+ int opflag;
+
+ if ((p->p_flag & P_INMEM) == 0) {
+ int rv0, rv1;
+ vm_map_t map;
+
+ ++p->p_lock;
+
+ map = &p->p_vmspace->vm_map;
+ /* force the page table encompassing the kernel stack (upages) */
+ ptaddr = trunc_page((u_int)vtopte(kstack));
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);
+
+ /* wire in the UPAGES */
+ vm_map_pageable(map, (vm_offset_t) kstack,
+ (vm_offset_t) kstack + UPAGES * NBPG, FALSE);
+
+ /* and map them nicely into the kernel pmap */
+ for (i = 0; i < UPAGES; i++) {
+ vm_offset_t off = i * NBPG;
+ vm_offset_t pa = (vm_offset_t)
+ pmap_extract(&p->p_vmspace->vm_pmap,
+ (vm_offset_t) kstack + off);
+ pmap_enter(vm_map_pmap(kernel_map),
+ ((vm_offset_t)p->p_addr) + off,
+ pa, VM_PROT_READ|VM_PROT_WRITE, 1);
+ }
+
+ /* and let the page table pages go (at least above pmap level) */
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
+
+ s = splhigh();
+
+ if (p->p_stat == SRUN)
+ setrunqueue(p);
+
+ p->p_flag |= P_INMEM;
+
+ /* undo the effect of setting SLOCK above */
+ --p->p_lock;
+ splx(s);
+
+ }
+
+}
+
+int swapinreq;
+int percentactive;
/*
- * Brutally simple:
- * 1. Attempt to swapin every swaped-out, runnable process in
- * order of priority.
- * 2. If not enough memory, wake the pageout daemon and let it
- * clear some space.
+ * This swapin algorithm attempts to swap-in processes only if there
+ * is enough space for them. Of course, if a process waits for a long
+ * time, it will be swapped in anyway.
*/
void
scheduler()
@@ -313,88 +372,104 @@ scheduler()
struct proc *pp;
int ppri;
vm_offset_t addr;
- vm_size_t size;
+ int lastidle, lastrun;
+ int curidle, currun;
+ int forceload;
+ int percent;
+ int ntries;
+
+ lastidle = 0;
+ lastrun = 0;
loop:
-#ifdef DEBUG
- while (!enableswap)
- sleep((caddr_t)&proc0, PVM);
-#endif
+ ntries = 0;
+ vmmeter();
+
+ curidle = cp_time[CP_IDLE];
+ currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
+ percent = (100*(currun-lastrun)) / ( 1 + (currun-lastrun) + (curidle-lastidle));
+ lastrun = currun;
+ lastidle = curidle;
+ if( percent > 100)
+ percent = 100;
+ percentactive = percent;
+
+ if( percentactive < 25)
+ forceload = 1;
+ else
+ forceload = 0;
+
+loop1:
pp = NULL;
ppri = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
+ int mempri;
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
- if (pri > ppri) {
+ mempri = pri > 0 ? pri : 0;
+ /*
+ * if this process is higher priority and there is
+ * enough space, then select this process instead
+ * of the previous selection.
+ */
+ if (pri > ppri &&
+ (((cnt.v_free_count + (mempri * (4*PAGE_SIZE) / PAGE_SIZE) >= (p->p_vmspace->vm_swrss)) || (ntries > 0 && forceload)))) {
pp = p;
ppri = pri;
}
}
}
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: running, procp %x pri %d\n", pp, ppri);
-#endif
+
+ if ((pp == NULL) && (ntries == 0) && forceload) {
+ ++ntries;
+ goto loop1;
+ }
+
/*
* Nothing to do, back to sleep
*/
if ((p = pp) == NULL) {
- sleep((caddr_t)&proc0, PVM);
+ tsleep((caddr_t)&proc0, PVM, "sched", 0);
goto loop;
}
/*
- * We would like to bring someone in.
- * This part is really bogus cuz we could deadlock on memory
- * despite our feeble check.
+ * We would like to bring someone in. (only if there is space).
*/
- size = round_page(ctob(UPAGES));
- addr = (vm_offset_t) p->p_addr;
- if (cnt.v_free_count > atop(size)) {
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPIN)
- printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
- p->p_pid, p->p_comm, p->p_addr,
- ppri, cnt.v_free_count);
-#endif
- vm_map_pageable(kernel_map, addr, addr+size, FALSE);
- /*
- * Some architectures need to be notified when the
- * user area has moved to new physical page(s) (e.g.
- * see pmax/pmax/vm_machdep.c).
- */
- cpu_swapin(p);
- (void) splstatclock();
- if (p->p_stat == SRUN)
- setrunqueue(p);
- p->p_flag |= P_INMEM;
- (void) spl0();
+/*
+ printf("swapin: %d, free: %d, res: %d, min: %d\n",
+ p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
+*/
+ (void) splhigh();
+ if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
+ (cnt.v_free_count >= cnt.v_free_min)) {
+ spl0();
+ faultin(p);
p->p_swtime = 0;
goto loop;
- }
+ }
+ /*
+ * log the memory shortage
+ */
+ swapinreq += p->p_vmspace->vm_swrss;
/*
* Not enough memory, jab the pageout daemon and wait til the
* coast is clear.
*/
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: no room for pid %d(%s), free %d\n",
- p->p_pid, p->p_comm, cnt.v_free_count);
-#endif
- (void) splhigh();
- VM_WAIT;
+ if( cnt.v_free_count < cnt.v_free_min) {
+ VM_WAIT;
+ } else {
+ tsleep((caddr_t)&proc0, PVM, "sched", 0);
+ }
(void) spl0();
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: room again, free %d\n", cnt.v_free_count);
-#endif
goto loop;
}
-#define swappable(p) \
- (((p)->p_flag & \
- (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)
+#define swappable(p) \
+ (((p)->p_lock == 0) && \
+ ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO)) == P_INMEM)
+extern int vm_pageout_free_min;
/*
* Swapout is driven by the pageout daemon. Very simple, we find eligible
* procs and unwire their u-areas. We try to always "swap" at least one
@@ -409,54 +484,86 @@ swapout_threads()
register struct proc *p;
struct proc *outp, *outp2;
int outpri, outpri2;
+ int tpri;
int didswap = 0;
+ int swapneeded = swapinreq;
extern int maxslp;
+ int runnablenow;
+ int s;
-#ifdef DEBUG
- if (!enableswap)
- return;
-#endif
+swapmore:
+ runnablenow = 0;
outp = outp2 = NULL;
- outpri = outpri2 = 0;
+ outpri = outpri2 = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if (!swappable(p))
continue;
switch (p->p_stat) {
case SRUN:
- if (p->p_swtime > outpri2) {
+ ++runnablenow;
+ /*
+ * count the process as being in a runnable state
+ */
+ if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
outp2 = p;
- outpri2 = p->p_swtime;
+ outpri2 = tpri;
}
continue;
case SSLEEP:
case SSTOP:
- if (p->p_slptime >= maxslp) {
+			/*
+			 * do not swap out a process that is waiting for VM data
+			 * structures; there is a possible deadlock.
+ */
+ if (!lock_try_write( &p->p_vmspace->vm_map.lock)) {
+ continue;
+ }
+ vm_map_unlock( &p->p_vmspace->vm_map);
+ if (p->p_slptime > maxslp) {
swapout(p);
didswap++;
- } else if (p->p_slptime > outpri) {
+ } else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri) {
outp = p;
- outpri = p->p_slptime;
+ outpri = tpri ;
}
continue;
}
}
/*
- * If we didn't get rid of any real duds, toss out the next most
- * likely sleeping/stopped or running candidate. We only do this
- * if we are real low on memory since we don't gain much by doing
- * it (UPAGES pages).
+ * We swapout only if there are more than two runnable processes or if
+ * another process needs some space to swapin.
*/
- if (didswap == 0 &&
- cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
- if ((p = outp) == 0)
- p = outp2;
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPOUT)
- printf("swapout_threads: no duds, try procp %x\n", p);
-#endif
- if (p)
+ if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
+ (((cnt.v_free_count + cnt.v_inactive_count) <= (cnt.v_free_target + cnt.v_inactive_target)) ||
+ (cnt.v_free_count < cnt.v_free_min))) {
+ if ((p = outp) == 0) {
+ p = outp2;
+ }
+
+ if (p) {
swapout(p);
+ didswap = 1;
+ }
+ }
+
+ /*
+ * if we previously had found a process to swapout, and we need to swapout
+ * more then try again.
+ */
+#if 0
+ if( p && swapinreq)
+ goto swapmore;
+#endif
+
+ /*
+ * If we swapped something out, and another process needed memory,
+ * then wakeup the sched process.
+ */
+ if (didswap) {
+ if (swapneeded)
+ wakeup((caddr_t)&proc0);
+ swapinreq = 0;
}
}
@@ -465,59 +572,37 @@ swapout(p)
register struct proc *p;
{
vm_offset_t addr;
- vm_size_t size;
+ struct pmap *pmap = &p->p_vmspace->vm_pmap;
+ vm_map_t map = &p->p_vmspace->vm_map;
+ vm_offset_t ptaddr;
+ int i;
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPOUT)
- printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
- p->p_pid, p->p_comm, p->p_addr, p->p_stat,
- p->p_slptime, cnt.v_free_count);
-#endif
- size = round_page(ctob(UPAGES));
- addr = (vm_offset_t) p->p_addr;
-#if defined(hp300) || defined(luna68k)
+ ++p->p_stats->p_ru.ru_nswap;
/*
- * Ugh! u-area is double mapped to a fixed address behind the
- * back of the VM system and accesses are usually through that
- * address rather than the per-process address. Hence reference
- * and modify information are recorded at the fixed address and
- * lost at context switch time. We assume the u-struct and
- * kernel stack are always accessed/modified and force it to be so.
+ * remember the process resident count
*/
- {
- register int i;
- volatile long tmp;
-
- for (i = 0; i < UPAGES; i++) {
- tmp = *(long *)addr; *(long *)addr = tmp;
- addr += NBPG;
- }
- addr = (vm_offset_t) p->p_addr;
- }
-#endif
-#ifdef mips
+ p->p_vmspace->vm_swrss =
+ p->p_vmspace->vm_pmap.pm_stats.resident_count;
/*
- * Be sure to save the floating point coprocessor state before
- * paging out the u-struct.
+ * and decrement the amount of needed space
*/
- {
- extern struct proc *machFPCurProcPtr;
+ swapinreq -= min(swapinreq, p->p_vmspace->vm_pmap.pm_stats.resident_count);
- if (p == machFPCurProcPtr) {
- MachSaveCurFPState(p);
- machFPCurProcPtr = (struct proc *)0;
- }
- }
-#endif
-#ifndef i386 /* temporary measure till we find spontaineous unwire of kstack */
- vm_map_pageable(kernel_map, addr, addr+size, TRUE);
- pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
-#endif
(void) splhigh();
p->p_flag &= ~P_INMEM;
if (p->p_stat == SRUN)
remrq(p);
(void) spl0();
+
+ ++p->p_lock;
+/* let the upages be paged */
+ pmap_remove(vm_map_pmap(kernel_map),
+ (vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);
+
+ vm_map_pageable(map, (vm_offset_t) kstack,
+ (vm_offset_t) kstack + UPAGES * NBPG, TRUE);
+
+ --p->p_lock;
p->p_swtime = 0;
}
@@ -525,6 +610,7 @@ swapout(p)
* The rest of these routines fake thread handling
*/
+#ifndef assert_wait
void
assert_wait(event, ruptible)
int event;
@@ -535,44 +621,38 @@ assert_wait(event, ruptible)
#endif
curproc->p_thread = event;
}
+#endif
void
-thread_block()
+thread_block(char *msg)
{
- int s = splhigh();
-
if (curproc->p_thread)
- sleep((caddr_t)curproc->p_thread, PVM);
- splx(s);
+ tsleep((caddr_t)curproc->p_thread, PVM, msg, 0);
}
+
void
-thread_sleep(event, lock, ruptible)
+thread_sleep_(event, lock, wmesg)
int event;
simple_lock_t lock;
- boolean_t ruptible;
+ char *wmesg;
{
-#ifdef lint
- ruptible++;
-#endif
- int s = splhigh();
curproc->p_thread = event;
simple_unlock(lock);
- if (curproc->p_thread)
- sleep((caddr_t)event, PVM);
- splx(s);
+ if (curproc->p_thread) {
+ tsleep((caddr_t)event, PVM, wmesg, 0);
+ }
}
+#ifndef thread_wakeup
void
thread_wakeup(event)
int event;
{
- int s = splhigh();
-
wakeup((caddr_t)event);
- splx(s);
}
+#endif
/*
* DEBUG stuff
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 4874f9e707a3..a0eac7045e55 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
@@ -79,7 +80,8 @@
* The start and end address of physical memory is passed in.
*/
-void vm_mem_init()
+void
+vm_mem_init()
{
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
@@ -89,9 +91,9 @@ void vm_mem_init()
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
- vm_set_page_size();
- vm_page_startup(&avail_start, &avail_end);
+ vm_set_page_size();
+ virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
/*
* Initialize other VM packages
*/
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 7e4db63abf28..55a094992aa6 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -292,9 +292,13 @@ kmem_malloc(map, size, canwait)
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
vm_map_unlock(map);
+#if 0
if (canwait) /* XXX should wait */
panic("kmem_malloc: %s too small",
map == kmem_map ? "kmem_map" : "mb_map");
+#endif
+ if (canwait)
+ panic("kmem_malloc: map too small");
return (0);
}
offset = addr - vm_map_min(kmem_map);
@@ -404,7 +408,7 @@ vm_offset_t kmem_alloc_wait(map, size)
}
assert_wait((int)map, TRUE);
vm_map_unlock(map);
- thread_block();
+ thread_block("kmaw");
}
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
vm_map_unlock(map);
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index d0d2c358af06..c032560f7648 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -65,8 +65,10 @@
/* Kernel memory management definitions. */
vm_map_t buffer_map;
-vm_map_t exec_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
+vm_map_t io_map;
+vm_map_t clean_map;
+vm_map_t pager_map;
vm_map_t phys_map;
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 425fe0de4326..ffffa963c996 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -73,6 +73,7 @@
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
+#include <vm/vm_kern.h>
/*
* Virtual memory maps provide for the mapping, protection,
@@ -137,6 +138,11 @@ vm_size_t kentry_data_size;
vm_map_entry_t kentry_free;
vm_map_t kmap_free;
+int kentry_count;
+vm_map_t kmap_free;
+static vm_offset_t mapvm=0;
+static int mapvmpgcnt=0;
+
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
@@ -273,27 +279,71 @@ vm_map_init(map, min, max, pageable)
* Allocates a VM map entry for insertion.
* No entry fields are filled in. This routine is
*/
-vm_map_entry_t vm_map_entry_create(map)
+static struct vm_map_entry *mappool;
+static int mappoolcnt;
+void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
+
+vm_map_entry_t
+vm_map_entry_create(map)
vm_map_t map;
{
vm_map_entry_t entry;
-#ifdef DEBUG
- extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
- boolean_t isspecial;
-
- isspecial = (map == kernel_map || map == kmem_map ||
- map == mb_map || map == pager_map);
- if (isspecial && map->entries_pageable ||
- !isspecial && !map->entries_pageable)
- panic("vm_map_entry_create: bogus map");
-#endif
- if (map->entries_pageable) {
+ int s;
+ int i;
+#define KENTRY_LOW_WATER 64
+#define MAPENTRY_LOW_WATER 64
+
+ /*
+ * This is a *very* nasty (and sort of incomplete) hack!!!!
+ */
+ if (kentry_count < KENTRY_LOW_WATER) {
+ if (mapvmpgcnt && mapvm) {
+ vm_page_t m;
+ if (m = vm_page_alloc(kmem_object, mapvm-vm_map_min(kmem_map))) {
+ int newentries;
+ newentries = (NBPG/sizeof (struct vm_map_entry));
+ vm_page_wire(m);
+ m->flags &= ~PG_BUSY;
+ pmap_enter(vm_map_pmap(kmem_map), mapvm,
+ VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
+
+ entry = (vm_map_entry_t) mapvm;
+ mapvm += NBPG;
+ --mapvmpgcnt;
+
+ for (i = 0; i < newentries; i++) {
+ vm_map_entry_dispose(kernel_map, entry);
+ entry++;
+ }
+ }
+ }
+ }
+
+ if (map == kernel_map || map == kmem_map || map == pager_map) {
+
+ if (entry = kentry_free) {
+ kentry_free = entry->next;
+ --kentry_count;
+ return entry;
+ }
+
+ if (entry = mappool) {
+ mappool = entry->next;
+ --mappoolcnt;
+ return entry;
+ }
+
+ } else {
+ if (entry = mappool) {
+ mappool = entry->next;
+ --mappoolcnt;
+ return entry;
+ }
+
MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
M_VMMAPENT, M_WAITOK);
- } else {
- if (entry = kentry_free)
- kentry_free = kentry_free->next;
}
+dopanic:	/* XXX label has no matching goto in this function */
if (entry == NULL)
panic("vm_map_entry_create: out of map entries");
@@ -305,25 +355,28 @@ vm_map_entry_t vm_map_entry_create(map)
*
* Inverse of vm_map_entry_create.
*/
-void vm_map_entry_dispose(map, entry)
+void
+vm_map_entry_dispose(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
-#ifdef DEBUG
- extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
- boolean_t isspecial;
-
- isspecial = (map == kernel_map || map == kmem_map ||
- map == mb_map || map == pager_map);
- if (isspecial && map->entries_pageable ||
- !isspecial && !map->entries_pageable)
- panic("vm_map_entry_dispose: bogus map");
-#endif
- if (map->entries_pageable) {
- FREE(entry, M_VMMAPENT);
- } else {
+ extern vm_map_t kernel_map, kmem_map, pager_map;
+ int s;
+
+ if (map == kernel_map || map == kmem_map || map == pager_map ||
+ kentry_count < KENTRY_LOW_WATER) {
entry->next = kentry_free;
kentry_free = entry;
+ ++kentry_count;
+ } else {
+ if (mappoolcnt < MAPENTRY_LOW_WATER) {
+ entry->next = mappool;
+ mappool = entry;
+ ++mappoolcnt;
+ return;
+ }
+
+ FREE(entry, M_VMMAPENT);
}
}
@@ -799,7 +852,7 @@ static void _vm_map_clip_start(map, entry, start)
* See if we can simplify this entry first
*/
- vm_map_simplify_entry(map, entry);
+ /* vm_map_simplify_entry(map, entry); */
/*
* Split off the front portion --
@@ -1130,7 +1183,7 @@ vm_map_pageable(map, start, end, new_pageable)
{
register vm_map_entry_t entry;
vm_map_entry_t start_entry;
- register vm_offset_t failed;
+ register vm_offset_t failed = 0;
int rv;
vm_map_lock(map);
@@ -2546,11 +2599,13 @@ void vm_map_simplify(map, start)
if (map->first_free == this_entry)
map->first_free = prev_entry;
- SAVE_HINT(map, prev_entry);
- vm_map_entry_unlink(map, this_entry);
- prev_entry->end = this_entry->end;
- vm_object_deallocate(this_entry->object.vm_object);
- vm_map_entry_dispose(map, this_entry);
+ if (!this_entry->object.vm_object->paging_in_progress) {
+ SAVE_HINT(map, prev_entry);
+ vm_map_entry_unlink(map, this_entry);
+ prev_entry->end = this_entry->end;
+ vm_object_deallocate(this_entry->object.vm_object);
+ vm_map_entry_dispose(map, this_entry);
+ }
}
vm_map_unlock(map);
}
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index d25b7a2d1bd3..ee253ef70f61 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -102,11 +102,11 @@ struct vm_map_entry {
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
- boolean_t is_a_map; /* Is "object" a map? */
- boolean_t is_sub_map; /* Is "object" a submap? */
+ boolean_t is_a_map:1, /* Is "object" a map? */
+ is_sub_map:1, /* Is "object" a submap? */
/* Only in sharing maps: */
- boolean_t copy_on_write; /* is data copy-on-write */
- boolean_t needs_copy; /* does object need to be copied */
+ copy_on_write:1,/* is data copy-on-write */
+ needs_copy:1; /* does object need to be copied */
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
@@ -176,7 +176,7 @@ typedef struct {
/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP 10
-#define MAX_KMAPENT 500
+#define MAX_KMAPENT 128
#ifdef KERNEL
boolean_t vm_map_check_protection __P((vm_map_t,
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 9db6f506c2a0..2a8029b5ad10 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -95,6 +95,7 @@ loadav(avg)
/*
* Attributes associated with virtual memory.
*/
+int
vm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 340cded1ba48..2e7204a96a12 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -217,8 +217,10 @@ mmap(p, uap, retval)
if (flags & MAP_FIXED) {
if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
return (EINVAL);
+#ifndef i386
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
+#endif
if (addr > addr + size)
return (EINVAL);
}
@@ -400,8 +402,10 @@ munmap(p, uap, retval)
*/
if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
return (EINVAL);
+#ifndef i386
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
+#endif
if (addr > addr + size)
return (EINVAL);
map = &p->p_vmspace->vm_map;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index d11fa8be014f..a6419dc22aa8 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -72,6 +72,12 @@
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+static void _vm_object_allocate(vm_size_t, vm_object_t);
+void vm_object_deactivate_pages(vm_object_t);
+void vm_object_cache_trim(void);
+void vm_object_remove(vm_pager_t);
/*
* Virtual memory objects maintain the actual data
@@ -99,26 +105,56 @@
*
*/
+
struct vm_object kernel_object_store;
struct vm_object kmem_object_store;
+extern int vm_cache_max;
#define VM_OBJECT_HASH_COUNT 157
-int vm_cache_max = 100; /* can patch if necessary */
-struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
+struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
long object_collapses = 0;
long object_bypasses = 0;
-static void _vm_object_allocate __P((vm_size_t, vm_object_t));
+static void
+_vm_object_allocate(size, object)
+ vm_size_t size;
+ register vm_object_t object;
+{
+ bzero(object, sizeof *object);
+ TAILQ_INIT(&object->memq);
+ vm_object_lock_init(object);
+ object->ref_count = 1;
+ object->resident_page_count = 0;
+ object->size = size;
+ object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
+ object->paging_in_progress = 0;
+ object->copy = NULL;
+
+ /*
+ * Object starts out read-write, with no pager.
+ */
+
+ object->pager = NULL;
+ object->paging_offset = 0;
+ object->shadow = NULL;
+ object->shadow_offset = (vm_offset_t) 0;
+
+ simple_lock(&vm_object_list_lock);
+ TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
+ vm_object_count++;
+ cnt.v_nzfod += atop(size);
+ simple_unlock(&vm_object_list_lock);
+}
/*
* vm_object_init:
*
* Initialize the VM objects module.
*/
-void vm_object_init(size)
- vm_size_t size;
+void
+vm_object_init(vm_offset_t nothing)
{
register int i;
@@ -132,10 +168,12 @@ void vm_object_init(size)
TAILQ_INIT(&vm_object_hashtable[i]);
kernel_object = &kernel_object_store;
- _vm_object_allocate(size, kernel_object);
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ kernel_object);
kmem_object = &kmem_object_store;
- _vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
+ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ kmem_object);
}
/*
@@ -144,55 +182,30 @@ void vm_object_init(size)
* Returns a new object with the given size.
*/
-vm_object_t vm_object_allocate(size)
+vm_object_t
+vm_object_allocate(size)
vm_size_t size;
{
register vm_object_t result;
+ int s;
result = (vm_object_t)
malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);
+
_vm_object_allocate(size, result);
return(result);
}
-static void
-_vm_object_allocate(size, object)
- vm_size_t size;
- register vm_object_t object;
-{
- TAILQ_INIT(&object->memq);
- vm_object_lock_init(object);
- object->ref_count = 1;
- object->resident_page_count = 0;
- object->size = size;
- object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
- object->paging_in_progress = 0;
- object->copy = NULL;
-
- /*
- * Object starts out read-write, with no pager.
- */
-
- object->pager = NULL;
- object->paging_offset = 0;
- object->shadow = NULL;
- object->shadow_offset = (vm_offset_t) 0;
-
- simple_lock(&vm_object_list_lock);
- TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
- vm_object_count++;
- cnt.v_nzfod += atop(size);
- simple_unlock(&vm_object_list_lock);
-}
/*
* vm_object_reference:
*
* Gets another reference to the given object.
*/
-void vm_object_reference(object)
+inline void
+vm_object_reference(object)
register vm_object_t object;
{
if (object == NULL)
@@ -214,8 +227,9 @@ void vm_object_reference(object)
*
* No object may be locked.
*/
-void vm_object_deallocate(object)
- register vm_object_t object;
+void
+vm_object_deallocate(object)
+ vm_object_t object;
{
vm_object_t temp;
@@ -235,11 +249,11 @@ void vm_object_deallocate(object)
vm_object_lock(object);
if (--(object->ref_count) != 0) {
+ vm_object_unlock(object);
/*
* If there are still references, then
* we are done.
*/
- vm_object_unlock(object);
vm_object_cache_unlock();
return;
}
@@ -257,7 +271,12 @@ void vm_object_deallocate(object)
vm_object_cached++;
vm_object_cache_unlock();
- vm_object_deactivate_pages(object);
+/*
+ * this code segment was removed because it kills performance with
+ * large -- repetively used binaries. The functionality now resides
+ * in the pageout daemon
+ * vm_object_deactivate_pages(object);
+ */
vm_object_unlock(object);
vm_object_cache_trim();
@@ -269,7 +288,7 @@ void vm_object_deallocate(object)
*/
vm_object_remove(object->pager);
vm_object_cache_unlock();
-
+
temp = object->shadow;
vm_object_terminate(object);
/* unlocks and deallocates object */
@@ -277,18 +296,19 @@ void vm_object_deallocate(object)
}
}
-
/*
* vm_object_terminate actually destroys the specified object, freeing
* up all previously used resources.
*
* The object must be locked.
*/
-void vm_object_terminate(object)
+void
+vm_object_terminate(object)
register vm_object_t object;
{
register vm_page_t p;
vm_object_t shadow_object;
+ int s;
/*
* Detach the object from its shadow if we are the shadow's
@@ -298,28 +318,68 @@ void vm_object_terminate(object)
vm_object_lock(shadow_object);
if (shadow_object->copy == object)
shadow_object->copy = NULL;
-#if 0
+/*
else if (shadow_object->copy != NULL)
panic("vm_object_terminate: copy/shadow inconsistency");
-#endif
+*/
vm_object_unlock(shadow_object);
}
/*
- * Wait until the pageout daemon is through with the object.
+ * Wait until the pageout daemon is through
+ * with the object.
*/
+
while (object->paging_in_progress) {
vm_object_sleep((int)object, object, FALSE);
vm_object_lock(object);
}
/*
- * If not an internal object clean all the pages, removing them
- * from paging queues as we go.
+ * While the paging system is locked,
+ * pull the object's pages off the active
+ * and inactive queues. This keeps the
+ * pageout daemon from playing with them
+ * during vm_pager_deallocate.
*
- * XXX need to do something in the event of a cleaning error.
+ * We can't free the pages yet, because the
+ * object's pager may have to write them out
+ * before deallocating the paging space.
+ */
+
+ for( p = object->memq.tqh_first; p; p=p->listq.tqe_next) {
+ VM_PAGE_CHECK(p);
+
+ vm_page_lock_queues();
+ s = splimp();
+ if (p->flags & PG_ACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
+ p->flags &= ~PG_ACTIVE;
+ cnt.v_active_count--;
+ }
+
+ if (p->flags & PG_INACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
+ p->flags &= ~PG_INACTIVE;
+ cnt.v_inactive_count--;
+ }
+ splx(s);
+ vm_page_unlock_queues();
+ }
+
+ vm_object_unlock(object);
+
+ if (object->paging_in_progress != 0)
+ panic("vm_object_deallocate: pageout in progress");
+
+ /*
+ * Clean and free the pages, as appropriate.
+ * All references to the object are gone,
+ * so we don't need to lock it.
*/
+
if ((object->flags & OBJ_INTERNAL) == 0) {
+ vm_object_lock(object);
(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
vm_object_unlock(object);
}
@@ -335,23 +395,24 @@ void vm_object_terminate(object)
cnt.v_pfree++;
vm_page_unlock_queues();
}
- if ((object->flags & OBJ_INTERNAL) == 0)
- vm_object_unlock(object);
/*
- * Let the pager know object is dead.
+ * Let the pager know object is dead.
*/
+
if (object->pager != NULL)
vm_pager_deallocate(object->pager);
+
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, object, object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
/*
- * Free the space for the object.
+ * Free the space for the object.
*/
+
free((caddr_t)object, M_VMOBJ);
}
@@ -359,6 +420,69 @@ void vm_object_terminate(object)
* vm_object_page_clean
*
* Clean all dirty pages in the specified range of object.
+ * Leaves page on whatever queue it is currently on.
+ *
+ * Odd semantics: if start == end, we clean everything.
+ *
+ * The object must be locked.
+ */
+#if 1
+boolean_t
+vm_object_page_clean(object, start, end, syncio, de_queue)
+ register vm_object_t object;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ boolean_t syncio;
+ boolean_t de_queue;
+{
+ register vm_page_t p, nextp;
+ int s;
+ int size;
+
+ if (object->pager == NULL)
+ return 1;
+
+ if (start != end) {
+ start = trunc_page(start);
+ end = round_page(end);
+ }
+ size = end - start;
+
+again:
+ /*
+ * Wait until the pageout daemon is through with the object.
+ */
+ while (object->paging_in_progress) {
+ vm_object_sleep((int)object, object, FALSE);
+ }
+
+ nextp = object->memq.tqh_first;
+ while ( (p = nextp) && ((start == end) || (size != 0) ) ) {
+ nextp = p->listq.tqe_next;
+ if (start == end || (p->offset >= start && p->offset < end)) {
+ if (p->flags & PG_BUSY)
+ continue;
+
+ size -= PAGE_SIZE;
+
+ if ((p->flags & PG_CLEAN)
+ && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
+ p->flags &= ~PG_CLEAN;
+
+ if ((p->flags & PG_CLEAN) == 0) {
+ vm_pageout_clean(p,VM_PAGEOUT_FORCE);
+ goto again;
+ }
+ }
+ }
+ wakeup((caddr_t)object);
+ return 1;
+}
+#endif
+/*
+ * vm_object_page_clean
+ *
+ * Clean all dirty pages in the specified range of object.
* If syncio is TRUE, page cleaning is done synchronously.
* If de_queue is TRUE, pages are removed from any paging queue
* they were on, otherwise they are left on whatever queue they
@@ -372,6 +496,7 @@ void vm_object_terminate(object)
* somewhere. We attempt to clean (and dequeue) all pages regardless
* of where an error occurs.
*/
+#if 0
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
register vm_object_t object;
@@ -421,6 +546,7 @@ again:
* Loop through the object page list cleaning as necessary.
*/
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
+ onqueue = 0;
if ((start == end || p->offset >= start && p->offset < end) &&
!(p->flags & PG_FICTITIOUS)) {
if ((p->flags & PG_CLEAN) &&
@@ -493,6 +619,7 @@ again:
}
return (noerror);
}
+#endif
/*
* vm_object_deactivate_pages
@@ -539,6 +666,7 @@ vm_object_cache_trim()
vm_object_cache_unlock();
}
+
/*
* vm_object_pmap_copy:
*
@@ -576,7 +704,8 @@ void vm_object_pmap_copy(object, start, end)
*
* The object must *not* be locked.
*/
-void vm_object_pmap_remove(object, start, end)
+void
+vm_object_pmap_remove(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
@@ -587,9 +716,19 @@ void vm_object_pmap_remove(object, start, end)
return;
vm_object_lock(object);
- for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
- if ((start <= p->offset) && (p->offset < end))
+again:
+ for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
+ if ((start <= p->offset) && (p->offset < end)) {
+ if (p->flags & PG_BUSY) {
+ p->flags |= PG_WANTED;
+ tsleep((caddr_t) p, PVM, "vmopmr", 0);
+ goto again;
+ }
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+ if ((p->flags & PG_CLEAN) == 0)
+ p->flags |= PG_LAUNDRY;
+ }
+ }
vm_object_unlock(object);
}
@@ -629,6 +768,7 @@ void vm_object_copy(src_object, src_offset, size,
return;
}
+
/*
* If the object's pager is null_pager or the
* default pager, we don't have to make a copy
@@ -637,7 +777,15 @@ void vm_object_copy(src_object, src_offset, size,
*/
vm_object_lock(src_object);
+
+ /*
+ * Try to collapse the object before copying it.
+ */
+
+ vm_object_collapse(src_object);
+
if (src_object->pager == NULL ||
+ src_object->pager->pg_type == PG_SWAP ||
(src_object->flags & OBJ_INTERNAL)) {
/*
@@ -664,10 +812,6 @@ void vm_object_copy(src_object, src_offset, size,
return;
}
- /*
- * Try to collapse the object before copying it.
- */
- vm_object_collapse(src_object);
/*
* If the object has a pager, the pager wants to
@@ -798,7 +942,8 @@ void vm_object_copy(src_object, src_offset, size,
* are returned in the source parameters.
*/
-void vm_object_shadow(object, offset, length)
+void
+vm_object_shadow(object, offset, length)
vm_object_t *object; /* IN/OUT */
vm_offset_t *offset; /* IN/OUT */
vm_size_t length;
@@ -843,7 +988,8 @@ void vm_object_shadow(object, offset, length)
* Set the specified object's pager to the specified pager.
*/
-void vm_object_setpager(object, pager, paging_offset,
+void
+vm_object_setpager(object, pager, paging_offset,
read_only)
vm_object_t object;
vm_pager_t pager;
@@ -852,9 +998,12 @@ void vm_object_setpager(object, pager, paging_offset,
{
#ifdef lint
read_only++; /* No longer used */
-#endif
+#endif lint
vm_object_lock(object); /* XXX ? */
+ if (object->pager && object->pager != pager) {
+ panic("!!!pager already allocated!!!\n");
+ }
object->pager = pager;
object->paging_offset = paging_offset;
vm_object_unlock(object); /* XXX ? */
@@ -865,7 +1014,7 @@ void vm_object_setpager(object, pager, paging_offset,
*/
#define vm_object_hash(pager) \
- (((unsigned)pager)%VM_OBJECT_HASH_COUNT)
+ (((unsigned)pager >> 5)%VM_OBJECT_HASH_COUNT)
/*
* vm_object_lookup looks in the object cache for an object with the
@@ -965,38 +1114,6 @@ vm_object_remove(pager)
}
}
-/*
- * vm_object_cache_clear removes all objects from the cache.
- *
- */
-
-void vm_object_cache_clear()
-{
- register vm_object_t object;
-
- /*
- * Remove each object in the cache by scanning down the
- * list of cached objects.
- */
- vm_object_cache_lock();
- while ((object = vm_object_cached_list.tqh_first) != NULL) {
- vm_object_cache_unlock();
-
- /*
- * Note: it is important that we use vm_object_lookup
- * to gain a reference, and not vm_object_reference, because
- * the logic for removing an object from the cache lies in
- * lookup.
- */
- if (object != vm_object_lookup(object->pager))
- panic("vm_object_cache_clear: I'm sooo confused.");
- pager_cache(object, FALSE);
-
- vm_object_cache_lock();
- }
- vm_object_cache_unlock();
-}
-
boolean_t vm_object_collapse_allowed = TRUE;
/*
* vm_object_collapse:
@@ -1008,8 +1125,12 @@ boolean_t vm_object_collapse_allowed = TRUE;
* Requires that the object be locked and the page
* queues be unlocked.
*
+ * This routine has significant changes by John S. Dyson
+ * to fix some swap memory leaks. 18 Dec 93
+ *
*/
-void vm_object_collapse(object)
+void
+vm_object_collapse(object)
register vm_object_t object;
{
@@ -1027,11 +1148,10 @@ void vm_object_collapse(object)
* Verify that the conditions are right for collapse:
*
* The object exists and no pages in it are currently
- * being paged out (or have ever been paged out).
+ * being paged out.
*/
if (object == NULL ||
- object->paging_in_progress != 0 ||
- object->pager != NULL)
+ object->paging_in_progress != 0)
return;
/*
@@ -1067,12 +1187,24 @@ void vm_object_collapse(object)
* parent object.
*/
if (backing_object->shadow != NULL &&
- backing_object->shadow->copy != NULL) {
+ backing_object->shadow->copy == backing_object) {
vm_object_unlock(backing_object);
return;
}
/*
+ * we can deal only with the swap pager
+ */
+ if ((object->pager &&
+ object->pager->pg_type != PG_SWAP) ||
+ (backing_object->pager &&
+ backing_object->pager->pg_type != PG_SWAP)) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+
+ /*
* We know that we can either collapse the backing
* object (if the parent is the only reference to
* it) or (perhaps) remove the parent's reference
@@ -1098,7 +1230,8 @@ void vm_object_collapse(object)
* pages that shadow them.
*/
- while ((p = backing_object->memq.tqh_first) != NULL) {
+ while (p = backing_object->memq.tqh_first) {
+
new_offset = (p->offset - backing_offset);
/*
@@ -1116,19 +1249,12 @@ void vm_object_collapse(object)
vm_page_unlock_queues();
} else {
pp = vm_page_lookup(object, new_offset);
- if (pp != NULL && !(pp->flags & PG_FAKE)) {
+ if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
+ object->paging_offset + new_offset))) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
- }
- else {
- if (pp) {
- /* may be someone waiting for it */
- PAGE_WAKEUP(pp);
- vm_page_lock_queues();
- vm_page_free(pp);
- vm_page_unlock_queues();
- }
+ } else {
vm_page_rename(p, object, new_offset);
}
}
@@ -1136,19 +1262,50 @@ void vm_object_collapse(object)
/*
* Move the pager from backing_object to object.
- *
- * XXX We're only using part of the paging space
- * for keeps now... we ought to discard the
- * unused portion.
*/
if (backing_object->pager) {
- object->pager = backing_object->pager;
- object->paging_offset = backing_offset +
- backing_object->paging_offset;
- backing_object->pager = NULL;
+ backing_object->paging_in_progress++;
+ if (object->pager) {
+ vm_pager_t bopager;
+ object->paging_in_progress++;
+ /*
+ * copy shadow object pages into ours
+ * and destroy unneeded pages in shadow object.
+ */
+ bopager = backing_object->pager;
+ backing_object->pager = NULL;
+ vm_object_remove(backing_object->pager);
+ swap_pager_copy(
+ bopager, backing_object->paging_offset,
+ object->pager, object->paging_offset,
+ object->shadow_offset);
+ object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t)object);
+ } else {
+ object->paging_in_progress++;
+ /*
+ * grab the shadow objects pager
+ */
+ object->pager = backing_object->pager;
+ object->paging_offset = backing_object->paging_offset + backing_offset;
+ vm_object_remove(backing_object->pager);
+ backing_object->pager = NULL;
+ /*
+ * free unnecessary blocks
+ */
+ swap_pager_freespace(object->pager, 0, object->paging_offset);
+ object->paging_in_progress--;
+ if (object->paging_in_progress == 0)
+ wakeup((caddr_t)object);
+ }
+ backing_object->paging_in_progress--;
+ if (backing_object->paging_in_progress == 0)
+ wakeup((caddr_t)backing_object);
}
+
/*
* Object now shadows whatever backing_object did.
* Note that the reference to backing_object->shadow
@@ -1173,7 +1330,7 @@ void vm_object_collapse(object)
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, backing_object,
- object_list);
+ object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
@@ -1204,9 +1361,7 @@ void vm_object_collapse(object)
* of pages here.
*/
- for (p = backing_object->memq.tqh_first;
- p != NULL;
- p = p->listq.tqe_next) {
+ for( p = backing_object->memq.tqh_first;p;p=p->listq.tqe_next) {
new_offset = (p->offset - backing_offset);
/*
@@ -1219,10 +1374,9 @@ void vm_object_collapse(object)
*/
if (p->offset >= backing_offset &&
- new_offset < size &&
- ((pp = vm_page_lookup(object, new_offset))
- == NULL ||
- (pp->flags & PG_FAKE))) {
+ new_offset <= size &&
+ ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
+ (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset+new_offset))) {
/*
* Page still needed.
* Can't go any further.
@@ -1239,23 +1393,24 @@ void vm_object_collapse(object)
* count is at least 2.
*/
- object->shadow = backing_object->shadow;
- vm_object_reference(object->shadow);
+ vm_object_reference(object->shadow = backing_object->shadow);
object->shadow_offset += backing_object->shadow_offset;
/*
- * Backing object might have had a copy pointer
- * to us. If it did, clear it.
+ * Backing object might have had a copy pointer
+ * to us. If it did, clear it.
*/
if (backing_object->copy == object) {
backing_object->copy = NULL;
}
-
+
/* Drop the reference count on backing_object.
* Since its ref_count was at least 2, it
* will not vanish; so we don't need to call
* vm_object_deallocate.
*/
+ if (backing_object->ref_count == 1)
+ printf("should have called obj deallocate\n");
backing_object->ref_count--;
vm_object_unlock(backing_object);
@@ -1277,23 +1432,55 @@ void vm_object_collapse(object)
*
* The object must be locked.
*/
-void vm_object_page_remove(object, start, end)
+void
+vm_object_page_remove(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
{
register vm_page_t p, next;
+ vm_offset_t size;
+ int cnt;
+ int s;
if (object == NULL)
return;
- for (p = object->memq.tqh_first; p != NULL; p = next) {
- next = p->listq.tqe_next;
- if ((start <= p->offset) && (p->offset < end)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
- vm_page_lock_queues();
- vm_page_free(p);
- vm_page_unlock_queues();
+ start = trunc_page(start);
+ end = round_page(end);
+again:
+ size = end-start;
+ if (size > 4*PAGE_SIZE || size >= object->size/4) {
+ for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
+ next = p->listq.tqe_next;
+ if ((start <= p->offset) && (p->offset < end)) {
+ if (p->flags & PG_BUSY) {
+ p->flags |= PG_WANTED;
+ tsleep((caddr_t) p, PVM, "vmopar", 0);
+ goto again;
+ }
+ pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ size -= PAGE_SIZE;
+ }
+ }
+ } else {
+ while (size > 0) {
+ while (p = vm_page_lookup(object, start)) {
+ if (p->flags & PG_BUSY) {
+ p->flags |= PG_WANTED;
+ tsleep((caddr_t) p, PVM, "vmopar", 0);
+ goto again;
+ }
+ pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ }
+ start += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
}
}
@@ -1389,6 +1576,27 @@ boolean_t vm_object_coalesce(prev_object, next_object,
}
/*
+ * returns page after looking up in shadow chain
+ */
+
+vm_page_t
+vm_object_page_lookup(object, offset)
+ vm_object_t object;
+ vm_offset_t offset;
+{
+ vm_page_t m;
+ if (!(m=vm_page_lookup(object, offset))) {
+ if (!object->shadow)
+ return 0;
+ else
+ return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
+ }
+ return m;
+}
+
+#define DEBUG
+#if defined(DEBUG) || (NDDB > 0)
+/*
* vm_object_print: [ debug ]
*/
void vm_object_print(object, full)
@@ -1434,3 +1642,4 @@ void vm_object_print(object, full)
printf("\n");
indent -= 2;
}
+#endif /* defined(DEBUG) || (NDDB > 0) */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0cd9d875b699..38d320f9d64c 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@@ -33,9 +33,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
- *
- *
+ * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
+ * $Id: vm_page.c,v 1.17 1994/04/20 07:07:14 davidg Exp $
+ */
+
+/*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
@@ -68,6 +70,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -123,7 +126,6 @@ void vm_set_page_size()
break;
}
-
/*
* vm_page_startup:
*
@@ -133,17 +135,55 @@ void vm_set_page_size()
* for the object/offset-to-page hash table headers.
* Each page cell is initialized and placed on the free list.
*/
-void vm_page_startup(start, end)
- vm_offset_t *start;
- vm_offset_t *end;
+
+vm_offset_t
+vm_page_startup(starta, enda, vaddr)
+ register vm_offset_t starta;
+ vm_offset_t enda;
+ register vm_offset_t vaddr;
{
+ register vm_offset_t mapped;
register vm_page_t m;
- register struct pglist *bucket;
- vm_size_t npages;
+ register struct pglist *bucket;
+ vm_size_t npages, page_range;
+ register vm_offset_t new_start;
int i;
vm_offset_t pa;
+ int nblocks;
+ vm_offset_t first_managed_page;
+ int size;
+
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+ extern vm_offset_t phys_avail[];
+/* the biggest memory array is the second group of pages */
+ vm_offset_t start;
+ vm_offset_t biggestone, biggestsize;
+
+ vm_offset_t total;
+
+ total = 0;
+ biggestsize = 0;
+ biggestone = 0;
+ nblocks = 0;
+ vaddr = round_page(vaddr);
+
+ for (i = 0; phys_avail[i + 1]; i += 2) {
+ phys_avail[i] = round_page(phys_avail[i]);
+ phys_avail[i+1] = trunc_page(phys_avail[i+1]);
+ }
+
+ for (i = 0; phys_avail[i + 1]; i += 2) {
+ int size = phys_avail[i+1] - phys_avail[i];
+ if (size > biggestsize) {
+ biggestone = i;
+ biggestsize = size;
+ }
+ ++nblocks;
+ total += size;
+ }
+
+ start = phys_avail[biggestone];
/*
@@ -163,7 +203,7 @@ void vm_page_startup(start, end)
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Calculate the number of hash table buckets.
+ * Allocate (and initialize) the hash table buckets.
*
* The number of buckets MUST BE a power of 2, and
* the actual value is the next power of 2 greater
@@ -172,23 +212,31 @@ void vm_page_startup(start, end)
* Note:
* This computation can be tweaked if desired.
*/
-
+ vm_page_buckets = (struct pglist *)vaddr;
+ bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
- while (vm_page_bucket_count < atop(*end - *start))
+ while (vm_page_bucket_count < atop(total))
vm_page_bucket_count <<= 1;
}
+
vm_page_hash_mask = vm_page_bucket_count - 1;
/*
- * Allocate (and initialize) the hash table buckets.
+ * Validate these addresses.
*/
- vm_page_buckets = (struct pglist *)
- pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
- bucket = vm_page_buckets;
- for (i = vm_page_bucket_count; i--;) {
+ new_start = start + vm_page_bucket_count * sizeof(struct pglist);
+ new_start = round_page(new_start);
+ mapped = vaddr;
+ vaddr = pmap_map(mapped, start, new_start,
+ VM_PROT_READ|VM_PROT_WRITE);
+ start = new_start;
+ bzero((caddr_t) mapped, vaddr - mapped);
+ mapped = vaddr;
+
+ for (i = 0; i< vm_page_bucket_count; i++) {
TAILQ_INIT(bucket);
bucket++;
}
@@ -196,11 +244,9 @@ void vm_page_startup(start, end)
simple_lock_init(&bucket_lock);
/*
- * Truncate the remainder of physical memory to our page size.
+ * round (or truncate) the addresses to our page size.
*/
- *end = trunc_page(*end);
-
/*
* Pre-allocate maps and map entries that cannot be dynamically
* allocated via malloc(). The maps include the kernel_map and
@@ -213,9 +259,20 @@ void vm_page_startup(start, end)
* map (they should use their own maps).
*/
- kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
- MAX_KMAPENT*sizeof(struct vm_map_entry));
- kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
+ kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
+ MAX_KMAPENT * sizeof(struct vm_map_entry);
+ kentry_data_size = round_page(kentry_data_size);
+ kentry_data = (vm_offset_t) vaddr;
+ vaddr += kentry_data_size;
+
+ /*
+ * Validate these zone addresses.
+ */
+
+ new_start = start + (vaddr - mapped);
+ pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
+ bzero((caddr_t) mapped, (vaddr - mapped));
+ start = round_page(new_start);
/*
* Compute the number of pages of memory that will be
@@ -223,53 +280,53 @@ void vm_page_startup(start, end)
* of a page structure per page).
*/
- cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page))
- / (PAGE_SIZE + sizeof(struct vm_page));
+ npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
+ first_page = phys_avail[0] / PAGE_SIZE;
+ page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
/*
- * Record the extent of physical memory that the
- * virtual memory system manages.
+ * Initialize the mem entry structures now, and
+ * put them in the free queue.
*/
- first_page = *start;
- first_page += npages*sizeof(struct vm_page);
- first_page = atop(round_page(first_page));
- last_page = first_page + npages - 1;
-
- first_phys_addr = ptoa(first_page);
- last_phys_addr = ptoa(last_page) + PAGE_MASK;
+ vm_page_array = (vm_page_t) vaddr;
+ mapped = vaddr;
/*
- * Allocate and clear the mem entry structures.
+ * Validate these addresses.
*/
- m = vm_page_array = (vm_page_t)
- pmap_bootstrap_alloc(npages * sizeof(struct vm_page));
+ new_start = round_page(start + page_range * sizeof (struct vm_page));
+ mapped = pmap_map(mapped, start, new_start,
+ VM_PROT_READ|VM_PROT_WRITE);
+ start = new_start;
+
+ first_managed_page = start / PAGE_SIZE;
/*
- * Initialize the mem entry structures now, and
- * put them in the free queue.
+ * Clear all of the page structures
*/
+ bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
- pa = first_phys_addr;
- while (npages--) {
- m->flags = 0;
- m->object = NULL;
- m->phys_addr = pa;
-#ifdef i386
- if (pmap_isvalidphys(m->phys_addr)) {
+ cnt.v_page_count = 0;
+ cnt.v_free_count= 0;
+ for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
+ if (i == biggestone)
+ pa = ptoa(first_managed_page);
+ else
+ pa = phys_avail[i];
+ while (pa < phys_avail[i + 1] && npages-- > 0) {
+ ++cnt.v_page_count;
+ ++cnt.v_free_count;
+ m = PHYS_TO_VM_PAGE(pa);
+ m->flags = 0;
+ m->object = 0;
+ m->phys_addr = pa;
+ m->hold_count = 0;
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
- } else {
- /* perhaps iomem needs it's own type, or dev pager? */
- m->flags |= PG_FICTITIOUS | PG_BUSY;
- cnt.v_free_count--;
+ pa += PAGE_SIZE;
}
-#else /* i386 */
- TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
-#endif /* i386 */
- m++;
- pa += PAGE_SIZE;
}
/*
@@ -278,8 +335,7 @@ void vm_page_startup(start, end)
*/
simple_lock_init(&vm_pages_needed_lock);
- /* from now on, pmap_bootstrap_alloc can't be used */
- vm_page_startup_initialized = TRUE;
+ return(mapped);
}
/*
@@ -289,8 +345,13 @@ void vm_page_startup(start, end)
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
-#define vm_page_hash(object, offset) \
- (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
+inline const int
+vm_page_hash(object, offset)
+ vm_object_t object;
+ vm_offset_t offset;
+{
+ return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
+}
/*
* vm_page_insert: [ internal use only ]
@@ -307,7 +368,7 @@ void vm_page_insert(mem, object, offset)
register vm_offset_t offset;
{
register struct pglist *bucket;
- int spl;
+ int s;
VM_PAGE_CHECK(mem);
@@ -326,11 +387,11 @@ void vm_page_insert(mem, object, offset)
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void) splx(spl);
+ (void) splx(s);
/*
* Now link into the object's list of backed pages.
@@ -361,7 +422,7 @@ void vm_page_remove(mem)
register vm_page_t mem;
{
register struct pglist *bucket;
- int spl;
+ int s;
VM_PAGE_CHECK(mem);
@@ -373,11 +434,11 @@ void vm_page_remove(mem)
*/
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
TAILQ_REMOVE(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void) splx(spl);
+ (void) splx(s);
/*
* Now remove from the object's list of backed pages.
@@ -410,7 +471,7 @@ vm_page_t vm_page_lookup(object, offset)
{
register vm_page_t mem;
register struct pglist *bucket;
- int spl;
+ int s;
/*
* Search the hash table for this object/offset pair
@@ -418,19 +479,19 @@ vm_page_t vm_page_lookup(object, offset)
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
VM_PAGE_CHECK(mem);
if ((mem->object == object) && (mem->offset == offset)) {
simple_unlock(&bucket_lock);
- splx(spl);
+ splx(s);
return(mem);
}
}
simple_unlock(&bucket_lock);
- splx(spl);
+ splx(s);
return(NULL);
}
@@ -465,46 +526,62 @@ void vm_page_rename(mem, new_object, new_offset)
*
* Object must be locked.
*/
-vm_page_t vm_page_alloc(object, offset)
+vm_page_t
+vm_page_alloc(object, offset)
vm_object_t object;
vm_offset_t offset;
{
register vm_page_t mem;
- int spl;
+ int s;
- spl = splimp(); /* XXX */
+ s = splimp();
simple_lock(&vm_page_queue_free_lock);
- if (vm_page_queue_free.tqh_first == NULL) {
+ if ( object != kernel_object &&
+ object != kmem_object &&
+ curproc != pageproc && curproc != &proc0 &&
+ cnt.v_free_count < cnt.v_free_reserved) {
+
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
+ splx(s);
+ /*
+ * this wakeup seems unnecessary, but there is code that
+ * might just check to see if there are free pages, and
+ * punt if there aren't. VM_WAIT does this too, but
+ * redundant wakeups aren't that bad...
+ */
+ if (curproc != pageproc)
+ wakeup((caddr_t) &vm_pages_needed);
+ return(NULL);
+ }
+ if (( mem = vm_page_queue_free.tqh_first) == 0) {
+ simple_unlock(&vm_page_queue_free_lock);
+ printf("No pages???\n");
+ splx(s);
+ /*
+ * comment above re: wakeups applies here too...
+ */
+ if (curproc != pageproc)
+ wakeup((caddr_t) &vm_pages_needed);
return(NULL);
}
- mem = vm_page_queue_free.tqh_first;
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
cnt.v_free_count--;
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
VM_PAGE_INIT(mem, object, offset);
+ splx(s);
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
+/*
+ * don't wakeup too often, so we wakeup the pageout daemon when
+ * we would be nearly out of memory.
+ */
+ if (curproc != pageproc &&
+ (cnt.v_free_count < cnt.v_free_reserved))
+ wakeup((caddr_t) &vm_pages_needed);
- if (cnt.v_free_count < cnt.v_free_min ||
- (cnt.v_free_count < cnt.v_free_target &&
- cnt.v_inactive_count < cnt.v_inactive_target))
- thread_wakeup((int)&vm_pages_needed);
- return (mem);
+ return(mem);
}
/*
@@ -518,6 +595,8 @@ vm_page_t vm_page_alloc(object, offset)
void vm_page_free(mem)
register vm_page_t mem;
{
+ int s;
+ s = splimp();
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -532,18 +611,46 @@ void vm_page_free(mem)
}
if (!(mem->flags & PG_FICTITIOUS)) {
- int spl;
- spl = splimp();
simple_lock(&vm_page_queue_free_lock);
+ if (mem->wire_count) {
+ cnt.v_wire_count--;
+ mem->wire_count = 0;
+ }
TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
cnt.v_free_count++;
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
+ splx(s);
+ /*
+ * if pageout daemon needs pages, then tell it that there
+ * are some free.
+ */
+ if (vm_pageout_pages_needed)
+ wakeup((caddr_t)&vm_pageout_pages_needed);
+
+ /*
+ * wakeup processes that are waiting on memory if we
+ * hit a high water mark.
+ */
+ if (cnt.v_free_count == cnt.v_free_min) {
+ wakeup((caddr_t)&cnt.v_free_count);
+ }
+
+ /*
+ * wakeup scheduler process if we have lots of memory.
+ * this process will swapin processes.
+ */
+ if (cnt.v_free_count == cnt.v_free_target) {
+ wakeup((caddr_t)&proc0);
+ }
+ } else {
+ splx(s);
}
+ wakeup((caddr_t) mem);
}
+
/*
* vm_page_wire:
*
@@ -556,9 +663,11 @@ void vm_page_free(mem)
void vm_page_wire(mem)
register vm_page_t mem;
{
+ int s;
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
+ s = splimp();
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
cnt.v_active_count--;
@@ -569,6 +678,7 @@ void vm_page_wire(mem)
cnt.v_inactive_count--;
mem->flags &= ~PG_INACTIVE;
}
+ splx(s);
cnt.v_wire_count++;
}
mem->wire_count++;
@@ -585,17 +695,77 @@ void vm_page_wire(mem)
void vm_page_unwire(mem)
register vm_page_t mem;
{
+ int s;
VM_PAGE_CHECK(mem);
- mem->wire_count--;
+ s = splimp();
+
+ if( mem->wire_count)
+ mem->wire_count--;
if (mem->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
cnt.v_active_count++;
mem->flags |= PG_ACTIVE;
cnt.v_wire_count--;
}
+ splx(s);
}
+#if 0
+/*
+ * vm_page_deactivate:
+ *
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
+ *
+ * The page queues must be locked.
+ */
+void
+vm_page_deactivate(m)
+ register vm_page_t m;
+{
+ int spl;
+ VM_PAGE_CHECK(m);
+
+ /*
+ * Only move active pages -- ignore locked or already
+ * inactive ones.
+ *
+ * XXX: sometimes we get pages which aren't wired down
+ * or on any queue - we need to put them on the inactive
+ * queue also, otherwise we lose track of them.
+ * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
+ */
+
+ spl = splimp();
+ if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
+ m->hold_count == 0) {
+
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ if (m->flags & PG_ACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ m->flags &= ~PG_ACTIVE;
+ cnt.v_active_count--;
+ }
+ TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
+ m->flags |= PG_INACTIVE;
+ cnt.v_inactive_count++;
+#define NOT_DEACTIVATE_PROTECTS
+#ifndef NOT_DEACTIVATE_PROTECTS
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+#else
+ if ((m->flags & PG_CLEAN) &&
+ pmap_is_modified(VM_PAGE_TO_PHYS(m)))
+ m->flags &= ~PG_CLEAN;
+#endif
+ if ((m->flags & PG_CLEAN) == 0)
+ m->flags |= PG_LAUNDRY;
+ }
+ splx(spl);
+}
+#endif
+#if 1
/*
* vm_page_deactivate:
*
@@ -608,14 +778,16 @@ void vm_page_unwire(mem)
void vm_page_deactivate(m)
register vm_page_t m;
{
+ int s;
VM_PAGE_CHECK(m);
+ s = splimp();
/*
* Only move active pages -- ignore locked or already
* inactive ones.
*/
- if (m->flags & PG_ACTIVE) {
+ if ((m->flags & PG_ACTIVE) && (m->hold_count == 0)) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
@@ -623,15 +795,21 @@ void vm_page_deactivate(m)
m->flags |= PG_INACTIVE;
cnt.v_active_count--;
cnt.v_inactive_count++;
+#define NOT_DEACTIVATE_PROTECTS
+#ifndef NOT_DEACTIVATE_PROTECTS
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+#else
if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
m->flags &= ~PG_CLEAN;
+#endif
if (m->flags & PG_CLEAN)
m->flags &= ~PG_LAUNDRY;
else
m->flags |= PG_LAUNDRY;
}
+ splx(s);
}
-
+#endif
/*
* vm_page_activate:
*
@@ -643,8 +821,10 @@ void vm_page_deactivate(m)
void vm_page_activate(m)
register vm_page_t m;
{
+ int s;
VM_PAGE_CHECK(m);
+ s = splimp();
if (m->flags & PG_INACTIVE) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
cnt.v_inactive_count--;
@@ -656,8 +836,12 @@ void vm_page_activate(m)
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->flags |= PG_ACTIVE;
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ m->act_count = 10;
cnt.v_active_count++;
}
+ splx(s);
}
/*
@@ -668,12 +852,12 @@ void vm_page_activate(m)
* be used by the zero-fill object.
*/
-boolean_t vm_page_zero_fill(m)
+boolean_t
+vm_page_zero_fill(m)
vm_page_t m;
{
VM_PAGE_CHECK(m);
- m->flags &= ~PG_CLEAN;
pmap_zero_page(VM_PAGE_TO_PHYS(m));
return(TRUE);
}
@@ -683,14 +867,13 @@ boolean_t vm_page_zero_fill(m)
*
* Copy one page to another
*/
-
-void vm_page_copy(src_m, dest_m)
+void
+vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dest_m);
- dest_m->flags &= ~PG_CLEAN;
pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 8bf51469a1f6..e8049c4955f9 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -107,6 +107,8 @@ struct vm_page {
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
+ short hold_count; /* page hold count */
+ u_short act_count; /* page usage count */
vm_offset_t phys_addr; /* physical address of page */
};
@@ -209,7 +211,7 @@ simple_lock_data_t vm_page_queue_free_lock;
(m)->flags &= ~PG_BUSY; \
if ((m)->flags & PG_WANTED) { \
(m)->flags &= ~PG_WANTED; \
- thread_wakeup((int) (m)); \
+ wakeup((caddr_t) (m)); \
} \
}
@@ -222,6 +224,8 @@ simple_lock_data_t vm_page_queue_free_lock;
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
vm_page_insert((mem), (object), (offset)); \
(mem)->wire_count = 0; \
+ (mem)->hold_count = 0; \
+ (mem)->act_count = 0; \
}
void vm_page_activate __P((vm_page_t));
@@ -233,10 +237,32 @@ void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
-void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
+vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
+
+/*
+ * Keep page from being freed by the page daemon
+ * much of the same effect as wiring, except much lower
+ * overhead and should be used only for *very* temporary
+ * holding ("wiring").
+ */
+static inline void
+vm_page_hold(mem)
+ vm_page_t mem;
+{
+ mem->hold_count++;
+}
+
+static inline void
+vm_page_unhold(mem)
+ vm_page_t mem;
+{
+ if( --mem->hold_count < 0)
+ panic("vm_page_unhold: hold count < 0!!!");
+}
+
#endif /* KERNEL */
#endif /* !_VM_PAGE_ */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 679540591e7f..202bf0369faf 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,6 +1,10 @@
/*
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@@ -33,7 +37,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_pageout.c 8.5 (Berkeley) 2/14/94
+ * @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -60,6 +64,8 @@
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
+ *
+ * $Id: vm_pageout.c,v 1.20 1994/04/20 07:07:15 davidg Exp $
*/
/*
@@ -67,501 +73,802 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
-#ifndef VM_PAGE_FREE_MIN
-#define VM_PAGE_FREE_MIN (cnt.v_free_count / 20)
-#endif
+extern vm_map_t kmem_map;
+int vm_pages_needed; /* Event on which pageout daemon sleeps */
+int vm_pagescanner; /* Event on which pagescanner sleeps */
+int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */
+
+int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */
+int vm_page_pagesfreed;
-#ifndef VM_PAGE_FREE_TARGET
-#define VM_PAGE_FREE_TARGET ((cnt.v_free_min * 4) / 3)
-#endif
+extern int npendingio;
+extern int hz;
+int vm_pageout_proc_limit;
+extern int nswiodone;
+extern int swap_pager_full;
+extern int swap_pager_ready();
-int vm_page_free_min_min = 16 * 1024;
-int vm_page_free_min_max = 256 * 1024;
+#define MAXREF 32767
-int vm_pages_needed; /* Event on which pageout daemon sleeps */
+#define MAXSCAN 512 /* maximum number of pages to scan in active queue */
+ /* set the "clock" hands to be (MAXSCAN * 4096) Bytes */
+#define ACT_DECLINE 1
+#define ACT_ADVANCE 6
+#define ACT_MAX 300
+
+#define LOWATER ((2048*1024)/NBPG)
+
+#define VM_PAGEOUT_PAGE_COUNT 8
+static vm_offset_t vm_space_needed;
+int vm_pageout_req_do_stats;
int vm_page_max_wired = 0; /* XXX max # of wired pages system-wide */
-#ifdef CLUSTERED_PAGEOUT
-#define MAXPOCLUSTER (MAXPHYS/NBPG) /* XXX */
-int doclustered_pageout = 1;
-#endif
/*
- * vm_pageout_scan does the dirty work for the pageout daemon.
+ * vm_pageout_clean:
+ * cleans a vm_page
*/
-void
-vm_pageout_scan()
+int
+vm_pageout_clean(m, sync)
+ register vm_page_t m;
+ int sync;
{
- register vm_page_t m, next;
- register int page_shortage;
- register int s;
- register int pages_freed;
- int free;
- vm_object_t object;
+ /*
+ * Clean the page and remove it from the
+ * laundry.
+ *
+ * We set the busy bit to cause
+ * potential page faults on this page to
+ * block.
+ *
+ * And we set pageout-in-progress to keep
+ * the object from disappearing during
+ * pageout. This guarantees that the
+ * page won't move from the inactive
+ * queue. (However, any other page on
+ * the inactive queue may move!)
+ */
+
+ register vm_object_t object;
+ register vm_pager_t pager;
+ int pageout_status[VM_PAGEOUT_PAGE_COUNT];
+ vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
+ int pageout_count;
+ int anyok=0;
+ int i;
+ vm_offset_t offset = m->offset;
+
+ object = m->object;
+ if (!object) {
+ printf("pager: object missing\n");
+ return 0;
+ }
/*
- * Only continue when we want more pages to be "free"
+ * Try to collapse the object before
+ * making a pager for it. We must
+ * unlock the page queues first.
+ * We try to defer the creation of a pager
+ * until all shadows are not paging. This
+ * allows vm_object_collapse to work better and
+ * helps control swap space size.
+ * (J. Dyson 11 Nov 93)
*/
- cnt.v_rev++;
+ if (!object->pager &&
+ cnt.v_free_count < vm_pageout_free_min)
+ return 0;
- s = splimp();
- simple_lock(&vm_page_queue_free_lock);
- free = cnt.v_free_count;
- simple_unlock(&vm_page_queue_free_lock);
- splx(s);
+ if (!object->pager &&
+ object->shadow &&
+ object->shadow->paging_in_progress)
+ return 0;
- if (free < cnt.v_free_target) {
- swapout_threads();
+ if( !sync) {
+ if (object->shadow) {
+ vm_object_collapse(object);
+ if (!vm_page_lookup(object, offset))
+ return 0;
+ }
- /*
- * Be sure the pmap system is updated so
- * we can scan the inactive queue.
- */
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
+ return 0;
+ }
+ }
+
+ pageout_count = 1;
+ ms[0] = m;
+
+ if( pager = object->pager) {
+ for(i=1;i<VM_PAGEOUT_PAGE_COUNT;i++) {
+ if( ms[i] = vm_page_lookup( object, offset+i*NBPG)) {
+ if((((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE)
+ || (( ms[i]->flags & PG_CLEAN) == 0 && sync == VM_PAGEOUT_FORCE))
+ && (ms[i]->wire_count == 0)
+ && (ms[i]->hold_count == 0))
+ pageout_count++;
+ else
+ break;
+ } else
+ break;
+ }
+ for(i=0;i<pageout_count;i++) {
+ ms[i]->flags |= PG_BUSY;
+ pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
+ }
+ object->paging_in_progress += pageout_count;
+ cnt.v_pageouts += pageout_count;
+ } else {
+
+ m->flags |= PG_BUSY;
- pmap_update();
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
+
+ cnt.v_pageouts++;
+
+ object->paging_in_progress++;
+
+ pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
+ object->size, VM_PROT_ALL, 0);
+ if (pager != NULL) {
+ vm_object_setpager(object, pager, 0, FALSE);
+ }
}
/*
- * Acquire the resident page system lock,
- * as we may be changing what's resident quite a bit.
+ * If there is no pager for the page,
+ * use the default pager. If there's
+ * no place to put the page at the
+ * moment, leave it in the laundry and
+ * hope that there will be paging space
+ * later.
*/
- vm_page_lock_queues();
- /*
- * Start scanning the inactive queue for pages we can free.
- * We keep scanning until we have enough free pages or
- * we have scanned through the entire queue. If we
- * encounter dirty pages, we start cleaning them.
- */
+ if ((pager && pager->pg_type == PG_SWAP) ||
+ cnt.v_free_count >= vm_pageout_free_min) {
+ if( pageout_count == 1) {
+ pageout_status[0] = pager ?
+ vm_pager_put(pager, m,
+ ((sync || (object == kernel_object)) ? TRUE: FALSE)) :
+ VM_PAGER_FAIL;
+ } else {
+ if( !pager) {
+ for(i=0;i<pageout_count;i++)
+ pageout_status[i] = VM_PAGER_FAIL;
+ } else {
+ vm_pager_put_pages(pager, ms, pageout_count,
+ ((sync || (object == kernel_object)) ? TRUE : FALSE),
+ pageout_status);
+ }
+ }
+
+ } else {
+ for(i=0;i<pageout_count;i++)
+ pageout_status[i] = VM_PAGER_FAIL;
+ }
- pages_freed = 0;
- for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
- s = splimp();
- simple_lock(&vm_page_queue_free_lock);
- free = cnt.v_free_count;
- simple_unlock(&vm_page_queue_free_lock);
- splx(s);
- if (free >= cnt.v_free_target)
+ for(i=0;i<pageout_count;i++) {
+ switch (pageout_status[i]) {
+ case VM_PAGER_OK:
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ++anyok;
+ break;
+ case VM_PAGER_PEND:
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ++anyok;
+ break;
+ case VM_PAGER_BAD:
+ /*
+ * Page outside of range of object.
+ * Right now we essentially lose the
+ * changes by pretending it worked.
+ */
+ ms[i]->flags &= ~PG_LAUNDRY;
+ ms[i]->flags |= PG_CLEAN;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
+ break;
+ case VM_PAGER_ERROR:
+ case VM_PAGER_FAIL:
+ /*
+ * If page couldn't be paged out, then
+ * reactivate the page so it doesn't
+ * clog the inactive list. (We will
+ * try paging out it again later).
+ */
+ if (ms[i]->flags & PG_INACTIVE)
+ vm_page_activate(ms[i]);
+ break;
+ case VM_PAGER_AGAIN:
break;
+ }
- cnt.v_scan++;
- next = m->pageq.tqe_next;
/*
- * If the page has been referenced, move it back to the
- * active queue.
+ * If the operation is still going, leave
+ * the page busy to block all other accesses.
+ * Also, leave the paging in progress
+ * indicator set so that we don't attempt an
+ * object collapse.
*/
- if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
- vm_page_activate(m);
- cnt.v_reactivated++;
- continue;
+ if (pageout_status[i] != VM_PAGER_PEND) {
+ PAGE_WAKEUP(ms[i]);
+ if (--object->paging_in_progress == 0)
+ wakeup((caddr_t) object);
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
+ if( ms[i]->flags & PG_INACTIVE)
+ vm_page_activate(ms[i]);
+ }
}
+ }
+ return anyok;
+}
+/*
+ * vm_pageout_object_deactivate_pages
+ *
+ * deactivate enough pages to satisfy the inactive target
+ * requirements or if vm_page_proc_limit is set, then
+ * deactivate all of the pages in the object and its
+ * shadows.
+ *
+ * The object and map must be locked.
+ */
+int
+vm_pageout_object_deactivate_pages(map, object, count)
+ vm_map_t map;
+ vm_object_t object;
+ int count;
+{
+ register vm_page_t p, next;
+ int rcount;
+ int s;
+ int dcount;
+
+ dcount = 0;
+ if (count == 0)
+ count = 1;
+
+ if (object->shadow) {
+ int scount = count;
+ if( object->shadow->ref_count > 1)
+ scount /= object->shadow->ref_count;
+ if( scount)
+ dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount);
+ }
+
+ if (object->paging_in_progress)
+ return dcount;
+
+ /*
+ * scan the objects entire memory queue
+ */
+ rcount = object->resident_page_count;
+ p = object->memq.tqh_first;
+ while (p && (rcount-- > 0)) {
+ next = p->listq.tqe_next;
+ vm_page_lock_queues();
/*
- * If the page is clean, free it up.
+ * if a page is active, not wired and is in the processes pmap,
+ * then deactivate the page.
*/
- if (m->flags & PG_CLEAN) {
- object = m->object;
- if (vm_object_lock_try(object)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(m),
- VM_PROT_NONE);
- vm_page_free(m);
- pages_freed++;
- cnt.v_dfree++;
- vm_object_unlock(object);
+ if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
+ p->wire_count == 0 &&
+ p->hold_count == 0 &&
+ pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
+ if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p))) {
+ p->act_count -= min(p->act_count, ACT_DECLINE);
+ /*
+ * if the page act_count is zero -- then we deactivate
+ */
+ if (!p->act_count) {
+ vm_page_deactivate(p);
+ pmap_page_protect(VM_PAGE_TO_PHYS(p),
+ VM_PROT_NONE);
+ /*
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
+ */
+ } else {
+ TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
+ TAILQ_REMOVE(&object->memq, p, listq);
+ TAILQ_INSERT_TAIL(&object->memq, p, listq);
+ }
+ /*
+ * see if we are done yet
+ */
+ if (p->flags & PG_INACTIVE) {
+ --count;
+ ++dcount;
+ if (count <= 0 &&
+ cnt.v_inactive_count > cnt.v_inactive_target) {
+ vm_page_unlock_queues();
+ return dcount;
+ }
+ }
+
+ } else {
+ /*
+ * Move the page to the bottom of the queue.
+ */
+ pmap_clear_reference(VM_PAGE_TO_PHYS(p));
+ if (p->act_count < ACT_MAX)
+ p->act_count += ACT_ADVANCE;
+
+ TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
+ TAILQ_REMOVE(&object->memq, p, listq);
+ TAILQ_INSERT_TAIL(&object->memq, p, listq);
}
- continue;
}
+ vm_page_unlock_queues();
+ p = next;
+ }
+ return dcount;
+}
+
+
+/*
+ * deactivate some number of pages in a map, try to do it fairly, but
+ * that is really hard to do.
+ */
+
+void
+vm_pageout_map_deactivate_pages(map, entry, count, freeer)
+ vm_map_t map;
+ vm_map_entry_t entry;
+ int *count;
+ int (*freeer)(vm_map_t, vm_object_t, int);
+{
+ vm_map_t tmpm;
+ vm_map_entry_t tmpe;
+ vm_object_t obj;
+ if (*count <= 0)
+ return;
+ vm_map_reference(map);
+ if (!lock_try_read(&map->lock)) {
+ vm_map_deallocate(map);
+ return;
+ }
+ if (entry == 0) {
+ tmpe = map->header.next;
+ while (tmpe != &map->header && *count > 0) {
+ vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
+ tmpe = tmpe->next;
+ };
+ } else if (entry->is_sub_map || entry->is_a_map) {
+ tmpm = entry->object.share_map;
+ tmpe = tmpm->header.next;
+ while (tmpe != &tmpm->header && *count > 0) {
+ vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
+ tmpe = tmpe->next;
+ };
+ } else if (obj = entry->object.vm_object) {
+ *count -= (*freeer)(map, obj, *count);
+ }
+ lock_read_done(&map->lock);
+ vm_map_deallocate(map);
+ return;
+}
+
+/*
+ * vm_pageout_scan does the dirty work for the pageout daemon.
+ */
+int
+vm_pageout_scan()
+{
+ vm_page_t m;
+ int page_shortage, maxscan, maxlaunder;
+ int pages_freed, free, nproc;
+ int desired_free;
+ vm_page_t next;
+ struct proc *p;
+ vm_object_t object;
+ int s;
+ int force_wakeup = 0;
+
+morefree:
+ /*
+ * scan the processes for exceeding their rlimits or if process
+ * is swapped out -- deactivate pages
+ */
+
+rescanproc1:
+ for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+ vm_offset_t size;
+ int overage;
+ vm_offset_t limit;
+
/*
- * If the page is dirty but already being washed, skip it.
+ * if this is a system process or if we have already
+ * looked at this process, skip it.
*/
- if ((m->flags & PG_LAUNDRY) == 0)
+ if (p->p_flag & (P_SYSTEM|P_WEXIT)) {
continue;
+ }
/*
- * Otherwise the page is dirty and still in the laundry,
- * so we start the cleaning operation and remove it from
- * the laundry.
+ * if the process is in a non-running type state,
+ * don't touch it.
*/
- object = m->object;
- if (!vm_object_lock_try(object))
+ if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
continue;
- cnt.v_pageouts++;
-#ifdef CLUSTERED_PAGEOUT
- if (object->pager &&
- vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
- vm_pageout_cluster(m, object);
- else
-#endif
- vm_pageout_page(m, object);
- thread_wakeup((int) object);
- vm_object_unlock(object);
+ }
+
/*
- * Former next page may no longer even be on the inactive
- * queue (due to potential blocking in the pager with the
- * queues unlocked). If it isn't, we just start over.
+ * get a limit
*/
- if (next && (next->flags & PG_INACTIVE) == 0)
- next = vm_page_queue_inactive.tqh_first;
- }
-
- /*
- * Compute the page shortage. If we are still very low on memory
- * be sure that we will move a minimal amount of pages from active
- * to inactive.
- */
-
- page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
- if (page_shortage <= 0 && pages_freed == 0)
- page_shortage = 1;
-
- while (page_shortage > 0) {
+ limit = min(p->p_rlimit[RLIMIT_RSS].rlim_cur,
+ p->p_rlimit[RLIMIT_RSS].rlim_max);
+
/*
- * Move some more pages from active to inactive.
+ * let processes that are swapped out really be swapped out
+ * set the limit to nothing (will force a swap-out.)
*/
+ if ((p->p_flag & P_INMEM) == 0)
+ limit = 0;
+
+ size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
+ if (size >= limit) {
+ overage = (size - limit) / NBPG;
+ vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
+ (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
+ }
- if ((m = vm_page_queue_active.tqh_first) == NULL)
- break;
- vm_page_deactivate(m);
- page_shortage--;
}
- vm_page_unlock_queues();
-}
+ if (((cnt.v_free_count + cnt.v_inactive_count) >=
+ (cnt.v_inactive_target + cnt.v_free_target)) &&
+ (cnt.v_free_count >= cnt.v_free_target))
+ return force_wakeup;
-/*
- * Called with object and page queues locked.
- * If reactivate is TRUE, a pager error causes the page to be
- * put back on the active queue, ow it is left on the inactive queue.
- */
-void
-vm_pageout_page(m, object)
- vm_page_t m;
- vm_object_t object;
-{
- vm_pager_t pager;
- int pageout_status;
+ pages_freed = 0;
+ desired_free = cnt.v_free_target;
/*
- * We set the busy bit to cause potential page faults on
- * this page to block.
- *
- * We also set pageout-in-progress to keep the object from
- * disappearing during pageout. This guarantees that the
- * page won't move from the inactive queue. (However, any
- * other page on the inactive queue may move!)
+ * Start scanning the inactive queue for pages we can free.
+ * We keep scanning until we have enough free pages or
+ * we have scanned through the entire queue. If we
+ * encounter dirty pages, we start cleaning them.
*/
- pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
- m->flags |= PG_BUSY;
- /*
- * Try to collapse the object before making a pager for it.
- * We must unlock the page queues first.
- */
- vm_page_unlock_queues();
- if (object->pager == NULL)
- vm_object_collapse(object);
+ maxlaunder = (cnt.v_free_target - cnt.v_free_count);
+ maxscan = cnt.v_inactive_count;
+rescan1:
+ m = vm_page_queue_inactive.tqh_first;
+ while (m && (maxscan-- > 0) &&
+ (cnt.v_free_count < desired_free) ) {
+ vm_page_t next;
- object->paging_in_progress++;
- vm_object_unlock(object);
+ next = m->pageq.tqe_next;
- /*
- * Do a wakeup here in case the following operations block.
- */
- thread_wakeup((int) &cnt.v_free_count);
+ if( (m->flags & PG_INACTIVE) == 0) {
+ printf("vm_pageout_scan: page not inactive?");
+ continue;
+ }
- /*
- * If there is no pager for the page, use the default pager.
- * If there is no place to put the page at the moment,
- * leave it in the laundry and hope that there will be
- * paging space later.
- */
- if ((pager = object->pager) == NULL) {
- pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, object->size,
- VM_PROT_ALL, (vm_offset_t)0);
- if (pager != NULL)
- vm_object_setpager(object, pager, 0, FALSE);
- }
- pageout_status = pager ? vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
- vm_object_lock(object);
- vm_page_lock_queues();
-
- switch (pageout_status) {
- case VM_PAGER_OK:
- case VM_PAGER_PEND:
- cnt.v_pgpgout++;
- m->flags &= ~PG_LAUNDRY;
- break;
- case VM_PAGER_BAD:
/*
- * Page outside of range of object. Right now we
- * essentially lose the changes by pretending it
- * worked.
- *
- * XXX dubious, what should we do?
+ * activate held pages
*/
- m->flags &= ~PG_LAUNDRY;
- m->flags |= PG_CLEAN;
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
- break;
- case VM_PAGER_AGAIN:
- {
- extern int lbolt;
+ if (m->hold_count != 0) {
+ vm_page_activate(m);
+ m = next;
+ continue;
+ }
/*
- * FAIL on a write is interpreted to mean a resource
- * shortage, so we put pause for awhile and try again.
- * XXX could get stuck here.
+ * dont mess with busy pages
*/
- (void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
- break;
- }
- case VM_PAGER_FAIL:
- case VM_PAGER_ERROR:
+ if (m->flags & PG_BUSY) {
+ m = next;
+ continue;
+ }
+
/*
- * If page couldn't be paged out, then reactivate
- * the page so it doesn't clog the inactive list.
- * (We will try paging out it again later).
+ * if page is clean but the page has been referenced,
+ * then reactivate the page, but if we are very low on memory
+ * or the page has not been referenced, then we free it to the
+ * vm system.
*/
- vm_page_activate(m);
- cnt.v_reactivated++;
- break;
- }
+ if (m->flags & PG_CLEAN) {
+ if ((cnt.v_free_count > vm_pageout_free_min) /* XXX */
+ && pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ vm_page_activate(m);
+ } else if (!m->act_count) {
+ pmap_page_protect(VM_PAGE_TO_PHYS(m),
+ VM_PROT_NONE);
+ vm_page_free(m);
+ ++pages_freed;
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
+ TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
+ }
+ } else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) {
+ int written;
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ vm_page_activate(m);
+ m = next;
+ continue;
+ }
+ /*
+ * If a page is dirty, then it is either
+ * being washed (but not yet cleaned)
+ * or it is still in the laundry. If it is
+ * still in the laundry, then we start the
+ * cleaning operation.
+ */
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ if (written = vm_pageout_clean(m,0)) {
+ maxlaunder -= written;
+ }
+ /*
+ * if the next page has been re-activated, start scanning again
+ */
+ if (next && (next->flags & PG_INACTIVE) == 0)
+ goto rescan1;
+ } else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ vm_page_activate(m);
+ }
+ m = next;
+ }
/*
- * If the operation is still going, leave the page busy
- * to block all other accesses. Also, leave the paging
- * in progress indicator set so that we don't attempt an
- * object collapse.
+ * now check malloc area or swap processes out if we are in low
+ * memory conditions
*/
- if (pageout_status != VM_PAGER_PEND) {
- m->flags &= ~PG_BUSY;
- PAGE_WAKEUP(m);
- object->paging_in_progress--;
+ if (cnt.v_free_count <= cnt.v_free_min) {
+ /*
+ * swap out inactive processes
+ */
+ swapout_threads();
}
-}
-
-#ifdef CLUSTERED_PAGEOUT
-#define PAGEOUTABLE(p) \
- ((((p)->flags & (PG_INACTIVE|PG_CLEAN|PG_LAUNDRY)) == \
- (PG_INACTIVE|PG_LAUNDRY)) && !pmap_is_referenced(VM_PAGE_TO_PHYS(p)))
-
-/*
- * Attempt to pageout as many contiguous (to ``m'') dirty pages as possible
- * from ``object''. Using information returned from the pager, we assemble
- * a sorted list of contiguous dirty pages and feed them to the pager in one
- * chunk. Called with paging queues and object locked. Also, object must
- * already have a pager.
- */
-void
-vm_pageout_cluster(m, object)
- vm_page_t m;
- vm_object_t object;
-{
- vm_offset_t offset, loff, hoff;
- vm_page_t plist[MAXPOCLUSTER], *plistp, p;
- int postatus, ix, count;
/*
- * Determine the range of pages that can be part of a cluster
- * for this object/offset. If it is only our single page, just
- * do it normally.
+ * Compute the page shortage. If we are still very low on memory
+ * be sure that we will move a minimal amount of pages from active
+ * to inactive.
*/
- vm_pager_cluster(object->pager, m->offset, &loff, &hoff);
- if (hoff - loff == PAGE_SIZE) {
- vm_pageout_page(m, object);
- return;
+
+ page_shortage = cnt.v_inactive_target -
+ (cnt.v_free_count + cnt.v_inactive_count);
+
+ if (page_shortage <= 0) {
+ if (pages_freed == 0) {
+ if( cnt.v_free_count < cnt.v_free_min) {
+ page_shortage = cnt.v_free_min - cnt.v_free_count;
+ } else if(((cnt.v_free_count + cnt.v_inactive_count) <
+ (cnt.v_free_min + cnt.v_inactive_target))) {
+ page_shortage = 1;
+ } else {
+ page_shortage = 0;
+ }
+ }
+
}
- plistp = plist;
+ maxscan = cnt.v_active_count;
+ m = vm_page_queue_active.tqh_first;
+ while (m && maxscan-- && (page_shortage > 0)) {
- /*
- * Target page is always part of the cluster.
- */
- pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
- m->flags |= PG_BUSY;
- plistp[atop(m->offset - loff)] = m;
- count = 1;
+ next = m->pageq.tqe_next;
- /*
- * Backup from the given page til we find one not fulfilling
- * the pageout criteria or we hit the lower bound for the
- * cluster. For each page determined to be part of the
- * cluster, unmap it and busy it out so it won't change.
- */
- ix = atop(m->offset - loff);
- offset = m->offset;
- while (offset > loff && count < MAXPOCLUSTER-1) {
- p = vm_page_lookup(object, offset - PAGE_SIZE);
- if (p == NULL || !PAGEOUTABLE(p))
- break;
- pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
- p->flags |= PG_BUSY;
- plistp[--ix] = p;
- offset -= PAGE_SIZE;
- count++;
+ /*
+ * Don't deactivate pages that are busy.
+ */
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
+ m = next;
+ continue;
+ }
+
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
+
+ /*
+ * if the page act_count is zero -- then we deactivate
+ */
+ if (!m->act_count) {
+ vm_page_deactivate(m);
+ --page_shortage;
+ /*
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
+ */
+ } else {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ }
+ }
+
+ m = next;
}
- plistp += atop(offset - loff);
- loff = offset;
/*
- * Now do the same moving forward from the target.
+ * if we have not freed any pages and we are desperate for memory
+ * then we keep trying until we get some (any) memory.
*/
- ix = atop(m->offset - loff) + 1;
- offset = m->offset + PAGE_SIZE;
- while (offset < hoff && count < MAXPOCLUSTER) {
- p = vm_page_lookup(object, offset);
- if (p == NULL || !PAGEOUTABLE(p))
- break;
- pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
- p->flags |= PG_BUSY;
- plistp[ix++] = p;
- offset += PAGE_SIZE;
- count++;
+
+ if( !force_wakeup && (swap_pager_full || !force_wakeup ||
+ (pages_freed == 0 && (cnt.v_free_count < cnt.v_free_min)))){
+ vm_pager_sync();
+ force_wakeup = 1;
+ goto morefree;
}
- hoff = offset;
+ vm_page_pagesfreed += pages_freed;
+ return force_wakeup;
+}
- /*
- * Pageout the page.
- * Unlock everything and do a wakeup prior to the pager call
- * in case it blocks.
- */
- vm_page_unlock_queues();
- object->paging_in_progress++;
- vm_object_unlock(object);
-again:
- thread_wakeup((int) &cnt.v_free_count);
- postatus = vm_pager_put_pages(object->pager, plistp, count, FALSE);
- /*
- * XXX rethink this
- */
- if (postatus == VM_PAGER_AGAIN) {
- extern int lbolt;
+void
+vm_pagescan()
+{
+ int maxscan, pages_scanned, pages_referenced, nextscan, scantick = hz/20;
+ int m_ref, next_ref;
+ vm_page_t m, next;
- (void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
- goto again;
- } else if (postatus == VM_PAGER_BAD)
- panic("vm_pageout_cluster: VM_PAGER_BAD");
- vm_object_lock(object);
- vm_page_lock_queues();
+ (void) spl0();
+
+ nextscan = scantick;
+
+scanloop:
+
+ pages_scanned = 0;
+ pages_referenced = 0;
+ maxscan = min(cnt.v_active_count, MAXSCAN);
/*
- * Loop through the affected pages, reflecting the outcome of
- * the operation.
+ * Gather statistics on page usage.
*/
- for (ix = 0; ix < count; ix++) {
- p = *plistp++;
- switch (postatus) {
- case VM_PAGER_OK:
- case VM_PAGER_PEND:
- cnt.v_pgpgout++;
- p->flags &= ~PG_LAUNDRY;
- break;
- case VM_PAGER_FAIL:
- case VM_PAGER_ERROR:
- /*
- * Pageout failed, reactivate the target page so it
- * doesn't clog the inactive list. Other pages are
- * left as they are.
- */
- if (p == m) {
- vm_page_activate(p);
- cnt.v_reactivated++;
- }
- break;
- }
- pmap_clear_reference(VM_PAGE_TO_PHYS(p));
+ m = vm_page_queue_active.tqh_first;
+ while (m && (maxscan-- > 0)) {
+
+ ++pages_scanned;
+
+ next = m->pageq.tqe_next;
+
/*
- * If the operation is still going, leave the page busy
- * to block all other accesses.
+ * Dont mess with pages that are busy.
*/
- if (postatus != VM_PAGER_PEND) {
- p->flags &= ~PG_BUSY;
- PAGE_WAKEUP(p);
+ if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ m = next;
+ continue;
+ }
+ /*
+ * Advance pages that have been referenced, decline pages that
+ * have not.
+ */
+ if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ pages_referenced++;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
+ /*
+ * if the page act_count is zero, and we are low on mem -- then we deactivate
+ */
+ if (!m->act_count &&
+ (cnt.v_free_count+cnt.v_inactive_count < cnt.v_free_target+cnt.v_inactive_target )) {
+ vm_page_deactivate(m);
+ /*
+ * else if on the next go-around we will deactivate the page
+ * we need to place the page on the end of the queue to age
+ * the other pages in memory.
+ */
+ } else {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ }
}
+ m = next;
}
- /*
- * If the operation is still going, leave the paging in progress
- * indicator set so that we don't attempt an object collapse.
- */
- if (postatus != VM_PAGER_PEND)
- object->paging_in_progress--;
+ if (pages_referenced) {
+ nextscan = (pages_scanned / pages_referenced) * scantick;
+ nextscan = max(nextscan, scantick);
+ nextscan = min(nextscan, hz);
+ } else
+ nextscan = hz;
+ tsleep((caddr_t) &vm_pagescanner, PVM, "scanw", nextscan);
+
+ goto scanloop;
}
-#endif
/*
* vm_pageout is the high level pageout daemon.
*/
-
-void vm_pageout()
+void
+vm_pageout()
{
+ extern npendingio, swiopend;
+ static nowakeup;
(void) spl0();
/*
* Initialize some paging parameters.
*/
- if (cnt.v_free_min == 0) {
- cnt.v_free_min = VM_PAGE_FREE_MIN;
- vm_page_free_min_min /= cnt.v_page_size;
- vm_page_free_min_max /= cnt.v_page_size;
- if (cnt.v_free_min < vm_page_free_min_min)
- cnt.v_free_min = vm_page_free_min_min;
- if (cnt.v_free_min > vm_page_free_min_max)
- cnt.v_free_min = vm_page_free_min_max;
- }
-
- if (cnt.v_free_target == 0)
- cnt.v_free_target = VM_PAGE_FREE_TARGET;
-
- if (cnt.v_free_target <= cnt.v_free_min)
- cnt.v_free_target = cnt.v_free_min + 1;
-
- /* XXX does not really belong here */
+vmretry:
+ cnt.v_free_min = 12;
+ cnt.v_free_reserved = 8;
+ if (cnt.v_free_min < 8)
+ cnt.v_free_min = 8;
+ if (cnt.v_free_min > 32)
+ cnt.v_free_min = 32;
+ vm_pageout_free_min = 4;
+ cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved;
+ cnt.v_inactive_target = cnt.v_free_count / 12;
+ cnt.v_free_min += cnt.v_free_reserved;
+
+ /* XXX does not really belong here */
if (vm_page_max_wired == 0)
vm_page_max_wired = cnt.v_free_count / 3;
+
+ (void) swap_pager_alloc(0, 0, 0, 0);
+
/*
* The pageout daemon is never done, so loop
* forever.
*/
-
- simple_lock(&vm_pages_needed_lock);
while (TRUE) {
- thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
- FALSE);
- /*
- * Compute the inactive target for this scan.
- * We need to keep a reasonable amount of memory in the
- * inactive list to better simulate LRU behavior.
- */
- cnt.v_inactive_target =
- (cnt.v_active_count + cnt.v_inactive_count) / 3;
- if (cnt.v_inactive_target <= cnt.v_free_target)
- cnt.v_inactive_target = cnt.v_free_target + 1;
-
+ int force_wakeup;
+ extern struct loadavg averunnable;
+/*
+ cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024;
+ cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved;
+ cnt.v_inactive_target = cnt.v_free_target*2;
+*/
+
+ tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
+
+ vm_pager_sync();
/*
- * Only make a scan if we are likely to do something.
- * Otherwise we might have been awakened by a pager
- * to clean up async pageouts.
+ * The force wakeup hack added to eliminate delays and potential
+ * deadlock. It was possible for the page daemon to indefinitely
+ * postpone waking up a process that it might be waiting for memory
+ * on. The putmulti stuff seems to have aggravated the situation.
*/
- if (cnt.v_free_count < cnt.v_free_target ||
- cnt.v_inactive_count < cnt.v_inactive_target)
- vm_pageout_scan();
+ force_wakeup = vm_pageout_scan();
vm_pager_sync();
- simple_lock(&vm_pages_needed_lock);
- thread_wakeup((int) &cnt.v_free_count);
+ if( force_wakeup)
+ wakeup( (caddr_t) &cnt.v_free_count);
+ cnt.v_scan++;
+ wakeup((caddr_t) kmem_map);
}
}
+
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index a82a0ea40aca..834aee536880 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -72,7 +72,11 @@
extern int vm_pages_needed; /* should be some "event" structure */
simple_lock_data_t vm_pages_needed_lock;
+extern int vm_pageout_pages_needed;
+#define VM_PAGEOUT_ASYNC 0
+#define VM_PAGEOUT_SYNC 1
+#define VM_PAGEOUT_FORCE 2
/*
* Exported routines.
@@ -82,15 +86,27 @@ simple_lock_data_t vm_pages_needed_lock;
* Signal pageout-daemon and wait for it.
*/
-#define VM_WAIT { \
- simple_lock(&vm_pages_needed_lock); \
- thread_wakeup((int)&vm_pages_needed); \
- thread_sleep((int)&cnt.v_free_count, \
- &vm_pages_needed_lock, FALSE); \
- }
+#define VM_WAIT vm_wait()
+
+inline static void vm_wait() {
+ extern struct proc *curproc, *pageproc;
+ int s;
+ s = splhigh();
+ if (curproc == pageproc) {
+ vm_pageout_pages_needed = 1;
+ tsleep((caddr_t) &vm_pageout_pages_needed, PSWP, "vmwait", 0);
+ vm_pageout_pages_needed = 0;
+ } else {
+ wakeup((caddr_t) &vm_pages_needed);
+ tsleep((caddr_t) &cnt.v_free_count, PVM, "vmwait", 0);
+ }
+ splx(s);
+}
+
+
#ifdef KERNEL
void vm_pageout __P((void));
-void vm_pageout_scan __P((void));
+int vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
#endif
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 7123abb16ef0..1e4b201120f7 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -75,34 +75,14 @@
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
-#ifdef SWAPPAGER
extern struct pagerops swappagerops;
-#endif
-
-#ifdef VNODEPAGER
extern struct pagerops vnodepagerops;
-#endif
-
-#ifdef DEVPAGER
extern struct pagerops devicepagerops;
-#endif
struct pagerops *pagertab[] = {
-#ifdef SWAPPAGER
&swappagerops, /* PG_SWAP */
-#else
- NULL,
-#endif
-#ifdef VNODEPAGER
&vnodepagerops, /* PG_VNODE */
-#else
- NULL,
-#endif
-#ifdef DEVPAGER
&devicepagerops, /* PG_DEV */
-#else
- NULL,
-#endif
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);
@@ -118,6 +98,7 @@ struct pagerops *dfltpagerops = NULL; /* default pager */
*/
#define PAGER_MAP_SIZE (4 * 1024 * 1024)
+int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
boolean_t pager_map_wanted;
vm_offset_t pager_sva, pager_eva;
@@ -130,8 +111,10 @@ vm_pager_init()
/*
* Allocate a kernel submap for tracking get/put page mappings
*/
+/*
pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
PAGER_MAP_SIZE, FALSE);
+*/
/*
* Initialize known pagers
*/
@@ -173,38 +156,61 @@ vm_pager_deallocate(pager)
(*pager->pg_ops->pgo_dealloc)(pager);
}
+
int
-vm_pager_get_pages(pager, mlist, npages, sync)
+vm_pager_get_pages(pager, m, count, reqpage, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t *m;
+ int count;
+ int reqpage;
boolean_t sync;
{
- int rv;
+ extern boolean_t vm_page_zero_fill();
+ extern int vm_pageout_count;
+ int i;
if (pager == NULL) {
- rv = VM_PAGER_OK;
- while (npages--)
- if (!vm_page_zero_fill(*mlist)) {
- rv = VM_PAGER_FAIL;
- break;
- } else
- mlist++;
- return (rv);
+ for (i=0;i<count;i++) {
+ if( i != reqpage) {
+ PAGE_WAKEUP(m[i]);
+ vm_page_free(m[i]);
+ }
+ }
+ vm_page_zero_fill(m[reqpage]);
+ return VM_PAGER_OK;
+ }
+
+ if( pager->pg_ops->pgo_getpages == 0) {
+ for(i=0;i<count;i++) {
+ if( i != reqpage) {
+ PAGE_WAKEUP(m[i]);
+ vm_page_free(m[i]);
+ }
+ }
+ return(VM_PAGER_GET(pager, m[reqpage], sync));
+ } else {
+ return(VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
}
- return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync));
}
int
-vm_pager_put_pages(pager, mlist, npages, sync)
+vm_pager_put_pages(pager, m, count, sync, rtvals)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t *m;
+ int count;
boolean_t sync;
+ int *rtvals;
{
- if (pager == NULL)
- panic("vm_pager_put_pages: null pager");
- return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
+ int i;
+
+ if( pager->pg_ops->pgo_putpages)
+ return(VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
+ else {
+ for(i=0;i<count;i++) {
+ rtvals[i] = VM_PAGER_PUT( pager, m[i], sync);
+ }
+ return rtvals[0];
+ }
}
boolean_t
@@ -228,9 +234,10 @@ vm_pager_sync()
for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
if (pgops)
- (*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE);
+ (*(*pgops)->pgo_putpage)(NULL, NULL, 0);
}
+#if 0
void
vm_pager_cluster(pager, offset, loff, hoff)
vm_pager_t pager;
@@ -242,91 +249,25 @@ vm_pager_cluster(pager, offset, loff, hoff)
panic("vm_pager_cluster: null pager");
return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
}
-
-void
-vm_pager_clusternull(pager, offset, loff, hoff)
- vm_pager_t pager;
- vm_offset_t offset;
- vm_offset_t *loff;
- vm_offset_t *hoff;
-{
- panic("vm_pager_nullcluster called");
-}
+#endif
vm_offset_t
-vm_pager_map_pages(mlist, npages, canwait)
- vm_page_t *mlist;
- int npages;
- boolean_t canwait;
+vm_pager_map_page(m)
+ vm_page_t m;
{
- vm_offset_t kva, va;
- vm_size_t size;
- vm_page_t m;
+ vm_offset_t kva;
- /*
- * Allocate space in the pager map, if none available return 0.
- * This is basically an expansion of kmem_alloc_wait with optional
- * blocking on no space.
- */
- size = npages * PAGE_SIZE;
- vm_map_lock(pager_map);
- while (vm_map_findspace(pager_map, 0, size, &kva)) {
- if (!canwait) {
- vm_map_unlock(pager_map);
- return (0);
- }
- pager_map_wanted = TRUE;
- vm_map_unlock(pager_map);
- (void) tsleep(pager_map, PVM, "pager_map", 0);
- vm_map_lock(pager_map);
- }
- vm_map_insert(pager_map, NULL, 0, kva, kva + size);
- vm_map_unlock(pager_map);
-
- for (va = kva; npages--; va += PAGE_SIZE) {
- m = *mlist++;
-#ifdef DEBUG
- if ((m->flags & PG_BUSY) == 0)
- panic("vm_pager_map_pages: page not busy");
- if (m->flags & PG_PAGEROWNED)
- panic("vm_pager_map_pages: page already in pager");
-#endif
-#ifdef DEBUG
- m->flags |= PG_PAGEROWNED;
-#endif
- pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m),
- VM_PROT_DEFAULT, TRUE);
- }
- return (kva);
+ kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
+ pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
+ VM_PROT_DEFAULT, TRUE);
+ return(kva);
}
void
-vm_pager_unmap_pages(kva, npages)
+vm_pager_unmap_page(kva)
vm_offset_t kva;
- int npages;
{
- vm_size_t size = npages * PAGE_SIZE;
-
-#ifdef DEBUG
- vm_offset_t va;
- vm_page_t m;
- int np = npages;
-
- for (va = kva; np--; va += PAGE_SIZE) {
- m = vm_pager_atop(va);
- if (m->flags & PG_PAGEROWNED)
- m->flags &= ~PG_PAGEROWNED;
- else
- printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n",
- m, va, VM_PAGE_TO_PHYS(m));
- }
-#endif
- pmap_remove(vm_map_pmap(pager_map), kva, kva + size);
- vm_map_lock(pager_map);
- (void) vm_map_delete(pager_map, kva, kva + size);
- if (pager_map_wanted)
- wakeup(pager_map);
- vm_map_unlock(pager_map);
+ kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
vm_page_t
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index e4659c268c1d..3e20e50bcad9 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 1990 University of Utah.
* Copyright (c) 1991, 1993
@@ -74,17 +75,26 @@ struct pagerops {
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
void (*pgo_dealloc) /* Disassociate. */
__P((vm_pager_t));
+ int (*pgo_getpage)
+ __P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_getpages) /* Get (read) page. */
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t *, int, int, boolean_t));
+ int (*pgo_putpage)
+ __P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_putpages) /* Put (write) page. */
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t *, int, boolean_t, int *));
boolean_t (*pgo_haspage) /* Does pager have page? */
__P((vm_pager_t, vm_offset_t));
- void (*pgo_cluster) /* Return range of cluster. */
- __P((vm_pager_t, vm_offset_t,
- vm_offset_t *, vm_offset_t *));
};
+#define VM_PAGER_ALLOC(h, s, p, o) (*(pg)->pg_ops->pgo_alloc)(h, s, p, o)
+#define VM_PAGER_DEALLOC(pg) (*(pg)->pg_ops->pgo_dealloc)(pg)
+#define VM_PAGER_GET(pg, m, s) (*(pg)->pg_ops->pgo_getpage)(pg, m, s)
+#define VM_PAGER_GET_MULTI(pg, m, c, r, s) (*(pg)->pg_ops->pgo_getpages)(pg, m, c, r, s)
+#define VM_PAGER_PUT(pg, m, s) (*(pg)->pg_ops->pgo_putpage)(pg, m, s)
+#define VM_PAGER_PUT_MULTI(pg, m, c, s, rtval) (*(pg)->pg_ops->pgo_putpages)(pg, m, c, s, rtval)
+#define VM_PAGER_HASPAGE(pg, o) (*(pg)->pg_ops->pgo_haspage)(pg, o)
+
/*
* get/put return values
* OK operation was successful
@@ -107,21 +117,15 @@ extern struct pagerops *dfltpagerops;
vm_pager_t vm_pager_allocate
__P((int, caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
vm_page_t vm_pager_atop __P((vm_offset_t));
-void vm_pager_cluster
- __P((vm_pager_t, vm_offset_t,
- vm_offset_t *, vm_offset_t *));
-void vm_pager_clusternull
- __P((vm_pager_t, vm_offset_t,
- vm_offset_t *, vm_offset_t *));
void vm_pager_deallocate __P((vm_pager_t));
int vm_pager_get_pages
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t *, int, int, boolean_t));
boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
void vm_pager_init __P((void));
vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
int vm_pager_put_pages
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+ __P((vm_pager_t, vm_page_t *, int, boolean_t, int *));
void vm_pager_sync __P((void));
void vm_pager_unmap_pages __P((vm_offset_t, int));
@@ -134,13 +138,16 @@ void vm_pager_unmap_pages __P((vm_offset_t, int));
({ \
vm_page_t ml[1]; \
ml[0] = (m); \
- vm_pager_get_pages(p, ml, 1, s); \
+ vm_pager_get_pages(p, ml, 1, 0, s); \
})
+
#define vm_pager_put(p, m, s) \
({ \
+ int rtval; \
vm_page_t ml[1]; \
ml[0] = (m); \
- vm_pager_put_pages(p, ml, 1, s); \
+ vm_pager_put_pages(p, ml, 1, s, &rtval); \
+ rtval; \
})
#endif
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
index 2d2c71594edf..4a785ce6e4ea 100644
--- a/sys/vm/vm_param.h
+++ b/sys/vm/vm_param.h
@@ -84,14 +84,25 @@ typedef int boolean_t;
*/
#define DEFAULT_PAGE_SIZE 4096
+#if 0
+
/*
* All references to the size of a page should be done with PAGE_SIZE
* or PAGE_SHIFT. The fact they are variables is hidden here so that
* we can easily make them constant if we so desire.
*/
+#ifndef PAGE_SIZE
#define PAGE_SIZE cnt.v_page_size /* size of page */
+#endif
+#ifndef PAGE_MASK
#define PAGE_MASK page_mask /* size of page - 1 */
+#endif
+#ifndef PAGE_SHIFT
#define PAGE_SHIFT page_shift /* bits to shift for pages */
+#endif
+
+#endif
+
#ifdef KERNEL
extern vm_size_t page_mask;
extern int page_shift;
@@ -129,17 +140,34 @@ extern int page_shift;
* No rounding is used.
*/
#ifdef KERNEL
+
+#if 0
+
+#ifndef atop
#define atop(x) (((unsigned)(x)) >> PAGE_SHIFT)
+#endif
+#ifndef ptoa
#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
+#endif
/*
* Round off or truncate to the nearest page. These will work
* for either addresses or counts (i.e., 1 byte rounds to 1 page).
*/
+#ifndef round_page
#define round_page(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
+#endif
+#ifndef trunc_page
#define trunc_page(x) \
((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
+#endif
+#ifndef num_pages
+#define num_pages(x) \
+ ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
+#endif
+
+#endif
#define num_pages(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
@@ -148,11 +176,13 @@ extern vm_offset_t first_addr; /* first physical page */
extern vm_offset_t last_addr; /* last physical page */
#else
+#if 0
/* out-of-kernel versions of round_page and trunc_page */
#define round_page(x) \
((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
#define trunc_page(x) \
((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
+#endif
#endif /* KERNEL */
#endif /* ASSEMBLER */
diff --git a/sys/vm/vm_prot.h b/sys/vm/vm_prot.h
index b3bae4386315..ee009bc4d03c 100644
--- a/sys/vm/vm_prot.h
+++ b/sys/vm/vm_prot.h
@@ -75,7 +75,7 @@
* vm_prot_t VM protection values.
*/
-typedef int vm_prot_t;
+typedef u_char vm_prot_t;
/*
* Protection values, defined as bits within the vm_prot_t type
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index 10b7523ae232..5008a09ce161 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -51,6 +51,7 @@
*/
int nswap, nswdev;
+int vm_swap_size;
#ifdef SEQSWAP
int niswdev; /* number of interleaved swap devices */
int niswap; /* size of interleaved swap area */
@@ -143,9 +144,8 @@ swapinit()
/*
* Now set up swap buffer headers.
*/
- bswlist.b_actf = sp;
for (i = 0; i < nswbuf - 1; i++, sp++) {
- sp->b_actf = sp + 1;
+ TAILQ_INSERT_HEAD(&bswlist, sp, b_freelist);
sp->b_rcred = sp->b_wcred = p->p_ucred;
sp->b_vnbufs.le_next = NOLIST;
}
@@ -390,12 +390,18 @@ swfree(p, index)
blk = niswap;
for (swp = &swdevt[niswdev]; swp != sp; swp++)
blk += swp->sw_nblks;
+#if 0
rmfree(swapmap, nblks, blk);
return (0);
+#endif
+ rlist_free(&swapmap, blk, blk + nblks - 1);
+ vm_swap_size += nblks;
+ return (0);
}
#endif
for (dvbase = 0; dvbase < nblks; dvbase += dmmax) {
blk = nblks - dvbase;
+
#ifdef SEQSWAP
if ((vsbase = index*dmmax + dvbase*niswdev) >= niswap)
panic("swfree");
@@ -405,6 +411,7 @@ swfree(p, index)
#endif
if (blk > dmmax)
blk = dmmax;
+#if 0
if (vsbase == 0) {
/*
* First of all chunks... initialize the swapmap.
@@ -422,6 +429,11 @@ swfree(p, index)
vsbase + ctod(CLSIZE));
} else
rmfree(swapmap, blk, vsbase);
+#endif
+ /* XXX -- we need to exclude the first cluster as above */
+ /* but for now, this will work fine... */
+ rlist_free(&swapmap, vsbase, vsbase + blk - 1);
+ vm_swap_size += blk;
}
return (0);
}
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 3d49ea717184..ee6ddf6ab536 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -50,9 +50,12 @@
#include <vm/vm.h>
+extern int swap_pager_full;
+
struct obreak_args {
char *nsiz;
};
+
/* ARGSUSED */
int
obreak(p, uap, retval)
@@ -72,9 +75,11 @@ obreak(p, uap, retval)
old = round_page(old + ctob(vm->vm_dsize));
diff = new - old;
if (diff > 0) {
+ if (swap_pager_full) {
+ return(ENOMEM);
+ }
rv = vm_allocate(&vm->vm_map, &old, diff, FALSE);
if (rv != KERN_SUCCESS) {
- uprintf("sbrk: grow failed, return = %d\n", rv);
return(ENOMEM);
}
vm->vm_dsize += btoc(diff);
@@ -82,7 +87,6 @@ obreak(p, uap, retval)
diff = -diff;
rv = vm_deallocate(&vm->vm_map, new, diff);
if (rv != KERN_SUCCESS) {
- uprintf("sbrk: shrink failed, return = %d\n", rv);
return(ENOMEM);
}
vm->vm_dsize -= btoc(diff);
@@ -90,41 +94,10 @@ obreak(p, uap, retval)
return(0);
}
-/*
- * Enlarge the "stack segment" to include the specified
- * stack pointer for the process.
- */
-int
-grow(p, sp)
- struct proc *p;
- unsigned sp;
-{
- register struct vmspace *vm = p->p_vmspace;
- register int si;
-
- /*
- * For user defined stacks (from sendsig).
- */
- if (sp < (unsigned)vm->vm_maxsaddr)
- return (0);
- /*
- * For common case of already allocated (from trap).
- */
- if (sp >= USRSTACK - ctob(vm->vm_ssize))
- return (1);
- /*
- * Really need to check vs limit and increment stack size if ok.
- */
- si = clrnd(btoc(USRSTACK-sp) - vm->vm_ssize);
- if (vm->vm_ssize + si > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur))
- return (0);
- vm->vm_ssize += si;
- return (1);
-}
-
struct ovadvise_args {
int anom;
};
+
/* ARGSUSED */
int
ovadvise(p, uap, retval)
diff --git a/sys/vm/vm_user.c b/sys/vm/vm_user.c
index 20172c6c6519..0f2c23432f53 100644
--- a/sys/vm/vm_user.c
+++ b/sys/vm/vm_user.c
@@ -168,6 +168,7 @@ svm_protect(p, uap, retval)
return((int)rv);
}
+#endif
/*
* vm_inherit sets the inheritence of the specified range in the
* specified map.
@@ -203,7 +204,6 @@ vm_protect(map, start, size, set_maximum, new_protection)
return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
}
-#endif
/*
* vm_allocate allocates "zero fill" memory in the specfied
@@ -255,6 +255,7 @@ vm_deallocate(map, start, size)
return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
}
+#if 1
/*
* Similar to vm_allocate but assigns an explicit pager.
*/
@@ -310,3 +311,4 @@ vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
return(result);
}
+#endif
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 9c2f8260cfb3..b8e5a192796f 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1,7 +1,8 @@
/*
* Copyright (c) 1990 University of Utah.
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1993,1994 John S. Dyson
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
@@ -35,7 +36,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94
+ * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
+ * $Id: vnode_pager.c,v 1.17 1994/04/05 03:23:53 davidg Exp $
*/
/*
@@ -46,6 +48,24 @@
* fix credential use (uses current process credentials now)
*/
+/*
+ * MODIFICATIONS:
+ * John S. Dyson 08 Dec 93
+ *
+ * This file in conjunction with some vm_fault mods, eliminate the performance
+ * advantage for using the buffer cache and minimize memory copies.
+ *
+ * 1) Supports multiple - block reads
+ * 2) Bypasses buffer cache for reads
+ *
+ * TODO:
+ *
+ * 1) Totally bypass buffer cache for reads
+ * (Currently will still sometimes use buffer cache for reads)
+ * 2) Bypass buffer cache for writes
+ * (Code does not support it, but mods are simple)
+ */
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@@ -58,51 +78,44 @@
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>
-struct pagerlst vnode_pager_list; /* list of managed vnodes */
+#include <sys/buf.h>
+#include <miscfs/specfs/specdev.h>
-#ifdef DEBUG
-int vpagerdebug = 0x00;
-#define VDB_FOLLOW 0x01
-#define VDB_INIT 0x02
-#define VDB_IO 0x04
-#define VDB_FAIL 0x08
-#define VDB_ALLOC 0x10
-#define VDB_SIZE 0x20
-#endif
+int vnode_pager_putmulti();
-static vm_pager_t vnode_pager_alloc
- __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
-static void vnode_pager_cluster
- __P((vm_pager_t, vm_offset_t,
- vm_offset_t *, vm_offset_t *));
-static void vnode_pager_dealloc __P((vm_pager_t));
-static int vnode_pager_getpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
-static boolean_t vnode_pager_haspage __P((vm_pager_t, vm_offset_t));
-static void vnode_pager_init __P((void));
-static int vnode_pager_io
- __P((vn_pager_t, vm_page_t *, int,
- boolean_t, enum uio_rw));
-static boolean_t vnode_pager_putpage
- __P((vm_pager_t, vm_page_t *, int, boolean_t));
+void vnode_pager_init();
+vm_pager_t vnode_pager_alloc(caddr_t, vm_offset_t, vm_prot_t, vm_offset_t);
+void vnode_pager_dealloc();
+int vnode_pager_getpage();
+int vnode_pager_getmulti();
+int vnode_pager_putpage();
+boolean_t vnode_pager_haspage();
struct pagerops vnodepagerops = {
vnode_pager_init,
vnode_pager_alloc,
vnode_pager_dealloc,
vnode_pager_getpage,
+ vnode_pager_getmulti,
vnode_pager_putpage,
- vnode_pager_haspage,
- vnode_pager_cluster
+ vnode_pager_putmulti,
+ vnode_pager_haspage
};
-static void
+static int vnode_pager_input(vn_pager_t vnp, vm_page_t *m, int count, int reqpage);
+static int vnode_pager_output(vn_pager_t vnp, vm_page_t *m, int count, int *rtvals);
+struct buf * getpbuf() ;
+void relpbuf(struct buf *bp) ;
+
+extern vm_map_t pager_map;
+
+struct pagerlst vnode_pager_list; /* list of managed vnodes */
+
+#define MAXBP (PAGE_SIZE/DEV_BSIZE);
+
+void
vnode_pager_init()
{
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_init()\n");
-#endif
TAILQ_INIT(&vnode_pager_list);
}
@@ -110,12 +123,12 @@ vnode_pager_init()
* Allocate (or lookup) pager for a vnode.
* Handle is a vnode pointer.
*/
-static vm_pager_t
-vnode_pager_alloc(handle, size, prot, foff)
+vm_pager_t
+vnode_pager_alloc(handle, size, prot, offset)
caddr_t handle;
vm_size_t size;
vm_prot_t prot;
- vm_offset_t foff;
+ vm_offset_t offset;
{
register vm_pager_t pager;
register vn_pager_t vnp;
@@ -124,10 +137,6 @@ vnode_pager_alloc(handle, size, prot, foff)
struct vnode *vp;
struct proc *p = curproc; /* XXX */
-#ifdef DEBUG
- if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
- printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
-#endif
/*
* Pageout to vnode, no can do yet.
*/
@@ -171,12 +180,12 @@ vnode_pager_alloc(handle, size, prot, foff)
vnp->vnp_flags = 0;
vnp->vnp_vp = vp;
vnp->vnp_size = vattr.va_size;
+
TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
pager->pg_handle = handle;
pager->pg_type = PG_VNODE;
- pager->pg_flags = 0;
pager->pg_ops = &vnodepagerops;
- pager->pg_data = vnp;
+ pager->pg_data = (caddr_t)vnp;
vp->v_vmdata = (caddr_t)pager;
} else {
/*
@@ -184,121 +193,104 @@ vnode_pager_alloc(handle, size, prot, foff)
* cache if found and also gain a reference to the object.
*/
object = vm_object_lookup(pager);
-#ifdef DEBUG
- vnp = (vn_pager_t)pager->pg_data;
-#endif
}
-#ifdef DEBUG
- if (vpagerdebug & VDB_ALLOC)
- printf("vnode_pager_setup: vp %x sz %x pager %x object %x\n",
- vp, vnp->vnp_size, pager, object);
-#endif
return(pager);
}
-static void
+void
vnode_pager_dealloc(pager)
vm_pager_t pager;
{
register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
register struct vnode *vp;
-#ifdef NOTDEF
struct proc *p = curproc; /* XXX */
-#endif
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_dealloc(%x)\n", pager);
-#endif
if (vp = vnp->vnp_vp) {
vp->v_vmdata = NULL;
vp->v_flag &= ~VTEXT;
-#if NOTDEF
+#if 0
/* can hang if done at reboot on NFS FS */
(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
vrele(vp);
}
+
TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
free((caddr_t)vnp, M_VMPGDATA);
free((caddr_t)pager, M_VMPAGER);
}
-static int
-vnode_pager_getpage(pager, mlist, npages, sync)
+int
+vnode_pager_getmulti(pager, m, count, reqpage, sync)
+ vm_pager_t pager;
+ vm_page_t *m;
+ int count;
+ int reqpage;
+ boolean_t sync;
+{
+
+ return vnode_pager_input((vn_pager_t) pager->pg_data, m, count, reqpage);
+}
+
+int
+vnode_pager_getpage(pager, m, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t m;
boolean_t sync;
{
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_getpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
-#endif
- return(vnode_pager_io((vn_pager_t)pager->pg_data,
- mlist, npages, sync, UIO_READ));
+ int err;
+ vm_page_t marray[1];
+ if (pager == NULL)
+ return FALSE;
+ marray[0] = m;
+
+ return vnode_pager_input((vn_pager_t)pager->pg_data, marray, 1, 0);
}
-static boolean_t
-vnode_pager_putpage(pager, mlist, npages, sync)
+boolean_t
+vnode_pager_putpage(pager, m, sync)
vm_pager_t pager;
- vm_page_t *mlist;
- int npages;
+ vm_page_t m;
boolean_t sync;
{
int err;
+ vm_page_t marray[1];
+ int rtvals[1];
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_putpage(%x, %x, %x, %x)\n",
- pager, mlist, npages, sync);
-#endif
if (pager == NULL)
- return (FALSE); /* ??? */
- err = vnode_pager_io((vn_pager_t)pager->pg_data,
- mlist, npages, sync, UIO_WRITE);
- /*
- * If the operation was successful, mark the pages clean.
- */
- if (err == VM_PAGER_OK) {
- while (npages--) {
- (*mlist)->flags |= PG_CLEAN;
- pmap_clear_modify(VM_PAGE_TO_PHYS(*mlist));
- mlist++;
- }
- }
- return(err);
+ return FALSE;
+ marray[0] = m;
+ vnode_pager_output((vn_pager_t)pager->pg_data, marray, 1, rtvals);
+ return rtvals[0];
+}
+
+int
+vnode_pager_putmulti(pager, m, c, sync, rtvals)
+ vm_pager_t pager;
+ vm_page_t *m;
+ int c;
+ boolean_t sync;
+ int *rtvals;
+{
+ return vnode_pager_output((vn_pager_t)pager->pg_data, m, c, rtvals);
}
-static boolean_t
+
+boolean_t
vnode_pager_haspage(pager, offset)
vm_pager_t pager;
vm_offset_t offset;
{
register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
daddr_t bn;
+ int run;
int err;
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
-#endif
-
/*
* Offset beyond end of file, do not have the page
- * Lock the vnode first to make sure we have the most recent
- * version of the size.
*/
- VOP_LOCK(vnp->vnp_vp);
if (offset >= vnp->vnp_size) {
- VOP_UNLOCK(vnp->vnp_vp);
-#ifdef DEBUG
- if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
- printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
- pager, offset, vnp->vnp_size);
-#endif
return(FALSE);
}
@@ -311,53 +303,14 @@ vnode_pager_haspage(pager, offset)
*/
err = VOP_BMAP(vnp->vnp_vp,
offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
- (struct vnode **)0, &bn, NULL);
- VOP_UNLOCK(vnp->vnp_vp);
+ (struct vnode **)0, &bn, 0);
if (err) {
-#ifdef DEBUG
- if (vpagerdebug & VDB_FAIL)
- printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
- err, pager, offset);
-#endif
return(TRUE);
}
return((long)bn < 0 ? FALSE : TRUE);
}
-static void
-vnode_pager_cluster(pager, offset, loffset, hoffset)
- vm_pager_t pager;
- vm_offset_t offset;
- vm_offset_t *loffset;
- vm_offset_t *hoffset;
-{
- vn_pager_t vnp = (vn_pager_t)pager->pg_data;
- vm_offset_t loff, hoff;
-
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_cluster(%x, %x) ", pager, offset);
-#endif
- loff = offset;
- if (loff >= vnp->vnp_size)
- panic("vnode_pager_cluster: bad offset");
- /*
- * XXX could use VOP_BMAP to get maxcontig value
- */
- hoff = loff + MAXBSIZE;
- if (hoff > round_page(vnp->vnp_size))
- hoff = round_page(vnp->vnp_size);
-
- *loffset = loff;
- *hoffset = hoff;
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("returns [%x-%x]\n", loff, hoff);
-#endif
-}
-
/*
- * (XXX)
* Lets the VM system know about a change in size for a file.
* If this vnode is mapped into some address space (i.e. we have a pager
* for it) we adjust our own internal size and flush any cached pages in
@@ -399,19 +352,14 @@ vnode_pager_setsize(vp, nsize)
if (object == NULL)
return;
-#ifdef DEBUG
- if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
- printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
- vp, object, vnp->vnp_size, nsize);
-#endif
/*
* File has shrunk.
* Toss any cached pages beyond the new EOF.
*/
- if (nsize < vnp->vnp_size) {
+ if (round_page(nsize) < round_page(vnp->vnp_size)) {
vm_object_lock(object);
vm_object_page_remove(object,
- (vm_offset_t)nsize, vnp->vnp_size);
+ (vm_offset_t)round_page(nsize), round_page(vnp->vnp_size));
vm_object_unlock(object);
}
vnp->vnp_size = (vm_offset_t)nsize;
@@ -425,24 +373,67 @@ vnode_pager_umount(mp)
register vm_pager_t pager, npager;
struct vnode *vp;
- for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager){
+ pager = vnode_pager_list.tqh_first;
+ while( pager) {
/*
* Save the next pointer now since uncaching may
* terminate the object and render pager invalid
*/
- npager = pager->pg_list.tqe_next;
vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
- if (mp == (struct mount *)0 || vp->v_mount == mp) {
- VOP_LOCK(vp);
+ npager = pager->pg_list.tqe_next;
+ if (mp == (struct mount *)0 || vp->v_mount == mp)
(void) vnode_pager_uncache(vp);
- VOP_UNLOCK(vp);
- }
+ pager = npager;
}
}
/*
* Remove vnode associated object from the object cache.
*
+ * Note: this routine may be invoked as a result of a pager put
+ * operation (possibly at object termination time), so we must be careful.
+ */
+boolean_t
+vnode_pager_uncache(vp)
+ register struct vnode *vp;
+{
+ register vm_object_t object;
+ boolean_t uncached, locked;
+ vm_pager_t pager;
+
+ /*
+ * Not a mapped vnode
+ */
+ pager = (vm_pager_t)vp->v_vmdata;
+ if (pager == NULL)
+ return (TRUE);
+ /*
+ * Unlock the vnode if it is currently locked.
+ * We do this since uncaching the object may result
+ * in its destruction which may initiate paging
+ * activity which may necessitate locking the vnode.
+ */
+ locked = VOP_ISLOCKED(vp);
+ if (locked)
+ VOP_UNLOCK(vp);
+ /*
+ * Must use vm_object_lookup() as it actually removes
+ * the object from the cache list.
+ */
+ object = vm_object_lookup(pager);
+ if (object) {
+ uncached = (object->ref_count <= 1);
+ pager_cache(object, FALSE);
+ } else
+ uncached = TRUE;
+ if (locked)
+ VOP_LOCK(vp);
+ return(uncached);
+}
+#if 0
+/*
+ * Remove vnode associated object from the object cache.
+ *
* XXX unlock the vnode if it is currently locked.
* We must do this since uncaching the object may result in its
* destruction which may initiate paging activity which may necessitate
@@ -462,14 +453,6 @@ vnode_pager_uncache(vp)
pager = (vm_pager_t)vp->v_vmdata;
if (pager == NULL)
return (TRUE);
-#ifdef DEBUG
- if (!VOP_ISLOCKED(vp)) {
- extern int (**nfsv2_vnodeop_p)();
-
- if (vp->v_op != nfsv2_vnodeop_p)
- panic("vnode_pager_uncache: vnode not locked!");
- }
-#endif
/*
* Must use vm_object_lookup() as it actually removes
* the object from the cache list.
@@ -484,97 +467,958 @@ vnode_pager_uncache(vp)
uncached = TRUE;
return(uncached);
}
+#endif
-static int
-vnode_pager_io(vnp, mlist, npages, sync, rw)
- register vn_pager_t vnp;
- vm_page_t *mlist;
- int npages;
- boolean_t sync;
- enum uio_rw rw;
+
+void
+vnode_pager_freepage(m)
+ vm_page_t m;
+{
+ PAGE_WAKEUP(m);
+ vm_page_free(m);
+}
+
+/*
+ * calculate the linear (byte) disk address of specified virtual
+ * file address
+ */
+vm_offset_t
+vnode_pager_addr(vp, address)
+ struct vnode *vp;
+ vm_offset_t address;
+{
+ int rtaddress;
+ int bsize;
+ vm_offset_t block;
+ struct vnode *rtvp;
+ int err;
+ int vblock, voffset;
+ int run;
+
+ bsize = vp->v_mount->mnt_stat.f_iosize;
+ vblock = address / bsize;
+ voffset = address % bsize;
+
+ err = VOP_BMAP(vp,vblock,&rtvp,&block,0);
+
+ if( err)
+ rtaddress = -1;
+ else
+ rtaddress = block * DEV_BSIZE + voffset;
+
+ return rtaddress;
+}
+
+/*
+ * interrupt routine for I/O completion
+ */
+void
+vnode_pager_iodone(bp)
+ struct buf *bp;
{
+ bp->b_flags |= B_DONE;
+ wakeup((caddr_t)bp);
+}
+
+/*
+ * small block file system vnode pager input
+ */
+int
+vnode_pager_input_smlfs(vnp, m)
+ vn_pager_t vnp;
+ vm_page_t m;
+{
+ int i;
+ int s;
+ vm_offset_t paging_offset;
+ struct vnode *dp, *vp;
+ struct buf *bp;
+ vm_offset_t mapsize;
+ vm_offset_t foff;
+ vm_offset_t kva;
+ int fileaddr;
+ int block;
+ vm_offset_t bsize;
+ int error = 0;
+ int run;
+
+ paging_offset = m->object->paging_offset;
+ vp = vnp->vnp_vp;
+ bsize = vp->v_mount->mnt_stat.f_iosize;
+ foff = m->offset + paging_offset;
+
+ VOP_BMAP(vp, foff, &dp, 0, 0);
+
+ kva = vm_pager_map_page(m);
+
+ for(i=0;i<PAGE_SIZE/bsize;i++) {
+ /*
+ * calculate logical block and offset
+ */
+ block = foff / bsize + i;
+ s = splbio();
+ while (bp = incore(vp, block)) {
+ int amount;
+
+ /*
+ * wait until the buffer is avail or gone
+ */
+ if (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep ((caddr_t)bp, PVM, "vnwblk", 0);
+ continue;
+ }
+
+ amount = bsize;
+ if ((foff + bsize) > vnp->vnp_size)
+ amount = vnp->vnp_size - foff;
+
+ /*
+ * make sure that this page is in the buffer
+ */
+ if ((amount > 0) && amount <= bp->b_bcount) {
+ bp->b_flags |= B_BUSY;
+ splx(s);
+
+ /*
+ * copy the data from the buffer
+ */
+ bcopy(bp->b_un.b_addr, (caddr_t)kva + i * bsize, amount);
+ if (amount < bsize) {
+ bzero((caddr_t)kva + amount, bsize - amount);
+ }
+ bp->b_flags &= ~B_BUSY;
+ wakeup((caddr_t)bp);
+ goto nextblock;
+ }
+ break;
+ }
+ splx(s);
+ fileaddr = vnode_pager_addr(vp, foff + i * bsize);
+ if( fileaddr != -1) {
+ bp = getpbuf();
+ VHOLD(vp);
+
+ /* build a minimal buffer header */
+ bp->b_flags = B_BUSY | B_READ | B_CALL;
+ bp->b_iodone = vnode_pager_iodone;
+ bp->b_proc = curproc;
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ if( bp->b_rcred != NOCRED)
+ crhold(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva + i * bsize;
+ bp->b_blkno = fileaddr / DEV_BSIZE;
+ bgetvp(dp, bp);
+ bp->b_bcount = bsize;
+ bp->b_bufsize = bsize;
+
+ /* do the input */
+ VOP_STRATEGY(bp);
+
+ /* we definitely need to be at splbio here */
+
+ s = splbio();
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "vnsrd", 0);
+ }
+ splx(s);
+ if ((bp->b_flags & B_ERROR) != 0)
+ error = EIO;
+
+ /*
+ * free the buffer header back to the swap buffer pool
+ */
+ relpbuf(bp);
+ HOLDRELE(vp);
+ if( error)
+ break;
+ } else {
+ bzero((caddr_t) kva + i * bsize, bsize);
+ }
+nextblock:
+ }
+ vm_pager_unmap_page(kva);
+ if( error) {
+ return VM_PAGER_FAIL;
+ }
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ m->flags |= PG_CLEAN;
+ m->flags &= ~PG_LAUNDRY;
+ return VM_PAGER_OK;
+
+}
+
+
+/*
+ * old style vnode pager output routine
+ */
+int
+vnode_pager_input_old(vnp, m)
+ vn_pager_t vnp;
+ vm_page_t m;
+{
+ int i;
struct uio auio;
struct iovec aiov;
+ int error;
+ int size;
+ vm_offset_t foff;
+ vm_offset_t kva;
+
+ error = 0;
+ foff = m->offset + m->object->paging_offset;
+ /*
+ * Return failure if beyond current EOF
+ */
+ if (foff >= vnp->vnp_size) {
+ return VM_PAGER_BAD;
+ } else {
+ size = PAGE_SIZE;
+ if (foff + size > vnp->vnp_size)
+ size = vnp->vnp_size - foff;
+/*
+ * Allocate a kernel virtual address and initialize so that
+ * we can use VOP_READ/WRITE routines.
+ */
+ kva = vm_pager_map_page(m);
+ aiov.iov_base = (caddr_t)kva;
+ aiov.iov_len = size;
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = foff;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_resid = size;
+ auio.uio_procp = (struct proc *)0;
+
+ error = VOP_READ(vnp->vnp_vp, &auio, 0, curproc->p_ucred);
+ if (!error) {
+ register int count = size - auio.uio_resid;
+
+ if (count == 0)
+ error = EINVAL;
+ else if (count != PAGE_SIZE)
+ bzero((caddr_t)kva + count, PAGE_SIZE - count);
+ }
+ vm_pager_unmap_page(kva);
+ }
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ m->flags |= PG_CLEAN;
+ m->flags &= ~PG_LAUNDRY;
+ return error?VM_PAGER_FAIL:VM_PAGER_OK;
+}
+
+/*
+ * generic vnode pager input routine
+ */
+int
+vnode_pager_input(vnp, m, count, reqpage)
+ register vn_pager_t vnp;
+ vm_page_t *m;
+ int count, reqpage;
+{
+ int i,j;
vm_offset_t kva, foff;
- int error, size;
+ int size;
struct proc *p = curproc; /* XXX */
+ vm_object_t object;
+ vm_offset_t paging_offset;
+ struct vnode *dp, *vp;
+ vm_offset_t mapsize;
+ int bsize;
+
+ int first, last;
+ int reqaddr, firstaddr;
+ int run;
+ int block, offset;
+
+ int nbp;
+ struct buf *bp;
+ int s;
+ int failflag;
+
+ int errtype=0; /* 0 is file type otherwise vm type */
+ int error = 0;
+
+ object = m[reqpage]->object; /* all vm_page_t items are in same object */
+ paging_offset = object->paging_offset;
+
+ vp = vnp->vnp_vp;
+ bsize = vp->v_mount->mnt_stat.f_iosize;
+
+ /* get the UNDERLYING device for the file with VOP_BMAP() */
+ /*
+ * originally, we did not check for an error return
+ * value -- assuming an fs always has a bmap entry point
+ * -- that assumption is wrong!!!
+ */
+ kva = 0;
+ mapsize = 0;
+ foff = m[reqpage]->offset + paging_offset;
+ if (!VOP_BMAP(vp, foff, &dp, 0, 0)) {
+ /*
+ * we do not block for a kva, notice we default to a kva
+ * conservative behavior
+ */
+ kva = kmem_alloc_pageable(pager_map, (mapsize = count*PAGE_SIZE));
+ if( !kva) {
+ for (i = 0; i < count; i++) {
+ if (i != reqpage) {
+ vnode_pager_freepage(m[i]);
+ }
+ }
+ m[0] = m[reqpage];
+ kva = kmem_alloc_wait(pager_map, mapsize = PAGE_SIZE);
+ reqpage = 0;
+ count = 1;
+ }
+ }
- /* XXX */
- vm_page_t m;
- if (npages != 1)
- panic("vnode_pager_io: cannot handle multiple pages");
- m = *mlist;
- /* XXX */
-
-#ifdef DEBUG
- if (vpagerdebug & VDB_FOLLOW)
- printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
- vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
-#endif
- foff = m->offset + m->object->paging_offset;
/*
- * Allocate a kernel virtual address and initialize so that
- * we can use VOP_READ/WRITE routines.
+ * if we can't get a kva or we can't bmap, use old VOP code
*/
- kva = vm_pager_map_pages(mlist, npages, sync);
- if (kva == NULL)
- return(VM_PAGER_AGAIN);
+ if (!kva) {
+ for (i = 0; i < count; i++) {
+ if (i != reqpage) {
+ vnode_pager_freepage(m[i]);
+ }
+ }
+ return vnode_pager_input_old(vnp, m[reqpage]);
/*
- * After all of the potentially blocking operations have been
- * performed, we can do the size checks:
- * read beyond EOF (returns error)
- * short read
+ * if the blocksize is smaller than a page size, then use
+ * special small filesystem code. NFS sometimes has a small
+ * blocksize, but it can handle large reads itself.
*/
- VOP_LOCK(vnp->vnp_vp);
- if (foff >= vnp->vnp_size) {
- VOP_UNLOCK(vnp->vnp_vp);
- vm_pager_unmap_pages(kva, npages);
-#ifdef DEBUG
- if (vpagerdebug & VDB_SIZE)
- printf("vnode_pager_io: vp %x, off %d size %d\n",
- vnp->vnp_vp, foff, vnp->vnp_size);
-#endif
- return(VM_PAGER_BAD);
+ } else if( (PAGE_SIZE / bsize) > 1 &&
+ (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {
+
+ kmem_free_wakeup(pager_map, kva, mapsize);
+
+ for (i = 0; i < count; i++) {
+ if (i != reqpage) {
+ vnode_pager_freepage(m[i]);
+ }
+ }
+ return vnode_pager_input_smlfs(vnp, m[reqpage]);
+ }
+
+/*
+ * here on direct device I/O
+ */
+
+
+ /*
+ * This pathetic hack gets data from the buffer cache, if it's there.
+ * I believe that this is not really necessary, and the ends can
+ * be gotten by defaulting to the normal vfs read behavior, but this
+ * might be more efficient, because the will NOT invoke read-aheads
+ * and one of the purposes of this code is to bypass the buffer
+ * cache and keep from flushing it by reading in a program.
+ */
+ /*
+ * calculate logical block and offset
+ */
+ block = foff / bsize;
+ offset = foff % bsize;
+ s = splbio();
+
+ /*
+ * if we have a buffer in core, then try to use it
+ */
+ while (bp = incore(vp, block)) {
+ int amount;
+
+ /*
+ * wait until the buffer is avail or gone
+ */
+ if (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep ((caddr_t)bp, PVM, "vnwblk", 0);
+ continue;
+ }
+
+ amount = PAGE_SIZE;
+ if ((foff + amount) > vnp->vnp_size)
+ amount = vnp->vnp_size - foff;
+
+ /*
+ * make sure that this page is in the buffer
+ */
+ if ((amount > 0) && (offset + amount) <= bp->b_bcount) {
+ bp->b_flags |= B_BUSY;
+ splx(s);
+
+ /*
+ * map the requested page
+ */
+ pmap_kenter(kva, VM_PAGE_TO_PHYS(m[reqpage]));
+ pmap_update();
+
+ /*
+ * copy the data from the buffer
+ */
+ bcopy(bp->b_un.b_addr + offset, (caddr_t)kva, amount);
+ if (amount < PAGE_SIZE) {
+ bzero((caddr_t)kva + amount, PAGE_SIZE - amount);
+ }
+ /*
+ * unmap the page and free the kva
+ */
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
+ kmem_free_wakeup(pager_map, kva, mapsize);
+ /*
+ * release the buffer back to the block subsystem
+ */
+ bp->b_flags &= ~B_BUSY;
+ wakeup((caddr_t)bp);
+ /*
+ * we did not have to do any work to get the requested
+ * page, the read behind/ahead does not justify a read
+ */
+ for (i = 0; i < count; i++) {
+ if (i != reqpage) {
+ vnode_pager_freepage(m[i]);
+ }
+ }
+ count = 1;
+ reqpage = 0;
+ m[0] = m[reqpage];
+
+ /*
+ * sorry for the goto
+ */
+ goto finishup;
+ }
+ /*
+ * buffer is nowhere to be found, read from the disk
+ */
+ break;
+ }
+ splx(s);
+
+ reqaddr = vnode_pager_addr(vp, foff);
+ s = splbio();
+ /*
+ * Make sure that our I/O request is contiguous.
+ * Scan backward and stop for the first discontiguous
+ * entry or stop for a page being in buffer cache.
+ */
+ failflag = 0;
+ first = reqpage;
+ for (i = reqpage - 1; i >= 0; --i) {
+ if (failflag ||
+ incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize) ||
+ (vnode_pager_addr(vp, m[i]->offset + paging_offset))
+ != reqaddr + (i - reqpage) * PAGE_SIZE) {
+ vnode_pager_freepage(m[i]);
+ failflag = 1;
+ } else {
+ first = i;
+ }
+ }
+
+ /*
+ * Scan forward and stop for the first non-contiguous
+ * entry or stop for a page being in buffer cache.
+ */
+ failflag = 0;
+ last = reqpage + 1;
+ for (i = reqpage + 1; i < count; i++) {
+ if (failflag ||
+ incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize) ||
+ (vnode_pager_addr(vp, m[i]->offset + paging_offset))
+ != reqaddr + (i - reqpage) * PAGE_SIZE) {
+ vnode_pager_freepage(m[i]);
+ failflag = 1;
+ } else {
+ last = i + 1;
+ }
+ }
+ splx(s);
+
+ /*
+ * the first and last page have been calculated now, move input
+ * pages to be zero based...
+ */
+ count = last;
+ if (first != 0) {
+ for (i = first; i < count; i++) {
+ m[i - first] = m[i];
+ }
+ count -= first;
+ reqpage -= first;
}
- if (foff + PAGE_SIZE > vnp->vnp_size)
+
+ /*
+ * calculate the file virtual address for the transfer
+ */
+ foff = m[0]->offset + paging_offset;
+ /*
+ * and get the disk physical address (in bytes)
+ */
+ firstaddr = vnode_pager_addr(vp, foff);
+
+ /*
+ * calculate the size of the transfer
+ */
+ size = count * PAGE_SIZE;
+ if ((foff + size) > vnp->vnp_size)
size = vnp->vnp_size - foff;
- else
+
+ /*
+ * round up physical size for real devices
+ */
+ if( dp->v_type == VBLK || dp->v_type == VCHR)
+ size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+
+ /*
+ * and map the pages to be read into the kva
+ */
+ for (i = 0; i < count; i++)
+ pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
+
+ pmap_update();
+ bp = getpbuf();
+ VHOLD(vp);
+
+ /* build a minimal buffer header */
+ bp->b_flags = B_BUSY | B_READ | B_CALL;
+ bp->b_iodone = vnode_pager_iodone;
+ /* B_PHYS is not set, but it is nice to fill this in */
+ bp->b_proc = curproc;
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ if( bp->b_rcred != NOCRED)
+ crhold(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva;
+ bp->b_blkno = firstaddr / DEV_BSIZE;
+ bgetvp(dp, bp);
+ bp->b_bcount = size;
+ bp->b_bufsize = size;
+
+ /* do the input */
+ VOP_STRATEGY(bp);
+
+ s = splbio();
+ /* we definitely need to be at splbio here */
+
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "vnread", 0);
+ }
+ splx(s);
+ if ((bp->b_flags & B_ERROR) != 0)
+ error = EIO;
+
+ if (!error) {
+ if (size != count * PAGE_SIZE)
+ bzero((caddr_t)kva + size, PAGE_SIZE * count - size);
+ }
+
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE * count);
+ kmem_free_wakeup(pager_map, kva, mapsize);
+
+ /*
+ * free the buffer header back to the swap buffer pool
+ */
+ relpbuf(bp);
+ HOLDRELE(vp);
+
+finishup:
+ for (i = 0; i < count; i++) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ m[i]->flags |= PG_CLEAN;
+ m[i]->flags &= ~PG_LAUNDRY;
+ if (i != reqpage) {
+ /*
+ * whether or not to leave the page activated
+ * is up in the air, but we should put the page
+ * on a page queue somewhere. (it already is in
+ * the object).
+ * Result: It appears that emperical results show
+ * that deactivating pages is best.
+ */
+ /*
+ * just in case someone was asking for this
+ * page we now tell them that it is ok to use
+ */
+ if (!error) {
+ vm_page_deactivate(m[i]);
+ PAGE_WAKEUP(m[i]);
+ m[i]->flags &= ~PG_FAKE;
+ m[i]->act_count = 2;
+ } else {
+ vnode_pager_freepage(m[i]);
+ }
+ }
+ }
+ if (error) {
+ printf("vnode pager read error: %d\n", error);
+ }
+ if (errtype)
+ return error;
+ return (error ? VM_PAGER_FAIL : VM_PAGER_OK);
+}
+
+/*
+ * old-style vnode pager output routine
+ */
+int
+vnode_pager_output_old(vnp, m)
+ register vn_pager_t vnp;
+ vm_page_t m;
+{
+ vm_offset_t foff;
+ vm_offset_t kva;
+ vm_offset_t size;
+ struct iovec aiov;
+ struct uio auio;
+ struct vnode *vp;
+ int error;
+
+ vp = vnp->vnp_vp;
+ foff = m->offset + m->object->paging_offset;
+ /*
+ * Return failure if beyond current EOF
+ */
+ if (foff >= vnp->vnp_size) {
+ return VM_PAGER_BAD;
+ } else {
size = PAGE_SIZE;
- aiov.iov_base = (caddr_t)kva;
- aiov.iov_len = size;
- auio.uio_iov = &aiov;
- auio.uio_iovcnt = 1;
- auio.uio_offset = foff;
- auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_rw = rw;
- auio.uio_resid = size;
- auio.uio_procp = (struct proc *)0;
-#ifdef DEBUG
- if (vpagerdebug & VDB_IO)
- printf("vnode_pager_io: vp %x kva %x foff %x size %x",
- vnp->vnp_vp, kva, foff, size);
-#endif
- if (rw == UIO_READ)
- error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
+ if (foff + size > vnp->vnp_size)
+ size = vnp->vnp_size - foff;
+/*
+ * Allocate a kernel virtual address and initialize so that
+ * we can use VOP_WRITE routines.
+ */
+ kva = vm_pager_map_page(m);
+ aiov.iov_base = (caddr_t)kva;
+ aiov.iov_len = size;
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = foff;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_resid = size;
+ auio.uio_procp = (struct proc *)0;
+
+ error = VOP_WRITE(vp, &auio, 0, curproc->p_ucred);
+
+ if (!error) {
+ if ((size - auio.uio_resid) == 0) {
+ error = EINVAL;
+ }
+ }
+ vm_pager_unmap_page(kva);
+ return error?VM_PAGER_FAIL:VM_PAGER_OK;
+ }
+}
+
+/*
+ * vnode pager output on a small-block file system
+ */
+int
+vnode_pager_output_smlfs(vnp, m)
+ vn_pager_t vnp;
+ vm_page_t m;
+{
+ int i;
+ int s;
+ vm_offset_t paging_offset;
+ struct vnode *dp, *vp;
+ struct buf *bp;
+ vm_offset_t mapsize;
+ vm_offset_t foff;
+ vm_offset_t kva;
+ int fileaddr;
+ int block;
+ vm_offset_t bsize;
+ int run;
+ int error = 0;
+
+ paging_offset = m->object->paging_offset;
+ vp = vnp->vnp_vp;
+ bsize = vp->v_mount->mnt_stat.f_iosize;
+ foff = m->offset + paging_offset;
+
+ VOP_BMAP(vp, foff, &dp, 0, 0);
+ kva = vm_pager_map_page(m);
+ for(i = 0; !error && i < (PAGE_SIZE/bsize); i++) {
+ /*
+ * calculate logical block and offset
+ */
+ fileaddr = vnode_pager_addr(vp, foff + i * bsize);
+ if( fileaddr != -1) {
+ s = splbio();
+ if( bp = incore( vp, (foff/bsize) + i)) {
+ bp = getblk(vp, (foff/bsize) + i, bp->b_bufsize,0, 0);
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ }
+ splx(s);
+
+ bp = getpbuf();
+ VHOLD(vp);
+
+ /* build a minimal buffer header */
+ bp->b_flags = B_BUSY | B_CALL | B_WRITE;
+ bp->b_iodone = vnode_pager_iodone;
+ bp->b_proc = curproc;
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ if( bp->b_rcred != NOCRED)
+ crhold(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva + i * bsize;
+ bp->b_blkno = fileaddr / DEV_BSIZE;
+ bgetvp(dp, bp);
+ ++dp->v_numoutput;
+ /* for NFS */
+ bp->b_dirtyoff = 0;
+ bp->b_dirtyend = bsize;
+ bp->b_bcount = bsize;
+ bp->b_bufsize = bsize;
+
+ /* do the input */
+ VOP_STRATEGY(bp);
+
+ /* we definitely need to be at splbio here */
+
+ s = splbio();
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "vnswrt", 0);
+ }
+ splx(s);
+ if ((bp->b_flags & B_ERROR) != 0)
+ error = EIO;
+
+ /*
+ * free the buffer header back to the swap buffer pool
+ */
+ relpbuf(bp);
+ HOLDRELE(vp);
+ }
+ }
+ vm_pager_unmap_page(kva);
+ if( error)
+ return VM_PAGER_FAIL;
else
- error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
- VOP_UNLOCK(vnp->vnp_vp);
-#ifdef DEBUG
- if (vpagerdebug & VDB_IO) {
- if (error || auio.uio_resid)
- printf(" returns error %x, resid %x",
- error, auio.uio_resid);
- printf("\n");
+ return VM_PAGER_OK;
+}
+
+/*
+ * generic vnode pager output routine
+ */
+int
+vnode_pager_output(vnp, m, count, rtvals)
+ vn_pager_t vnp;
+ vm_page_t *m;
+ int count;
+ int *rtvals;
+{
+ int i,j;
+ vm_offset_t kva, foff;
+ int size;
+ struct proc *p = curproc; /* XXX */
+ vm_object_t object;
+ vm_offset_t paging_offset;
+ struct vnode *dp, *vp;
+ struct buf *bp;
+ vm_offset_t mapsize;
+ vm_offset_t reqaddr;
+ int run;
+ int bsize;
+ int s;
+
+ int error = 0;
+
+retryoutput:
+ object = m[0]->object; /* all vm_page_t items are in same object */
+ paging_offset = object->paging_offset;
+
+ vp = vnp->vnp_vp;
+ bsize = vp->v_mount->mnt_stat.f_iosize;
+
+ for(i=0;i<count;i++)
+ rtvals[i] = VM_PAGER_AGAIN;
+
+ /*
+ * if the filesystem does not have a bmap, then use the
+ * old code
+ */
+ if (VOP_BMAP(vp, m[0]->offset+paging_offset, &dp, 0, 0)) {
+
+ rtvals[0] = vnode_pager_output_old(vnp, m[0]);
+
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[0]));
+ m[0]->flags |= PG_CLEAN;
+ m[0]->flags &= ~PG_LAUNDRY;
+ return rtvals[0];
}
-#endif
- if (!error) {
- register int count = size - auio.uio_resid;
- if (count == 0)
- error = EINVAL;
- else if (count != PAGE_SIZE && rw == UIO_READ)
- bzero((void *)(kva + count), PAGE_SIZE - count);
+ /*
+ * if the filesystem has a small blocksize, then use
+ * the small block filesystem output code
+ */
+ if ((bsize < PAGE_SIZE) &&
+ (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {
+
+ for(i=0;i<count;i++) {
+ rtvals[i] = vnode_pager_output_smlfs(vnp, m[i]);
+ if( rtvals[i] == VM_PAGER_OK) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ m[i]->flags |= PG_CLEAN;
+ m[i]->flags &= ~PG_LAUNDRY;
+ }
+ }
+ return rtvals[0];
}
- vm_pager_unmap_pages(kva, npages);
- return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
+
+ /*
+ * get some kva for the output
+ */
+ kva = kmem_alloc_pageable(pager_map, (mapsize = count*PAGE_SIZE));
+ if( !kva) {
+ kva = kmem_alloc_pageable(pager_map, (mapsize = PAGE_SIZE));
+ count = 1;
+ if( !kva)
+ return rtvals[0];
+ }
+
+ for(i=0;i<count;i++) {
+ foff = m[i]->offset + paging_offset;
+ if (foff >= vnp->vnp_size) {
+ for(j=i;j<count;j++)
+ rtvals[j] = VM_PAGER_BAD;
+ count = i;
+ break;
+ }
+ }
+ if (count == 0) {
+ return rtvals[0];
+ }
+ foff = m[0]->offset + paging_offset;
+ reqaddr = vnode_pager_addr(vp, foff);
+ /*
+ * Scan forward and stop for the first non-contiguous
+ * entry or stop for a page being in buffer cache.
+ */
+ for (i = 1; i < count; i++) {
+ if ( vnode_pager_addr(vp, m[i]->offset + paging_offset)
+ != reqaddr + i * PAGE_SIZE) {
+ count = i;
+ break;
+ }
+ }
+
+ /*
+ * calculate the size of the transfer
+ */
+ size = count * PAGE_SIZE;
+ if ((foff + size) > vnp->vnp_size)
+ size = vnp->vnp_size - foff;
+
+ /*
+ * round up physical size for real devices
+ */
+ if( dp->v_type == VBLK || dp->v_type == VCHR)
+ size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+
+ /*
+ * and map the pages to be read into the kva
+ */
+ for (i = 0; i < count; i++)
+ pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
+ pmap_update();
+/*
+ printf("vnode: writing foff: %d, devoff: %d, size: %d\n",
+ foff, reqaddr, size);
+*/
+ /*
+ * next invalidate the incore vfs_bio data
+ */
+ for (i = 0; i < count; i++) {
+ int filblock = (foff + i * PAGE_SIZE) / bsize;
+ struct buf *fbp;
+
+ s = splbio();
+ if( fbp = incore( vp, filblock)) {
+ /* printf("invalidating: %d\n", filblock); */
+ fbp = getblk(vp, filblock, fbp->b_bufsize,0,0);
+ fbp->b_flags |= B_INVAL;
+ brelse(fbp);
+ }
+ splx(s);
+ }
+
+
+ bp = getpbuf();
+ VHOLD(vp);
+ /* build a minimal buffer header */
+ bp->b_flags = B_BUSY | B_WRITE | B_CALL;
+ bp->b_iodone = vnode_pager_iodone;
+ /* B_PHYS is not set, but it is nice to fill this in */
+ bp->b_proc = curproc;
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+
+ if( bp->b_rcred != NOCRED)
+ crhold(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crhold(bp->b_wcred);
+ bp->b_un.b_addr = (caddr_t) kva;
+ bp->b_blkno = reqaddr / DEV_BSIZE;
+ bgetvp(dp, bp);
+ ++dp->v_numoutput;
+
+ /* for NFS */
+ bp->b_dirtyoff = 0;
+ bp->b_dirtyend = size;
+
+ bp->b_bcount = size;
+ bp->b_bufsize = size;
+
+ /* do the output */
+ VOP_STRATEGY(bp);
+
+ s = splbio();
+
+ /* we definitely need to be at splbio here */
+
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "vnwrite", 0);
+ }
+ splx(s);
+
+ if ((bp->b_flags & B_ERROR) != 0)
+ error = EIO;
+
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE * count);
+ kmem_free_wakeup(pager_map, kva, mapsize);
+
+ /*
+ * free the buffer header back to the swap buffer pool
+ */
+ relpbuf(bp);
+ HOLDRELE(vp);
+
+ if( !error) {
+ for(i=0;i<count;i++) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ m[i]->flags |= PG_CLEAN;
+ m[i]->flags &= ~PG_LAUNDRY;
+ rtvals[i] = VM_PAGER_OK;
+ }
+ } else if( count != 1) {
+ error = 0;
+ count = 1;
+ goto retryoutput;
+ }
+
+ if (error) {
+ printf("vnode pager write error: %d\n", error);
+ }
+ return (error ? VM_PAGER_FAIL : VM_PAGER_OK);
}
+
diff --git a/sys/vm/vnode_pager.h b/sys/vm/vnode_pager.h
index 95c9545452ae..b01dc54ec09b 100644
--- a/sys/vm/vnode_pager.h
+++ b/sys/vm/vnode_pager.h
@@ -53,7 +53,4 @@ typedef struct vnpager *vn_pager_t;
#define VN_PAGER_NULL ((vn_pager_t)0)
-#define VNP_PAGING 0x01 /* vnode used for pageout */
-#define VNP_CACHED 0x02 /* vnode is cached */
-
#endif /* _VNODE_PAGER_ */