Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/apic_vector.S | 45
-rw-r--r--  sys/amd64/amd64/genassym.c | 9
-rw-r--r--  sys/amd64/amd64/mp_machdep.c | 37
-rw-r--r--  sys/amd64/include/cpufunc.h | 10
-rw-r--r--  sys/amd64/vmm/vmm_support.S | 11
-rw-r--r--  sys/arm/arm/cpufunc.c | 11
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv7.S | 27
-rw-r--r--  sys/arm/arm/db_trace.c | 139
-rw-r--r--  sys/arm/arm/elf_trampoline.c | 11
-rw-r--r--  sys/arm/arm/exception.S | 15
-rw-r--r--  sys/arm/arm/identcpu.c | 2
-rw-r--r--  sys/arm/arm/trap.c | 10
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_audio.c | 908
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_gpio.c | 328
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_sdhci.c | 40
-rw-r--r--  sys/arm/broadcom/bcm2835/files.bcm2835 | 23
-rw-r--r--  sys/arm/broadcom/bcm2835/vc_vchi_audioserv_defs.h | 156
-rw-r--r--  sys/arm/conf/BEAGLEBONE | 6
-rw-r--r--  sys/arm/conf/RPI-B | 6
-rw-r--r--  sys/arm/include/armreg.h | 3
-rw-r--r--  sys/arm/include/cpufunc.h | 2
-rw-r--r--  sys/arm/ti/ti_gpio.c | 2
-rw-r--r--  sys/arm/xscale/ixp425/if_npe.c | 2
-rw-r--r--  sys/boot/amd64/boot1.efi/fat.tmpl.bz2.uu | 29
-rwxr-xr-x  sys/boot/amd64/boot1.efi/generate-fat.sh | 2
-rw-r--r--  sys/boot/amd64/efi/main.c | 14
-rw-r--r--  sys/boot/arm/ixp425/boot2/ixp425_board.c | 2
-rw-r--r--  sys/boot/efi/include/efiapi.h | 52
-rw-r--r--  sys/boot/forth/beastie.4th | 11
-rw-r--r--  sys/boot/forth/brand.4th | 3
-rw-r--r--  sys/boot/pc98/boot2/Makefile | 1
-rw-r--r--  sys/cam/cam_ccb.h | 1
-rw-r--r--  sys/cam/cam_xpt.c | 1
-rw-r--r--  sys/cam/cam_xpt_internal.h | 2
-rw-r--r--  sys/cam/ctl/ctl.c | 171
-rw-r--r--  sys/cam/ctl/ctl.h | 3
-rw-r--r--  sys/cam/ctl/ctl_frontend_iscsi.c | 185
-rw-r--r--  sys/cam/ctl/ctl_frontend_iscsi.h | 1
-rw-r--r--  sys/cam/ctl/ctl_ioctl.h | 16
-rw-r--r--  sys/cam/ctl/ctl_private.h | 11
-rw-r--r--  sys/cam/scsi/scsi_all.h | 40
-rw-r--r--  sys/cam/scsi/scsi_xpt.c | 89
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/arm/dtrace/fasttrap_isa.c | 30
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/arm/sys/fasttrap_isa.h | 94
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c | 4
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h | 7
-rw-r--r--  sys/cddl/dev/dtrace/arm/dtrace_asm.S | 197
-rw-r--r--  sys/cddl/dev/dtrace/arm/dtrace_isa.c | 356
-rw-r--r--  sys/cddl/dev/dtrace/arm/dtrace_subr.c | 261
-rw-r--r--  sys/cddl/dev/dtrace/arm/regset.h | 57
-rw-r--r--  sys/cddl/dev/fbt/arm/fbt_isa.c | 192
-rw-r--r--  sys/cddl/dev/fbt/arm/fbt_isa.h | 30
-rw-r--r--  sys/cddl/dev/lockstat/lockstat.c | 3
-rw-r--r--  sys/cddl/dev/profile/profile.c | 10
-rw-r--r--  sys/conf/dtb.mk | 5
-rw-r--r--  sys/conf/files | 2
-rw-r--r--  sys/conf/files.powerpc | 6
-rw-r--r--  sys/conf/kern.mk | 8
-rw-r--r--  sys/conf/kern.opts.mk | 8
-rw-r--r--  sys/conf/kern.pre.mk | 9
-rw-r--r--  sys/conf/options | 6
-rw-r--r--  sys/conf/options.powerpc | 1
-rw-r--r--  sys/contrib/dev/acpica/include/actbl2.h | 1
-rw-r--r--  sys/contrib/dev/ath/ath_hal/ar9300/ar9300.h | 4
-rw-r--r--  sys/contrib/dev/ral/microcode.h | 694
-rw-r--r--  sys/contrib/dev/ral/rt2860.fw.uu | 192
-rw-r--r--  sys/contrib/vchiq/interface/compat/list.h | 256
-rw-r--r--  sys/contrib/vchiq/interface/compat/vchi_bsd.c | 532
-rw-r--r--  sys/contrib/vchiq/interface/compat/vchi_bsd.h | 434
-rw-r--r--  sys/contrib/vchiq/interface/vchi/connections/connection.h | 324
-rw-r--r--  sys/contrib/vchiq/interface/vchi/message_drivers/message.h | 200
-rw-r--r--  sys/contrib/vchiq/interface/vchi/vchi.h | 373
-rw-r--r--  sys/contrib/vchiq/interface/vchi/vchi_cfg.h | 224
-rw-r--r--  sys/contrib/vchiq/interface/vchi/vchi_cfg_internal.h | 71
-rw-r--r--  sys/contrib/vchiq/interface/vchi/vchi_common.h | 163
-rw-r--r--  sys/contrib/vchiq/interface/vchi/vchi_mh.h | 42
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq.h | 41
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835.h | 42
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c | 578
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c | 2809
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.h | 200
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_build_info.h | 37
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_cfg.h | 60
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.c | 117
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.h | 51
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.c | 3842
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.h | 710
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_if.h | 188
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_ioctl.h | 128
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c | 461
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c | 220
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_memdrv.h | 71
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_pagelist.h | 59
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_proc.c | 240
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_shim.c | 830
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.c | 151
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.h | 66
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_version.c | 59
-rw-r--r--  sys/dev/acpica/acpi.c | 30
-rw-r--r--  sys/dev/cxgb/cxgb_osdep.h | 6
-rw-r--r--  sys/dev/cxgbe/adapter.h | 6
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c | 52
-rw-r--r--  sys/dev/cxgbe/if_cxl.c | 44
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/device.c | 13
-rw-r--r--  sys/dev/cxgbe/offload.h | 7
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 77
-rw-r--r--  sys/dev/cxgbe/tom/t4_listen.c | 2
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c | 9
-rw-r--r--  sys/dev/drm2/radeon/ni.c | 8
-rw-r--r--  sys/dev/drm2/radeon/si.c | 6
-rw-r--r--  sys/dev/ed/if_ed.c | 6
-rw-r--r--  sys/dev/hwpmc/hwpmc_armv7.c | 2
-rw-r--r--  sys/dev/ipmi/ipmi.c | 104
-rw-r--r--  sys/dev/ipmi/ipmi_kcs.c | 20
-rw-r--r--  sys/dev/ipmi/ipmi_smic.c | 24
-rw-r--r--  sys/dev/ipmi/ipmi_ssif.c | 17
-rw-r--r--  sys/dev/ipmi/ipmivars.h | 15
-rw-r--r--  sys/dev/iscsi/icl.c | 2
-rw-r--r--  sys/dev/iscsi/icl.h | 1
-rw-r--r--  sys/dev/iscsi/icl_conn_if.m | 24
-rw-r--r--  sys/dev/iscsi/icl_soft.c | 60
-rw-r--r--  sys/dev/iscsi/icl_wrappers.h | 30
-rw-r--r--  sys/dev/iscsi/iscsi.c | 111
-rw-r--r--  sys/dev/iscsi/iscsi.h | 1
-rw-r--r--  sys/dev/iscsi/iscsi_ioctl.h | 24
-rw-r--r--  sys/dev/malo/if_malo.c | 15
-rw-r--r--  sys/dev/mwl/if_mwl.c | 15
-rw-r--r--  sys/dev/pci/pci.c | 59
-rw-r--r--  sys/dev/ral/if_ral_pci.c | 3
-rw-r--r--  sys/dev/ral/rt2860.c | 405
-rw-r--r--  sys/dev/ral/rt2860reg.h | 230
-rw-r--r--  sys/dev/sfxge/common/efsys.h | 261
-rw-r--r--  sys/dev/sfxge/sfxge.c | 50
-rw-r--r--  sys/dev/sfxge/sfxge.h | 86
-rw-r--r--  sys/dev/sfxge/sfxge_ev.c | 34
-rw-r--r--  sys/dev/sfxge/sfxge_mcdi.c | 24
-rw-r--r--  sys/dev/sfxge/sfxge_port.c | 70
-rw-r--r--  sys/dev/sfxge/sfxge_rx.c | 16
-rw-r--r--  sys/dev/sfxge/sfxge_tx.c | 73
-rw-r--r--  sys/dev/sfxge/sfxge_tx.h | 30
-rw-r--r--  sys/dev/sound/usb/uaudio.c | 96
-rw-r--r--  sys/dev/uart/uart_bus_pci.c | 1
-rw-r--r--  sys/dev/usb/controller/xhci.c | 4
-rw-r--r--  sys/dev/usb/serial/u3g.c | 1
-rw-r--r--  sys/dev/usb/usbdevs | 1
-rw-r--r--  sys/dev/vt/hw/vga/vt_vga.c | 3
-rw-r--r--  sys/dev/wpi/if_wpi.c | 5742
-rw-r--r--  sys/dev/wpi/if_wpi_debug.h | 98
-rw-r--r--  sys/dev/wpi/if_wpireg.h | 965
-rw-r--r--  sys/dev/wpi/if_wpivar.h | 112
-rw-r--r--  sys/fs/autofs/autofs.c | 31
-rw-r--r--  sys/fs/autofs/autofs_ioctl.h | 23
-rw-r--r--  sys/i386/i386/apic_vector.s | 50
-rw-r--r--  sys/i386/i386/genassym.c | 9
-rw-r--r--  sys/i386/i386/mp_machdep.c | 37
-rw-r--r--  sys/i386/include/cpufunc.h | 9
-rw-r--r--  sys/kern/init_main.c | 1
-rw-r--r--  sys/kern/kern_clock.c | 5
-rw-r--r--  sys/kern/kern_clocksource.c | 39
-rw-r--r--  sys/kern/kern_sig.c | 59
-rw-r--r--  sys/kern/kern_timeout.c | 240
-rw-r--r--  sys/kern/subr_bus.c | 276
-rw-r--r--  sys/kern/subr_hints.c | 28
-rw-r--r--  sys/kern/uipc_shm.c | 3
-rw-r--r--  sys/kern/vfs_mount.c | 10
-rw-r--r--  sys/modules/Makefile | 2
-rw-r--r--  sys/modules/cxgbe/Makefile | 1
-rw-r--r--  sys/modules/cxgbe/if_cxl/Makefile | 11
-rw-r--r--  sys/modules/dtb/rpi/Makefile | 5
-rw-r--r--  sys/modules/dtrace/Makefile | 4
-rw-r--r--  sys/modules/dtrace/dtrace/Makefile | 5
-rw-r--r--  sys/modules/wpi/Makefile | 2
-rw-r--r--  sys/netinet/if_ether.c | 22
-rw-r--r--  sys/netinet/in.c | 3
-rw-r--r--  sys/netinet6/in6.c | 6
-rw-r--r--  sys/netinet6/nd6.c | 23
-rw-r--r--  sys/netpfil/ipfw/ip_fw_iface.c | 8
-rw-r--r--  sys/netpfil/ipfw/ip_fw_nat.c | 4
-rw-r--r--  sys/netpfil/ipfw/ip_fw_private.h | 1
-rw-r--r--  sys/netpfil/ipfw/ip_fw_table.c | 17
-rw-r--r--  sys/netpfil/ipfw/ip_fw_table_algo.c | 13
-rw-r--r--  sys/powerpc/aim/locore32.S | 4
-rw-r--r--  sys/powerpc/aim/machdep.c | 93
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 47
-rw-r--r--  sys/powerpc/conf/NOTES | 1
-rw-r--r--  sys/powerpc/conf/WII | 110
-rw-r--r--  sys/powerpc/include/cpu.h | 2
-rw-r--r--  sys/powerpc/ofw/ofw_syscons.c | 12
-rw-r--r--  sys/powerpc/powerpc/swtch64.S | 17
-rw-r--r--  sys/powerpc/pseries/mmu_phyp.c | 34
-rw-r--r--  sys/powerpc/pseries/platform_chrp.c | 32
-rw-r--r--  sys/powerpc/pseries/xics.c | 25
-rw-r--r--  sys/powerpc/wii/ios_if.m | 64
-rw-r--r--  sys/powerpc/wii/locore.S | 131
-rw-r--r--  sys/powerpc/wii/platform_wii.c | 161
-rw-r--r--  sys/powerpc/wii/wii_bus.c | 340
-rw-r--r--  sys/powerpc/wii/wii_exireg.h | 35
-rw-r--r--  sys/powerpc/wii/wii_fb.c | 885
-rw-r--r--  sys/powerpc/wii/wii_fbreg.h | 40
-rw-r--r--  sys/powerpc/wii/wii_fbvar.h | 857
-rw-r--r--  sys/powerpc/wii/wii_gpio.c | 353
-rw-r--r--  sys/powerpc/wii/wii_gpioreg.h | 38
-rw-r--r--  sys/powerpc/wii/wii_ipc.c | 102
-rw-r--r--  sys/powerpc/wii/wii_ipcreg.h | 102
-rw-r--r--  sys/powerpc/wii/wii_pic.c | 244
-rw-r--r--  sys/powerpc/wii/wii_picreg.h | 42
-rw-r--r--  sys/sys/bitset.h | 10
-rw-r--r--  sys/sys/bus.h | 63
-rw-r--r--  sys/sys/callout.h | 1
-rw-r--r--  sys/sys/cdefs.h | 3
-rw-r--r--  sys/sys/copyright.h | 7
-rw-r--r--  sys/sys/cpuset.h | 1
-rw-r--r--  sys/sys/param.h | 2
-rw-r--r--  sys/sys/systm.h | 1
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c | 14
-rw-r--r--  sys/x86/acpica/madt.c | 20
-rw-r--r--  sys/x86/include/apicreg.h | 60
-rw-r--r--  sys/x86/include/apicvar.h | 14
-rw-r--r--  sys/x86/include/specialreg.h | 1
-rw-r--r--  sys/x86/x86/io_apic.c | 3
-rw-r--r--  sys/x86/x86/local_apic.c | 407
-rw-r--r--  sys/x86/xen/xen_apic.c | 1
222 files changed, 24608 insertions, 8006 deletions
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 4c87b4e80d37..d9f272439654 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -39,6 +39,7 @@
#include "opt_smp.h"
#include <machine/asmacros.h>
+#include <machine/specialreg.h>
#include <x86/apicreg.h>
#include "assym.s"
@@ -49,6 +50,22 @@
#define LK
#endif
+ .text
+ SUPERALIGN_TEXT
+ /* End Of Interrupt to APIC */
+as_lapic_eoi:
+ cmpl $0,x2apic_mode
+ jne 1f
+ movq lapic_map,%rax
+ movl $0,LA_EOI(%rax)
+ ret
+1:
+ movl $MSR_APIC_EOI,%ecx
+ xorl %eax,%eax
+ xorl %edx,%edx
+ wrmsr
+ ret
+
/*
* I/O Interrupt Entry Point. Rather than having one entry point for
* each interrupt source, we use one entry point for each 32-bit word
@@ -62,15 +79,22 @@
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
FAKE_MCOUNT(TF_RIP(%rsp)) ; \
- movq lapic, %rdx ; /* pointer to local APIC */ \
+ cmpl $0,x2apic_mode ; \
+ je 1f ; \
+ movl $(MSR_APIC_ISR0 + index),%ecx ; \
+ rdmsr ; \
+ jmp 2f ; \
+1: ; \
+ movq lapic_map, %rdx ; /* pointer to local APIC */ \
movl LA_ISR + 16 * (index)(%rdx), %eax ; /* load ISR */ \
+2: ; \
bsrl %eax, %eax ; /* index of highest set bit in ISR */ \
- jz 1f ; \
+ jz 3f ; \
addl $(32 * index),%eax ; \
movq %rsp, %rsi ; \
movl %eax, %edi ; /* pass the IRQ */ \
call lapic_handle_intr ; \
-1: ; \
+3: ; \
MEXITCOUNT ; \
jmp doreti
@@ -160,8 +184,7 @@ IDTVEC(xen_intr_upcall)
SUPERALIGN_TEXT
invltlb_ret:
- movq lapic, %rax
- movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
POP_FRAME
jmp doreti_iret
@@ -228,8 +251,7 @@ IDTVEC(invlcache)
IDTVEC(ipi_intr_bitmap_handler)
PUSH_FRAME
- movq lapic, %rdx
- movl $0, LA_EOI(%rdx) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
FAKE_MCOUNT(TF_RIP(%rsp))
@@ -245,8 +267,7 @@ IDTVEC(ipi_intr_bitmap_handler)
IDTVEC(cpustop)
PUSH_FRAME
- movq lapic, %rax
- movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
call cpustop_handler
jmp doreti
@@ -260,8 +281,7 @@ IDTVEC(cpususpend)
PUSH_FRAME
call cpususpend_handler
- movq lapic, %rax
- movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
jmp doreti
/*
@@ -279,7 +299,6 @@ IDTVEC(rendezvous)
incq (%rax)
#endif
call smp_rendezvous_action
- movq lapic, %rax
- movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
jmp doreti
#endif /* SMP */
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index aff685b2797b..3ffefc0e4c43 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -220,13 +220,8 @@ ASSYM(PC_COMMONTSSP, offsetof(struct pcpu, pc_commontssp));
ASSYM(PC_TSS, offsetof(struct pcpu, pc_tss));
ASSYM(PC_PM_SAVE_CNT, offsetof(struct pcpu, pc_pm_save_cnt));
-ASSYM(LA_VER, offsetof(struct LAPIC, version));
-ASSYM(LA_TPR, offsetof(struct LAPIC, tpr));
-ASSYM(LA_EOI, offsetof(struct LAPIC, eoi));
-ASSYM(LA_SVR, offsetof(struct LAPIC, svr));
-ASSYM(LA_ICR_LO, offsetof(struct LAPIC, icr_lo));
-ASSYM(LA_ICR_HI, offsetof(struct LAPIC, icr_hi));
-ASSYM(LA_ISR, offsetof(struct LAPIC, isr0));
+ASSYM(LA_EOI, LAPIC_EOI * LAPIC_MEM_MUL);
+ASSYM(LA_ISR, LAPIC_ISR0 * LAPIC_MEM_MUL);
ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL));
ASSYM(KDSEL, GSEL(GDATA_SEL, SEL_KPL));
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 3184f1325374..13c3d4300e05 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -705,8 +705,11 @@ init_secondary(void)
wrmsr(MSR_STAR, msr);
wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
- /* Disable local APIC just to be sure. */
- lapic_disable();
+ /*
+ * On real hardware, switch to x2apic mode if possible.
+ * Disable local APIC until BSP directed APs to run.
+ */
+ lapic_xapic_mode();
/* signal our startup to the BSP. */
mp_naps++;
@@ -1065,14 +1068,27 @@ ipi_startup(int apic_id, int vector)
{
/*
+ * This attempts to follow the algorithm described in the
+ * Intel Multiprocessor Specification v1.4 in section B.4.
+ * For each IPI, we allow the local APIC ~20us to deliver the
+ * IPI. If that times out, we panic.
+ */
+
+ /*
* first we do an INIT IPI: this INIT IPI might be run, resetting
* and running the target CPU. OR this INIT IPI might be latched (P5
* bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
* ignored.
*/
- lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
+ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
- lapic_ipi_wait(-1);
+ lapic_ipi_wait(20);
+
+ /* Explicitly deassert the INIT IPI. */
+ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
+ APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
+ apic_id);
+
DELAY(10000); /* wait ~10mS */
/*
@@ -1084,9 +1100,11 @@ ipi_startup(int apic_id, int vector)
* will run.
*/
lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
- APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
+ APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
vector, apic_id);
- lapic_ipi_wait(-1);
+ if (!lapic_ipi_wait(20))
+ panic("Failed to deliver first STARTUP IPI to APIC %d",
+ apic_id);
DELAY(200); /* wait ~200uS */
/*
@@ -1096,9 +1114,12 @@ ipi_startup(int apic_id, int vector)
* recognized after hardware RESET or INIT IPI.
*/
lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
- APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
+ APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
vector, apic_id);
- lapic_ipi_wait(-1);
+ if (!lapic_ipi_wait(20))
+ panic("Failed to deliver second STARTUP IPI to APIC %d",
+ apic_id);
+
DELAY(200); /* wait ~200uS */
}
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 7464739cf43d..7ea4bcf48850 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -343,6 +343,15 @@ rdmsr(u_int msr)
return (low | ((uint64_t)high << 32));
}
+static __inline uint32_t
+rdmsr32(u_int msr)
+{
+ uint32_t low;
+
+ __asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
+ return (low);
+}
+
static __inline uint64_t
rdpmc(u_int pmc)
{
@@ -826,6 +835,7 @@ u_long rcr2(void);
u_long rcr3(void);
u_long rcr4(void);
uint64_t rdmsr(u_int msr);
+uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
diff --git a/sys/amd64/vmm/vmm_support.S b/sys/amd64/vmm/vmm_support.S
index 2afc608ae71e..7919511c3eab 100644
--- a/sys/amd64/vmm/vmm_support.S
+++ b/sys/amd64/vmm/vmm_support.S
@@ -30,13 +30,14 @@
#include <machine/asmacros.h>
-#define LA_EOI 0xB0
-
.text
SUPERALIGN_TEXT
IDTVEC(justreturn)
+ pushq %rdx
pushq %rax
- movq lapic, %rax
- movl $0, LA_EOI(%rax)
+ pushq %rcx
+ call as_lapic_eoi
+ popq %rcx
popq %rax
- iretq
+ popq %rdx
+ jmp doreti_iret
diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c
index 8e69c5405ed3..65a2bca18813 100644
--- a/sys/arm/arm/cpufunc.c
+++ b/sys/arm/arm/cpufunc.c
@@ -837,6 +837,11 @@ u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_CORTEXA) || defined(CPU_KRAIT)
+/* Global cache line sizes, use 32 as default */
+int arm_dcache_min_line_size = 32;
+int arm_icache_min_line_size = 32;
+int arm_idcache_min_line_size = 32;
+
static void get_cachetype_cp15(void);
/* Additional cache information local to this file. Log2 of some of the
@@ -868,6 +873,12 @@ get_cachetype_cp15()
goto out;
if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
+ /* Resolve minimal cache line sizes */
+ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
+ arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
+ arm_idcache_min_line_size =
+ min(arm_icache_min_line_size, arm_dcache_min_line_size);
+
__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
: "=r" (clevel));
arm_cache_level = clevel;
diff --git a/sys/arm/arm/cpufunc_asm_armv7.S b/sys/arm/arm/cpufunc_asm_armv7.S
index 9fe6aa5c90d7..5dcc17147065 100644
--- a/sys/arm/arm/cpufunc_asm_armv7.S
+++ b/sys/arm/arm/cpufunc_asm_armv7.S
@@ -41,6 +41,12 @@ __FBSDID("$FreeBSD$");
.word _C_LABEL(arm_cache_loc)
.Lcache_type:
.word _C_LABEL(arm_cache_type)
+.Larmv7_dcache_line_size:
+ .word _C_LABEL(arm_dcache_min_line_size)
+.Larmv7_icache_line_size:
+ .word _C_LABEL(arm_icache_min_line_size)
+.Larmv7_idcache_line_size:
+ .word _C_LABEL(arm_idcache_min_line_size)
.Lway_mask:
.word 0x3ff
.Lmax_index:
@@ -180,14 +186,9 @@ ENTRY(armv7_idcache_wbinv_all)
RET
END(armv7_idcache_wbinv_all)
-/* XXX Temporary set it to 32 for MV cores, however this value should be
- * get from Cache Type register
- */
-.Larmv7_line_size:
- .word 32
-
ENTRY(armv7_dcache_wb_range)
- ldr ip, .Larmv7_line_size
+ ldr ip, .Larmv7_dcache_line_size
+ ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
@@ -202,7 +203,8 @@ ENTRY(armv7_dcache_wb_range)
END(armv7_dcache_wb_range)
ENTRY(armv7_dcache_wbinv_range)
- ldr ip, .Larmv7_line_size
+ ldr ip, .Larmv7_dcache_line_size
+ ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
@@ -221,7 +223,8 @@ END(armv7_dcache_wbinv_range)
* must use wb-inv of the entire cache.
*/
ENTRY(armv7_dcache_inv_range)
- ldr ip, .Larmv7_line_size
+ ldr ip, .Larmv7_dcache_line_size
+ ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
@@ -236,7 +239,8 @@ ENTRY(armv7_dcache_inv_range)
END(armv7_dcache_inv_range)
ENTRY(armv7_idcache_wbinv_range)
- ldr ip, .Larmv7_line_size
+ ldr ip, .Larmv7_idcache_line_size
+ ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
@@ -264,7 +268,8 @@ ENTRY_NP(armv7_icache_sync_all)
END(armv7_icache_sync_all)
ENTRY_NP(armv7_icache_sync_range)
- ldr ip, .Larmv7_line_size
+ ldr ip, .Larmv7_icache_line_size
+ ldr ip, [ip]
.Larmv7_sync_next:
mcr CP15_ICIMVAU(r0)
mcr CP15_DCCMVAC(r0)
diff --git a/sys/arm/arm/db_trace.c b/sys/arm/arm/db_trace.c
index 04ab565e6748..cbeee1fe1ae8 100644
--- a/sys/arm/arm/db_trace.c
+++ b/sys/arm/arm/db_trace.c
@@ -50,7 +50,6 @@ __FBSDID("$FreeBSD$");
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
-#ifdef __ARM_EABI__
/*
* Definitions for the instruction interpreter.
*
@@ -453,131 +452,6 @@ db_stack_trace_cmd(struct unwind_state *state)
}
}
}
-#endif
-
-/*
- * APCS stack frames are awkward beasts, so I don't think even trying to use
- * a structure to represent them is a good idea.
- *
- * Here's the diagram from the APCS. Increasing address is _up_ the page.
- *
- * save code pointer [fp] <- fp points to here
- * return link value [fp, #-4]
- * return sp value [fp, #-8]
- * return fp value [fp, #-12]
- * [saved v7 value]
- * [saved v6 value]
- * [saved v5 value]
- * [saved v4 value]
- * [saved v3 value]
- * [saved v2 value]
- * [saved v1 value]
- * [saved a4 value]
- * [saved a3 value]
- * [saved a2 value]
- * [saved a1 value]
- *
- * The save code pointer points twelve bytes beyond the start of the
- * code sequence (usually a single STM) that created the stack frame.
- * We have to disassemble it if we want to know which of the optional
- * fields are actually present.
- */
-
-#ifndef __ARM_EABI__ /* The frame format is differend in AAPCS */
-static void
-db_stack_trace_cmd(db_expr_t addr, db_expr_t count, boolean_t kernel_only)
-{
- u_int32_t *frame, *lastframe;
- c_db_sym_t sym;
- const char *name;
- db_expr_t value;
- db_expr_t offset;
- int scp_offset;
-
- frame = (u_int32_t *)addr;
- lastframe = NULL;
- scp_offset = -(get_pc_str_offset() >> 2);
-
- while (count-- && frame != NULL && !db_pager_quit) {
- db_addr_t scp;
- u_int32_t savecode;
- int r;
- u_int32_t *rp;
- const char *sep;
-
- /*
- * In theory, the SCP isn't guaranteed to be in the function
- * that generated the stack frame. We hope for the best.
- */
- scp = frame[FR_SCP];
-
- sym = db_search_symbol(scp, DB_STGY_ANY, &offset);
- if (sym == C_DB_SYM_NULL) {
- value = 0;
- name = "(null)";
- } else
- db_symbol_values(sym, &name, &value);
- db_printf("%s() at ", name);
- db_printsym(scp, DB_STGY_PROC);
- db_printf("\n");
-#ifdef __PROG26
- db_printf("\tscp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV] & R15_PC);
- db_printsym(frame[FR_RLV] & R15_PC, DB_STGY_PROC);
- db_printf(")\n");
-#else
- db_printf("\tscp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV]);
- db_printsym(frame[FR_RLV], DB_STGY_PROC);
- db_printf(")\n");
-#endif
- db_printf("\trsp=0x%08x rfp=0x%08x", frame[FR_RSP], frame[FR_RFP]);
-
- savecode = ((u_int32_t *)scp)[scp_offset];
- if ((savecode & 0x0e100000) == 0x08000000) {
- /* Looks like an STM */
- rp = frame - 4;
- sep = "\n\t";
- for (r = 10; r >= 0; r--) {
- if (savecode & (1 << r)) {
- db_printf("%sr%d=0x%08x",
- sep, r, *rp--);
- sep = (frame - rp) % 4 == 2 ?
- "\n\t" : " ";
- }
- }
- }
-
- db_printf("\n");
-
- /*
- * Switch to next frame up
- */
- if (frame[FR_RFP] == 0)
- break; /* Top of stack */
-
- lastframe = frame;
- frame = (u_int32_t *)(frame[FR_RFP]);
-
- if (INKERNEL((int)frame)) {
- /* staying in kernel */
- if (frame <= lastframe) {
- db_printf("Bad frame pointer: %p\n", frame);
- break;
- }
- } else if (INKERNEL((int)lastframe)) {
- /* switch from user to kernel */
- if (kernel_only)
- break; /* kernel stack only */
- } else {
- /* in user */
- if (frame <= lastframe) {
- db_printf("Bad user frame pointer: %p\n",
- frame);
- break;
- }
- }
- }
-}
-#endif
/* XXX stubs */
void
@@ -600,24 +474,18 @@ db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
int
db_trace_thread(struct thread *thr, int count)
{
-#ifdef __ARM_EABI__
struct unwind_state state;
-#endif
struct pcb *ctx;
if (thr != curthread) {
ctx = kdb_thr_ctx(thr);
-#ifdef __ARM_EABI__
state.registers[FP] = ctx->pcb_regs.sf_r11;
state.registers[SP] = ctx->pcb_regs.sf_sp;
state.registers[LR] = ctx->pcb_regs.sf_lr;
state.registers[PC] = ctx->pcb_regs.sf_pc;
db_stack_trace_cmd(&state);
-#else
- db_stack_trace_cmd(ctx->pcb_regs.sf_r11, -1, TRUE);
-#endif
} else
db_trace_self();
return (0);
@@ -626,7 +494,6 @@ db_trace_thread(struct thread *thr, int count)
void
db_trace_self(void)
{
-#ifdef __ARM_EABI__
struct unwind_state state;
uint32_t sp;
@@ -639,10 +506,4 @@ db_trace_self(void)
state.registers[PC] = (uint32_t)db_trace_self;
db_stack_trace_cmd(&state);
-#else
- db_addr_t addr;
-
- addr = (db_addr_t)__builtin_frame_address(0);
- db_stack_trace_cmd(addr, -1, FALSE);
-#endif
}
diff --git a/sys/arm/arm/elf_trampoline.c b/sys/arm/arm/elf_trampoline.c
index 559b99212be0..24b30118b206 100644
--- a/sys/arm/arm/elf_trampoline.c
+++ b/sys/arm/arm/elf_trampoline.c
@@ -115,6 +115,10 @@ int arm_pcache_unified;
int arm_dcache_align;
int arm_dcache_align_mask;
+int arm_dcache_min_line_size = 32;
+int arm_icache_min_line_size = 32;
+int arm_idcache_min_line_size = 32;
+
u_int arm_cache_level;
u_int arm_cache_type[14];
u_int arm_cache_loc;
@@ -277,6 +281,13 @@ get_cachetype_cp15()
goto out;
if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
+ /* Resolve minimal cache line sizes */
+ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
+ arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
+ arm_idcache_min_line_size =
+ (arm_dcache_min_line_size > arm_icache_min_line_size ?
+ arm_icache_min_line_size : arm_dcache_min_line_size);
+
__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
: "=r" (clevel));
arm_cache_level = clevel;
diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
index 17172f823c6c..58ae61255774 100644
--- a/sys/arm/arm/exception.S
+++ b/sys/arm/arm/exception.S
@@ -48,11 +48,26 @@
#include "assym.s"
+#include "opt_kdtrace.h"
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/asmacros.h>
__FBSDID("$FreeBSD$");
+#ifdef KDTRACE_HOOKS
+ .bss
+ .align 4
+ .global _C_LABEL(dtrace_invop_jump_addr)
+_C_LABEL(dtrace_invop_jump_addr):
+ .word 0
+ .word 0
+
+ .global _C_LABEL(dtrace_invop_calltrap_addr)
+_C_LABEL(dtrace_invop_calltrap_addr):
+ .word 0
+ .word 0
+#endif
+
.text
.align 2
diff --git a/sys/arm/arm/identcpu.c b/sys/arm/arm/identcpu.c
index b37a1b3d1b69..75bf08cde618 100644
--- a/sys/arm/arm/identcpu.c
+++ b/sys/arm/arm/identcpu.c
@@ -387,7 +387,7 @@ identify_arm_cpu(void)
u_int8_t type, linesize;
int i;
- cpuid = cpu_id();
+ cpuid = cpu_ident();
if (cpuid == 0) {
printf("Processor failed probe - no CPU ID\n");
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index 081cfaaacf58..0f142ce06b5b 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -78,6 +78,9 @@
* Created : 28/11/94
*/
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -427,6 +430,13 @@ dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
{
const char *mode;
+#ifdef KDTRACE_HOOKS
+ if (!TRAP_USERMODE(tf)) {
+ if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK))
+ return (0);
+ }
+#endif
+
mode = TRAP_USERMODE(tf) ? "user" : "kernel";
disable_interrupts(PSR_I|PSR_F);
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_audio.c b/sys/arm/broadcom/bcm2835/bcm2835_audio.c
new file mode 100644
index 000000000000..91696e9479e2
--- /dev/null
+++ b/sys/arm/broadcom/bcm2835/bcm2835_audio.c
@@ -0,0 +1,908 @@
+/*-
+ * Copyright (c) 2015 Oleksandr Tymoshenko <gonzo@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_snd.h"
+#endif
+
+#include <dev/sound/pcm/sound.h>
+#include <dev/sound/chip.h>
+
+#include "mixer_if.h"
+
+#include "interface/compat/vchi_bsd.h"
+#include "interface/vchi/vchi.h"
+#include "interface/vchiq_arm/vchiq.h"
+
+#include "vc_vchi_audioserv_defs.h"
+
+SND_DECLARE_FILE("$FreeBSD$");
+
+#define DEST_AUTO 0
+#define DEST_HEADPHONES 1
+#define DEST_HDMI 2
+
+#define VCHIQ_AUDIO_PACKET_SIZE 4000
+#define VCHIQ_AUDIO_BUFFER_SIZE 128000
+
+#define VCHIQ_AUDIO_MAX_VOLUME
+/* volume in terms of 0.01dB */
+#define VCHIQ_AUDIO_VOLUME_MIN -10239
+#define VCHIQ_AUDIO_VOLUME(db100) (uint32_t)(-((db100) << 8)/100)
+
+/* dB levels with 5% volume step */
+static int db_levels[] = {
+ VCHIQ_AUDIO_VOLUME_MIN, -4605, -3794, -3218, -2772,
+ -2407, -2099, -1832, -1597, -1386,
+ -1195, -1021, -861, -713, -575,
+ -446, -325, -210, -102, 0,
+};
+
+static uint32_t bcm2835_audio_playfmt[] = {
+ SND_FORMAT(AFMT_U8, 1, 0),
+ SND_FORMAT(AFMT_U8, 2, 0),
+ SND_FORMAT(AFMT_S8, 1, 0),
+ SND_FORMAT(AFMT_S8, 2, 0),
+ SND_FORMAT(AFMT_S16_LE, 1, 0),
+ SND_FORMAT(AFMT_S16_LE, 2, 0),
+ SND_FORMAT(AFMT_U16_LE, 1, 0),
+ SND_FORMAT(AFMT_U16_LE, 2, 0),
+ 0
+};
+
+static struct pcmchan_caps bcm2835_audio_playcaps = {8000, 48000, bcm2835_audio_playfmt, 0};
+
+struct bcm2835_audio_info;
+
+#define PLAYBACK_IDLE 0
+#define PLAYBACK_STARTING 1
+#define PLAYBACK_PLAYING 2
+#define PLAYBACK_STOPPING 3
+
+struct bcm2835_audio_chinfo {
+ struct bcm2835_audio_info *parent;
+ struct pcm_channel *channel;
+ struct snd_dbuf *buffer;
+ uint32_t fmt, spd, blksz;
+
+ uint32_t complete_pos;
+ uint32_t free_buffer;
+ uint32_t buffered_ptr;
+ int playback_state;
+};
+
+struct bcm2835_audio_info {
+ device_t dev;
+ unsigned int bufsz;
+ struct bcm2835_audio_chinfo pch;
+ uint32_t dest, volume;
+ struct mtx *lock;
+ struct intr_config_hook intr_hook;
+
+ /* VCHI data */
+ struct mtx vchi_lock;
+
+ /* MSG reply */
+ struct mtx msg_avail_lock;
+ struct cv msg_avail_cv;
+ uint32_t msg_result;
+
+ VCHI_INSTANCE_T vchi_instance;
+ VCHI_CONNECTION_T *vchi_connection;
+ VCHI_SERVICE_HANDLE_T vchi_handle;
+
+ struct mtx data_lock;
+ struct cv data_cv;
+
+ /* Unloadign module */
+ int unloading;
+};
+
+#define bcm2835_audio_lock(_ess) snd_mtxlock((_ess)->lock)
+#define bcm2835_audio_unlock(_ess) snd_mtxunlock((_ess)->lock)
+#define bcm2835_audio_lock_assert(_ess) snd_mtxassert((_ess)->lock)
+
+#define VCHIQ_VCHI_LOCK(sc) mtx_lock(&(sc)->vchi_lock)
+#define VCHIQ_VCHI_UNLOCK(sc) mtx_unlock(&(sc)->vchi_lock)
+
+static const char *
+dest_description(uint32_t dest)
+{
+ switch (dest) {
+ case DEST_AUTO:
+ return "AUTO";
+ break;
+
+ case DEST_HEADPHONES:
+ return "HEADPHONES";
+ break;
+
+ case DEST_HDMI:
+ return "HDMI";
+ break;
+ default:
+ return "UNKNOWN";
+ break;
+ }
+}
+
+static void
+bcm2835_audio_callback(void *param, const VCHI_CALLBACK_REASON_T reason, void *msg_handle)
+{
+ struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)param;
+ int32_t status;
+ uint32_t msg_len;
+ VC_AUDIO_MSG_T m;
+
+ if (reason != VCHI_CALLBACK_MSG_AVAILABLE)
+ return;
+
+ status = vchi_msg_dequeue(sc->vchi_handle,
+ &m, sizeof m, &msg_len, VCHI_FLAGS_NONE);
+ if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
+ sc->msg_result = m.u.result.success;
+ cv_signal(&sc->msg_avail_cv);
+ } else if (m.type == VC_AUDIO_MSG_TYPE_COMPLETE) {
+ struct bcm2835_audio_chinfo *ch = m.u.complete.cookie;
+
+ int count = m.u.complete.count & 0xffff;
+ int perr = (m.u.complete.count & (1U << 30)) != 0;
+
+ ch->complete_pos = (ch->complete_pos + count) % sndbuf_getsize(ch->buffer);
+ ch->free_buffer += count;
+
+ if (perr || ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE) {
+ chn_intr(ch->channel);
+ cv_signal(&sc->data_cv);
+ }
+ } else
+ printf("%s: unknown m.type: %d\n", __func__, m.type);
+}
+
+/* VCHIQ stuff */
+static void
+bcm2835_audio_init(struct bcm2835_audio_info *sc)
+{
+ int status;
+
+ /* Initialize and create a VCHI connection */
+ status = vchi_initialise(&sc->vchi_instance);
+ if (status != 0) {
+ printf("vchi_initialise failed: %d\n", status);
+ return;
+ }
+
+ status = vchi_connect(NULL, 0, sc->vchi_instance);
+ if (status != 0) {
+ printf("vchi_connect failed: %d\n", status);
+ return;
+ }
+
+ SERVICE_CREATION_T params = {
+ VCHI_VERSION_EX(VC_AUDIOSERV_VER, VC_AUDIOSERV_MIN_VER),
+ VC_AUDIO_SERVER_NAME, /* 4cc service code */
+ sc->vchi_connection, /* passed in fn pointers */
+ 0, /* rx fifo size */
+ 0, /* tx fifo size */
+ bcm2835_audio_callback, /* service callback */
+ sc, /* service callback parameter */
+ 1,
+ 1,
+ 0 /* want crc check on bulk transfers */
+ };
+
+ status = vchi_service_open(sc->vchi_instance, &params,
+ &sc->vchi_handle);
+
+ if (status == 0)
+ /* Finished with the service for now */
+ vchi_service_release(sc->vchi_handle);
+ else
+ sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;
+}
+
+static void
+bcm2835_audio_release(struct bcm2835_audio_info *sc)
+{
+ int success;
+
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+ success = vchi_service_close(sc->vchi_handle);
+ if (success != 0)
+ printf("vchi_service_close failed: %d\n", success);
+ sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;
+ }
+
+ vchi_disconnect(sc->vchi_instance);
+}
+
+static void
+bcm2835_audio_reset_channel(struct bcm2835_audio_chinfo *ch)
+{
+ ch->free_buffer = VCHIQ_AUDIO_BUFFER_SIZE;
+ ch->playback_state = 0;
+ ch->buffered_ptr = 0;
+ ch->complete_pos = 0;
+
+ sndbuf_reset(ch->buffer);
+}
+
+static void
+bcm2835_audio_start(struct bcm2835_audio_chinfo *ch)
+{
+ VC_AUDIO_MSG_T m;
+ int ret;
+ struct bcm2835_audio_info *sc = ch->parent;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+
+ bcm2835_audio_reset_channel(ch);
+
+ m.type = VC_AUDIO_MSG_TYPE_START;
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ vchi_service_release(sc->vchi_handle);
+ }
+ VCHIQ_VCHI_UNLOCK(sc);
+
+}
+
+static void
+bcm2835_audio_stop(struct bcm2835_audio_chinfo *ch)
+{
+ VC_AUDIO_MSG_T m;
+ int ret;
+ struct bcm2835_audio_info *sc = ch->parent;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+
+ m.type = VC_AUDIO_MSG_TYPE_STOP;
+ m.u.stop.draining = 0;
+
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ vchi_service_release(sc->vchi_handle);
+ }
+ VCHIQ_VCHI_UNLOCK(sc);
+}
+
+static void
+bcm2835_audio_open(struct bcm2835_audio_info *sc)
+{
+ VC_AUDIO_MSG_T m;
+ int ret;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+
+ m.type = VC_AUDIO_MSG_TYPE_OPEN;
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ vchi_service_release(sc->vchi_handle);
+ }
+ VCHIQ_VCHI_UNLOCK(sc);
+}
+
+static void
+bcm2835_audio_update_controls(struct bcm2835_audio_info *sc)
+{
+ VC_AUDIO_MSG_T m;
+ int ret, db;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+
+ sc->msg_result = -1;
+
+ m.type = VC_AUDIO_MSG_TYPE_CONTROL;
+ m.u.control.dest = sc->dest;
+ if (sc->volume > 99)
+ sc->volume = 99;
+ db = db_levels[sc->volume/5];
+ m.u.control.volume = VCHIQ_AUDIO_VOLUME(db);
+
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ mtx_lock(&sc->msg_avail_lock);
+ cv_wait_sig(&sc->msg_avail_cv, &sc->msg_avail_lock);
+ if (sc->msg_result)
+ printf("%s failed: %d\n", __func__, sc->msg_result);
+ mtx_unlock(&sc->msg_avail_lock);
+
+ vchi_service_release(sc->vchi_handle);
+ }
+ VCHIQ_VCHI_UNLOCK(sc);
+}
+
+static void
+bcm2835_audio_update_params(struct bcm2835_audio_info *sc, struct bcm2835_audio_chinfo *ch)
+{
+ VC_AUDIO_MSG_T m;
+ int ret;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
+ vchi_service_use(sc->vchi_handle);
+
+ sc->msg_result = -1;
+
+ m.type = VC_AUDIO_MSG_TYPE_CONFIG;
+ m.u.config.channels = AFMT_CHANNEL(ch->fmt);
+ m.u.config.samplerate = ch->spd;
+ m.u.config.bps = AFMT_BIT(ch->fmt);
+
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ mtx_lock(&sc->msg_avail_lock);
+ cv_wait_sig(&sc->msg_avail_cv, &sc->msg_avail_lock);
+ if (sc->msg_result)
+ printf("%s failed: %d\n", __func__, sc->msg_result);
+ mtx_unlock(&sc->msg_avail_lock);
+
+ vchi_service_release(sc->vchi_handle);
+ }
+ VCHIQ_VCHI_UNLOCK(sc);
+}
+
+static __inline uint32_t
+vchiq_unbuffered_bytes(struct bcm2835_audio_chinfo *ch)
+{
+ uint32_t size, ready, readyptr, readyend;
+
+ size = sndbuf_getsize(ch->buffer);
+ readyptr = sndbuf_getreadyptr(ch->buffer);
+ ready = sndbuf_getready(ch->buffer);
+
+ readyend = readyptr + ready;
+ /* Normal case */
+ if (ch->buffered_ptr >= readyptr) {
+ if (readyend > ch->buffered_ptr)
+ return readyend - ch->buffered_ptr;
+ else
+ return 0;
+ }
+ else { /* buffered_ptr overflow */
+ if (readyend > ch->buffered_ptr + size)
+ return readyend - ch->buffered_ptr - size;
+ else
+ return 0;
+ }
+}
+
+static void
+bcm2835_audio_write_samples(struct bcm2835_audio_chinfo *ch)
+{
+ struct bcm2835_audio_info *sc = ch->parent;
+ VC_AUDIO_MSG_T m;
+ void *buf;
+ uint32_t count, size;
+ int ret;
+
+ VCHIQ_VCHI_LOCK(sc);
+ if (sc->vchi_handle == VCHIQ_SERVICE_HANDLE_INVALID) {
+ VCHIQ_VCHI_UNLOCK(sc);
+ return;
+ }
+
+ vchi_service_use(sc->vchi_handle);
+
+ size = sndbuf_getsize(ch->buffer);
+ count = vchiq_unbuffered_bytes(ch);
+ buf = (uint8_t*)sndbuf_getbuf(ch->buffer) + ch->buffered_ptr;
+
+ if (ch->buffered_ptr + count > size)
+ count = size - ch->buffered_ptr;
+
+ if (count < VCHIQ_AUDIO_PACKET_SIZE)
+ goto done;
+
+ count = min(count, ch->free_buffer);
+ count -= count % VCHIQ_AUDIO_PACKET_SIZE;
+
+ m.type = VC_AUDIO_MSG_TYPE_WRITE;
+ m.u.write.count = count;
+ m.u.write.max_packet = VCHIQ_AUDIO_PACKET_SIZE;
+ m.u.write.callback = NULL;
+ m.u.write.cookie = ch;
+ if (buf)
+ m.u.write.silence = 0;
+ else
+ m.u.write.silence = 1;
+
+ ret = vchi_msg_queue(sc->vchi_handle,
+ &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);
+
+ if (buf) {
+ while (count > 0) {
+ int bytes = MIN((int)m.u.write.max_packet, (int)count);
+ ch->free_buffer -= bytes;
+ ch->buffered_ptr += bytes;
+ ch->buffered_ptr = ch->buffered_ptr % size;
+ ret = vchi_msg_queue(sc->vchi_handle,
+ buf, bytes, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ if (ret != 0)
+ printf("%s: vchi_msg_queue failed: %d\n",
+ __func__, ret);
+ buf = (char *)buf + bytes;
+ count -= bytes;
+ }
+ }
+done:
+
+ vchi_service_release(sc->vchi_handle);
+ VCHIQ_VCHI_UNLOCK(sc);
+}
+
+static void
+bcm2835_audio_worker(void *data)
+{
+ struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)data;
+ struct bcm2835_audio_chinfo *ch = &sc->pch;
+ mtx_lock(&sc->data_lock);
+ while(1) {
+
+ if (sc->unloading)
+ break;
+
+ if ((ch->playback_state == PLAYBACK_PLAYING) &&
+ (vchiq_unbuffered_bytes(ch) >= VCHIQ_AUDIO_PACKET_SIZE)
+ && (ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)) {
+ bcm2835_audio_write_samples(ch);
+ } else {
+ if (ch->playback_state == PLAYBACK_STOPPING) {
+ bcm2835_audio_reset_channel(&sc->pch);
+ ch->playback_state = PLAYBACK_IDLE;
+ }
+
+ cv_wait_sig(&sc->data_cv, &sc->data_lock);
+
+ if (ch->playback_state == PLAYBACK_STARTING) {
+ /* Give it initial kick */
+ chn_intr(sc->pch.channel);
+ ch->playback_state = PLAYBACK_PLAYING;
+ }
+ }
+ }
+ mtx_unlock(&sc->data_lock);
+
+ kproc_exit(0);
+}
+
+static void
+bcm2835_audio_create_worker(struct bcm2835_audio_info *sc)
+{
+ struct proc *newp;
+
+ if (kproc_create(bcm2835_audio_worker, (void*)sc, &newp, 0, 0,
+ "bcm2835_audio_worker") != 0) {
+ printf("failed to create bcm2835_audio_worker\n");
+ }
+}
+
+/* -------------------------------------------------------------------- */
+/* channel interface for VCHI audio */
+static void *
+bcmchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir)
+{
+ struct bcm2835_audio_info *sc = devinfo;
+ struct bcm2835_audio_chinfo *ch = &sc->pch;
+ void *buffer;
+
+ if (dir == PCMDIR_REC)
+ return NULL;
+
+ ch->parent = sc;
+ ch->channel = c;
+ ch->buffer = b;
+
+ /* default values */
+ ch->spd = 44100;
+ ch->fmt = SND_FORMAT(AFMT_S16_LE, 2, 0);
+ ch->blksz = VCHIQ_AUDIO_PACKET_SIZE;
+
+ buffer = malloc(sc->bufsz, M_DEVBUF, M_WAITOK | M_ZERO);
+
+ if (sndbuf_setup(ch->buffer, buffer, sc->bufsz) != 0) {
+ free(buffer, M_DEVBUF);
+ return NULL;
+ }
+
+ bcm2835_audio_update_params(sc, ch);
+
+ return ch;
+}
+
+static int
+bcmchan_free(kobj_t obj, void *data)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+ void *buffer;
+
+ buffer = sndbuf_getbuf(ch->buffer);
+ if (buffer)
+ free(buffer, M_DEVBUF);
+
+ return (0);
+}
+
+static int
+bcmchan_setformat(kobj_t obj, void *data, uint32_t format)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+ struct bcm2835_audio_info *sc = ch->parent;
+
+ bcm2835_audio_lock(sc);
+
+ ch->fmt = format;
+ bcm2835_audio_update_params(sc, ch);
+
+ bcm2835_audio_unlock(sc);
+
+ return 0;
+}
+
+static uint32_t
+bcmchan_setspeed(kobj_t obj, void *data, uint32_t speed)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+ struct bcm2835_audio_info *sc = ch->parent;
+
+ bcm2835_audio_lock(sc);
+
+ ch->spd = speed;
+ bcm2835_audio_update_params(sc, ch);
+
+ bcm2835_audio_unlock(sc);
+
+ return ch->spd;
+}
+
+static uint32_t
+bcmchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+
+ return ch->blksz;
+}
+
+static int
+bcmchan_trigger(kobj_t obj, void *data, int go)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+ struct bcm2835_audio_info *sc = ch->parent;
+
+ if (!PCMTRIG_COMMON(go))
+ return (0);
+
+ bcm2835_audio_lock(sc);
+
+ switch (go) {
+ case PCMTRIG_START:
+ bcm2835_audio_start(ch);
+ ch->playback_state = PLAYBACK_STARTING;
+ /* wakeup worker thread */
+ cv_signal(&sc->data_cv);
+ break;
+
+ case PCMTRIG_STOP:
+ case PCMTRIG_ABORT:
+ ch->playback_state = 1;
+ bcm2835_audio_stop(ch);
+ break;
+
+ default:
+ break;
+ }
+
+ bcm2835_audio_unlock(sc);
+ return 0;
+}
+
+static uint32_t
+bcmchan_getptr(kobj_t obj, void *data)
+{
+ struct bcm2835_audio_chinfo *ch = data;
+ struct bcm2835_audio_info *sc = ch->parent;
+ uint32_t ret;
+
+ bcm2835_audio_lock(sc);
+
+ ret = ch->complete_pos - (ch->complete_pos % VCHIQ_AUDIO_PACKET_SIZE);
+
+ bcm2835_audio_unlock(sc);
+
+ return ret;
+}
+
+static struct pcmchan_caps *
+bcmchan_getcaps(kobj_t obj, void *data)
+{
+
+ return &bcm2835_audio_playcaps;
+}
+
+static kobj_method_t bcmchan_methods[] = {
+ KOBJMETHOD(channel_init, bcmchan_init),
+ KOBJMETHOD(channel_free, bcmchan_free),
+ KOBJMETHOD(channel_setformat, bcmchan_setformat),
+ KOBJMETHOD(channel_setspeed, bcmchan_setspeed),
+ KOBJMETHOD(channel_setblocksize, bcmchan_setblocksize),
+ KOBJMETHOD(channel_trigger, bcmchan_trigger),
+ KOBJMETHOD(channel_getptr, bcmchan_getptr),
+ KOBJMETHOD(channel_getcaps, bcmchan_getcaps),
+ KOBJMETHOD_END
+};
+CHANNEL_DECLARE(bcmchan);
+
+/************************************************************/
+
+static int
+bcmmix_init(struct snd_mixer *m)
+{
+
+ mix_setdevs(m, SOUND_MASK_VOLUME);
+
+ return (0);
+}
+
+static int
+bcmmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
+{
+ struct bcm2835_audio_info *sc = mix_getdevinfo(m);
+
+ switch (dev) {
+ case SOUND_MIXER_VOLUME:
+ sc->volume = left;
+ bcm2835_audio_update_controls(sc);
+ break;
+
+ default:
+ break;
+ }
+
+ return left | (left << 8);
+}
+
+static kobj_method_t bcmmixer_methods[] = {
+ KOBJMETHOD(mixer_init, bcmmix_init),
+ KOBJMETHOD(mixer_set, bcmmix_set),
+ KOBJMETHOD_END
+};
+
+MIXER_DECLARE(bcmmixer);
+
+static int
+sysctl_bcm2835_audio_dest(SYSCTL_HANDLER_ARGS)
+{
+ struct bcm2835_audio_info *sc = arg1;
+ int val;
+ int err;
+
+ val = sc->dest;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err || !req->newptr) /* error || read request */
+ return (err);
+
+ if ((val < 0) || (val > 2))
+ return (EINVAL);
+
+ sc->dest = val;
+ device_printf(sc->dev, "destination set to %s\n", dest_description(val));
+ bcm2835_audio_update_controls(sc);
+
+ return (0);
+}
+
+static void
+vchi_audio_sysctl_init(struct bcm2835_audio_info *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree_node;
+ struct sysctl_oid_list *tree;
+
+ /*
+ * Add system sysctl tree/handlers.
+ */
+ ctx = device_get_sysctl_ctx(sc->dev);
+ tree_node = device_get_sysctl_tree(sc->dev);
+ tree = SYSCTL_CHILDREN(tree_node);
+ SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "dest",
+ CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
+ sysctl_bcm2835_audio_dest, "IU", "audio destination, "
+ "0 - auto, 1 - headphones, 2 - HDMI");
+}
+
+static void
+bcm2835_audio_identify(driver_t *driver, device_t parent)
+{
+
+ BUS_ADD_CHILD(parent, 0, "pcm", 0);
+}
+
+static int
+bcm2835_audio_probe(device_t dev)
+{
+
+	device_set_desc(dev, "VCHIQ audio");
+ return (BUS_PROBE_DEFAULT);
+}
+
+
+static void
+bcm2835_audio_delayed_init(void *xsc)
+{
+ struct bcm2835_audio_info *sc;
+ char status[SND_STATUSLEN];
+
+ sc = xsc;
+
+ config_intrhook_disestablish(&sc->intr_hook);
+
+ bcm2835_audio_init(sc);
+ bcm2835_audio_open(sc);
+ sc->volume = 75;
+ sc->dest = DEST_AUTO;
+
+ if (mixer_init(sc->dev, &bcmmixer_class, sc)) {
+ device_printf(sc->dev, "mixer_init failed\n");
+ goto no;
+ }
+
+ if (pcm_register(sc->dev, sc, 1, 1)) {
+ device_printf(sc->dev, "pcm_register failed\n");
+ goto no;
+ }
+
+ pcm_addchan(sc->dev, PCMDIR_PLAY, &bcmchan_class, sc);
+ snprintf(status, SND_STATUSLEN, "at VCHIQ");
+ pcm_setstatus(sc->dev, status);
+
+ bcm2835_audio_reset_channel(&sc->pch);
+ bcm2835_audio_create_worker(sc);
+
+ vchi_audio_sysctl_init(sc);
+
+no:
+ ;
+}
+
+static int
+bcm2835_audio_attach(device_t dev)
+{
+ struct bcm2835_audio_info *sc;
+
+ sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ sc->dev = dev;
+ sc->bufsz = VCHIQ_AUDIO_BUFFER_SIZE;
+
+ sc->lock = snd_mtxcreate(device_get_nameunit(dev), "bcm2835_audio softc");
+
+ mtx_init(&sc->vchi_lock, "bcm2835_audio", "vchi_lock", MTX_DEF);
+ mtx_init(&sc->msg_avail_lock, "msg_avail_mtx", "msg_avail_mtx", MTX_DEF);
+ cv_init(&sc->msg_avail_cv, "msg_avail_cv");
+ mtx_init(&sc->data_lock, "data_mtx", "data_mtx", MTX_DEF);
+ cv_init(&sc->data_cv, "data_cv");
+ sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ /*
+ * We need interrupts enabled for VCHI to work properly,
+	 * so delay initialization until it happens
+ */
+ sc->intr_hook.ich_func = bcm2835_audio_delayed_init;
+ sc->intr_hook.ich_arg = sc;
+
+ if (config_intrhook_establish(&sc->intr_hook) != 0)
+ goto no;
+
+ return 0;
+
+no:
+ return ENXIO;
+}
+
+static int
+bcm2835_audio_detach(device_t dev)
+{
+ int r;
+ struct bcm2835_audio_info *sc;
+ sc = pcm_getdevinfo(dev);
+
+ /* Stop worker thread */
+ sc->unloading = 1;
+ cv_signal(&sc->data_cv);
+
+ r = pcm_unregister(dev);
+ if (r)
+ return r;
+
+ mtx_destroy(&sc->vchi_lock);
+ mtx_destroy(&sc->msg_avail_lock);
+ cv_destroy(&sc->msg_avail_cv);
+ mtx_destroy(&sc->data_lock);
+ cv_destroy(&sc->data_cv);
+
+ bcm2835_audio_release(sc);
+
+ if (sc->lock) {
+ snd_mtxfree(sc->lock);
+ sc->lock = NULL;
+ }
+
+ free(sc, M_DEVBUF);
+
+ return 0;
+}
+
+static device_method_t bcm2835_audio_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, bcm2835_audio_identify),
+ DEVMETHOD(device_probe, bcm2835_audio_probe),
+ DEVMETHOD(device_attach, bcm2835_audio_attach),
+ DEVMETHOD(device_detach, bcm2835_audio_detach),
+
+ { 0, 0 }
+};
+
+static driver_t bcm2835_audio_driver = {
+ "pcm",
+ bcm2835_audio_methods,
+ PCM_SOFTC_SIZE,
+};
+
+DRIVER_MODULE(bcm2835_audio, vchiq, bcm2835_audio_driver, pcm_devclass, 0, 0);
+MODULE_DEPEND(bcm2835_audio, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
+MODULE_DEPEND(bcm2835_audio, vchiq, 1, 1, 1);
+MODULE_VERSION(bcm2835_audio, 1);
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
index 2a3cebd99299..97256f71d73f 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
@@ -31,26 +31,19 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
-
+#include <sys/gpio.h>
+#include <sys/interrupt.h>
#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/rman.h>
#include <sys/lock.h>
+#include <sys/module.h>
#include <sys/mutex.h>
-#include <sys/gpio.h>
+#include <sys/rman.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
-#include <machine/cpu.h>
-#include <machine/cpufunc.h>
-#include <machine/resource.h>
-#include <machine/fdt.h>
-#include <machine/intr.h>
-#include <dev/fdt/fdt_common.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
#include <arm/broadcom/bcm2835/bcm2835_gpio.h>
@@ -65,6 +58,7 @@ __FBSDID("$FreeBSD$");
#define BCM_GPIO_IRQS 4
#define BCM_GPIO_PINS 54
+#define BCM_GPIO_PINS_PER_BANK 32
#define BCM_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN)
@@ -89,12 +83,15 @@ struct bcm_gpio_softc {
struct resource * sc_res[BCM_GPIO_IRQS + 1];
bus_space_tag_t sc_bst;
bus_space_handle_t sc_bsh;
- void * sc_intrhand;
+ void * sc_intrhand[BCM_GPIO_IRQS];
int sc_gpio_npins;
int sc_ro_npins;
int sc_ro_pins[BCM_GPIO_PINS];
struct gpio_pin sc_gpio_pins[BCM_GPIO_PINS];
+ struct intr_event * sc_events[BCM_GPIO_PINS];
struct bcm_gpio_sysctl sc_sysctl[BCM_GPIO_PINS];
+ enum intr_trigger sc_irq_trigger[BCM_GPIO_PINS];
+ enum intr_polarity sc_irq_polarity[BCM_GPIO_PINS];
};
enum bcm_gpio_pud {
@@ -103,21 +100,35 @@ enum bcm_gpio_pud {
BCM_GPIO_PULLUP,
};
-#define BCM_GPIO_LOCK(_sc) mtx_lock(&_sc->sc_mtx)
-#define BCM_GPIO_UNLOCK(_sc) mtx_unlock(&_sc->sc_mtx)
-#define BCM_GPIO_LOCK_ASSERT(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED)
-
-#define BCM_GPIO_GPFSEL(_bank) 0x00 + _bank * 4
-#define BCM_GPIO_GPSET(_bank) 0x1c + _bank * 4
-#define BCM_GPIO_GPCLR(_bank) 0x28 + _bank * 4
-#define BCM_GPIO_GPLEV(_bank) 0x34 + _bank * 4
-#define BCM_GPIO_GPPUD(_bank) 0x94
-#define BCM_GPIO_GPPUDCLK(_bank) 0x98 + _bank * 4
-
+#define BCM_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx)
+#define BCM_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx)
+#define BCM_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define BCM_GPIO_WRITE(_sc, _off, _val) \
- bus_space_write_4(_sc->sc_bst, _sc->sc_bsh, _off, _val)
+ bus_space_write_4((_sc)->sc_bst, (_sc)->sc_bsh, _off, _val)
#define BCM_GPIO_READ(_sc, _off) \
- bus_space_read_4(_sc->sc_bst, _sc->sc_bsh, _off)
+ bus_space_read_4((_sc)->sc_bst, (_sc)->sc_bsh, _off)
+#define BCM_GPIO_CLEAR_BITS(_sc, _off, _bits) \
+ BCM_GPIO_WRITE(_sc, _off, BCM_GPIO_READ(_sc, _off) & ~(_bits))
+#define BCM_GPIO_SET_BITS(_sc, _off, _bits) \
+ BCM_GPIO_WRITE(_sc, _off, BCM_GPIO_READ(_sc, _off) | _bits)
+#define BCM_GPIO_BANK(a) (a / BCM_GPIO_PINS_PER_BANK)
+#define BCM_GPIO_MASK(a) (1U << (a % BCM_GPIO_PINS_PER_BANK))
+
+#define BCM_GPIO_GPFSEL(_bank) (0x00 + _bank * 4) /* Function Select */
+#define BCM_GPIO_GPSET(_bank) (0x1c + _bank * 4) /* Pin Out Set */
+#define BCM_GPIO_GPCLR(_bank) (0x28 + _bank * 4) /* Pin Out Clear */
+#define BCM_GPIO_GPLEV(_bank) (0x34 + _bank * 4) /* Pin Level */
+#define BCM_GPIO_GPEDS(_bank) (0x40 + _bank * 4) /* Event Status */
+#define BCM_GPIO_GPREN(_bank) (0x4c + _bank * 4) /* Rising Edge irq */
+#define BCM_GPIO_GPFEN(_bank) (0x58 + _bank * 4) /* Falling Edge irq */
+#define BCM_GPIO_GPHEN(_bank) (0x64 + _bank * 4) /* High Level irq */
+#define BCM_GPIO_GPLEN(_bank) (0x70 + _bank * 4) /* Low Level irq */
+#define BCM_GPIO_GPAREN(_bank) (0x7c + _bank * 4) /* Async Rising Edge */
+#define BCM_GPIO_GPAFEN(_bank) (0x88 + _bank * 4) /* Async Falling Edge */
+#define BCM_GPIO_GPPUD(_bank) (0x94) /* Pin Pull up/down */
+#define BCM_GPIO_GPPUDCLK(_bank) (0x98 + _bank * 4) /* Pin Pull up clock */
+
+static struct bcm_gpio_softc *bcm_gpio_sc = NULL;
static int
bcm_gpio_pin_is_ro(struct bcm_gpio_softc *sc, int pin)
@@ -665,6 +676,40 @@ bcm_gpio_get_reserved_pins(struct bcm_gpio_softc *sc)
}
static int
+bcm_gpio_intr(void *arg)
+{
+ int bank_last, irq;
+ struct bcm_gpio_softc *sc;
+ struct intr_event *event;
+ uint32_t bank, mask, reg;
+
+ sc = (struct bcm_gpio_softc *)arg;
+ reg = 0;
+ bank_last = -1;
+ for (irq = 0; irq < BCM_GPIO_PINS; irq++) {
+ bank = BCM_GPIO_BANK(irq);
+ mask = BCM_GPIO_MASK(irq);
+ if (bank != bank_last) {
+ reg = BCM_GPIO_READ(sc, BCM_GPIO_GPEDS(bank));
+ bank_last = bank;
+ }
+ if (reg & mask) {
+ event = sc->sc_events[irq];
+ if (event != NULL && !TAILQ_EMPTY(&event->ie_handlers))
+ intr_event_handle(event, NULL);
+ else {
+ device_printf(sc->sc_dev, "Stray IRQ %d\n",
+ irq);
+ }
+ /* Clear the Status bit by writing '1' to it. */
+ BCM_GPIO_WRITE(sc, BCM_GPIO_GPEDS(bank), mask);
+ }
+ }
+
+ return (FILTER_HANDLED);
+}
+
+static int
bcm_gpio_probe(device_t dev)
{
@@ -679,6 +724,39 @@ bcm_gpio_probe(device_t dev)
}
static int
+bcm_gpio_intr_attach(device_t dev)
+{
+ struct bcm_gpio_softc *sc;
+ int i;
+
+ sc = device_get_softc(dev);
+ for (i = 0; i < BCM_GPIO_IRQS; i++) {
+ if (bus_setup_intr(dev, sc->sc_res[i + 1],
+ INTR_TYPE_MISC | INTR_MPSAFE, bcm_gpio_intr,
+ NULL, sc, &sc->sc_intrhand[i]) != 0) {
+ return (-1);
+ }
+ }
+
+ return (0);
+}
+
+static void
+bcm_gpio_intr_detach(device_t dev)
+{
+ struct bcm_gpio_softc *sc;
+ int i;
+
+ sc = device_get_softc(dev);
+ for (i = 0; i < BCM_GPIO_IRQS; i++) {
+ if (sc->sc_intrhand[i]) {
+ bus_teardown_intr(dev, sc->sc_res[i + 1],
+ sc->sc_intrhand[i]);
+ }
+ }
+}
+
+static int
bcm_gpio_attach(device_t dev)
{
int i, j;
@@ -686,30 +764,34 @@ bcm_gpio_attach(device_t dev)
struct bcm_gpio_softc *sc;
uint32_t func;
- sc = device_get_softc(dev);
- sc->sc_dev = dev;
- mtx_init(&sc->sc_mtx, "bcm gpio", "gpio", MTX_DEF);
+ if (bcm_gpio_sc != NULL)
+ return (ENXIO);
+
+ bcm_gpio_sc = sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ mtx_init(&sc->sc_mtx, "bcm gpio", "gpio", MTX_SPIN);
if (bus_alloc_resources(dev, bcm_gpio_res_spec, sc->sc_res) != 0) {
device_printf(dev, "cannot allocate resources\n");
goto fail;
}
sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
-
+ /* Setup the GPIO interrupt handler. */
+ if (bcm_gpio_intr_attach(dev)) {
+ device_printf(dev, "unable to setup the gpio irq handler\n");
+ goto fail;
+ }
/* Find our node. */
gpio = ofw_bus_get_node(sc->sc_dev);
-
if (!OF_hasprop(gpio, "gpio-controller"))
/* Node is not a GPIO controller. */
goto fail;
-
/*
* Find the read-only pins. These are pins we never touch or bad
* things could happen.
*/
if (bcm_gpio_get_reserved_pins(sc) == -1)
goto fail;
-
/* Initialize the software controlled pins. */
for (i = 0, j = 0; j < BCM_GPIO_PINS; j++) {
snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
@@ -718,6 +800,9 @@ bcm_gpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_pin = j;
sc->sc_gpio_pins[i].gp_caps = BCM_GPIO_DEFAULT_CAPS;
sc->sc_gpio_pins[i].gp_flags = bcm_gpio_func_flag(func);
+ /* The default is active-low interrupts. */
+ sc->sc_irq_trigger[i] = INTR_TRIGGER_LEVEL;
+ sc->sc_irq_polarity[i] = INTR_POLARITY_LOW;
i++;
}
sc->sc_gpio_npins = i;
@@ -729,6 +814,7 @@ bcm_gpio_attach(device_t dev)
return (0);
fail:
+ bcm_gpio_intr_detach(dev);
bus_release_resources(dev, bcm_gpio_res_spec, sc->sc_res);
mtx_destroy(&sc->sc_mtx);
@@ -742,6 +828,177 @@ bcm_gpio_detach(device_t dev)
return (EBUSY);
}
+static uint32_t
+bcm_gpio_intr_reg(struct bcm_gpio_softc *sc, unsigned int irq, uint32_t bank)
+{
+
+ if (irq > BCM_GPIO_PINS)
+ return (0);
+ if (sc->sc_irq_trigger[irq] == INTR_TRIGGER_LEVEL) {
+ if (sc->sc_irq_polarity[irq] == INTR_POLARITY_LOW)
+ return (BCM_GPIO_GPLEN(bank));
+ else if (sc->sc_irq_polarity[irq] == INTR_POLARITY_HIGH)
+ return (BCM_GPIO_GPHEN(bank));
+ } else if (sc->sc_irq_trigger[irq] == INTR_TRIGGER_EDGE) {
+ if (sc->sc_irq_polarity[irq] == INTR_POLARITY_LOW)
+ return (BCM_GPIO_GPFEN(bank));
+ else if (sc->sc_irq_polarity[irq] == INTR_POLARITY_HIGH)
+ return (BCM_GPIO_GPREN(bank));
+ }
+
+ return (0);
+}
+
+static void
+bcm_gpio_mask_irq(void *source)
+{
+ uint32_t bank, mask, reg;
+ unsigned int irq;
+
+ irq = (unsigned int)source;
+ if (irq > BCM_GPIO_PINS)
+ return;
+ if (bcm_gpio_pin_is_ro(bcm_gpio_sc, irq))
+ return;
+ bank = BCM_GPIO_BANK(irq);
+ mask = BCM_GPIO_MASK(irq);
+ BCM_GPIO_LOCK(bcm_gpio_sc);
+ reg = bcm_gpio_intr_reg(bcm_gpio_sc, irq, bank);
+ if (reg != 0)
+ BCM_GPIO_CLEAR_BITS(bcm_gpio_sc, reg, mask);
+ BCM_GPIO_UNLOCK(bcm_gpio_sc);
+}
+
+static void
+bcm_gpio_unmask_irq(void *source)
+{
+ uint32_t bank, mask, reg;
+ unsigned int irq;
+
+ irq = (unsigned int)source;
+ if (irq > BCM_GPIO_PINS)
+ return;
+ if (bcm_gpio_pin_is_ro(bcm_gpio_sc, irq))
+ return;
+ bank = BCM_GPIO_BANK(irq);
+ mask = BCM_GPIO_MASK(irq);
+ BCM_GPIO_LOCK(bcm_gpio_sc);
+ reg = bcm_gpio_intr_reg(bcm_gpio_sc, irq, bank);
+ if (reg != 0)
+ BCM_GPIO_SET_BITS(bcm_gpio_sc, reg, mask);
+ BCM_GPIO_UNLOCK(bcm_gpio_sc);
+}
+
+static int
+bcm_gpio_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *res)
+{
+ int pin;
+
+ if (type != SYS_RES_IRQ)
+ return (ENXIO);
+ /* Unmask the interrupt. */
+ pin = rman_get_start(res);
+ bcm_gpio_unmask_irq((void *)pin);
+
+ return (0);
+}
+
+static int
+bcm_gpio_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *res)
+{
+ int pin;
+
+ if (type != SYS_RES_IRQ)
+ return (ENXIO);
+ /* Mask the interrupt. */
+ pin = rman_get_start(res);
+ bcm_gpio_mask_irq((void *)pin);
+
+ return (0);
+}
+
+static int
+bcm_gpio_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol)
+{
+ int bank;
+ struct bcm_gpio_softc *sc;
+ uint32_t mask, oldreg, reg;
+
+ if (irq > BCM_GPIO_PINS)
+ return (EINVAL);
+ /* There is no standard trigger or polarity. */
+ if (trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+ if (bcm_gpio_pin_is_ro(sc, irq))
+ return (EINVAL);
+ bank = BCM_GPIO_BANK(irq);
+ mask = BCM_GPIO_MASK(irq);
+ BCM_GPIO_LOCK(sc);
+ oldreg = bcm_gpio_intr_reg(sc, irq, bank);
+ sc->sc_irq_trigger[irq] = trig;
+ sc->sc_irq_polarity[irq] = pol;
+ reg = bcm_gpio_intr_reg(sc, irq, bank);
+ if (reg != 0)
+ BCM_GPIO_SET_BITS(sc, reg, mask);
+ if (reg != oldreg && oldreg != 0)
+ BCM_GPIO_CLEAR_BITS(sc, oldreg, mask);
+ BCM_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+bcm_gpio_setup_intr(device_t bus, device_t child, struct resource *ires,
+ int flags, driver_filter_t *filt, driver_intr_t *handler,
+ void *arg, void **cookiep)
+{
+ struct bcm_gpio_softc *sc;
+ struct intr_event *event;
+ int pin, error;
+
+ sc = device_get_softc(bus);
+ pin = rman_get_start(ires);
+ if (pin > BCM_GPIO_PINS)
+ panic("%s: bad pin %d", __func__, pin);
+ event = sc->sc_events[pin];
+ if (event == NULL) {
+ error = intr_event_create(&event, (void *)pin, 0, pin,
+ bcm_gpio_mask_irq, bcm_gpio_unmask_irq, NULL, NULL,
+ "gpio%d pin%d:", device_get_unit(bus), pin);
+ if (error != 0)
+ return (error);
+ sc->sc_events[pin] = event;
+ }
+ intr_event_add_handler(event, device_get_nameunit(child), filt,
+ handler, arg, intr_priority(flags), flags, cookiep);
+
+ return (0);
+}
+
+static int
+bcm_gpio_teardown_intr(device_t dev, device_t child, struct resource *ires,
+ void *cookie)
+{
+ struct bcm_gpio_softc *sc;
+ int pin, err;
+
+ sc = device_get_softc(dev);
+ pin = rman_get_start(ires);
+ if (pin > BCM_GPIO_PINS)
+ panic("%s: bad pin %d", __func__, pin);
+ if (sc->sc_events[pin] == NULL)
+ panic("Trying to teardown unoccupied IRQ");
+ err = intr_event_remove_handler(cookie);
+ if (!err)
+ sc->sc_events[pin] = NULL;
+
+ return (err);
+}
+
static phandle_t
bcm_gpio_get_node(device_t bus, device_t dev)
{
@@ -767,6 +1024,13 @@ static device_method_t bcm_gpio_methods[] = {
DEVMETHOD(gpio_pin_set, bcm_gpio_pin_set),
DEVMETHOD(gpio_pin_toggle, bcm_gpio_pin_toggle),
+ /* Bus interface */
+ DEVMETHOD(bus_activate_resource, bcm_gpio_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bcm_gpio_deactivate_resource),
+ DEVMETHOD(bus_config_intr, bcm_gpio_config_intr),
+ DEVMETHOD(bus_setup_intr, bcm_gpio_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bcm_gpio_teardown_intr),
+
/* ofw_bus interface */
DEVMETHOD(ofw_bus_get_node, bcm_gpio_get_node),
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
index 5f31478266e3..11cf3bc36c8b 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
@@ -29,32 +29,17 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/bio.h>
#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/endian.h>
#include <sys/kernel.h>
-#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
-#include <sys/queue.h>
-#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
-#include <sys/time.h>
-#include <sys/timetc.h>
-#include <sys/watchdog.h>
-
-#include <sys/kdb.h>
#include <machine/bus.h>
-#include <machine/cpu.h>
-#include <machine/cpufunc.h>
-#include <machine/resource.h>
-#include <machine/intr.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
@@ -82,19 +67,9 @@ __FBSDID("$FreeBSD$");
#define dprintf(fmt, args...)
#endif
-/*
- * Arasan HC seems to have problem with Data CRC on lower frequencies.
- * Use this tunable to cap initialization sequence frequency at higher
- * value. Default is standard 400kHz.
- * HS mode brings too many problems for most of cards, so disable HS mode
- * until a better fix comes up.
- * HS mode still can be enabled with the tunable.
- */
-static int bcm2835_sdhci_min_freq = 400000;
static int bcm2835_sdhci_hs = 1;
static int bcm2835_sdhci_pio_mode = 0;
-TUNABLE_INT("hw.bcm2835.sdhci.min_freq", &bcm2835_sdhci_min_freq);
TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs);
TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode);
@@ -211,16 +186,12 @@ bcm_sdhci_attach(device_t dev)
RF_ACTIVE);
if (!sc->sc_irq_res) {
device_printf(dev, "cannot allocate interrupt\n");
- bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
err = ENXIO;
goto fail;
}
if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand))
- {
- bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
- bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
+ NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) {
device_printf(dev, "cannot setup interrupt handler\n");
err = ENXIO;
goto fail;
@@ -286,6 +257,7 @@ fail:
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
if (sc->sc_mem_res)
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
+ mtx_destroy(&sc->sc_mtx);
return (err);
}
@@ -422,13 +394,6 @@ bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}
-static uint32_t
-bcm_sdhci_min_freq(device_t dev, struct sdhci_slot *slot)
-{
-
- return bcm2835_sdhci_min_freq;
-}
-
static void
bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
{
@@ -681,7 +646,6 @@ static device_method_t bcm_sdhci_methods[] = {
DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
- DEVMETHOD(sdhci_min_freq, bcm_sdhci_min_freq),
/* Platform transfer methods */
DEVMETHOD(sdhci_platform_will_handle, bcm_sdhci_will_handle_transfer),
DEVMETHOD(sdhci_platform_start_transfer, bcm_sdhci_start_transfer),
diff --git a/sys/arm/broadcom/bcm2835/files.bcm2835 b/sys/arm/broadcom/bcm2835/files.bcm2835
index 53fd12f5c3d1..89d6584fa321 100644
--- a/sys/arm/broadcom/bcm2835/files.bcm2835
+++ b/sys/arm/broadcom/bcm2835/files.bcm2835
@@ -27,3 +27,26 @@ kern/kern_clocksource.c standard
dev/mbox/mbox_if.m standard
dev/ofw/ofw_cpu.c standard
+
+arm/broadcom/bcm2835/bcm2835_audio.c optional sound vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+
+# VideoCore driver
+contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq \
+ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq \
+ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
+contrib/vchiq/interface/vchiq_arm/vchiq_util.c optional vchiq \
+ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
diff --git a/sys/arm/broadcom/bcm2835/vc_vchi_audioserv_defs.h b/sys/arm/broadcom/bcm2835/vc_vchi_audioserv_defs.h
new file mode 100644
index 000000000000..143c54385916
--- /dev/null
+++ b/sys/arm/broadcom/bcm2835/vc_vchi_audioserv_defs.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2012, Broadcom Europe Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VC_AUDIO_DEFS_H_
+#define _VC_AUDIO_DEFS_H_
+
+#define VC_AUDIOSERV_MIN_VER 1
+#define VC_AUDIOSERV_VER 2
+
+/* FourCC code used for VCHI connection */
+#define VC_AUDIO_SERVER_NAME MAKE_FOURCC("AUDS")
+
+/* Maximum message length */
+#define VC_AUDIO_MAX_MSG_LEN (sizeof( VC_AUDIO_MSG_T ))
+
+/*
+ * List of message types that are currently supported
+ * All message types supported for HOST->VC direction
+ */
+typedef enum
+{
+ VC_AUDIO_MSG_TYPE_RESULT, /* Generic result */
+ VC_AUDIO_MSG_TYPE_COMPLETE, /* playback of samples complete */
+ VC_AUDIO_MSG_TYPE_CONFIG, /* Configure */
+ VC_AUDIO_MSG_TYPE_CONTROL, /* control */
+ VC_AUDIO_MSG_TYPE_OPEN, /* open */
+ VC_AUDIO_MSG_TYPE_CLOSE, /* close/shutdown */
+ VC_AUDIO_MSG_TYPE_START, /* start output (i.e. resume) */
+ VC_AUDIO_MSG_TYPE_STOP, /* stop output (i.e. pause) */
+ VC_AUDIO_MSG_TYPE_WRITE, /* write samples */
+ VC_AUDIO_MSG_TYPE_MAX
+
+} VC_AUDIO_MSG_TYPE;
+
+static const char *vc_audio_msg_type_names[] = {
+ "VC_AUDIO_MSG_TYPE_RESULT",
+ "VC_AUDIO_MSG_TYPE_COMPLETE",
+ "VC_AUDIO_MSG_TYPE_CONFIG",
+ "VC_AUDIO_MSG_TYPE_CONTROL",
+ "VC_AUDIO_MSG_TYPE_OPEN",
+ "VC_AUDIO_MSG_TYPE_CLOSE",
+ "VC_AUDIO_MSG_TYPE_START",
+ "VC_AUDIO_MSG_TYPE_STOP",
+ "VC_AUDIO_MSG_TYPE_WRITE",
+ "VC_AUDIO_MSG_TYPE_MAX"
+};
+
+/* configure the audio */
+typedef struct
+{
+ uint32_t channels;
+ uint32_t samplerate;
+ uint32_t bps;
+
+} VC_AUDIO_CONFIG_T;
+
+typedef struct
+{
+ uint32_t volume;
+ uint32_t dest;
+
+} VC_AUDIO_CONTROL_T;
+
+typedef struct
+{
+ uint32_t dummy;
+
+} VC_AUDIO_OPEN_T;
+
+typedef struct
+{
+ uint32_t dummy;
+
+} VC_AUDIO_CLOSE_T;
+
+typedef struct
+{
+ uint32_t dummy;
+
+} VC_AUDIO_START_T;
+
+typedef struct
+{
+ uint32_t draining;
+
+} VC_AUDIO_STOP_T;
+
+typedef struct
+{
+ uint32_t count; /* in bytes */
+ void *callback;
+ void *cookie;
+ uint16_t silence;
+ uint16_t max_packet;
+} VC_AUDIO_WRITE_T;
+
+/* Generic result for a request (VC->HOST) */
+typedef struct
+{
+ int32_t success; /* Success value */
+
+} VC_AUDIO_RESULT_T;
+
+/* Generic result for a request (VC->HOST) */
+typedef struct
+{
+ int32_t count; /* Success value */
+ void *callback;
+ void *cookie;
+} VC_AUDIO_COMPLETE_T;
+
+/* Message header for all messages in HOST->VC direction */
+typedef struct
+{
+ int32_t type; /* Message type (VC_AUDIO_MSG_TYPE) */
+ union
+ {
+ VC_AUDIO_CONFIG_T config;
+ VC_AUDIO_CONTROL_T control;
+ VC_AUDIO_OPEN_T open;
+ VC_AUDIO_CLOSE_T close;
+ VC_AUDIO_START_T start;
+ VC_AUDIO_STOP_T stop;
+ VC_AUDIO_WRITE_T write;
+ VC_AUDIO_RESULT_T result;
+ VC_AUDIO_COMPLETE_T complete;
+ } u;
+} VC_AUDIO_MSG_T;
+
+#endif /* _VC_AUDIO_DEFS_H_ */
diff --git a/sys/arm/conf/BEAGLEBONE b/sys/arm/conf/BEAGLEBONE
index 44e5c4e6c17e..975d6b241551 100644
--- a/sys/arm/conf/BEAGLEBONE
+++ b/sys/arm/conf/BEAGLEBONE
@@ -27,6 +27,12 @@ include "../ti/am335x/std.am335x"
makeoptions WITHOUT_MODULES="ahc"
+# DTrace support
+options KDTRACE_HOOKS # Kernel DTrace hooks
+options DDB_CTF # all architectures - kernel ELF linker loads CTF data
+makeoptions WITH_CTF=1
+makeoptions MODULES_OVERRIDE="opensolaris dtrace dtrace/lockstat dtrace/profile dtrace/fbt"
+
options HZ=100
options SCHED_4BSD # 4BSD scheduler
options PREEMPTION # Enable kernel thread preemption
diff --git a/sys/arm/conf/RPI-B b/sys/arm/conf/RPI-B
index 0b886dd0582e..77c6fa0290eb 100644
--- a/sys/arm/conf/RPI-B
+++ b/sys/arm/conf/RPI-B
@@ -129,9 +129,13 @@ device smsc
device spibus
device bcm2835_spi
+device vchiq
+device sound
+
# Flattened Device Tree
options FDT # Configure using FDT/DTB data
# Note: DTB is normally loaded and modified by RPi boot loader, then
# handed to kernel via U-Boot and ubldr.
#options FDT_DTB_STATIC
-makeoptions FDT_DTS_FILE=rpi.dts
+#makeoptions FDT_DTS_FILE=rpi.dts
+makeoptions MODULES_EXTRA=dtb/rpi
diff --git a/sys/arm/include/armreg.h b/sys/arm/include/armreg.h
index 4d0078cce853..3163ba5bf46d 100644
--- a/sys/arm/include/armreg.h
+++ b/sys/arm/include/armreg.h
@@ -320,6 +320,9 @@
#define CPU_CT_S (1U << 24) /* split cache */
#define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */
#define CPU_CT_FORMAT(x) ((x) >> 29)
+/* Cache type register definitions for ARM v7 */
+#define CPU_CT_IMINLINE(x) ((x) & 0xf) /* I$ min line size */
+#define CPU_CT_DMINLINE(x) (((x) >> 16) & 0xf) /* D$ min line size */
#define CPU_CT_CTYPE_WT 0 /* write-through */
#define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */
diff --git a/sys/arm/include/cpufunc.h b/sys/arm/include/cpufunc.h
index ae2f979ba203..25e5efe25551 100644
--- a/sys/arm/include/cpufunc.h
+++ b/sys/arm/include/cpufunc.h
@@ -175,7 +175,7 @@ struct cpu_functions {
extern struct cpu_functions cpufuncs;
extern u_int cputype;
-#define cpu_id() cpufuncs.cf_id()
+#define cpu_ident() cpufuncs.cf_id()
#define cpu_cpwait() cpufuncs.cf_cpwait()
#define cpu_control(c, e) cpufuncs.cf_control(c, e)
diff --git a/sys/arm/ti/ti_gpio.c b/sys/arm/ti/ti_gpio.c
index 63680c647157..d0ce91846f0b 100644
--- a/sys/arm/ti/ti_gpio.c
+++ b/sys/arm/ti/ti_gpio.c
@@ -1004,7 +1004,7 @@ ti_gpio_config_intr(device_t dev, int irq, enum intr_trigger trig,
val |= TI_GPIO_MASK(irq);
ti_gpio_write_4(sc, TI_GPIO_BANK(irq), reg, val);
}
- if (oldreg != 0) {
+ if (reg != oldreg && oldreg != 0) {
/* Remove the old settings. */
val = ti_gpio_read_4(sc, TI_GPIO_BANK(irq), oldreg);
val &= ~TI_GPIO_MASK(irq);
diff --git a/sys/arm/xscale/ixp425/if_npe.c b/sys/arm/xscale/ixp425/if_npe.c
index d6db7e6134fd..fa3eacbeed8c 100644
--- a/sys/arm/xscale/ixp425/if_npe.c
+++ b/sys/arm/xscale/ixp425/if_npe.c
@@ -285,7 +285,7 @@ unit2npeid(int unit)
};
/* XXX check feature register instead */
return (unit < 3 ? npeidmap[
- (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
+ (cpu_ident() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}
static int
diff --git a/sys/boot/amd64/boot1.efi/fat.tmpl.bz2.uu b/sys/boot/amd64/boot1.efi/fat.tmpl.bz2.uu
index 52ed67a468cb..c9044eece9a6 100644
--- a/sys/boot/amd64/boot1.efi/fat.tmpl.bz2.uu
+++ b/sys/boot/amd64/boot1.efi/fat.tmpl.bz2.uu
@@ -2,20 +2,19 @@ FAT template boot filesystem created by generate-fat.sh
DO NOT EDIT
$FreeBSD$
begin 644 fat.tmpl.bz2
-M0EIH.3%!62936=AO?&0`&J9____[ZZKJJ_^N_ZO^Z_^[OO_\`5`(0!0&#$D"
-M0$)$2&(<P`(\K5,M9"5%&DQ---,FAH,0```T````R-!HQ`&30`-&"#)ID$JF
-M1)[U4_5-&@`T```#0&AH````````````(,F`$P`!,`1IA&````"9-,308`1@
-M````!))!BDVI/U0`]0R>H:`81HT#)D!H-#U`T#31IH-&(``R8$9_I)6[MY/,
-M(H=/()+4&!(3V0"20C3J5$L5@2`219,"T6JI,@0"2*2\=LAD6=>N6<O!'F<.
-MQ2`;9$.P]202R&N9I3BX[E0;C7/9BF'`((K?M1ZK0B2Q;BLJ9!(`21P&1:;B
-MDD)S!-L5BL5BL/173`@@M!!!!!X@BQ0@@@@@@CJ"A!!!"!`TWD!B!`@0($"J
-M9(E`B,1(D2)$B;V]S&-,&#!@P8,'1\*$+59`:41,';;/)32*G*Q52N![20F8
-M9CCYSB#!2=[EG.),$:P,>(8QSW'U+N42P^'5X@7X``23=EA``#Z,O)^-VTX@
-M`+E!=,&6PV11C:*D8K#^<%FTG-%!@PR72@\ZU0B<D6I$FT/1\<A#/`;ET5$)
-MKT-MBW97\J.P/H0ADE"(EBN6";@5#$2#!(\F/E8M+!8-A-S"+$U5[\J>F1Y]
-MF-FPGL2L>4QCU&O/>89^#H$6^<;&WKC9W52KUX."CM6+GD;(=1!MUD,,?Y[]
-MTLAG0];,:B^]M%BH0J1":_C-*2I9R3AS#,&0>$RCY'T/R?HR!?'5$MILQ:!"
-M+;10A*!&^<(_/8>D8<CDN9]HO)OH13W(Q())?'R2WTV9*G_4T<=Y!'1+'9,(
-J1-/V<ME.&9Q3LKI2S$(`$D,``'_=FF*).\[A.)#4HU1=R13A0D-AO?&0
+M0EIH.3%!629362AK*D(`&I+____[ZZKJZ_^N_ZO^Z_Z_OJ[L`4`!7I0$#&$"
+M0$!$3&(<P`(;J*C:0E0E#30&AH`T````9#0```9````#)ZF0:,-3U/409,`)
+M@`"8`C3",````$R:8F@P`C`````"24U,D>I-DTU,)ZAZ0VA-!M0T'J`>H#"9
+M'I#0-H&HQI&0&3&FH>H>*`JHHU3V]1%/4/2``T#0`!H``#0`````#1H,@``6
+M'1&G'&@?$6[T#A)?X8$A160"20BO#")0J4TB1*4GXF$B4I,&>43+=_?K=#3*
+M6]<E0HE`UBF?(J%8BRF#?8OQ2'D)`)(EL2;F4.'R>R"ZNKJZI,9*68E8*E2Q
+M4J5*E3'(1830A"$(12A-"<(0A#]VD)H0A"$,>I0FA"$(0I\>P^=F5:M6K5JU
+M:DI3:64UN;[7%5B]Y-^\]@_K@B:N\/,5F%&H<\G#IXQXAEFC&D?![6%0'6MR
+MX1@@%FC"FD`M7,/SXFNG:2`'-0<-C$8^+$N.7M1B,^6)9,DV9,0A\OL<:C"L
+ML1V&,<\9YRB>XV#BG")'6NKRK^("UF2XO?_L!#29">MGDF$R3).!PX&%E,4C
+M''=(FL1.`_3?CN@-IB2PI3!FF\<8X.X@D,>CA90I)#M$XRPNDFJELL<3=1?8
+M2B7\5Z64,!7Z;EEBW-MXN-4IJ@W$462]-*\YCR,-B,5[W?=3&L/U>SX,WV#\
+M\B`:I"'0Z)5"$1B.E)(K[5I4RS`%R$>Y\D0NR*,;<9CZ:^V3P(I?D<D#!UC)
+D^M-HEE3SAN-8O0FQ$(`$(DF`?ZQ]'U2F_XNY(IPH2!0UE2$`
`
end
diff --git a/sys/boot/amd64/boot1.efi/generate-fat.sh b/sys/boot/amd64/boot1.efi/generate-fat.sh
index e8f9cdedd4b5..04d0ed25bf88 100755
--- a/sys/boot/amd64/boot1.efi/generate-fat.sh
+++ b/sys/boot/amd64/boot1.efi/generate-fat.sh
@@ -20,7 +20,7 @@ OUTPUT_FILE=fat.tmpl
dd if=/dev/zero of=$OUTPUT_FILE bs=512 count=$FAT_SIZE
DEVICE=`mdconfig -a -f $OUTPUT_FILE`
-newfs_msdos -F 12 $DEVICE
+newfs_msdos -F 12 -L EFI $DEVICE
mkdir stub
mount -t msdosfs /dev/$DEVICE stub
diff --git a/sys/boot/amd64/efi/main.c b/sys/boot/amd64/efi/main.c
index 2262a1d03143..9d57fb2f2ad8 100644
--- a/sys/boot/amd64/efi/main.c
+++ b/sys/boot/amd64/efi/main.c
@@ -53,6 +53,10 @@ EFI_GUID imgid = LOADED_IMAGE_PROTOCOL;
EFI_GUID mps = MPS_TABLE_GUID;
EFI_GUID netid = EFI_SIMPLE_NETWORK_PROTOCOL;
EFI_GUID smbios = SMBIOS_TABLE_GUID;
+EFI_GUID dxe = DXE_SERVICES_TABLE_GUID;
+EFI_GUID hoblist = HOB_LIST_TABLE_GUID;
+EFI_GUID memtype = MEMORY_TYPE_INFORMATION_TABLE_GUID;
+EFI_GUID debugimg = DEBUG_IMAGE_INFO_TABLE_GUID;
EFI_STATUS
main(int argc, CHAR16 *argv[])
@@ -264,6 +268,14 @@ command_configuration(int argc, char *argv[])
printf("ACPI 2.0 Table");
else if (!memcmp(guid, &smbios, sizeof(EFI_GUID)))
printf("SMBIOS Table");
+ else if (!memcmp(guid, &dxe, sizeof(EFI_GUID)))
+ printf("DXE Table");
+ else if (!memcmp(guid, &hoblist, sizeof(EFI_GUID)))
+ printf("HOB List Table");
+ else if (!memcmp(guid, &memtype, sizeof(EFI_GUID)))
+ printf("Memory Type Information Table");
+ else if (!memcmp(guid, &debugimg, sizeof(EFI_GUID)))
+ printf("Debug Image Info Table");
else
printf("Unknown Table (%s)", guid_to_string(guid));
printf(" at %p\n", ST->ConfigurationTable[i].VendorTable);
@@ -319,7 +331,7 @@ command_mode(int argc, char *argv[])
}
if (i != 0)
- printf("Choose the mode with \"col <mode number>\"\n");
+ printf("Choose the mode with \"col <mode number>\"\n");
return (CMD_OK);
}
diff --git a/sys/boot/arm/ixp425/boot2/ixp425_board.c b/sys/boot/arm/ixp425/boot2/ixp425_board.c
index c13f8ed52694..d2bf81398e9a 100644
--- a/sys/boot/arm/ixp425/boot2/ixp425_board.c
+++ b/sys/boot/arm/ixp425/boot2/ixp425_board.c
@@ -74,7 +74,7 @@ board_init(void)
{
struct board_config **pbp;
- cputype = cpu_id() & CPU_ID_CPU_MASK;
+ cputype = cpu_ident() & CPU_ID_CPU_MASK;
SET_FOREACH(pbp, boards)
/* XXX pass down redboot board type */
diff --git a/sys/boot/efi/include/efiapi.h b/sys/boot/efi/include/efiapi.h
index 623346d6016d..9c2dfbbb74e1 100644
--- a/sys/boot/efi/include/efiapi.h
+++ b/sys/boot/efi/include/efiapi.h
@@ -88,7 +88,7 @@ EFI_STATUS
IN VOID *Buffer
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_SET_VIRTUAL_ADDRESS_MAP) (
IN UINTN MemoryMapSize,
@@ -103,7 +103,7 @@ EFI_STATUS
#define EFI_INTERNAL_PTR 0x00000004 // Pointer to internal runtime data
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_CONVERT_POINTER) (
IN UINTN DebugDisposition,
@@ -168,7 +168,7 @@ EFI_STATUS
IN EFI_EVENT Event
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_WAIT_FOR_EVENT) (
IN UINTN NumberOfEvents,
@@ -194,8 +194,8 @@ EFI_STATUS
#define TPL_APPLICATION 4
#define TPL_CALLBACK 8
-#define TPL_NOTIFY 16
-#define TPL_HIGH_LEVEL 31
+#define TPL_NOTIFY 16
+#define TPL_HIGH_LEVEL 31
typedef
EFI_TPL
@@ -320,14 +320,14 @@ EFI_STATUS
// Image Entry prototype
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_ENTRY_POINT) (
IN EFI_HANDLE ImageHandle,
IN struct _EFI_SYSTEM_TABLE *SystemTable
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_LOAD) (
IN BOOLEAN BootPolicy,
@@ -338,7 +338,7 @@ EFI_STATUS
OUT EFI_HANDLE *ImageHandle
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_START) (
IN EFI_HANDLE ImageHandle,
@@ -355,7 +355,7 @@ EFI_STATUS
IN CHAR16 *ExitData OPTIONAL
);
-typedef
+typedef
EFI_STATUS
(EFIAPI *EFI_IMAGE_UNLOAD) (
IN EFI_HANDLE ImageHandle
@@ -491,7 +491,7 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_REGISTER_PROTOCOL_NOTIFY) (
IN EFI_GUID *Protocol,
IN EFI_EVENT Event,
@@ -535,7 +535,7 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_CONNECT_CONTROLLER) (
IN EFI_HANDLE ControllerHandle,
IN EFI_HANDLE *DriverImageHandle OPTIONAL,
@@ -544,19 +544,19 @@ EFI_STATUS
);
typedef
-EFI_STATUS
+EFI_STATUS
(EFIAPI *EFI_DISCONNECT_CONTROLLER)(
IN EFI_HANDLE ControllerHandle,
IN EFI_HANDLE DriverImageHandle, OPTIONAL
IN EFI_HANDLE ChildHandle OPTIONAL
- );
+ );
-#define EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL 0x00000001
-#define EFI_OPEN_PROTOCOL_GET_PROTOCOL 0x00000002
-#define EFI_OPEN_PROTOCOL_TEST_PROTOCOL 0x00000004
-#define EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER 0x00000008
-#define EFI_OPEN_PROTOCOL_BY_DRIVER 0x00000010
-#define EFI_OPEN_PROTOCOL_EXCLUSIVE 0x00000020
+#define EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL 0x00000001
+#define EFI_OPEN_PROTOCOL_GET_PROTOCOL 0x00000002
+#define EFI_OPEN_PROTOCOL_TEST_PROTOCOL 0x00000004
+#define EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER 0x00000008
+#define EFI_OPEN_PROTOCOL_BY_DRIVER 0x00000010
+#define EFI_OPEN_PROTOCOL_EXCLUSIVE 0x00000020
typedef
EFI_STATUS
@@ -804,7 +804,7 @@ typedef struct {
//
EFI_PROTOCOLS_PER_HANDLE ProtocolsPerHandle;
EFI_LOCATE_HANDLE_BUFFER LocateHandleBuffer;
- EFI_LOCATE_PROTOCOL LocateProtocol;
+ EFI_LOCATE_PROTOCOL LocateProtocol;
EFI_INSTALL_MULTIPLE_PROTOCOL_INTERFACES InstallMultipleProtocolInterfaces;
EFI_UNINSTALL_MULTIPLE_PROTOCOL_INTERFACES UninstallMultipleProtocolInterfaces;
@@ -845,6 +845,18 @@ typedef struct {
#define FDT_TABLE_GUID \
{ 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 }
+#define DXE_SERVICES_TABLE_GUID \
+ { 0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9 }
+
+#define HOB_LIST_TABLE_GUID \
+ { 0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d }
+
+#define MEMORY_TYPE_INFORMATION_TABLE_GUID \
+ { 0x4c19049f, 0x4137, 0x4dd3, 0x9c, 0x10, 0x8b, 0x97, 0xa8, 0x3f, 0xfd, 0xfa }
+
+#define DEBUG_IMAGE_INFO_TABLE_GUID \
+ { 0x49152e77, 0x1ada, 0x4764, 0xb7, 0xa2, 0x7a, 0xfe, 0xfe, 0xd9, 0x5e, 0x8b }
+
typedef struct _EFI_CONFIGURATION_TABLE {
EFI_GUID VendorGuid;
VOID *VendorTable;
diff --git a/sys/boot/forth/beastie.4th b/sys/boot/forth/beastie.4th
index 6512d739c271..8d2424408bbc 100644
--- a/sys/boot/forth/beastie.4th
+++ b/sys/boot/forth/beastie.4th
@@ -89,7 +89,7 @@ variable logoY
0 25 at-xy
;
-: fbsdbw-logo ( x y -- ) \ "FreeBSD" logo in B/W (12 rows x 21 columns)
+: fbsdbw-logo ( x y -- ) \ "FreeBSD" logo in B/W (13 rows x 21 columns)
\ We used to use the beastie himself as our default... until the
\ eventual complaint derided his reign of the advanced boot-menu.
@@ -106,16 +106,17 @@ variable logoY
5 + swap 6 + swap
2dup at-xy ." ______" 1+
- 2dup at-xy ." | ____|" 1+
- 2dup at-xy ." | |__ _ __ ___ ___ " 1+
- 2dup at-xy ." | __|| '__/ _ \/ _ \" 1+
- 2dup at-xy ." | | | | | __/ __/" 1+
+ 2dup at-xy ." | ____| __ ___ ___ " 1+
+ 2dup at-xy ." | |__ | '__/ _ \/ _ \" 1+
+ 2dup at-xy ." | __|| | | __/ __/" 1+
+ 2dup at-xy ." | | | | | | |" 1+
2dup at-xy ." |_| |_| \___|\___|" 1+
2dup at-xy ." ____ _____ _____" 1+
2dup at-xy ." | _ \ / ____| __ \" 1+
2dup at-xy ." | |_) | (___ | | | |" 1+
2dup at-xy ." | _ < \___ \| | | |" 1+
2dup at-xy ." | |_) |____) | |__| |" 1+
+ 2dup at-xy ." | | | |" 1+
at-xy ." |____/|_____/|_____/"
\ Put the cursor back at the bottom
diff --git a/sys/boot/forth/brand.4th b/sys/boot/forth/brand.4th
index 3dda97cc3e2f..28d3c5c1d26f 100644
--- a/sys/boot/forth/brand.4th
+++ b/sys/boot/forth/brand.4th
@@ -33,13 +33,14 @@ variable brandY
2 brandX !
1 brandY !
-: fbsd-logo ( x y -- ) \ "FreeBSD" [wide] logo in B/W (6 rows x 42 columns)
+: fbsd-logo ( x y -- ) \ "FreeBSD" [wide] logo in B/W (7 rows x 42 columns)
2dup at-xy ." ______ ____ _____ _____ " 1+
2dup at-xy ." | ____| | _ \ / ____| __ \ " 1+
2dup at-xy ." | |___ _ __ ___ ___ | |_) | (___ | | | |" 1+
2dup at-xy ." | ___| '__/ _ \/ _ \| _ < \___ \| | | |" 1+
2dup at-xy ." | | | | | __/ __/| |_) |____) | |__| |" 1+
+ 2dup at-xy ." | | | | | | || | | |" 1+
at-xy ." |_| |_| \___|\___||____/|_____/|_____/ "
\ Put the cursor back at the bottom
diff --git a/sys/boot/pc98/boot2/Makefile b/sys/boot/pc98/boot2/Makefile
index 3d571cfebee5..24de9d4a3da3 100644
--- a/sys/boot/pc98/boot2/Makefile
+++ b/sys/boot/pc98/boot2/Makefile
@@ -92,6 +92,7 @@ boot2.out: ${BTXCRT} boot2.o sio.o
${LD} ${LD_FLAGS} -Ttext ${ORG2} -o ${.TARGET} ${.ALLSRC}
boot2.o: boot2.s
+ ${CC} ${ACFLAGS} -c boot2.s
SRCS= boot2.c boot2.h
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 7ee7d79bd352..61b7a2f3f6e0 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -1147,6 +1147,7 @@ struct ccb_dev_advinfo {
#define CDAI_TYPE_SERIAL_NUM 2
#define CDAI_TYPE_PHYS_PATH 3
#define CDAI_TYPE_RCAPLONG 4
+#define CDAI_TYPE_EXT_INQ 5
off_t bufsiz; /* IN: Size of external buffer */
#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
off_t provsiz; /* OUT: Size required/used */
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 33bc2ac17ca7..75d81e6f41b1 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -4795,6 +4795,7 @@ xpt_release_device(struct cam_ed *device)
*/
free(device->supported_vpds, M_CAMXPT);
free(device->device_id, M_CAMXPT);
+ free(device->ext_inq, M_CAMXPT);
free(device->physpath, M_CAMXPT);
free(device->rcap_buf, M_CAMXPT);
free(device->serial_num, M_CAMXPT);
diff --git a/sys/cam/cam_xpt_internal.h b/sys/cam/cam_xpt_internal.h
index f8c6498d1f9a..23d6d34cff79 100644
--- a/sys/cam/cam_xpt_internal.h
+++ b/sys/cam/cam_xpt_internal.h
@@ -83,6 +83,8 @@ struct cam_ed {
uint8_t supported_vpds_len;
uint32_t device_id_len;
uint8_t *device_id;
+ uint32_t ext_inq_len;
+ uint8_t *ext_inq;
uint8_t physpath_len;
uint8_t *physpath; /* physical path string form */
uint32_t rcap_len;
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index e6305aab1bc9..defce21a5369 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -4483,6 +4483,8 @@ ctl_init_log_page_index(struct ctl_lun *lun)
lun->log_pages.index[1].page_len = k * 2;
lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
+ lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
+ lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
return (CTL_RETVAL_COMPLETE);
}
@@ -4720,6 +4722,9 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
lun->serseq = CTL_LUN_SERSEQ_OFF;
lun->ctl_softc = ctl_softc;
+#ifdef CTL_TIME_IO
+ lun->last_busy = getsbinuptime();
+#endif
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
STAILQ_INIT(&lun->error_list);
@@ -7085,6 +7090,67 @@ ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
}
int
+ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ int pc)
+{
+ struct ctl_lun *lun;
+ struct stat_page *data;
+ uint64_t rn, wn, rb, wb;
+ struct bintime rt, wt;
+ int i;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ data = (struct stat_page *)page_index->page_data;
+
+ scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
+ data->sap.hdr.param_control = SLP_LBIN;
+ data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
+ sizeof(struct scsi_log_param_header);
+ rn = wn = rb = wb = 0;
+ bintime_clear(&rt);
+ bintime_clear(&wt);
+ for (i = 0; i < CTL_MAX_PORTS; i++) {
+ rn += lun->stats.ports[i].operations[CTL_STATS_READ];
+ wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
+ rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
+ wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
+ bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
+ bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
+ }
+ scsi_u64to8b(rn, data->sap.read_num);
+ scsi_u64to8b(wn, data->sap.write_num);
+ if (lun->stats.blocksize > 0) {
+ scsi_u64to8b(wb / lun->stats.blocksize,
+ data->sap.received_lba);
+ scsi_u64to8b(rb / lun->stats.blocksize,
+ data->sap.transmitted_lba);
+ }
+ scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
+ data->sap.read_int);
+ scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
+ data->sap.write_int);
+ scsi_u64to8b(0, data->sap.weighted_num);
+ scsi_u64to8b(0, data->sap.weighted_int);
+ scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
+ data->it.hdr.param_control = SLP_LBIN;
+ data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
+ sizeof(struct scsi_log_param_header);
+#ifdef CTL_TIME_IO
+ scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
+#endif
+ scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
+ data->ti.hdr.param_control = SLP_LBIN;
+ data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
+ sizeof(struct scsi_log_param_header);
+ scsi_ulto4b(3, data->ti.exponent);
+ scsi_ulto4b(1, data->ti.integer);
+
+ page_index->page_len = sizeof(*data);
+ return (0);
+}
+
+int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
@@ -11689,6 +11755,12 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
* Every I/O goes into the OOA queue for a
* particular LUN, and stays there until completion.
*/
+#ifdef CTL_TIME_IO
+ if (TAILQ_EMPTY(&lun->ooa_queue)) {
+ lun->idle_time += getsbinuptime() -
+ lun->last_busy;
+ }
+#endif
TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
ooa_links);
}
@@ -12286,64 +12358,57 @@ ctl_abort_task(union ctl_io *io)
printf("%s\n", sbuf_data(&sb));
#endif
- if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
- && (xio->io_hdr.nexus.initid.id ==
- io->io_hdr.nexus.initid.id)) {
- /*
- * If the abort says that the task is untagged, the
- * task in the queue must be untagged. Otherwise,
- * we just check to see whether the tag numbers
- * match. This is because the QLogic firmware
- * doesn't pass back the tag type in an abort
- * request.
- */
+ if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
+ || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id)
+ || (xio->io_hdr.flags & CTL_FLAG_ABORT))
+ continue;
+
+ /*
+ * If the abort says that the task is untagged, the
+ * task in the queue must be untagged. Otherwise,
+ * we just check to see whether the tag numbers
+ * match. This is because the QLogic firmware
+ * doesn't pass back the tag type in an abort
+ * request.
+ */
#if 0
- if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
- && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
- || (xio->scsiio.tag_num == io->taskio.tag_num)) {
+ if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
+ || (xio->scsiio.tag_num == io->taskio.tag_num)) {
#endif
- /*
- * XXX KDM we've got problems with FC, because it
- * doesn't send down a tag type with aborts. So we
- * can only really go by the tag number...
- * This may cause problems with parallel SCSI.
- * Need to figure that out!!
- */
- if (xio->scsiio.tag_num == io->taskio.tag_num) {
- xio->io_hdr.flags |= CTL_FLAG_ABORT;
- found = 1;
- if ((io->io_hdr.flags &
- CTL_FLAG_FROM_OTHER_SC) == 0 &&
- !(lun->flags & CTL_LUN_PRIMARY_SC)) {
- union ctl_ha_msg msg_info;
-
- io->io_hdr.flags |=
- CTL_FLAG_SENT_2OTHER_SC;
- msg_info.hdr.nexus = io->io_hdr.nexus;
- msg_info.task.task_action =
- CTL_TASK_ABORT_TASK;
- msg_info.task.tag_num =
- io->taskio.tag_num;
- msg_info.task.tag_type =
- io->taskio.tag_type;
- msg_info.hdr.msg_type =
- CTL_MSG_MANAGE_TASKS;
- msg_info.hdr.original_sc = NULL;
- msg_info.hdr.serializing_sc = NULL;
+ /*
+ * XXX KDM we've got problems with FC, because it
+ * doesn't send down a tag type with aborts. So we
+ * can only really go by the tag number...
+ * This may cause problems with parallel SCSI.
+ * Need to figure that out!!
+ */
+ if (xio->scsiio.tag_num == io->taskio.tag_num) {
+ xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ found = 1;
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
+ !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+ union ctl_ha_msg msg_info;
+
+ io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+ msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+ msg_info.task.tag_num = io->taskio.tag_num;
+ msg_info.task.tag_type = io->taskio.tag_type;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
#if 0
- printf("Sent Abort to other side\n");
+ printf("Sent Abort to other side\n");
#endif
- if (CTL_HA_STATUS_SUCCESS !=
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info,
- sizeof(msg_info), 0)) {
- }
+ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info, sizeof(msg_info), 0) !=
+ CTL_HA_STATUS_SUCCESS) {
}
+ }
#if 0
- printf("ctl_abort_task: found I/O to abort\n");
+ printf("ctl_abort_task: found I/O to abort\n");
#endif
- break;
- }
}
}
mtx_unlock(&lun->lun_lock);
@@ -13742,6 +13807,10 @@ ctl_process_done(union ctl_io *io)
* Remove this from the OOA queue.
*/
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+#ifdef CTL_TIME_IO
+ if (TAILQ_EMPTY(&lun->ooa_queue))
+ lun->last_busy = getsbinuptime();
+#endif
/*
* Run through the blocked queue on this LUN and see if anything
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index 9cf967c39877..2693419414ea 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -181,6 +181,9 @@ int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc);
+int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index,
+ int pc);
int ctl_config_move_done(union ctl_io *io);
void ctl_datamove(union ctl_io *io);
void ctl_done(union ctl_io *io);
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index 817f9ffbdb96..2742c3da4893 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -164,6 +164,12 @@ static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request);
static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request);
static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request);
static void cfiscsi_session_terminate(struct cfiscsi_session *cs);
+static struct cfiscsi_data_wait *cfiscsi_data_wait_new(
+ struct cfiscsi_session *cs, union ctl_io *io,
+ uint32_t initiator_task_tag,
+ uint32_t *target_transfer_tagp);
+static void cfiscsi_data_wait_free(struct cfiscsi_session *cs,
+ struct cfiscsi_data_wait *cdw);
static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc
*softc, const char *name, uint16_t tag);
static struct cfiscsi_target *cfiscsi_target_find_or_create(
@@ -929,7 +935,7 @@ cfiscsi_pdu_handle_data_out(struct icl_pdu *request)
CFISCSI_SESSION_UNLOCK(cs);
done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end ||
io->scsiio.ext_data_filled == io->scsiio.kern_data_len);
- uma_zfree(cfiscsi_data_wait_zone, cdw);
+ cfiscsi_data_wait_free(cs, cdw);
if (done)
io->scsiio.be_move_done(io);
else
@@ -1067,6 +1073,45 @@ cfiscsi_callout(void *context)
cfiscsi_pdu_queue(cp);
}
+static struct cfiscsi_data_wait *
+cfiscsi_data_wait_new(struct cfiscsi_session *cs, union ctl_io *io,
+ uint32_t initiator_task_tag, uint32_t *target_transfer_tagp)
+{
+ struct cfiscsi_data_wait *cdw;
+ int error;
+
+ cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO);
+ if (cdw == NULL) {
+ CFISCSI_SESSION_WARN(cs,
+ "failed to allocate %zd bytes", sizeof(*cdw));
+ return (NULL);
+ }
+
+ error = icl_conn_transfer_setup(cs->cs_conn, io, target_transfer_tagp,
+ &cdw->cdw_icl_prv);
+ if (error != 0) {
+ CFISCSI_SESSION_WARN(cs,
+ "icl_conn_transfer_setup() failed with error %d", error);
+ uma_zfree(cfiscsi_data_wait_zone, cdw);
+ return (NULL);
+ }
+
+ cdw->cdw_ctl_io = io;
+ cdw->cdw_target_transfer_tag = *target_transfer_tagp;
+ cdw->cdw_initiator_task_tag = initiator_task_tag;
+
+ return (cdw);
+}
+
+static void
+cfiscsi_data_wait_free(struct cfiscsi_session *cs,
+ struct cfiscsi_data_wait *cdw)
+{
+
+ icl_conn_transfer_done(cs->cs_conn, cdw->cdw_icl_prv);
+ uma_zfree(cfiscsi_data_wait_zone, cdw);
+}
+
static void
cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
{
@@ -1106,7 +1151,7 @@ cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
*/
cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42;
cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
- uma_zfree(cfiscsi_data_wait_zone, cdw);
+ cfiscsi_data_wait_free(cs, cdw);
CFISCSI_SESSION_LOCK(cs);
}
CFISCSI_SESSION_UNLOCK(cs);
@@ -1222,7 +1267,7 @@ cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs)
}
static struct cfiscsi_session *
-cfiscsi_session_new(struct cfiscsi_softc *softc)
+cfiscsi_session_new(struct cfiscsi_softc *softc, const char *offload)
{
struct cfiscsi_session *cs;
int error;
@@ -1242,7 +1287,11 @@ cfiscsi_session_new(struct cfiscsi_softc *softc)
cv_init(&cs->cs_login_cv, "cfiscsi_login");
#endif
- cs->cs_conn = icl_new_conn(NULL, "cfiscsi", &cs->cs_lock);
+ cs->cs_conn = icl_new_conn(offload, "cfiscsi", &cs->cs_lock);
+ if (cs->cs_conn == NULL) {
+ free(cs, M_CFISCSI);
+ return (NULL);
+ }
cs->cs_conn->ic_receive = cfiscsi_receive_callback;
cs->cs_conn->ic_error = cfiscsi_error_callback;
cs->cs_conn->ic_prv0 = cs;
@@ -1325,7 +1374,7 @@ cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id)
{
struct cfiscsi_session *cs;
- cs = cfiscsi_session_new(&cfiscsi_softc);
+ cs = cfiscsi_session_new(&cfiscsi_softc, NULL);
if (cs == NULL) {
CFISCSI_WARN("failed to create session");
return;
@@ -1469,7 +1518,7 @@ cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
mtx_unlock(&cfiscsi_softc.lock);
} else {
#endif
- cs = cfiscsi_session_new(softc);
+ cs = cfiscsi_session_new(softc, cihp->offload);
if (cs == NULL) {
ci->status = CTL_ISCSI_ERROR;
snprintf(ci->error_str, sizeof(ci->error_str),
@@ -1620,6 +1669,7 @@ cfiscsi_ioctl_list(struct ctl_iscsi *ci)
"<max_data_segment_length>%zd</max_data_segment_length>"
"<immediate_data>%d</immediate_data>"
"<iser>%d</iser>"
+ "<offload>%s</offload>"
"</connection>\n",
cs->cs_id,
cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias,
@@ -1629,7 +1679,8 @@ cfiscsi_ioctl_list(struct ctl_iscsi *ci)
cs->cs_conn->ic_data_crc32c ? "CRC32C" : "None",
cs->cs_max_data_segment_length,
cs->cs_immediate_data,
- cs->cs_conn->ic_iser);
+ cs->cs_conn->ic_iser,
+ cs->cs_conn->ic_offload);
if (error != 0)
break;
}
@@ -1651,41 +1702,40 @@ cfiscsi_ioctl_list(struct ctl_iscsi *ci)
}
static void
-cfiscsi_ioctl_terminate(struct ctl_iscsi *ci)
+cfiscsi_ioctl_logout(struct ctl_iscsi *ci)
{
struct icl_pdu *response;
struct iscsi_bhs_asynchronous_message *bhsam;
- struct ctl_iscsi_terminate_params *citp;
+ struct ctl_iscsi_logout_params *cilp;
struct cfiscsi_session *cs;
struct cfiscsi_softc *softc;
int found = 0;
- citp = (struct ctl_iscsi_terminate_params *)&(ci->data);
+ cilp = (struct ctl_iscsi_logout_params *)&(ci->data);
softc = &cfiscsi_softc;
mtx_lock(&softc->lock);
TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
- if (citp->all == 0 && cs->cs_id != citp->connection_id &&
- strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 &&
- strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0)
+ if (cilp->all == 0 && cs->cs_id != cilp->connection_id &&
+ strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 &&
+ strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0)
continue;
response = icl_pdu_new(cs->cs_conn, M_NOWAIT);
if (response == NULL) {
- /*
- * Oh well. Just terminate the connection.
- */
- } else {
- bhsam = (struct iscsi_bhs_asynchronous_message *)
- response->ip_bhs;
- bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
- bhsam->bhsam_flags = 0x80;
- bhsam->bhsam_0xffffffff = 0xffffffff;
- bhsam->bhsam_async_event =
- BHSAM_EVENT_TARGET_TERMINATES_SESSION;
- cfiscsi_pdu_queue(response);
+ ci->status = CTL_ISCSI_ERROR;
+ snprintf(ci->error_str, sizeof(ci->error_str),
+ "Unable to allocate memory");
+ mtx_unlock(&softc->lock);
+ return;
}
- cfiscsi_session_terminate(cs);
+ bhsam =
+ (struct iscsi_bhs_asynchronous_message *)response->ip_bhs;
+ bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
+ bhsam->bhsam_flags = 0x80;
+ bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT;
+ bhsam->bhsam_parameter3 = htons(10);
+ cfiscsi_pdu_queue(response);
found++;
}
mtx_unlock(&softc->lock);
@@ -1701,40 +1751,41 @@ cfiscsi_ioctl_terminate(struct ctl_iscsi *ci)
}
static void
-cfiscsi_ioctl_logout(struct ctl_iscsi *ci)
+cfiscsi_ioctl_terminate(struct ctl_iscsi *ci)
{
struct icl_pdu *response;
struct iscsi_bhs_asynchronous_message *bhsam;
- struct ctl_iscsi_logout_params *cilp;
+ struct ctl_iscsi_terminate_params *citp;
struct cfiscsi_session *cs;
struct cfiscsi_softc *softc;
int found = 0;
- cilp = (struct ctl_iscsi_logout_params *)&(ci->data);
+ citp = (struct ctl_iscsi_terminate_params *)&(ci->data);
softc = &cfiscsi_softc;
mtx_lock(&softc->lock);
TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
- if (cilp->all == 0 && cs->cs_id != cilp->connection_id &&
- strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 &&
- strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0)
+ if (citp->all == 0 && cs->cs_id != citp->connection_id &&
+ strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 &&
+ strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0)
continue;
response = icl_pdu_new(cs->cs_conn, M_NOWAIT);
if (response == NULL) {
- ci->status = CTL_ISCSI_ERROR;
- snprintf(ci->error_str, sizeof(ci->error_str),
- "Unable to allocate memory");
- mtx_unlock(&softc->lock);
- return;
+ /*
+ * Oh well. Just terminate the connection.
+ */
+ } else {
+ bhsam = (struct iscsi_bhs_asynchronous_message *)
+ response->ip_bhs;
+ bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
+ bhsam->bhsam_flags = 0x80;
+ bhsam->bhsam_0xffffffff = 0xffffffff;
+ bhsam->bhsam_async_event =
+ BHSAM_EVENT_TARGET_TERMINATES_SESSION;
+ cfiscsi_pdu_queue(response);
}
- bhsam =
- (struct iscsi_bhs_asynchronous_message *)response->ip_bhs;
- bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE;
- bhsam->bhsam_flags = 0x80;
- bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT;
- bhsam->bhsam_parameter3 = htons(10);
- cfiscsi_pdu_queue(response);
+ cfiscsi_session_terminate(cs);
found++;
}
mtx_unlock(&softc->lock);
@@ -1749,6 +1800,26 @@ cfiscsi_ioctl_logout(struct ctl_iscsi *ci)
ci->status = CTL_ISCSI_OK;
}
+static void
+cfiscsi_ioctl_limits(struct ctl_iscsi *ci)
+{
+ struct ctl_iscsi_limits_params *cilp;
+ int error;
+
+ cilp = (struct ctl_iscsi_limits_params *)&(ci->data);
+
+ error = icl_limits(cilp->offload, &cilp->data_segment_limit);
+ if (error != 0) {
+ ci->status = CTL_ISCSI_ERROR;
+ snprintf(ci->error_str, sizeof(ci->error_str),
+ "%s: icl_limits failed with error %d",
+ __func__, error);
+ return;
+ }
+
+ ci->status = CTL_ISCSI_OK;
+}
+
#ifdef ICL_KERNEL_PROXY
static void
cfiscsi_ioctl_listen(struct ctl_iscsi *ci)
@@ -2170,11 +2241,14 @@ cfiscsi_ioctl(struct cdev *dev,
case CTL_ISCSI_LIST:
cfiscsi_ioctl_list(ci);
break;
+ case CTL_ISCSI_LOGOUT:
+ cfiscsi_ioctl_logout(ci);
+ break;
case CTL_ISCSI_TERMINATE:
cfiscsi_ioctl_terminate(ci);
break;
- case CTL_ISCSI_LOGOUT:
- cfiscsi_ioctl_logout(ci);
+ case CTL_ISCSI_LIMITS:
+ cfiscsi_ioctl_limits(ci);
break;
#ifdef ICL_KERNEL_PROXY
case CTL_ISCSI_LISTEN:
@@ -2571,13 +2645,8 @@ cfiscsi_datamove_out(union ctl_io *io)
target_transfer_tag =
atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
-
-#if 0
- CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator "
- "task tag 0x%x, target transfer tag 0x%x",
- bhssc->bhssc_initiator_task_tag, target_transfer_tag);
-#endif
- cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO);
+ cdw = cfiscsi_data_wait_new(cs, io, bhssc->bhssc_initiator_task_tag,
+ &target_transfer_tag);
if (cdw == NULL) {
CFISCSI_SESSION_WARN(cs, "failed to "
"allocate memory; dropping connection");
@@ -2586,6 +2655,12 @@ cfiscsi_datamove_out(union ctl_io *io)
cfiscsi_session_terminate(cs);
return;
}
+#if 0
+ CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator "
+ "task tag 0x%x, target transfer tag 0x%x",
+ bhssc->bhssc_initiator_task_tag, target_transfer_tag);
+#endif
+
cdw->cdw_ctl_io = io;
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
@@ -2622,7 +2697,7 @@ cfiscsi_datamove_out(union ctl_io *io)
icl_pdu_data_segment_length(request)) {
done = cfiscsi_handle_data_segment(request, cdw);
if (done) {
- uma_zfree(cfiscsi_data_wait_zone, cdw);
+ cfiscsi_data_wait_free(cs, cdw);
io->scsiio.be_move_done(io);
return;
}
@@ -2825,7 +2900,7 @@ cfiscsi_task_management_done(union ctl_io *io)
TAILQ_REMOVE(&cs->cs_waiting_for_data_out,
cdw, cdw_next);
cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
- uma_zfree(cfiscsi_data_wait_zone, cdw);
+ cfiscsi_data_wait_free(cs, cdw);
}
CFISCSI_SESSION_UNLOCK(cs);
}
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.h b/sys/cam/ctl/ctl_frontend_iscsi.h
index e1a094974378..140aa5223cee 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.h
+++ b/sys/cam/ctl/ctl_frontend_iscsi.h
@@ -59,6 +59,7 @@ struct cfiscsi_data_wait {
size_t cdw_sg_len;
uint32_t cdw_r2t_end;
uint32_t cdw_datasn;
+ void *cdw_icl_prv;
};
#define CFISCSI_SESSION_STATE_INVALID 0
diff --git a/sys/cam/ctl/ctl_ioctl.h b/sys/cam/ctl/ctl_ioctl.h
index 532953fb7fab..c7a3c2938400 100644
--- a/sys/cam/ctl/ctl_ioctl.h
+++ b/sys/cam/ctl/ctl_ioctl.h
@@ -657,6 +657,7 @@ typedef enum {
CTL_ISCSI_LIST,
CTL_ISCSI_LOGOUT,
CTL_ISCSI_TERMINATE,
+ CTL_ISCSI_LIMITS,
#if defined(ICL_KERNEL_PROXY) || 1
/*
* We actually need those in all cases, but leave the ICL_KERNEL_PROXY,
@@ -677,6 +678,7 @@ typedef enum {
#define CTL_ISCSI_NAME_LEN 224 /* 223 bytes, by RFC 3720, + '\0' */
#define CTL_ISCSI_ADDR_LEN 47 /* INET6_ADDRSTRLEN + '\0' */
#define CTL_ISCSI_ALIAS_LEN 128 /* Arbitrary. */
+#define CTL_ISCSI_OFFLOAD_LEN 8 /* Arbitrary. */
struct ctl_iscsi_handoff_params {
char initiator_name[CTL_ISCSI_NAME_LEN];
@@ -698,11 +700,12 @@ struct ctl_iscsi_handoff_params {
uint32_t max_burst_length;
uint32_t first_burst_length;
uint32_t immediate_data;
+ char offload[CTL_ISCSI_OFFLOAD_LEN];
#ifdef ICL_KERNEL_PROXY
int connection_id;
- int spare[3];
+ int spare[1];
#else
- int spare[4];
+ int spare[2];
#endif
};
@@ -733,6 +736,14 @@ struct ctl_iscsi_terminate_params {
int spare[4];
};
+struct ctl_iscsi_limits_params {
+ char offload[CTL_ISCSI_OFFLOAD_LEN];
+ /* passed to kernel */
+ size_t data_segment_limit;
+ /* passed to userland */
+ int spare[4];
+};
+
#ifdef ICL_KERNEL_PROXY
struct ctl_iscsi_listen_params {
int iser;
@@ -780,6 +791,7 @@ union ctl_iscsi_data {
struct ctl_iscsi_list_params list;
struct ctl_iscsi_logout_params logout;
struct ctl_iscsi_terminate_params terminate;
+ struct ctl_iscsi_limits_params limits;
#ifdef ICL_KERNEL_PROXY
struct ctl_iscsi_listen_params listen;
struct ctl_iscsi_accept_params accept;
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index ffcb063e784b..428142c3b5e2 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -342,6 +342,8 @@ static const struct ctl_page_index log_page_index_template[] = {
CTL_PAGE_FLAG_NONE, NULL, NULL},
{SLS_LOGICAL_BLOCK_PROVISIONING, 0, 0, NULL,
CTL_PAGE_FLAG_NONE, ctl_lbp_log_sense_handler, NULL},
+ {SLS_STAT_AND_PERF, 0, 0, NULL,
+ CTL_PAGE_FLAG_NONE, ctl_sap_log_sense_handler, NULL},
};
#define CTL_NUM_LOG_PAGES sizeof(log_page_index_template)/ \
@@ -351,6 +353,11 @@ struct ctl_log_pages {
uint8_t pages_page[CTL_NUM_LOG_PAGES];
uint8_t subpages_page[CTL_NUM_LOG_PAGES * 2];
uint8_t lbp_page[12*CTL_NUM_LBP_PARAMS];
+ struct stat_page {
+ struct scsi_log_stat_and_perf sap;
+ struct scsi_log_idle_time it;
+ struct scsi_log_time_interval ti;
+ } stat_page;
struct ctl_page_index index[CTL_NUM_LOG_PAGES];
};
@@ -403,6 +410,10 @@ struct ctl_lun {
struct ctl_lun_delay_info delay_info;
int sync_interval;
int sync_count;
+#ifdef CTL_TIME_IO
+ sbintime_t idle_time;
+ sbintime_t last_busy;
+#endif
TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue;
TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
STAILQ_ENTRY(ctl_lun) links;
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index dfd0db085a92..46d0bfc0466f 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -561,6 +561,7 @@ struct scsi_log_sense
#define SLS_ERROR_LASTN_PAGE 0x07
#define SLS_LOGICAL_BLOCK_PROVISIONING 0x0c
#define SLS_SELF_TEST_PAGE 0x10
+#define SLS_STAT_AND_PERF 0x19
#define SLS_IE_PAGE 0x2f
#define SLS_PAGE_CTRL_MASK 0xC0
#define SLS_PAGE_CTRL_THRESHOLD 0x00
@@ -619,6 +620,45 @@ struct scsi_log_param_header {
u_int8_t param_len;
};
+struct scsi_log_stat_and_perf {
+ struct scsi_log_param_header hdr;
+#define SLP_SAP 0x0001
+ uint8_t read_num[8];
+ uint8_t write_num[8];
+ uint8_t received_lba[8];
+ uint8_t transmitted_lba[8];
+ uint8_t read_int[8];
+ uint8_t write_int[8];
+ uint8_t weighted_num[8];
+ uint8_t weighted_int[8];
+};
+
+struct scsi_log_idle_time {
+ struct scsi_log_param_header hdr;
+#define SLP_IT 0x0002
+ uint8_t idle_int[8];
+};
+
+struct scsi_log_time_interval {
+ struct scsi_log_param_header hdr;
+#define SLP_TI 0x0003
+ uint8_t exponent[4];
+ uint8_t integer[4];
+};
+
+struct scsi_log_fua_stat_and_perf {
+ struct scsi_log_param_header hdr;
+#define SLP_FUA_SAP 0x0004
+ uint8_t fua_read_num[8];
+ uint8_t fua_write_num[8];
+ uint8_t fuanv_read_num[8];
+ uint8_t fuanv_write_num[8];
+ uint8_t fua_read_int[8];
+ uint8_t fua_write_int[8];
+ uint8_t fuanv_read_int[8];
+ uint8_t fuanv_write_int[8];
+};
+
struct scsi_control_page {
u_int8_t page_code;
u_int8_t page_length;
diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c
index 242504ada1ad..0f44cf55a473 100644
--- a/sys/cam/scsi/scsi_xpt.c
+++ b/sys/cam/scsi/scsi_xpt.c
@@ -139,6 +139,7 @@ typedef enum {
PROBE_MODE_SENSE,
PROBE_SUPPORTED_VPD_LIST,
PROBE_DEVICE_ID,
+ PROBE_EXTENDED_INQUIRY,
PROBE_SERIAL_NUM,
PROBE_TUR_FOR_NEGOTIATION,
PROBE_INQUIRY_BASIC_DV1,
@@ -156,6 +157,7 @@ static char *probe_action_text[] = {
"PROBE_MODE_SENSE",
"PROBE_SUPPORTED_VPD_LIST",
"PROBE_DEVICE_ID",
+ "PROBE_EXTENDED_INQUIRY",
"PROBE_SERIAL_NUM",
"PROBE_TUR_FOR_NEGOTIATION",
"PROBE_INQUIRY_BASIC_DV1",
@@ -923,6 +925,34 @@ done:
}
goto done;
}
+ case PROBE_EXTENDED_INQUIRY:
+ {
+ struct scsi_vpd_extended_inquiry_data *ext_inq;
+
+ ext_inq = NULL;
+ if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA))
+ ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT,
+ M_NOWAIT | M_ZERO);
+
+ if (ext_inq != NULL) {
+ scsi_inquiry(csio,
+ /*retries*/4,
+ probedone,
+ MSG_SIMPLE_Q_TAG,
+ (uint8_t *)ext_inq,
+ sizeof(*ext_inq),
+ /*evpd*/TRUE,
+ SVPD_EXTENDED_INQUIRY_DATA,
+ SSD_MIN_SIZE,
+ /*timeout*/60 * 1000);
+ break;
+ }
+ /*
+ * We'll have to do without, let our probedone
+ * routine finish up for us.
+ */
+ goto done;
+ }
case PROBE_SERIAL_NUM:
{
struct scsi_vpd_unit_serial_number *serial_buf;
@@ -1454,6 +1484,50 @@ out:
if (devid && length == 0)
free(devid, M_CAMXPT);
xpt_release_ccb(done_ccb);
+ PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY);
+ xpt_schedule(periph, priority);
+ goto out;
+ }
+ case PROBE_EXTENDED_INQUIRY: {
+ struct scsi_vpd_extended_inquiry_data *ext_inq;
+ struct ccb_scsiio *csio;
+ int32_t length = 0;
+
+ csio = &done_ccb->csio;
+ ext_inq = (struct scsi_vpd_extended_inquiry_data *)
+ csio->data_ptr;
+ if (path->device->ext_inq != NULL) {
+ path->device->ext_inq_len = 0;
+ free(path->device->ext_inq, M_CAMXPT);
+ path->device->ext_inq = NULL;
+ }
+
+ if (ext_inq == NULL) {
+ /* Don't process the command as it was never sent */
+ } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
+ length = scsi_2btoul(ext_inq->page_length) +
+ __offsetof(struct scsi_vpd_extended_inquiry_data,
+ flags1);
+ length = min(length, sizeof(*ext_inq));
+ length -= csio->resid;
+ if (length > 0) {
+ path->device->ext_inq_len = length;
+ path->device->ext_inq = (uint8_t *)ext_inq;
+ }
+ } else if (cam_periph_error(done_ccb, 0,
+ SF_RETRY_UA,
+ &softc->saved_ccb) == ERESTART) {
+ return;
+ } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+ /* Don't wedge the queue */
+ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
+ /*run_queue*/TRUE);
+ }
+
+ /* Free the extended inquiry data if we don't use it */
+ if (ext_inq && length <= 0)
+ free(ext_inq, M_CAMXPT);
+ xpt_release_ccb(done_ccb);
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM);
xpt_schedule(periph, priority);
goto out;
@@ -2477,6 +2551,21 @@ scsi_dev_advinfo(union ccb *start_ccb)
memcpy(cdai->buf, device->rcap_buf, amt);
}
break;
+ case CDAI_TYPE_EXT_INQ:
+ /*
+ * We fetch extended inquiry data during probe, if
+ * available. We don't allow changing it.
+ */
+ if (cdai->flags & CDAI_FLAG_STORE)
+ return;
+ cdai->provsiz = device->ext_inq_len;
+ if (device->ext_inq_len == 0)
+ break;
+ amt = device->ext_inq_len;
+ if (cdai->provsiz > cdai->bufsiz)
+ amt = cdai->bufsiz;
+ memcpy(cdai->buf, device->ext_inq, amt);
+ break;
default:
return;
}
diff --git a/sys/cddl/contrib/opensolaris/uts/arm/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/arm/dtrace/fasttrap_isa.c
new file mode 100644
index 000000000000..18e3837b35b6
--- /dev/null
+++ b/sys/cddl/contrib/opensolaris/uts/arm/dtrace/fasttrap_isa.c
@@ -0,0 +1,30 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+/*
+ * XXX: Placeholder for ARM fasttrap code
+ */
diff --git a/sys/cddl/contrib/opensolaris/uts/arm/sys/fasttrap_isa.h b/sys/cddl/contrib/opensolaris/uts/arm/sys/fasttrap_isa.h
new file mode 100644
index 000000000000..10361cbed8de
--- /dev/null
+++ b/sys/cddl/contrib/opensolaris/uts/arm/sys/fasttrap_isa.h
@@ -0,0 +1,94 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _FASTTRAP_ISA_H
+#define _FASTTRAP_ISA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is our reserved trap instruction: ta 0x38
+ */
+#define FASTTRAP_INSTR 0x91d02038
+
+#define FASTTRAP_SUNWDTRACE_SIZE 128
+
+typedef uint32_t fasttrap_instr_t;
+
+typedef struct fasttrap_machtp {
+ fasttrap_instr_t ftmt_instr; /* original instruction */
+ uintptr_t ftmt_dest; /* destination of DCTI */
+ uint8_t ftmt_type; /* emulation type */
+ uint8_t ftmt_flags; /* emulation flags */
+ uint8_t ftmt_cc; /* which cc to look at */
+ uint8_t ftmt_code; /* branch condition */
+} fasttrap_machtp_t;
+
+#define ftt_instr ftt_mtp.ftmt_instr
+#define ftt_dest ftt_mtp.ftmt_dest
+#define ftt_type ftt_mtp.ftmt_type
+#define ftt_flags ftt_mtp.ftmt_flags
+#define ftt_cc ftt_mtp.ftmt_cc
+#define ftt_code ftt_mtp.ftmt_code
+
+#define FASTTRAP_T_COMMON 0x00 /* common case -- no emulation */
+#define FASTTRAP_T_CCR 0x01 /* integer condition code branch */
+#define FASTTRAP_T_FCC 0x02 /* floating-point branch */
+#define FASTTRAP_T_REG 0x03 /* register predicated branch */
+#define FASTTRAP_T_ALWAYS 0x04 /* branch always */
+#define FASTTRAP_T_CALL 0x05 /* call instruction */
+#define FASTTRAP_T_JMPL 0x06 /* jmpl instruction */
+#define FASTTRAP_T_RDPC 0x07 /* rdpc instruction */
+#define FASTTRAP_T_RETURN 0x08 /* return instruction */
+
+/*
+ * For performance rather than correctness.
+ */
+#define FASTTRAP_T_SAVE 0x10 /* save instruction (func entry only) */
+#define FASTTRAP_T_RESTORE 0x11 /* restore instruction */
+#define FASTTRAP_T_OR 0x12 /* mov instruction */
+#define FASTTRAP_T_SETHI 0x13 /* sethi instruction (includes nop) */
+
+#define FASTTRAP_F_ANNUL 0x01 /* branch is annulled */
+#define FASTTRAP_F_RETMAYBE 0x02 /* not definitely a return site */
+
+#define FASTTRAP_AFRAMES 3
+#define FASTTRAP_RETURN_AFRAMES 4
+#define FASTTRAP_ENTRY_AFRAMES 3
+#define FASTTRAP_OFFSET_AFRAMES 3
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FASTTRAP_ISA_H */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
index 68e81616a5a7..818f18064f89 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
@@ -11880,7 +11880,7 @@ err:
int i;
*factor = 1;
-#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
+#if defined(__amd64__) || defined(__arm__) || defined(__mips__) || defined(__powerpc__)
/*
* FreeBSD isn't good at limiting the amount of memory we
* ask to malloc, so let's place a limit here before trying
@@ -16881,7 +16881,7 @@ dtrace_dtr(void *data)
#ifdef illumos
dtrace_state_destroy(state);
#else
- if (state == NULL) {
+ if (state != NULL) {
dtrace_state_destroy(state);
kmem_free(state, 0);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
index d449294d7507..3cb7eb27c7c4 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h
@@ -2434,6 +2434,13 @@ extern void dtrace_helpers_destroy(proc_t *);
#define DTRACE_INVOP_MFLR_R0 5
#define DTRACE_INVOP_NOP 6
+#elif defined(__arm__)
+
+#define DTRACE_INVOP_PUSHM 1
+#define DTRACE_INVOP_POPM 2
+#define DTRACE_INVOP_B 3
+
+
#endif
#ifdef __cplusplus
diff --git a/sys/cddl/dev/dtrace/arm/dtrace_asm.S b/sys/cddl/dev/dtrace/arm/dtrace_asm.S
new file mode 100644
index 000000000000..a536492c02b7
--- /dev/null
+++ b/sys/cddl/dev/dtrace/arm/dtrace_asm.S
@@ -0,0 +1,197 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#define _ASM
+#define _LOCORE
+#define LOCORE
+
+#include <sys/cpuvar_defs.h>
+#include <sys/dtrace.h>
+
+#include <machine/armreg.h>
+#include <machine/asm.h>
+
+#include "assym.s"
+
+/*
+void dtrace_membar_producer(void)
+*/
+ENTRY(dtrace_membar_producer)
+ RET
+END(dtrace_membar_producer)
+
+/*
+void dtrace_membar_consumer(void)
+*/
+ENTRY(dtrace_membar_consumer)
+ RET
+END(dtrace_membar_consumer)
+
+/*
+dtrace_icookie_t dtrace_interrupt_disable(void)
+*/
+ENTRY(dtrace_interrupt_disable)
+ mrs r0, cpsr
+ mov r1, r0
+ orr r1, r1, #(PSR_I | PSR_F)
+ msr cpsr_c, r1
+ RET
+END(dtrace_interrupt_disable)
+
+/*
+void dtrace_interrupt_enable(dtrace_icookie_t cookie)
+*/
+ENTRY(dtrace_interrupt_enable)
+ and r0, r0, #(PSR_I | PSR_F)
+ mrs r1, cpsr
+ bic r1, r1, #(PSR_I | PSR_F)
+ orr r1, r1, r0
+ msr cpsr_c, r1
+ RET
+END(dtrace_interrupt_enable)
+/*
+uint8_t
+dtrace_fuword8_nocheck(void *addr)
+*/
+ENTRY(dtrace_fuword8_nocheck)
+ ldrb r3, [r0]
+ mov r0, r3
+ RET
+END(dtrace_fuword8_nocheck)
+
+/*
+uint16_t
+dtrace_fuword16_nocheck(void *addr)
+*/
+ENTRY(dtrace_fuword16_nocheck)
+ ldrh r3, [r0]
+ mov r0, r3
+ RET
+END(dtrace_fuword16_nocheck)
+
+/*
+uint32_t
+dtrace_fuword32_nocheck(void *addr)
+*/
+ENTRY(dtrace_fuword32_nocheck)
+ ldr r3, [r0]
+ mov r0, r3
+ RET
+END(dtrace_fuword32_nocheck)
+
+/*
+uint64_t
+dtrace_fuword64_nocheck(void *addr)
+*/
+ENTRY(dtrace_fuword64_nocheck)
+ ldm r0, {r2, r3}
+
+#if defined(__BIG_ENDIAN__)
+/* big endian */
+ mov r0, r3
+ mov r1, r2
+#else
+/* little endian */
+ mov r0, r2
+ mov r1, r3
+#endif
+#endif
+ RET
+END(dtrace_fuword64_nocheck)
+
+/*
+void
+dtrace_copy(uintptr_t uaddr, uintptr_t kaddr, size_t size)
+*/
+ENTRY(dtrace_copy)
+ stmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ teq r2, #0x00000000
+ mov r5, #0x00000000
+ beq 2f
+
+1: ldrb r4, [r0], #0x0001
+ add r5, r5, #0x00000001
+ strb r4, [r1], #0x0001
+ teqne r5, r2
+ bne 1b
+
+2: ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ RET
+END(dtrace_copy)
+
+/*
+void
+dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+XXX: Check for flags?
+*/
+ENTRY(dtrace_copystr)
+ stmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ teq r2, #0x00000000
+ mov r5, #0x00000000
+ beq 2f
+
+1: ldrb r4, [r0], #0x0001
+ add r5, r5, #0x00000001
+ teq r4, #0x00000000
+ strb r4, [r1], #0x0001
+ teqne r5, r2
+ bne 1b
+
+2: ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ RET
+END(dtrace_copystr)
+
+/*
+void
+vpanic(const char *format, va_list alist)
+*/
+ENTRY(vpanic) /* Initial stack layout: */
+vpanic_common:
+ RET
+END(vpanic)
+
+/*
+void
+dtrace_vpanic(const char *format, va_list alist)
+*/
+ENTRY(dtrace_vpanic) /* Initial stack layout: */
+ b vpanic
+ RET
+END(dtrace_vpanic) /* Initial stack layout: */
+
+/*
+uintptr_t
+dtrace_caller(int aframes)
+*/
+ENTRY(dtrace_caller)
+ mov r0, #-1
+ RET
+END(dtrace_caller)
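The C prototypes given in the comments above match the other ports; dtrace_interrupt_disable()/dtrace_interrupt_enable() are used as a matched pair around probe firing, roughly as follows (a sketch of the generic dtrace(4) usage, not new code in this patch):

	dtrace_icookie_t cookie;

	cookie = dtrace_interrupt_disable();
	/* probe-context work that must run with IRQ and FIQ masked */
	dtrace_interrupt_enable(cookie);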
diff --git a/sys/cddl/dev/dtrace/arm/dtrace_isa.c b/sys/cddl/dev/dtrace/arm/dtrace_isa.c
new file mode 100644
index 000000000000..7d3dc2e03801
--- /dev/null
+++ b/sys/cddl/dev/dtrace/arm/dtrace_isa.c
@@ -0,0 +1,356 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/stack.h>
+#include <sys/pcpu.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <machine/atomic.h>
+#include <machine/db_machdep.h>
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+#include <machine/stack.h>
+#include <ddb/db_sym.h>
+#include <ddb/ddb.h>
+#include <sys/kdb.h>
+
+#include "regset.h"
+
+/*
+ * We need some reasonable default to prevent the backtrace code
+ * from wandering too far.
+ */
+#define MAX_FUNCTION_SIZE 0x10000
+#define MAX_PROLOGUE_SIZE 0x100
+
+
+uint8_t dtrace_fuword8_nocheck(void *);
+uint16_t dtrace_fuword16_nocheck(void *);
+uint32_t dtrace_fuword32_nocheck(void *);
+uint64_t dtrace_fuword64_nocheck(void *);
+
+void
+dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
+ uint32_t *intrpc)
+{
+ u_int32_t *frame, *lastframe;
+ int scp_offset;
+ int depth = 0;
+ pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
+
+ if (intrpc != 0)
+ pcstack[depth++] = (pc_t) intrpc;
+
+ aframes++;
+
+ frame = (u_int32_t *)__builtin_frame_address(0);
+ lastframe = NULL;
+ scp_offset = -(get_pc_str_offset() >> 2);
+
+ while ((frame != NULL) && (depth < pcstack_limit)) {
+ db_addr_t scp;
+#if 0
+ u_int32_t savecode;
+ int r;
+ u_int32_t *rp;
+#endif
+
+ /*
+ * In theory, the SCP isn't guaranteed to be in the function
+ * that generated the stack frame. We hope for the best.
+ */
+ scp = frame[FR_SCP];
+
+ if (aframes > 0) {
+ aframes--;
+ if ((aframes == 0) && (caller != 0)) {
+ pcstack[depth++] = caller;
+ }
+ }
+ else {
+ pcstack[depth++] = scp;
+ }
+
+#if 0
+ savecode = ((u_int32_t *)scp)[scp_offset];
+ if ((savecode & 0x0e100000) == 0x08000000) {
+ /* Looks like an STM */
+ rp = frame - 4;
+ for (r = 10; r >= 0; r--) {
+ if (savecode & (1 << r)) {
+ /* register r == *rp-- */
+ }
+ }
+ }
+#endif
+
+ /*
+ * Switch to next frame up
+ */
+ if (frame[FR_RFP] == 0)
+ break; /* Top of stack */
+
+ lastframe = frame;
+ frame = (u_int32_t *)(frame[FR_RFP]);
+
+ if (INKERNEL((int)frame)) {
+ /* staying in kernel */
+ if (frame <= lastframe) {
+ /* bad frame pointer */
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ for (; depth < pcstack_limit; depth++) {
+ pcstack[depth] = 0;
+ }
+}
+
+void
+dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
+{
+ printf("IMPLEMENT ME: %s\n", __func__);
+}
+
+int
+dtrace_getustackdepth(void)
+{
+ printf("IMPLEMENT ME: %s\n", __func__);
+ return (0);
+}
+
+void
+dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
+{
+ printf("IMPLEMENT ME: %s\n", __func__);
+}
+
+/*ARGSUSED*/
+uint64_t
+dtrace_getarg(int arg, int aframes)
+{
+/* struct arm_frame *fp = (struct arm_frame *)dtrace_getfp();*/
+
+ return (0);
+}
+
+int
+dtrace_getstackdepth(int aframes)
+{
+ u_int32_t *frame, *lastframe;
+ int scp_offset;
+ int depth = 1;
+
+ frame = (u_int32_t *)__builtin_frame_address(0);
+ lastframe = NULL;
+ scp_offset = -(get_pc_str_offset() >> 2);
+
+ while (frame != NULL) {
+ db_addr_t scp;
+#if 0
+ u_int32_t savecode;
+ int r;
+ u_int32_t *rp;
+#endif
+
+ /*
+ * In theory, the SCP isn't guaranteed to be in the function
+ * that generated the stack frame. We hope for the best.
+ */
+ scp = frame[FR_SCP];
+
+ depth++;
+
+ /*
+ * Switch to next frame up
+ */
+ if (frame[FR_RFP] == 0)
+ break; /* Top of stack */
+
+ lastframe = frame;
+ frame = (u_int32_t *)(frame[FR_RFP]);
+
+ if (INKERNEL((int)frame)) {
+ /* staying in kernel */
+ if (frame <= lastframe) {
+ /* bad frame pointer */
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ if (depth < aframes)
+ return 0;
+ else
+ return depth - aframes;
+
+}
+
+ulong_t
+dtrace_getreg(struct trapframe *rp, uint_t reg)
+{
+ printf("IMPLEMENT ME: %s\n", __func__);
+
+ return (0);
+}
+
+static int
+dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
+{
+
+ if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
+ return (0);
+ }
+
+ return (1);
+}
+
+void
+dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copy(uaddr, kaddr, size);
+}
+
+void
+dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copy(kaddr, uaddr, size);
+}
+
+void
+dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copystr(uaddr, kaddr, size, flags);
+}
+
+void
+dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copystr(kaddr, uaddr, size, flags);
+}
+
+uint8_t
+dtrace_fuword8(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword8_nocheck(uaddr));
+}
+
+uint16_t
+dtrace_fuword16(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword16_nocheck(uaddr));
+}
+
+uint32_t
+dtrace_fuword32(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword32_nocheck(uaddr));
+}
+
+uint64_t
+dtrace_fuword64(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword64_nocheck(uaddr));
+}
+
+#define __with_interrupts_disabled(expr) \
+ do { \
+ u_int cpsr_save, tmp; \
+ \
+ __asm __volatile( \
+ "mrs %0, cpsr;" \
+ "orr %1, %0, %2;" \
+ "msr cpsr_fsxc, %1;" \
+ : "=r" (cpsr_save), "=r" (tmp) \
+ : "I" (PSR_I | PSR_F) \
+ : "cc" ); \
+ (expr); \
+ __asm __volatile( \
+ "msr cpsr_fsxc, %0" \
+ : /* no output */ \
+ : "r" (cpsr_save) \
+ : "cc" ); \
+ } while(0)
+
+uint32_t
+dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
+{
+ return atomic_cmpset_32((uint32_t*)target, (uint32_t)cmp, (uint32_t)new);
+}
+
+void *
+dtrace_casptr(volatile void *target, volatile void *cmp, volatile void *new)
+{
+ return (void*)dtrace_cas32((uint32_t*)target, (uint32_t)cmp, (uint32_t)new);
+}
+
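The __with_interrupts_disabled() macro defined above wraps a single statement in a cpsr save/mask/restore sequence; a hypothetical caller (the counter here is illustrative only) would look like:

	static u_int example_count;

	__with_interrupts_disabled(example_count++);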
diff --git a/sys/cddl/dev/dtrace/arm/dtrace_subr.c b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
new file mode 100644
index 000000000000..d4c12a6a0f11
--- /dev/null
+++ b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
@@ -0,0 +1,261 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ *
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kmem.h>
+#include <sys/smp.h>
+#include <sys/dtrace_impl.h>
+#include <sys/dtrace_bsd.h>
+#include <machine/armreg.h>
+#include <machine/clock.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+#include <vm/pmap.h>
+
+#define DELAYBRANCH(x) ((int)(x) < 0)
+
+extern uintptr_t dtrace_in_probe_addr;
+extern int dtrace_in_probe;
+extern dtrace_id_t dtrace_probeid_error;
+extern int (*dtrace_invop_jump_addr)(struct trapframe *);
+
+int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);
+void dtrace_invop_init(void);
+void dtrace_invop_uninit(void);
+
+typedef struct dtrace_invop_hdlr {
+ int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
+ struct dtrace_invop_hdlr *dtih_next;
+} dtrace_invop_hdlr_t;
+
+dtrace_invop_hdlr_t *dtrace_invop_hdlr;
+
+int
+dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
+{
+ dtrace_invop_hdlr_t *hdlr;
+ int rval;
+
+ for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
+ if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
+ return (rval);
+
+ return (0);
+}
+
+
+void
+dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
+{
+ dtrace_invop_hdlr_t *hdlr;
+
+ hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
+ hdlr->dtih_func = func;
+ hdlr->dtih_next = dtrace_invop_hdlr;
+ dtrace_invop_hdlr = hdlr;
+}
+
+void
+dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
+{
+ dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;
+
+ for (;;) {
+ if (hdlr == NULL)
+ panic("attempt to remove non-existent invop handler");
+
+ if (hdlr->dtih_func == func)
+ break;
+
+ prev = hdlr;
+ hdlr = hdlr->dtih_next;
+ }
+
+ if (prev == NULL) {
+ ASSERT(dtrace_invop_hdlr == hdlr);
+ dtrace_invop_hdlr = hdlr->dtih_next;
+ } else {
+ ASSERT(dtrace_invop_hdlr != hdlr);
+ prev->dtih_next = hdlr->dtih_next;
+ }
+
+ kmem_free(hdlr, 0);
+}
+
+
+/*ARGSUSED*/
+void
+dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
+{
+ printf("IMPLEMENT ME: dtrace_toxic_ranges\n");
+}
+
+void
+dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
+{
+ cpuset_t cpus;
+
+ if (cpu == DTRACE_CPUALL)
+ cpus = all_cpus;
+ else
+ CPU_SETOF(cpu, &cpus);
+
+ smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
+ smp_no_rendevous_barrier, arg);
+}
+
+static void
+dtrace_sync_func(void)
+{
+}
+
+void
+dtrace_sync(void)
+{
+ dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
+}
+
+/*
+ * DTrace needs a high resolution time function which can
+ * be called from a probe context and is guaranteed not to have
+ * been instrumented with probes itself.
+ *
+ * Returns nanoseconds since boot.
+ */
+uint64_t
+dtrace_gethrtime()
+{
+ struct timespec curtime;
+
+ nanouptime(&curtime);
+
+ return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
+
+}
+
+uint64_t
+dtrace_gethrestime(void)
+{
+ struct timespec curtime;
+
+ getnanotime(&curtime);
+
+ return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
+}
+
+/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
+int
+dtrace_trap(struct trapframe *frame, u_int type)
+{
+ /*
+ * A trap can occur while DTrace executes a probe. Before
+ * executing the probe, DTrace blocks re-scheduling and sets
+ * a flag in its per-cpu flags to indicate that it doesn't
+ * want to fault. On returning from the probe, the no-fault
+ * flag is cleared and finally re-scheduling is enabled.
+ *
+ * Check if DTrace has enabled 'no-fault' mode:
+ *
+ */
+ if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
+ /*
+ * There are only a couple of trap types that are expected.
+ * All the rest will be handled in the usual way.
+ */
+ switch (type) {
+ /* Page fault. */
+ case FAULT_ALIGN:
+ /* Flag a bad address. */
+ cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
+ cpu_core[curcpu].cpuc_dtrace_illval = 0;
+
+ /*
+ * Offset the instruction pointer to the instruction
+ * following the one causing the fault.
+ */
+ frame->tf_pc += sizeof(int);
+ return (1);
+ default:
+ /* Handle all other traps in the usual way. */
+ break;
+ }
+ }
+
+ /* Handle the trap in the usual way. */
+ return (0);
+}
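The comment in dtrace_trap() above describes the probe-context no-fault protocol; the consuming side, sketched from the generic dtrace(4) pattern rather than from this patch, looks roughly like:

	volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	dtrace_copyin(uaddr, kaddr, size, flags);
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	if (*flags & CPU_DTRACE_FAULT) {
		/* dtrace_trap() flagged a bad access; abort the probe cleanly */
	}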
+
+void
+dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
+ int fault, int fltoffs, uintptr_t illval)
+{
+
+ dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
+ (uintptr_t)epid,
+ (uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
+}
+
+static int
+dtrace_invop_start(struct trapframe *frame)
+{
+ printf("IMPLEMENT ME: %s\n", __func__);
+ switch (dtrace_invop(frame->tf_pc, (uintptr_t *)frame, frame->tf_pc)) {
+ case DTRACE_INVOP_PUSHM:
+ // TODO:
+ break;
+ case DTRACE_INVOP_POPM:
+ // TODO:
+ break;
+ case DTRACE_INVOP_B:
+ // TODO
+ break;
+ default:
+ return (-1);
+ break;
+ }
+
+ return (0);
+}
+
+void dtrace_invop_init(void)
+{
+ dtrace_invop_jump_addr = dtrace_invop_start;
+}
+
+void dtrace_invop_uninit(void)
+{
+ dtrace_invop_jump_addr = 0;
+}
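dtrace_invop_add() and dtrace_invop_remove() maintain a simple singly linked list of handlers; a provider such as fbt(4) hooks its fbt_invop() in at module load and unhooks it on unload, mirroring the other fbt ports (sketch, not new code here):

	/* provider load */
	dtrace_invop_add(fbt_invop);

	/* provider unload */
	dtrace_invop_remove(fbt_invop);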
diff --git a/sys/cddl/dev/dtrace/arm/regset.h b/sys/cddl/dev/dtrace/arm/regset.h
new file mode 100644
index 000000000000..ce9e97ea7a09
--- /dev/null
+++ b/sys/cddl/dev/dtrace/arm/regset.h
@@ -0,0 +1,57 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
+
+/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
+/* All Rights Reserved */
+
+#ifndef _REGSET_H
+#define _REGSET_H
+
+/*
+ * #pragma ident "@(#)regset.h 1.11 05/06/08 SMI"
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if 0
+#define REG_LINK R14
+#define REG_SP R12
+#define REG_PS R0
+#define REG_R0 R0
+#define REG_R1 R1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _REGSET_H */
diff --git a/sys/cddl/dev/fbt/arm/fbt_isa.c b/sys/cddl/dev/fbt/arm/fbt_isa.c
new file mode 100644
index 000000000000..99fe067cd0ad
--- /dev/null
+++ b/sys/cddl/dev/fbt/arm/fbt_isa.c
@@ -0,0 +1,192 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Portions Copyright 2006-2008 John Birrell jb@freebsd.org
+ * Portions Copyright 2013 Justin Hibbits jhibbits@freebsd.org
+ * Portions Copyright 2013 Howard Su howardsu@freebsd.org
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+
+#include <sys/dtrace.h>
+
+#include "fbt.h"
+
+#define FBT_PATCHVAL 0xe06a0cfe /* illegal instruction */
+
+#define FBT_PUSHM 0xe92d0000
+#define FBT_POPM 0xe8bd0000
+#define FBT_JUMP 0xea000000
+
+#define FBT_ENTRY "entry"
+#define FBT_RETURN "return"
+
+int
+fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
+{
+ struct trapframe *frame = (struct trapframe *)stack;
+ solaris_cpu_t *cpu = &solaris_cpu[curcpu];
+ fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
+
+ for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
+ if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
+ fbt->fbtp_invop_cnt++;
+ cpu->cpu_dtrace_caller = addr;
+
+ /* TODO: Need 5th parameter from stack */
+ dtrace_probe(fbt->fbtp_id, frame->tf_r0,
+ frame->tf_r1, frame->tf_r2,
+ frame->tf_r3, 0);
+
+ cpu->cpu_dtrace_caller = 0;
+
+ return (fbt->fbtp_rval);
+ }
+ }
+
+ return (0);
+}
+
+void
+fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val)
+{
+
+ *fbt->fbtp_patchpoint = val;
+ cpu_icache_sync_range((vm_offset_t)fbt->fbtp_patchpoint, 4);
+}
+
+int
+fbt_provide_module_function(linker_file_t lf, int symindx,
+ linker_symval_t *symval, void *opaque)
+{
+ char *modname = opaque;
+ const char *name = symval->name;
+ fbt_probe_t *fbt, *retfbt;
+ uint32_t *instr, *limit;
+ int popm;
+
+ if (strncmp(name, "dtrace_", 7) == 0 &&
+ strncmp(name, "dtrace_safe_", 12) != 0) {
+ /*
+ * Anything beginning with "dtrace_" may be called
+ * from probe context unless it explicitly indicates
+ * that it won't be called from probe context by
+ * using the prefix "dtrace_safe_".
+ */
+ return (0);
+ }
+
+ if (name[0] == '_' && name[1] == '_')
+ return (0);
+
+ instr = (uint32_t *)symval->value;
+ limit = (uint32_t *)(symval->value + symval->size);
+
+ for (; instr < limit; instr++)
+ if ((*instr & 0xffff0000) == FBT_PUSHM &&
+ (*instr & 0x4000) != 0)
+ break;
+
+ if (instr >= limit)
+ return (0);
+
+ fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
+ fbt->fbtp_name = name;
+ fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
+ name, FBT_ENTRY, 3, fbt);
+ fbt->fbtp_patchpoint = instr;
+ fbt->fbtp_ctl = lf;
+ fbt->fbtp_loadcnt = lf->loadcnt;
+ fbt->fbtp_savedval = *instr;
+ fbt->fbtp_patchval = FBT_PATCHVAL;
+ fbt->fbtp_rval = DTRACE_INVOP_PUSHM;
+ fbt->fbtp_symindx = symindx;
+
+ fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
+ fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
+
+ lf->fbt_nentries++;
+
+ popm = FBT_POPM | ((*instr) & 0x3FFF) | 0x8000;
+
+ retfbt = NULL;
+again:
+ for (; instr < limit; instr++) {
+ if (*instr == popm)
+ break;
+ else if ((*instr & 0xff000000) == FBT_JUMP) {
+ uint32_t *target, *start;
+ int offset;
+
+ offset = (*instr & 0xffffff);
+ offset <<= 8;
+ offset /= 64;
+ target = instr + (2 + offset);
+ start = (uint32_t *)symval->value;
+ if (target >= limit || target < start)
+ break;
+ instr++; /* skip delay slot */
+ }
+ }
+
+ if (instr >= limit)
+ return (0);
+
+ /*
+ * We have a winner!
+ */
+ fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
+ fbt->fbtp_name = name;
+ if (retfbt == NULL) {
+ fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
+ name, FBT_RETURN, 5, fbt);
+ } else {
+ retfbt->fbtp_next = fbt;
+ fbt->fbtp_id = retfbt->fbtp_id;
+ }
+ retfbt = fbt;
+
+ fbt->fbtp_patchpoint = instr;
+ fbt->fbtp_ctl = lf;
+ fbt->fbtp_loadcnt = lf->loadcnt;
+ fbt->fbtp_symindx = symindx;
+ if ((*instr & 0xff000000) == FBT_JUMP)
+ fbt->fbtp_rval = DTRACE_INVOP_B;
+ else
+ fbt->fbtp_rval = DTRACE_INVOP_POPM;
+ fbt->fbtp_savedval = *instr;
+ fbt->fbtp_patchval = FBT_PATCHVAL;
+ fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
+ fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
+
+ lf->fbt_nentries++;
+
+ instr++;
+ goto again;
+}
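The entry patch point is the first push-multiple that saves lr; a worked encoding (illustrative, not part of the patch) shows how the masks above line up:

	uint32_t push_insn = 0xe92d48f0;	/* push {r4-r7, r11, lr} */
	uint32_t pop_insn;

	/* Entry match: top half is FBT_PUSHM and bit 14 (lr) is set. */
	if ((push_insn & 0xffff0000) == FBT_PUSHM && (push_insn & 0x4000) != 0)
		pop_insn = FBT_POPM | (push_insn & 0x3fff) | 0x8000;
	/* pop_insn == 0xe8bd88f0, i.e. pop {r4-r7, r11, pc} for the return probe */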
diff --git a/sys/cddl/dev/fbt/arm/fbt_isa.h b/sys/cddl/dev/fbt/arm/fbt_isa.h
new file mode 100644
index 000000000000..5552f31a64a6
--- /dev/null
+++ b/sys/cddl/dev/fbt/arm/fbt_isa.h
@@ -0,0 +1,30 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _FBT_ISA_H_
+#define _FBT_ISA_H_
+
+typedef uint32_t fbt_patchval_t;
+
+#endif
diff --git a/sys/cddl/dev/lockstat/lockstat.c b/sys/cddl/dev/lockstat/lockstat.c
index b8d12ac77d0e..a4e4efd93c58 100644
--- a/sys/cddl/dev/lockstat/lockstat.c
+++ b/sys/cddl/dev/lockstat/lockstat.c
@@ -44,7 +44,8 @@
#include <sys/lockstat.h>
#if defined(__i386__) || defined(__amd64__) || \
- defined(__mips__) || defined(__powerpc__)
+ defined(__mips__) || defined(__powerpc__) || \
+ defined(__arm__)
#define LOCKSTAT_AFRAMES 1
#else
#error "architecture not supported"
diff --git a/sys/cddl/dev/profile/profile.c b/sys/cddl/dev/profile/profile.c
index 5291bb168041..51105e209833 100644
--- a/sys/cddl/dev/profile/profile.c
+++ b/sys/cddl/dev/profile/profile.c
@@ -128,6 +128,16 @@
struct profile_probe_percpu;
+#ifdef __mips
+/* bogus */
+#define PROF_ARTIFICIAL_FRAMES 3
+#endif
+
+#ifdef __arm__
+/* bogus */
+#define PROF_ARTIFICIAL_FRAMES 3
+#endif
+
typedef struct profile_probe {
char prof_name[PROF_NAMELEN];
dtrace_id_t prof_id;
diff --git a/sys/conf/dtb.mk b/sys/conf/dtb.mk
index 9ba22cd505dd..41bc9bb20d8d 100644
--- a/sys/conf/dtb.mk
+++ b/sys/conf/dtb.mk
@@ -45,7 +45,6 @@ SYSDIR= ${_dir}
.PATH: ${SYSDIR}/gnu/dts/${MACHINE} ${SYSDIR}/boot/fdt/dts/${MACHINE}
-DTBDIR?=/boot/dtb
DTB=${DTS:R:S/$/.dtb/}
all: ${DTB}
@@ -64,6 +63,10 @@ CLEANFILES+=${_dts:R:S/$/.dtb/}
realinstall: _dtbinstall
.ORDER: beforeinstall _kmodinstall
_dtbinstall:
+# Need to create this because installkernel doesn't invoke mtree with BSD.root.mtree
+# to make sure the tree is set up properly. We don't recreate it to avoid duplicate
+# entries in the NO_ROOT case.
+ test -d ${DESTDIR}${DTBDIR} || ${INSTALL} -d -o ${DTBOWN} -g ${DTBGRP} ${DESTDIR}${DTBDIR}
.for _dtb in ${DTB}
${INSTALL} -o ${DTBOWN} -g ${DTBGRP} -m ${DTBMODE} \
${_INSTALLFLAGS} ${_dtb} ${DESTDIR}${DTBDIR}
diff --git a/sys/conf/files b/sys/conf/files
index 97bbe5914762..e10fd5e44173 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -3193,7 +3193,7 @@ libkern/jenkins_hash.c standard
libkern/murmur3_32.c standard
libkern/mcount.c optional profiling-routine
libkern/memcchr.c standard
-libkern/memchr.c optional fdt | gdb
+libkern/memchr.c standard
libkern/memcmp.c standard
libkern/memmem.c optional gdb
libkern/qsort.c standard
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index e20e5649f046..73d3979d0d7b 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -242,9 +242,3 @@ powerpc/psim/iobus.c optional psim
powerpc/psim/ata_iobus.c optional ata psim
powerpc/psim/openpic_iobus.c optional psim
powerpc/psim/uart_iobus.c optional uart psim
-powerpc/wii/platform_wii.c optional wii
-powerpc/wii/wii_bus.c optional wii
-powerpc/wii/wii_pic.c optional wii
-powerpc/wii/wii_fb.c optional wii
-powerpc/wii/wii_gpio.c optional wii wiigpio
-powerpc/wii/wii_ipc.c optional wii
diff --git a/sys/conf/kern.mk b/sys/conf/kern.mk
index c031b3a37906..4317de56c65e 100644
--- a/sys/conf/kern.mk
+++ b/sys/conf/kern.mk
@@ -158,6 +158,14 @@ INLINE_LIMIT?= 8000
CFLAGS+= -ffreestanding
#
+# The C standard leaves signed integer overflow behavior undefined.
+# gcc and clang optimizers take advantage of this. The kernel makes
+# use of signed integer wraparound semantics, so we need the compiler
+# to treat it as wraparound and not take shortcuts.
+#
+CFLAGS+= -fwrapv
+
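As an illustration of the idiom this flag protects (a hedged C example, not code from this change), an after-the-fact overflow test such as the following is undefined behavior without -fwrapv and may be deleted by the optimizer; with -fwrapv the addition wraps and the test works as intended:

	static int
	add_would_overflow(int a, int b)
	{
		/*
		 * With -fwrapv, a + b wraps modulo 2^32, so a positive b
		 * that overflows makes the sum compare less than a.
		 */
		return (b > 0 && a + b < a);
	}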
+#
# GCC SSP support
#
.if ${MK_SSP} != "no" && \
diff --git a/sys/conf/kern.opts.mk b/sys/conf/kern.opts.mk
index 81d91afc374e..112c217e2da1 100644
--- a/sys/conf/kern.opts.mk
+++ b/sys/conf/kern.opts.mk
@@ -58,7 +58,11 @@ MK_${var}:= no
.else
MK_${var}:= yes
.endif
+.else
+.if ${MK_${var}} != "yes" && ${MK_${var}} != "no"
+.error "Illegal value for MK_${var}: ${MK_${var}}"
.endif
+.endif # !defined(MK_${var})
.endfor
.undef __DEFAULT_YES_OPTIONS
@@ -70,7 +74,11 @@ MK_${var}:= yes
.else
MK_${var}:= no
.endif
+.else
+.if ${MK_${var}} != "yes" && ${MK_${var}} != "no"
+.error "Illegal value for MK_${var}: ${MK_${var}}"
.endif
+.endif # !defined(MK_${var})
.endfor
.undef __DEFAULT_NO_OPTIONS
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index 99cdd67512b3..232d88b69e82 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -79,6 +79,9 @@ INCLUDES+= -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal
# ... and the same for the NgATM stuff
INCLUDES+= -I$S/contrib/ngatm
+# ... and the same for vchiq
+INCLUDES+= -I$S/contrib/vchiq
+
# ... and the same for twa
INCLUDES+= -I$S/dev/twa
@@ -171,7 +174,7 @@ SYSTEM_OBJS= locore.o ${MDOBJS} ${OBJS}
SYSTEM_OBJS+= ${SYSTEM_CFILES:.c=.o}
SYSTEM_OBJS+= hack.So
SYSTEM_LD= @${LD} -Bdynamic -T ${LDSCRIPT} ${_LDFLAGS} --no-warn-mismatch \
- -warn-common -export-dynamic -dynamic-linker /red/herring \
+ --warn-common --export-dynamic --dynamic-linker /red/herring \
-o ${.TARGET} -X ${SYSTEM_OBJS} vers.o
SYSTEM_LD_TAIL= @${OBJCOPY} --strip-symbol gcc2_compiled. ${.TARGET} ; \
${SIZE} ${.TARGET} ; chmod 755 ${.TARGET}
@@ -183,15 +186,13 @@ SYSTEM_DEP+= ${LDSCRIPT}
MKMODULESENV+= MAKEOBJDIRPREFIX=${.OBJDIR}/modules KMODDIR=${KODIR}
MKMODULESENV+= MACHINE_CPUARCH=${MACHINE_CPUARCH}
MKMODULESENV+= MACHINE=${MACHINE} MACHINE_ARCH=${MACHINE_ARCH}
+MKMODULESENV+= MODULES_EXTRA="${MODULES_EXTRA}" WITHOUT_MODULES="${WITHOUT_MODULES}"
.if (${KERN_IDENT} == LINT)
MKMODULESENV+= ALL_MODULES=LINT
.endif
.if defined(MODULES_OVERRIDE)
MKMODULESENV+= MODULES_OVERRIDE="${MODULES_OVERRIDE}"
.endif
-.if defined(WITHOUT_MODULES)
-MKMODULESENV+= WITHOUT_MODULES="${WITHOUT_MODULES}"
-.endif
.if defined(DEBUG)
MKMODULESENV+= DEBUG_FLAGS="${DEBUG}"
.endif
diff --git a/sys/conf/options b/sys/conf/options
index c23f8f05fe08..bf157677db10 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -129,6 +129,7 @@ GEOM_SHSEC opt_geom.h
GEOM_STRIPE opt_geom.h
GEOM_SUNLABEL opt_geom.h
GEOM_UNCOMPRESS opt_geom.h
+GEOM_UNCOMPRESS_DEBUG opt_geom.h
GEOM_UZIP opt_geom.h
GEOM_VINUM opt_geom.h
GEOM_VIRSTOR opt_geom.h
@@ -209,7 +210,7 @@ SW_WATCHDOG opt_watchdog.h
TURNSTILE_PROFILING
UMTX_PROFILING
VFS_AIO
-VERBOSE_SYSINIT opt_global.h
+VERBOSE_SYSINIT
WLCACHE opt_wavelan.h
WLDEBUG opt_wavelan.h
@@ -846,6 +847,9 @@ MWL_TX_NODROP opt_mwl.h
# Options for the Intel 802.11n wireless driver
IWN_DEBUG opt_iwn.h
+# Options for the Intel 3945ABG wireless driver
+WPI_DEBUG opt_wpi.h
+
# dcons options
DCONS_BUF_SIZE opt_dcons.h
DCONS_POLL_HZ opt_dcons.h
diff --git a/sys/conf/options.powerpc b/sys/conf/options.powerpc
index d1b8b40d3637..e3f024dd6a65 100644
--- a/sys/conf/options.powerpc
+++ b/sys/conf/options.powerpc
@@ -24,7 +24,6 @@ PS3 opt_platform.h
MAMBO
PSERIES
PSIM
-WII opt_platform.h
SC_OFWFB opt_ofwfb.h
diff --git a/sys/contrib/dev/acpica/include/actbl2.h b/sys/contrib/dev/acpica/include/actbl2.h
index a6f2f2081f64..0f929eba4665 100644
--- a/sys/contrib/dev/acpica/include/actbl2.h
+++ b/sys/contrib/dev/acpica/include/actbl2.h
@@ -466,6 +466,7 @@ typedef struct acpi_table_dmar
/* Masks for Flags field above */
#define ACPI_DMAR_INTR_REMAP (1)
+#define ACPI_DMAR_X2APIC_OPT_OUT (2)
/* DMAR subtable header */
diff --git a/sys/contrib/dev/ath/ath_hal/ar9300/ar9300.h b/sys/contrib/dev/ath/ath_hal/ar9300/ar9300.h
index 30c55a077ffe..f16ef6b12fcb 100644
--- a/sys/contrib/dev/ath/ath_hal/ar9300/ar9300.h
+++ b/sys/contrib/dev/ath/ath_hal/ar9300/ar9300.h
@@ -317,12 +317,12 @@ typedef struct {
/* Support for multiple INIs */
struct ar9300_ini_array {
- u_int32_t *ia_array;
+ const u_int32_t *ia_array;
u_int32_t ia_rows;
u_int32_t ia_columns;
};
#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
- (iniarray)->ia_array = (u_int32_t *)(array); \
+ (iniarray)->ia_array = (const u_int32_t *)(array); \
(iniarray)->ia_rows = (rows); \
(iniarray)->ia_columns = (columns); \
} while (0)
diff --git a/sys/contrib/dev/ral/microcode.h b/sys/contrib/dev/ral/microcode.h
index 9c9adc394c89..c7109d0fe30f 100644
--- a/sys/contrib/dev/ral/microcode.h
+++ b/sys/contrib/dev/ral/microcode.h
@@ -2268,24 +2268,24 @@ static const uint8_t rt2661[] = {
};
static const uint8_t rt2860[] = {
- 0x02, 0x03, 0x5b, 0x02, 0x02, 0xa6, 0x22, 0x22, 0xff, 0xff, 0xff,
+ 0x02, 0x02, 0xa3, 0x02, 0x02, 0x2e, 0x22, 0xff, 0xff, 0xff, 0xff,
0x02, 0x01, 0x2c, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x00, 0x1e,
0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x00, 0xdd, 0xc0, 0xe0, 0xc0,
0xf0, 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0, 0x75, 0xd0, 0x18, 0xc2,
0xaf, 0x30, 0x45, 0x03, 0x12, 0x10, 0x09, 0x90, 0x04, 0x16, 0xe0,
0x30, 0xe3, 0x03, 0x74, 0x08, 0xf0, 0x90, 0x04, 0x14, 0xe0, 0x20,
0xe7, 0x03, 0x02, 0x00, 0xcb, 0x74, 0x80, 0xf0, 0x90, 0x70, 0x12,
- 0xe0, 0xf5, 0x36, 0x90, 0x04, 0x04, 0xe0, 0x24, 0xcf, 0x60, 0x30,
+ 0xe0, 0xf5, 0x24, 0x90, 0x04, 0x04, 0xe0, 0x24, 0xcf, 0x60, 0x30,
0x14, 0x60, 0x42, 0x24, 0xe2, 0x60, 0x47, 0x14, 0x60, 0x55, 0x24,
0x21, 0x70, 0x60, 0xe5, 0x55, 0x24, 0xfe, 0x60, 0x07, 0x14, 0x60,
0x08, 0x24, 0x02, 0x70, 0x08, 0x7d, 0x01, 0x80, 0x28, 0x7d, 0x02,
- 0x80, 0x24, 0x90, 0x70, 0x10, 0xe0, 0xf5, 0x50, 0x85, 0x36, 0x40,
+ 0x80, 0x24, 0x90, 0x70, 0x10, 0xe0, 0xf5, 0x50, 0x85, 0x24, 0x40,
0xd2, 0x01, 0x80, 0x3e, 0xe5, 0x55, 0x64, 0x03, 0x60, 0x04, 0xe5,
- 0x55, 0x70, 0x04, 0x7d, 0x02, 0x80, 0x09, 0x85, 0x36, 0x41, 0xd2,
- 0x02, 0x80, 0x29, 0xad, 0x55, 0xaf, 0x36, 0x12, 0x02, 0x82, 0x80,
+ 0x55, 0x70, 0x04, 0x7d, 0x02, 0x80, 0x09, 0x85, 0x24, 0x41, 0xd2,
+ 0x02, 0x80, 0x29, 0xad, 0x55, 0xaf, 0x24, 0x12, 0x02, 0x0a, 0x80,
0x20, 0x90, 0x70, 0x10, 0xe0, 0xf5, 0x47, 0x90, 0x70, 0x11, 0xe0,
0xf5, 0x44, 0x12, 0x10, 0x25, 0x80, 0x06, 0x90, 0x70, 0x10, 0xe0,
- 0xf5, 0x45, 0xe4, 0xfd, 0xaf, 0x36, 0x12, 0x02, 0x82, 0xd2, 0x04,
+ 0xf5, 0x45, 0xe4, 0xfd, 0xaf, 0x24, 0x12, 0x02, 0x0a, 0xd2, 0x04,
0x90, 0x70, 0x13, 0xe4, 0xf0, 0x90, 0x70, 0x13, 0xe4, 0xf0, 0xd2,
0xaf, 0xd0, 0xd0, 0xd0, 0x82, 0xd0, 0x83, 0xd0, 0xf0, 0xd0, 0xe0,
0x32, 0xc0, 0xe0, 0xc0, 0xf0, 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0,
@@ -2302,54 +2302,54 @@ static const uint8_t rt2860[] = {
0x15, 0x50, 0x80, 0x02, 0xc2, 0x59, 0xd5, 0x53, 0x07, 0x30, 0x60,
0x04, 0x15, 0x46, 0xd2, 0x04, 0x30, 0x45, 0x03, 0x12, 0x10, 0x0f,
0xc2, 0x8d, 0xd2, 0xaf, 0xd0, 0xd0, 0xd0, 0x82, 0xd0, 0x83, 0xd0,
- 0xf0, 0xd0, 0xe0, 0x32, 0x90, 0x70, 0x2a, 0xe0, 0x30, 0xe1, 0x43,
- 0xc2, 0xaf, 0x90, 0x70, 0x28, 0xe0, 0x90, 0x10, 0x1c, 0xf0, 0x90,
- 0x70, 0x29, 0xe0, 0x90, 0x10, 0x1d, 0xf0, 0x90, 0x70, 0x2a, 0xe0,
- 0x90, 0x10, 0x1e, 0xf0, 0x90, 0x10, 0x1c, 0xe0, 0xf5, 0x37, 0x90,
- 0x10, 0x1e, 0xe0, 0x20, 0xe1, 0xf3, 0x90, 0x10, 0x1c, 0xe0, 0x90,
- 0x70, 0x28, 0xf0, 0x90, 0x10, 0x1d, 0xe0, 0x90, 0x70, 0x29, 0xf0,
- 0x90, 0x10, 0x1e, 0xe0, 0x90, 0x70, 0x2a, 0xf0, 0xc2, 0x05, 0xd2,
- 0xaf, 0x22, 0x12, 0x02, 0xc8, 0x30, 0x45, 0x03, 0x12, 0x10, 0x03,
- 0x30, 0x01, 0x06, 0x20, 0x09, 0x03, 0x12, 0x10, 0x1c, 0x30, 0x02,
- 0x06, 0x20, 0x0a, 0x03, 0x12, 0x10, 0x1f, 0x30, 0x03, 0x06, 0x20,
- 0x0b, 0x03, 0x12, 0x10, 0x1f, 0x30, 0x04, 0x06, 0x20, 0x0c, 0x03,
- 0x12, 0x10, 0x22, 0x20, 0x13, 0x09, 0x20, 0x11, 0x06, 0xe5, 0x2b,
- 0x45, 0x2c, 0x60, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92, 0xa9, 0x12,
- 0x03, 0x1c, 0x80, 0xbf, 0xc2, 0x43, 0xd2, 0x45, 0xe4, 0xf5, 0x20,
- 0xf5, 0x21, 0xf5, 0x53, 0xf5, 0x46, 0xf5, 0x2b, 0xf5, 0x2c, 0xc2,
- 0x42, 0xf5, 0x51, 0xf5, 0x52, 0xf5, 0x55, 0x90, 0x04, 0x18, 0x74,
- 0x80, 0xf0, 0x90, 0x04, 0x1a, 0x74, 0x08, 0xf0, 0xc2, 0x1a, 0xc2,
- 0x18, 0xc2, 0x1b, 0x22, 0xc8, 0xef, 0xc8, 0xe6, 0xfa, 0x08, 0xe6,
- 0x4a, 0x60, 0x0c, 0xc8, 0xef, 0xc8, 0x08, 0xe6, 0x16, 0x18, 0x70,
- 0x01, 0x16, 0xc3, 0x22, 0xed, 0x24, 0xff, 0xfd, 0xec, 0x34, 0xff,
- 0xc8, 0xef, 0xc8, 0xf6, 0x08, 0xc6, 0xed, 0xc6, 0xd3, 0x22, 0xd0,
- 0x83, 0xd0, 0x82, 0xf8, 0xe4, 0x93, 0x70, 0x12, 0x74, 0x01, 0x93,
- 0x70, 0x0d, 0xa3, 0xa3, 0x93, 0xf8, 0x74, 0x01, 0x93, 0xf5, 0x82,
- 0x88, 0x83, 0xe4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60, 0xef, 0xa3,
- 0xa3, 0xa3, 0x80, 0xdf, 0xef, 0xf4, 0x60, 0x1f, 0xe4, 0xfe, 0x12,
- 0x03, 0x67, 0xe0, 0xb4, 0xff, 0x12, 0x12, 0x03, 0x67, 0xef, 0xf0,
- 0x74, 0x1c, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83, 0xed,
- 0xf0, 0x22, 0x0e, 0xbe, 0x04, 0xe3, 0x22, 0xc0, 0xe0, 0xc0, 0xf0,
- 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0, 0x75, 0xd0, 0x08, 0xc2, 0xaf,
- 0x30, 0x45, 0x03, 0x12, 0x10, 0x06, 0xd2, 0xaf, 0xd0, 0xd0, 0xd0,
- 0x82, 0xd0, 0x83, 0xd0, 0xf0, 0xd0, 0xe0, 0x32, 0xc2, 0xaf, 0x12,
- 0x00, 0x06, 0x12, 0x02, 0x09, 0x12, 0x02, 0xe1, 0xe4, 0xf5, 0x22,
- 0xf5, 0x47, 0x90, 0x04, 0x00, 0x74, 0x80, 0xf0, 0xd2, 0xaf, 0x22,
- 0x75, 0x89, 0x02, 0xe4, 0xf5, 0x8c, 0xf5, 0x8a, 0xf5, 0x88, 0xf5,
- 0xb8, 0xf5, 0xe8, 0x75, 0x90, 0x18, 0xd2, 0x8c, 0x75, 0xa8, 0x05,
- 0x22, 0xef, 0x60, 0x03, 0x1f, 0x80, 0xfa, 0x22, 0xff, 0xc0, 0x26,
+ 0xf0, 0xd0, 0xe0, 0x32, 0x12, 0x02, 0x50, 0x30, 0x45, 0x03, 0x12,
+ 0x10, 0x03, 0x30, 0x01, 0x06, 0x20, 0x09, 0x03, 0x12, 0x10, 0x1c,
+ 0x30, 0x02, 0x06, 0x20, 0x0a, 0x03, 0x12, 0x10, 0x1f, 0x30, 0x03,
+ 0x06, 0x20, 0x0b, 0x03, 0x12, 0x10, 0x1f, 0x30, 0x04, 0x06, 0x20,
+ 0x0c, 0x03, 0x12, 0x10, 0x22, 0x20, 0x13, 0x09, 0x20, 0x11, 0x06,
+ 0xe5, 0x2b, 0x45, 0x2c, 0x60, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92,
+ 0xa9, 0x12, 0x02, 0x80, 0x80, 0xbf, 0xc2, 0x43, 0xd2, 0x45, 0xe4,
+ 0xf5, 0x20, 0xf5, 0x21, 0xf5, 0x53, 0xf5, 0x46, 0xf5, 0x2b, 0xf5,
+ 0x2c, 0xc2, 0x42, 0xf5, 0x51, 0xf5, 0x52, 0xf5, 0x55, 0x90, 0x04,
+ 0x18, 0x74, 0x80, 0xf0, 0x90, 0x04, 0x1a, 0x74, 0x08, 0xf0, 0x22,
+ 0xd0, 0x83, 0xd0, 0x82, 0xf8, 0xe4, 0x93, 0x70, 0x12, 0x74, 0x01,
+ 0x93, 0x70, 0x0d, 0xa3, 0xa3, 0x93, 0xf8, 0x74, 0x01, 0x93, 0xf5,
+ 0x82, 0x88, 0x83, 0xe4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60, 0xef,
+ 0xa3, 0xa3, 0xa3, 0x80, 0xdf, 0xef, 0xf4, 0x60, 0x1f, 0xe4, 0xfe,
+ 0x12, 0x02, 0xaf, 0xe0, 0xb4, 0xff, 0x12, 0x12, 0x02, 0xaf, 0xef,
+ 0xf0, 0x74, 0x1c, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83,
+ 0xed, 0xf0, 0x22, 0x0e, 0xbe, 0x04, 0xe3, 0x22, 0xc0, 0xe0, 0xc0,
+ 0xf0, 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0, 0x75, 0xd0, 0x08, 0xc2,
+ 0xaf, 0x30, 0x45, 0x03, 0x12, 0x10, 0x06, 0xd2, 0xaf, 0xd0, 0xd0,
+ 0xd0, 0x82, 0xd0, 0x83, 0xd0, 0xf0, 0xd0, 0xe0, 0x32, 0xc2, 0xaf,
+ 0x12, 0x00, 0x06, 0x12, 0x01, 0xbe, 0x12, 0x02, 0x69, 0xe4, 0xf5,
+ 0x22, 0xf5, 0x47, 0x90, 0x04, 0x00, 0x74, 0x80, 0xf0, 0xd2, 0xaf,
+ 0x22, 0x75, 0x89, 0x02, 0xe4, 0xf5, 0x8c, 0xf5, 0x8a, 0xf5, 0x88,
+ 0xf5, 0xb8, 0xf5, 0xe8, 0x75, 0x90, 0x18, 0xd2, 0x8c, 0x75, 0xa8,
+ 0x05, 0x22, 0x30, 0x45, 0x03, 0x12, 0x10, 0x15, 0xe5, 0x20, 0x70,
+ 0x03, 0x20, 0x10, 0x03, 0x30, 0x11, 0x03, 0x43, 0x87, 0x01, 0x22,
+ 0xce, 0xef, 0xce, 0xee, 0x60, 0x08, 0x7f, 0xff, 0x12, 0x02, 0xc5,
+ 0x1e, 0x80, 0xf5, 0x22, 0x78, 0x7f, 0xe4, 0xf6, 0xd8, 0xfd, 0x75,
+ 0x81, 0x5f, 0x02, 0x01, 0x7a, 0x74, 0x14, 0x2e, 0xf5, 0x82, 0xe4,
+ 0x34, 0x70, 0xf5, 0x83, 0x22, 0xef, 0x90, 0x02, 0xc3, 0x93, 0x90,
+ 0x03, 0x00, 0x73, 0x0a, 0x18, 0xef, 0x60, 0x03, 0x1f, 0x80, 0xfa,
+ 0x22, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x26,
0x74, 0x03, 0xc0, 0xe0, 0xc0, 0x82, 0xc0, 0x83, 0x75, 0x26, 0x0a,
0x22, 0xc0, 0x26, 0x74, 0x03, 0xc0, 0xe0, 0xc0, 0x82, 0xc0, 0x83,
- 0x75, 0x26, 0x18, 0x22, 0x30, 0x45, 0x03, 0x12, 0x10, 0x15, 0xe5,
- 0x20, 0x70, 0x03, 0x20, 0x10, 0x03, 0x30, 0x11, 0x03, 0x43, 0x87,
- 0x01, 0x22, 0xce, 0xef, 0xce, 0xee, 0x60, 0x08, 0x7f, 0xff, 0x12,
- 0x02, 0xf8, 0x1e, 0x80, 0xf5, 0x22, 0xc8, 0xef, 0xc8, 0xe6, 0x60,
- 0x03, 0x16, 0xc3, 0x22, 0xed, 0x14, 0xf6, 0xd3, 0x22, 0xc8, 0xef,
- 0xc8, 0xe6, 0x60, 0x06, 0x16, 0xe6, 0x24, 0xff, 0xb3, 0x22, 0xc3,
- 0x22, 0x78, 0x7f, 0xe4, 0xf6, 0xd8, 0xfd, 0x75, 0x81, 0x5f, 0x02,
- 0x01, 0xc5, 0x74, 0x14, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5,
- 0x83, 0x22, 0xef, 0x90, 0x03, 0x7b, 0x93, 0x90, 0x03, 0x00, 0x73,
- 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x75, 0x26, 0x18, 0x22, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -2641,318 +2641,318 @@ static const uint8_t rt2860[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x10, 0x28, 0x02,
- 0x10, 0x3b, 0x02, 0x10, 0x3c, 0x02, 0x13, 0xbc, 0x02, 0x13, 0xbd,
- 0x02, 0x14, 0x72, 0x02, 0x14, 0x73, 0xc3, 0x22, 0xff, 0xff, 0x02,
- 0x19, 0x4a, 0x02, 0x1a, 0xf4, 0x02, 0x15, 0x6c, 0x02, 0x14, 0xa7,
- 0x30, 0x05, 0x06, 0x20, 0x0d, 0x03, 0x12, 0x01, 0x7a, 0x30, 0x06,
- 0x06, 0x20, 0x0e, 0x03, 0x12, 0x1c, 0x2e, 0x22, 0x22, 0x90, 0x04,
- 0x14, 0xe0, 0x20, 0xe7, 0x03, 0x02, 0x13, 0xbb, 0x90, 0x70, 0x12,
- 0xe0, 0xf5, 0x56, 0x90, 0x04, 0x04, 0xe0, 0x12, 0x02, 0x5c, 0x10,
- 0xfb, 0x30, 0x10, 0xd2, 0x31, 0x10, 0x93, 0x33, 0x10, 0xa1, 0x34,
- 0x10, 0xb4, 0x35, 0x10, 0xab, 0x36, 0x11, 0x09, 0x50, 0x11, 0x4e,
- 0x51, 0x11, 0x57, 0x52, 0x11, 0x57, 0x53, 0x11, 0x57, 0x54, 0x11,
- 0x93, 0x55, 0x11, 0xf0, 0x56, 0x12, 0x43, 0x70, 0x12, 0x69, 0x71,
- 0x12, 0x92, 0x72, 0x13, 0x3e, 0x73, 0x13, 0x61, 0x80, 0x13, 0x88,
- 0x83, 0x13, 0xa0, 0x84, 0x00, 0x00, 0x13, 0xbb, 0xd2, 0x18, 0xd2,
- 0x61, 0x75, 0x35, 0x2a, 0x75, 0x32, 0x0b, 0x75, 0x33, 0xb8, 0x22,
- 0xc2, 0x18, 0x90, 0x01, 0x14, 0xe0, 0x54, 0xfd, 0xf0, 0x22, 0x90,
- 0x70, 0x11, 0xe0, 0xf5, 0x3c, 0x02, 0x13, 0xb5, 0xe5, 0x55, 0xb4,
+ 0x10, 0x32, 0x02, 0x10, 0x33, 0x02, 0x14, 0xc2, 0x02, 0x14, 0xc3,
+ 0x02, 0x15, 0x8f, 0x02, 0x15, 0x90, 0xc3, 0x22, 0xff, 0xff, 0x02,
+ 0x1a, 0x6f, 0x02, 0x1b, 0xec, 0x02, 0x16, 0xbc, 0x02, 0x15, 0xf7,
+ 0x30, 0x05, 0x06, 0x20, 0x0d, 0x03, 0x12, 0x1d, 0x19, 0x22, 0x22,
+ 0x90, 0x04, 0x14, 0xe0, 0x20, 0xe7, 0x03, 0x02, 0x14, 0xc1, 0x90,
+ 0x70, 0x12, 0xe0, 0xf5, 0x56, 0x90, 0x04, 0x04, 0xe0, 0x12, 0x01,
+ 0xe4, 0x10, 0xda, 0x30, 0x10, 0xb1, 0x31, 0x10, 0x93, 0x35, 0x10,
+ 0x8a, 0x36, 0x10, 0xe7, 0x40, 0x10, 0xfe, 0x41, 0x11, 0x15, 0x50,
+ 0x11, 0x5a, 0x51, 0x11, 0x63, 0x52, 0x11, 0x63, 0x53, 0x11, 0x63,
+ 0x54, 0x11, 0x9f, 0x55, 0x11, 0xfc, 0x56, 0x12, 0x4f, 0x64, 0x12,
+ 0x6a, 0x72, 0x13, 0x1e, 0x73, 0x13, 0x42, 0x74, 0x14, 0x35, 0x80,
+ 0x14, 0xa5, 0x83, 0x14, 0x5c, 0x91, 0x00, 0x00, 0x14, 0xc1, 0x90,
+ 0x70, 0x11, 0xe0, 0xf5, 0x3c, 0x02, 0x14, 0xbb, 0xe5, 0x55, 0xb4,
0x02, 0x0f, 0xe5, 0x58, 0x30, 0xe0, 0x06, 0x90, 0x01, 0x0d, 0x74,
0x08, 0xf0, 0x7d, 0x01, 0x80, 0x02, 0x7d, 0x02, 0xaf, 0x56, 0x12,
- 0x02, 0x82, 0x02, 0x13, 0xb5, 0x20, 0x02, 0x03, 0x30, 0x03, 0x0a,
- 0x7d, 0x02, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x02, 0x13, 0xb5, 0xe5,
- 0x34, 0xd3, 0x94, 0x01, 0x40, 0x0c, 0x90, 0x01, 0x0c, 0xe0, 0x44,
+ 0x02, 0x0a, 0x02, 0x14, 0xbb, 0x20, 0x02, 0x03, 0x30, 0x03, 0x0a,
+ 0x7d, 0x02, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x02, 0x14, 0xbb, 0xe5,
+ 0x30, 0xd3, 0x94, 0x01, 0x40, 0x0c, 0x90, 0x01, 0x0c, 0xe0, 0x44,
0x02, 0xf0, 0xa3, 0xe0, 0x44, 0x04, 0xf0, 0x85, 0x56, 0x41, 0xd2,
- 0x02, 0x22, 0x90, 0x70, 0x11, 0xe0, 0xf4, 0x70, 0x03, 0x02, 0x13,
- 0xbb, 0xe0, 0xf5, 0x30, 0x22, 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40,
- 0x07, 0xe5, 0x55, 0x60, 0x03, 0x02, 0x13, 0xbb, 0x90, 0x70, 0x10,
- 0xe0, 0x54, 0x7f, 0xff, 0xbf, 0x0a, 0x0d, 0x90, 0x70, 0x11, 0xe0,
- 0xb4, 0x08, 0x06, 0x75, 0x4e, 0x01, 0x75, 0x4f, 0x84, 0x90, 0x70,
- 0x10, 0xe0, 0x54, 0x7f, 0xff, 0xbf, 0x02, 0x12, 0x90, 0x70, 0x11,
- 0xe0, 0x64, 0x08, 0x60, 0x04, 0xe0, 0xb4, 0x20, 0x06, 0x75, 0x4e,
- 0x03, 0x75, 0x4f, 0x20, 0xe4, 0xf5, 0x27, 0x22, 0x90, 0x70, 0x11,
- 0xe0, 0x24, 0xff, 0x92, 0x47, 0x22, 0xe5, 0x34, 0xd3, 0x94, 0x01,
- 0x40, 0x07, 0xe5, 0x55, 0x60, 0x03, 0x02, 0x13, 0x49, 0x90, 0x04,
- 0x04, 0xe0, 0x25, 0xe0, 0x24, 0x5d, 0xf5, 0x57, 0x90, 0x70, 0x10,
- 0xe0, 0xff, 0x74, 0x47, 0x25, 0x57, 0xf8, 0xc6, 0xef, 0xc6, 0x90,
- 0x70, 0x11, 0xe0, 0xff, 0x74, 0x48, 0x25, 0x57, 0xf8, 0xc6, 0xef,
- 0xc6, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x02, 0x13, 0xb5,
- 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40, 0x07, 0xe5, 0x55, 0x60, 0x03,
- 0x02, 0x13, 0x49, 0xe5, 0x47, 0x64, 0x07, 0x60, 0x1d, 0xe5, 0x47,
- 0x64, 0x08, 0x60, 0x17, 0xe5, 0x47, 0x64, 0x09, 0x60, 0x11, 0xe5,
- 0x47, 0x64, 0x0a, 0x60, 0x0b, 0xe5, 0x47, 0x64, 0x0b, 0x60, 0x05,
- 0xe5, 0x47, 0xb4, 0x0c, 0x08, 0x90, 0x70, 0x11, 0xe0, 0x54, 0x0f,
- 0xf5, 0x3a, 0xe5, 0x47, 0xb4, 0x09, 0x08, 0xe5, 0x3a, 0xb4, 0x03,
- 0x03, 0xe4, 0xf5, 0x46, 0xe5, 0x47, 0xb4, 0x0a, 0x08, 0xe5, 0x3a,
- 0xb4, 0x01, 0x03, 0xe4, 0xf5, 0x46, 0xe4, 0xfd, 0xaf, 0x56, 0x12,
- 0x02, 0x82, 0xd2, 0x04, 0x22, 0x90, 0x70, 0x11, 0xe0, 0xf4, 0xff,
- 0x90, 0x70, 0x10, 0xe0, 0x5f, 0xff, 0x90, 0x70, 0x11, 0xe0, 0x55,
- 0x27, 0x4f, 0x90, 0x70, 0x18, 0xf0, 0x90, 0x70, 0x11, 0xe0, 0x90,
- 0x70, 0x19, 0xf0, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x30,
- 0x15, 0x03, 0xd2, 0x14, 0x22, 0x90, 0x70, 0x18, 0xe0, 0xf5, 0x27,
- 0x90, 0x02, 0x29, 0xe0, 0xff, 0x90, 0x70, 0x19, 0xe0, 0xfe, 0xef,
- 0x5e, 0x90, 0x02, 0x29, 0xf0, 0x30, 0x47, 0x04, 0xaf, 0x27, 0x80,
- 0x04, 0xe5, 0x27, 0xf4, 0xff, 0x90, 0x02, 0x28, 0xef, 0xf0, 0x22,
- 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40, 0x07, 0xe5, 0x55, 0x60, 0x03,
- 0x02, 0x13, 0x49, 0x90, 0x70, 0x10, 0xe0, 0xfe, 0x90, 0x70, 0x11,
- 0xe0, 0xfd, 0xed, 0xf8, 0xe6, 0xf5, 0x57, 0xfd, 0xaf, 0x56, 0x12,
- 0x02, 0x82, 0x02, 0x13, 0xb5, 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40,
- 0x07, 0xe5, 0x55, 0x60, 0x03, 0x02, 0x13, 0x49, 0x90, 0x70, 0x10,
- 0xe0, 0xfe, 0x90, 0x70, 0x11, 0xe0, 0xfd, 0xed, 0xf5, 0x82, 0x8e,
- 0x83, 0xe0, 0xf5, 0x57, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x02,
- 0x13, 0xb5, 0x90, 0x10, 0x00, 0xe0, 0xf5, 0x57, 0xe4, 0xf5, 0x58,
- 0xf5, 0x59, 0x90, 0x10, 0x03, 0xe0, 0xb4, 0x28, 0x05, 0x75, 0x58,
- 0x01, 0x80, 0x3c, 0x90, 0x10, 0x03, 0xe0, 0xb4, 0x30, 0x05, 0x75,
- 0x58, 0x02, 0x80, 0x30, 0x90, 0x10, 0x03, 0xe0, 0xb4, 0x33, 0x05,
- 0x75, 0x58, 0x04, 0x80, 0x24, 0x90, 0x10, 0x03, 0xe0, 0xb4, 0x35,
- 0x0c, 0x90, 0x10, 0x02, 0xe0, 0xb4, 0x72, 0x05, 0x75, 0x58, 0x08,
- 0x80, 0x11, 0x90, 0x10, 0x03, 0xe0, 0xb4, 0x35, 0x0a, 0x90, 0x10,
- 0x02, 0xe0, 0xb4, 0x93, 0x03, 0x75, 0x58, 0x10, 0xe5, 0x58, 0x30,
- 0xe1, 0x19, 0x90, 0x05, 0x08, 0xe0, 0x44, 0x01, 0xf0, 0xfd, 0x90,
- 0x05, 0x05, 0xe0, 0x54, 0xfb, 0xf0, 0x44, 0x04, 0xf0, 0xed, 0x54,
- 0xfe, 0x90, 0x05, 0x08, 0xf0, 0xe4, 0xf5, 0x4e, 0xf5, 0x4f, 0x75,
- 0x3a, 0xff, 0xc2, 0x1a, 0xc2, 0x18, 0xc2, 0x1b, 0xf5, 0x34, 0x90,
- 0x05, 0xa4, 0x74, 0x11, 0xf0, 0xa3, 0x74, 0xff, 0xf0, 0xa3, 0x74,
- 0x03, 0xf0, 0xe4, 0xf5, 0x30, 0xc2, 0x19, 0x90, 0x01, 0x0d, 0xe0,
- 0x44, 0x40, 0xf0, 0x75, 0x3c, 0xff, 0xad, 0x57, 0xaf, 0x56, 0x12,
- 0x02, 0x82, 0xe4, 0x90, 0x70, 0x32, 0xf0, 0x80, 0x77, 0xe5, 0x34,
- 0xd3, 0x94, 0x01, 0x40, 0x0b, 0xe5, 0x55, 0x60, 0x07, 0x7d, 0x03,
- 0xaf, 0x56, 0x02, 0x02, 0x82, 0x90, 0x70, 0x10, 0xe0, 0x24, 0xff,
- 0x92, 0x93, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x80, 0x54,
- 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40, 0x0d, 0xe5, 0x55, 0x60, 0x09,
- 0x7d, 0x03, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x80, 0x40, 0x90, 0x70,
- 0x10, 0xe0, 0x24, 0xff, 0x92, 0x4a, 0xd2, 0x05, 0xad, 0x57, 0xaf,
- 0x56, 0x12, 0x02, 0x82, 0x80, 0x2d, 0xe4, 0xf5, 0x34, 0xf5, 0x30,
- 0x90, 0x70, 0x10, 0xe0, 0xf4, 0x60, 0x03, 0xe0, 0xf5, 0x34, 0xad,
- 0x57, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x80, 0x15, 0xd2, 0x19, 0x05,
- 0x2f, 0xe5, 0x2f, 0xb4, 0x1a, 0x03, 0xe4, 0xf5, 0x2f, 0xd2, 0x04,
- 0xad, 0x57, 0xaf, 0x56, 0x12, 0x02, 0x82, 0x90, 0x04, 0x14, 0x74,
- 0x80, 0xf0, 0x22, 0x22, 0xe5, 0x34, 0xc3, 0x94, 0x03, 0x40, 0x17,
- 0xe5, 0x55, 0xb4, 0x02, 0x12, 0xe5, 0x30, 0x60, 0x0e, 0x30, 0x60,
- 0x0b, 0x74, 0xfd, 0x25, 0x46, 0xf5, 0x46, 0xd2, 0x04, 0xe4, 0xf5,
- 0x53, 0xe5, 0x53, 0x60, 0x03, 0x02, 0x14, 0x71, 0x30, 0x60, 0x21,
- 0xb2, 0x4d, 0x30, 0x4d, 0x1c, 0xe5, 0x34, 0xc3, 0x94, 0x03, 0x40,
- 0x11, 0xe5, 0x55, 0xb4, 0x02, 0x0c, 0xe5, 0x30, 0x60, 0x08, 0x74,
- 0x03, 0x25, 0x46, 0xf5, 0x46, 0x80, 0x02, 0x05, 0x46, 0xc2, 0x04,
- 0xe5, 0x4f, 0x45, 0x4e, 0x60, 0x08, 0xe5, 0x4f, 0x15, 0x4f, 0x70,
- 0x02, 0x15, 0x4e, 0x30, 0x1a, 0x49, 0x7f, 0x32, 0x7d, 0xb8, 0x7c,
- 0x0b, 0x12, 0x02, 0x35, 0x50, 0x06, 0x90, 0x04, 0x10, 0x74, 0x40,
- 0xf0, 0x7f, 0x35, 0x7d, 0x32, 0x12, 0x03, 0x3f, 0x50, 0x09, 0x90,
- 0x10, 0x04, 0xe0, 0x54, 0xf7, 0xf0, 0xd2, 0x06, 0xe5, 0x35, 0xd3,
- 0x94, 0x2d, 0x40, 0x30, 0x30, 0x1b, 0x2d, 0xc2, 0x1b, 0xa2, 0x18,
- 0x92, 0x1a, 0x20, 0x1a, 0x24, 0x90, 0x04, 0x09, 0xe0, 0x54, 0xdd,
- 0xf0, 0x90, 0x10, 0x04, 0xe0, 0x44, 0x08, 0xf0, 0xc2, 0x61, 0xd2,
- 0x03, 0x22, 0xe4, 0xf5, 0x35, 0xa2, 0x18, 0x92, 0x1a, 0x30, 0x1a,
- 0x07, 0x90, 0x04, 0x09, 0xe0, 0x44, 0x22, 0xf0, 0x22, 0x22, 0x30,
- 0x14, 0x30, 0x90, 0x70, 0x19, 0xe0, 0x55, 0x27, 0xff, 0x90, 0x70,
- 0x18, 0xe0, 0x4f, 0xf5, 0x27, 0x90, 0x02, 0x29, 0xe0, 0xff, 0x90,
- 0x70, 0x19, 0xe0, 0xfe, 0xef, 0x5e, 0x90, 0x02, 0x29, 0xf0, 0x30,
- 0x47, 0x04, 0xaf, 0x27, 0x80, 0x04, 0xe5, 0x27, 0xf4, 0xff, 0x90,
- 0x02, 0x28, 0xef, 0xf0, 0xc2, 0x14, 0x22, 0xc2, 0x4b, 0xc2, 0x4c,
- 0xe5, 0x44, 0x12, 0x02, 0x5c, 0x14, 0xc9, 0x00, 0x15, 0x57, 0x04,
- 0x15, 0x53, 0x08, 0x15, 0x33, 0x10, 0x14, 0xdd, 0x20, 0x14, 0xfd,
- 0x60, 0x15, 0x0e, 0xa0, 0x00, 0x00, 0x15, 0x59, 0x85, 0x48, 0x43,
- 0x85, 0x4a, 0x42, 0x85, 0x4c, 0x5e, 0xe5, 0x47, 0x64, 0x06, 0x60,
- 0x03, 0x02, 0x15, 0x59, 0x80, 0x1b, 0xe5, 0x48, 0xc4, 0x54, 0x0f,
- 0xf5, 0x43, 0xe5, 0x4a, 0xc4, 0x54, 0x0f, 0xf5, 0x42, 0xe5, 0x4c,
- 0xc4, 0x54, 0x0f, 0xf5, 0x5e, 0xe5, 0x47, 0x64, 0x06, 0x70, 0x61,
- 0x53, 0x43, 0x0f, 0x80, 0x5c, 0x85, 0x49, 0x43, 0x85, 0x4b, 0x42,
- 0x85, 0x4d, 0x5e, 0xe5, 0x47, 0x64, 0x06, 0x70, 0x4d, 0x80, 0x1b,
- 0xe5, 0x49, 0xc4, 0x54, 0x0f, 0xf5, 0x43, 0xe5, 0x4b, 0xc4, 0x54,
- 0x0f, 0xf5, 0x42, 0xe5, 0x4d, 0xc4, 0x54, 0x0f, 0xf5, 0x5e, 0xe5,
- 0x47, 0x64, 0x06, 0x70, 0x30, 0xe5, 0x43, 0x54, 0x0f, 0x44, 0x10,
- 0xf5, 0x43, 0x80, 0x26, 0xe5, 0x47, 0x64, 0x04, 0x60, 0x05, 0xe5,
- 0x47, 0xb4, 0x05, 0x06, 0x43, 0x5e, 0x04, 0x75, 0x42, 0x09, 0xe5,
- 0x47, 0xb4, 0x06, 0x10, 0xe5, 0x43, 0x54, 0x0f, 0x44, 0x30, 0xf5,
- 0x43, 0x80, 0x06, 0xd2, 0x4b, 0x80, 0x02, 0xd2, 0x4c, 0xe4, 0xf5,
- 0x2a, 0xe5, 0x42, 0xc4, 0x54, 0xf0, 0xff, 0xe5, 0x43, 0x54, 0x0f,
- 0x4f, 0xf5, 0x5f, 0xd2, 0x60, 0x22, 0xd2, 0x15, 0xe5, 0x47, 0x24,
- 0xf5, 0x60, 0x0b, 0x24, 0xcb, 0x60, 0x07, 0x24, 0x40, 0x70, 0x06,
- 0xc2, 0x15, 0x22, 0x12, 0x19, 0x15, 0x12, 0x15, 0x8e, 0xc2, 0x15,
- 0xc2, 0xaf, 0xc2, 0x04, 0xd2, 0xaf, 0x22, 0xc2, 0xaf, 0x90, 0x04,
- 0x14, 0xe0, 0x54, 0x0e, 0x60, 0x04, 0xd2, 0x1c, 0x80, 0x08, 0xe5,
- 0x4e, 0x45, 0x4f, 0x24, 0xff, 0x92, 0x1c, 0xd2, 0xaf, 0x90, 0x04,
- 0x14, 0xe0, 0xa2, 0xe4, 0x92, 0x1d, 0x74, 0x1e, 0xf0, 0xe5, 0x5f,
- 0x54, 0x0f, 0xf5, 0x2d, 0xe5, 0x2a, 0x70, 0x13, 0x30, 0x1c, 0x05,
- 0xe5, 0x5f, 0x20, 0xe5, 0x0b, 0x30, 0x1d, 0x29, 0xe5, 0x5f, 0x54,
- 0x30, 0x64, 0x30, 0x70, 0x21, 0xe5, 0x2a, 0x70, 0x15, 0xe5, 0x34,
- 0xc3, 0x94, 0x03, 0x40, 0x09, 0xe5, 0x30, 0x60, 0x05, 0x75, 0x2a,
- 0x05, 0x80, 0x07, 0x75, 0x2a, 0x0c, 0x80, 0x02, 0x15, 0x2a, 0xd2,
- 0x6c, 0xd2, 0x6d, 0x80, 0x0f, 0xe5, 0x5f, 0x30, 0xe6, 0x06, 0xc2,
- 0x6c, 0xd2, 0x6d, 0x80, 0x04, 0xd2, 0x6c, 0xc2, 0x6d, 0xe5, 0x47,
- 0x64, 0x03, 0x70, 0x21, 0x30, 0x4b, 0x06, 0xc2, 0x6c, 0xd2, 0x6d,
- 0x80, 0x18, 0xe5, 0x2a, 0x70, 0x03, 0x30, 0x4c, 0x11, 0xc2, 0x4c,
- 0xe5, 0x2a, 0x70, 0x05, 0x75, 0x2a, 0x07, 0x80, 0x02, 0x15, 0x2a,
- 0xd2, 0x6c, 0xd2, 0x6d, 0xe5, 0x47, 0xb4, 0x09, 0x14, 0xe5, 0x44,
- 0x20, 0xe3, 0x0b, 0xe5, 0x3a, 0x64, 0x02, 0x60, 0x05, 0xe5, 0x3a,
- 0xb4, 0x03, 0x04, 0xc2, 0x6c, 0xd2, 0x6d, 0xe5, 0x47, 0xb4, 0x0a,
- 0x13, 0xe5, 0x3a, 0xb4, 0x01, 0x06, 0xc2, 0x6c, 0xd2, 0x6d, 0x80,
- 0x08, 0xe5, 0x3a, 0x70, 0x04, 0xd2, 0x6c, 0xc2, 0x6d, 0x20, 0x69,
- 0x07, 0xe5, 0x5e, 0x20, 0xe0, 0x02, 0xb2, 0x68, 0x20, 0x6b, 0x07,
- 0xe5, 0x5e, 0x20, 0xe1, 0x02, 0xb2, 0x6a, 0x20, 0x6d, 0x07, 0xe5,
- 0x5e, 0x20, 0xe2, 0x02, 0xb2, 0x6c, 0x75, 0x2e, 0x40, 0x20, 0x69,
- 0x04, 0xa2, 0x68, 0x80, 0x45, 0x30, 0x68, 0x06, 0xe5, 0x46, 0xa2,
- 0xe2, 0x80, 0x3c, 0x30, 0x19, 0x1c, 0xe5, 0x5e, 0x20, 0xe0, 0x04,
- 0x7f, 0x01, 0x80, 0x02, 0x7f, 0x00, 0xe5, 0x2f, 0xb4, 0x19, 0x04,
- 0x7e, 0x01, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f, 0x24, 0xff, 0x80,
- 0x1d, 0xe5, 0x5e, 0x20, 0xe0, 0x04, 0x7f, 0x01, 0x80, 0x02, 0x7f,
- 0x00, 0xe5, 0x46, 0x54, 0xf0, 0xfe, 0xbe, 0xf0, 0x04, 0x7e, 0x01,
- 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f, 0x24, 0xff, 0x92, 0x73, 0x92,
- 0x72, 0x20, 0x6b, 0x04, 0xa2, 0x6a, 0x80, 0x45, 0x30, 0x6a, 0x06,
- 0xe5, 0x46, 0xa2, 0xe2, 0x80, 0x3c, 0x30, 0x19, 0x1c, 0xe5, 0x5e,
- 0x20, 0xe1, 0x04, 0x7f, 0x01, 0x80, 0x02, 0x7f, 0x00, 0xe5, 0x2f,
- 0xb4, 0x19, 0x04, 0x7e, 0x01, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f,
- 0x24, 0xff, 0x80, 0x1d, 0xe5, 0x5e, 0x20, 0xe1, 0x04, 0x7f, 0x01,
+ 0x02, 0x22, 0x90, 0x70, 0x11, 0xe0, 0xb4, 0x5a, 0x03, 0xc2, 0x4f,
+ 0x22, 0xd2, 0x4f, 0x22, 0xe5, 0x30, 0xd3, 0x94, 0x01, 0x50, 0x03,
+ 0x02, 0x14, 0xc1, 0x90, 0x01, 0x0c, 0xe0, 0x44, 0x02, 0xf0, 0xa3,
+ 0xe0, 0x44, 0x04, 0xf0, 0x22, 0xe5, 0x30, 0xd3, 0x94, 0x01, 0x50,
+ 0x03, 0x02, 0x14, 0xc1, 0x90, 0x01, 0x0c, 0xe0, 0x54, 0xfd, 0xf0,
+ 0xa3, 0xe0, 0x54, 0xfb, 0xf0, 0x22, 0xe5, 0x30, 0xd3, 0x94, 0x01,
+ 0x40, 0x07, 0xe5, 0x55, 0x60, 0x03, 0x02, 0x14, 0xc1, 0x90, 0x70,
+ 0x10, 0xe0, 0x54, 0x7f, 0xff, 0xbf, 0x0a, 0x0d, 0x90, 0x70, 0x11,
+ 0xe0, 0xb4, 0x08, 0x06, 0x75, 0x4e, 0x01, 0x75, 0x4f, 0x84, 0x90,
+ 0x70, 0x10, 0xe0, 0x54, 0x7f, 0xff, 0xbf, 0x02, 0x12, 0x90, 0x70,
+ 0x11, 0xe0, 0x64, 0x08, 0x60, 0x04, 0xe0, 0xb4, 0x20, 0x06, 0x75,
+ 0x4e, 0x03, 0x75, 0x4f, 0x20, 0xe4, 0xf5, 0x3f, 0x22, 0x90, 0x70,
+ 0x11, 0xe0, 0x24, 0xff, 0x92, 0x47, 0x22, 0xe5, 0x30, 0xd3, 0x94,
+ 0x01, 0x40, 0x07, 0xe5, 0x55, 0x60, 0x03, 0x02, 0x13, 0x29, 0x90,
+ 0x04, 0x04, 0xe0, 0x25, 0xe0, 0x24, 0x5d, 0xf5, 0x57, 0x90, 0x70,
+ 0x10, 0xe0, 0xff, 0x74, 0x47, 0x25, 0x57, 0xf8, 0xc6, 0xef, 0xc6,
+ 0x90, 0x70, 0x11, 0xe0, 0xff, 0x74, 0x48, 0x25, 0x57, 0xf8, 0xc6,
+ 0xef, 0xc6, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x02, 0x14,
+ 0xbb, 0xe5, 0x30, 0xd3, 0x94, 0x01, 0x40, 0x07, 0xe5, 0x55, 0x60,
+ 0x03, 0x02, 0x13, 0x29, 0xe5, 0x47, 0x64, 0x07, 0x60, 0x1d, 0xe5,
+ 0x47, 0x64, 0x08, 0x60, 0x17, 0xe5, 0x47, 0x64, 0x09, 0x60, 0x11,
+ 0xe5, 0x47, 0x64, 0x0a, 0x60, 0x0b, 0xe5, 0x47, 0x64, 0x0b, 0x60,
+ 0x05, 0xe5, 0x47, 0xb4, 0x0c, 0x08, 0x90, 0x70, 0x11, 0xe0, 0x54,
+ 0x0f, 0xf5, 0x3a, 0xe5, 0x47, 0xb4, 0x09, 0x08, 0xe5, 0x3a, 0xb4,
+ 0x03, 0x03, 0xe4, 0xf5, 0x46, 0xe5, 0x47, 0xb4, 0x0a, 0x08, 0xe5,
+ 0x3a, 0xb4, 0x01, 0x03, 0xe4, 0xf5, 0x46, 0xe4, 0xfd, 0xaf, 0x56,
+ 0x12, 0x02, 0x0a, 0xd2, 0x04, 0x22, 0x90, 0x70, 0x11, 0xe0, 0xf4,
+ 0xff, 0x90, 0x70, 0x10, 0xe0, 0x5f, 0xff, 0x90, 0x70, 0x11, 0xe0,
+ 0x55, 0x3f, 0x4f, 0x90, 0x70, 0x18, 0xf0, 0x90, 0x70, 0x11, 0xe0,
+ 0x90, 0x70, 0x19, 0xf0, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x0a,
+ 0x30, 0x15, 0x03, 0xd2, 0x14, 0x22, 0x90, 0x70, 0x18, 0xe0, 0xf5,
+ 0x3f, 0x90, 0x02, 0x29, 0xe0, 0xff, 0x90, 0x70, 0x19, 0xe0, 0xfe,
+ 0xef, 0x5e, 0x90, 0x02, 0x29, 0xf0, 0x30, 0x47, 0x04, 0xaf, 0x3f,
+ 0x80, 0x04, 0xe5, 0x3f, 0xf4, 0xff, 0x90, 0x02, 0x28, 0xef, 0xf0,
+ 0x22, 0x90, 0x70, 0x10, 0xe0, 0x24, 0xff, 0x92, 0x1a, 0x75, 0x32,
+ 0x03, 0x75, 0x33, 0x1f, 0xe4, 0xf5, 0x31, 0xad, 0x57, 0xaf, 0x56,
+ 0x12, 0x02, 0x0a, 0x02, 0x14, 0xbb, 0x90, 0x10, 0x00, 0xe0, 0xf5,
+ 0x57, 0xe4, 0xf5, 0x58, 0xf5, 0x59, 0x90, 0x10, 0x03, 0xe0, 0xb4,
+ 0x28, 0x05, 0x75, 0x58, 0x01, 0x80, 0x3c, 0x90, 0x10, 0x03, 0xe0,
+ 0xb4, 0x30, 0x05, 0x75, 0x58, 0x02, 0x80, 0x30, 0x90, 0x10, 0x03,
+ 0xe0, 0xb4, 0x33, 0x05, 0x75, 0x58, 0x04, 0x80, 0x24, 0x90, 0x10,
+ 0x03, 0xe0, 0xb4, 0x35, 0x0c, 0x90, 0x10, 0x02, 0xe0, 0xb4, 0x72,
+ 0x05, 0x75, 0x58, 0x08, 0x80, 0x11, 0x90, 0x10, 0x03, 0xe0, 0xb4,
+ 0x35, 0x0a, 0x90, 0x10, 0x02, 0xe0, 0xb4, 0x93, 0x03, 0x75, 0x58,
+ 0x10, 0xe5, 0x58, 0x30, 0xe1, 0x19, 0x90, 0x05, 0x08, 0xe0, 0x44,
+ 0x01, 0xf0, 0xfd, 0x90, 0x05, 0x05, 0xe0, 0x54, 0xfb, 0xf0, 0x44,
+ 0x04, 0xf0, 0xed, 0x54, 0xfe, 0x90, 0x05, 0x08, 0xf0, 0xe4, 0xf5,
+ 0x4e, 0xf5, 0x4f, 0x75, 0x3a, 0xff, 0xf5, 0x30, 0x90, 0x05, 0xa4,
+ 0x74, 0x11, 0xf0, 0xa3, 0x74, 0xff, 0xf0, 0xa3, 0x74, 0x03, 0xf0,
+ 0xd2, 0x4f, 0x90, 0x01, 0x0d, 0xe0, 0x44, 0x40, 0xf0, 0x75, 0x3c,
+ 0xff, 0xad, 0x57, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x90, 0x70, 0x36,
+ 0x74, 0x37, 0xf0, 0xa3, 0x74, 0x32, 0xf0, 0x90, 0x04, 0x01, 0xe0,
+ 0x44, 0x01, 0xf0, 0xc2, 0x1a, 0xc2, 0x17, 0x02, 0x14, 0xbb, 0xe5,
+ 0x30, 0xd3, 0x94, 0x01, 0x40, 0x0b, 0xe5, 0x55, 0x60, 0x07, 0x7d,
+ 0x03, 0xaf, 0x56, 0x02, 0x02, 0x0a, 0x90, 0x70, 0x10, 0xe0, 0x24,
+ 0xff, 0x92, 0x93, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x02,
+ 0x14, 0xbb, 0x90, 0x10, 0x00, 0xe0, 0x90, 0x10, 0x2c, 0xf0, 0x90,
+ 0x10, 0x2f, 0x74, 0x40, 0xf0, 0x90, 0x70, 0x11, 0xe0, 0x54, 0x7f,
+ 0xf5, 0x57, 0xe0, 0x54, 0x80, 0x90, 0x70, 0x32, 0xf0, 0x90, 0x70,
+ 0x10, 0xe0, 0xff, 0xe5, 0x57, 0xd3, 0x9f, 0x40, 0x43, 0x90, 0x70,
+ 0x33, 0xe5, 0x57, 0xf0, 0x90, 0x70, 0x10, 0xe0, 0xff, 0x90, 0x70,
+ 0x33, 0xe0, 0xc3, 0x9f, 0xd3, 0x94, 0x04, 0x40, 0x73, 0xe0, 0x24,
+ 0xfc, 0xf0, 0xe0, 0xff, 0x90, 0x70, 0x32, 0xe0, 0x4f, 0x90, 0x05,
+ 0x00, 0xf0, 0xe5, 0x58, 0x54, 0x0f, 0x60, 0x04, 0x7f, 0x17, 0x80,
+ 0x02, 0x7f, 0x11, 0x90, 0x05, 0x01, 0xef, 0xf0, 0xa3, 0x74, 0x01,
+ 0xf0, 0x74, 0x03, 0xf0, 0xff, 0x12, 0x02, 0x94, 0x80, 0xc3, 0x90,
+ 0x70, 0x33, 0xe5, 0x57, 0xf0, 0x90, 0x70, 0x33, 0xe0, 0xff, 0x90,
+ 0x70, 0x10, 0xe0, 0xc3, 0x9f, 0xd3, 0x94, 0x04, 0x40, 0x30, 0x90,
+ 0x70, 0x33, 0xe0, 0x24, 0x04, 0xf0, 0xe0, 0xff, 0x90, 0x70, 0x32,
+ 0xe0, 0x4f, 0x90, 0x05, 0x00, 0xf0, 0xe5, 0x58, 0x54, 0x0f, 0x60,
+ 0x04, 0x7f, 0x17, 0x80, 0x02, 0x7f, 0x11, 0x90, 0x05, 0x01, 0xef,
+ 0xf0, 0xa3, 0x74, 0x01, 0xf0, 0x74, 0x03, 0xf0, 0xff, 0x12, 0x02,
+ 0x94, 0x80, 0xc0, 0x90, 0x70, 0x10, 0xe0, 0xff, 0x90, 0x70, 0x32,
+ 0xe0, 0x4f, 0x90, 0x05, 0x00, 0xf0, 0xe5, 0x58, 0x54, 0x0f, 0x60,
+ 0x04, 0x7f, 0x17, 0x80, 0x02, 0x7f, 0x11, 0x90, 0x05, 0x01, 0xef,
+ 0xf0, 0xa3, 0x74, 0x01, 0xf0, 0x74, 0x03, 0xf0, 0xff, 0x12, 0x02,
+ 0x94, 0x90, 0x10, 0x00, 0xe0, 0x90, 0x10, 0x2c, 0xf0, 0x90, 0x10,
+ 0x2f, 0x74, 0x7f, 0xf0, 0xe4, 0xfd, 0xaf, 0x56, 0x12, 0x02, 0x0a,
+ 0x02, 0x14, 0xbb, 0xe5, 0x30, 0xd3, 0x94, 0x01, 0x40, 0x0d, 0xe5,
+ 0x55, 0x60, 0x09, 0x7d, 0x03, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x80,
+ 0x72, 0x90, 0x70, 0x10, 0xe0, 0x24, 0xff, 0x92, 0x4a, 0xd2, 0x05,
+ 0xad, 0x57, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x80, 0x5f, 0x90, 0x70,
+ 0x11, 0xe0, 0x24, 0xff, 0x92, 0x17, 0x90, 0x70, 0x10, 0xe0, 0xf5,
+ 0x5d, 0xad, 0x57, 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x90, 0x04, 0x14,
+ 0x74, 0x80, 0xf0, 0x30, 0x17, 0x13, 0x90, 0x10, 0x00, 0xe0, 0x90,
+ 0x10, 0x2c, 0xf0, 0x90, 0x10, 0x2f, 0xe0, 0x54, 0xf0, 0xf5, 0x57,
+ 0x45, 0x5d, 0xf0, 0xe4, 0x90, 0x70, 0x13, 0xf0, 0xe5, 0x56, 0xf4,
+ 0x60, 0x2a, 0x90, 0x70, 0x25, 0xe0, 0x44, 0x01, 0xf0, 0x90, 0x02,
+ 0x2c, 0x74, 0xff, 0xf0, 0x22, 0xe4, 0xf5, 0x30, 0xd2, 0x4f, 0x90,
+ 0x70, 0x10, 0xe0, 0xf4, 0x60, 0x03, 0xe0, 0xf5, 0x30, 0xad, 0x57,
+ 0xaf, 0x56, 0x12, 0x02, 0x0a, 0x90, 0x04, 0x14, 0x74, 0x80, 0xf0,
+ 0x22, 0x22, 0xe5, 0x33, 0x45, 0x32, 0x60, 0x0a, 0xe5, 0x33, 0x15,
+ 0x33, 0x70, 0x0a, 0x15, 0x32, 0x80, 0x06, 0x75, 0x32, 0x03, 0x75,
+ 0x33, 0x1f, 0xe5, 0x33, 0x45, 0x32, 0x60, 0x03, 0x02, 0x15, 0x70,
+ 0x20, 0x1a, 0x03, 0x02, 0x15, 0x70, 0x74, 0xa0, 0x25, 0x31, 0xf5,
+ 0x82, 0xe4, 0x34, 0x4c, 0xf5, 0x83, 0xe0, 0x60, 0x7a, 0x7f, 0x7e,
+ 0x12, 0x15, 0xde, 0xef, 0x54, 0xfe, 0x44, 0x02, 0xfd, 0x7f, 0x7e,
+ 0x12, 0x15, 0xc4, 0xe5, 0x31, 0x7f, 0x00, 0x25, 0xe0, 0xfe, 0xef,
+ 0x24, 0x00, 0xf5, 0x82, 0x74, 0x4d, 0x3e, 0xaf, 0x82, 0x90, 0x4c,
+ 0xa8, 0xf0, 0xa3, 0xef, 0xf0, 0xe4, 0xf5, 0x56, 0xf5, 0x57, 0x7f,
+ 0x7f, 0x12, 0x15, 0xde, 0x90, 0x4c, 0xa8, 0xe0, 0xfa, 0xa3, 0xe0,
+ 0x25, 0x57, 0xf5, 0x82, 0xea, 0x35, 0x56, 0xf5, 0x83, 0xef, 0xf0,
+ 0x05, 0x57, 0xe5, 0x57, 0x70, 0x02, 0x05, 0x56, 0xc3, 0x94, 0x80,
+ 0xe5, 0x56, 0x94, 0x01, 0x40, 0xd8, 0x7f, 0x7e, 0x12, 0x15, 0xde,
+ 0xef, 0x44, 0x03, 0xfd, 0x7f, 0x7e, 0x12, 0x15, 0xc4, 0x74, 0xa0,
+ 0x25, 0x31, 0xf5, 0x82, 0xe4, 0x34, 0x4c, 0xf5, 0x83, 0xe4, 0xf0,
+ 0x05, 0x31, 0xe5, 0x31, 0xb4, 0x08, 0x03, 0xe4, 0xf5, 0x31, 0xe5,
+ 0x53, 0x70, 0x1a, 0x30, 0x60, 0x09, 0xb2, 0x4d, 0x30, 0x4d, 0x04,
+ 0x05, 0x46, 0xc2, 0x04, 0xe5, 0x4f, 0x45, 0x4e, 0x60, 0x08, 0xe5,
+ 0x4f, 0x15, 0x4f, 0x70, 0x02, 0x15, 0x4e, 0x22, 0x22, 0x30, 0x14,
+ 0x30, 0x90, 0x70, 0x19, 0xe0, 0x55, 0x3f, 0xff, 0x90, 0x70, 0x18,
+ 0xe0, 0x4f, 0xf5, 0x3f, 0x90, 0x02, 0x29, 0xe0, 0xff, 0x90, 0x70,
+ 0x19, 0xe0, 0xfe, 0xef, 0x5e, 0x90, 0x02, 0x29, 0xf0, 0x30, 0x47,
+ 0x04, 0xaf, 0x3f, 0x80, 0x04, 0xe5, 0x3f, 0xf4, 0xff, 0x90, 0x02,
+ 0x28, 0xef, 0xf0, 0xc2, 0x14, 0x22, 0x90, 0x10, 0x1c, 0xed, 0xf0,
+ 0xa3, 0xef, 0xf0, 0xa3, 0x74, 0x0a, 0xf0, 0x90, 0x10, 0x1c, 0xe0,
+ 0xf5, 0x58, 0x90, 0x10, 0x1e, 0xe0, 0x20, 0xe1, 0xf3, 0x22, 0x90,
+ 0x10, 0x1d, 0xef, 0xf0, 0xa3, 0x74, 0x0b, 0xf0, 0x90, 0x10, 0x1c,
+ 0xe0, 0xf5, 0x58, 0x90, 0x10, 0x1e, 0xe0, 0x20, 0xe1, 0xf3, 0xaf,
+ 0x58, 0x22, 0xc2, 0x4b, 0xc2, 0x4c, 0xe5, 0x44, 0x12, 0x01, 0xe4,
+ 0x16, 0x19, 0x00, 0x16, 0xa7, 0x04, 0x16, 0xa3, 0x08, 0x16, 0x83,
+ 0x10, 0x16, 0x2d, 0x20, 0x16, 0x4d, 0x60, 0x16, 0x5e, 0xa0, 0x00,
+ 0x00, 0x16, 0xa9, 0x85, 0x48, 0x43, 0x85, 0x4a, 0x42, 0x85, 0x4c,
+ 0x5e, 0xe5, 0x47, 0x64, 0x06, 0x60, 0x03, 0x02, 0x16, 0xa9, 0x80,
+ 0x1b, 0xe5, 0x48, 0xc4, 0x54, 0x0f, 0xf5, 0x43, 0xe5, 0x4a, 0xc4,
+ 0x54, 0x0f, 0xf5, 0x42, 0xe5, 0x4c, 0xc4, 0x54, 0x0f, 0xf5, 0x5e,
+ 0xe5, 0x47, 0x64, 0x06, 0x70, 0x61, 0x53, 0x43, 0x0f, 0x80, 0x5c,
+ 0x85, 0x49, 0x43, 0x85, 0x4b, 0x42, 0x85, 0x4d, 0x5e, 0xe5, 0x47,
+ 0x64, 0x06, 0x70, 0x4d, 0x80, 0x1b, 0xe5, 0x49, 0xc4, 0x54, 0x0f,
+ 0xf5, 0x43, 0xe5, 0x4b, 0xc4, 0x54, 0x0f, 0xf5, 0x42, 0xe5, 0x4d,
+ 0xc4, 0x54, 0x0f, 0xf5, 0x5e, 0xe5, 0x47, 0x64, 0x06, 0x70, 0x30,
+ 0xe5, 0x43, 0x54, 0x0f, 0x44, 0x10, 0xf5, 0x43, 0x80, 0x26, 0xe5,
+ 0x47, 0x64, 0x04, 0x60, 0x05, 0xe5, 0x47, 0xb4, 0x05, 0x06, 0x43,
+ 0x5e, 0x04, 0x75, 0x42, 0x09, 0xe5, 0x47, 0xb4, 0x06, 0x10, 0xe5,
+ 0x43, 0x54, 0x0f, 0x44, 0x30, 0xf5, 0x43, 0x80, 0x06, 0xd2, 0x4b,
+ 0x80, 0x02, 0xd2, 0x4c, 0xe4, 0xf5, 0x27, 0xe5, 0x42, 0xc4, 0x54,
+ 0xf0, 0xff, 0xe5, 0x43, 0x54, 0x0f, 0x4f, 0xf5, 0x5f, 0xd2, 0x60,
+ 0x22, 0xd2, 0x15, 0xe5, 0x47, 0x24, 0xf5, 0x60, 0x0b, 0x24, 0xcb,
+ 0x60, 0x07, 0x24, 0x40, 0x70, 0x06, 0xc2, 0x15, 0x22, 0x12, 0x1a,
+ 0x3a, 0x12, 0x16, 0xde, 0xc2, 0x15, 0xc2, 0xaf, 0xc2, 0x04, 0xd2,
+ 0xaf, 0x22, 0xc2, 0xaf, 0x90, 0x04, 0x14, 0xe0, 0x54, 0x0e, 0x60,
+ 0x04, 0xd2, 0x18, 0x80, 0x08, 0xe5, 0x4e, 0x45, 0x4f, 0x24, 0xff,
+ 0x92, 0x18, 0xd2, 0xaf, 0x90, 0x04, 0x14, 0xe0, 0xa2, 0xe4, 0x92,
+ 0x19, 0x74, 0x1e, 0xf0, 0xe5, 0x5f, 0x54, 0x0f, 0xf5, 0x2d, 0xe5,
+ 0x27, 0x70, 0x13, 0x30, 0x18, 0x05, 0xe5, 0x5f, 0x20, 0xe5, 0x0b,
+ 0x30, 0x19, 0x19, 0xe5, 0x5f, 0x54, 0x30, 0xff, 0xbf, 0x30, 0x11,
+ 0xe5, 0x27, 0x70, 0x05, 0x75, 0x27, 0x0c, 0x80, 0x02, 0x15, 0x27,
+ 0xd2, 0x6c, 0xd2, 0x6d, 0x80, 0x0f, 0xe5, 0x5f, 0x30, 0xe6, 0x06,
+ 0xc2, 0x6c, 0xd2, 0x6d, 0x80, 0x04, 0xd2, 0x6c, 0xc2, 0x6d, 0xe5,
+ 0x47, 0x64, 0x03, 0x70, 0x21, 0x30, 0x4b, 0x06, 0xc2, 0x6c, 0xd2,
+ 0x6d, 0x80, 0x18, 0xe5, 0x27, 0x70, 0x03, 0x30, 0x4c, 0x11, 0xc2,
+ 0x4c, 0xe5, 0x27, 0x70, 0x05, 0x75, 0x27, 0x07, 0x80, 0x02, 0x15,
+ 0x27, 0xd2, 0x6c, 0xd2, 0x6d, 0xe5, 0x47, 0xb4, 0x09, 0x14, 0xe5,
+ 0x44, 0x20, 0xe3, 0x0b, 0xe5, 0x3a, 0x64, 0x02, 0x60, 0x05, 0xe5,
+ 0x3a, 0xb4, 0x03, 0x04, 0xc2, 0x6c, 0xd2, 0x6d, 0xe5, 0x47, 0xb4,
+ 0x0a, 0x13, 0xe5, 0x3a, 0xb4, 0x01, 0x06, 0xc2, 0x6c, 0xd2, 0x6d,
+ 0x80, 0x08, 0xe5, 0x3a, 0x70, 0x04, 0xd2, 0x6c, 0xc2, 0x6d, 0x20,
+ 0x69, 0x07, 0xe5, 0x5e, 0x20, 0xe0, 0x02, 0xb2, 0x68, 0x20, 0x6b,
+ 0x07, 0xe5, 0x5e, 0x20, 0xe1, 0x02, 0xb2, 0x6a, 0x20, 0x6d, 0x07,
+ 0xe5, 0x5e, 0x20, 0xe2, 0x02, 0xb2, 0x6c, 0x75, 0x2e, 0x40, 0x20,
+ 0x69, 0x04, 0xa2, 0x68, 0x80, 0x26, 0x30, 0x68, 0x06, 0xe5, 0x46,
+ 0xa2, 0xe2, 0x80, 0x1d, 0xe5, 0x5e, 0x20, 0xe0, 0x04, 0x7f, 0x01,
0x80, 0x02, 0x7f, 0x00, 0xe5, 0x46, 0x54, 0xf0, 0xfe, 0xbe, 0xf0,
0x04, 0x7e, 0x01, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f, 0x24, 0xff,
- 0x92, 0x75, 0x92, 0x74, 0x20, 0x6d, 0x04, 0xa2, 0x6c, 0x80, 0x26,
- 0xe5, 0x47, 0x64, 0x0a, 0x70, 0x22, 0x30, 0x6c, 0x06, 0xe5, 0x46,
- 0xa2, 0xe3, 0x80, 0x17, 0xe5, 0x3a, 0xb4, 0x01, 0x06, 0xe5, 0x46,
- 0xa2, 0xe3, 0x80, 0x53, 0xe5, 0x46, 0x20, 0xe4, 0x03, 0x30, 0xe5,
- 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x80, 0x45, 0x30, 0x6c, 0x06, 0xe5,
- 0x46, 0xa2, 0xe2, 0x80, 0x3c, 0x30, 0x19, 0x1c, 0xe5, 0x5e, 0x20,
- 0xe2, 0x04, 0x7f, 0x01, 0x80, 0x02, 0x7f, 0x00, 0xe5, 0x2f, 0xb4,
- 0x19, 0x04, 0x7e, 0x01, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f, 0x24,
- 0xff, 0x80, 0x1d, 0xe5, 0x5e, 0x20, 0xe2, 0x04, 0x7f, 0x01, 0x80,
- 0x02, 0x7f, 0x00, 0xe5, 0x46, 0x54, 0xf0, 0xfe, 0xbe, 0xf0, 0x04,
- 0x7e, 0x01, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x6f, 0x24, 0xff, 0x92,
- 0x71, 0x92, 0x70, 0x90, 0x10, 0x00, 0xe0, 0x90, 0x10, 0x2c, 0xf0,
- 0x90, 0x10, 0x03, 0xe0, 0xc3, 0x94, 0x30, 0x40, 0x14, 0xa2, 0x71,
- 0x92, 0x77, 0xa2, 0x70, 0x92, 0x76, 0xe5, 0x2e, 0x13, 0x13, 0x54,
- 0x3f, 0xf5, 0x2e, 0xc2, 0x77, 0xd2, 0x76, 0x90, 0x10, 0x2f, 0xe5,
- 0x2e, 0xf0, 0xe5, 0x47, 0x64, 0x06, 0x70, 0x39, 0x90, 0x02, 0x29,
- 0xe0, 0x54, 0xfe, 0xf0, 0xe5, 0x43, 0xc4, 0x54, 0x0f, 0x14, 0x60,
- 0x0c, 0x24, 0xfe, 0x60, 0x0c, 0x24, 0x03, 0x70, 0x13, 0xc2, 0x38,
- 0x80, 0x0f, 0xd2, 0x38, 0x80, 0x0b, 0xe5, 0x46, 0x30, 0xe2, 0x03,
- 0xd3, 0x80, 0x01, 0xc3, 0x92, 0x38, 0x30, 0x47, 0x05, 0xaf, 0x27,
- 0x02, 0x19, 0x0f, 0xe5, 0x27, 0xf4, 0xff, 0x02, 0x19, 0x0f, 0xe5,
- 0x47, 0x64, 0x07, 0x60, 0x0f, 0xe5, 0x47, 0x64, 0x08, 0x60, 0x09,
- 0xe5, 0x47, 0x64, 0x09, 0x60, 0x03, 0x02, 0x18, 0x8e, 0x90, 0x02,
- 0x29, 0xe0, 0x54, 0xfc, 0xf0, 0xe5, 0x3a, 0x14, 0x60, 0x22, 0x14,
- 0x60, 0x25, 0x14, 0x60, 0x2d, 0x24, 0xfc, 0x60, 0x49, 0x24, 0xf9,
- 0x60, 0x14, 0x24, 0x0e, 0x70, 0x50, 0xe5, 0x46, 0x13, 0x13, 0x54,
- 0x3f, 0x75, 0xf0, 0x03, 0x84, 0xe5, 0xf0, 0x24, 0xff, 0x80, 0x3a,
- 0xd2, 0x39, 0xc2, 0x38, 0x80, 0x3e, 0xe5, 0x46, 0x30, 0xe2, 0x03,
- 0xd3, 0x80, 0x1d, 0xc3, 0x80, 0x1a, 0xe5, 0x46, 0x30, 0xe2, 0x0d,
- 0x54, 0x38, 0xc3, 0x94, 0x30, 0x50, 0x06, 0x7e, 0x00, 0x7f, 0x01,
- 0x80, 0x04, 0x7e, 0x00, 0x7f, 0x00, 0xee, 0x4f, 0x24, 0xff, 0x92,
- 0x38, 0xc2, 0x39, 0x80, 0x13, 0xe5, 0x46, 0x30, 0xe2, 0x03, 0xd3,
- 0x80, 0x01, 0xc3, 0x92, 0x39, 0xc2, 0x38, 0x80, 0x04, 0xc2, 0x38,
- 0xc2, 0x39, 0x30, 0x47, 0x04, 0xaf, 0x27, 0x80, 0x04, 0xe5, 0x27,
- 0xf4, 0xff, 0x02, 0x19, 0x0f, 0xe5, 0x47, 0x64, 0x0c, 0x60, 0x06,
- 0xe5, 0x47, 0x64, 0x0b, 0x70, 0x7a, 0x90, 0x02, 0x29, 0xe0, 0x54,
- 0xfd, 0xf0, 0xe5, 0x3a, 0x14, 0x60, 0x20, 0x14, 0x60, 0x21, 0x14,
- 0x60, 0x2b, 0x24, 0xfc, 0x60, 0x45, 0x24, 0xf9, 0x60, 0x12, 0x24,
- 0x0e, 0x70, 0x4a, 0xe5, 0x46, 0x13, 0x13, 0x54, 0x3f, 0x75, 0xf0,
- 0x03, 0x84, 0xe5, 0xf0, 0x80, 0x29, 0xd2, 0x39, 0x80, 0x3a, 0xe5,
- 0x46, 0x30, 0xe2, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92, 0x39, 0x80,
- 0x2d, 0xe5, 0x46, 0x30, 0xe2, 0x0d, 0x54, 0x38, 0xc3, 0x94, 0x30,
- 0x50, 0x06, 0x7e, 0x00, 0x7f, 0x01, 0x80, 0x04, 0x7e, 0x00, 0x7f,
- 0x00, 0xee, 0x4f, 0x24, 0xff, 0x92, 0x39, 0x80, 0x0f, 0xe5, 0x46,
- 0x30, 0xe2, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92, 0x39, 0x80, 0x02,
- 0xc2, 0x39, 0x30, 0x47, 0x04, 0xaf, 0x27, 0x80, 0x04, 0xe5, 0x27,
- 0xf4, 0xff, 0x90, 0x02, 0x28, 0xef, 0xf0, 0x22, 0xe5, 0x47, 0xb4,
- 0x0b, 0x10, 0x90, 0x02, 0x29, 0xe0, 0x54, 0xeb, 0xf0, 0xe5, 0x27,
- 0x54, 0xeb, 0x45, 0x45, 0xf5, 0x27, 0x22, 0xe4, 0x90, 0x02, 0x29,
- 0xf0, 0x30, 0x47, 0x04, 0xaf, 0x45, 0x80, 0x04, 0xe5, 0x45, 0xf4,
- 0xff, 0x90, 0x02, 0x28, 0xef, 0xf0, 0x22, 0x8f, 0x50, 0xd2, 0x59,
- 0x22, 0x8f, 0x54, 0xd2, 0x58, 0x22, 0xe4, 0xf5, 0x37, 0xc2, 0xaf,
- 0xe5, 0x51, 0x14, 0x60, 0x4a, 0x14, 0x60, 0x6b, 0x24, 0x02, 0x60,
- 0x03, 0x02, 0x1a, 0xd5, 0xd2, 0x59, 0x75, 0x55, 0x01, 0x20, 0x1a,
- 0x1c, 0x90, 0x02, 0x08, 0xe0, 0x54, 0xfe, 0xf0, 0xe0, 0x20, 0xe1,
- 0x23, 0x90, 0x04, 0x34, 0xe0, 0xb4, 0x02, 0x1c, 0xa3, 0xe0, 0xb4,
- 0x02, 0x17, 0xa3, 0xe0, 0xb4, 0x02, 0x12, 0x7f, 0x20, 0x12, 0x19,
- 0x40, 0x90, 0x10, 0x04, 0xe0, 0x54, 0xf3, 0xf0, 0x75, 0x51, 0x01,
- 0x02, 0x1a, 0xd5, 0xe5, 0x50, 0x70, 0x06, 0x75, 0x37, 0x03, 0x02,
- 0x1a, 0xd5, 0x90, 0x12, 0x00, 0xe0, 0x54, 0x03, 0x70, 0x15, 0x7f,
- 0x20, 0x12, 0x19, 0x40, 0x20, 0x1a, 0x07, 0x90, 0x02, 0x08, 0xe0,
- 0x54, 0xfb, 0xf0, 0x75, 0x51, 0x02, 0x02, 0x1a, 0xd5, 0xe5, 0x50,
- 0x70, 0x03, 0x02, 0x1a, 0xd0, 0x20, 0x1a, 0x15, 0x90, 0x02, 0x08,
- 0xe0, 0x30, 0xe3, 0x03, 0x02, 0x1a, 0xcc, 0x90, 0x04, 0x37, 0xe0,
- 0x64, 0x22, 0x60, 0x03, 0x02, 0x1a, 0xcc, 0x90, 0x12, 0x04, 0x74,
- 0x0a, 0xf0, 0xe5, 0x58, 0x30, 0xe3, 0x15, 0xe4, 0x90, 0x05, 0x00,
+ 0x92, 0x73, 0x92, 0x72, 0x20, 0x6b, 0x04, 0xa2, 0x6a, 0x80, 0x26,
+ 0x30, 0x6a, 0x06, 0xe5, 0x46, 0xa2, 0xe2, 0x80, 0x1d, 0xe5, 0x5e,
+ 0x20, 0xe1, 0x04, 0x7f, 0x01, 0x80, 0x02, 0x7f, 0x00, 0xe5, 0x46,
+ 0x54, 0xf0, 0xfe, 0xbe, 0xf0, 0x04, 0x7e, 0x01, 0x80, 0x02, 0x7e,
+ 0x00, 0xee, 0x6f, 0x24, 0xff, 0x92, 0x75, 0x92, 0x74, 0x20, 0x6d,
+ 0x04, 0xa2, 0x6c, 0x80, 0x26, 0xe5, 0x47, 0x64, 0x0a, 0x70, 0x22,
+ 0x30, 0x6c, 0x06, 0xe5, 0x46, 0xa2, 0xe3, 0x80, 0x17, 0xe5, 0x3a,
+ 0xb4, 0x01, 0x06, 0xe5, 0x46, 0xa2, 0xe3, 0x80, 0x34, 0xe5, 0x46,
+ 0x20, 0xe4, 0x03, 0x30, 0xe5, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x80,
+ 0x26, 0x30, 0x6c, 0x06, 0xe5, 0x46, 0xa2, 0xe2, 0x80, 0x1d, 0xe5,
+ 0x5e, 0x20, 0xe2, 0x04, 0x7f, 0x01, 0x80, 0x02, 0x7f, 0x00, 0xe5,
+ 0x46, 0x54, 0xf0, 0xfe, 0xbe, 0xf0, 0x04, 0x7e, 0x01, 0x80, 0x02,
+ 0x7e, 0x00, 0xee, 0x6f, 0x24, 0xff, 0x92, 0x71, 0x92, 0x70, 0x90,
+ 0x10, 0x00, 0xe0, 0x90, 0x10, 0x2c, 0xf0, 0x90, 0x10, 0x03, 0xe0,
+ 0xc3, 0x94, 0x30, 0x40, 0x19, 0xe0, 0x64, 0x32, 0x60, 0x14, 0xa2,
+ 0x71, 0x92, 0x77, 0xa2, 0x70, 0x92, 0x76, 0xe5, 0x2e, 0x13, 0x13,
+ 0x54, 0x3f, 0xf5, 0x2e, 0xc2, 0x77, 0xd2, 0x76, 0x30, 0x17, 0x0d,
+ 0x53, 0x2e, 0xf0, 0xe5, 0x2e, 0x45, 0x5d, 0x90, 0x10, 0x2f, 0xf0,
+ 0x80, 0x06, 0x90, 0x10, 0x2f, 0xe5, 0x2e, 0xf0, 0xe5, 0x47, 0x64,
+ 0x06, 0x70, 0x47, 0x90, 0x02, 0x28, 0xe0, 0x30, 0x47, 0x03, 0xff,
+ 0x80, 0x02, 0xf4, 0xff, 0x8f, 0x3f, 0x90, 0x02, 0x29, 0xe0, 0x54,
+ 0xfe, 0xf0, 0xe5, 0x43, 0xc4, 0x54, 0x0f, 0x14, 0x60, 0x0c, 0x24,
+ 0xfe, 0x60, 0x0c, 0x24, 0x03, 0x70, 0x13, 0xc2, 0xf8, 0x80, 0x0f,
+ 0xd2, 0xf8, 0x80, 0x0b, 0xe5, 0x46, 0x30, 0xe2, 0x03, 0xd3, 0x80,
+ 0x01, 0xc3, 0x92, 0xf8, 0x30, 0x47, 0x05, 0xaf, 0x3f, 0x02, 0x1a,
+ 0x34, 0xe5, 0x3f, 0xf4, 0xff, 0x02, 0x1a, 0x34, 0xe5, 0x47, 0x64,
+ 0x07, 0x60, 0x0f, 0xe5, 0x47, 0x64, 0x08, 0x60, 0x09, 0xe5, 0x47,
+ 0x64, 0x09, 0x60, 0x03, 0x02, 0x19, 0xa2, 0x90, 0x02, 0x28, 0xe0,
+ 0x30, 0x47, 0x03, 0xff, 0x80, 0x02, 0xf4, 0xff, 0x8f, 0x3f, 0x90,
+ 0x02, 0x29, 0xe0, 0x54, 0xfc, 0xf0, 0xe5, 0x3a, 0x14, 0x60, 0x22,
+ 0x14, 0x60, 0x25, 0x14, 0x60, 0x2d, 0x24, 0xfc, 0x60, 0x49, 0x24,
+ 0xf9, 0x60, 0x14, 0x24, 0x0e, 0x70, 0x50, 0xe5, 0x46, 0x13, 0x13,
+ 0x54, 0x3f, 0x75, 0xf0, 0x03, 0x84, 0xe5, 0xf0, 0x24, 0xff, 0x80,
+ 0x3a, 0xd2, 0xf9, 0xc2, 0xf8, 0x80, 0x3e, 0xe5, 0x46, 0x30, 0xe2,
+ 0x03, 0xd3, 0x80, 0x1d, 0xc3, 0x80, 0x1a, 0xe5, 0x46, 0x30, 0xe2,
+ 0x0d, 0x54, 0x38, 0xc3, 0x94, 0x30, 0x50, 0x06, 0x7e, 0x00, 0x7f,
+ 0x01, 0x80, 0x04, 0x7e, 0x00, 0x7f, 0x00, 0xee, 0x4f, 0x24, 0xff,
+ 0x92, 0xf8, 0xc2, 0xf9, 0x80, 0x13, 0xe5, 0x46, 0x30, 0xe2, 0x03,
+ 0xd3, 0x80, 0x01, 0xc3, 0x92, 0xf9, 0xc2, 0xf8, 0x80, 0x04, 0xc2,
+ 0xf8, 0xc2, 0xf9, 0x30, 0x47, 0x04, 0xaf, 0x3f, 0x80, 0x04, 0xe5,
+ 0x3f, 0xf4, 0xff, 0x02, 0x1a, 0x34, 0xe5, 0x47, 0x64, 0x0c, 0x60,
+ 0x09, 0xe5, 0x47, 0x64, 0x0b, 0x60, 0x03, 0x02, 0x1a, 0x39, 0x90,
+ 0x02, 0x28, 0xe0, 0x30, 0x47, 0x03, 0xff, 0x80, 0x02, 0xf4, 0xff,
+ 0x8f, 0x3f, 0x90, 0x02, 0x29, 0xe0, 0x54, 0xfd, 0xf0, 0xe5, 0x3a,
+ 0x14, 0x60, 0x20, 0x14, 0x60, 0x21, 0x14, 0x60, 0x2b, 0x24, 0xfc,
+ 0x60, 0x45, 0x24, 0xf9, 0x60, 0x12, 0x24, 0x0e, 0x70, 0x4a, 0xe5,
+ 0x46, 0x13, 0x13, 0x54, 0x3f, 0x75, 0xf0, 0x03, 0x84, 0xe5, 0xf0,
+ 0x80, 0x29, 0xd2, 0xf9, 0x80, 0x3a, 0xe5, 0x46, 0x30, 0xe2, 0x03,
+ 0xd3, 0x80, 0x01, 0xc3, 0x92, 0xf9, 0x80, 0x2d, 0xe5, 0x46, 0x30,
+ 0xe2, 0x0d, 0x54, 0x38, 0xc3, 0x94, 0x30, 0x50, 0x06, 0x7e, 0x00,
+ 0x7f, 0x01, 0x80, 0x04, 0x7e, 0x00, 0x7f, 0x00, 0xee, 0x4f, 0x24,
+ 0xff, 0x92, 0xf9, 0x80, 0x0f, 0xe5, 0x46, 0x30, 0xe2, 0x03, 0xd3,
+ 0x80, 0x01, 0xc3, 0x92, 0xf9, 0x80, 0x02, 0xc2, 0xf9, 0x30, 0x47,
+ 0x04, 0xaf, 0x3f, 0x80, 0x04, 0xe5, 0x3f, 0xf4, 0xff, 0x90, 0x02,
+ 0x28, 0xef, 0xf0, 0x22, 0xe5, 0x47, 0xb4, 0x0b, 0x10, 0x90, 0x02,
+ 0x29, 0xe0, 0x54, 0xeb, 0xf0, 0xe5, 0x3f, 0x54, 0xeb, 0x45, 0x45,
+ 0xf5, 0x3f, 0x22, 0xe4, 0x90, 0x02, 0x29, 0xf0, 0x30, 0x47, 0x04,
+ 0xaf, 0x45, 0x80, 0x04, 0xe5, 0x45, 0xf4, 0xff, 0x90, 0x02, 0x28,
+ 0xef, 0xf0, 0x22, 0x8f, 0x50, 0xd2, 0x59, 0x22, 0x8f, 0x54, 0xd2,
+ 0x58, 0x22, 0xe4, 0xf5, 0x25, 0xc2, 0xaf, 0xe5, 0x51, 0x14, 0x60,
+ 0x4a, 0x14, 0x60, 0x6b, 0x24, 0x02, 0x60, 0x03, 0x02, 0x1b, 0xd0,
+ 0xd2, 0x59, 0x75, 0x55, 0x01, 0x90, 0x02, 0x08, 0xe0, 0x54, 0xfe,
+ 0xf0, 0xe0, 0x20, 0xe1, 0x23, 0x90, 0x04, 0x34, 0xe0, 0xb4, 0x02,
+ 0x1c, 0xa3, 0xe0, 0xb4, 0x02, 0x17, 0xa3, 0xe0, 0xb4, 0x02, 0x12,
+ 0x7f, 0x20, 0x12, 0x1a, 0x65, 0x90, 0x10, 0x04, 0xe0, 0x54, 0xf3,
+ 0xf0, 0x75, 0x51, 0x01, 0x02, 0x1b, 0xd0, 0xe5, 0x50, 0x60, 0x03,
+ 0x02, 0x1b, 0xd0, 0x75, 0x25, 0x03, 0x02, 0x1b, 0xd0, 0x90, 0x12,
+ 0x00, 0xe0, 0x54, 0x03, 0x70, 0x12, 0x7f, 0x20, 0x12, 0x1a, 0x65,
+ 0x90, 0x02, 0x08, 0xe0, 0x54, 0xfb, 0xf0, 0x75, 0x51, 0x02, 0x02,
+ 0x1b, 0xd0, 0xe5, 0x50, 0x60, 0x03, 0x02, 0x1b, 0xd0, 0x02, 0x1b,
+ 0xcb, 0x90, 0x02, 0x08, 0xe0, 0x30, 0xe3, 0x03, 0x02, 0x1b, 0xc7,
+ 0x90, 0x04, 0x37, 0xe0, 0x64, 0x22, 0x60, 0x03, 0x02, 0x1b, 0xc7,
+ 0x90, 0x12, 0x04, 0x74, 0x0a, 0xf0, 0xe5, 0x58, 0x30, 0xe3, 0x1c,
+ 0x90, 0x00, 0x02, 0xe0, 0x30, 0xe0, 0x15, 0xe4, 0x90, 0x05, 0x00,
0xf0, 0xa3, 0x74, 0x08, 0xf0, 0xa3, 0x74, 0x01, 0xf0, 0x74, 0x03,
- 0xf0, 0x7f, 0x01, 0x12, 0x03, 0x30, 0x90, 0x13, 0x28, 0xe0, 0x90,
+ 0xf0, 0x7f, 0x01, 0x12, 0x02, 0x94, 0x90, 0x13, 0x28, 0xe0, 0x90,
0x70, 0x1a, 0xf0, 0x90, 0x13, 0x29, 0xe0, 0x90, 0x70, 0x1b, 0xf0,
0x90, 0x13, 0x2b, 0xe0, 0x90, 0x70, 0x22, 0xf0, 0x90, 0x13, 0x28,
0xe0, 0x54, 0xf0, 0xf0, 0xa3, 0xe0, 0x54, 0xf0, 0xf0, 0x90, 0x13,
0x2b, 0xe0, 0x54, 0xcc, 0xf0, 0xe5, 0x58, 0x30, 0xe3, 0x17, 0xe5,
- 0x34, 0x70, 0x13, 0xe5, 0x3c, 0xf4, 0x90, 0x13, 0x2a, 0x60, 0x05,
+ 0x30, 0x70, 0x13, 0xe5, 0x3c, 0xf4, 0x90, 0x13, 0x2a, 0x60, 0x05,
0xe0, 0x54, 0xf3, 0x80, 0x11, 0xe0, 0x54, 0xfb, 0xf0, 0x80, 0x14,
0xe5, 0x3c, 0xf4, 0x90, 0x13, 0x2a, 0x60, 0x08, 0xe0, 0x54, 0xf2,
- 0x45, 0x3c, 0xf0, 0x80, 0x04, 0xe0, 0x54, 0xfa, 0xf0, 0x20, 0x1a,
- 0x07, 0x90, 0x04, 0x01, 0xe0, 0x44, 0x10, 0xf0, 0xe5, 0x34, 0xd3,
- 0x94, 0x01, 0x40, 0x09, 0xe5, 0x30, 0x70, 0x05, 0x75, 0x8c, 0x40,
- 0x80, 0x03, 0x75, 0x8c, 0x80, 0x90, 0x04, 0x01, 0xe0, 0x54, 0xfd,
- 0xf0, 0x20, 0x1a, 0x07, 0x90, 0x12, 0x04, 0xe0, 0x44, 0x04, 0xf0,
- 0xe5, 0x58, 0x30, 0xe0, 0x06, 0x90, 0x01, 0x0d, 0xe0, 0xf5, 0x31,
- 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40, 0x2c, 0x90, 0x01, 0x0d, 0xe0,
- 0x44, 0x01, 0xf0, 0xe5, 0x58, 0x20, 0xe3, 0x0c, 0xe5, 0x34, 0xb4,
- 0x03, 0x07, 0x90, 0x12, 0x04, 0xe0, 0x54, 0xfd, 0xf0, 0x20, 0x02,
- 0x11, 0x20, 0x03, 0x0e, 0x90, 0x01, 0x0d, 0xe0, 0x54, 0xfb, 0xf0,
- 0x90, 0x01, 0x0c, 0xe0, 0x54, 0xfd, 0xf0, 0x75, 0x37, 0x01, 0x75,
- 0x55, 0x02, 0xe4, 0xf5, 0x51, 0x80, 0x09, 0xe5, 0x50, 0x70, 0x05,
- 0x75, 0x37, 0x03, 0xf5, 0x51, 0xe5, 0x37, 0x60, 0x18, 0xc2, 0x01,
- 0xe4, 0xf5, 0x51, 0xc2, 0x59, 0x20, 0x1a, 0x0e, 0xad, 0x37, 0xaf,
- 0x40, 0x12, 0x1b, 0xfa, 0xe5, 0x37, 0xb4, 0x03, 0x02, 0xd2, 0x03,
- 0xd2, 0xaf, 0x22, 0xc2, 0xaf, 0x30, 0x01, 0x0e, 0xe4, 0xf5, 0x51,
- 0xc2, 0x59, 0xc2, 0x01, 0x7d, 0x02, 0xaf, 0x40, 0x12, 0x1b, 0xfa,
- 0xe5, 0x52, 0x14, 0x60, 0x56, 0x14, 0x60, 0x33, 0x24, 0x02, 0x60,
- 0x03, 0x02, 0x1b, 0xf7, 0xe5, 0x34, 0xd3, 0x94, 0x01, 0x40, 0x1f,
- 0x90, 0x01, 0x0c, 0xe0, 0x44, 0x02, 0xf0, 0xa3, 0xe0, 0x44, 0x04,
- 0xf0, 0x90, 0x12, 0x04, 0xe0, 0x44, 0x02, 0xf0, 0x7f, 0x32, 0x12,
- 0x03, 0x30, 0x90, 0x01, 0x0d, 0xe0, 0x54, 0xfe, 0xf0, 0x75, 0x52,
- 0x02, 0x75, 0x55, 0x03, 0xe5, 0x58, 0x30, 0xe0, 0x06, 0x90, 0x01,
- 0x0d, 0xe5, 0x31, 0xf0, 0x90, 0x12, 0x04, 0xe0, 0x54, 0xfb, 0xf0,
- 0x7f, 0x20, 0x12, 0x19, 0x45, 0x75, 0x52, 0x01, 0x75, 0x55, 0x03,
- 0x02, 0x1b, 0xf7, 0xe5, 0x54, 0x60, 0x03, 0x02, 0x1b, 0xf7, 0x90,
- 0x04, 0x01, 0xe0, 0x44, 0x0e, 0xf0, 0x20, 0x1a, 0x04, 0xe0, 0x54,
+ 0x45, 0x3c, 0xf0, 0x80, 0x04, 0xe0, 0x54, 0xfa, 0xf0, 0x90, 0x04,
+ 0x01, 0xe0, 0x44, 0x10, 0xf0, 0x75, 0x8c, 0x80, 0xe0, 0x54, 0xfd,
+ 0xf0, 0x90, 0x12, 0x04, 0xe0, 0x44, 0x04, 0xf0, 0xe5, 0x58, 0x30,
+ 0xe0, 0x06, 0x90, 0x01, 0x0d, 0xe0, 0xf5, 0x2f, 0xe5, 0x30, 0xd3,
+ 0x94, 0x01, 0x40, 0x17, 0x20, 0x02, 0x14, 0x20, 0x03, 0x11, 0x30,
+ 0x4f, 0x0e, 0x90, 0x01, 0x0d, 0xe0, 0x54, 0xfb, 0xf0, 0x90, 0x01,
+ 0x0c, 0xe0, 0x54, 0xfd, 0xf0, 0x75, 0x25, 0x01, 0x75, 0x55, 0x02,
+ 0xe4, 0xf5, 0x51, 0x80, 0x09, 0xe5, 0x50, 0x70, 0x05, 0x75, 0x25,
+ 0x03, 0xf5, 0x51, 0xe5, 0x25, 0x60, 0x15, 0xc2, 0x01, 0xe4, 0xf5,
+ 0x51, 0xc2, 0x59, 0xad, 0x25, 0xaf, 0x40, 0x12, 0x1c, 0xe5, 0xe5,
+ 0x25, 0xb4, 0x03, 0x02, 0xd2, 0x03, 0xd2, 0xaf, 0x22, 0xc2, 0xaf,
+ 0x30, 0x01, 0x0e, 0xe4, 0xf5, 0x51, 0xc2, 0x59, 0xc2, 0x01, 0x7d,
+ 0x02, 0xaf, 0x40, 0x12, 0x1c, 0xe5, 0xe5, 0x52, 0x14, 0x60, 0x48,
+ 0x14, 0x60, 0x25, 0x24, 0x02, 0x60, 0x03, 0x02, 0x1c, 0xe2, 0xe5,
+ 0x30, 0xd3, 0x94, 0x01, 0x40, 0x11, 0x90, 0x01, 0x0c, 0xe0, 0x44,
+ 0x02, 0xf0, 0xa3, 0xe0, 0x44, 0x04, 0xf0, 0x7f, 0x0a, 0x12, 0x02,
+ 0x94, 0x75, 0x52, 0x02, 0x75, 0x55, 0x03, 0xe5, 0x58, 0x30, 0xe0,
+ 0x06, 0x90, 0x01, 0x0d, 0xe5, 0x2f, 0xf0, 0x90, 0x12, 0x04, 0xe0,
+ 0x54, 0xfb, 0xf0, 0x7f, 0x20, 0x12, 0x1a, 0x6a, 0x75, 0x52, 0x01,
+ 0x75, 0x55, 0x03, 0x02, 0x1c, 0xe2, 0xe5, 0x54, 0x60, 0x03, 0x02,
+ 0x1c, 0xe2, 0x90, 0x04, 0x01, 0xe0, 0x44, 0x0e, 0xf0, 0xe0, 0x54,
0xef, 0xf0, 0xe4, 0xf5, 0x8c, 0xe5, 0x58, 0x54, 0x18, 0x60, 0x1e,
0x90, 0x70, 0x1a, 0xe0, 0x90, 0x13, 0x28, 0xf0, 0x90, 0x70, 0x1b,
0xe0, 0x90, 0x13, 0x29, 0xf0, 0xa3, 0x74, 0x05, 0xf0, 0x90, 0x70,
0x22, 0xe0, 0x90, 0x13, 0x2b, 0xf0, 0x80, 0x11, 0x90, 0x13, 0x28,
0xe0, 0x44, 0x0f, 0xf0, 0xa3, 0xe0, 0x44, 0x0f, 0xf0, 0xa3, 0xe0,
0x44, 0x05, 0xf0, 0x90, 0x12, 0x04, 0x74, 0x03, 0xf0, 0xe5, 0x58,
- 0x30, 0xe3, 0x16, 0x90, 0x05, 0x00, 0x74, 0xe2, 0xf0, 0xa3, 0x74,
- 0x08, 0xf0, 0xa3, 0x74, 0x01, 0xf0, 0x74, 0x03, 0xf0, 0x7f, 0x01,
- 0x12, 0x03, 0x30, 0x20, 0x1a, 0x07, 0x90, 0x02, 0x08, 0xe0, 0x44,
- 0x05, 0xf0, 0x90, 0x10, 0x04, 0xe0, 0x44, 0x0c, 0xf0, 0xe4, 0xf5,
- 0x52, 0xf5, 0x55, 0x30, 0x02, 0x09, 0xc2, 0x02, 0x7d, 0x01, 0xaf,
- 0x41, 0x12, 0x1b, 0xfa, 0x30, 0x03, 0x02, 0xc2, 0x03, 0xd2, 0xaf,
- 0x22, 0xef, 0xf4, 0x60, 0x2d, 0xe4, 0xfe, 0x74, 0x14, 0x2e, 0xf5,
- 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83, 0xe0, 0xb4, 0xff, 0x19, 0x74,
- 0x14, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83, 0xef, 0xf0,
- 0x74, 0x1c, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83, 0xed,
- 0xf0, 0x22, 0x0e, 0xbe, 0x04, 0xd5, 0x22, 0x22, 0x22, 0x30, 0x1a,
- 0x77, 0x90, 0x04, 0x37, 0xe0, 0x20, 0xe5, 0x6c, 0x90, 0x04, 0x28,
- 0xe0, 0xf5, 0x38, 0xa3, 0xe0, 0xf5, 0x37, 0xf5, 0x39, 0xe4, 0xf5,
- 0x25, 0xe5, 0x39, 0x75, 0xf0, 0x80, 0xa4, 0x24, 0x00, 0xff, 0xe5,
- 0xf0, 0x34, 0x80, 0xfe, 0xe5, 0x37, 0x65, 0x39, 0x70, 0x05, 0xfc,
- 0x7d, 0x28, 0x80, 0x04, 0x7c, 0x00, 0x7d, 0x00, 0xef, 0x2d, 0xff,
- 0xee, 0x3c, 0xfe, 0x12, 0x1c, 0xa9, 0x50, 0x07, 0x90, 0x01, 0x14,
- 0xe0, 0x44, 0x02, 0xf0, 0xe5, 0x39, 0x65, 0x38, 0x60, 0x10, 0xe4,
- 0x25, 0x39, 0xff, 0xe4, 0x34, 0x80, 0x8f, 0x82, 0xf5, 0x83, 0xe0,
- 0xf5, 0x39, 0x80, 0xbb, 0x90, 0x04, 0x10, 0x74, 0x01, 0xf0, 0x90,
- 0x04, 0x28, 0xe5, 0x38, 0xf0, 0xa3, 0xe5, 0x37, 0xf0, 0x90, 0x04,
- 0x11, 0x74, 0x01, 0xf0, 0x80, 0x8d, 0xc2, 0x06, 0xd2, 0x1b, 0x22,
- 0xe5, 0x25, 0xc3, 0x94, 0x06, 0x50, 0x19, 0x8f, 0x82, 0x8e, 0x83,
- 0xe0, 0xb4, 0xff, 0x07, 0x05, 0x25, 0xe4, 0xf5, 0x24, 0x80, 0x2e,
- 0xe4, 0xf5, 0x25, 0x8f, 0x82, 0x8e, 0x83, 0xf0, 0x80, 0x24, 0xe5,
- 0x24, 0x75, 0xf0, 0x06, 0x84, 0x74, 0x08, 0x25, 0xf0, 0xf5, 0x82,
- 0xe4, 0x34, 0x10, 0xf5, 0x83, 0xe0, 0xfd, 0x8f, 0x82, 0x8e, 0x83,
- 0xe0, 0x6d, 0x70, 0x06, 0x05, 0x25, 0x05, 0x24, 0x80, 0x03, 0xe4,
- 0xf5, 0x25, 0x0f, 0xbf, 0x00, 0x01, 0x0e, 0xef, 0x54, 0x7f, 0x60,
- 0x07, 0xe5, 0x25, 0xc3, 0x94, 0x2a, 0x40, 0xab, 0xe5, 0x25, 0xb4,
- 0x2a, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x22, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x30, 0xe3, 0x1d, 0x90, 0x00, 0x02, 0xe0, 0x30, 0xe0, 0x16, 0x90,
+ 0x05, 0x00, 0x74, 0xe2, 0xf0, 0xa3, 0x74, 0x08, 0xf0, 0xa3, 0x74,
+ 0x01, 0xf0, 0x74, 0x03, 0xf0, 0x7f, 0x01, 0x12, 0x02, 0x94, 0x90,
+ 0x02, 0x08, 0xe0, 0x44, 0x05, 0xf0, 0x90, 0x10, 0x04, 0xe0, 0x44,
+ 0x0c, 0xf0, 0xe4, 0xf5, 0x52, 0xf5, 0x55, 0x30, 0x02, 0x09, 0xc2,
+ 0x02, 0x7d, 0x01, 0xaf, 0x41, 0x12, 0x1c, 0xe5, 0x30, 0x03, 0x02,
+ 0xc2, 0x03, 0xd2, 0xaf, 0x22, 0xef, 0xf4, 0x60, 0x2d, 0xe4, 0xfe,
+ 0x74, 0x14, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70, 0xf5, 0x83, 0xe0,
+ 0xb4, 0xff, 0x19, 0x74, 0x14, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0x70,
+ 0xf5, 0x83, 0xef, 0xf0, 0x74, 0x1c, 0x2e, 0xf5, 0x82, 0xe4, 0x34,
+ 0x70, 0xf5, 0x83, 0xed, 0xf0, 0x22, 0x0e, 0xbe, 0x04, 0xd5, 0x22,
+ 0x22, 0x22, 0x90, 0x70, 0x2a, 0xe0, 0x30, 0xe1, 0x43, 0xc2, 0xaf,
+ 0x90, 0x70, 0x28, 0xe0, 0x90, 0x10, 0x1c, 0xf0, 0x90, 0x70, 0x29,
+ 0xe0, 0x90, 0x10, 0x1d, 0xf0, 0x90, 0x70, 0x2a, 0xe0, 0x90, 0x10,
+ 0x1e, 0xf0, 0x90, 0x10, 0x1c, 0xe0, 0xf5, 0x25, 0x90, 0x10, 0x1e,
+ 0xe0, 0x20, 0xe1, 0xf3, 0x90, 0x10, 0x1c, 0xe0, 0x90, 0x70, 0x28,
+ 0xf0, 0x90, 0x10, 0x1d, 0xe0, 0x90, 0x70, 0x29, 0xf0, 0x90, 0x10,
+ 0x1e, 0xe0, 0x90, 0x70, 0x2a, 0xf0, 0xc2, 0x05, 0xd2, 0xaf, 0x22,
+ 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3012,5 +3012,5 @@ static const uint8_t rt2860[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x53, 0x88
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x5b, 0xd2
};
diff --git a/sys/contrib/dev/ral/rt2860.fw.uu b/sys/contrib/dev/ral/rt2860.fw.uu
index ed8c8027327f..2350550b2482 100644
--- a/sys/contrib/dev/ral/rt2860.fw.uu
+++ b/sys/contrib/dev/ral/rt2860.fw.uu
@@ -15,27 +15,24 @@
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
begin 644 rt2860.fw
-M`@-;`@*F(B+___\"`2S______P(`'O______`@#=P.#`\,"#P(+`T'70&,*O
-M,$4#$A`)D`06X##C`W0(\)`$%.`@YP,"`,MT@/"0<!+@]3:0!`3@),]@,!1@
-M0B3B8$<48%4D(7!@Y54D_F`'%&`()`)P"'T!@"A]`H`DD'`0X/50A39`T@&`
-M/N559`-@!.55<`1]`H`)A39!T@*`*:U5KS82`H*`()!P$.#U1Y!P$>#U1!(0
-M)8`&D'`0X/5%Y/VO-A("@M($D'`3Y/"0<!/D\-*OT-#0@M"#T/#0X#+`X,#P
+M`@*C`@(N(O____\"`2S______P(`'O______`@#=P.#`\,"#P(+`T'70&,*O
+M,$4#$A`)D`06X##C`W0(\)`$%.`@YP,"`,MT@/"0<!+@]220!`3@),]@,!1@
+M0B3B8$<48%4D(7!@Y54D_F`'%&`()`)P"'T!@"A]`H`DD'`0X/50A21`T@&`
+M/N559`-@!.55<`1]`H`)A21!T@*`*:U5KR02`@J`()!P$.#U1Y!P$>#U1!(0
+M)8`&D'`0X/5%Y/VO)!(""M($D'`3Y/"0<!/D\-*OT-#0@M"#T/#0X#+`X,#P
MP(/`@L#0Z,#@Z<#@ZL#@Z\#@[,#@[<#@[L#@[\#@PJ\P10,2$!+2K]#@_]#@
M_M#@_=#@_-#@^]#@^M#@^=#@^-#0T(+0@]#PT.`RP.#`\,"#P(+`T'70$,*O
M,$4#$A`,,%@*Y51@!!54@`+"6#!9"N508`054(`"PEG54P<P8`051M($,$4#
-M$A`/PHW2K]#0T(+0@]#PT.`RD'`JX##A0\*OD'`HX)`0'/"0<"G@D!`=\)!P
-M*N"0$![PD!`<X/4WD!`>X"#A\Y`0'."0<"CPD!`=X)!P*?"0$![@D'`J\,(%
-MTJ\B$@+(,$4#$A`#,`$&(`D#$A`<,`(&(`H#$A`?,`,&(`L#$A`?,`0&(`P#
-M$A`B(!,)(!$&Y2M%+&`#TX`!PY*I$@,<@+_"0])%Y/4@]2'U4_5&]2OU+,)"
-M]5'U4O55D`08=(#PD`0:=`CPPAK"&,(;(LCOR.;Z".9*8`S([\@(YA88<`$6
-MPR+M)/_][#3_R._(]@C&[<;3(M"#T(+XY)-P$G0!DW`-HZ.3^'0!D_6"B(/D
-M<W0"DVA@[Z.CHX#?[_1@'^3^$@-GX+3_$A(#9^_P=!PN]8+D-'#U@^WP(@Z^
-M!.,BP.#`\,"#P(+`T'70",*O,$4#$A`&TJ_0T-""T(/0\-#@,L*O$@`&$@()
-M$@+AY/4B]4>0!`!T@/#2KR)UB0+D]8SUBO6(];CUZ'60&-*,=:@%(N]@`Q^`
-M^B+_P"9T`\#@P(+`@W4F"B+`)G0#P.#`@L"#=288(C!%`Q(0%>4@<`,@$`,P
-M$0-#AP$BSN_.[F`(?_\2`O@>@/4BR._(YF`#%L,B[13VTR+([\CF8`86YB3_
-MLR+#(GA_Y/;8_76!7P(!Q704+O6"Y#1P]8,B[Y`#>Y.0`P!S"AC_________
+M$A`/PHW2K]#0T(+0@]#PT.`R$@)0,$4#$A`#,`$&(`D#$A`<,`(&(`H#$A`?
+M,`,&(`L#$A`?,`0&(`P#$A`B(!,)(!$&Y2M%+&`#TX`!PY*I$@*`@+_"0])%
+MY/4@]2'U4_5&]2OU+,)"]5'U4O55D`08=(#PD`0:=`CP(M"#T(+XY)-P$G0!
+MDW`-HZ.3^'0!D_6"B(/D<W0"DVA@[Z.CHX#?[_1@'^3^$@*OX+3_$A("K^_P
+M=!PN]8+D-'#U@^WP(@Z^!.,BP.#`\,"#P(+`T'70",*O,$4#$A`&TJ_0T-""
+MT(/0\-#@,L*O$@`&$@&^$@)IY/4B]4>0!`!T@/#2KR)UB0+D]8SUBO6(];CU
+MZ'60&-*,=:@%(C!%`Q(0%>4@<`,@$`,P$0-#AP$BSN_.[F`(?_\2`L4>@/4B
+M>'_D]MC]=8%?`@%Z=!0N]8+D-'#U@R+OD`+#DY`#`',*&.]@`Q^`^B+_____
M____________________________________________________________
+M____P"9T`\#@P(+`@W4F"B+`)G0#P.#`@L"#=288(O__________________
M____________________________________________________________
M____________________________________________________________
M____________________________________________________________
@@ -106,83 +103,86 @@ M____________________________________________________________
M____________________________________________________________
M____________________________________________________________
M____________________________________________________________
-M_____P(0*`(0.P(0/`(3O`(3O0(4<@(4<\,B__\"&4H"&O0"%6P"%*<P!08@
-M#0,2`7HP!@8@#@,2'"XB(I`$%.`@YP,"$[N0<!+@]5:0!`3@$@)<$/LP$-(Q
-M$),S$*$T$+0U$*LV$0E0$4Y1$5=2$5=3$5=4$9-5$?!6$D-P$FEQ$I)R$SYS
-M$V&`$XB#$Z"$```3N](8TF%U-2IU,@MU,[@BPAB0`13@5/WP(I!P$>#U/`(3
-MM>55M`(/Y5@PX`:0`0UT"/!]`8`"?0*O5A("@@(3M2`"`S`#"GT"KU82`H("
-M$[7E--.4`4`,D`$,X$0"\*/@1`3PA59!T@(BD'`1X/1P`P(3N^#U,"+E--.4
-M`4`'Y55@`P(3NY!P$.!4?_^_"@V0<!'@M`@&=4X!=4^$D'`0X%1__[\"$I!P
-M$>!D"&`$X+0@!G5.`W5/(.3U)R*0<!'@)/^21R+E--.4`4`'Y55@`P(329`$
-M!.`EX"1=]5>0<!#@_W1')5?XQN_&D'`1X/]T2"57^,;OQN3]KU82`H("$[7E
-M--.4`4`'Y55@`P(32>5'9`=@'>5'9`A@%^5'9`E@$>5'9`I@"^5'9`M@!>5'
-MM`P(D'`1X%0/]3KE1[0)".4ZM`,#Y/5&Y4>T"@CE.K0!`^3U1N3]KU82`H+2
-M!"*0<!'@]/^0<!#@7_^0<!'@52=/D'`8\)!P$>"0<!GPY/VO5A("@C`5`](4
-M(I!P&.#U)Y`"*>#_D'`9X/[O7I`"*?`P1P2O)X`$Y2?T_Y`"*._P(N4TTY0!
-M0`?E56`#`A-)D'`0X/Z0<!'@_>WXYO57_:]6$@*"`A.UY333E`%`!^558`,"
-M$TF0<!#@_I!P$>#][?6"CH/@]5?]KU82`H("$[60$`#@]5?D]5CU69`0`^"T
-M*`5U6`&`/)`0`^"T,`5U6`*`,)`0`^"T,P5U6`2`))`0`^"T-0R0$`+@M'(%
-M=5@(@!&0$`/@M#4*D!`"X+23`W58$.58,.$9D`4(X$0!\/V0!07@5/OP1`3P
-M[53^D`4(\.3U3O5/=3K_PAK"&,(;]320!:1T$?"C=/_PHW0#\.3U,,(9D`$-
-MX$1`\'4\_ZU7KU82`H+DD'`R\(!WY333E`%`"^558`=]`Z]6`@*"D'`0X"3_
-MDI/D_:]6$@*"@%3E--.4`4`-Y55@"7T#KU82`H*`0)!P$.`D_Y)*T@6M5Z]6
-M$@*"@"WD]33U,)!P$.#T8`/@]32M5Z]6$@*"@!72&04OY2^T&@/D]2_2!*U7
-MKU82`H*0!!1T@/`B(N4TPY0#0!?E5;0"$N4P8`XP8`MT_25&]4;2!.3U4^53
-M8`,"%'$P8"&R33!-'.4TPY0#0!'E5;0"#.4P8`AT`R5&]4:``@5&P@3E3T5.
-M8`CE3Q5/<`(53C`:27\R?;A\"Q("-5`&D`00=$#P?S5],A(#/U`)D!`$X%3W
-M\-(&Y373E"U`,#`;+<(;HAB2&B`:))`$">!4W?"0$`3@1`CPPF'2`R+D]36B
-M&)(:,!H'D`0)X$0B\"(B,!0PD'`9X%4G_Y!P&.!/]2>0`BG@_Y!P&>#^[UZ0
-M`BGP,$<$KR>`!.4G]/^0`BCO\,(4(L)+PDSE1!("7!3)`!57!!53"!4S$!3=
-M(!3]8!4.H```%5F%2$.%2D*%3%[E1V0&8`,"%5F`&^5(Q%0/]4/E2L14#_5"
-MY4S$5`_U7N5'9`9P85-##X!<A4E#A4M"A4U>Y4=D!G!-@!OE2<14#_5#Y4O$
-M5`_U0N5-Q%0/]5[E1V0&<##E0U0/1!#U0X`FY4=D!&`%Y4>T!09#7@1U0@GE
-M1[0&$.5#5`]$,/5#@`;22X`"TDSD]2KE0L14\/_E0U0/3_5?TF`BTA7E1R3U
-M8`LDRV`')$!P!L(5(A(9%1(5CL(5PJ_"!-*O(L*OD`04X%0.8`32'(`(Y4Y%
-M3R3_DAS2KY`$%."BY)(==![PY5]4#_4MY2IP$S`<!>5?(.4+,!TIY5]4,&0P
-M<"'E*G`5Y33#E`-`">4P8`5U*@6`!W4J#(`"%2K2;-)M@`_E7S#F!L)LTFV`
-M!-)LPFWE1V0#<"$P2P;";-)M@!CE*G`#,$P1PDSE*G`%=2H'@`(5*M)LTFWE
-M1[0)%.5$(.,+Y3ID`F`%Y3JT`P3";-)MY4>T"A/E.K0!!L)LTFV`".4Z<`32
-M;,)M(&D'Y5X@X`*R:"!K!^5>(.$"LFH@;0?E7B#B`K)L=2Y`(&D$HFB`13!H
-M!N5&HN*`/#`9'.5>(.`$?P&``G\`Y2^T&01^`8`"?@#N;R3_@!WE7B#@!'\!
-M@`)_`.5&5/#^OO`$?@&``GX`[F\D_Y)SDG(@:P2B:H!%,&H&Y4:BXH`\,!D<
-MY5X@X01_`8`"?P#E+[09!'X!@`)^`.YO)/^`'>5>(.$$?P&``G\`Y494\/Z^
-M\`1^`8`"?@#N;R3_DG62="!M!*)L@";E1V0*<"(P;`;E1J+C@!?E.K0!!N5&
-MHN.`4^5&(.0#,.4#TX`!PX!%,&P&Y4:BXH`\,!D<Y5X@X@1_`8`"?P#E+[09
-M!'X!@`)^`.YO)/^`'>5>(.($?P&``G\`Y494\/Z^\`1^`8`"?@#N;R3_DG&2
-M<)`0`."0$"SPD!`#X,.4,$`4HG&2=Z)PDG;E+A,35#_U+L)WTG:0$"_E+O#E
-M1V0&<#F0`BG@5/[PY4/$5`\48`PD_F`,)`-P$\(X@`_2.(`+Y48PX@/3@`'#
-MDC@P1P6O)P(9#^4G]/\"&0_E1V0'8`_E1V0(8`GE1V0)8`,"&(Z0`BG@5/SP
-MY3H48"(48"448"TD_&!))/E@%"0.<%#E1A,35#]U\`.$Y?`D_X`ZTCG".(`^
-MY48PX@/3@!W#@!KE1C#B#50XPY0P4`9^`'\!@`1^`'\`[D\D_Y(XPCF`$^5&
-M,.(#TX`!PY(YPCB`!,(XPCDP1P2O)X`$Y2?T_P(9#^5'9`Q@!N5'9`MP>I`"
-M*>!4_?#E.A1@(!1@(11@*R3\8$4D^6`2)`YP2N5&$Q-4/W7P`X3E\(`ITCF`
-M.N5&,.(#TX`!PY(Y@"WE1C#B#50XPY0P4`9^`'\!@`1^`'\`[D\D_Y(Y@`_E
-M1C#B`].``<.2.8`"PCDP1P2O)X`$Y2?T_Y`"*._P(N5'M`L0D`(IX%3K\.4G
-M5.M%1?4G(N20`BGP,$<$KT6`!.5%]/^0`BCO\"*/4-)9(H]4TE@BY/4WPJ_E
-M411@2A1@:R0"8`,"&M726755`2`:')`"".!4_O#@(.$CD`0TX+0"'*/@M`(7
-MH^"T`A)_(!(90)`0!.!4\_!U40$"&M7E4'`&=3<#`AK5D!(`X%0#<!5_(!(9
-M0"`:!Y`"".!4^_!U40("&M7E4'`#`AK0(!H5D`((X##C`P(:S)`$-^!D(F`#
-M`AK,D!($=`KPY5@PXQ7DD`4`\*-T"/"C=`'P=`/P?P$2`S"0$RC@D'`:\)`3
-M*>"0<!OPD!,KX)!P(O"0$RC@5/#PH^!4\/"0$RO@5,SPY5@PXQ?E-'`3Y3ST
-MD!,J8`7@5/.`$>!4^_"`%.4\])`3*F`(X%3R13SP@`3@5/KP(!H'D`0!X$00
-M\.4TTY0!0`GE,'`%=8Q`@`-UC("0!`'@5/WP(!H'D!($X$0$\.58,.`&D`$-
-MX/4QY333E`%`+)`!#>!$`?#E6"#C#.4TM`,'D!($X%3]\"`"$2`##I`!#>!4
-M^_"0`0S@5/WP=3<!=54"Y/51@`GE4'`%=3<#]5'E-V`8P@'D]5'"62`:#JTW
-MKT`2&_KE-[0#`M(#TJ\BPJ\P`0[D]5'"6<(!?0*O0!(;^N52%&!6%&`S)`)@
-M`P(;]^4TTY0!0!^0`0S@1`+PH^!$!/"0$@3@1`+P?S(2`S"0`0W@5/[P=5("
-M=54#Y5@PX`:0`0WE,?"0$@3@5/OP?R`2&45U4@%U50,"&_?E5&`#`AOWD`0!
-MX$0.\"`:!.!4[_#D]8SE6%088!Z0<!K@D!,H\)!P&^"0$RGPHW0%\)!P(N"0
-M$ROP@!&0$RC@1`_PH^!$#_"CX$0%\)`2!'0#\.58,.,6D`4`=.+PHW0(\*-T
-M`?!T`_!_`1(#,"`:!Y`"".!$!?"0$`3@1`SPY/52]54P`@G"`GT!KT$2&_HP
-M`P+"`]*O(N_T8"WD_G04+O6"Y#1P]8/@M/\9=!0N]8+D-'#U@^_P=!PN]8+D
-M-'#U@^WP(@Z^!-4B(B(P&G>0!#?@(.5LD`0HX/4XH^#U-_4YY/4EY3EU\("D
-M)`#_Y?`T@/[E-V4Y<`7\?2B`!'P`?0#O+?_N//X2'*E0!Y`!%.!$`O#E.64X
-M8!#D)3G_Y#2`CX+U@^#U.8"[D`00=`'PD`0HY3CPH^4W\)`$$70!\("-P@;2
-M&R+E)<.4!E`9CX*.@^"T_P<%)>3U)(`NY/4ECX*.@_"`).4D=?`&A'0()?#U
-M@N0T$/6#X/V/@HZ#X&UP!@4E!22``^3U)0^_``$.[U1_8`?E)<.4*D"KY26T
-M*@/3@`'#(@``````````````````````````````````````````````````
-M````````````````````````````````````````````````````````````
-M````````````````````````````````````````````````````````````
+M____________________________________________________________
+M____________________________________________________________
+M____________________________________________________________
+M_____P(0*`(0,@(0,P(4P@(4PP(5CP(5D,,B__\"&F\"&^P"%KP"%?<P!08@
+M#0,2'1DB(I`$%.`@YP,"%,&0<!+@]5:0!`3@$@'D$-HP$+$Q$),U$(HV$.=`
+M$/Y!$150$5I1$6-2$6-3$6-4$9]5$?Q6$D]D$FIR$QYS$T)T%#6`%*6#%%R1
+M```4P9!P$>#U/`(4N^55M`(/Y5@PX`:0`0UT"/!]`8`"?0*O5A(""@(4NR`"
+M`S`#"GT"KU82`@H"%+OE,-.4`4`,D`$,X$0"\*/@1`3PA59!T@(BD'`1X+1:
+M`\)/(M)/(N4PTY0!4`,"%,&0`0S@1`+PH^!$!/`BY3#3E`%0`P(4P9`!#.!4
+M_?"CX%3[\"+E,-.4`4`'Y55@`P(4P9!P$.!4?_^_"@V0<!'@M`@&=4X!=4^$
+MD'`0X%1__[\"$I!P$>!D"&`$X+0@!G5.`W5/(.3U/R*0<!'@)/^21R+E,-.4
+M`4`'Y55@`P(3*9`$!.`EX"1=]5>0<!#@_W1')5?XQN_&D'`1X/]T2"57^,;O
+MQN3]KU82`@H"%+OE,-.4`4`'Y55@`P(3*>5'9`=@'>5'9`A@%^5'9`E@$>5'
+M9`I@"^5'9`M@!>5'M`P(D'`1X%0/]3KE1[0)".4ZM`,#Y/5&Y4>T"@CE.K0!
+M`^3U1N3]KU82`@K2!"*0<!'@]/^0<!#@7_^0<!'@53]/D'`8\)!P$>"0<!GP
+MY/VO5A(""C`5`](4(I!P&.#U/Y`"*>#_D'`9X/[O7I`"*?`P1P2O/X`$Y3_T
+M_Y`"*._P(I!P$.`D_Y(:=3(#=3,?Y/4QK5>O5A(""@(4NY`0`.#U5^3U6/59
+MD!`#X+0H!758`8`\D!`#X+0P!758`H`PD!`#X+0S!758!(`DD!`#X+0U#)`0
+M`N"T<@5U6`B`$9`0`^"T-0J0$`+@M),#=5@0Y5@PX1F0!0C@1`'P_9`%!>!4
+M^_!$!/#M5/Z0!0CPY/5.]4]U.O_U,)`%I'01\*-T__"C=`/PTD^0`0W@1$#P
+M=3S_K5>O5A(""I!P-G0W\*-T,O"0!`'@1`'PPAK"%P(4N^4PTY0!0`OE56`'
+M?0.O5@(""I!P$.`D_Y*3Y/VO5A(""@(4NY`0`."0$"SPD!`O=$#PD'`1X%1_
+M]5?@5("0<#+PD'`0X/_E5].?0$.0<#/E5_"0<!#@_Y!P,^##G].4!$!SX"3\
+M\.#_D'`RX$^0!0#PY5A4#V`$?Q>``G\1D`4![_"C=`'P=`/P_Q("E(##D'`S
+MY5?PD'`SX/^0<!#@PY_3E`1`,)!P,^`D!/#@_Y!P,N!/D`4`\.585`]@!'\7
+M@`)_$9`%`>_PHW0!\'0#\/\2`I2`P)!P$.#_D'`RX$^0!0#PY5A4#V`$?Q>`
+M`G\1D`4![_"C=`'P=`/P_Q("E)`0`."0$"SPD!`O='_PY/VO5A(""@(4N^4P
+MTY0!0`WE56`)?0.O5A(""H!RD'`0X"3_DDK2!:U7KU82`@J`7Y!P$>`D_Y(7
+MD'`0X/5=K5>O5A(""I`$%'2`\#`7$Y`0`."0$"SPD!`OX%3P]5=%7?#DD'`3
+M\.56]&`JD'`EX$0!\)`"+'3_\"+D]3#23Y!P$.#T8`/@]3"M5Z]6$@(*D`04
+M=(#P(B+E,T4R8`KE,Q4S<`H5,H`&=3(#=3,?Y3-%,F`#`A5P(!H#`A5P=*`E
+M,?6"Y#1,]8/@8'I_?A(5WN]4_D0"_7]^$A7$Y3%_`"7@_N\D`/6"=$T^KX*0
+M3*CPH^_PY/56]5=_?Q(5WI!,J.#ZH^`E5_6"ZC56]8/O\`57Y5=P`@56PY2`
+MY5:4`4#8?WX2%=[O1`/]?WX2%<1TH"4Q]8+D-$SU@^3P!3'E,;0(`^3U,>53
+M<!HP8`FR33!-!`5&P@3E3T5.8`CE3Q5/<`(53B(B,!0PD'`9X%4__Y!P&.!/
+M]3^0`BG@_Y!P&>#^[UZ0`BGP,$<$KS^`!.4_]/^0`BCO\,(4(I`0'.WPH^_P
+MHW0*\)`0'.#U6)`0'N`@X?,BD!`=[_"C=`OPD!`<X/58D!`>X"#A\Z]8(L)+
+MPDSE1!(!Y!89`!:G!!:C"!:#$!8M(!9-8!9>H```%JF%2$.%2D*%3%[E1V0&
+M8`,"%JF`&^5(Q%0/]4/E2L14#_5"Y4S$5`_U7N5'9`9P85-##X!<A4E#A4M"
+MA4U>Y4=D!G!-@!OE2<14#_5#Y4O$5`_U0N5-Q%0/]5[E1V0&<##E0U0/1!#U
+M0X`FY4=D!&`%Y4>T!09#7@1U0@GE1[0&$.5#5`]$,/5#@`;22X`"TDSD]2?E
+M0L14\/_E0U0/3_5?TF`BTA7E1R3U8`LDRV`')$!P!L(5(A(:.A(6WL(5PJ_"
+M!-*O(L*OD`04X%0.8`32&(`(Y4Y%3R3_DAC2KY`$%."BY)(9=![PY5]4#_4M
+MY2=P$S`8!>5?(.4+,!D9Y5]4,/^_,!'E)W`%=2<,@`(5)])LTFV`#^5?,.8&
+MPFS2;8`$TFS";>5'9`-P(3!+!L)LTFV`&.4G<`,P3!'"3.4G<`5U)P>``A4G
+MTFS2;>5'M`D4Y40@XPOE.F0"8`7E.K0#!,)LTFWE1[0*$^4ZM`$&PFS2;8`(
+MY3IP!-)LPFT@:0?E7B#@`K)H(&L'Y5X@X0*R:B!M!^5>(.("LFQU+D`@:02B
+M:(`F,&@&Y4:BXH`=Y5X@X`1_`8`"?P#E1E3P_K[P!'X!@`)^`.YO)/^2<Y)R
+M(&L$HFJ`)C!J!N5&HN*`'>5>(.$$?P&``G\`Y494\/Z^\`1^`8`"?@#N;R3_
+MDG62="!M!*)L@";E1V0*<"(P;`;E1J+C@!?E.K0!!N5&HN.`-.5&(.0#,.4#
+MTX`!PX`F,&P&Y4:BXH`=Y5X@X@1_`8`"?P#E1E3P_K[P!'X!@`)^`.YO)/^2
+M<9)PD!``X)`0+/"0$`/@PY0P0!G@9#)@%*)QDG>B<))VY2X3$U0_]2["=])V
+M,!<-4R[PY2Y%79`0+_"`!I`0+^4N\.5'9`9P1Y`"*.`P1P/_@`+T_X\_D`(I
+MX%3^\.5#Q%0/%&`,)/Y@#"0#<!/"^(`/TOB`"^5&,.(#TX`!PY+X,$<%KS\"
+M&C3E/_3_`AHTY4=D!V`/Y4=D"&`)Y4=D"6`#`AFBD`(HX#!'`_^``O3_CS^0
+M`BG@5/SPY3H48"(48"448"TD_&!))/E@%"0.<%#E1A,35#]U\`.$Y?`D_X`Z
+MTOG"^(`^Y48PX@/3@!W#@!KE1C#B#50XPY0P4`9^`'\!@`1^`'\`[D\D_Y+X
+MPOF`$^5&,.(#TX`!PY+YPOB`!,+XPODP1P2O/X`$Y3_T_P(:-.5'9`Q@">5'
+M9`M@`P(:.9`"*.`P1P/_@`+T_X\_D`(IX%3]\.4Z%&`@%&`A%&`K)/Q@123Y
+M8!(D#G!*Y483$U0_=?`#A.7P@"G2^8`ZY48PX@/3@`'#DOF`+>5&,.(-5#C#
+ME#!0!GX`?P&`!'X`?P#N3R3_DOF`#^5&,.(#TX`!PY+Y@`+"^3!'!*\_@`3E
+M/_3_D`(H[_`BY4>T"Q"0`BG@5.OPY3]4ZT5%]3\BY)`"*?`P1P2O18`$Y47T
+M_Y`"*._P(H]0TEDBCU326"+D]27"K^51%&!*%&!K)`)@`P(;T-)9=54!D`((
+MX%3^\.`@X2.0!#3@M`(<H^"T`A>CX+0"$G\@$AIED!`$X%3S\'51`0(;T.50
+M8`,"&]!U)0,"&]"0$@#@5`-P$G\@$AIED`((X%3[\'51`@(;T.508`,"&]`"
+M&\N0`@C@,.,#`AO'D`0WX&0B8`,"&\>0$@1T"O#E6##C')```N`PX!7DD`4`
+M\*-T"/"C=`'P=`/P?P$2`I20$RC@D'`:\)`3*>"0<!OPD!,KX)!P(O"0$RC@
+M5/#PH^!4\/"0$RO@5,SPY5@PXQ?E,'`3Y3STD!,J8`7@5/.`$>!4^_"`%.4\
+M])`3*F`(X%3R13SP@`3@5/KPD`0!X$00\'6,@.!4_?"0$@3@1`3PY5@PX`:0
+M`0W@]2_E,-.4`4`7(`(4(`,1,$\.D`$-X%3[\)`!#.!4_?!U)0%U50+D]5&`
+M">50<`5U)0/U4>4E8!7"`>3U4<)9K26O0!(<Y>4EM`,"T@/2KR+"KS`!#N3U
+M4<)9P@%]`J]`$ASEY5(48$@48"4D`F`#`ASBY3#3E`%`$9`!#.!$`O"CX$0$
+M\'\*$@*4=5("=54#Y5@PX`:0`0WE+_"0$@3@5/OP?R`2&FIU4@%U50,"'.+E
+M5&`#`ASBD`0!X$0.\.!4[_#D]8SE6%088!Z0<!K@D!,H\)!P&^"0$RGPHW0%
+M\)!P(N"0$ROP@!&0$RC@1`_PH^!$#_"CX$0%\)`2!'0#\.58,.,=D``"X##@
+M%I`%`'3B\*-T"/"C=`'P=`/P?P$2`I20`@C@1`7PD!`$X$0,\.3U4O55,`()
+MP@)]`:]!$ASE,`,"P@/2KR+O]&`MY/YT%"[U@N0T</6#X+3_&704+O6"Y#1P
+M]8/O\'0<+O6"Y#1P]8/M\"(.O@35(B(BD'`JX##A0\*OD'`HX)`0'/"0<"G@
+MD!`=\)!P*N"0$![PD!`<X/4ED!`>X"#A\Y`0'."0<"CPD!`=X)!P*?"0$![@
+MD'`J\,(%TJ\B(@``````````````````````````````````````````````
M````````````````````````````````````````````````````````````
M````````````````````````````````````````````````````````````
M````````````````````````````````````````````````````````````
@@ -196,7 +196,7 @@ M````````````````````````````````````````````````````````````
M````````````````````````````````````````````````````````````
M````````````````````````````````````````````````````````````
M````````````````````````````````````````````````````````````
-M```````````````````````````````````````````````````````````:
-"4X@`
+M```````````````````````````````````````````````````````````F
+"6](`
`
end
diff --git a/sys/contrib/vchiq/interface/compat/list.h b/sys/contrib/vchiq/interface/compat/list.h
new file mode 100644
index 000000000000..a669e6df3f50
--- /dev/null
+++ b/sys/contrib/vchiq/interface/compat/list.h
@@ -0,0 +1,256 @@
+/* $NetBSD: list.h,v 1.5 2014/08/20 15:26:52 riastradh Exp $ */
+
+/*-
+ * Copyright (c) 2013 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Notes on porting:
+ *
+ * - LIST_HEAD(x) means a declaration `struct list_head x =
+ * LIST_HEAD_INIT(x)' in Linux, but something else in NetBSD.
+ * Replace by the expansion.
+ *
+ * - The `_rcu' routines here are not actually pserialize(9)-safe.
+ * They need dependent read memory barriers added. Please fix this
+ * if you need to use them with pserialize(9).
+ */
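+/*
+ * A minimal sketch of the expansion mentioned in the porting note above,
+ * using a hypothetical list name "mylist": where Linux code declares
+ *
+ *	LIST_HEAD(mylist);
+ *
+ * a port built on this header writes the expansion by hand:
+ *
+ *	struct list_head mylist = LIST_HEAD_INIT(mylist);
+ */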
+
+#ifndef _LINUX_LIST_H_
+#define _LINUX_LIST_H_
+
+#include <sys/queue.h>
+
+#define container_of(ptr, type, member) \
+({ \
+ __typeof(((type *)0)->member) *_p = (ptr); \
+ (type *)((char *)_p - offsetof(type, member)); \
+})
+
+/*
+ * Doubly-linked lists.
+ */
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define LIST_HEAD_INIT(name) { .prev = &(name), .next = &(name) }
+
+static inline void
+INIT_LIST_HEAD(struct list_head *head)
+{
+ head->prev = head;
+ head->next = head;
+}
+
+static inline struct list_head *
+list_first(const struct list_head *head)
+{
+ return head->next;
+}
+
+static inline struct list_head *
+list_last(const struct list_head *head)
+{
+ return head->prev;
+}
+
+static inline struct list_head *
+list_next(const struct list_head *node)
+{
+ return node->next;
+}
+
+static inline struct list_head *
+list_prev(const struct list_head *node)
+{
+ return node->prev;
+}
+
+static inline int
+list_empty(const struct list_head *head)
+{
+ return (head->next == head);
+}
+
+static inline int
+list_is_singular(const struct list_head *head)
+{
+
+ if (list_empty(head))
+ return false;
+ if (head->next != head->prev)
+ return false;
+ return true;
+}
+
+static inline void
+__list_add_between(struct list_head *prev, struct list_head *node,
+ struct list_head *next)
+{
+ prev->next = node;
+ node->prev = prev;
+ node->next = next;
+ next->prev = node;
+}
+
+static inline void
+list_add(struct list_head *node, struct list_head *head)
+{
+ __list_add_between(head, node, head->next);
+}
+
+static inline void
+list_add_tail(struct list_head *node, struct list_head *head)
+{
+ __list_add_between(head->prev, node, head);
+}
+
+static inline void
+list_del(struct list_head *entry)
+{
+ entry->prev->next = entry->next;
+ entry->next->prev = entry->prev;
+}
+
+static inline void
+__list_splice_between(struct list_head *prev, const struct list_head *list,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+static inline void
+list_splice(const struct list_head *list, struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice_between(head, list, head->next);
+}
+
+static inline void
+list_splice_tail(const struct list_head *list, struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice_between(head->prev, list, head);
+}
+
+static inline void
+list_move(struct list_head *node, struct list_head *head)
+{
+ list_del(node);
+ list_add(node, head);
+}
+
+static inline void
+list_move_tail(struct list_head *node, struct list_head *head)
+{
+ list_del(node);
+ list_add_tail(node, head);
+}
+
+static inline void
+list_replace(struct list_head *old, struct list_head *new)
+{
+ new->prev = old->prev;
+ old->prev->next = new;
+ new->next = old->next;
+ old->next->prev = new;
+}
+
+static inline void
+list_del_init(struct list_head *node)
+{
+ list_del(node);
+ INIT_LIST_HEAD(node);
+}
+
+#define list_entry(PTR, TYPE, FIELD) container_of(PTR, TYPE, FIELD)
+#define list_first_entry(PTR, TYPE, FIELD) \
+ list_entry(list_first((PTR)), TYPE, FIELD)
+#define list_last_entry(PTR, TYPE, FIELD) \
+ list_entry(list_last((PTR)), TYPE, FIELD)
+#define list_next_entry(ENTRY, FIELD) \
+ list_entry(list_next(&(ENTRY)->FIELD), typeof(*(ENTRY)), FIELD)
+#define list_prev_entry(ENTRY, FIELD) \
+ list_entry(list_prev(&(ENTRY)->FIELD), typeof(*(ENTRY)), FIELD)
+
+#define list_for_each(VAR, HEAD) \
+ for ((VAR) = list_first((HEAD)); \
+ (VAR) != (HEAD); \
+ (VAR) = list_next((VAR)))
+
+#define list_for_each_safe(VAR, NEXT, HEAD) \
+ for ((VAR) = list_first((HEAD)); \
+ ((VAR) != (HEAD)) && ((NEXT) = list_next((VAR)), 1); \
+ (VAR) = (NEXT))
+
+#define list_for_each_entry(VAR, HEAD, FIELD) \
+ for ((VAR) = list_entry(list_first((HEAD)), typeof(*(VAR)), FIELD); \
+ &(VAR)->FIELD != (HEAD); \
+ (VAR) = list_entry(list_next(&(VAR)->FIELD), typeof(*(VAR)), \
+ FIELD))
+
+#define list_for_each_entry_reverse(VAR, HEAD, FIELD) \
+ for ((VAR) = list_entry(list_last((HEAD)), typeof(*(VAR)), FIELD); \
+ &(VAR)->FIELD != (HEAD); \
+ (VAR) = list_entry(list_prev(&(VAR)->FIELD), typeof(*(VAR)), \
+ FIELD))
+
+#define list_for_each_entry_safe(VAR, NEXT, HEAD, FIELD) \
+ for ((VAR) = list_entry(list_first((HEAD)), typeof(*(VAR)), FIELD); \
+ (&(VAR)->FIELD != (HEAD)) && \
+ ((NEXT) = list_entry(list_next(&(VAR)->FIELD), \
+ typeof(*(VAR)), FIELD), 1); \
+ (VAR) = (NEXT))
+
+#define list_for_each_entry_continue(VAR, HEAD, FIELD) \
+ for ((VAR) = list_next_entry((VAR), FIELD); \
+ &(VAR)->FIELD != (HEAD); \
+ (VAR) = list_next_entry((VAR), FIELD))
+
+#define list_for_each_entry_continue_reverse(VAR, HEAD, FIELD) \
+ for ((VAR) = list_prev_entry((VAR), FIELD); \
+ &(VAR)->FIELD != (HEAD); \
+ (VAR) = list_prev_entry((VAR), FIELD))
+
+#define list_for_each_entry_safe_from(VAR, NEXT, HEAD, FIELD) \
+ for (; \
+ (&(VAR)->FIELD != (HEAD)) && \
+ ((NEXT) = list_next_entry((VAR), FIELD)); \
+ (VAR) = (NEXT))
+
+#endif /* _LINUX_LIST_H_ */
diff --git a/sys/contrib/vchiq/interface/compat/vchi_bsd.c b/sys/contrib/vchiq/interface/compat/vchi_bsd.c
new file mode 100644
index 000000000000..31170bc5e605
--- /dev/null
+++ b/sys/contrib/vchiq/interface/compat/vchi_bsd.c
@@ -0,0 +1,532 @@
+/*-
+ * Copyright (c) 2010 Max Khon <fjoe@freebsd.org>
+ * All rights reserved.
+ *
+ * This software was developed by Max Khon under sponsorship from
+ * the FreeBSD Foundation and Ethon Technologies GmbH.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: bsd-compat.c 9253 2010-09-02 10:12:09Z fjoe $
+ */
+
+#include <sys/types.h>
+#include <sys/limits.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/firmware.h>
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/syscallsubr.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+
+#include <machine/stdarg.h>
+
+#include "mbox_if.h"
+
+#include <interface/compat/vchi_bsd.h>
+
+MALLOC_DEFINE(M_VCHI, "VCHI", "VCHI");
+
+/*
+ * Timer API
+ */
+static void
+run_timer(void *arg)
+{
+ struct timer_list *t = (struct timer_list *) arg;
+ void (*function)(unsigned long);
+
+ mtx_lock_spin(&t->mtx);
+ if (callout_pending(&t->callout)) {
+ /* callout was reset */
+ mtx_unlock_spin(&t->mtx);
+ return;
+ }
+ if (!callout_active(&t->callout)) {
+ /* callout was stopped */
+ mtx_unlock_spin(&t->mtx);
+ return;
+ }
+ callout_deactivate(&t->callout);
+
+ function = t->function;
+ mtx_unlock_spin(&t->mtx);
+
+ function(t->data);
+}
+
+void
+init_timer(struct timer_list *t)
+{
+ mtx_init(&t->mtx, "dahdi timer lock", NULL, MTX_SPIN);
+ callout_init(&t->callout, CALLOUT_MPSAFE);
+ t->expires = 0;
+ /*
+	 * function and data are intentionally left uninitialized:
+	 * the Linux implementation does not initialize them either
+	 */
+}
+
+void
+setup_timer(struct timer_list *t, void (*function)(unsigned long), unsigned long data)
+{
+ t->function = function;
+ t->data = data;
+ init_timer(t);
+}
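+/*
+ * A minimal usage sketch for this compat timer API (the callback
+ * my_timeout and the softc pointer sc are hypothetical names, and it is
+ * assumed that jiffies counts ticks, matching the callout(9)-based
+ * conversion done in mod_timer() above):
+ *
+ *	static void my_timeout(unsigned long data);
+ *	struct timer_list t;
+ *
+ *	setup_timer(&t, my_timeout, (unsigned long)sc);
+ *	mod_timer(&t, jiffies + hz);	fires roughly one second later
+ *	...
+ *	del_timer_sync(&t);		stops the callout, destroys the mutex
+ */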
+
+void
+mod_timer(struct timer_list *t, unsigned long expires)
+{
+ mtx_lock_spin(&t->mtx);
+ callout_reset(&t->callout, expires - jiffies, run_timer, t);
+ mtx_unlock_spin(&t->mtx);
+}
+
+void
+add_timer(struct timer_list *t)
+{
+ mod_timer(t, t->expires);
+}
+
+int
+del_timer_sync(struct timer_list *t)
+{
+ mtx_lock_spin(&t->mtx);
+ callout_stop(&t->callout);
+ mtx_unlock_spin(&t->mtx);
+
+ mtx_destroy(&t->mtx);
+ return 0;
+}
+
+int
+del_timer(struct timer_list *t)
+{
+ del_timer_sync(t);
+ return 0;
+}
+
+/*
+ * Completion API
+ */
+void
+init_completion(struct completion *c)
+{
+ cv_init(&c->cv, "VCHI completion cv");
+ mtx_init(&c->lock, "VCHI completion lock", "condvar", MTX_DEF);
+ c->done = 0;
+}
+
+void
+destroy_completion(struct completion *c)
+{
+ cv_destroy(&c->cv);
+ mtx_destroy(&c->lock);
+}
+
+void
+complete(struct completion *c)
+{
+ mtx_lock(&c->lock);
+
+ if (c->done >= 0) {
+ KASSERT(c->done < INT_MAX, ("c->done overflow")); /* XXX check */
+ c->done++;
+ cv_signal(&c->cv);
+ } else {
+ KASSERT(c->done == -1, ("Invalid value of c->done: %d", c->done));
+ }
+
+ mtx_unlock(&c->lock);
+}
+
+void
+complete_all(struct completion *c)
+{
+ mtx_lock(&c->lock);
+
+ if (c->done >= 0) {
+ KASSERT(c->done < INT_MAX, ("c->done overflow")); /* XXX check */
+ c->done = -1;
+ cv_broadcast(&c->cv);
+ } else {
+ KASSERT(c->done == -1, ("Invalid value of c->done: %d", c->done));
+ }
+
+ mtx_unlock(&c->lock);
+}
+
+void
+INIT_COMPLETION_locked(struct completion *c)
+{
+ mtx_lock(&c->lock);
+
+ c->done = 0;
+
+ mtx_unlock(&c->lock);
+}
+
+static void
+_completion_claim(struct completion *c)
+{
+
+ KASSERT(mtx_owned(&c->lock),
+ ("_completion_claim should be called with acquired lock"));
+ KASSERT(c->done != 0, ("_completion_claim on non-waited completion"));
+ if (c->done > 0)
+ c->done--;
+ else
+ KASSERT(c->done == -1, ("Invalid value of c->done: %d", c->done));
+}
+
+void
+wait_for_completion(struct completion *c)
+{
+ mtx_lock(&c->lock);
+ while (c->done == 0)
+ cv_wait(&c->cv, &c->lock);
+ _completion_claim(c);
+ mtx_unlock(&c->lock);
+}
+
+int
+try_wait_for_completion(struct completion *c)
+{
+ int res = 0;
+
+ mtx_lock(&c->lock);
+ if (c->done != 0) {
+ _completion_claim(c);
+ res = 1;
+ }
+ mtx_unlock(&c->lock);
+ return (res);
+}
+
+int
+wait_for_completion_interruptible_timeout(struct completion *c, unsigned long timeout)
+{
+ int res = 0;
+ unsigned long start, now;
+ start = jiffies;
+
+ mtx_lock(&c->lock);
+ while (c->done == 0) {
+ res = cv_timedwait_sig(&c->cv, &c->lock, timeout);
+ if (res)
+ goto out;
+ now = jiffies;
+ if (timeout < (now - start)) {
+ res = EWOULDBLOCK;
+ goto out;
+ }
+
+ timeout -= (now - start);
+ start = now;
+ }
+
+ _completion_claim(c);
+ res = 0;
+
+out:
+ mtx_unlock(&c->lock);
+
+ if (res == EWOULDBLOCK) {
+ return 0;
+ } else if ((res == EINTR) || (res == ERESTART)) {
+ return -ERESTART;
+ } else {
+ KASSERT((res == 0), ("res = %d", res));
+ return timeout;
+ }
+}
+
+int
+wait_for_completion_interruptible(struct completion *c)
+{
+ int res = 0;
+
+ mtx_lock(&c->lock);
+ while (c->done == 0) {
+ res = cv_wait_sig(&c->cv, &c->lock);
+ if (res)
+ goto out;
+ }
+
+ _completion_claim(c);
+
+out:
+ mtx_unlock(&c->lock);
+
+ if ((res == EINTR) || (res == ERESTART))
+ res = -ERESTART;
+ return res;
+}
+
+int
+wait_for_completion_killable(struct completion *c)
+{
+
+ return wait_for_completion_interruptible(c);
+}
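+
+/*
+ * The completions above follow the Linux pattern; a minimal sketch of
+ * their use (the embedded "sc->done" field is hypothetical):
+ *
+ *   init_completion(&sc->done);
+ *   ...
+ *   complete(&sc->done);                         // signalling side
+ *   ...
+ *   if (wait_for_completion_interruptible(&sc->done) != 0)
+ *           return;                              // got -ERESTART
+ *   ...
+ *   destroy_completion(&sc->done);
+ *
+ * c->done counts outstanding completions; complete_all() sets it to -1,
+ * after which waiters are released immediately until the completion is
+ * re-armed with INIT_COMPLETION().
+ */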
+
+/*
+ * Semaphore API
+ */
+
+void
+sema_sysinit(void *arg)
+{
+ struct semaphore *s = arg;
+
+ printf("sema_sysinit\n");
+ _sema_init(s, 1);
+}
+
+void
+_sema_init(struct semaphore *s, int value)
+{
+ bzero(s, sizeof(*s));
+ mtx_init(&s->mtx, "sema lock", "VCHIQ semaphore backing lock",
+ MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
+ cv_init(&s->cv, "sema cv");
+ s->value = value;
+}
+
+void
+_sema_destroy(struct semaphore *s)
+{
+ mtx_destroy(&s->mtx);
+ cv_destroy(&s->cv);
+}
+
+void
+down(struct semaphore *s)
+{
+
+ mtx_lock(&s->mtx);
+ while (s->value == 0) {
+ s->waiters++;
+ cv_wait(&s->cv, &s->mtx);
+ s->waiters--;
+ }
+
+ s->value--;
+ mtx_unlock(&s->mtx);
+}
+
+int
+down_interruptible(struct semaphore *s)
+{
+ int ret;
+
+ ret = 0;
+
+ mtx_lock(&s->mtx);
+
+ while (s->value == 0) {
+ s->waiters++;
+ ret = cv_wait_sig(&s->cv, &s->mtx);
+ s->waiters--;
+
+ if (ret == EINTR) {
+ mtx_unlock(&s->mtx);
+ return (-EINTR);
+ }
+
+ if (ret == ERESTART)
+ continue;
+ }
+
+ s->value--;
+ mtx_unlock(&s->mtx);
+
+ return (0);
+}
+
+int
+down_trylock(struct semaphore *s)
+{
+ int ret;
+
+ ret = 0;
+
+ mtx_lock(&s->mtx);
+
+ if (s->value > 0) {
+ /* Success. */
+ s->value--;
+ ret = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+
+ mtx_unlock(&s->mtx);
+
+ return (ret);
+}
+
+void
+up(struct semaphore *s)
+{
+ mtx_lock(&s->mtx);
+ s->value++;
+ if (s->waiters && s->value > 0)
+ cv_signal(&s->cv);
+
+ mtx_unlock(&s->mtx);
+}
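+
+/*
+ * These semaphores mirror the Linux API; a short usage sketch (the
+ * semaphore name is hypothetical):
+ *
+ *   struct semaphore event_sem;
+ *
+ *   _sema_init(&event_sem, 0);          // starts unavailable
+ *   ...
+ *   up(&event_sem);                     // producer signals
+ *   ...
+ *   if (down_interruptible(&event_sem) != 0)
+ *           return;                     // interrupted, got -EINTR
+ *
+ * down() blocks uninterruptibly, and down_trylock() returns 0 when the
+ * semaphore was acquired and -EAGAIN when it was not.
+ */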
+
+/*
+ * Logging API
+ */
+void
+rlprintf(int pps, const char *fmt, ...)
+{
+ va_list ap;
+ static struct timeval last_printf;
+ static int count;
+
+ if (ppsratecheck(&last_printf, &count, pps)) {
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+void
+device_rlprintf(int pps, device_t dev, const char *fmt, ...)
+{
+ va_list ap;
+ static struct timeval last_printf;
+ static int count;
+
+ if (ppsratecheck(&last_printf, &count, pps)) {
+ va_start(ap, fmt);
+ device_print_prettyname(dev);
+ vprintf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/*
+ * Signals API
+ */
+
+void
+flush_signals(VCHIQ_THREAD_T thr)
+{
+ printf("Implement ME: %s\n", __func__);
+}
+
+int
+fatal_signal_pending(VCHIQ_THREAD_T thr)
+{
+ printf("Implement ME: %s\n", __func__);
+ return (0);
+}
+
+/*
+ * kthread API
+ */
+
+/*
+ * This is a hack to avoid a memory leak: thread argument slots come from
+ * a static array and are never recycled, so at most MAX_THREAD_DATA_SLOTS
+ * threads can be created over the lifetime of the module.
+ */
+#define MAX_THREAD_DATA_SLOTS 32
+static int thread_data_slot = 0;
+
+struct thread_data {
+ void *data;
+ int (*threadfn)(void *);
+};
+
+static struct thread_data thread_slots[MAX_THREAD_DATA_SLOTS];
+
+static void
+kthread_wrapper(void *data)
+{
+ struct thread_data *slot;
+
+ slot = data;
+ slot->threadfn(slot->data);
+}
+
+VCHIQ_THREAD_T
+vchiq_thread_create(int (*threadfn)(void *data),
+ void *data,
+ const char namefmt[], ...)
+{
+ VCHIQ_THREAD_T newp;
+ va_list ap;
+ char name[MAXCOMLEN+1];
+ struct thread_data *slot;
+
+ if (thread_data_slot >= MAX_THREAD_DATA_SLOTS) {
+ printf("kthread_create: out of thread data slots\n");
+ return (NULL);
+ }
+
+ slot = &thread_slots[thread_data_slot];
+ slot->data = data;
+ slot->threadfn = threadfn;
+
+ va_start(ap, namefmt);
+ vsnprintf(name, sizeof(name), namefmt, ap);
+ va_end(ap);
+
+ newp = NULL;
+ if (kproc_create(kthread_wrapper, (void*)slot, &newp, 0, 0,
+ "%s", name) != 0) {
+ /* Just to be sure */
+ newp = NULL;
+ } else
+ thread_data_slot++;
+
+ return newp;
+}
+
+void
+set_user_nice(VCHIQ_THREAD_T thr, int nice)
+{
+ /* NOOP */
+}
+
+void
+wake_up_process(VCHIQ_THREAD_T thr)
+{
+ /* NOOP */
+}
+
+void
+bcm_mbox_write(int channel, uint32_t data)
+{
+ device_t mbox;
+
+ mbox = devclass_get_device(devclass_find("mbox"), 0);
+
+ if (mbox)
+ MBOX_WRITE(mbox, channel, data);
+}
diff --git a/sys/contrib/vchiq/interface/compat/vchi_bsd.h b/sys/contrib/vchiq/interface/compat/vchi_bsd.h
new file mode 100644
index 000000000000..2e665b44141e
--- /dev/null
+++ b/sys/contrib/vchiq/interface/compat/vchi_bsd.h
@@ -0,0 +1,434 @@
+/*-
+ * Copyright (c) 2010 Max Khon <fjoe@freebsd.org>
+ * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@bluezbox.com>
+ * Copyright (c) 2013 Jared D. McNeill <jmcneill@invisible.ca>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef __VCHI_BSD_H__
+#define __VCHI_BSD_H__
+
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/mutex.h>
+#include <sys/sx.h>
+#include <sys/sema.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/types.h>
+#include <sys/ioccom.h>
+
+/*
+ * Copy from/to user API
+ */
+#define copy_from_user(to, from, n) copyin((from), (to), (n))
+#define copy_to_user(to, from, n) copyout((from), (to), (n))
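+
+/*
+ * Note the argument-order swap: Linux passes (to, from, n) while
+ * copyin(9)/copyout(9) take (from, to, n), so the macros reorder the
+ * first two arguments.  Both return 0 on success; on failure copyin()
+ * and copyout() return EFAULT rather than a count of uncopied bytes.
+ */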
+
+/*
+ * Bit API
+ */
+
+static __inline int
+test_and_set_bit(int nr, volatile void *addr)
+{
+ int val;
+
+ do {
+ val = *(volatile int *) addr;
+ } while (atomic_cmpset_int(addr, val, val | (1 << nr)) == 0);
+ return (val & (1 << nr));
+}
+
+static __inline int
+test_and_clear_bit(int nr, volatile void *addr)
+{
+ int val;
+
+ do {
+ val = *(volatile int *) addr;
+ } while (atomic_cmpset_int(addr, val, val & ~(1 << nr)) == 0);
+ return (val & (1 << nr));
+}
+
+/*
+ * Atomic API
+ */
+typedef volatile unsigned atomic_t;
+
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_int(p, 1)
+#define atomic_dec(p) atomic_subtract_int(p, 1)
+#define atomic_dec_and_test(p) (atomic_fetchadd_int(p, -1) == 1)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_add(v, p) atomic_add_int(p, v)
+#define atomic_sub(v, p) atomic_subtract_int(p, v)
+
+#define ATOMIC_INIT(v) (v)
+
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ return i + atomic_fetchadd_int(v, i);
+}
+
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ return atomic_fetchadd_int(v, -i) - i;
+}
+
+static inline int
+atomic_cmpxchg(atomic_t *v, int oldv, int newv)
+{
+ if (atomic_cmpset_rel_int(v, oldv, newv))
+ return newv;
+ else
+ return *v;
+}
+
+static inline int
+atomic_xchg(atomic_t *v, int newv)
+{
+ int oldv;
+ if (newv == 0)
+ return atomic_readandclear_int(v);
+ else {
+ do {
+ oldv = atomic_load_acq_int(v);
+ } while (!atomic_cmpset_rel_int(v, oldv, newv));
+ }
+
+ return (oldv);
+}
+
+/*
+ * Spinlock API
+ */
+typedef struct mtx spinlock_t;
+
+#define DEFINE_SPINLOCK(name) \
+ struct mtx name
+#define spin_lock_init(lock) mtx_init(lock, "VCHI spinlock " # lock, NULL, MTX_DEF)
+#define spin_lock_destroy(lock) mtx_destroy(lock)
+#define spin_lock(lock) mtx_lock(lock)
+#define spin_unlock(lock) mtx_unlock(lock)
+#define spin_lock_bh(lock) spin_lock(lock)
+#define spin_unlock_bh(lock) spin_unlock(lock)
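+
+/*
+ * Note: these "spinlocks" are backed by MTX_DEF (sleep) mutexes rather
+ * than true spin mutexes, so they cannot be used from contexts where a
+ * default mutex may not be acquired (e.g. interrupt filters).
+ */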
+
+/*
+ * Mutex API
+ */
+struct mutex {
+ struct mtx mtx;
+};
+
+#define lmutex_init(lock) mtx_init(&(lock)->mtx, #lock, NULL, MTX_DEF)
+#define lmutex_lock(lock) mtx_lock(&(lock)->mtx)
+#define lmutex_lock_interruptible(lock) (mtx_lock(&(lock)->mtx),0)
+#define lmutex_unlock(lock) mtx_unlock(&(lock)->mtx)
+#define lmutex_destroy(lock) mtx_destroy(&(lock)->mtx)
+
+/*
+ * Rwlock API
+ */
+typedef struct sx rwlock_t;
+
+#if defined(SX_ADAPTIVESPIN) && !defined(SX_NOADAPTIVE)
+#define SX_NOADAPTIVE SX_ADAPTIVESPIN
+#endif
+
+#define DEFINE_RWLOCK(name) \
+ struct sx name; \
+ SX_SYSINIT(name, &name, #name)
+#define rwlock_init(rwlock) sx_init_flags(rwlock, "VCHI rwlock", SX_NOADAPTIVE)
+#define read_lock(rwlock) sx_slock(rwlock)
+#define read_unlock(rwlock) sx_sunlock(rwlock)
+
+#define write_lock(rwlock) sx_xlock(rwlock)
+#define write_unlock(rwlock) sx_xunlock(rwlock)
+#define write_lock_irqsave(rwlock, flags) \
+ do { \
+ sx_xlock(rwlock); \
+ (void) &(flags); \
+ } while (0)
+#define write_unlock_irqrestore(rwlock, flags) \
+ sx_xunlock(rwlock)
+
+#define read_lock_bh(rwlock) sx_slock(rwlock)
+#define read_unlock_bh(rwlock) sx_sunlock(rwlock)
+#define write_lock_bh(rwlock) sx_xlock(rwlock)
+#define write_unlock_bh(rwlock) sx_xunlock(rwlock)
+
+/*
+ * Timer API
+ */
+struct timer_list {
+ struct mtx mtx;
+ struct callout callout;
+
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+};
+
+void init_timer(struct timer_list *t);
+void setup_timer(struct timer_list *t, void (*function)(unsigned long), unsigned long data);
+void mod_timer(struct timer_list *t, unsigned long expires);
+void add_timer(struct timer_list *t);
+int del_timer(struct timer_list *t);
+int del_timer_sync(struct timer_list *t);
+
+/*
+ * Completion API
+ */
+struct completion {
+ struct cv cv;
+ struct mtx lock;
+ int done;
+};
+
+void init_completion(struct completion *c);
+void destroy_completion(struct completion *c);
+int try_wait_for_completion(struct completion *);
+int wait_for_completion_interruptible(struct completion *);
+int wait_for_completion_interruptible_timeout(struct completion *, unsigned long ticks);
+int wait_for_completion_killable(struct completion *);
+void wait_for_completion(struct completion *c);
+void complete(struct completion *c);
+void complete_all(struct completion *c);
+void INIT_COMPLETION_locked(struct completion *c);
+
+#define INIT_COMPLETION(x) INIT_COMPLETION_locked(&(x))
+
+/*
+ * Semaphore API
+ */
+struct semaphore {
+ struct mtx mtx;
+ struct cv cv;
+ int value;
+ int waiters;
+};
+
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name; \
+ SYSINIT(name##_sema_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ sema_sysinit, &name); \
+ SYSUNINIT(name##_sema_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ _sema_destroy, __DEVOLATILE(void *, &(name)))
+
+void sema_sysinit(void *arg);
+void _sema_init(struct semaphore *s, int value);
+void _sema_destroy(struct semaphore *s);
+void down(struct semaphore *s);
+int down_interruptible(struct semaphore *s);
+int down_trylock(struct semaphore *s);
+void up(struct semaphore *s);
+
+/*
+ * Logging and assertions API
+ */
+void rlprintf(int pps, const char *fmt, ...)
+ __printflike(2, 3);
+
+void
+device_rlprintf(int pps, device_t dev, const char *fmt, ...)
+ __printflike(3, 4);
+
+#define might_sleep()
+
+#define WARN(condition, msg) \
+({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ printf((msg)); \
+ unlikely(__ret_warn_on); \
+})
+
+#define WARN_ON(condition) \
+({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ printf("WARN_ON: " #condition "\n"); \
+ unlikely(__ret_warn_on); \
+})
+
+#define WARN_ON_ONCE(condition) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN_ON(!__warned)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
+
+#define BUG_ON(cond) \
+ do { \
+ if (cond) \
+ panic("BUG_ON: " #cond); \
+ } while (0)
+
+#define BUG() \
+ do { \
+ panic("BUG: %s:%d", __FILE__, __LINE__); \
+ } while (0)
+
+#define vchiq_static_assert(cond) CTASSERT(cond)
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+#define KERN_CONT ""
+
+#define printk(fmt, args...) printf(fmt, ##args)
+#define vprintk(fmt, args) vprintf(fmt, args)
+
+/*
+ * Malloc API
+ */
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+
+MALLOC_DECLARE(M_VCHI);
+
+#define kmalloc(size, flags) malloc((size), M_VCHI, M_NOWAIT | M_ZERO)
+#define kcalloc(n, size, flags) malloc((n) * (size), M_VCHI, M_NOWAIT | M_ZERO)
+#define kzalloc(a, b) kcalloc(1, (a), (b))
+#define kfree(p) free(p, M_VCHI)
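+
+/*
+ * The GFP flags are ignored: every allocation is made with
+ * M_NOWAIT | M_ZERO, so kmalloc() can return NULL even when the caller
+ * passes GFP_KERNEL and must always be checked.
+ */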
+
+/*
+ * Kernel module API
+ */
+#define __init
+#define __exit
+#define __devinit
+#define __devexit
+#define __devinitdata
+
+/*
+ * Time API
+ */
+#if 1
+/* emulate jiffies */
+static inline unsigned long
+_jiffies(void)
+{
+ struct timeval tv;
+
+ microuptime(&tv);
+ return tvtohz(&tv);
+}
+
+static inline unsigned long
+msecs_to_jiffies(unsigned long msecs)
+{
+ struct timeval tv;
+
+ tv.tv_sec = msecs / 1000UL;
+ tv.tv_usec = (msecs % 1000UL) * 1000UL;
+ return tvtohz(&tv);
+}
+
+#define jiffies _jiffies()
+#else
+#define jiffies ticks
+#endif
+#define HZ hz
+
+#define udelay(usec) DELAY(usec)
+#define mdelay(msec) DELAY((msec) * 1000)
+
+#define schedule_timeout(jiff) pause("dhdslp", jiff)
+
+#if defined(msleep)
+#undef msleep
+#endif
+#define msleep(msec) mdelay(msec)
+
+#define time_after(a, b) ((a) > (b))
+#define time_after_eq(a, b) ((a) >= (b))
+#define time_before(a, b) time_after((b), (a))
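+
+/*
+ * Example timeout loop built on the emulation above (the "done" flag and
+ * the error value are hypothetical):
+ *
+ *   unsigned long deadline = jiffies + msecs_to_jiffies(500);
+ *
+ *   while (!done) {
+ *           if (time_after(jiffies, deadline))
+ *                   return (-ETIMEDOUT);
+ *           schedule_timeout(1);
+ *   }
+ *
+ * jiffies is derived from microuptime(9) via tvtohz(9), so it advances
+ * at hz ticks per second.
+ */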
+
+/*
+ * kthread API (we use proc)
+ */
+typedef struct proc * VCHIQ_THREAD_T;
+
+VCHIQ_THREAD_T vchiq_thread_create(int (*threadfn)(void *data),
+ void *data,
+ const char namefmt[], ...);
+void set_user_nice(VCHIQ_THREAD_T p, int nice);
+void wake_up_process(VCHIQ_THREAD_T p);
+
+/*
+ * Proc APIs
+ */
+void flush_signals(VCHIQ_THREAD_T);
+int fatal_signal_pending(VCHIQ_THREAD_T);
+
+/*
+ * mbox API
+ */
+void bcm_mbox_write(int channel, uint32_t data);
+
+/*
+ * Misc API
+ */
+
+#define ENODATA EINVAL
+
+#define __user
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define current curproc
+#define EXPORT_SYMBOL(x)
+#define PAGE_ALIGN(addr) round_page(addr)
+
+typedef void irqreturn_t;
+typedef off_t loff_t;
+
+#define BCM2835_MBOX_CHAN_VCHIQ 3
+
+#define smp_mb wmb
+#define smp_rmb rmb
+#define smp_wmb wmb
+
+#define device_print_prettyname(dev) device_printf((dev), "")
+
+#endif /* __VCHI_BSD_H__ */
diff --git a/sys/contrib/vchiq/interface/vchi/connections/connection.h b/sys/contrib/vchiq/interface/vchi/connections/connection.h
new file mode 100644
index 000000000000..2da50523b1e8
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/connections/connection.h
@@ -0,0 +1,324 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CONNECTION_H_
+#define CONNECTION_H_
+
+#include "interface/vchi/vchi_cfg_internal.h"
+#include "interface/vchi/vchi_common.h"
+#include "interface/vchi/message_drivers/message.h"
+
+/******************************************************************************
+ Global defs
+ *****************************************************************************/
+
+// Opaque handle for a connection / service pair
+typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
+
+// opaque handle to the connection state information
+typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
+
+typedef struct vchi_connection_t VCHI_CONNECTION_T;
+
+
+/******************************************************************************
+ API
+ *****************************************************************************/
+
+// Routine to init a connection with a particular low level driver
+typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
+ const VCHI_MESSAGE_DRIVER_T * driver );
+
+// Routine to control CRC enabling at a connection level
+typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
+ VCHI_CRC_CONTROL_T control );
+
+// Routine to create a service
+typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
+ int32_t service_id,
+ uint32_t rx_fifo_size,
+ uint32_t tx_fifo_size,
+ int server,
+ VCHI_CALLBACK_T callback,
+ void *callback_param,
+ int32_t want_crc,
+ int32_t want_unaligned_bulk_rx,
+ int32_t want_unaligned_bulk_tx,
+ VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
+
+// Routine to close a service
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
+
+// Routine to queue a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// scatter-gather (vector) message queueing
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// Routine to dequeue a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to peek at a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to hold a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags,
+ void **message_handle );
+
+// Routine to initialise a received message iterator
+typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ VCHI_MSG_ITER_T *iter,
+ VCHI_FLAGS_T flags );
+
+// Routine to release a held message
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *message_handle );
+
+// Routine to get info on a held message
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *message_handle,
+ void **data,
+ int32_t *msg_size,
+ uint32_t *tx_timestamp,
+ uint32_t *rx_timestamp );
+
+// Routine to check whether the iterator has a next message
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ const VCHI_MSG_ITER_T *iter );
+
+// Routine to advance the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter,
+ void **data,
+ uint32_t *msg_size );
+
+// Routine to remove the last message returned by the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter );
+
+// Routine to hold the last message returned by the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter,
+ void **msg_handle );
+
+// Routine to transmit bulk data
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ const void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle );
+
+// Routine to receive data
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle );
+
+// Routine to report if a server is available
+typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
+
+// Routine to report the number of RX slots available
+typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
+
+// Routine to report the RX slot size
+typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
+
+// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
+typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
+ int32_t service,
+ uint32_t length,
+ MESSAGE_TX_CHANNEL_T channel,
+ uint32_t channel_params,
+ uint32_t data_length,
+ uint32_t data_offset);
+
+// Callback to inform a service that a Xon or Xoff message has been received
+typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
+
+// Callback to inform a service that a server available reply message has been received
+typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
+
+// Callback to indicate that bulk auxiliary messages have arrived
+typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
+
+// Callback to indicate that bulk auxiliary messages have arrived
+typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
+
+// Callback with all the connection info you require
+typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
+
+// Callback to inform of a disconnect
+typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
+
+// Callback to inform of a power control request
+typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
+
+// allocate memory suitably aligned for this connection
+typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
+
+// free memory allocated by buffer_allocate
+typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
+
+
+/******************************************************************************
+ System driver struct
+ *****************************************************************************/
+
+struct opaque_vchi_connection_api_t
+{
+ // Routine to init the connection
+ VCHI_CONNECTION_INIT_T init;
+
+ // Connection-level CRC control
+ VCHI_CONNECTION_CRC_CONTROL_T crc_control;
+
+ // Routine to connect to or create service
+ VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
+
+ // Routine to disconnect from a service
+ VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
+
+ // Routine to queue a message
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
+
+ // scatter-gather (vector) message queue
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
+
+ // Routine to dequeue a message
+ VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
+
+ // Routine to peek at a message
+ VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
+
+ // Routine to hold a message
+ VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
+
+ // Routine to initialise a received message iterator
+ VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
+
+ // Routine to release a message
+ VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
+
+ // Routine to get information on a held message
+ VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
+
+ // Routine to check for next message on iterator
+ VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
+
+ // Routine to get next message on iterator
+ VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
+
+ // Routine to remove the last message returned by iterator
+ VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
+
+ // Routine to hold the last message returned by iterator
+ VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
+
+ // Routine to transmit bulk data
+ VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
+
+ // Routine to receive data
+ VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
+
+ // Routine to report the available servers
+ VCHI_CONNECTION_SERVER_PRESENT server_present;
+
+ // Routine to report the number of RX slots available
+ VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
+
+ // Routine to report the RX slot size
+ VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
+
+ // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
+ VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
+
+ // Callback to inform a service that a Xon or Xoff message has been received
+ VCHI_CONNECTION_FLOW_CONTROL flow_control;
+
+ // Callback to inform a service that a server available reply message has been received
+ VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
+
+ // Callback to indicate that bulk auxiliary messages have arrived
+ VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
+
+ // Callback to indicate that a bulk auxiliary message has been transmitted
+ VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
+
+ // Callback to provide information about the connection
+ VCHI_CONNECTION_INFO connection_info;
+
+ // Callback to notify that peer has requested disconnect
+ VCHI_CONNECTION_DISCONNECT disconnect;
+
+ // Callback to notify that peer has requested power change
+ VCHI_CONNECTION_POWER_CONTROL power_control;
+
+ // allocate memory suitably aligned for this connection
+ VCHI_BUFFER_ALLOCATE buffer_allocate;
+
+ // free memory allocated by buffer_allocate
+ VCHI_BUFFER_FREE buffer_free;
+
+};
+
+struct vchi_connection_t {
+ const VCHI_CONNECTION_API_T *api;
+ VCHI_CONNECTION_STATE_T *state;
+#ifdef VCHI_COARSE_LOCKING
+ struct semaphore sem;
+#endif
+};
+
+
+#endif /* CONNECTION_H_ */
+
+/****************************** End of file **********************************/
diff --git a/sys/contrib/vchiq/interface/vchi/message_drivers/message.h b/sys/contrib/vchiq/interface/vchi/message_drivers/message.h
new file mode 100644
index 000000000000..8fa18ac674a3
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/message_drivers/message.h
@@ -0,0 +1,200 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VCHI_MESSAGE_H_
+#define _VCHI_MESSAGE_H_
+
+#include "interface/vchi/vchi_cfg_internal.h"
+#include "interface/vchi/vchi_common.h"
+
+
+typedef enum message_event_type {
+ MESSAGE_EVENT_NONE,
+ MESSAGE_EVENT_NOP,
+ MESSAGE_EVENT_MESSAGE,
+ MESSAGE_EVENT_SLOT_COMPLETE,
+ MESSAGE_EVENT_RX_BULK_PAUSED,
+ MESSAGE_EVENT_RX_BULK_COMPLETE,
+ MESSAGE_EVENT_TX_COMPLETE,
+ MESSAGE_EVENT_MSG_DISCARDED
+} MESSAGE_EVENT_TYPE_T;
+
+typedef enum vchi_msg_flags
+{
+ VCHI_MSG_FLAGS_NONE = 0x0,
+ VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
+} VCHI_MSG_FLAGS_T;
+
+typedef enum message_tx_channel
+{
+ MESSAGE_TX_CHANNEL_MESSAGE = 0,
+ MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
+} MESSAGE_TX_CHANNEL_T;
+
+// Macros used for cycling through bulk channels
+#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
+#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
+
+typedef enum message_rx_channel
+{
+ MESSAGE_RX_CHANNEL_MESSAGE = 0,
+ MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
+} MESSAGE_RX_CHANNEL_T;
+
+// Message receive slot information
+typedef struct rx_msg_slot_info {
+
+ struct rx_msg_slot_info *next;
+ //struct slot_info *prev;
+#if !defined VCHI_COARSE_LOCKING
+ struct semaphore sem;
+#endif
+
+ uint8_t *addr; // base address of slot
+ uint32_t len; // length of slot in bytes
+
+ uint32_t write_ptr; // hardware causes this to advance
+ uint32_t read_ptr; // this module does the reading
+ int active; // is this slot in the hardware dma fifo?
+ uint32_t msgs_parsed; // count how many messages are in this slot
+ uint32_t msgs_released; // how many messages have been released
+ void *state; // connection state information
+ uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
+} RX_MSG_SLOTINFO_T;
+
+// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
+// In particular, it mustn't use addr and len - they're the client buffer, but the message
+// driver will be tasked with sending the aligned core section.
+typedef struct rx_bulk_slotinfo_t {
+ struct rx_bulk_slotinfo_t *next;
+
+ struct semaphore *blocking;
+
+ // needed by DMA
+ void *addr;
+ uint32_t len;
+
+ // needed for the callback
+ void *service;
+ void *handle;
+ VCHI_FLAGS_T flags;
+} RX_BULK_SLOTINFO_T;
+
+
+/* ----------------------------------------------------------------------
+ * each connection driver will have a pool of the following struct.
+ *
+ * the pool will be managed by vchi_qman_*
+ * this means there will be multiple queues (single linked lists)
+ * a given struct message_info will be on exactly one of these queues
+ * at any one time
+ * -------------------------------------------------------------------- */
+typedef struct rx_message_info {
+
+ struct message_info *next;
+ //struct message_info *prev;
+
+ uint8_t *addr;
+ uint32_t len;
+ RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
+ uint32_t tx_timestamp;
+ uint32_t rx_timestamp;
+
+} RX_MESSAGE_INFO_T;
+
+typedef struct {
+ MESSAGE_EVENT_TYPE_T type;
+
+ struct {
+ // for messages
+ void *addr; // address of message
+ uint16_t slot_delta; // whether this message indicated slot delta
+ uint32_t len; // length of message
+ RX_MSG_SLOTINFO_T *slot; // slot this message is in
+ int32_t service; // service id this message is destined for
+ uint32_t tx_timestamp; // timestamp from the header
+ uint32_t rx_timestamp; // timestamp when we parsed it
+ } message;
+
+ // FIXME: cleanup slot reporting...
+ RX_MSG_SLOTINFO_T *rx_msg;
+ RX_BULK_SLOTINFO_T *rx_bulk;
+ void *tx_handle;
+ MESSAGE_TX_CHANNEL_T tx_channel;
+
+} MESSAGE_EVENT_T;
+
+
+// callbacks
+typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
+
+typedef struct {
+ VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
+} VCHI_MESSAGE_DRIVER_OPEN_T;
+
+
+// handle to this instance of message driver (as returned by ->open)
+typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
+
+struct opaque_vchi_message_driver_t {
+ VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
+ int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
+ int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
+ int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
+ int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
+ void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
+ int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count,
+ void *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
+
+ int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
+ int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
+ void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
+ void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
+ int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
+ int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
+
+ int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
+ void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
+ void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
+};
+
+
+#endif // _VCHI_MESSAGE_H_
+
+/****************************** End of file ***********************************/
diff --git a/sys/contrib/vchiq/interface/vchi/vchi.h b/sys/contrib/vchiq/interface/vchi/vchi.h
new file mode 100644
index 000000000000..f1b9d1c2bb5a
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/vchi.h
@@ -0,0 +1,373 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_H_
+#define VCHI_H_
+
+#include "interface/vchi/vchi_cfg.h"
+#include "interface/vchi/vchi_common.h"
+#include "interface/vchi/connections/connection.h"
+#include "vchi_mh.h"
+
+
+/******************************************************************************
+ Global defs
+ *****************************************************************************/
+
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
+
+#ifdef USE_VCHIQ_ARM
+#define VCHI_BULK_ALIGNED(x) 1
+#else
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
+#endif
+
+struct vchi_version {
+ uint32_t version;
+ uint32_t version_min;
+};
+#define VCHI_VERSION(v_) { v_, v_ }
+#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
+
+typedef enum
+{
+ VCHI_VEC_POINTER,
+ VCHI_VEC_HANDLE,
+ VCHI_VEC_LIST
+} VCHI_MSG_VECTOR_TYPE_T;
+
+typedef struct vchi_msg_vector_ex {
+
+ VCHI_MSG_VECTOR_TYPE_T type;
+ union
+ {
+ // a memory handle
+ struct
+ {
+ VCHI_MEM_HANDLE_T handle;
+ uint32_t offset;
+ int32_t vec_len;
+ } handle;
+
+ // an ordinary data pointer
+ struct
+ {
+ const void *vec_base;
+ int32_t vec_len;
+ } ptr;
+
+ // a nested vector list
+ struct
+ {
+ struct vchi_msg_vector_ex *vec;
+ uint32_t vec_len;
+ } list;
+ } u;
+} VCHI_MSG_VECTOR_EX_T;
+
+
+// Construct an entry in a msg vector for a pointer (p) of length (l)
+#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
+
+// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
+#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
+
+// Macros to manipulate 'FOURCC' values
+#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
+#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
+
+
+// Opaque service information
+struct opaque_vchi_service_t;
+
+// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
+// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
+typedef struct
+{
+ struct opaque_vchi_service_t *service;
+ void *message;
+} VCHI_HELD_MSG_T;
+
+
+
+// structure used to provide the information needed to open a server or a client
+typedef struct {
+ struct vchi_version version;
+ int32_t service_id;
+ VCHI_CONNECTION_T *connection;
+ uint32_t rx_fifo_size;
+ uint32_t tx_fifo_size;
+ VCHI_CALLBACK_T callback;
+ void *callback_param;
+ /* client intends to receive bulk transfers of
+ odd lengths or into unaligned buffers */
+ int32_t want_unaligned_bulk_rx;
+ /* client intends to transmit bulk transfers of
+ odd lengths or out of unaligned buffers */
+ int32_t want_unaligned_bulk_tx;
+ /* client wants to check CRCs on (bulk) xfers.
+ Only needs to be set at 1 end - will do both directions. */
+ int32_t want_crc;
+} SERVICE_CREATION_T;
+
+// Opaque handle for a VCHI instance
+typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
+
+// Opaque handle for a server or client
+typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
+
+// Service registration & startup
+typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
+
+typedef struct service_info_tag {
+ const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
+ VCHI_SERVICE_INIT init; /* Service initialisation function */
+ void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
+} SERVICE_INFO_T;
+
+/******************************************************************************
+ Global funcs - implementation is specific to which side you are on (local / remote)
+ *****************************************************************************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
+ const VCHI_MESSAGE_DRIVER_T * low_level);
+
+
+// Routine used to initialise the vchi on both local + remote connections
+extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
+
+extern int32_t vchi_exit( void );
+
+extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
+ const uint32_t num_connections,
+ VCHI_INSTANCE_T instance_handle );
+
+// When this is called, ensure that all services have no data pending.
+// Bulk transfers can remain 'queued'
+extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
+
+// Global control over bulk CRC checking
+extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
+ VCHI_CRC_CONTROL_T control );
+
+// helper functions
+extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
+extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
+extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
+
+
+/******************************************************************************
+ Global service API
+ *****************************************************************************/
+// Routine to create a named service
+extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle );
+
+// Routine to destroy a service
+extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to open a named service
+extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle);
+
+extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
+ short *peer_version );
+
+// Routine to close a named service
+extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to increment ref count on a named service
+extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to decrement ref count on a named service
+extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to send a message across a service
+extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// scatter-gather (vector) and send message
+int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_EX_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// legacy scatter-gather (vector) and send message, only handles pointers
+int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// Routine to receive a msg from a service
+// Dequeue is equivalent to hold, copy into client buffer, release
+extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to look at a message in place.
+// The message is not dequeued, so a subsequent call to peek or dequeue
+// will return the same message.
+extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to remove a message after it has been read in place with peek
+// The first message on the queue is dequeued.
+extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to look at a message in place.
+// The message is dequeued, so the caller is left holding it; the descriptor is
+// filled in and must be released when the user has finished with the message.
+extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
+ void **data, // } may be NULL, as info can be
+ uint32_t *msg_size, // } obtained from HELD_MSG_T
+ VCHI_FLAGS_T flags,
+ VCHI_HELD_MSG_T *message_descriptor );
+
+// Initialise an iterator to look through messages in place
+extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_ITER_T *iter,
+ VCHI_FLAGS_T flags );
+
+/******************************************************************************
+ Global service support API - operations on held messages and message iterators
+ *****************************************************************************/
+
+// Routine to get the address of a held message
+extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the size of a held message
+extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the transmit timestamp as written into the header by the peer
+extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the reception timestamp, written as we parsed the header
+extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
+
+// Routine to release a held message after it has been processed
+extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
+
+// Indicates whether the iterator has a next message.
+extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
+
+// Return the pointer and length for the next message and advance the iterator.
+extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
+ void **data,
+ uint32_t *msg_size );
+
+// Remove the last message returned by vchi_msg_iter_next.
+// Can only be called once after each call to vchi_msg_iter_next.
+extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
+
+// Hold the last message returned by vchi_msg_iter_next.
+// Can only be called once after each call to vchi_msg_iter_next.
+extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
+ VCHI_HELD_MSG_T *message );
+
+// Return information for the next message, and hold it, advancing the iterator.
+extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
+ void **data, // } may be NULL
+ uint32_t *msg_size, // }
+ VCHI_HELD_MSG_T *message );
+
+
+/******************************************************************************
+ Global bulk API
+ *****************************************************************************/
+
+// Routine to prepare interface for a transfer from the other side
+extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+
+
+// Prepare interface for a transfer from the other side into relocatable memory.
+int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T h_dst,
+ uint32_t offset,
+ uint32_t data_size,
+ const VCHI_FLAGS_T flags,
+ void * const bulk_handle );
+
+// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
+extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
+ void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+
+
+/******************************************************************************
+ Configuration plumbing
+ *****************************************************************************/
+
+// function prototypes for the different mid layers (the state info gives the different physical connections)
+extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
+//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
+//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
+
+// declare all message drivers here
+const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T h_src,
+ uint32_t offset,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+#endif /* VCHI_H_ */
+
+/****************************** End of file **********************************/
diff --git a/sys/contrib/vchiq/interface/vchi/vchi_cfg.h b/sys/contrib/vchiq/interface/vchi/vchi_cfg.h
new file mode 100644
index 000000000000..26bc2d38d725
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/vchi_cfg.h
@@ -0,0 +1,224 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_CFG_H_
+#define VCHI_CFG_H_
+
+/****************************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by VCHI
+ * services.
+ ***************************************************************************************/
+
+/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
+/* Really determined by the message driver, and should be available from a run-time call. */
+#ifndef VCHI_BULK_ALIGN
+# if __VCCOREVER__ >= 0x04000000
+# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
+# else
+# define VCHI_BULK_ALIGN 16
+# endif
+#endif
+
+/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
+/* May be less than or greater than VCHI_BULK_ALIGN */
+/* Really determined by the message driver, and should be available from a run-time call. */
+#ifndef VCHI_BULK_GRANULARITY
+# if __VCCOREVER__ >= 0x04000000
+# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
+# else
+# define VCHI_BULK_GRANULARITY 16
+# endif
+#endif
+
+/* The largest possible message to be queued with vchi_msg_queue. */
+#ifndef VCHI_MAX_MSG_SIZE
+# if defined VCHI_LOCAL_HOST_PORT
+# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
+# else
+# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
+# endif
+#endif
+
+/******************************************************************************************
+ * Defines below are system configuration options, and should not be used by VCHI services.
+ *****************************************************************************************/
+
+/* How many connections can we support? A localhost implementation uses 2 connections,
+ * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
+ * driver. */
+#ifndef VCHI_MAX_NUM_CONNECTIONS
+# define VCHI_MAX_NUM_CONNECTIONS 3
+#endif
+
+/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
+ * amount of static memory. */
+#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
+# define VCHI_MAX_SERVICES_PER_CONNECTION 36
+#endif
+
+/* Adjust if using a message driver that supports more logical TX channels */
+#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
+# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
+#endif
+
+/* Adjust if using a message driver that supports more logical RX channels */
+#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
+# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
+#endif
+
+/* How many receive slots do we use? This times VCHI_MAX_MSG_SIZE gives the effective
+ * receive queue space, less message headers. */
+#ifndef VCHI_NUM_READ_SLOTS
+# if defined(VCHI_LOCAL_HOST_PORT)
+# define VCHI_NUM_READ_SLOTS 4
+# else
+# define VCHI_NUM_READ_SLOTS 48
+# endif
+#endif
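+
+/* For example, with the non-localhost defaults above (48 slots of VCHI_MAX_MSG_SIZE = 4096
+ * bytes) that works out to roughly 192 KiB of receive buffering, less message headers. */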
+
+/* Do we utilise the overrun facility for receive message slots? Can aid peer transmit
+ * performance. Only define on VideoCore end, talking to host.
+ */
+//#define VCHI_MSG_RX_OVERRUN
+
+/* How many transmit slots do we use? Generally we don't need many, as the hardware driver
+ * underneath VCHI will usually have its own buffering. */
+#ifndef VCHI_NUM_WRITE_SLOTS
+# define VCHI_NUM_WRITE_SLOTS 4
+#endif
+
+/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
+ * then it's taking up too much buffer space, and the peer service will be told to stop
+ * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
+ * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
+ * is too high. */
+#ifndef VCHI_XOFF_THRESHOLD
+# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
+#endif
+
+/* After we've sent an XOFF, the peer will be told to resume transmission once the local
+ * service has dequeued/released enough messages that it's now occupying
+ * VCHI_XON_THRESHOLD slots or fewer. */
+#ifndef VCHI_XON_THRESHOLD
+# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
+#endif
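+
+/* With the default non-localhost sizing above (48 read slots), a service is sent XOFF once it
+ * holds messages in 24 or more slots, and XON again when that drops to 12 or fewer. */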
+
+/* A size below which a bulk transfer omits the handshake completely and always goes
+ * via the message channel, if bulk auxiliary is being sent on that service. (The user
+ * can guarantee this by enabling unaligned transmits).
+ * Not API. */
+#ifndef VCHI_MIN_BULK_SIZE
+# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
+#endif
+
+/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
+ * speed and latency; the smaller the chunk size the better the chance of messages and other
+ * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
+ * break transmissions into chunks.
+ */
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
+# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
+#endif
+
+/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
+ * with multiple-line frames. Only use if the receiver can cope. */
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
+# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
+#endif
+
+/* How many TX messages can we have pending in our transmit slots? Once exhausted,
+ * vchi_msg_queue will be blocked. */
+#ifndef VCHI_TX_MSG_QUEUE_SIZE
+# define VCHI_TX_MSG_QUEUE_SIZE 256
+#endif
+
+/* How many RX messages can we have parsed in the receive slots? Once exhausted, parsing
+ * will be suspended until older messages are dequeued/released. */
+#ifndef VCHI_RX_MSG_QUEUE_SIZE
+# define VCHI_RX_MSG_QUEUE_SIZE 256
+#endif
+
+/* We really should be able to cope if we run out of received message descriptors by
+ * suspending parsing, as the comment above says, but we don't. This sweeps the issue
+ * under the carpet. */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+# undef VCHI_RX_MSG_QUEUE_SIZE
+# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+#endif
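+
+/* With the non-localhost defaults above, this raises VCHI_RX_MSG_QUEUE_SIZE from 256 to
+ * (4096/16 + 1) * 48 = 12336 entries. */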
+
+/* How many bulk transmits can we have pending? Once exhausted, vchi_bulk_queue_transmit
+ * will be blocked. */
+#ifndef VCHI_TX_BULK_QUEUE_SIZE
+# define VCHI_TX_BULK_QUEUE_SIZE 64
+#endif
+
+/* How many bulk receives can we have pending? Once exhausted, vchi_bulk_queue_receive
+ * will be blocked. */
+#ifndef VCHI_RX_BULK_QUEUE_SIZE
+# define VCHI_RX_BULK_QUEUE_SIZE 64
+#endif
+
+/* A limit on how many outstanding bulk requests we expect the peer to give us. If
+ * the peer asks for more than this, VCHI will fail and assert. The number is determined
+ * by the peer's hardware - it's the number of outstanding requests that can be queued
+ * on all bulk channels. VC3's MPHI peripheral allows 16. */
+#ifndef VCHI_MAX_PEER_BULK_REQUESTS
+# define VCHI_MAX_PEER_BULK_REQUESTS 32
+#endif
+
+/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+ * transmitter on and off.
+ */
+/*#define VCHI_CCP2TX_MANUAL_POWER*/
+
+#ifndef VCHI_CCP2TX_MANUAL_POWER
+
+/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
+ * negative for no IDLE.
+ */
+# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
+# define VCHI_CCP2TX_IDLE_TIMEOUT 5
+# endif
+
+/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
+ * negative for no OFF.
+ */
+# ifndef VCHI_CCP2TX_OFF_TIMEOUT
+# define VCHI_CCP2TX_OFF_TIMEOUT 1000
+# endif
+
+#endif /* VCHI_CCP2TX_MANUAL_POWER */
+
+#endif /* VCHI_CFG_H_ */
+
+/****************************** End of file **********************************/
diff --git a/sys/contrib/vchiq/interface/vchi/vchi_cfg_internal.h b/sys/contrib/vchiq/interface/vchi/vchi_cfg_internal.h
new file mode 100644
index 000000000000..35dcba4837d4
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/vchi_cfg_internal.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_CFG_INTERNAL_H_
+#define VCHI_CFG_INTERNAL_H_
+
+/****************************************************************************************
+ * Control optimisation attempts.
+ ***************************************************************************************/
+
+// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
+#define VCHI_COARSE_LOCKING
+
+// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
+// (only relevant if VCHI_COARSE_LOCKING)
+#define VCHI_ELIDE_BLOCK_EXIT_LOCK
+
+// Avoid lock on non-blocking peek
+// (only relevant if VCHI_COARSE_LOCKING)
+#define VCHI_AVOID_PEEK_LOCK
+
+// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
+#define VCHI_MULTIPLE_HANDLER_THREADS
+
+// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
+// our way through the pool of descriptors.
+#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
+
+// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
+#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
+
+// Don't use message descriptors for TX messages that don't need them
+#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
+
+// Nano-locks for multiqueue
+//#define VCHI_MQUEUE_NANOLOCKS
+
+// Lock-free(er) dequeuing
+//#define VCHI_RX_NANOLOCKS
+
+#endif /*VCHI_CFG_INTERNAL_H_*/
diff --git a/sys/contrib/vchiq/interface/vchi/vchi_common.h b/sys/contrib/vchiq/interface/vchi/vchi_common.h
new file mode 100644
index 000000000000..9e6c00e82324
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/vchi_common.h
@@ -0,0 +1,163 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_COMMON_H_
+#define VCHI_COMMON_H_
+
+
+//flags used when sending messages (must be bitmapped)
+typedef enum
+{
+ VCHI_FLAGS_NONE = 0x0,
+ VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
+ VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
+ VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
+ VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
+
+ VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
+ VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
+ VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
+ VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
+ VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
+ VCHI_FLAGS_INTERNAL = 0xFF0000
+} VCHI_FLAGS_T;
+
+// constants for vchi_crc_control()
+typedef enum {
+ VCHI_CRC_NOTHING = -1,
+ VCHI_CRC_PER_SERVICE = 0,
+ VCHI_CRC_EVERYTHING = 1,
+} VCHI_CRC_CONTROL_T;
+
+//callback reasons when an event occurs on a service
+typedef enum
+{
+ VCHI_CALLBACK_REASON_MIN,
+
+ //This indicates that there is data available
+ //handle is the msg id that was transmitted with the data
+ // When a message is received and there was no FULL message available previously, send callback
+ // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
+ VCHI_CALLBACK_MSG_AVAILABLE,
+ VCHI_CALLBACK_MSG_SENT,
+ VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
+
+ // This indicates that a transfer from the other side has completed
+ VCHI_CALLBACK_BULK_RECEIVED,
+ //This indicates that data queued up to be sent has now gone
+ //handle is the msg id that was used when sending the data
+ VCHI_CALLBACK_BULK_SENT,
+ VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
+ VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
+
+ VCHI_CALLBACK_SERVICE_CLOSED,
+
+ // this side has sent XOFF to peer due to lack of data consumption by service
+ // (suggests the service may need to take some recovery action if it has
+ // been deliberately holding off consuming data)
+ VCHI_CALLBACK_SENT_XOFF,
+ VCHI_CALLBACK_SENT_XON,
+
+ // indicates that a bulk transfer has finished reading the source buffer
+ VCHI_CALLBACK_BULK_DATA_READ,
+
+ // power notification events (currently host side only)
+ VCHI_CALLBACK_PEER_OFF,
+ VCHI_CALLBACK_PEER_SUSPENDED,
+ VCHI_CALLBACK_PEER_ON,
+ VCHI_CALLBACK_PEER_RESUMED,
+ VCHI_CALLBACK_FORCED_POWER_OFF,
+
+#ifdef USE_VCHIQ_ARM
+ // some extra notifications provided by vchiq_arm
+ VCHI_CALLBACK_SERVICE_OPENED,
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
+#endif
+
+ VCHI_CALLBACK_REASON_MAX
+} VCHI_CALLBACK_REASON_T;
+
+//Callback used by all services / bulk transfers
+typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
+ VCHI_CALLBACK_REASON_T reason,
+ void *handle ); //for transmitting msg's only
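+
+// As an illustration only, a minimal service callback (here given the placeholder name
+// my_callback) might look like:
+//
+//    static void my_callback( void *callback_param, VCHI_CALLBACK_REASON_T reason, void *handle )
+//    {
+//       if( reason == VCHI_CALLBACK_MSG_AVAILABLE )
+//          ;  /* kick the task that drains this service's message fifo */
+//    }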
+
+
+
+/*
+ * Define vector struct for scatter-gather (vector) operations
+ * Vectors can be nested - if a vector element has negative length, then
+ * the data pointer is treated as pointing to another vector array, with
+ * '-vec_len' elements. Thus to append a header onto an existing vector,
+ * you can do this:
+ *
+ * void foo(const VCHI_MSG_VECTOR_T *v, int n)
+ * {
+ * VCHI_MSG_VECTOR_T nv[2];
+ * nv[0].vec_base = my_header;
+ * nv[0].vec_len = sizeof my_header;
+ * nv[1].vec_base = v;
+ * nv[1].vec_len = -n;
+ * ...
+ *
+ */
+typedef struct vchi_msg_vector {
+ const void *vec_base;
+ int32_t vec_len;
+} VCHI_MSG_VECTOR_T;
+
+// Opaque type for a connection API
+typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
+
+// Opaque type for a message driver
+typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
+
+
+// Iterator structure for reading ahead through the received message queue. Allocated by the
+// client, initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
+// Iterates over the messages in the queue at the instant of the call to vchi_msg_look_ahead -
+// it will not proceed to messages received since then. Behaviour is undefined if an iterator
+// is used again after messages for that service are removed/dequeued by any
+// means other than vchi_msg_iter_... calls on the iterator itself.
+typedef struct {
+ struct opaque_vchi_service_t *service;
+ void *last;
+ void *next;
+ void *remove;
+} VCHI_MSG_ITER_T;
+
+
+#endif // VCHI_COMMON_H_
diff --git a/sys/contrib/vchiq/interface/vchi/vchi_mh.h b/sys/contrib/vchiq/interface/vchi/vchi_mh.h
new file mode 100644
index 000000000000..0381bc4172cb
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchi/vchi_mh.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_MH_H_
+#define VCHI_MH_H_
+
+#include <sys/types.h>
+
+typedef int32_t VCHI_MEM_HANDLE_T;
+#define VCHI_MEM_HANDLE_INVALID 0
+
+#endif
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq.h
new file mode 100644
index 000000000000..f87dcbdaaffc
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_VCHIQ_H
+#define VCHIQ_VCHIQ_H
+
+#include "vchiq_if.h"
+#include "vchiq_util.h"
+
+#endif
+
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835.h
new file mode 100644
index 000000000000..7ea5c64d5343
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_2835_H
+#define VCHIQ_2835_H
+
+#include "vchiq_pagelist.h"
+
+#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
+#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
+
+#endif /* VCHIQ_2835_H */
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
new file mode 100644
index 000000000000..a7c9683a39dc
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
@@ -0,0 +1,578 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <interface/compat/vchi_bsd.h>
+
+#include <sys/malloc.h>
+#include <sys/rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_param.h>
+#include <vm/vm_phys.h>
+
+#include <machine/bus.h>
+#include <arm/broadcom/bcm2835/bcm2835_mbox.h>
+#include <arm/broadcom/bcm2835/bcm2835_vcbus.h>
+
+MALLOC_DEFINE(M_VCPAGELIST, "vcpagelist", "VideoCore pagelist memory");
+
+#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
+
+#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
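+/* VCHIQ_ARM_ADDRESS translates a kernel virtual address into the VideoCore bus address
+ * expected by the firmware. */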
+#define VCHIQ_ARM_ADDRESS(x) ((void *)PHYS_TO_VCBUS(pmap_kextract((vm_offset_t)(x))))
+
+#include "vchiq_arm.h"
+#include "vchiq_2835.h"
+#include "vchiq_connected.h"
+
+#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
+
+typedef struct vchiq_2835_state_struct {
+ int inited;
+ VCHIQ_ARM_STATE_T arm_state;
+} VCHIQ_2835_ARM_STATE_T;
+
+static char *g_slot_mem;
+static int g_slot_mem_size;
+vm_paddr_t g_slot_phys;
+/* BSD DMA */
+bus_dma_tag_t bcm_slots_dma_tag;
+bus_dmamap_t bcm_slots_dma_map;
+
+static FRAGMENTS_T *g_fragments_base;
+static FRAGMENTS_T *g_free_fragments;
+struct semaphore g_free_fragments_sema;
+
+static DEFINE_SEMAPHORE(g_free_fragments_mutex);
+
+typedef struct bulkinfo_struct {
+ PAGELIST_T *pagelist;
+ bus_dma_tag_t pagelist_dma_tag;
+ bus_dmamap_t pagelist_dma_map;
+ void *buf;
+ size_t size;
+} BULKINFO_T;
+
+static int
+create_pagelist(char __user *buf, size_t count, unsigned short type,
+ struct proc *p, BULKINFO_T *bi);
+
+static void
+free_pagelist(BULKINFO_T *bi, int actual);
+
+static void
+vchiq_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
+{
+ bus_addr_t *addr;
+
+ if (err)
+ return;
+
+ addr = (bus_addr_t*)arg;
+ *addr = PHYS_TO_VCBUS(segs[0].ds_addr);
+}
+
+int __init
+vchiq_platform_init(VCHIQ_STATE_T *state)
+{
+ VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
+ int frag_mem_size;
+ int err;
+ int i;
+
+ /* Allocate space for the channels in coherent memory */
+ g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
+ frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
+
+ err = bus_dma_tag_create(
+ NULL,
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ g_slot_mem_size + frag_mem_size, 1, /* maxsize, nsegments */
+ g_slot_mem_size + frag_mem_size, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &bcm_slots_dma_tag);
+
+ err = bus_dmamem_alloc(bcm_slots_dma_tag, (void **)&g_slot_mem,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bcm_slots_dma_map);
+ if (err) {
+ vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory");
+ err = -ENOMEM;
+ goto failed_alloc;
+ }
+
+ err = bus_dmamap_load(bcm_slots_dma_tag, bcm_slots_dma_map, g_slot_mem,
+ g_slot_mem_size + frag_mem_size, vchiq_dmamap_cb,
+ &g_slot_phys, 0);
+
+ if (err) {
+ vchiq_log_error(vchiq_core_log_level, "cannot load DMA map");
+ err = -ENOMEM;
+ goto failed_load;
+ }
+
+ WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
+
+ vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
+ if (!vchiq_slot_zero) {
+ err = -EINVAL;
+ goto failed_init_slots;
+ }
+
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
+ (int)g_slot_phys + g_slot_mem_size;
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
+ MAX_FRAGMENTS;
+
+ g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
+ g_slot_mem_size += frag_mem_size;
+
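+	/*
+	 * Chain the fragments into a singly-linked free list, storing each
+	 * "next" pointer in the body of the free fragment itself.
+	 */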
+ g_free_fragments = g_fragments_base;
+ for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
+ *(FRAGMENTS_T **)&g_fragments_base[i] =
+ &g_fragments_base[i + 1];
+ }
+ *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
+ _sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
+
+ if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
+ VCHIQ_SUCCESS) {
+ err = -EINVAL;
+ goto failed_vchiq_init;
+ }
+
+ bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq_init - done (slots %x, phys %x)",
+ (unsigned int)vchiq_slot_zero, g_slot_phys);
+
+ vchiq_call_connected_callbacks();
+
+ return 0;
+
+failed_vchiq_init:
+failed_init_slots:
+failed_load:
+ bus_dmamap_unload(bcm_slots_dma_tag, bcm_slots_dma_map);
+failed_alloc:
+ bus_dmamap_destroy(bcm_slots_dma_tag, bcm_slots_dma_map);
+ bus_dma_tag_destroy(bcm_slots_dma_tag);
+
+ return err;
+}
+
+void __exit
+vchiq_platform_exit(VCHIQ_STATE_T *state)
+{
+
+ bus_dmamap_unload(bcm_slots_dma_tag, bcm_slots_dma_map);
+ bus_dmamap_destroy(bcm_slots_dma_tag, bcm_slots_dma_map);
+ bus_dma_tag_destroy(bcm_slots_dma_tag);
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_init_state(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
+ status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
+ if(status != VCHIQ_SUCCESS)
+ {
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
+ }
+ return status;
+}
+
+VCHIQ_ARM_STATE_T*
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
+{
+ if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
+ {
+ BUG();
+ }
+ return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
+}
+
+int
+vchiq_copy_from_user(void *dst, const void *src, int size)
+{
+
+ if (((vm_offset_t)(src)) < VM_MIN_KERNEL_ADDRESS) {
+ int error = copyin(src, dst, size);
+ return error ? VCHIQ_ERROR : VCHIQ_SUCCESS;
+ }
+ else
+ bcopy(src, dst, size);
+
+ return 0;
+}
+
+VCHIQ_STATUS_T
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
+ void *offset, int size, int dir)
+{
+ BULKINFO_T *bi;
+ int ret;
+
+ WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
+ bi = malloc(sizeof(*bi), M_VCPAGELIST, M_WAITOK | M_ZERO);
+ if (bi == NULL)
+ return VCHIQ_ERROR;
+
+ ret = create_pagelist((char __user *)offset, size,
+ (dir == VCHIQ_BULK_RECEIVE)
+ ? PAGELIST_READ
+ : PAGELIST_WRITE,
+ current,
+ bi);
+ if (ret != 0)
+ return VCHIQ_ERROR;
+
+ bulk->handle = memhandle;
+ bulk->data = VCHIQ_ARM_ADDRESS(bi->pagelist);
+
+ /* Store the pagelist address in remote_data, which isn't used by the
+ slave. */
+ bulk->remote_data = bi;
+
+ return VCHIQ_SUCCESS;
+}
+
+void
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
+{
+ if (bulk && bulk->remote_data && bulk->actual)
+ free_pagelist((BULKINFO_T *)bulk->remote_data, bulk->actual);
+}
+
+void
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
+{
+ /*
+ * This should only be called on the master (VideoCore) side, but
+ * provide an implementation to avoid the need for ifdefery.
+ */
+ BUG();
+}
+
+void
+vchiq_dump_platform_state(void *dump_context)
+{
+ char buf[80];
+ int len;
+ len = snprintf(buf, sizeof(buf),
+ " Platform: 2835 (VC master)");
+ vchiq_dump(dump_context, buf, len + 1);
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_suspend(VCHIQ_STATE_T *state)
+{
+ return VCHIQ_ERROR;
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_resume(VCHIQ_STATE_T *state)
+{
+ return VCHIQ_SUCCESS;
+}
+
+void
+vchiq_platform_paused(VCHIQ_STATE_T *state)
+{
+}
+
+void
+vchiq_platform_resumed(VCHIQ_STATE_T *state)
+{
+}
+
+int
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
+{
+ return 1; // autosuspend not supported - videocore always wanted
+}
+
+int
+vchiq_platform_use_suspend_timer(void)
+{
+ return 0;
+}
+
+void
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
+{
+	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
+}
+
+void
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
+{
+	(void)state;
+}
+
+/*
+ * Local functions
+ */
+
+/* There is a potential problem with partial cache lines (pages?)
+** at the ends of the block when reading. If the CPU accessed anything in
+** the same line (page?) then it may have pulled old data into the cache,
+** obscuring the new data underneath. We can solve this by transferring the
+** partial cache lines separately, and allowing the ARM to copy into the
+** cached area.
+
+** N.B. This implementation plays slightly fast and loose with the Linux
+** driver programming rules, e.g. its use of __virt_to_bus instead of
+** dma_map_single, but it isn't a multi-platform driver and it benefits
+** from increased speed as a result.
+*/
+
+static int
+create_pagelist(char __user *buf, size_t count, unsigned short type,
+ struct proc *p, BULKINFO_T *bi)
+{
+ PAGELIST_T *pagelist;
+ vm_page_t* pages;
+ unsigned long *addrs;
+ unsigned int num_pages, i;
+ vm_offset_t offset;
+ int pagelist_size;
+ char *addr, *base_addr, *next_addr;
+ int run, addridx, actual_pages;
+ int err;
+ vm_paddr_t pagelist_phys;
+
+ offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
+ num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ bi->pagelist = NULL;
+ bi->buf = buf;
+ bi->size = count;
+
+ /* Allocate enough storage to hold the page pointers and the page
+ ** list
+ */
+ pagelist_size = sizeof(PAGELIST_T) +
+ (num_pages * sizeof(unsigned long)) +
+ (num_pages * sizeof(pages[0]));
+
+ err = bus_dma_tag_create(
+ NULL,
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ pagelist_size, 1, /* maxsize, nsegments */
+ pagelist_size, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &bi->pagelist_dma_tag);
+
+ err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
+ BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
+ if (err) {
+ vchiq_log_error(vchiq_core_log_level, "Unable to allocate pagelist memory");
+ err = -ENOMEM;
+ goto failed_alloc;
+ }
+
+ err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map, pagelist,
+ pagelist_size, vchiq_dmamap_cb,
+ &pagelist_phys, 0);
+
+ if (err) {
+ vchiq_log_error(vchiq_core_log_level, "cannot load DMA map for pagelist memory");
+ err = -ENOMEM;
+ goto failed_load;
+ }
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "create_pagelist - %x", (unsigned int)pagelist);
+ if (!pagelist)
+ return -ENOMEM;
+
+ addrs = pagelist->addrs;
+ pages = (vm_page_t*)(addrs + num_pages);
+
+ actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
+ (vm_offset_t)buf, count,
+ (type == PAGELIST_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, pages, num_pages);
+
+ if (actual_pages != num_pages) {
+ vm_page_unhold_pages(pages, actual_pages);
+ free(pagelist, M_VCPAGELIST);
+ return (-ENOMEM);
+ }
+
+ pagelist->length = count;
+ pagelist->type = type;
+ pagelist->offset = offset;
+
+ /* Group the pages into runs of contiguous pages */
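+	/* Each addrs[] entry packs the page-aligned VideoCore bus address of the first
+	 * page in a run with the number of additional contiguous pages in its low bits. */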
+
+ base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
+ next_addr = base_addr + PAGE_SIZE;
+ addridx = 0;
+ run = 0;
+
+ for (i = 1; i < num_pages; i++) {
+ addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
+ if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
+ next_addr += PAGE_SIZE;
+ run++;
+ } else {
+ addrs[addridx] = (unsigned long)base_addr + run;
+ addridx++;
+ base_addr = addr;
+ next_addr = addr + PAGE_SIZE;
+ run = 0;
+ }
+ }
+
+ addrs[addridx] = (unsigned long)base_addr + run;
+ addridx++;
+
+ /* Partial cache lines (fragments) require special measures */
+ if ((type == PAGELIST_READ) &&
+ ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
+ ((pagelist->offset + pagelist->length) &
+ (CACHE_LINE_SIZE - 1)))) {
+ FRAGMENTS_T *fragments;
+
+ if (down_interruptible(&g_free_fragments_sema) != 0) {
+ free(pagelist, M_VCPAGELIST);
+ return -EINTR;
+ }
+
+ WARN_ON(g_free_fragments == NULL);
+
+ down(&g_free_fragments_mutex);
+ fragments = (FRAGMENTS_T *) g_free_fragments;
+ WARN_ON(fragments == NULL);
+ g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
+ up(&g_free_fragments_mutex);
+ pagelist->type =
+ PAGELIST_READ_WITH_FRAGMENTS + (fragments -
+ g_fragments_base);
+ }
+
+ /* XXX: optimize? INV operation for read WBINV for write? */
+ cpu_dcache_wbinv_range((vm_offset_t)buf, count);
+
+ bi->pagelist = pagelist;
+
+ return 0;
+
+failed_load:
+ bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
+failed_alloc:
+ bus_dmamap_destroy(bi->pagelist_dma_tag, bi->pagelist_dma_map);
+ bus_dma_tag_destroy(bi->pagelist_dma_tag);
+
+ return err;
+}
+
+static void
+free_pagelist(BULKINFO_T *bi, int actual)
+{
+	vm_page_t *pages;
+ unsigned int num_pages, i;
+ PAGELIST_T *pagelist;
+
+ pagelist = bi->pagelist;
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
+
+ num_pages =
+ (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ pages = (vm_page_t*)(pagelist->addrs + num_pages);
+
+ /* Deal with any partial cache lines (fragments) */
+ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
+ FRAGMENTS_T *fragments = g_fragments_base +
+ (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
+ int head_bytes, tail_bytes;
+ head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
+ (CACHE_LINE_SIZE - 1);
+ tail_bytes = (pagelist->offset + actual) &
+ (CACHE_LINE_SIZE - 1);
+
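+		/* Copy the partial leading and trailing cache lines, which were
+		 * transferred via the shared fragment buffers, back into the
+		 * caller's buffer. */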
+ if ((actual >= 0) && (head_bytes != 0)) {
+ if (head_bytes > actual)
+ head_bytes = actual;
+
+ memcpy((char *)bi->buf,
+ fragments->headbuf,
+ head_bytes);
+ }
+
+ if ((actual >= 0) && (head_bytes < actual) &&
+ (tail_bytes != 0)) {
+ memcpy((char *)bi->buf + actual - tail_bytes,
+ fragments->tailbuf, tail_bytes);
+ }
+
+ down(&g_free_fragments_mutex);
+ *(FRAGMENTS_T **) fragments = g_free_fragments;
+ g_free_fragments = fragments;
+ up(&g_free_fragments_mutex);
+ up(&g_free_fragments_sema);
+ }
+
+ for (i = 0; i < num_pages; i++) {
+ if (pagelist->type != PAGELIST_WRITE)
+ vm_page_dirty(pages[i]);
+ }
+
+ vm_page_unhold_pages(pages, num_pages);
+
+ bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
+ bus_dmamem_free(bi->pagelist_dma_tag, bi->pagelist, bi->pagelist_dma_map);
+ bus_dmamap_destroy(bi->pagelist_dma_tag, bi->pagelist_dma_map);
+ bus_dma_tag_destroy(bi->pagelist_dma_tag);
+
+ free(bi, M_VCPAGELIST);
+}
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
new file mode 100644
index 000000000000..d534a7fa755f
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
@@ -0,0 +1,2809 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "vchiq_core.h"
+#include "vchiq_ioctl.h"
+#include "vchiq_arm.h"
+
+#define DEVICE_NAME "vchiq"
+
+/* Override the default prefix, which would be vchiq_arm (from the filename) */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX DEVICE_NAME "."
+
+#define VCHIQ_MINOR 0
+
+/* Some per-instance constants */
+#define MAX_COMPLETIONS 16
+#define MAX_SERVICES 64
+#define MAX_ELEMENTS 8
+#define MSG_QUEUE_SIZE 64
+
+#define KEEPALIVE_VER 1
+#define KEEPALIVE_VER_MIN KEEPALIVE_VER
+
+MALLOC_DEFINE(M_VCHIQ, "vchiq_cdev", "VideoCore cdev memory");
+
+/* Run time control of log level, based on KERN_XXX level. */
+int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
+
+#define SUSPEND_TIMER_TIMEOUT_MS 100
+#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
+
+#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
+static const char *const suspend_state_names[] = {
+ "VC_SUSPEND_FORCE_CANCELED",
+ "VC_SUSPEND_REJECTED",
+ "VC_SUSPEND_FAILED",
+ "VC_SUSPEND_IDLE",
+ "VC_SUSPEND_REQUESTED",
+ "VC_SUSPEND_IN_PROGRESS",
+ "VC_SUSPEND_SUSPENDED"
+};
+#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
+static const char *const resume_state_names[] = {
+ "VC_RESUME_FAILED",
+ "VC_RESUME_IDLE",
+ "VC_RESUME_REQUESTED",
+ "VC_RESUME_IN_PROGRESS",
+ "VC_RESUME_RESUMED"
+};
+/* The number of times we allow force suspend to time out before actually
+** _forcing_ suspend. This is to cater for SW which fails to release vchiq
+** correctly - we don't want to prevent ARM suspend indefinitely in this case.
+*/
+#define FORCE_SUSPEND_FAIL_MAX 8
+
+/* The time in ms allowed for videocore to go idle when force suspend has been
+ * requested */
+#define FORCE_SUSPEND_TIMEOUT_MS 200
+
+
+static void suspend_timer_callback(unsigned long context);
+#ifdef notyet
+static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
+static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
+#endif
+
+
+typedef struct user_service_struct {
+ VCHIQ_SERVICE_T *service;
+ void *userdata;
+ VCHIQ_INSTANCE_T instance;
+ int is_vchi;
+ int dequeue_pending;
+ int message_available_pos;
+ int msg_insert;
+ int msg_remove;
+ struct semaphore insert_event;
+ struct semaphore remove_event;
+ VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
+} USER_SERVICE_T;
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance_struct {
+ VCHIQ_STATE_T *state;
+ VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
+ int completion_insert;
+ int completion_remove;
+ struct semaphore insert_event;
+ struct semaphore remove_event;
+ struct mutex completion_mutex;
+
+ int connected;
+ int closing;
+ int pid;
+ int mark;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+
+ struct proc_dir_entry *proc_entry;
+};
+
+typedef struct dump_context_struct {
+ char __user *buf;
+ size_t actual;
+ size_t space;
+ loff_t offset;
+} DUMP_CONTEXT_T;
+
+static struct cdev * vchiq_cdev;
+VCHIQ_STATE_T g_state;
+static DEFINE_SPINLOCK(msg_queue_spinlock);
+
+static const char *const ioctl_names[] = {
+ "CONNECT",
+ "SHUTDOWN",
+ "CREATE_SERVICE",
+ "REMOVE_SERVICE",
+ "QUEUE_MESSAGE",
+ "QUEUE_BULK_TRANSMIT",
+ "QUEUE_BULK_RECEIVE",
+ "AWAIT_COMPLETION",
+ "DEQUEUE_MESSAGE",
+ "GET_CLIENT_ID",
+ "GET_CONFIG",
+ "CLOSE_SERVICE",
+ "USE_SERVICE",
+ "RELEASE_SERVICE",
+ "SET_SERVICE_OPTION",
+ "DUMP_PHYS_MEM"
+};
+
+vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
+ (VCHIQ_IOC_MAX + 1));
+
+static eventhandler_tag vchiq_ehtag = NULL;
+static d_open_t vchiq_open;
+static d_close_t vchiq_close;
+static d_ioctl_t vchiq_ioctl;
+
+static struct cdevsw vchiq_cdevsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = vchiq_ioctl,
+ .d_open = vchiq_open,
+ .d_close = vchiq_close,
+ .d_name = DEVICE_NAME,
+};
+
+#if 0
+static void
+dump_phys_mem(void *virt_addr, uint32_t num_bytes);
+#endif
+
+/****************************************************************************
+*
+* add_completion
+*
+***************************************************************************/
+
+static VCHIQ_STATUS_T
+add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
+ void *bulk_userdata)
+{
+ VCHIQ_COMPLETION_DATA_T *completion;
+ DEBUG_INITIALISE(g_state.local)
+
+ while (instance->completion_insert ==
+ (instance->completion_remove + MAX_COMPLETIONS)) {
+ /* Out of space - wait for the client */
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_log_trace(vchiq_arm_log_level,
+ "add_completion - completion queue full");
+ DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
+ if (down_interruptible(&instance->remove_event) != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback interrupted");
+ return VCHIQ_RETRY;
+ } else if (instance->closing) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback closing");
+ return VCHIQ_ERROR;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ }
+
+ completion =
+ &instance->completions[instance->completion_insert &
+ (MAX_COMPLETIONS - 1)];
+
+ completion->header = header;
+ completion->reason = reason;
+ /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
+ completion->service_userdata = user_service->service;
+ completion->bulk_userdata = bulk_userdata;
+
+ if (reason == VCHIQ_SERVICE_CLOSED)
+ /* Take an extra reference, to be held until
+ this CLOSED notification is delivered. */
+ lock_service(user_service->service);
+
+ /* A write barrier is needed here to ensure that the entire completion
+ record is written out before the insert point. */
+ wmb();
+
+ if (reason == VCHIQ_MESSAGE_AVAILABLE)
+ user_service->message_available_pos =
+ instance->completion_insert;
+ instance->completion_insert++;
+
+ up(&instance->insert_event);
+
+ return VCHIQ_SUCCESS;
+}
+
+/****************************************************************************
+*
+* service_callback
+*
+***************************************************************************/
+
+static VCHIQ_STATUS_T
+service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
+ VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
+{
+ /* How do we ensure the callback goes to the right client?
+	** The service's userdata points to a USER_SERVICE_T record containing
+ ** the original callback and the user state structure, which contains a
+ ** circular buffer for completion records.
+ */
+ USER_SERVICE_T *user_service;
+ VCHIQ_SERVICE_T *service;
+ VCHIQ_INSTANCE_T instance;
+ DEBUG_INITIALISE(g_state.local)
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+ service = handle_to_service(handle);
+ BUG_ON(!service);
+ user_service = (USER_SERVICE_T *)service->base.userdata;
+ instance = user_service->instance;
+
+ if (!instance || instance->closing)
+ return VCHIQ_SUCCESS;
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "service_callback - service %lx(%d), handle %x, reason %d, header %lx, "
+ "instance %lx, bulk_userdata %lx",
+ (unsigned long)user_service,
+ service->localport, service->handle,
+ reason, (unsigned long)header,
+ (unsigned long)instance, (unsigned long)bulk_userdata);
+
+ if (header && user_service->is_vchi) {
+ spin_lock(&msg_queue_spinlock);
+ while (user_service->msg_insert ==
+ (user_service->msg_remove + MSG_QUEUE_SIZE)) {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
+ vchiq_log_trace(vchiq_arm_log_level,
+ "service_callback - msg queue full");
+ /* If there is no MESSAGE_AVAILABLE in the completion
+ ** queue, add one
+ */
+ if ((user_service->message_available_pos -
+ instance->completion_remove) < 0) {
+ VCHIQ_STATUS_T status;
+ vchiq_log_info(vchiq_arm_log_level,
+ "Inserting extra MESSAGE_AVAILABLE");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ status = add_completion(instance, reason,
+ NULL, user_service, bulk_userdata);
+ if (status != VCHIQ_SUCCESS) {
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return status;
+ }
+ }
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ if (down_interruptible(&user_service->remove_event)
+ != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback interrupted");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return VCHIQ_RETRY;
+ } else if (instance->closing) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback closing");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return VCHIQ_ERROR;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ spin_lock(&msg_queue_spinlock);
+ }
+
+ user_service->msg_queue[user_service->msg_insert &
+ (MSG_QUEUE_SIZE - 1)] = header;
+ user_service->msg_insert++;
+ spin_unlock(&msg_queue_spinlock);
+
+ up(&user_service->insert_event);
+
+ /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
+ ** there is a MESSAGE_AVAILABLE in the completion queue then
+ ** bypass the completion queue.
+ */
+ if (((user_service->message_available_pos -
+ instance->completion_remove) >= 0) ||
+ user_service->dequeue_pending) {
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ user_service->dequeue_pending = 0;
+ return VCHIQ_SUCCESS;
+ }
+
+ header = NULL;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+ return add_completion(instance, reason, header, user_service,
+ bulk_userdata);
+}
+
+/****************************************************************************
+*
+* user_service_free
+*
+***************************************************************************/
+static void
+user_service_free(void *userdata)
+{
+ USER_SERVICE_T *user_service = userdata;
+
+ _sema_destroy(&user_service->insert_event);
+ _sema_destroy(&user_service->remove_event);
+
+ kfree(user_service);
+}
+
+/****************************************************************************
+*
+* vchiq_ioctl
+*
+***************************************************************************/
+
+static int
+vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
+ struct thread *td)
+{
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ VCHIQ_SERVICE_T *service = NULL;
+ int ret = 0;
+ int i, rc;
+ DEBUG_INITIALISE(g_state.local)
+
+ if ((ret = devfs_get_cdevpriv((void**)&instance))) {
+ printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret);
+ return (ret);
+ }
+
+/* XXXBSD: HACK! */
+#define _IOC_NR(x) ((x) & 0xff)
+#define _IOC_TYPE(x) IOCGROUP(x)
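+/* Minimal stand-ins for the Linux ioctl decoding macros (command number and group). */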
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "vchiq_ioctl - instance %x, cmd %s, arg %p",
+ (unsigned int)instance,
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+
+ switch (cmd) {
+ case VCHIQ_IOC_SHUTDOWN:
+ if (!instance->connected)
+ break;
+
+ /* Remove all services */
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i)) != NULL) {
+ status = vchiq_remove_service(service->handle);
+ unlock_service(service);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+ service = NULL;
+
+ if (status == VCHIQ_SUCCESS) {
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ up(&instance->insert_event);
+ }
+
+ break;
+
+ case VCHIQ_IOC_CONNECT:
+ if (instance->connected) {
+ ret = -EINVAL;
+ break;
+ }
+ rc = lmutex_lock_interruptible(&instance->state->mutex);
+ if (rc != 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq: connect: could not lock mutex for "
+ "state %d: %d",
+ instance->state->id, rc);
+ ret = -EINTR;
+ break;
+ }
+ status = vchiq_connect_internal(instance->state, instance);
+ lmutex_unlock(&instance->state->mutex);
+
+ if (status == VCHIQ_SUCCESS)
+ instance->connected = 1;
+ else
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq: could not connect: %d", status);
+ break;
+
+ case VCHIQ_IOC_CREATE_SERVICE: {
+ VCHIQ_CREATE_SERVICE_T args;
+ USER_SERVICE_T *user_service = NULL;
+ void *userdata;
+ int srvstate;
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+
+ user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
+ if (!user_service) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (args.is_open) {
+ if (!instance->connected) {
+ ret = -ENOTCONN;
+ kfree(user_service);
+ break;
+ }
+ srvstate = VCHIQ_SRVSTATE_OPENING;
+ } else {
+ srvstate =
+ instance->connected ?
+ VCHIQ_SRVSTATE_LISTENING :
+ VCHIQ_SRVSTATE_HIDDEN;
+ }
+
+ userdata = args.params.userdata;
+ args.params.callback = service_callback;
+ args.params.userdata = user_service;
+ service = vchiq_add_service_internal(
+ instance->state,
+ &args.params, srvstate,
+ instance, user_service_free);
+
+ if (service != NULL) {
+ user_service->service = service;
+ user_service->userdata = userdata;
+ user_service->instance = instance;
+ user_service->is_vchi = args.is_vchi;
+ user_service->dequeue_pending = 0;
+ user_service->message_available_pos =
+ instance->completion_remove - 1;
+ user_service->msg_insert = 0;
+ user_service->msg_remove = 0;
+ _sema_init(&user_service->insert_event, 0);
+ _sema_init(&user_service->remove_event, 0);
+
+ if (args.is_open) {
+ status = vchiq_open_service_internal
+ (service, instance->pid);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_remove_service(service->handle);
+ service = NULL;
+ ret = (status == VCHIQ_RETRY) ?
+ -EINTR : -EIO;
+ break;
+ }
+ }
+
+#ifdef VCHIQ_IOCTL_DEBUG
+ printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
+#endif
+ memcpy((void *)
+ &(((VCHIQ_CREATE_SERVICE_T*)
+ arg)->handle),
+ (const void *)&service->handle,
+ sizeof(service->handle));
+
+ service = NULL;
+ } else {
+ ret = -EEXIST;
+ kfree(user_service);
+ }
+ } break;
+
+ case VCHIQ_IOC_CLOSE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ memcpy(&handle, (const void*)arg, sizeof(handle));
+
+#ifdef VCHIQ_IOCTL_DEBUG
+ printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
+#endif
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL)
+ status = vchiq_close_service(service->handle);
+ else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_REMOVE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ memcpy(&handle, (const void*)arg, sizeof(handle));
+
+#ifdef VCHIQ_IOCTL_DEBUG
+ printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
+#endif
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL)
+ status = vchiq_remove_service(service->handle);
+ else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_USE_SERVICE:
+ case VCHIQ_IOC_RELEASE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ memcpy(&handle, (const void*)arg, sizeof(handle));
+
+#ifdef VCHIQ_IOCTL_DEBUG
+ printf("%s: [%s SERVICE] handle = %08x\n", __func__,
+ cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
+#endif
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL) {
+ status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ vchiq_use_service_internal(service) :
+ vchiq_release_service_internal(service);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s: cmd %s returned error %d for "
+ "service %c%c%c%c:%8x",
+ __func__,
+ (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "VCHIQ_IOC_USE_SERVICE" :
+ "VCHIQ_IOC_RELEASE_SERVICE",
+ status,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->client_id);
+ ret = -EINVAL;
+ }
+ } else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_QUEUE_MESSAGE: {
+ VCHIQ_QUEUE_MESSAGE_T args;
+ memcpy(&args, (const void*)arg, sizeof(args));
+
+#ifdef VCHIQ_IOCTL_DEBUG
+ printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);
+#endif
+
+ service = find_service_for_instance(instance, args.handle);
+
+ if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
+ /* Copy elements into kernel space */
+ VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
+ if (copy_from_user(elements, args.elements,
+ args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
+ status = vchiq_queue_message
+ (args.handle,
+ elements, args.count);
+ else
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ } break;
+
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
+ VCHIQ_QUEUE_BULK_TRANSFER_T args;
+ struct bulk_waiter_node *waiter = NULL;
+ VCHIQ_BULK_DIR_T dir =
+ (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
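+ /* BLOCKING allocates a fresh waiter and waits for completion;
+ ** WAITING re-uses a waiter saved by an earlier transfer that
+ ** returned before completing (see the VCHIQ_RETRY path below). */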
+ if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
+ waiter = kzalloc(sizeof(struct bulk_waiter_node),
+ GFP_KERNEL);
+ if (!waiter) {
+ ret = -ENOMEM;
+ break;
+ }
+ args.userdata = &waiter->bulk_waiter;
+ } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
+ struct list_head *pos;
+ lmutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each(pos, &instance->bulk_waiter_list) {
+ if (list_entry(pos, struct bulk_waiter_node,
+ list)->pid == current->p_pid) {
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ break;
+ }
+
+ }
+ lmutex_unlock(&instance->bulk_waiter_list_mutex);
+ if (!waiter) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "no bulk_waiter found for pid %d",
+ current->p_pid);
+ ret = -ESRCH;
+ break;
+ }
+ vchiq_log_info(vchiq_arm_log_level,
+ "found bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->p_pid);
+ args.userdata = &waiter->bulk_waiter;
+ }
+ status = vchiq_bulk_transfer
+ (args.handle,
+ VCHI_MEM_HANDLE_INVALID,
+ args.data, args.size,
+ args.userdata, args.mode,
+ dir);
+ if (!waiter)
+ break;
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ if (waiter->bulk_waiter.bulk) {
+ /* Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ waiter->bulk_waiter.bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ _sema_destroy(&waiter->bulk_waiter.event);
+ kfree(waiter);
+ } else {
+ const VCHIQ_BULK_MODE_T mode_waiting =
+ VCHIQ_BULK_MODE_WAITING;
+ waiter->pid = current->p_pid;
+ lmutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ lmutex_unlock(&instance->bulk_waiter_list_mutex);
+ vchiq_log_info(vchiq_arm_log_level,
+ "saved bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->p_pid);
+
+ memcpy((void *)
+ &(((VCHIQ_QUEUE_BULK_TRANSFER_T *)
+ arg)->mode),
+ (const void *)&mode_waiting,
+ sizeof(mode_waiting));
+ }
+ } break;
+
+ case VCHIQ_IOC_AWAIT_COMPLETION: {
+ VCHIQ_AWAIT_COMPLETION_T args;
+ int count = 0;
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ if (!instance->connected) {
+ ret = -ENOTCONN;
+ break;
+ }
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+
+ lmutex_lock(&instance->completion_mutex);
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ while ((instance->completion_remove ==
+ instance->completion_insert)
+ && !instance->closing) {
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ lmutex_unlock(&instance->completion_mutex);
+ rc = down_interruptible(&instance->insert_event);
+ lmutex_lock(&instance->completion_mutex);
+ if (rc != 0) {
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ vchiq_log_info(vchiq_arm_log_level,
+ "AWAIT_COMPLETION interrupted");
+ ret = -EINTR;
+ break;
+ }
+ }
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ /* A read memory barrier is needed to stop prefetch of a stale
+ ** completion record
+ */
+ rmb();
+
+ if (ret == 0) {
+ int msgbufcount = args.msgbufcount;
+ for (count = 0; count < args.count; count++) {
+ VCHIQ_COMPLETION_DATA_T *completion;
+ VCHIQ_SERVICE_T *service1;
+ USER_SERVICE_T *user_service;
+ VCHIQ_HEADER_T *header;
+ if (instance->completion_remove ==
+ instance->completion_insert)
+ break;
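+ /* completions[] is a ring buffer; the mask relies on
+ ** MAX_COMPLETIONS being a power of two. */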
+ completion = &instance->completions[
+ instance->completion_remove &
+ (MAX_COMPLETIONS - 1)];
+
+ service1 = completion->service_userdata;
+ user_service = service1->base.userdata;
+ completion->service_userdata =
+ user_service->userdata;
+
+ header = completion->header;
+ if (header) {
+ void __user *msgbuf;
+ int msglen;
+
+ msglen = header->size +
+ sizeof(VCHIQ_HEADER_T);
+ /* This must be a VCHIQ-style service */
+ if (args.msgbufsize < msglen) {
+ vchiq_log_error(
+ vchiq_arm_log_level,
+ "header %x: msgbufsize"
+ " %x < msglen %x",
+ (unsigned int)header,
+ args.msgbufsize,
+ msglen);
+ WARN(1, "invalid message "
+ "size\n");
+ if (count == 0)
+ ret = -EMSGSIZE;
+ break;
+ }
+ if (msgbufcount <= 0)
+ /* Stall here for lack of a
+ ** buffer for the message. */
+ break;
+ /* Get the pointer from user space */
+ msgbufcount--;
+ if (copy_from_user(&msgbuf,
+ (const void __user *)
+ &args.msgbufs[msgbufcount],
+ sizeof(msgbuf)) != 0) {
+ if (count == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Copy the message to user space */
+ if (copy_to_user(msgbuf, header,
+ msglen) != 0) {
+ if (count == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Now it has been copied, the message
+ ** can be released. */
+ vchiq_release_message(service1->handle,
+ header);
+
+ /* The completion must point to the
+ ** msgbuf. */
+ completion->header = msgbuf;
+ }
+
+ if (completion->reason ==
+ VCHIQ_SERVICE_CLOSED)
+ unlock_service(service1);
+
+ if (copy_to_user((void __user *)(
+ (size_t)args.buf +
+ count * sizeof(VCHIQ_COMPLETION_DATA_T)),
+ completion,
+ sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ instance->completion_remove++;
+ }
+
+ if (msgbufcount != args.msgbufcount) {
+ memcpy((void __user *)
+ &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
+ msgbufcount,
+ &msgbufcount,
+ sizeof(msgbufcount));
+ }
+
+ if (count != args.count)
+ {
+ memcpy((void __user *)
+ &((VCHIQ_AWAIT_COMPLETION_T *)arg)->count,
+ &count, sizeof(count));
+ }
+ }
+
+ if (count != 0)
+ up(&instance->remove_event);
+
+ if ((ret == 0) && instance->closing)
+ ret = -ENOTCONN;
+ /*
+ * XXXBSD: ioctl return codes are not negative as in Linux, so
+ * we cannot indicate success with a positive number of passed
+ * messages.
+ */
+ if (ret > 0)
+ ret = 0;
+
+ lmutex_unlock(&instance->completion_mutex);
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ } break;
+
+ case VCHIQ_IOC_DEQUEUE_MESSAGE: {
+ VCHIQ_DEQUEUE_MESSAGE_T args;
+ USER_SERVICE_T *user_service;
+ VCHIQ_HEADER_T *header;
+
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ memcpy(&args, (const void*)arg, sizeof(args));
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+ user_service = (USER_SERVICE_T *)service->base.userdata;
+ if (user_service->is_vchi == 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_lock(&msg_queue_spinlock);
+ if (user_service->msg_remove == user_service->msg_insert) {
+ if (!args.blocking) {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ ret = -EWOULDBLOCK;
+ break;
+ }
+ user_service->dequeue_pending = 1;
+ do {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ if (down_interruptible(
+ &user_service->insert_event) != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "DEQUEUE_MESSAGE interrupted");
+ ret = -EINTR;
+ break;
+ }
+ spin_lock(&msg_queue_spinlock);
+ } while (user_service->msg_remove ==
+ user_service->msg_insert);
+
+ if (ret)
+ break;
+ }
+
+ BUG_ON((int)(user_service->msg_insert -
+ user_service->msg_remove) < 0);
+
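+ /* msg_queue is a ring buffer; the mask relies on MSG_QUEUE_SIZE
+ * being a power of two. */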
+ header = user_service->msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&msg_queue_spinlock);
+
+ up(&user_service->remove_event);
+ if (header == NULL)
+ ret = -ENOTCONN;
+ else if (header->size <= args.bufsize) {
+ /* Copy to user space if msgbuf is not NULL */
+ if ((args.buf == NULL) ||
+ (copy_to_user((void __user *)args.buf,
+ header->data,
+ header->size) == 0)) {
+ args.bufsize = header->size;
+ memcpy((void *)arg, &args,
+ sizeof(args));
+ vchiq_release_message(
+ service->handle,
+ header);
+ } else
+ ret = -EFAULT;
+ } else {
+ vchiq_log_error(vchiq_arm_log_level,
+ "header %x: bufsize %x < size %x",
+ (unsigned int)header, args.bufsize,
+ header->size);
+ WARN(1, "invalid size\n");
+ ret = -EMSGSIZE;
+ }
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ } break;
+
+ case VCHIQ_IOC_GET_CLIENT_ID: {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ memcpy(&handle, (const void*)arg, sizeof(handle));
+
+ ret = vchiq_get_client_id(handle);
+ } break;
+
+ case VCHIQ_IOC_GET_CONFIG: {
+ VCHIQ_GET_CONFIG_T args;
+ VCHIQ_CONFIG_T config;
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+ if (args.config_size > sizeof(config)) {
+ ret = -EINVAL;
+ break;
+ }
+ status = vchiq_get_config(instance, args.config_size, &config);
+ if (status == VCHIQ_SUCCESS) {
+ if (copy_to_user((void __user *)args.pconfig,
+ &config, args.config_size) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ } break;
+
+ case VCHIQ_IOC_SET_SERVICE_OPTION: {
+ VCHIQ_SET_SERVICE_OPTION_T args;
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
+ status = vchiq_set_service_option(
+ args.handle, args.option, args.value);
+ } break;
+
+ case VCHIQ_IOC_DUMP_PHYS_MEM: {
+ VCHIQ_DUMP_MEM_T args;
+
+ memcpy(&args, (const void*)arg, sizeof(args));
+ printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
+#if 0
+ dump_phys_mem(args.virt_addr, args.num_bytes);
+#endif
+ } break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ if (service)
+ unlock_service(service);
+
+ if (ret == 0) {
+ if (status == VCHIQ_ERROR)
+ ret = -EIO;
+ else if (status == VCHIQ_RETRY)
+ ret = -EINTR;
+ }
+
+ if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
+ (ret != -EWOULDBLOCK))
+ vchiq_log_info(vchiq_arm_log_level,
+ " ioctl instance %lx, cmd %s -> status %d, %d",
+ (unsigned long)instance,
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] :
+ "<invalid>",
+ status, ret);
+ else
+ vchiq_log_trace(vchiq_arm_log_level,
+ " ioctl instance %lx, cmd %s -> status %d, %d",
+ (unsigned long)instance,
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] :
+ "<invalid>",
+ status, ret);
+
+ /* XXXBSD: report BSD-style error to userland */
+ if (ret < 0)
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+instance_dtr(void *data)
+{
+
+ free(data, M_VCHIQ);
+}
+
+/****************************************************************************
+*
+* vchiq_open
+*
+***************************************************************************/
+
+static int
+vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
+{
+ vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
+ /* XXXBSD: do we really need this check? */
+ if (1) {
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ VCHIQ_INSTANCE_T instance;
+
+ if (!state) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq has no connection to VideoCore");
+ return -ENOTCONN;
+ }
+
+ instance = kmalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+
+ instance->state = state;
+ /* XXXBSD: PID or thread ID? */
+ instance->pid = td->td_proc->p_pid;
+
+#ifdef notyet
+ ret = vchiq_proc_add_instance(instance);
+ if (ret != 0) {
+ kfree(instance);
+ return ret;
+ }
+#endif
+
+ _sema_init(&instance->insert_event, 0);
+ _sema_init(&instance->remove_event, 0);
+ lmutex_init(&instance->completion_mutex);
+ lmutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ devfs_set_cdevpriv(instance, instance_dtr);
+ }
+ else {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unknown minor device");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+*
+* vchiq_release
+*
+***************************************************************************/
+
+static int
+vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,
+ struct thread *td)
+{
+ int ret = 0;
+ if (1) {
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+ if ((ret = devfs_get_cdevpriv((void**)&instance))) {
+ printf("devfs_get_cdevpriv failed: error %d\n", ret);
+ return (ret);
+ }
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq_release: instance=%lx",
+ (unsigned long)instance);
+
+ if (!state) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Ensure videocore is awake to allow termination. */
+ vchiq_use_internal(instance->state, NULL,
+ USE_TYPE_VCHIQ);
+
+ lmutex_lock(&instance->completion_mutex);
+
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ up(&instance->insert_event);
+
+ lmutex_unlock(&instance->completion_mutex);
+
+ /* Wake the slot handler if the completion queue is full. */
+ up(&instance->remove_event);
+
+ /* Mark all services for termination... */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ USER_SERVICE_T *user_service = service->base.userdata;
+
+ /* Wake the slot handler if the msg queue is full. */
+ up(&user_service->remove_event);
+
+ vchiq_terminate_service_internal(service);
+ unlock_service(service);
+ }
+
+ /* ...and wait for them to die */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i))
+ != NULL) {
+ USER_SERVICE_T *user_service = service->base.userdata;
+
+ down(&service->remove_event);
+
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+
+ spin_lock(&msg_queue_spinlock);
+
+ while (user_service->msg_remove !=
+ user_service->msg_insert) {
+ VCHIQ_HEADER_T *header = user_service->
+ msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&msg_queue_spinlock);
+
+ if (header)
+ vchiq_release_message(
+ service->handle,
+ header);
+ spin_lock(&msg_queue_spinlock);
+ }
+
+ spin_unlock(&msg_queue_spinlock);
+
+ unlock_service(service);
+ }
+
+ /* Release any closed services */
+ while (instance->completion_remove !=
+ instance->completion_insert) {
+ VCHIQ_COMPLETION_DATA_T *completion;
+ VCHIQ_SERVICE_T *service1;
+ completion = &instance->completions[
+ instance->completion_remove &
+ (MAX_COMPLETIONS - 1)];
+ service1 = completion->service_userdata;
+ if (completion->reason == VCHIQ_SERVICE_CLOSED)
+ unlock_service(service1);
+ instance->completion_remove++;
+ }
+
+ /* Release the PEER service count. */
+ vchiq_release_internal(instance->state, NULL);
+
+ {
+ struct list_head *pos, *next;
+ list_for_each_safe(pos, next,
+ &instance->bulk_waiter_list) {
+ struct bulk_waiter_node *waiter;
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ vchiq_log_info(vchiq_arm_log_level,
+ "bulk_waiter - cleaned up %x "
+ "for pid %d",
+ (unsigned int)waiter, waiter->pid);
+ _sema_destroy(&waiter->bulk_waiter.event);
+ kfree(waiter);
+ }
+ }
+
+ }
+ else {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unknown minor device");
+ ret = -ENXIO;
+ }
+
+out:
+ return ret;
+}
+
+/****************************************************************************
+*
+* vchiq_dump
+*
+***************************************************************************/
+
+void
+vchiq_dump(void *dump_context, const char *str, int len)
+{
+ DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
+
+ if (context->actual < context->space) {
+ int copy_bytes;
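+ /* Skip any part of the string that falls before the requested offset. */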
+ if (context->offset > 0) {
+ int skip_bytes = min(len, (int)context->offset);
+ str += skip_bytes;
+ len -= skip_bytes;
+ context->offset -= skip_bytes;
+ if (context->offset > 0)
+ return;
+ }
+ copy_bytes = min(len, (int)(context->space - context->actual));
+ if (copy_bytes == 0)
+ return;
+ memcpy(context->buf + context->actual, str, copy_bytes);
+ context->actual += copy_bytes;
+ len -= copy_bytes;
+
+ /* If the terminating NUL is included in the length, then it
+ ** marks the end of a line and should be replaced with a
+ ** newline. */
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
+ char cr = '\n';
+ memcpy(context->buf + context->actual - 1, &cr, 1);
+ }
+ }
+}
+
+/****************************************************************************
+*
+*   vchiq_dump_platform_instances
+*
+***************************************************************************/
+
+void
+vchiq_dump_platform_instances(void *dump_context)
+{
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ char buf[80];
+ int len;
+ int i;
+
+ /* There is no list of instances, so instead scan all services,
+ marking those that have been dumped. */
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ VCHIQ_INSTANCE_T instance;
+
+ if (service && (service->base.callback == service_callback)) {
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
+ }
+ }
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ VCHIQ_INSTANCE_T instance;
+
+ if (service && (service->base.callback == service_callback)) {
+ instance = service->instance;
+ if (instance && !instance->mark) {
+ len = snprintf(buf, sizeof(buf),
+ "Instance %x: pid %d,%s completions "
+ "%d/%d",
+ (unsigned int)instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ instance->mark = 1;
+ }
+ }
+ }
+}
+
+/****************************************************************************
+*
+* vchiq_dump_platform_service_state
+*
+***************************************************************************/
+
+void
+vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
+{
+ USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), " instance %x",
+ (unsigned int)service->instance);
+
+ if ((service->base.callback == service_callback) &&
+ user_service->is_vchi) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ ", %d/%d messages",
+ user_service->msg_insert - user_service->msg_remove,
+ MSG_QUEUE_SIZE);
+
+ if (user_service->dequeue_pending)
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " (dequeue pending)");
+ }
+
+ vchiq_dump(dump_context, buf, len + 1);
+}
+
+#ifdef notyet
+/****************************************************************************
+*
+*   dump_phys_mem
+*
+***************************************************************************/
+
+static void
+dump_phys_mem(void *virt_addr, uint32_t num_bytes)
+{
+ int rc;
+ uint8_t *end_virt_addr = virt_addr + num_bytes;
+ int num_pages;
+ int offset;
+ int end_offset;
+ int page_idx;
+ int prev_idx;
+ struct page *page;
+ struct page **pages;
+ uint8_t *kmapped_virt_ptr;
+
+ /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
+
+ virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
+ end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
+ ~0x0fuL);
+
+ offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
+ end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
+
+ num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+ if (pages == NULL) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unable to allocation memory for %d pages\n",
+ num_pages);
+ return;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ rc = get_user_pages(current, /* task */
+ current->mm, /* mm */
+ (unsigned long)virt_addr, /* start */
+ num_pages, /* len */
+ 0, /* write */
+ 0, /* force */
+ pages, /* pages (array of page pointers) */
+ NULL); /* vmas */
+ up_read(&current->mm->mmap_sem);
+
+ prev_idx = -1;
+ page = NULL;
+
+ while (offset < end_offset) {
+
+ int page_offset = offset % PAGE_SIZE;
+ page_idx = offset / PAGE_SIZE;
+
+ if (page_idx != prev_idx) {
+
+ if (page != NULL)
+ kunmap(page);
+ page = pages[page_idx];
+ kmapped_virt_ptr = kmap(page);
+
+ prev_idx = page_idx;
+ }
+
+ if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
+ vchiq_log_dump_mem("ph",
+ (uint32_t)(unsigned long)&kmapped_virt_ptr[
+ page_offset],
+ &kmapped_virt_ptr[page_offset], 16);
+
+ offset += 16;
+ }
+ if (page != NULL)
+ kunmap(page);
+
+ for (page_idx = 0; page_idx < num_pages; page_idx++)
+ page_cache_release(pages[page_idx]);
+
+ kfree(pages);
+}
+
+/****************************************************************************
+*
+* vchiq_read
+*
+***************************************************************************/
+
+static ssize_t
+vchiq_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ DUMP_CONTEXT_T context;
+ context.buf = buf;
+ context.actual = 0;
+ context.space = count;
+ context.offset = *ppos;
+
+ vchiq_dump_state(&context, &g_state);
+
+ *ppos += context.actual;
+
+ return context.actual;
+}
+#endif
+
+VCHIQ_STATE_T *
+vchiq_get_state(void)
+{
+
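+ /* Only expose the state once the remote (VideoCore) side reports
+ * itself initialised; callers treat a NULL return as "no connection". */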
+ if (g_state.remote == NULL)
+ printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
+ else if (g_state.remote->initialised != 1)
+ printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
+ __func__, g_state.remote->initialised);
+
+ return ((g_state.remote != NULL) &&
+ (g_state.remote->initialised == 1)) ? &g_state : NULL;
+}
+
+/*
+ * Autosuspend related functionality
+ */
+
+int
+vchiq_videocore_wanted(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ if (!arm_state)
+ /* autosuspend not supported - always return wanted */
+ return 1;
+ else if (arm_state->blocked_count)
+ return 1;
+ else if (!arm_state->videocore_use_count) {
+ /* usage count zero - check for override unless we're forcing */
+ if (arm_state->resume_blocked)
+ return 0;
+ else
+ return vchiq_platform_videocore_wanted(state);
+ }
+ else
+ /* non-zero usage count - videocore still required */
+ return 1;
+}
+
+static VCHIQ_STATUS_T
+vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header,
+ VCHIQ_SERVICE_HANDLE_T service_user,
+ void *bulk_user)
+{
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s callback reason %d", __func__, reason);
+ return 0;
+}
+
+static int
+vchiq_keepalive_thread_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ VCHIQ_STATUS_T status;
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_SERVICE_HANDLE_T ka_handle;
+
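+ /* Register a "KEEP" service, then turn keep-alive use/release
+ * events from VideoCore into local vchiq_use_service() /
+ * vchiq_release_service() calls. */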
+ VCHIQ_SERVICE_PARAMS_T params = {
+ .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
+ .callback = vchiq_keepalive_vchiq_callback,
+ .version = KEEPALIVE_VER,
+ .version_min = KEEPALIVE_VER_MIN
+ };
+
+ status = vchiq_initialise(&instance);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_initialise failed %d", __func__, status);
+ goto exit;
+ }
+
+ status = vchiq_connect(instance);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_connect failed %d", __func__, status);
+ goto shutdown;
+ }
+
+ status = vchiq_add_service(instance, &params, &ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_open_service failed %d", __func__, status);
+ goto shutdown;
+ }
+
+ while (1) {
+ long rc = 0, uc = 0;
+ if (wait_for_completion_interruptible(&arm_state->ka_evt)
+ != 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+ flush_signals(current);
+ continue;
+ }
+
+ /* read and clear counters. Do release_count then use_count to
+ * prevent getting more releases than uses */
+ rc = atomic_xchg(&arm_state->ka_release_count, 0);
+ uc = atomic_xchg(&arm_state->ka_use_count, 0);
+
+ /* Call use/release service the requisite number of times.
+ * Process use before release so use counts don't go negative */
+ while (uc--) {
+ atomic_inc(&arm_state->ka_use_ack_count);
+ status = vchiq_use_service(ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_use_service error %d",
+ __func__, status);
+ }
+ }
+ while (rc--) {
+ status = vchiq_release_service(ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_release_service error %d",
+ __func__, status);
+ }
+ }
+ }
+
+shutdown:
+ vchiq_shutdown(instance);
+exit:
+ return 0;
+}
+
+VCHIQ_STATUS_T
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (arm_state) {
+ rwlock_init(&arm_state->susp_res_lock);
+
+ init_completion(&arm_state->ka_evt);
+ atomic_set(&arm_state->ka_use_count, 0);
+ atomic_set(&arm_state->ka_use_ack_count, 0);
+ atomic_set(&arm_state->ka_release_count, 0);
+
+ init_completion(&arm_state->vc_suspend_complete);
+
+ init_completion(&arm_state->vc_resume_complete);
+ /* Initialise to 'done' state. We only want to block on resume
+ * completion while videocore is suspended. */
+ set_resume_state(arm_state, VC_RESUME_RESUMED);
+
+ init_completion(&arm_state->resume_blocker);
+ /* Initialise to 'done' state. We only want to block on this
+ * completion while resume is blocked */
+ complete_all(&arm_state->resume_blocker);
+
+ init_completion(&arm_state->blocked_blocker);
+ /* Initialise to 'done' state. We only want to block on this
+ * completion while things are waiting on the resume blocker */
+ complete_all(&arm_state->blocked_blocker);
+
+ arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
+ arm_state->suspend_timer_running = 0;
+ init_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer.data = (unsigned long)(state);
+ arm_state->suspend_timer.function = suspend_timer_callback;
+
+ arm_state->first_connect = 0;
+
+ }
+ return status;
+}
+
+/*
+** Functions to modify the state variables;
+** set_suspend_state
+** set_resume_state
+**
+** There are more state variables than we might like, so ensure they remain in
+** step. Suspend and resume state are maintained separately, since most of
+** these state machines can operate independently. However, there are a few
+** states where state transitions in one state machine cause a reset to the
+** other state machine. In addition, there are some completion events which
+** need to occur on state machine reset and end-state(s), so these are also
+** dealt with in these functions.
+**
+** In all states we set the state variable according to the input, but in some
+** cases we perform additional steps outlined below;
+**
+** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
+** The suspend completion is completed after any suspend
+** attempt. When we reset the state machine we also reset
+** the completion. This reset occurs when videocore is
+** resumed, and also if we initiate suspend after a suspend
+** failure.
+**
+** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
+** suspend - ie from this point on we must try to suspend
+** before resuming can occur. We therefore also reset the
+** resume state machine to VC_RESUME_IDLE in this state.
+**
+** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
+** complete_all on the suspend completion to notify
+** anything waiting for suspend to happen.
+**
+** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
+** initiate resume, so no need to alter resume state.
+** We call complete_all on the suspend completion to notify
+** of suspend rejection.
+**
+** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
+** suspend completion and reset the resume state machine.
+**
+** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
+** resume completion is in its 'done' state whenever
+** videocore is running. Therefore, the VC_RESUME_IDLE state
+** implies that videocore is suspended.
+** Hence, any thread which needs to wait until videocore is
+** running can wait on this completion - it will only block
+** if videocore is suspended.
+**
+** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
+** Call complete_all on the resume completion to unblock
+** any threads waiting for resume. Also reset the suspend
+** state machine to its idle state.
+**
+** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
+*/
+
+inline void
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_suspend_status new_state)
+{
+ /* set the state in all cases */
+ arm_state->vc_suspend_state = new_state;
+
+ /* state specific additional actions */
+ switch (new_state) {
+ case VC_SUSPEND_FORCE_CANCELED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_REJECTED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_FAILED:
+ complete_all(&arm_state->vc_suspend_complete);
+ arm_state->vc_resume_state = VC_RESUME_RESUMED;
+ complete_all(&arm_state->vc_resume_complete);
+ break;
+ case VC_SUSPEND_IDLE:
+ INIT_COMPLETION(arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_REQUESTED:
+ break;
+ case VC_SUSPEND_IN_PROGRESS:
+ set_resume_state(arm_state, VC_RESUME_IDLE);
+ break;
+ case VC_SUSPEND_SUSPENDED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+inline void
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_resume_status new_state)
+{
+ /* set the state in all cases */
+ arm_state->vc_resume_state = new_state;
+
+ /* state specific additional actions */
+ switch (new_state) {
+ case VC_RESUME_FAILED:
+ break;
+ case VC_RESUME_IDLE:
+ INIT_COMPLETION(arm_state->vc_resume_complete);
+ break;
+ case VC_RESUME_REQUESTED:
+ break;
+ case VC_RESUME_IN_PROGRESS:
+ break;
+ case VC_RESUME_RESUMED:
+ complete_all(&arm_state->vc_resume_complete);
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+
+/* should be called with the write lock held */
+inline void
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
+{
+ del_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer.expires = jiffies +
+ msecs_to_jiffies(arm_state->
+ suspend_timer_timeout);
+ add_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer_running = 1;
+}
+
+/* should be called with the write lock held */
+static inline void
+stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
+{
+ if (arm_state->suspend_timer_running) {
+ del_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer_running = 0;
+ }
+}
+
+static inline int
+need_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
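+ /* Resume is needed when suspend has been initiated, no resume has
+ * been requested yet, and something still wants videocore running. */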
+ return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
+ (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
+ vchiq_videocore_wanted(state);
+}
+
+static int
+block_resume(VCHIQ_ARM_STATE_T *arm_state)
+{
+ int status = VCHIQ_SUCCESS;
+ const unsigned long timeout_val =
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
+ int resume_count = 0;
+
+ /* Allow any threads which were blocked by the last force suspend to
+ * complete if they haven't already. Only give this one shot; if
+ * blocked_count is incremented after blocked_blocker is completed
+ * (which only happens when blocked_count hits 0) then those threads
+ * will have to wait until next time around */
+ if (arm_state->blocked_count) {
+ INIT_COMPLETION(arm_state->blocked_blocker);
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
+ "blocked clients", __func__);
+ if (wait_for_completion_interruptible_timeout(
+ &arm_state->blocked_blocker, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+ "previously blocked clients failed" , __func__);
+ status = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
+ "clients resumed", __func__);
+ write_lock_bh(&arm_state->susp_res_lock);
+ }
+
+ /* We need to wait for resume to complete if it's in process */
+ while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
+ arm_state->vc_resume_state > VC_RESUME_IDLE) {
+ if (resume_count > 1) {
+ status = VCHIQ_ERROR;
+ vchiq_log_error(vchiq_susp_log_level, "%s waited too "
+ "many times for resume" , __func__);
+ goto out;
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
+ __func__);
+ if (wait_for_completion_interruptible_timeout(
+ &arm_state->vc_resume_complete, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+ "resume failed (%s)", __func__,
+ resume_state_names[arm_state->vc_resume_state +
+ VC_RESUME_NUM_OFFSET]);
+ status = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
+ write_lock_bh(&arm_state->susp_res_lock);
+ resume_count++;
+ }
+ INIT_COMPLETION(arm_state->resume_blocker);
+ arm_state->resume_blocked = 1;
+
+out:
+ return status;
+}
+
+static inline void
+unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
+{
+ complete_all(&arm_state->resume_blocker);
+ arm_state->resume_blocked = 0;
+}
+
+/* Initiate suspend via slot handler. Should be called with the write lock
+ * held */
+VCHIQ_STATUS_T
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ status = VCHIQ_SUCCESS;
+
+
+ switch (arm_state->vc_suspend_state) {
+ case VC_SUSPEND_REQUESTED:
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
+ "requested", __func__);
+ break;
+ case VC_SUSPEND_IN_PROGRESS:
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
+ "progress", __func__);
+ break;
+
+ default:
+ /* We don't expect to be in other states, so log but continue
+ * anyway */
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s unexpected suspend state %s", __func__,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ /* fall through */
+ case VC_SUSPEND_REJECTED:
+ case VC_SUSPEND_FAILED:
+ /* Ensure any idle state actions have been run */
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+ /* fall through */
+ case VC_SUSPEND_IDLE:
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: suspending", __func__);
+ set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
+ /* kick the slot handler thread to initiate suspend */
+ request_poll(state, NULL, 0);
+ break;
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
+ return status;
+}
+
+void
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int susp = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
+ arm_state->vc_resume_state == VC_RESUME_RESUMED) {
+ set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
+ susp = 1;
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (susp)
+ vchiq_platform_suspend(state);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+}
+
+
+static void
+output_timeout_error(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ char service_err[50] = "";
+ int vc_use_count = arm_state->videocore_use_count;
+ int active_services = state->unused_service;
+ int i;
+
+ if (!arm_state->videocore_use_count) {
+ snprintf(service_err, 50, " Videocore usecount is 0");
+ goto output_msg;
+ }
+ for (i = 0; i < active_services; i++) {
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
+ if (service_ptr && service_ptr->service_use_count &&
+ (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ snprintf(service_err, 50, " %c%c%c%c(%8x) service has "
+ "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
+ service_ptr->base.fourcc),
+ service_ptr->client_id,
+ service_ptr->service_use_count,
+ service_ptr->service_use_count ==
+ vc_use_count ? "" : " (+ more)");
+ break;
+ }
+ }
+
+output_msg:
+ vchiq_log_error(vchiq_susp_log_level,
+ "timed out waiting for vc suspend (%d).%s",
+ arm_state->autosuspend_override, service_err);
+
+}
+
+/* Try to get videocore into suspended state, regardless of autosuspend state.
+** We don't actually force suspend, since videocore may get into a bad state
+** if we force suspend at a bad time. Instead, we wait for autosuspend to
+** determine a good point to suspend. If this doesn't happen within 100ms we
+** report failure.
+**
+** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
+** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
+*/
+VCHIQ_STATUS_T
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ long rc = 0;
+ int repeat = -1;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+
+ status = block_resume(arm_state);
+ if (status != VCHIQ_SUCCESS)
+ goto unlock;
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
+ /* Already suspended - just block resume and exit */
+ vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
+ __func__);
+ status = VCHIQ_SUCCESS;
+ goto unlock;
+ } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
+ /* initiate suspend immediately in the case that we're waiting
+ * for the timeout */
+ stop_suspend_timer(arm_state);
+ if (!vchiq_videocore_wanted(state)) {
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore "
+ "idle, initiating suspend", __func__);
+ status = vchiq_arm_vcsuspend(state);
+ } else if (arm_state->autosuspend_override <
+ FORCE_SUSPEND_FAIL_MAX) {
+ vchiq_log_info(vchiq_susp_log_level, "%s letting "
+ "videocore go idle", __func__);
+ status = VCHIQ_SUCCESS;
+ } else {
+ vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
+ "many times - attempting suspend", __func__);
+ status = vchiq_arm_vcsuspend(state);
+ }
+ } else {
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
+ "in progress - wait for completion", __func__);
+ status = VCHIQ_SUCCESS;
+ }
+
+ /* Wait for suspend to happen due to system idle (not forced..) */
+ if (status != VCHIQ_SUCCESS)
+ goto unblock_resume;
+
+ do {
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ rc = wait_for_completion_interruptible_timeout(
+ &arm_state->vc_suspend_complete,
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (rc < 0) {
+ vchiq_log_warning(vchiq_susp_log_level, "%s "
+ "interrupted waiting for suspend", __func__);
+ status = VCHIQ_ERROR;
+ goto unblock_resume;
+ } else if (rc == 0) {
+ if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
+ /* Repeat timeout once if in progress */
+ if (repeat < 0) {
+ repeat = 1;
+ continue;
+ }
+ }
+ arm_state->autosuspend_override++;
+ output_timeout_error(state);
+
+ status = VCHIQ_RETRY;
+ goto unblock_resume;
+ }
+ } while (0 < (repeat--));
+
+ /* Check and report state in case we need to abort ARM suspend */
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
+ status = VCHIQ_RETRY;
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s videocore suspend failed (state %s)", __func__,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ /* Reset the state only if it's still in an error state.
+ * Something could have already initiated another suspend. */
+ if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+
+ goto unblock_resume;
+ }
+
+ /* successfully suspended - unlock and exit */
+ goto unlock;
+
+unblock_resume:
+ /* all error states need to unblock resume before exit */
+ unblock_resume(arm_state);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
+ return status;
+}
+
+void
+vchiq_check_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
+ arm_state->first_connect &&
+ !vchiq_videocore_wanted(state)) {
+ vchiq_arm_vcsuspend(state);
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+}
+
+
+int
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int resume = 0;
+ int ret = -1;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ unblock_resume(arm_state);
+ resume = vchiq_check_resume(state);
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (resume) {
+ if (wait_for_completion_interruptible(
+ &arm_state->vc_resume_complete) < 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+ /* failed, cannot accurately derive suspend
+ * state, so exit early. */
+ goto out;
+ }
+ }
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: Videocore remains suspended", __func__);
+ } else {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: Videocore resumed", __func__);
+ ret = 0;
+ }
+ read_unlock_bh(&arm_state->susp_res_lock);
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+/* This function should be called with the write lock held */
+int
+vchiq_check_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int resume = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (need_resume(state)) {
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
+ request_poll(state, NULL, 0);
+ resume = 1;
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return resume;
+}
+
+#ifdef notyet
+void
+vchiq_platform_check_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int res = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->wake_address == 0) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: already awake", __func__);
+ goto unlock;
+ }
+ if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: already resuming", __func__);
+ goto unlock;
+ }
+
+ if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
+ set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
+ res = 1;
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s: not resuming (resume state %s)", __func__,
+ resume_state_names[arm_state->vc_resume_state +
+ VC_RESUME_NUM_OFFSET]);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (res)
+ vchiq_platform_resume(state);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+
+}
+#endif
+
+
+
+VCHIQ_STATUS_T
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ enum USE_TYPE_E use_type)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ char entity[16];
+ int *entity_uc;
+ int local_uc, local_entity_uc;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (use_type == USE_TYPE_VCHIQ) {
+ snprintf(entity, sizeof(entity), "VCHIQ: ");
+ entity_uc = &arm_state->peer_use_count;
+ } else if (service) {
+ snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ vchiq_log_error(vchiq_susp_log_level, "%s null service "
+ "ptr", __func__);
+ ret = VCHIQ_ERROR;
+ goto out;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ while (arm_state->resume_blocked) {
+ /* If we call 'use' while force suspend is waiting for suspend,
+ * then we're about to block the thread which the force is
+ * waiting to complete, so we're bound to just time out. In this
+ * case, set the suspend state such that the wait will be
+ * canceled, so we can complete as quickly as possible. */
+ if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
+ VC_SUSPEND_IDLE) {
+ set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
+ break;
+ }
+ /* If suspend is already in progress then we need to block */
+ if (!try_wait_for_completion(&arm_state->resume_blocker)) {
+ /* Indicate that there are threads waiting on the resume
+ * blocker. These need to be allowed to complete before
+ * a _second_ call to force suspend can complete,
+ * otherwise low priority threads might never actually
+ * continue */
+ arm_state->blocked_count++;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
+ "blocked - waiting...", __func__, entity);
+ if (wait_for_completion_killable(
+ &arm_state->resume_blocker) != 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s %s "
+ "wait for resume blocker interrupted",
+ __func__, entity);
+ ret = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ arm_state->blocked_count--;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
+ "unblocked", __func__, entity);
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (--arm_state->blocked_count == 0)
+ complete_all(&arm_state->blocked_blocker);
+ }
+ }
+
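+ /* A 'use' cancels any pending autosuspend timer. */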
+ stop_suspend_timer(arm_state);
+
+ local_uc = ++arm_state->videocore_use_count;
+ local_entity_uc = ++(*entity_uc);
+
+ /* If there's a pending request which hasn't yet been serviced then
+ * just clear it. If we're past VC_SUSPEND_REQUESTED state then
+ * vc_resume_complete will block until we either resume or fail to
+ * suspend */
+ if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+
+ if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, local_entity_uc, local_uc);
+ request_poll(state, NULL, 0);
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc, local_uc);
+
+
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ /* Completion is in a done state when we're not suspended, so this won't
+ * block for the non-suspended case. */
+ if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
+ vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
+ __func__, entity);
+ if (wait_for_completion_killable(
+ &arm_state->vc_resume_complete) != 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
+ "resume interrupted", __func__, entity);
+ ret = VCHIQ_ERROR;
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
+ entity);
+ }
+
+ if (ret == VCHIQ_SUCCESS) {
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
+ while (ack_cnt && (status == VCHIQ_SUCCESS)) {
+ /* Send the use notify to videocore */
+ status = vchiq_send_remote_use_active(state);
+ if (status == VCHIQ_SUCCESS)
+ ack_cnt--;
+ else
+ atomic_add(ack_cnt,
+ &arm_state->ka_use_ack_count);
+ }
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ char entity[16];
+ int *entity_uc;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (service) {
+ snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ snprintf(entity, sizeof(entity), "PEER: ");
+ entity_uc = &arm_state->peer_use_count;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (!arm_state->videocore_use_count || !(*entity_uc)) {
+ /* Don't use BUG_ON - don't allow user thread to crash kernel */
+ WARN_ON(!arm_state->videocore_use_count);
+ WARN_ON(!(*entity_uc));
+ ret = VCHIQ_ERROR;
+ goto unlock;
+ }
+ --arm_state->videocore_use_count;
+ --(*entity_uc);
+
+ if (!vchiq_videocore_wanted(state)) {
+ if (vchiq_platform_use_suspend_timer() &&
+ !arm_state->resume_blocked) {
+ /* Only use the timer if we're not trying to force
+ * suspend (=> resume_blocked) */
+ start_suspend_timer(arm_state);
+ } else {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s %s count %d, state count %d - suspending",
+ __func__, entity, *entity_uc,
+ arm_state->videocore_use_count);
+ vchiq_arm_vcsuspend(state);
+ }
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc,
+ arm_state->videocore_use_count);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+void
+vchiq_on_remote_use(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ atomic_inc(&arm_state->ka_use_count);
+ complete(&arm_state->ka_evt);
+}
+
+void
+vchiq_on_remote_release(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ atomic_inc(&arm_state->ka_release_count);
+ complete(&arm_state->ka_evt);
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
+{
+ return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
+}
+
+VCHIQ_STATUS_T
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
+{
+ return vchiq_release_internal(service->state, service);
+}
+
+static void suspend_timer_callback(unsigned long context)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ if (!arm_state)
+ goto out;
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s - suspend timer expired - check suspend", __func__);
+ vchiq_check_suspend(state);
+out:
+ return;
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_use_internal(service->state, service,
+ USE_TYPE_SERVICE_NO_RESUME);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_use_internal(service->state, service,
+ USE_TYPE_SERVICE);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_release_internal(service->state, service);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+void
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int i, j = 0;
+ /* Only dump 64 services */
+ static const int local_max_services = 64;
+ /* If there's more than 64 services, only dump ones with
+ * non-zero counts */
+ int only_nonzero = 0;
+ static const char *nz = "<-- preventing suspend";
+
+ enum vc_suspend_status vc_suspend_state;
+ enum vc_resume_status vc_resume_state;
+ int peer_count;
+ int vc_use_count;
+ int active_services;
+ struct service_data_struct {
+ int fourcc;
+ int clientid;
+ int use_count;
+ } service_data[local_max_services];
+
+ if (!arm_state)
+ return;
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ vc_suspend_state = arm_state->vc_suspend_state;
+ vc_resume_state = arm_state->vc_resume_state;
+ peer_count = arm_state->peer_use_count;
+ vc_use_count = arm_state->videocore_use_count;
+ active_services = state->unused_service;
+ if (active_services > local_max_services)
+ only_nonzero = 1;
+
+ for (i = 0; (i < active_services) && (j < local_max_services); i++) {
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
+ if (!service_ptr)
+ continue;
+
+ if (only_nonzero && !service_ptr->service_use_count)
+ continue;
+
+ if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
+ service_data[j].fourcc = service_ptr->base.fourcc;
+ service_data[j].clientid = service_ptr->client_id;
+ service_data[j++].use_count = service_ptr->
+ service_use_count;
+ }
+ }
+
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ vchiq_log_warning(vchiq_susp_log_level,
+ "-- Videcore suspend state: %s --",
+ suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
+ vchiq_log_warning(vchiq_susp_log_level,
+ "-- Videcore resume state: %s --",
+ resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
+
+ if (only_nonzero)
+ vchiq_log_warning(vchiq_susp_log_level, "Too many active "
+ "services (%d). Only dumping up to first %d services "
+ "with non-zero use-count", active_services,
+ local_max_services);
+
+ for (i = 0; i < j; i++) {
+ vchiq_log_warning(vchiq_susp_log_level,
+ "----- %c%c%c%c:%d service count %d %s",
+ VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
+ service_data[i].clientid,
+ service_data[i].use_count,
+ service_data[i].use_count ? nz : "");
+ }
+ vchiq_log_warning(vchiq_susp_log_level,
+ "----- VCHIQ use count count %d", peer_count);
+ vchiq_log_warning(vchiq_susp_log_level,
+ "--- Overall vchiq instance use count %d", vc_use_count);
+
+ vchiq_dump_platform_use_state(state);
+}
+
+VCHIQ_STATUS_T
+vchiq_check_service(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_ARM_STATE_T *arm_state;
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+
+ if (!service || !service->state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ arm_state = vchiq_platform_get_arm_state(service->state);
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ if (service->service_use_count)
+ ret = VCHIQ_SUCCESS;
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ if (ret == VCHIQ_ERROR) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s ERROR - %c%c%c%c:%8x service count %d, "
+ "state count %d, videocore suspend state %s", __func__,
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id, service->service_use_count,
+ arm_state->videocore_use_count,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ vchiq_dump_service_use_state(service->state);
+ }
+out:
+ return ret;
+}
+
+/* stub functions */
+void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
+{
+ (void)state;
+}
+
+void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
+ get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
+ write_lock_bh(&arm_state->susp_res_lock);
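+ /* Start the keep-alive thread on the first transition to CONNECTED. */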
+ if (!arm_state->first_connect) {
+ char threadname[10];
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
+ state->id);
+ arm_state->ka_thread = vchiq_thread_create(
+ &vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (arm_state->ka_thread == NULL) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "vchiq: FATAL: couldn't create thread %s",
+ threadname);
+ } else {
+ wake_up_process(arm_state->ka_thread);
+ }
+ } else
+ write_unlock_bh(&arm_state->susp_res_lock);
+ }
+}
+
+/****************************************************************************
+*
+* vchiq_init - called when the module is loaded.
+*
+***************************************************************************/
+
+int __init vchiq_init(void);
+int __init
+vchiq_init(void)
+{
+ int err;
+
+#ifdef notyet
+ /* create proc entries */
+ err = vchiq_proc_init();
+ if (err != 0)
+ goto failed_proc_init;
+#endif
+
+ vchiq_cdev = make_dev(&vchiq_cdevsw, 0,
+ UID_ROOT, GID_WHEEL, 0600, "vchiq");
+ if (!vchiq_cdev) {
+ printf("Failed to create /dev/vchiq");
+ return (-ENXIO);
+ }
+
+ spin_lock_init(&msg_queue_spinlock);
+
+ err = vchiq_platform_init(&g_state);
+ if (err != 0)
+ goto failed_platform_init;
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq: initialised - version %d (min %d)",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+
+ return 0;
+
+failed_platform_init:
+ if (vchiq_cdev) {
+ destroy_dev(vchiq_cdev);
+ vchiq_cdev = NULL;
+ }
+ vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
+ return err;
+}
+
+#ifdef notyet
+static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int use_count = 0, i;
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i)) != NULL) {
+ use_count += service->service_use_count;
+ unlock_service(service);
+ }
+ return use_count;
+}
+
+/* read the per-process use-count */
+static int proc_read_use_count(char *page, char **start,
+ off_t off, int count,
+ int *eof, void *data)
+{
+ VCHIQ_INSTANCE_T instance = data;
+ int len, use_count;
+
+ use_count = vchiq_instance_get_use_count(instance);
+ len = snprintf(page+off, count, "%d\n", use_count);
+
+ return len;
+}
+
+/* add an instance (process) to the proc entries */
+static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
+{
+ char pidstr[32];
+ struct proc_dir_entry *top, *use_count;
+ struct proc_dir_entry *clients = vchiq_clients_top();
+ int pid = instance->pid;
+
+ snprintf(pidstr, sizeof(pidstr), "%d", pid);
+ top = proc_mkdir(pidstr, clients);
+ if (!top)
+ goto fail_top;
+
+ use_count = create_proc_read_entry("use_count",
+ 0444, top,
+ proc_read_use_count,
+ instance);
+ if (!use_count)
+ goto fail_use_count;
+
+ instance->proc_entry = top;
+
+ return 0;
+
+fail_use_count:
+ remove_proc_entry(top->name, clients);
+fail_top:
+ return -ENOMEM;
+}
+
+static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
+{
+ struct proc_dir_entry *clients = vchiq_clients_top();
+ remove_proc_entry("use_count", instance->proc_entry);
+ remove_proc_entry(instance->proc_entry->name, clients);
+}
+
+#endif
+
+/****************************************************************************
+*
+* vchiq_exit - called when the module is unloaded.
+*
+***************************************************************************/
+
+void vchiq_exit(void);
+void
+vchiq_exit(void)
+{
+ if (vchiq_ehtag != NULL)
+ EVENTHANDLER_DEREGISTER(dev_clone, vchiq_ehtag);
+ vchiq_ehtag = NULL;
+
+ vchiq_platform_exit(&g_state);
+ if (vchiq_cdev) {
+ destroy_dev(vchiq_cdev);
+ vchiq_cdev = NULL;
+ }
+}
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.h
new file mode 100644
index 000000000000..e514a7f6119b
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.h
@@ -0,0 +1,200 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_ARM_H
+#define VCHIQ_ARM_H
+
+#include "vchiq_core.h"
+
+
+enum vc_suspend_status {
+ VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
+ VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
+ VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
+ VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
+ VC_SUSPEND_REQUESTED, /* User has requested suspend */
+ VC_SUSPEND_IN_PROGRESS, /* Slot handler has received suspend request */
+ VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
+};
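+
+/*
+ * Note, inferred from the use in vchiq_check_service() above: because this
+ * enumeration starts at -3, lookup tables such as suspend_state_names[] are
+ * indexed with an offset (VC_SUSPEND_NUM_OFFSET, presumably 3) so that
+ * VC_SUSPEND_FORCE_CANCELED maps to element 0.
+ */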
+
+enum vc_resume_status {
+ VC_RESUME_FAILED = -1, /* Videocore resume failed */
+ VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
+ VC_RESUME_REQUESTED, /* User has requested resume */
+ VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
+ VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
+};
+
+
+enum USE_TYPE_E {
+ USE_TYPE_SERVICE,
+ USE_TYPE_SERVICE_NO_RESUME,
+ USE_TYPE_VCHIQ
+};
+
+
+
+typedef struct vchiq_arm_state_struct {
+ /* Keepalive-related data */
+ VCHIQ_THREAD_T ka_thread;
+ struct completion ka_evt;
+ atomic_t ka_use_count;
+ atomic_t ka_use_ack_count;
+ atomic_t ka_release_count;
+
+ struct completion vc_suspend_complete;
+ struct completion vc_resume_complete;
+
+ rwlock_t susp_res_lock;
+ enum vc_suspend_status vc_suspend_state;
+ enum vc_resume_status vc_resume_state;
+
+ unsigned int wake_address;
+
+ struct timer_list suspend_timer;
+ int suspend_timer_timeout;
+ int suspend_timer_running;
+
+ /* Global use count for videocore.
+ ** This is equal to the sum of the use counts for all services. When
+ ** this hits zero the videocore suspend procedure will be initiated.
+ */
+ int videocore_use_count;
+
+ /* Use count to track requests from videocore peer.
+ ** This use count is not associated with a service, so needs to be
+ ** tracked separately with the state.
+ */
+ int peer_use_count;
+
+ /* Flag to indicate whether resume is blocked. This happens when the
+ ** ARM is suspending
+ */
+ struct completion resume_blocker;
+ int resume_blocked;
+ struct completion blocked_blocker;
+ int blocked_count;
+
+ int autosuspend_override;
+
+ /* Flag to indicate that the first vchiq connect has made it through.
+ ** This means that both sides should be fully ready, and we should
+ ** be able to suspend after this point.
+ */
+ int first_connect;
+
+ unsigned long long suspend_start_time;
+ unsigned long long sleep_start_time;
+ unsigned long long resume_start_time;
+ unsigned long long last_wake_time;
+
+} VCHIQ_ARM_STATE_T;
+
+extern int vchiq_arm_log_level;
+extern int vchiq_susp_log_level;
+
+extern int __init
+vchiq_platform_init(VCHIQ_STATE_T *state);
+
+extern void __exit
+vchiq_platform_exit(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATE_T *
+vchiq_get_state(void);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_vcresume(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
+
+extern int
+vchiq_check_resume(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_check_suspend(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_suspend(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_platform_use_suspend_timer(void);
+
+extern void
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
+
+extern VCHIQ_ARM_STATE_T*
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_videocore_wanted(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ enum USE_TYPE_E use_type);
+extern VCHIQ_STATUS_T
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
+
+void
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_suspend_status new_state);
+
+void
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_resume_status new_state);
+
+void
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
+
+extern int vchiq_proc_init(void);
+extern void vchiq_proc_deinit(void);
+extern struct proc_dir_entry *vchiq_proc_top(void);
+extern struct proc_dir_entry *vchiq_clients_top(void);
+
+
+#endif /* VCHIQ_ARM_H */
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_build_info.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_build_info.h
new file mode 100644
index 000000000000..df645813bdae
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_build_info.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+const char *vchiq_get_build_hostname(void);
+const char *vchiq_get_build_version(void);
+const char *vchiq_get_build_time(void);
+const char *vchiq_get_build_date(void);
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_cfg.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_cfg.h
new file mode 100644
index 000000000000..493c86c34957
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_cfg.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CFG_H
+#define VCHIQ_CFG_H
+
+#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
+/* The version of VCHIQ - change with any non-trivial change */
+#define VCHIQ_VERSION 6
+/* The minimum compatible version - update to match VCHIQ_VERSION with any
+** incompatible change */
+#define VCHIQ_VERSION_MIN 3
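+
+/*
+ * Illustrative note, based on the range check in parse_open() in
+ * vchiq_core.c: two endpoints are treated as compatible when their version
+ * ranges overlap. With VCHIQ_VERSION 6 and VCHIQ_VERSION_MIN 3:
+ *   peer version 4, version_min 2  -> accepted (4 >= 3 and 6 >= 2)
+ *   peer version 2, version_min 1  -> rejected (2 < 3)
+ */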
+
+#define VCHIQ_MAX_STATES 1
+#define VCHIQ_MAX_SERVICES 4096
+#define VCHIQ_MAX_SLOTS 128
+#define VCHIQ_MAX_SLOTS_PER_SIDE 64
+
+#define VCHIQ_NUM_CURRENT_BULKS 32
+#define VCHIQ_NUM_SERVICE_BULKS 4
+
+#ifndef VCHIQ_ENABLE_DEBUG
+#define VCHIQ_ENABLE_DEBUG 1
+#endif
+
+#ifndef VCHIQ_ENABLE_STATS
+#define VCHIQ_ENABLE_STATS 1
+#endif
+
+#endif /* VCHIQ_CFG_H */
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.c
new file mode 100644
index 000000000000..0bc6c587754f
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.c
@@ -0,0 +1,117 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_connected.h"
+#include "vchiq_core.h"
+
+#define MAX_CALLBACKS 10
+
+static int g_connected;
+static int g_num_deferred_callbacks;
+static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
+static int g_once_init;
+static struct mutex g_connected_mutex;
+
+/****************************************************************************
+*
+* Function to initialize our lock.
+*
+***************************************************************************/
+
+static void connected_init(void)
+{
+ if (!g_once_init) {
+ lmutex_init(&g_connected_mutex);
+ g_once_init = 1;
+ }
+}
+
+/****************************************************************************
+*
+* This function is used to defer initialization until the vchiq stack is
+* initialized. If the stack is already initialized, then the callback will
+* be made immediately, otherwise it will be deferred until
+* vchiq_call_connected_callbacks is called.
+*
+***************************************************************************/
+
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
+{
+ connected_init();
+
+ if (lmutex_lock_interruptible(&g_connected_mutex) != 0)
+ return;
+
+ if (g_connected) {
+ /* We're already connected. Call the callback immediately. */
+ callback();
+ } else {
+ if (g_num_deferred_callbacks >= MAX_CALLBACKS)
+ vchiq_log_error(vchiq_core_log_level,
+ "There are already %d callbacks registered - "
+ "please increase MAX_CALLBACKS",
+ g_num_deferred_callbacks);
+ else {
+ g_deferred_callback[g_num_deferred_callbacks] =
+ callback;
+ g_num_deferred_callbacks++;
+ }
+ }
+ lmutex_unlock(&g_connected_mutex);
+}
+
+/****************************************************************************
+*
+* This function is called by the vchiq stack once it has been connected to
+* the videocore and clients can start to use the stack.
+*
+***************************************************************************/
+
+void vchiq_call_connected_callbacks(void)
+{
+ int i;
+
+ connected_init();
+
+ if (lmutex_lock_interruptible(&g_connected_mutex) != 0)
+ return;
+
+ for (i = 0; i < g_num_deferred_callbacks; i++)
+ g_deferred_callback[i]();
+
+ g_num_deferred_callbacks = 0;
+ g_connected = 1;
+ lmutex_unlock(&g_connected_mutex);
+}
+EXPORT_SYMBOL(vchiq_add_connected_callback);
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.h
new file mode 100644
index 000000000000..e4cfdcc8aab2
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_connected.h
@@ -0,0 +1,51 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CONNECTED_H
+#define VCHIQ_CONNECTED_H
+
+/* ---- Include Files ----------------------------------------------------- */
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
+
+/* ---- Variable Externs ------------------------------------------------- */
+
+/* ---- Function Prototypes ---------------------------------------------- */
+
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
+void vchiq_call_connected_callbacks(void);
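+
+/*
+ * Minimal usage sketch (hypothetical client code, not part of this file):
+ *
+ *   static void my_client_start(void) { ... open services here ... }
+ *   ...
+ *   vchiq_add_connected_callback(my_client_start);
+ *
+ * If the stack is already connected the callback runs immediately;
+ * otherwise it is deferred until vchiq_call_connected_callbacks() runs.
+ */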
+
+#endif /* VCHIQ_CONNECTED_H */
+
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.c
new file mode 100644
index 000000000000..633fd869b5cc
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.c
@@ -0,0 +1,3842 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_core.h"
+
+#define VCHIQ_SLOT_HANDLER_STACK 8192
+
+#define HANDLE_STATE_SHIFT 12
+
+#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
+#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
+#define SLOT_INDEX_FROM_DATA(state, data) \
+ (((unsigned int)((char *)data - (char *)state->slot_data)) / \
+ VCHIQ_SLOT_SIZE)
+#define SLOT_INDEX_FROM_INFO(state, info) \
+ ((unsigned int)(info - state->slot_info))
+#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
+ ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
+
+
+#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
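+
+/*
+ * Illustrative note: SLOT_QUEUE_INDEX_FROM_POS() converts an absolute byte
+ * position in the transmit stream into a slot-queue index by dividing by
+ * VCHIQ_SLOT_SIZE (defined elsewhere). BULK_INDEX() relies on
+ * VCHIQ_NUM_SERVICE_BULKS being a power of two (asserted below), so, with
+ * the value 4 from vchiq_cfg.h, indices wrap with a mask instead of a
+ * modulo: BULK_INDEX(5) == 1, BULK_INDEX(8) == 0.
+ */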
+
+
+struct vchiq_open_payload {
+ int fourcc;
+ int client_id;
+ short version;
+ short version_min;
+};
+
+struct vchiq_openack_payload {
+ short version;
+};
+
+/* we require this for consistency between endpoints */
+vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
+vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
+vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+
+/* Run time control of log level, based on KERN_XXX level. */
+int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
+
+static atomic_t pause_bulks_count = ATOMIC_INIT(0);
+
+static DEFINE_SPINLOCK(service_spinlock);
+DEFINE_SPINLOCK(bulk_waiter_spinlock);
+DEFINE_SPINLOCK(quota_spinlock);
+
+void
+vchiq_core_initialize(void)
+{
+ spin_lock_init(&service_spinlock);
+ spin_lock_init(&bulk_waiter_spinlock);
+ spin_lock_init(&quota_spinlock);
+}
+
+VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
+static unsigned int handle_seq;
+
+static const char *const srvstate_names[] = {
+ "FREE",
+ "HIDDEN",
+ "LISTENING",
+ "OPENING",
+ "OPEN",
+ "OPENSYNC",
+ "CLOSESENT",
+ "CLOSERECVD",
+ "CLOSEWAIT",
+ "CLOSED"
+};
+
+static const char *const reason_names[] = {
+ "SERVICE_OPENED",
+ "SERVICE_CLOSED",
+ "MESSAGE_AVAILABLE",
+ "BULK_TRANSMIT_DONE",
+ "BULK_RECEIVE_DONE",
+ "BULK_TRANSMIT_ABORTED",
+ "BULK_RECEIVE_ABORTED"
+};
+
+static const char *const conn_state_names[] = {
+ "DISCONNECTED",
+ "CONNECTING",
+ "CONNECTED",
+ "PAUSING",
+ "PAUSE_SENT",
+ "PAUSED",
+ "RESUMING",
+ "PAUSE_TIMEOUT",
+ "RESUME_TIMEOUT"
+};
+
+
+static void
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
+
+static const char *msg_type_str(unsigned int msg_type)
+{
+ switch (msg_type) {
+ case VCHIQ_MSG_PADDING: return "PADDING";
+ case VCHIQ_MSG_CONNECT: return "CONNECT";
+ case VCHIQ_MSG_OPEN: return "OPEN";
+ case VCHIQ_MSG_OPENACK: return "OPENACK";
+ case VCHIQ_MSG_CLOSE: return "CLOSE";
+ case VCHIQ_MSG_DATA: return "DATA";
+ case VCHIQ_MSG_BULK_RX: return "BULK_RX";
+ case VCHIQ_MSG_BULK_TX: return "BULK_TX";
+ case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
+ case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
+ case VCHIQ_MSG_PAUSE: return "PAUSE";
+ case VCHIQ_MSG_RESUME: return "RESUME";
+ case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
+ case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
+ }
+ return "???";
+}
+
+static inline void
+vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
+{
+ vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
+ service->srvstate = newstate;
+}
+
+VCHIQ_SERVICE_T *
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service;
+
+ spin_lock(&service_spinlock);
+ service = handle_to_service(handle);
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->handle == handle)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+find_service_by_port(VCHIQ_STATE_T *state, int localport)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
+ spin_lock(&service_spinlock);
+ service = state->services[localport];
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+ }
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid port %d", localport);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service;
+
+ spin_lock(&service_spinlock);
+ service = handle_to_service(handle);
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->handle == handle) &&
+ (service->instance == instance)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
+ int *pidx)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ int idx = *pidx;
+
+ spin_lock(&service_spinlock);
+ while (idx < state->unused_service) {
+ VCHIQ_SERVICE_T *srv = state->services[idx++];
+ if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (srv->instance == instance)) {
+ service = srv;
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ break;
+ }
+ }
+ spin_unlock(&service_spinlock);
+
+ *pidx = idx;
+
+ return service;
+}
+
+void
+lock_service(VCHIQ_SERVICE_T *service)
+{
+ spin_lock(&service_spinlock);
+ BUG_ON(!service || (service->ref_count == 0));
+ if (service)
+ service->ref_count++;
+ spin_unlock(&service_spinlock);
+}
+
+void
+unlock_service(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+ spin_lock(&service_spinlock);
+ BUG_ON(!service || (service->ref_count == 0));
+ if (service && service->ref_count) {
+ service->ref_count--;
+ if (!service->ref_count) {
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+ state->services[service->localport] = NULL;
+
+ _sema_destroy(&service->remove_event);
+ _sema_destroy(&service->bulk_remove_event);
+ lmutex_destroy(&service->bulk_mutex);
+ } else
+ service = NULL;
+ }
+ spin_unlock(&service_spinlock);
+
+ if (service && service->userdata_term)
+ service->userdata_term(service->base.userdata);
+
+ kfree(service);
+}
+
+int
+vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ int id;
+
+ id = service ? service->client_id : 0;
+ if (service)
+ unlock_service(service);
+
+ return id;
+}
+
+void *
+vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
+
+ return service ? service->base.userdata : NULL;
+}
+
+int
+vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
+
+ return service ? service->base.fourcc : 0;
+}
+
+static void
+mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
+{
+ VCHIQ_STATE_T *state = service->state;
+ VCHIQ_SERVICE_QUOTA_T *service_quota;
+
+ service->closing = 1;
+
+ /* Synchronise with other threads. */
+ lmutex_lock(&state->recycle_mutex);
+ lmutex_unlock(&state->recycle_mutex);
+ if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
+ /* If we're pausing then the slot_mutex is held until resume
+ * by the slot handler. Therefore don't try to acquire this
+ * mutex if we're the slot handler and in the pause sent state.
+ * We don't need to in this case anyway. */
+ lmutex_lock(&state->slot_mutex);
+ lmutex_unlock(&state->slot_mutex);
+ }
+
+ /* Unblock any sending thread. */
+ service_quota = &state->service_quotas[service->localport];
+ up(&service_quota->quota_event);
+}
+
+static void
+mark_service_closing(VCHIQ_SERVICE_T *service)
+{
+ mark_service_closing_internal(service, 0);
+}
+
+static inline VCHIQ_STATUS_T
+make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, void *bulk_userdata)
+{
+ VCHIQ_STATUS_T status;
+ vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
+ service->state->id, service->localport, reason_names[reason],
+ (unsigned int)header, (unsigned int)bulk_userdata);
+ status = service->base.callback(reason, header, service->handle,
+ bulk_userdata);
+ if (status == VCHIQ_ERROR) {
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: ignoring ERROR from callback to service %x",
+ service->state->id, service->handle);
+ status = VCHIQ_SUCCESS;
+ }
+ return status;
+}
+
+inline void
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
+{
+ VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+ vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
+ conn_state_names[oldstate],
+ conn_state_names[newstate]);
+ state->conn_state = newstate;
+ vchiq_platform_conn_state_changed(state, oldstate, newstate);
+}
+
+static inline void
+remote_event_create(REMOTE_EVENT_T *event)
+{
+ event->armed = 0;
+ /* Don't clear the 'fired' flag because it may already have been set
+ ** by the other side. */
+ _sema_init(event->event, 0);
+}
+
+__unused static inline void
+remote_event_destroy(REMOTE_EVENT_T *event)
+{
+ (void)event;
+}
+
+static inline int
+remote_event_wait(REMOTE_EVENT_T *event)
+{
+ if (!event->fired) {
+ event->armed = 1;
+ dsb();
+ if (!event->fired) {
+ if (down_interruptible(event->event) != 0) {
+ event->armed = 0;
+ return 0;
+ }
+ }
+ event->armed = 0;
+ wmb();
+ }
+
+ event->fired = 0;
+ return 1;
+}
+
+static inline void
+remote_event_signal_local(REMOTE_EVENT_T *event)
+{
+ event->armed = 0;
+ up(event->event);
+}
+
+static inline void
+remote_event_poll(REMOTE_EVENT_T *event)
+{
+ if (event->fired && event->armed)
+ remote_event_signal_local(event);
+}
+
+void
+remote_event_pollall(VCHIQ_STATE_T *state)
+{
+ remote_event_poll(&state->local->sync_trigger);
+ remote_event_poll(&state->local->sync_release);
+ remote_event_poll(&state->local->trigger);
+ remote_event_poll(&state->local->recycle);
+}
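+
+/*
+ * Summary of the remote-event handshake above (derived from this code, not
+ * a normative description): a waiter sets 'armed' before sleeping on the
+ * semaphore and re-checks 'fired' after arming to close the race window;
+ * the peer (via remote_event_signal(), implemented elsewhere) is assumed to
+ * set 'fired' and kick the event, and remote_event_poll() re-delivers
+ * locally any signal that arrived while the event was armed.
+ */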
+
+/* Round up message sizes so that any space at the end of a slot is always big
+** enough for a header. This relies on header size being a power of two, which
+** has been verified earlier by a static assertion. */
+
+static inline unsigned int
+calc_stride(unsigned int size)
+{
+ /* Allow room for the header */
+ size += sizeof(VCHIQ_HEADER_T);
+
+ /* Round up */
+ return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
+ - 1);
+}
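+
+/*
+ * Worked example, assuming sizeof(VCHIQ_HEADER_T) == 8 as asserted above:
+ *   calc_stride(5)  == (5 + 8 + 7) & ~7 == 16
+ *   calc_stride(24) == (24 + 8 + 7) & ~7 == 32
+ * so every message occupies a multiple of the header size, and any space
+ * left at the end of a slot is big enough for a padding header.
+ */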
+
+/* Called by the slot handler thread */
+static VCHIQ_SERVICE_T *
+get_listening_service(VCHIQ_STATE_T *state, int fourcc)
+{
+ int i;
+
+ WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ if (service &&
+ (service->public_fourcc == fourcc) &&
+ ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
+ (service->remoteport == VCHIQ_PORT_FREE)))) {
+ lock_service(service);
+ return service;
+ }
+ }
+
+ return NULL;
+}
+
+/* Called by the slot handler thread */
+static VCHIQ_SERVICE_T *
+get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
+{
+ int i;
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
+ && (service->remoteport == port)) {
+ lock_service(service);
+ return service;
+ }
+ }
+ return NULL;
+}
+
+inline void
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
+{
+ uint32_t value;
+
+ if (service) {
+ do {
+ value = atomic_read(&service->poll_flags);
+ } while (atomic_cmpxchg(&service->poll_flags, value,
+ value | (1 << poll_type)) != value);
+
+ do {
+ value = atomic_read(&state->poll_services[
+ service->localport>>5]);
+ } while (atomic_cmpxchg(
+ &state->poll_services[service->localport>>5],
+ value, value | (1 << (service->localport & 0x1f)))
+ != value);
+ }
+
+ state->poll_needed = 1;
+ wmb();
+
+ /* ... and ensure the slot handler runs. */
+ remote_event_signal_local(&state->local->trigger);
+}
+
+/* Called from queue_message, by the slot handler and application threads,
+** with slot_mutex held */
+static VCHIQ_HEADER_T *
+reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
+{
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ int tx_pos = state->local_tx_pos;
+ int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
+
+ if (space > slot_space) {
+ VCHIQ_HEADER_T *header;
+ /* Fill the remaining space with padding */
+ WARN_ON(state->tx_data == NULL);
+ header = (VCHIQ_HEADER_T *)
+ (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
+ header->msgid = VCHIQ_MSGID_PADDING;
+ header->size = slot_space - sizeof(VCHIQ_HEADER_T);
+
+ tx_pos += slot_space;
+ }
+
+ /* If necessary, get the next slot. */
+ if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
+ int slot_index;
+
+ /* If there is no free slot... */
+
+ if (down_trylock(&state->slot_available_event) != 0) {
+ /* ...wait for one. */
+
+ VCHIQ_STATS_INC(state, slot_stalls);
+
+ /* But first, flush through the last slot. */
+ state->local_tx_pos = tx_pos;
+ local->tx_pos = tx_pos;
+ remote_event_signal(&state->remote->trigger);
+
+ if (!is_blocking ||
+ (down_interruptible(
+ &state->slot_available_event) != 0))
+ return NULL; /* No space available */
+ }
+
+ BUG_ON(tx_pos ==
+ (state->slot_queue_available * VCHIQ_SLOT_SIZE));
+
+ slot_index = local->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
+ VCHIQ_SLOT_QUEUE_MASK];
+ state->tx_data =
+ (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ }
+
+ state->local_tx_pos = tx_pos + space;
+
+ return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
+}
+
+/* Called by the recycle thread. */
+static void
+process_free_queue(VCHIQ_STATE_T *state)
+{
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+ int slot_queue_available;
+
+ /* Use a read memory barrier to ensure that any state that may have
+ ** been modified by another thread is not masked by stale prefetched
+ ** values. */
+ rmb();
+
+ /* Find slots which have been freed by the other side, and return them
+ ** to the available queue. */
+ slot_queue_available = state->slot_queue_available;
+
+ while (slot_queue_available != local->slot_queue_recycle) {
+ unsigned int pos;
+ int slot_index = local->slot_queue[slot_queue_available++ &
+ VCHIQ_SLOT_QUEUE_MASK];
+ char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ int data_found = 0;
+
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
+ state->id, slot_index, (unsigned int)data,
+ local->slot_queue_recycle, slot_queue_available);
+
+ /* Initialise the bitmask for services which have used this
+ ** slot */
+ BITSET_ZERO(service_found);
+
+ pos = 0;
+
+ while (pos < VCHIQ_SLOT_SIZE) {
+ VCHIQ_HEADER_T *header =
+ (VCHIQ_HEADER_T *)(data + pos);
+ int msgid = header->msgid;
+ if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
+ int port = VCHIQ_MSG_SRCPORT(msgid);
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[port];
+ int count;
+ spin_lock(&quota_spinlock);
+ count = service_quota->message_use_count;
+ if (count > 0)
+ service_quota->message_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count == service_quota->message_quota)
+ /* Signal the service that it
+ ** has dropped below its quota
+ */
+ up(&service_quota->quota_event);
+ else if (count == 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "service %d "
+ "message_use_count=%d "
+ "(header %x, msgid %x, "
+ "header->msgid %x, "
+ "header->size %x)",
+ port,
+ service_quota->
+ message_use_count,
+ (unsigned int)header, msgid,
+ header->msgid,
+ header->size);
+ WARN(1, "invalid message use count\n");
+ }
+ if (!BITSET_IS_SET(service_found, port)) {
+ /* Set the found bit for this service */
+ BITSET_SET(service_found, port);
+
+ spin_lock(&quota_spinlock);
+ count = service_quota->slot_use_count;
+ if (count > 0)
+ service_quota->slot_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count > 0) {
+ /* Signal the service in case
+ ** it has dropped below its
+ ** quota */
+ up(&service_quota->quota_event);
+ vchiq_log_trace(
+ vchiq_core_log_level,
+ "%d: pfq:%d %x@%x - "
+ "slot_use->%d",
+ state->id, port,
+ header->size,
+ (unsigned int)header,
+ count - 1);
+ } else {
+ vchiq_log_error(
+ vchiq_core_log_level,
+ "service %d "
+ "slot_use_count"
+ "=%d (header %x"
+ ", msgid %x, "
+ "header->msgid"
+ " %x, header->"
+ "size %x)",
+ port, count,
+ (unsigned int)header,
+ msgid,
+ header->msgid,
+ header->size);
+ WARN(1, "bad slot use count\n");
+ }
+ }
+
+ data_found = 1;
+ }
+
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "pfq - pos %x: header %x, msgid %x, "
+ "header->msgid %x, header->size %x",
+ pos, (unsigned int)header, msgid,
+ header->msgid, header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+
+ if (data_found) {
+ int count;
+ spin_lock(&quota_spinlock);
+ count = state->data_use_count;
+ if (count > 0)
+ state->data_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+ if (count == state->data_quota)
+ up(&state->data_quota_event);
+ }
+
+ state->slot_queue_available = slot_queue_available;
+ up(&state->slot_available_event);
+ }
+}
+
+/* Called by the slot handler and application threads */
+static VCHIQ_STATUS_T
+queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ int msgid, const VCHIQ_ELEMENT_T *elements,
+ int count, int size, int is_blocking)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
+ VCHIQ_HEADER_T *header;
+ int type = VCHIQ_MSG_TYPE(msgid);
+
+ unsigned int stride;
+
+ local = state->local;
+
+ stride = calc_stride(size);
+
+ WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
+
+ if ((type != VCHIQ_MSG_RESUME) &&
+ (lmutex_lock_interruptible(&state->slot_mutex) != 0))
+ return VCHIQ_RETRY;
+
+ if (type == VCHIQ_MSG_DATA) {
+ int tx_end_index;
+
+ BUG_ON(!service);
+
+ if (service->closing) {
+ /* The service has been closed */
+ lmutex_unlock(&state->slot_mutex);
+ return VCHIQ_ERROR;
+ }
+
+ service_quota = &state->service_quotas[service->localport];
+
+ spin_lock(&quota_spinlock);
+
+ /* Ensure this service doesn't use more than its quota of
+ ** messages or slots */
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+
+ /* Ensure data messages don't use more than their quota of
+ ** slots */
+ while ((tx_end_index != state->previous_data_index) &&
+ (state->data_use_count == state->data_quota)) {
+ VCHIQ_STATS_INC(state, data_stalls);
+ spin_unlock(&quota_spinlock);
+ lmutex_unlock(&state->slot_mutex);
+
+ if (down_interruptible(&state->data_quota_event)
+ != 0)
+ return VCHIQ_RETRY;
+
+ lmutex_lock(&state->slot_mutex);
+ spin_lock(&quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+ if ((tx_end_index == state->previous_data_index) ||
+ (state->data_use_count < state->data_quota)) {
+ /* Pass the signal on to other waiters */
+ up(&state->data_quota_event);
+ break;
+ }
+ }
+
+ while ((service_quota->message_use_count ==
+ service_quota->message_quota) ||
+ ((tx_end_index != service_quota->previous_tx_index) &&
+ (service_quota->slot_use_count ==
+ service_quota->slot_quota))) {
+ spin_unlock(&quota_spinlock);
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: qm:%d %s,%x - quota stall "
+ "(msg %d, slot %d)",
+ state->id, service->localport,
+ msg_type_str(type), size,
+ service_quota->message_use_count,
+ service_quota->slot_use_count);
+ VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
+ lmutex_unlock(&state->slot_mutex);
+ if (down_interruptible(&service_quota->quota_event)
+ != 0)
+ return VCHIQ_RETRY;
+ if (service->closing)
+ return VCHIQ_ERROR;
+ if (lmutex_lock_interruptible(&state->slot_mutex) != 0)
+ return VCHIQ_RETRY;
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
+ /* The service has been closed */
+ lmutex_unlock(&state->slot_mutex);
+ return VCHIQ_ERROR;
+ }
+ spin_lock(&quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+ }
+
+ spin_unlock(&quota_spinlock);
+ }
+
+ header = reserve_space(state, stride, is_blocking);
+
+ if (!header) {
+ if (service)
+ VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
+ lmutex_unlock(&state->slot_mutex);
+ return VCHIQ_RETRY;
+ }
+
+ if (type == VCHIQ_MSG_DATA) {
+ int i, pos;
+ int tx_end_index;
+ int slot_use_count;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: qm %s@%x,%x (%d->%d)",
+ state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+
+ BUG_ON(!service);
+
+ for (i = 0, pos = 0; i < (unsigned int)count;
+ pos += elements[i++].size)
+ if (elements[i].size) {
+ if (vchiq_copy_from_user
+ (header->data + pos, elements[i].data,
+ (size_t) elements[i].size) !=
+ VCHIQ_SUCCESS) {
+ lmutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
+ error_count);
+ return VCHIQ_ERROR;
+ }
+ if (i == 0) {
+ if (vchiq_core_msg_log_level >=
+ VCHIQ_LOG_INFO)
+ vchiq_log_dump_mem("Sent", 0,
+ header->data + pos,
+ min(64,
+ elements[0].size));
+ }
+ }
+
+ spin_lock(&quota_spinlock);
+ service_quota->message_use_count++;
+
+ tx_end_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
+
+ /* If this transmission can't fit in the last slot used by any
+ ** service, the data_use_count must be increased. */
+ if (tx_end_index != state->previous_data_index) {
+ state->previous_data_index = tx_end_index;
+ state->data_use_count++;
+ }
+
+ /* If this isn't the same slot last used by this service,
+ ** the service's slot_use_count must be increased. */
+ if (tx_end_index != service_quota->previous_tx_index) {
+ service_quota->previous_tx_index = tx_end_index;
+ slot_use_count = ++service_quota->slot_use_count;
+ } else {
+ slot_use_count = 0;
+ }
+
+ spin_unlock(&quota_spinlock);
+
+ if (slot_use_count)
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
+ state->id, service->localport,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
+ slot_use_count, header);
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: qm %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+ if (size != 0) {
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
+ memcpy(header->data, elements[0].data,
+ elements[0].size);
+ }
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->msgid = msgid;
+ header->size = size;
+
+ {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid),
+ size);
+ }
+
+ /* Make sure the new header is visible to the peer. */
+ wmb();
+
+ /* Make the new tx_pos visible to the peer. */
+ local->tx_pos = state->local_tx_pos;
+ wmb();
+
+ if (service && (type == VCHIQ_MSG_CLOSE))
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
+
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
+ lmutex_unlock(&state->slot_mutex);
+
+ remote_event_signal(&state->remote->trigger);
+
+ return VCHIQ_SUCCESS;
+}
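+
+/*
+ * Summary of the flow control implemented in queue_message() (derived from
+ * the code above): DATA messages are throttled by three counters - the
+ * global data_use_count against state->data_quota, and the per-service
+ * message_use_count and slot_use_count against the service quotas. Senders
+ * block on data_quota_event or the service's quota_event until
+ * process_free_queue() recycles slots and decrements the counters.
+ */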
+
+/* Called by the slot handler and application threads */
+static VCHIQ_STATUS_T
+queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ int msgid, const VCHIQ_ELEMENT_T *elements,
+ int count, int size, int is_blocking)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_HEADER_T *header;
+
+ local = state->local;
+
+ if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
+ (lmutex_lock_interruptible(&state->sync_mutex) != 0))
+ return VCHIQ_RETRY;
+
+ remote_event_wait(&local->sync_release);
+
+ rmb();
+
+ header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
+ local->slot_sync);
+
+ {
+ int oldmsgid = header->msgid;
+ if (oldmsgid != VCHIQ_MSGID_PADDING)
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: qms - msgid %x, not PADDING",
+ state->id, oldmsgid);
+ }
+
+ if (service) {
+ int i, pos;
+
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+
+ for (i = 0, pos = 0; i < (unsigned int)count;
+ pos += elements[i++].size)
+ if (elements[i].size) {
+ if (vchiq_copy_from_user
+ (header->data + pos, elements[i].data,
+ (size_t) elements[i].size) !=
+ VCHIQ_SUCCESS) {
+ lmutex_unlock(&state->sync_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
+ error_count);
+ return VCHIQ_ERROR;
+ }
+ if (i == 0) {
+ if (vchiq_sync_log_level >=
+ VCHIQ_LOG_TRACE)
+ vchiq_log_dump_mem("Sent Sync",
+ 0, header->data + pos,
+ min(64,
+ elements[0].size));
+ }
+ }
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+ if (size != 0) {
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
+ memcpy(header->data, elements[0].data,
+ elements[0].size);
+ }
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->size = size;
+ header->msgid = msgid;
+
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_trace(vchiq_sync_log_level,
+ "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid),
+ size);
+ }
+
+ /* Make sure the new header is visible to the peer. */
+ wmb();
+
+ remote_event_signal(&state->remote->sync_trigger);
+
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
+ lmutex_unlock(&state->sync_mutex);
+
+ return VCHIQ_SUCCESS;
+}
+
+static inline void
+claim_slot(VCHIQ_SLOT_INFO_T *slot)
+{
+ slot->use_count++;
+}
+
+static void
+release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
+{
+ int release_count;
+
+ lmutex_lock(&state->recycle_mutex);
+
+ if (header) {
+ int msgid = header->msgid;
+ if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
+ (service && service->closing)) {
+ lmutex_unlock(&state->recycle_mutex);
+ return;
+ }
+
+ /* Rewrite the message header to prevent a double
+ ** release */
+ header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
+ }
+
+ release_count = slot_info->release_count;
+ slot_info->release_count = ++release_count;
+
+ if (release_count == slot_info->use_count) {
+ int slot_queue_recycle;
+ /* Add to the freed queue */
+
+ /* A read barrier is necessary here to prevent speculative
+ ** fetches of remote->slot_queue_recycle from overtaking the
+ ** mutex. */
+ rmb();
+
+ slot_queue_recycle = state->remote->slot_queue_recycle;
+ state->remote->slot_queue[slot_queue_recycle &
+ VCHIQ_SLOT_QUEUE_MASK] =
+ SLOT_INDEX_FROM_INFO(state, slot_info);
+ state->remote->slot_queue_recycle = slot_queue_recycle + 1;
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: release_slot %d - recycle->%x",
+ state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
+
+ /* A write barrier is necessary, but remote_event_signal
+ ** contains one. */
+ remote_event_signal(&state->remote->recycle);
+ }
+
+ lmutex_unlock(&state->recycle_mutex);
+}
+
+/* Called by the slot handler - don't hold the bulk mutex */
+static VCHIQ_STATUS_T
+notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
+ int retry_poll)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: nb:%d %cx - p=%x rn=%x r=%x",
+ service->state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->process, queue->remote_notify, queue->remove);
+
+ if (service->state->is_master) {
+ while (queue->remote_notify != queue->process) {
+ VCHIQ_BULK_T *bulk =
+ &queue->bulks[BULK_INDEX(queue->remote_notify)];
+ int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
+ int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
+ service->remoteport);
+ VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
+ /* Only reply to non-dummy bulk requests */
+ if (bulk->remote_data) {
+ status = queue_message(service->state, NULL,
+ msgid, &element, 1, 4, 0);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+ queue->remote_notify++;
+ }
+ } else {
+ queue->remote_notify = queue->process;
+ }
+
+ if (status == VCHIQ_SUCCESS) {
+ while (queue->remove != queue->remote_notify) {
+ VCHIQ_BULK_T *bulk =
+ &queue->bulks[BULK_INDEX(queue->remove)];
+
+ /* Only generate callbacks for non-dummy bulk
+ ** requests, and non-terminated services */
+ if (bulk->data && service->instance) {
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_tx_bytes,
+ bulk->actual);
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_rx_bytes,
+ bulk->actual);
+ }
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_aborted_count);
+ }
+ if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ struct bulk_waiter *waiter;
+ spin_lock(&bulk_waiter_spinlock);
+ waiter = bulk->userdata;
+ if (waiter) {
+ waiter->actual = bulk->actual;
+ up(&waiter->event);
+ }
+ spin_unlock(&bulk_waiter_spinlock);
+ } else if (bulk->mode ==
+ VCHIQ_BULK_MODE_CALLBACK) {
+ VCHIQ_REASON_T reason = (bulk->dir ==
+ VCHIQ_BULK_TRANSMIT) ?
+ ((bulk->actual ==
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
+ VCHIQ_BULK_TRANSMIT_ABORTED :
+ VCHIQ_BULK_TRANSMIT_DONE) :
+ ((bulk->actual ==
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
+ VCHIQ_BULK_RECEIVE_ABORTED :
+ VCHIQ_BULK_RECEIVE_DONE);
+ status = make_service_callback(service,
+ reason, NULL, bulk->userdata);
+ if (status == VCHIQ_RETRY)
+ break;
+ }
+ }
+
+ queue->remove++;
+ up(&service->bulk_remove_event);
+ }
+ if (!retry_poll)
+ status = VCHIQ_SUCCESS;
+ }
+
+ if (status == VCHIQ_RETRY)
+ request_poll(service->state, service,
+ (queue == &service->bulk_tx) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+
+ return status;
+}
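+
+/*
+ * Note, derived from notify_bulks() above: only the master side sends
+ * BULK_RX_DONE/BULK_TX_DONE replies to the peer; the slave simply advances
+ * remote_notify to process. In either role, blocking waiters and
+ * VCHIQ_BULK_MODE_CALLBACK callbacks are then completed from the 'remove'
+ * cursor.
+ */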
+
+/* Called by the slot handler thread */
+static void
+poll_services(VCHIQ_STATE_T *state)
+{
+ int group, i;
+
+ for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
+ uint32_t flags;
+ flags = atomic_xchg(&state->poll_services[group], 0);
+ for (i = 0; flags; i++) {
+ if (flags & (1 << i)) {
+ VCHIQ_SERVICE_T *service =
+ find_service_by_port(state,
+ (group<<5) + i);
+ uint32_t service_flags;
+ flags &= ~(1 << i);
+ if (!service)
+ continue;
+ service_flags =
+ atomic_xchg(&service->poll_flags, 0);
+ if (service_flags &
+ (1 << VCHIQ_POLL_REMOVE)) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: ps - remove %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+
+ /* Make it look like a client, because
+ it must be removed and not left in
+ the LISTENING state. */
+ service->public_fourcc =
+ VCHIQ_FOURCC_INVALID;
+
+ if (vchiq_close_service_internal(
+ service, 0/*!close_recvd*/) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service,
+ VCHIQ_POLL_REMOVE);
+ } else if (service_flags &
+ (1 << VCHIQ_POLL_TERMINATE)) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: ps - terminate %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+ if (vchiq_close_service_internal(
+ service, 0/*!close_recvd*/) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service,
+ VCHIQ_POLL_TERMINATE);
+ }
+ if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
+ notify_bulks(service,
+ &service->bulk_tx,
+ 1/*retry_poll*/);
+ if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
+ notify_bulks(service,
+ &service->bulk_rx,
+ 1/*retry_poll*/);
+ unlock_service(service);
+ }
+ }
+ }
+}
+
+/* Called by the slot handler or application threads, holding the bulk mutex. */
+static int
+resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
+{
+ VCHIQ_STATE_T *state = service->state;
+ int resolved = 0;
+ int rc;
+
+ while ((queue->process != queue->local_insert) &&
+ (queue->process != queue->remote_insert)) {
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: rb:%d %cx - li=%x ri=%x p=%x",
+ state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->local_insert, queue->remote_insert,
+ queue->process);
+
+ WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
+ WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
+
+ rc = lmutex_lock_interruptible(&state->bulk_transfer_mutex);
+ if (rc != 0)
+ break;
+
+ vchiq_transfer_bulk(bulk);
+ lmutex_unlock(&state->bulk_transfer_mutex);
+
+ if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
+ const char *header = (queue == &service->bulk_tx) ?
+ "Send Bulk to" : "Recv Bulk from";
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s %c%c%c%c d:%d len:%d %x<->%x",
+ header,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ (unsigned int)bulk->data,
+ (unsigned int)bulk->remote_data);
+ else
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
+ " rx len:%d %x<->%x",
+ header,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ bulk->remote_size,
+ (unsigned int)bulk->data,
+ (unsigned int)bulk->remote_data);
+ }
+
+ vchiq_complete_bulk(bulk);
+ queue->process++;
+ resolved++;
+ }
+ return resolved;
+}
+
+/* Called with the bulk_mutex held */
+static void
+abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
+{
+ int is_tx = (queue == &service->bulk_tx);
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: aob:%d %cx - li=%x ri=%x p=%x",
+ service->state->id, service->localport, is_tx ? 't' : 'r',
+ queue->local_insert, queue->remote_insert, queue->process);
+
+ WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
+ WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
+
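+	/* Walk the queue, fabricating whichever half of each pending bulk
+	 * is missing, until both insert indices have caught up with the
+	 * process index. */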
+ while ((queue->process != queue->local_insert) ||
+ (queue->process != queue->remote_insert)) {
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
+
+ if (queue->process == queue->remote_insert) {
+ /* fabricate a matching dummy bulk */
+ bulk->remote_data = NULL;
+ bulk->remote_size = 0;
+ queue->remote_insert++;
+ }
+
+ if (queue->process != queue->local_insert) {
+ vchiq_complete_bulk(bulk);
+
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
+ "rx len:%d",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ bulk->remote_size);
+ } else {
+ /* fabricate a matching dummy bulk */
+ bulk->data = NULL;
+ bulk->size = 0;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+ bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
+ VCHIQ_BULK_RECEIVE;
+ queue->local_insert++;
+ }
+
+ queue->process++;
+ }
+}
+
+/* Called from the slot handler thread */
+static void
+pause_bulks(VCHIQ_STATE_T *state)
+{
+ if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
+ WARN_ON_ONCE(1);
+ atomic_set(&pause_bulks_count, 1);
+ return;
+ }
+
+ /* Block bulk transfers from all services */
+ lmutex_lock(&state->bulk_transfer_mutex);
+}
+
+/* Called from the slot handler thread */
+static void
+resume_bulks(VCHIQ_STATE_T *state)
+{
+ int i;
+ if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
+ WARN_ON_ONCE(1);
+ atomic_set(&pause_bulks_count, 0);
+ return;
+ }
+
+ /* Allow bulk transfers from all services */
+ lmutex_unlock(&state->bulk_transfer_mutex);
+
+ if (state->deferred_bulks == 0)
+ return;
+
+ /* Deal with any bulks which had to be deferred due to being in
+ * paused state. Don't try to match up to number of deferred bulks
+ * in case we've had something come and close the service in the
+ * interim - just process all bulk queues for all services */
+ vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
+ __func__, state->deferred_bulks);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ int resolved_rx = 0;
+ int resolved_tx = 0;
+ if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
+ continue;
+
+ lmutex_lock(&service->bulk_mutex);
+ resolved_rx = resolve_bulks(service, &service->bulk_rx);
+ resolved_tx = resolve_bulks(service, &service->bulk_tx);
+ lmutex_unlock(&service->bulk_mutex);
+ if (resolved_rx)
+ notify_bulks(service, &service->bulk_rx, 1);
+ if (resolved_tx)
+ notify_bulks(service, &service->bulk_tx, 1);
+ }
+ state->deferred_bulks = 0;
+}
+
+static int
+parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ int msgid, size;
+ unsigned int localport, remoteport;
+
+ msgid = header->msgid;
+ size = header->size;
+ //int type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+ if (size >= sizeof(struct vchiq_open_payload)) {
+ const struct vchiq_open_payload *payload =
+ (struct vchiq_open_payload *)header->data;
+ unsigned int fourcc;
+
+ fourcc = payload->fourcc;
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPEN@%x (%d->'%c%c%c%c')",
+ state->id, (unsigned int)header,
+ localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc));
+
+ service = get_listening_service(state, fourcc);
+
+ if (service) {
+ /* A matching service exists */
+ short v = payload->version;
+ short version_min = payload->version_min;
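+			/* The versions are compatible if each side's version
+			   is at least the other side's minimum. */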
+ if ((service->version < version_min) ||
+ (v < service->version_min)) {
+ /* Version mismatch */
+ vchiq_loud_error_header();
+ vchiq_loud_error("%d: service %d (%c%c%c%c) "
+ "version mismatch - local (%d, min %d)"
+ " vs. remote (%d, min %d)",
+ state->id, service->localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ service->version, service->version_min,
+ v, version_min);
+ vchiq_loud_error_footer();
+ unlock_service(service);
+ goto fail_open;
+ }
+ service->peer_version = v;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ struct vchiq_openack_payload ack_payload = {
+ service->version
+ };
+ VCHIQ_ELEMENT_T body = {
+ &ack_payload,
+ sizeof(ack_payload)
+ };
+
+ /* Acknowledge the OPEN */
+ if (service->sync) {
+ if (queue_message_sync(state, NULL,
+ VCHIQ_MAKE_MSG(
+ VCHIQ_MSG_OPENACK,
+ service->localport,
+ remoteport),
+ &body, 1, sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ } else {
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(
+ VCHIQ_MSG_OPENACK,
+ service->localport,
+ remoteport),
+ &body, 1, sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ }
+
+ /* The service is now open */
+ vchiq_set_service_state(service,
+ service->sync ? VCHIQ_SRVSTATE_OPENSYNC
+ : VCHIQ_SRVSTATE_OPEN);
+ }
+
+ service->remoteport = remoteport;
+ service->client_id = ((int *)header->data)[1];
+ if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
+ NULL, NULL) == VCHIQ_RETRY) {
+ /* Bail out if not ready */
+ service->remoteport = VCHIQ_PORT_FREE;
+ goto bail_not_ready;
+ }
+
+ /* Success - the message has been dealt with */
+ unlock_service(service);
+ return 1;
+ }
+ }
+
+fail_open:
+ /* No available service, or an invalid request - send a CLOSE */
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
+ NULL, 0, 0, 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+
+ return 1;
+
+bail_not_ready:
+ if (service)
+ unlock_service(service);
+
+ return 0;
+}
+
+/* Called by the slot handler thread */
+static void
+parse_rx_slots(VCHIQ_STATE_T *state)
+{
+ VCHIQ_SHARED_STATE_T *remote = state->remote;
+ VCHIQ_SERVICE_T *service = NULL;
+ int tx_pos;
+ DEBUG_INITIALISE(state->local)
+
+ tx_pos = remote->tx_pos;
+
+ while (state->rx_pos != tx_pos) {
+ VCHIQ_HEADER_T *header;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ DEBUG_TRACE(PARSE_LINE);
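+		/* rx_pos is at the start of a new slot - map it in from
+		   the remote's slot queue before parsing. */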
+ if (!state->rx_data) {
+ int rx_index;
+ WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
+ rx_index = remote->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
+ VCHIQ_SLOT_QUEUE_MASK];
+ state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
+ rx_index);
+ state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
+
+ /* Initialise use_count to one, and increment
+ ** release_count at the end of the slot to avoid
+ ** releasing the slot prematurely. */
+ state->rx_info->use_count = 1;
+ state->rx_info->release_count = 0;
+ }
+
+ header = (VCHIQ_HEADER_T *)(state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ DEBUG_VALUE(PARSE_HEADER, (int)header);
+ msgid = header->msgid;
+ DEBUG_VALUE(PARSE_MSGID, msgid);
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ if (type != VCHIQ_MSG_DATA)
+ VCHIQ_STATS_INC(state, ctrl_rx_count);
+
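+		/* Messages addressed to an existing service are looked up
+		   (and locked) here; OPEN and the connection-level messages
+		   are handled further down without this lookup. */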
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ case VCHIQ_MSG_CLOSE:
+ case VCHIQ_MSG_DATA:
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ service = find_service_by_port(state, localport);
+ if ((!service || service->remoteport != remoteport) &&
+ (localport == 0) &&
+ (type == VCHIQ_MSG_CLOSE)) {
+ /* This could be a CLOSE from a client which
+ hadn't yet received the OPENACK - look for
+ the connected service */
+ if (service)
+ unlock_service(service);
+ service = get_connected_service(state,
+ remoteport);
+ if (service)
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) - "
+ "found connected service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ service->localport);
+ }
+
+ if (!service) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) - "
+ "invalid/closed service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport, localport);
+ goto skip_message;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
+ "len:%d",
+ msg_type_str(type), type,
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
+ min(64, size));
+ }
+
+ if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
+ > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "header %x (msgid %x) - size %x too big for "
+ "slot",
+ (unsigned int)header, (unsigned int)msgid,
+ (unsigned int)size);
+ WARN(1, "oversized for slot\n");
+ }
+
+ switch (type) {
+ case VCHIQ_MSG_OPEN:
+ WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
+ if (!parse_open(state, header))
+ goto bail_not_ready;
+ break;
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
+ state->id, (unsigned int)header, size,
+ remoteport, localport, service->peer_version);
+ if (service->srvstate ==
+ VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_OPEN);
+ up(&service->remove_event);
+ } else
+ vchiq_log_error(vchiq_core_log_level,
+ "OPENACK received in state %s",
+ srvstate_names[service->srvstate]);
+ break;
+ case VCHIQ_MSG_CLOSE:
+ WARN_ON(size != 0); /* There should be no data */
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CLOSE@%x (%d->%d)",
+ state->id, (unsigned int)header,
+ remoteport, localport);
+
+ mark_service_closing_internal(service, 1);
+
+ if (vchiq_close_service_internal(service,
+ 1/*close_recvd*/) == VCHIQ_RETRY)
+ goto bail_not_ready;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "Close Service %c%c%c%c s:%u d:%d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->localport,
+ service->remoteport);
+ break;
+ case VCHIQ_MSG_DATA:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs DATA@%x,%x (%d->%d)",
+ state->id, (unsigned int)header, size,
+ remoteport, localport);
+
+ if ((service->remoteport == remoteport)
+ && (service->srvstate ==
+ VCHIQ_SRVSTATE_OPEN)) {
+ header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
+ claim_slot(state->rx_info);
+ DEBUG_TRACE(PARSE_LINE);
+ if (make_service_callback(service,
+ VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
+ size);
+ } else {
+ VCHIQ_STATS_INC(state, error_count);
+ }
+ break;
+ case VCHIQ_MSG_CONNECT:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CONNECT@%x",
+ state->id, (unsigned int)header);
+ up(&state->connect);
+ break;
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX: {
+ VCHIQ_BULK_QUEUE_T *queue;
+ WARN_ON(!state->is_master);
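+			/* The message names the direction from the remote's
+			   point of view, so a BULK_RX request pairs with the
+			   local transmit queue and vice versa. */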
+ queue = (type == VCHIQ_MSG_BULK_RX) ?
+ &service->bulk_tx : &service->bulk_rx;
+ if ((service->remoteport == remoteport)
+ && (service->srvstate ==
+ VCHIQ_SRVSTATE_OPEN)) {
+ VCHIQ_BULK_T *bulk;
+ int resolved = 0;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (lmutex_lock_interruptible(
+ &service->bulk_mutex) != 0) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+
+ WARN_ON(!(queue->remote_insert < queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS));
+ bulk = &queue->bulks[
+ BULK_INDEX(queue->remote_insert)];
+ bulk->remote_data =
+ (void *)((int *)header->data)[0];
+ bulk->remote_size = ((int *)header->data)[1];
+ wmb();
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) %x@%x",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ bulk->remote_size,
+ (unsigned int)bulk->remote_data);
+
+ queue->remote_insert++;
+
+ if (atomic_read(&pause_bulks_count)) {
+ state->deferred_bulks++;
+ vchiq_log_info(vchiq_core_log_level,
+ "%s: deferring bulk (%d)",
+ __func__,
+ state->deferred_bulks);
+ if (state->conn_state !=
+ VCHIQ_CONNSTATE_PAUSE_SENT)
+ vchiq_log_error(
+ vchiq_core_log_level,
+ "%s: bulks paused in "
+ "unexpected state %s",
+ __func__,
+ conn_state_names[
+ state->conn_state]);
+ } else if (state->conn_state ==
+ VCHIQ_CONNSTATE_CONNECTED) {
+ DEBUG_TRACE(PARSE_LINE);
+ resolved = resolve_bulks(service,
+ queue);
+ }
+
+ lmutex_unlock(&service->bulk_mutex);
+ if (resolved)
+ notify_bulks(service, queue,
+ 1/*retry_poll*/);
+ }
+ } break;
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ WARN_ON(state->is_master);
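+			/* DONE notifications refer to this side's own queues,
+			   so there is no direction swap - record the actual
+			   length and complete the bulk. */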
+ if ((service->remoteport == remoteport)
+ && (service->srvstate !=
+ VCHIQ_SRVSTATE_FREE)) {
+ VCHIQ_BULK_QUEUE_T *queue;
+ VCHIQ_BULK_T *bulk;
+
+ queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ &service->bulk_rx : &service->bulk_tx;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (lmutex_lock_interruptible(
+ &service->bulk_mutex) != 0) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ if ((int)(queue->remote_insert -
+ queue->local_insert) >= 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) "
+ "unexpected (ri=%d,li=%d)",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ queue->remote_insert,
+ queue->local_insert);
+ lmutex_unlock(&service->bulk_mutex);
+ break;
+ }
+
+ BUG_ON(queue->process == queue->local_insert);
+ BUG_ON(queue->process != queue->remote_insert);
+
+ bulk = &queue->bulks[
+ BULK_INDEX(queue->remote_insert)];
+ bulk->actual = *(int *)header->data;
+ queue->remote_insert++;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) %x@%x",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ bulk->actual, (unsigned int)bulk->data);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs:%d %cx li=%x ri=%x p=%x",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ 'r' : 't',
+ queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ DEBUG_TRACE(PARSE_LINE);
+ WARN_ON(queue->process == queue->local_insert);
+ vchiq_complete_bulk(bulk);
+ queue->process++;
+ lmutex_unlock(&service->bulk_mutex);
+ DEBUG_TRACE(PARSE_LINE);
+ notify_bulks(service, queue, 1/*retry_poll*/);
+ DEBUG_TRACE(PARSE_LINE);
+ }
+ break;
+ case VCHIQ_MSG_PADDING:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PADDING@%x,%x",
+ state->id, (unsigned int)header, size);
+ break;
+ case VCHIQ_MSG_PAUSE:
+ /* If initiated, signal the application thread */
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PAUSE@%x,%x",
+ state->id, (unsigned int)header, size);
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: PAUSE received in state PAUSED",
+ state->id);
+ break;
+ }
+ if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
+ /* Send a PAUSE in response */
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
+ NULL, 0, 0, 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ if (state->is_master)
+ pause_bulks(state);
+ }
+ /* At this point slot_mutex is held */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
+ vchiq_platform_paused(state);
+ break;
+ case VCHIQ_MSG_RESUME:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs RESUME@%x,%x",
+ state->id, (unsigned int)header, size);
+ /* Release the slot mutex */
+ lmutex_unlock(&state->slot_mutex);
+ if (state->is_master)
+ resume_bulks(state);
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_resumed(state);
+ break;
+
+ case VCHIQ_MSG_REMOTE_USE:
+ vchiq_on_remote_use(state);
+ break;
+ case VCHIQ_MSG_REMOTE_RELEASE:
+ vchiq_on_remote_release(state);
+ break;
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE:
+ vchiq_on_remote_use_active(state);
+ break;
+
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs invalid msgid %x@%x,%x",
+ state->id, msgid, (unsigned int)header, size);
+ WARN(1, "invalid message\n");
+ break;
+ }
+
+skip_message:
+ if (service) {
+ unlock_service(service);
+ service = NULL;
+ }
+
+ state->rx_pos += calc_stride(size);
+
+ DEBUG_TRACE(PARSE_LINE);
+ /* Perform some housekeeping when the end of the slot is
+ ** reached. */
+ if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
+ /* Remove the extra reference count. */
+ release_slot(state, state->rx_info, NULL, NULL);
+ state->rx_data = NULL;
+ }
+ }
+
+bail_not_ready:
+ if (service)
+ unlock_service(service);
+}
+
+/* Called by the slot handler thread */
+int slot_handler_func(void *v);
+int
+slot_handler_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ DEBUG_INITIALISE(local)
+
+ while (1) {
+ DEBUG_COUNT(SLOT_HANDLER_COUNT);
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ remote_event_wait(&local->trigger);
+
+ rmb();
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ if (state->poll_needed) {
+ /* Check if we need to suspend - may change our
+ * conn_state */
+ vchiq_platform_check_suspend(state);
+
+ state->poll_needed = 0;
+
+ /* Handle service polling and other rare conditions here
+ ** out of the mainline code */
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Poll the services as requested */
+ poll_services(state);
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSING:
+ if (state->is_master)
+ pause_bulks(state);
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
+ NULL, 0, 0, 0) != VCHIQ_RETRY) {
+ vchiq_set_conn_state(state,
+ VCHIQ_CONNSTATE_PAUSE_SENT);
+ } else {
+ if (state->is_master)
+ resume_bulks(state);
+ /* Retry later */
+ state->poll_needed = 1;
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSED:
+ vchiq_platform_resume(state);
+ break;
+
+ case VCHIQ_CONNSTATE_RESUMING:
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
+ NULL, 0, 0, 0) != VCHIQ_RETRY) {
+ if (state->is_master)
+ resume_bulks(state);
+ vchiq_set_conn_state(state,
+ VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_resumed(state);
+ } else {
+ /* This should really be impossible,
+ ** since the PAUSE should have flushed
+ ** through outstanding messages. */
+ vchiq_log_error(vchiq_core_log_level,
+ "Failed to send RESUME "
+ "message");
+ BUG();
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
+ case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
+ vchiq_platform_handle_timeout(state);
+ break;
+ default:
+ break;
+ }
+
+
+ }
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ parse_rx_slots(state);
+ }
+ return 0;
+}
+
+
+/* Called by the recycle thread */
+int recycle_func(void *v);
+int
+recycle_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
+
+ while (1) {
+ remote_event_wait(&local->recycle);
+
+ process_free_queue(state);
+ }
+ return 0;
+}
+
+
+/* Called by the sync thread */
+int sync_func(void *v);
+int
+sync_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
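+	/* Synchronous messages always arrive in the peer's single sync
+	   slot, so the header location can be computed once. */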
+ VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+
+ while (1) {
+ VCHIQ_SERVICE_T *service;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ remote_event_wait(&local->sync_trigger);
+
+ rmb();
+
+ msgid = header->msgid;
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ service = find_service_by_port(state, localport);
+
+ if (!service) {
+ vchiq_log_error(vchiq_sync_log_level,
+ "%d: sf %s@%x (%d->%d) - "
+ "invalid/closed service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport, localport);
+ release_message_sync(state, header);
+ continue;
+ }
+
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ vchiq_log_trace(vchiq_sync_log_level,
+ "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
+ msg_type_str(type),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
+ min(64, size));
+ }
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
+ state->id, (unsigned int)header, size,
+ remoteport, localport, service->peer_version);
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_OPENSYNC);
+ up(&service->remove_event);
+ }
+ release_message_sync(state, header);
+ break;
+
+ case VCHIQ_MSG_DATA:
+ vchiq_log_trace(vchiq_sync_log_level,
+ "%d: sf DATA@%x,%x (%d->%d)",
+ state->id, (unsigned int)header, size,
+ remoteport, localport);
+
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate ==
+ VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (make_service_callback(service,
+ VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY)
+ vchiq_log_error(vchiq_sync_log_level,
+ "synchronous callback to "
+ "service %d returns "
+ "VCHIQ_RETRY",
+ localport);
+ }
+ break;
+
+ default:
+ vchiq_log_error(vchiq_sync_log_level,
+ "%d: sf unexpected msgid %x@%x,%x",
+ state->id, msgid, (unsigned int)header, size);
+ release_message_sync(state, header);
+ break;
+ }
+
+ unlock_service(service);
+ }
+
+ return 0;
+}
+
+
+static void
+init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
+{
+ queue->local_insert = 0;
+ queue->remote_insert = 0;
+ queue->process = 0;
+ queue->remote_notify = 0;
+ queue->remove = 0;
+}
+
+
+inline const char *
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
+{
+ return conn_state_names[conn_state];
+}
+
+
+VCHIQ_SLOT_ZERO_T *
+vchiq_init_slots(void *mem_base, int mem_size)
+{
+ int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
+ VCHIQ_SLOT_ZERO_T *slot_zero =
+ (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
+ int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
+ int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
+
+	/* Ensure there is enough memory to run an absolutely minimal system */
+ num_slots -= first_data_slot;
+
+ if (num_slots < 4) {
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_init_slots - insufficient memory %x bytes",
+ mem_size);
+ return NULL;
+ }
+
+ memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
+
+ slot_zero->magic = VCHIQ_MAGIC;
+ slot_zero->version = VCHIQ_VERSION;
+ slot_zero->version_min = VCHIQ_VERSION_MIN;
+ slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
+ slot_zero->slot_size = VCHIQ_SLOT_SIZE;
+ slot_zero->max_slots = VCHIQ_MAX_SLOTS;
+ slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
+
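+	/* Split the data slots evenly between the two sides; the first
+	   slot of each half is that side's sync slot. */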
+ slot_zero->master.slot_sync = first_data_slot;
+ slot_zero->master.slot_first = first_data_slot + 1;
+ slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
+ slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
+ slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
+ slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
+
+ return slot_zero;
+}
+
+VCHIQ_STATUS_T
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
+ int is_master)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_STATUS_T status;
+ char threadname[10];
+ static int id;
+ int i;
+
+ /* Check the input configuration */
+
+ if (slot_zero->magic != VCHIQ_MAGIC) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Invalid VCHIQ magic value found.");
+ vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
+ (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "local ver %d (min %d), remote ver %d.",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN,
+ slot_zero->version);
+
+ if (slot_zero->version < VCHIQ_VERSION_MIN) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
+ vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
+ "(minimum %d)",
+ (unsigned int)slot_zero, slot_zero->version,
+ VCHIQ_VERSION_MIN);
+ vchiq_loud_error("Restart with a newer VideoCore image.");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if (VCHIQ_VERSION < slot_zero->version_min) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
+ vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
+ "minimum %d)",
+ (unsigned int)slot_zero, VCHIQ_VERSION,
+ slot_zero->version_min);
+ vchiq_loud_error("Restart with a newer kernel.");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
+ (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
+ (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
+ (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
+ vchiq_loud_error_header();
+ if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
+ vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
+ "(expected %zx)",
+ (unsigned int)slot_zero,
+ slot_zero->slot_zero_size,
+ sizeof(VCHIQ_SLOT_ZERO_T));
+ if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
+ vchiq_loud_error("slot_zero=%x: slot_size=%d "
+			    "(expected %d)",
+ (unsigned int)slot_zero, slot_zero->slot_size,
+ VCHIQ_SLOT_SIZE);
+ if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
+ vchiq_loud_error("slot_zero=%x: max_slots=%d "
+ "(expected %d)",
+ (unsigned int)slot_zero, slot_zero->max_slots,
+ VCHIQ_MAX_SLOTS);
+ if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
+ vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
+ "(expected %d)",
+ (unsigned int)slot_zero,
+ slot_zero->max_slots_per_side,
+ VCHIQ_MAX_SLOTS_PER_SIDE);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if (is_master) {
+ local = &slot_zero->master;
+ remote = &slot_zero->slave;
+ } else {
+ local = &slot_zero->slave;
+ remote = &slot_zero->master;
+ }
+
+ if (local->initialised) {
+ vchiq_loud_error_header();
+ if (remote->initialised)
+ vchiq_loud_error("local state has already been "
+ "initialised");
+ else
+ vchiq_loud_error("master/slave mismatch - two %ss",
+ is_master ? "master" : "slave");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ memset(state, 0, sizeof(VCHIQ_STATE_T));
+
+ state->id = id++;
+ state->is_master = is_master;
+
+ /*
+ initialize shared state pointers
+ */
+
+ state->local = local;
+ state->remote = remote;
+ state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
+
+ /*
+ initialize events and mutexes
+ */
+
+ _sema_init(&state->connect, 0);
+ lmutex_init(&state->mutex);
+ _sema_init(&state->trigger_event, 0);
+ _sema_init(&state->recycle_event, 0);
+ _sema_init(&state->sync_trigger_event, 0);
+ _sema_init(&state->sync_release_event, 0);
+
+ lmutex_init(&state->slot_mutex);
+ lmutex_init(&state->recycle_mutex);
+ lmutex_init(&state->sync_mutex);
+ lmutex_init(&state->bulk_transfer_mutex);
+
+ _sema_init(&state->slot_available_event, 0);
+ _sema_init(&state->slot_remove_event, 0);
+ _sema_init(&state->data_quota_event, 0);
+
+ state->slot_queue_available = 0;
+
+ for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[i];
+ _sema_init(&service_quota->quota_event, 0);
+ }
+
+ for (i = local->slot_first; i <= local->slot_last; i++) {
+ local->slot_queue[state->slot_queue_available++] = i;
+ up(&state->slot_available_event);
+ }
+
+ state->default_slot_quota = state->slot_queue_available/2;
+ state->default_message_quota =
+ min((unsigned short)(state->default_slot_quota * 256),
+ (unsigned short)~0);
+
+ state->previous_data_index = -1;
+ state->data_use_count = 0;
+ state->data_quota = state->slot_queue_available - 1;
+
+ local->trigger.event = &state->trigger_event;
+ remote_event_create(&local->trigger);
+ local->tx_pos = 0;
+
+ local->recycle.event = &state->recycle_event;
+ remote_event_create(&local->recycle);
+ local->slot_queue_recycle = state->slot_queue_available;
+
+ local->sync_trigger.event = &state->sync_trigger_event;
+ remote_event_create(&local->sync_trigger);
+
+ local->sync_release.event = &state->sync_release_event;
+ remote_event_create(&local->sync_release);
+
+ /* At start-of-day, the slot is empty and available */
+ ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
+ = VCHIQ_MSGID_PADDING;
+ remote_event_signal_local(&local->sync_release);
+
+ local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
+
+ status = vchiq_platform_init_state(state);
+
+ /*
+ bring up slot handler thread
+ */
+ snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
+ state->slot_handler_thread = vchiq_thread_create(&slot_handler_func,
+ (void *)state,
+ threadname);
+
+ if (state->slot_handler_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->slot_handler_thread, -19);
+ wake_up_process(state->slot_handler_thread);
+
+ snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
+ state->recycle_thread = vchiq_thread_create(&recycle_func,
+ (void *)state,
+ threadname);
+ if (state->recycle_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->recycle_thread, -19);
+ wake_up_process(state->recycle_thread);
+
+ snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
+ state->sync_thread = vchiq_thread_create(&sync_func,
+ (void *)state,
+ threadname);
+ if (state->sync_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->sync_thread, -20);
+ wake_up_process(state->sync_thread);
+
+ BUG_ON(state->id >= VCHIQ_MAX_STATES);
+ vchiq_states[state->id] = state;
+
+ /* Indicate readiness to the other side */
+ local->initialised = 1;
+
+ return status;
+}
+
+/* Called from application thread when a client or server service is created. */
+VCHIQ_SERVICE_T *
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
+ VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
+{
+ VCHIQ_SERVICE_T *service;
+
+ service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
+ if (service) {
+ service->base.fourcc = params->fourcc;
+ service->base.callback = params->callback;
+ service->base.userdata = params->userdata;
+ service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
+ service->ref_count = 1;
+ service->srvstate = VCHIQ_SRVSTATE_FREE;
+ service->userdata_term = userdata_term;
+ service->localport = VCHIQ_PORT_FREE;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
+ VCHIQ_FOURCC_INVALID : params->fourcc;
+ service->client_id = 0;
+ service->auto_close = 1;
+ service->sync = 0;
+ service->closing = 0;
+ atomic_set(&service->poll_flags, 0);
+ service->version = params->version;
+ service->version_min = params->version_min;
+ service->state = state;
+ service->instance = instance;
+ service->service_use_count = 0;
+ init_bulk_queue(&service->bulk_tx);
+ init_bulk_queue(&service->bulk_rx);
+ _sema_init(&service->remove_event, 0);
+ _sema_init(&service->bulk_remove_event, 0);
+ lmutex_init(&service->bulk_mutex);
+ memset(&service->stats, 0, sizeof(service->stats));
+ } else {
+ vchiq_log_error(vchiq_core_log_level,
+ "Out of memory");
+ }
+
+ if (service) {
+ VCHIQ_SERVICE_T **pservice = NULL;
+ int i;
+
+ /* Although it is perfectly possible to use service_spinlock
+ ** to protect the creation of services, it is overkill as it
+ ** disables interrupts while the array is searched.
+ ** The only danger is of another thread trying to create a
+ ** service - service deletion is safe.
+ ** Therefore it is preferable to use state->mutex which,
+ ** although slower to claim, doesn't block interrupts while
+ ** it is held.
+ */
+
+ lmutex_lock(&state->mutex);
+
+ /* Prepare to use a previously unused service */
+ if (state->unused_service < VCHIQ_MAX_SERVICES)
+ pservice = &state->services[state->unused_service];
+
+ if (srvstate == VCHIQ_SRVSTATE_OPENING) {
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+ if (!srv) {
+ pservice = &state->services[i];
+ break;
+ }
+ }
+ } else {
+ for (i = (state->unused_service - 1); i >= 0; i--) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+ if (!srv)
+ pservice = &state->services[i];
+ else if ((srv->public_fourcc == params->fourcc)
+ && ((srv->instance != instance) ||
+ (srv->base.callback !=
+ params->callback))) {
+ /* There is another server using this
+ ** fourcc which doesn't match. */
+ pservice = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pservice) {
+ service->localport = (pservice - state->services);
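+			/* Handles encode the owning state and local port in
+			   the low bits; handle_seq adds a sequence count so
+			   a reused port yields a different handle. */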
+ if (!handle_seq)
+ handle_seq = VCHIQ_MAX_STATES *
+ VCHIQ_MAX_SERVICES;
+ service->handle = handle_seq |
+ (state->id * VCHIQ_MAX_SERVICES) |
+ service->localport;
+ handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
+ *pservice = service;
+ if (pservice == &state->services[state->unused_service])
+ state->unused_service++;
+ }
+
+ lmutex_unlock(&state->mutex);
+
+ if (!pservice) {
+ _sema_destroy(&service->remove_event);
+ _sema_destroy(&service->bulk_remove_event);
+ lmutex_destroy(&service->bulk_mutex);
+
+ kfree(service);
+ service = NULL;
+ }
+ }
+
+ if (service) {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[service->localport];
+ service_quota->slot_quota = state->default_slot_quota;
+ service_quota->message_quota = state->default_message_quota;
+ if (service_quota->slot_use_count == 0)
+ service_quota->previous_tx_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
+ - 1;
+
+ /* Bring this service online */
+ vchiq_set_service_state(service, srvstate);
+
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s Service %c%c%c%c SrcPort:%d",
+ (srvstate == VCHIQ_SRVSTATE_OPENING)
+ ? "Open" : "Add",
+ VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
+ service->localport);
+ }
+
+ /* Don't unlock the service - leave it with a ref_count of 1. */
+
+ return service;
+}
+
+VCHIQ_STATUS_T
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
+{
+ struct vchiq_open_payload payload = {
+ service->base.fourcc,
+ client_id,
+ service->version,
+ service->version_min
+ };
+ VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ service->client_id = client_id;
+ vchiq_use_service_internal(service);
+ status = queue_message(service->state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
+ &body, 1, sizeof(payload), 1);
+ if (status == VCHIQ_SUCCESS) {
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ vchiq_release_service_internal(service);
+ } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
+ (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: osi - srvstate = %s (ref %d)",
+ service->state->id,
+ srvstate_names[service->srvstate],
+ service->ref_count);
+ status = VCHIQ_ERROR;
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ vchiq_release_service_internal(service);
+ }
+ }
+ return status;
+}
+
+static void
+release_service_messages(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+ int slot_last = state->remote->slot_last;
+ int i;
+
+ /* Release any claimed messages */
+ for (i = state->remote->slot_first; i <= slot_last; i++) {
+ VCHIQ_SLOT_INFO_T *slot_info =
+ SLOT_INFO_FROM_INDEX(state, i);
+ if (slot_info->release_count != slot_info->use_count) {
+ char *data =
+ (char *)SLOT_DATA_FROM_INDEX(state, i);
+ unsigned int pos, end;
+
+ end = VCHIQ_SLOT_SIZE;
+ if (data == state->rx_data)
+ /* This buffer is still being read from - stop
+ ** at the current read position */
+ end = state->rx_pos & VCHIQ_SLOT_MASK;
+
+ pos = 0;
+
+ while (pos < end) {
+ VCHIQ_HEADER_T *header =
+ (VCHIQ_HEADER_T *)(data + pos);
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_DSTPORT(msgid);
+ if ((port == service->localport) &&
+ (msgid & VCHIQ_MSGID_CLAIMED)) {
+ vchiq_log_info(vchiq_core_log_level,
+ " fsi - hdr %x",
+ (unsigned int)header);
+ release_slot(state, slot_info, header,
+ NULL);
+ }
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "fsi - pos %x: header %x, "
+ "msgid %x, header->msgid %x, "
+ "header->size %x",
+ pos, (unsigned int)header,
+ msgid, header->msgid,
+ header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+ }
+ }
+}
+
+static int
+do_abort_bulks(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATUS_T status;
+
+ /* Abort any outstanding bulk transfers */
+ if (lmutex_lock_interruptible(&service->bulk_mutex) != 0)
+ return 0;
+ abort_outstanding_bulks(service, &service->bulk_tx);
+ abort_outstanding_bulks(service, &service->bulk_rx);
+ lmutex_unlock(&service->bulk_mutex);
+
+ status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
+ if (status == VCHIQ_SUCCESS)
+ status = notify_bulks(service, &service->bulk_rx,
+ 0/*!retry_poll*/);
+ return (status == VCHIQ_SUCCESS);
+}
+
+static VCHIQ_STATUS_T
+close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
+{
+ VCHIQ_STATUS_T status;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+ int newstate;
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (is_server) {
+ if (service->auto_close) {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ newstate = VCHIQ_SRVSTATE_LISTENING;
+ } else
+ newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
+ } else
+ newstate = VCHIQ_SRVSTATE_CLOSED;
+ vchiq_set_service_state(service, newstate);
+ break;
+ case VCHIQ_SRVSTATE_LISTENING:
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "close_service_complete(%x) called in state %s",
+ service->handle, srvstate_names[service->srvstate]);
+ WARN(1, "close_service_complete in unexpected state\n");
+ return VCHIQ_ERROR;
+ }
+
+ status = make_service_callback(service,
+ VCHIQ_SERVICE_CLOSED, NULL, NULL);
+
+ if (status != VCHIQ_RETRY) {
+ int uc = service->service_use_count;
+ int i;
+ /* Complete the close process */
+ for (i = 0; i < uc; i++)
+ /* cater for cases where close is forced and the
+			** client may not close all its handles */
+ vchiq_release_service_internal(service);
+
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
+ vchiq_free_service_internal(service);
+ else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
+ if (is_server)
+ service->closing = 0;
+
+ up(&service->remove_event);
+ }
+ } else
+ vchiq_set_service_state(service, failstate);
+
+ return status;
+}
+
+/* Called by the slot handler */
+VCHIQ_STATUS_T
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
+{
+ VCHIQ_STATE_T *state = service->state;
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+
+ vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
+ service->state->id, service->localport, close_recvd,
+ srvstate_names[service->srvstate]);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ if (close_recvd)
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_close_service_internal(1) called "
+ "in state %s",
+ srvstate_names[service->srvstate]);
+ else if (is_server) {
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ status = VCHIQ_ERROR;
+ } else {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ if (service->srvstate ==
+ VCHIQ_SRVSTATE_CLOSEWAIT)
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ }
+ up(&service->remove_event);
+ } else
+ vchiq_free_service_internal(service);
+ break;
+ case VCHIQ_SRVSTATE_OPENING:
+ if (close_recvd) {
+ /* The open was rejected - tell the user */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_CLOSEWAIT);
+ up(&service->remove_event);
+ } else {
+ /* Shutdown mid-open - let the other side know */
+ status = queue_message(state, service,
+ VCHIQ_MAKE_MSG
+ (VCHIQ_MSG_CLOSE,
+ service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
+ NULL, 0, 0, 0);
+ }
+ break;
+
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ lmutex_lock(&state->sync_mutex);
+ /* Drop through */
+
+ case VCHIQ_SRVSTATE_OPEN:
+ if (state->is_master || close_recvd) {
+ if (!do_abort_bulks(service))
+ status = VCHIQ_RETRY;
+ }
+
+ release_service_messages(service);
+
+ if (status == VCHIQ_SUCCESS)
+ status = queue_message(state, service,
+ VCHIQ_MAKE_MSG
+ (VCHIQ_MSG_CLOSE,
+ service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
+ NULL, 0, 0, 0);
+
+ if (status == VCHIQ_SUCCESS) {
+ if (!close_recvd)
+ break;
+ } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
+ lmutex_unlock(&state->sync_mutex);
+ break;
+ } else
+ break;
+
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ if (!close_recvd)
+ /* This happens when a process is killed mid-close */
+ break;
+
+ if (!state->is_master) {
+ if (!do_abort_bulks(service)) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+ }
+
+ if (status == VCHIQ_SUCCESS)
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (!close_recvd && is_server)
+ /* Force into LISTENING mode */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_close_service_internal(%d) called in state %s",
+ close_recvd, srvstate_names[service->srvstate]);
+ break;
+ }
+
+ return status;
+}
+
+/* Called from the application process upon process death */
+void
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+
+ vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
+ state->id, service->localport, service->remoteport);
+
+ mark_service_closing(service);
+
+ /* Mark the service for removal by the slot handler */
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
+}
+
+/* Called from the slot handler */
+void
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+
+ vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
+ state->id, service->localport);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPENING:
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: fsi - (%d) in state %s",
+ state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ return;
+ }
+
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
+
+ up(&service->remove_event);
+
+ /* Release the initial lock */
+ unlock_service(service);
+}
+
+VCHIQ_STATUS_T
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+ /* Find all services registered to this client and enable them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ unlock_service(service);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
+ 0, 1) == VCHIQ_RETRY)
+ return VCHIQ_RETRY;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
+ if (down_interruptible(&state->connect) != 0)
+ return VCHIQ_RETRY;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ up(&state->connect);
+ }
+
+ return VCHIQ_SUCCESS;
+}
+
+VCHIQ_STATUS_T
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+	/* Find all services registered to this client and remove them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ (void)vchiq_remove_service(service->handle);
+ unlock_service(service);
+ }
+
+ return VCHIQ_SUCCESS;
+}
+
+VCHIQ_STATUS_T
+vchiq_pause_internal(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Request a pause */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
+ request_poll(state, NULL, 0);
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_pause_internal in state %s\n",
+ conn_state_names[state->conn_state]);
+ status = VCHIQ_ERROR;
+ VCHIQ_STATS_INC(state, error_count);
+ break;
+ }
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_resume_internal(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
+ request_poll(state, NULL, 0);
+ } else {
+ status = VCHIQ_ERROR;
+ VCHIQ_STATS_INC(state, error_count);
+ }
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ /* Unregister the service */
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (!service)
+ return VCHIQ_ERROR;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: close_service:%d",
+ service->state->id, service->localport);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
+ unlock_service(service);
+ return VCHIQ_ERROR;
+ }
+
+ mark_service_closing(service);
+
+ if (current == service->state->slot_handler_thread) {
+ status = vchiq_close_service_internal(service,
+ 0/*!close_recvd*/);
+ BUG_ON(status == VCHIQ_RETRY);
+ } else {
+ /* Mark the service for termination by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
+ }
+
+ while (1) {
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: close_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if ((status == VCHIQ_SUCCESS) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
+ status = VCHIQ_ERROR;
+
+ unlock_service(service);
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ /* Unregister the service */
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (!service)
+ return VCHIQ_ERROR;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: remove_service:%d",
+ service->state->id, service->localport);
+
+ if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
+ unlock_service(service);
+ return VCHIQ_ERROR;
+ }
+
+ mark_service_closing(service);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (current == service->state->slot_handler_thread)) {
+ /* Make it look like a client, because it must be removed and
+ not left in the LISTENING state. */
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
+
+ status = vchiq_close_service_internal(service,
+ 0/*!close_recvd*/);
+ BUG_ON(status == VCHIQ_RETRY);
+ } else {
+ /* Mark the service for removal by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_REMOVE);
+ }
+ while (1) {
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: remove_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if ((status == VCHIQ_SUCCESS) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE))
+ status = VCHIQ_ERROR;
+
+ unlock_service(service);
+
+ return status;
+}
+
+
+/* This function may be called by kernel threads or user threads.
+ * User threads may receive VCHIQ_RETRY to indicate that a signal has been
+ * received and the call should be retried after being returned to user
+ * context.
+ * When called in blocking mode, the userdata field points to a bulk_waiter
+ * structure.
+ */
+VCHIQ_STATUS_T
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_BULK_QUEUE_T *queue;
+ VCHIQ_BULK_T *bulk;
+ VCHIQ_STATE_T *state;
+ struct bulk_waiter *bulk_waiter = NULL;
+ const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+ const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ if (!service ||
+ (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
+ ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
+ goto error_exit;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ bulk_waiter = (struct bulk_waiter *)userdata;
+ _sema_init(&bulk_waiter->event, 0);
+ bulk_waiter->actual = 0;
+ bulk_waiter->bulk = NULL;
+ break;
+ case VCHIQ_BULK_MODE_WAITING:
+ bulk_waiter = (struct bulk_waiter *)userdata;
+ bulk = bulk_waiter->bulk;
+ goto waiting;
+ default:
+ goto error_exit;
+ }
+
+ state = service->state;
+
+ queue = (dir == VCHIQ_BULK_TRANSMIT) ?
+ &service->bulk_tx : &service->bulk_rx;
+
+ if (lmutex_lock_interruptible(&service->bulk_mutex) != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
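+		/* The bulk queue is full - drop the mutex and wait on
+		   bulk_remove_event for an entry to be retired before
+		   trying again. */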
+ do {
+ lmutex_unlock(&service->bulk_mutex);
+ if (down_interruptible(&service->bulk_remove_event)
+ != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+ if (lmutex_lock_interruptible(&service->bulk_mutex)
+ != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+ } while (queue->local_insert == queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS);
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+ bulk->mode = mode;
+ bulk->dir = dir;
+ bulk->userdata = userdata;
+ bulk->size = size;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+ if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
+ VCHIQ_SUCCESS)
+ goto unlock_error_exit;
+
+ wmb();
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: bt (%d->%d) %cx %x@%x %x",
+ state->id,
+ service->localport, service->remoteport, dir_char,
+ size, (unsigned int)bulk->data, (unsigned int)userdata);
+
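+	/* A master resolves the transfer locally; a slave must describe the
+	 * buffer to the peer with a BULK_RX/BULK_TX message. */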
+ if (state->is_master) {
+ queue->local_insert++;
+ if (resolve_bulks(service, queue))
+ request_poll(state, service,
+ (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+ } else {
+ int payload[2] = { (int)bulk->data, bulk->size };
+ VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
+
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport, service->remoteport),
+ &element, 1, sizeof(payload), 1);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_complete_bulk(bulk);
+ goto unlock_error_exit;
+ }
+ queue->local_insert++;
+ }
+
+ lmutex_unlock(&service->bulk_mutex);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: bt:%d %cx li=%x ri=%x p=%x",
+ state->id,
+ service->localport, dir_char,
+ queue->local_insert, queue->remote_insert, queue->process);
+
+waiting:
+ unlock_service(service);
+
+ status = VCHIQ_SUCCESS;
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+ if (down_interruptible(&bulk_waiter->event) != 0)
+ status = VCHIQ_RETRY;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = VCHIQ_ERROR;
+ }
+
+ return status;
+
+unlock_error_exit:
+ lmutex_unlock(&service->bulk_mutex);
+
+error_exit:
+ if (service)
+ unlock_service(service);
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ const VCHIQ_ELEMENT_T *elements, unsigned int count)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ unsigned int size = 0;
+ unsigned int i;
+
+ if (!service ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
+ goto error_exit;
+
+ for (i = 0; i < (unsigned int)count; i++) {
+ if (elements[i].size) {
+ if (elements[i].data == NULL) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+ size += elements[i].size;
+ }
+ }
+
+ if (size > VCHIQ_MAX_MSG_SIZE) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ status = queue_message(service->state, service,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
+ service->localport,
+ service->remoteport),
+ elements, count, size, 1);
+ break;
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ status = queue_message_sync(service->state, service,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
+ service->localport,
+ service->remoteport),
+ elements, count, size, 1);
+ break;
+ default:
+ status = VCHIQ_ERROR;
+ break;
+ }
+
+error_exit:
+ if (service)
+ unlock_service(service);
+
+ return status;
+}
+
+void
+vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_STATE_T *state;
+ int slot_index;
+
+ if (!service)
+ return;
+
+ state = service->state;
+ remote = state->remote;
+
+ slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
+
+ if ((slot_index >= remote->slot_first) &&
+ (slot_index <= remote->slot_last)) {
+ int msgid = header->msgid;
+ if (msgid & VCHIQ_MSGID_CLAIMED) {
+ VCHIQ_SLOT_INFO_T *slot_info =
+ SLOT_INFO_FROM_INDEX(state, slot_index);
+
+ release_slot(state, slot_info, header, service);
+ }
+ } else if (slot_index == remote->slot_sync)
+ release_message_sync(state, header);
+
+ unlock_service(service);
+}
+
+static void
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
+{
+ header->msgid = VCHIQ_MSGID_PADDING;
+ wmb();
+ remote_event_signal(&state->remote->sync_release);
+}
+
+VCHIQ_STATUS_T
+vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+
+ if (!service ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
+ !peer_version)
+ goto exit;
+ *peer_version = service->peer_version;
+ status = VCHIQ_SUCCESS;
+
+exit:
+ if (service)
+ unlock_service(service);
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_get_config(VCHIQ_INSTANCE_T instance,
+ int config_size, VCHIQ_CONFIG_T *pconfig)
+{
+ VCHIQ_CONFIG_T config;
+
+ (void)instance;
+
+ config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
+ config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
+ config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
+ config.max_services = VCHIQ_MAX_SERVICES;
+ config.version = VCHIQ_VERSION;
+ config.version_min = VCHIQ_VERSION_MIN;
+
+ if (config_size > sizeof(VCHIQ_CONFIG_T))
+ return VCHIQ_ERROR;
+
+ memcpy(pconfig, &config,
+ min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
+
+ return VCHIQ_SUCCESS;
+}
+
+VCHIQ_STATUS_T
+vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHIQ_SERVICE_OPTION_T option, int value)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ if (service) {
+ switch (option) {
+ case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
+ service->auto_close = value;
+ status = VCHIQ_SUCCESS;
+ break;
+
+ case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[
+ service->localport];
+ if (value == 0)
+ value = service->state->default_slot_quota;
+ if ((value >= service_quota->slot_use_count) &&
+ (value < (unsigned short)~0)) {
+ service_quota->slot_quota = value;
+ if ((value >= service_quota->slot_use_count) &&
+ (service_quota->message_quota >=
+ service_quota->message_use_count)) {
+ /* Signal the service that it may have
+ ** dropped below its quota */
+ up(&service_quota->quota_event);
+ }
+ status = VCHIQ_SUCCESS;
+ }
+ } break;
+
+ case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[
+ service->localport];
+ if (value == 0)
+ value = service->state->default_message_quota;
+ if ((value >= service_quota->message_use_count) &&
+ (value < (unsigned short)~0)) {
+ service_quota->message_quota = value;
+ if ((value >=
+ service_quota->message_use_count) &&
+ (service_quota->slot_quota >=
+ service_quota->slot_use_count))
+ /* Signal the service that it may have
+ ** dropped below its quota */
+ up(&service_quota->quota_event);
+ status = VCHIQ_SUCCESS;
+ }
+ } break;
+
+ case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (service->srvstate ==
+ VCHIQ_SRVSTATE_LISTENING)) {
+ service->sync = value;
+ status = VCHIQ_SUCCESS;
+ }
+ break;
+
+ default:
+ break;
+ }
+ unlock_service(service);
+ }
+
+ return status;
+}
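As an illustration of the option handling above, a client holding a not-yet-opened service handle (placeholder name) could mark it synchronous; per the state check above this only succeeds while the service is still HIDDEN or LISTENING:

	if (vchiq_set_service_option(handle,
	    VCHIQ_SERVICE_OPTION_SYNCHRONOUS, 1) != VCHIQ_SUCCESS)
		printf("vchiq: service cannot be made synchronous\n");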
+
+static void
+vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
+ VCHIQ_SHARED_STATE_T *shared, const char *label)
+{
+ static const char *const debug_names[] = {
+ "<entries>",
+ "SLOT_HANDLER_COUNT",
+ "SLOT_HANDLER_LINE",
+ "PARSE_LINE",
+ "PARSE_HEADER",
+ "PARSE_MSGID",
+ "AWAIT_COMPLETION_LINE",
+ "DEQUEUE_MESSAGE_LINE",
+ "SERVICE_CALLBACK_LINE",
+ "MSG_QUEUE_FULL_COUNT",
+ "COMPLETION_QUEUE_FULL_COUNT"
+ };
+ int i;
+
+ char buf[80];
+ int len;
+ len = snprintf(buf, sizeof(buf),
+ " %s: slots %d-%d tx_pos=%x recycle=%x",
+ label, shared->slot_first, shared->slot_last,
+ shared->tx_pos, shared->slot_queue_recycle);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Slots claimed:");
+ vchiq_dump(dump_context, buf, len + 1);
+
+ for (i = shared->slot_first; i <= shared->slot_last; i++) {
+ VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
+ if (slot_info.use_count != slot_info.release_count) {
+ len = snprintf(buf, sizeof(buf),
+ " %d: %d/%d", i, slot_info.use_count,
+ slot_info.release_count);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+ }
+
+ for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
+ len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
+ debug_names[i], shared->debug[i], shared->debug[i]);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+}
+
+void
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
+{
+ char buf[80];
+ int len;
+ int i;
+
+ len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
+ conn_state_names[state->conn_state]);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " tx_pos=%x(@%x), rx_pos=%x(@%x)",
+ state->local->tx_pos,
+ (uint32_t)state->tx_data +
+ (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->rx_pos,
+ (uint32_t)state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Version: %d (min %d)",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ if (VCHIQ_ENABLE_STATS) {
+ len = snprintf(buf, sizeof(buf),
+ " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
+ "error_count=%d",
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+ state->stats.error_count);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+
+ len = snprintf(buf, sizeof(buf),
+ " Slots: %d available (%d data), %d recyclable, %d stalls "
+ "(%d data)",
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+ state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+ state->data_quota - state->data_use_count,
+ state->local->slot_queue_recycle - state->slot_queue_available,
+ state->stats.slot_stalls, state->stats.data_stalls);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ vchiq_dump_platform_state(dump_context);
+
+ vchiq_dump_shared_state(dump_context, state, state->local, "Local");
+ vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
+
+ vchiq_dump_platform_instances(dump_context);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
+
+ if (service) {
+ vchiq_dump_service_state(dump_context, service);
+ unlock_service(service);
+ }
+ }
+}
+
+void
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
+{
+ char buf[120];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
+ service->localport, srvstate_names[service->srvstate],
+ service->ref_count - 1); /* Don't include the lock just taken */
+
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
+ char remoteport[30];
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[service->localport];
+ int fourcc = service->base.fourcc;
+ int tx_pending, rx_pending;
+ if (service->remoteport != VCHIQ_PORT_FREE) {
+ int len2 = snprintf(remoteport, sizeof(remoteport),
+ "%d", service->remoteport);
+ if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
+ snprintf(remoteport + len2,
+ sizeof(remoteport) - len2,
+ " (client %8x)", service->client_id);
+ } else
+ strcpy(remoteport, "n/a");
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ remoteport,
+ service_quota->message_use_count,
+ service_quota->message_quota,
+ service_quota->slot_use_count,
+ service_quota->slot_quota);
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ tx_pending = service->bulk_tx.local_insert -
+ service->bulk_tx.remote_insert;
+
+ rx_pending = service->bulk_rx.local_insert -
+ service->bulk_rx.remote_insert;
+
+ len = snprintf(buf, sizeof(buf),
+ " Bulk: tx_pending=%d (size %d),"
+ " rx_pending=%d (size %d)",
+ tx_pending,
+ tx_pending ? service->bulk_tx.bulks[
+ BULK_INDEX(service->bulk_tx.remove)].size : 0,
+ rx_pending,
+ rx_pending ? service->bulk_rx.bulks[
+ BULK_INDEX(service->bulk_rx.remove)].size : 0);
+
+ if (VCHIQ_ENABLE_STATS) {
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Ctrl: tx_count=%d, tx_bytes=%llu, "
+ "rx_count=%d, rx_bytes=%llu",
+ service->stats.ctrl_tx_count,
+ service->stats.ctrl_tx_bytes,
+ service->stats.ctrl_rx_count,
+ service->stats.ctrl_rx_bytes);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Bulk: tx_count=%d, tx_bytes=%llu, "
+ "rx_count=%d, rx_bytes=%llu",
+ service->stats.bulk_tx_count,
+ service->stats.bulk_tx_bytes,
+ service->stats.bulk_rx_count,
+ service->stats.bulk_rx_bytes);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " %d quota stalls, %d slot stalls, "
+ "%d bulk stalls, %d aborted, %d errors",
+ service->stats.quota_stalls,
+ service->stats.slot_stalls,
+ service->stats.bulk_stalls,
+ service->stats.bulk_aborted_count,
+ service->stats.error_count);
+ }
+ }
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE)
+ vchiq_dump_platform_service_state(dump_context, service);
+}
+
+
+void
+vchiq_loud_error_header(void)
+{
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level, "=====");
+}
+
+void
+vchiq_loud_error_footer(void)
+{
+ vchiq_log_error(vchiq_core_log_level, "=====");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+}
+
+
+VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
+ size_t numBytes)
+{
+ const uint8_t *mem = (const uint8_t *)voidMem;
+ size_t offset;
+ char lineBuf[100];
+ char *s;
+
+ while (numBytes > 0) {
+ s = lineBuf;
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < numBytes)
+ s += snprintf(s, 4, "%02x ", mem[offset]);
+ else
+ s += snprintf(s, 4, " ");
+ }
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < numBytes) {
+ uint8_t ch = mem[offset];
+
+ if ((ch < ' ') || (ch > '~'))
+ ch = '.';
+ *s++ = (char)ch;
+ }
+ }
+ *s++ = '\0';
+
+ if ((label != NULL) && (*label != '\0'))
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
+ "%s: %08x: %s", label, addr, lineBuf);
+ else
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
+ "%08x: %s", addr, lineBuf);
+
+ addr += 16;
+ mem += 16;
+ if (numBytes > 16)
+ numBytes -= 16;
+ else
+ numBytes = 0;
+ }
+}
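A usage sketch for the dump helper above (header and its address are placeholders): each emitted trace line carries the label, the address, up to 16 hex bytes and their printable ASCII, advancing 16 bytes per line.

	/* Dump the first 32 bytes of a received message header. */
	vchiq_log_dump_mem("Rcvd", (uint32_t)(uintptr_t)header, header, 32);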
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.h
new file mode 100644
index 000000000000..fb50e85f2d88
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_core.h
@@ -0,0 +1,710 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CORE_H
+#define VCHIQ_CORE_H
+
+#include <interface/compat/vchi_bsd.h>
+#include <interface/compat/list.h>
+
+#include "vchiq_cfg.h"
+
+#include "vchiq.h"
+
+/* Run time control of log level, based on KERN_XXX level. */
+#ifndef VCHIQ_LOG_DEFAULT
+#define VCHIQ_LOG_DEFAULT 4
+#endif
+#define VCHIQ_LOG_ERROR 3
+#define VCHIQ_LOG_WARNING 4
+#define VCHIQ_LOG_INFO 6
+#define VCHIQ_LOG_TRACE 7
+
+#define VCHIQ_LOG_PREFIX "vchiq: "
+
+#ifndef vchiq_log_error
+#define vchiq_log_error(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_ERROR) \
+ printf(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_warning
+#define vchiq_log_warning(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_WARNING) \
+ printf(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_info
+#define vchiq_log_info(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_INFO) \
+ printf(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_trace
+#define vchiq_log_trace(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_TRACE) \
+ printf(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+
+#define vchiq_loud_error(...) \
+ vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
+
+#ifndef vchiq_static_assert
+#define vchiq_static_assert(cond) __attribute__((unused)) \
+ extern int vchiq_static_assert[(cond) ? 1 : -1]
+#endif
+
+#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
+
+/* Ensure that the slot size and maximum number of slots are powers of 2 */
+vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
+
+#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
+#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
+#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
+ VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
+
+#define VCHIQ_MSG_PADDING 0 /* - */
+#define VCHIQ_MSG_CONNECT 1 /* - */
+#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
+#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
+#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
+#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
+#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_PAUSE 10 /* - */
+#define VCHIQ_MSG_RESUME 11 /* - */
+#define VCHIQ_MSG_REMOTE_USE 12 /* - */
+#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
+#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
+
+#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
+#define VCHIQ_PORT_FREE 0x1000
+#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
+#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
+ ((type<<24) | (srcport<<12) | (dstport<<0))
+#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
+#define VCHIQ_MSG_SRCPORT(msgid) \
+ (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
+#define VCHIQ_MSG_DSTPORT(msgid) \
+ ((unsigned short)msgid & 0xfff)
+
+#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
+ ((fourcc) >> 24) & 0xff, \
+ ((fourcc) >> 16) & 0xff, \
+ ((fourcc) >> 8) & 0xff, \
+ (fourcc) & 0xff
+
+/* Ensure the fields are wide enough */
+vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
+ == 0);
+vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
+vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
+ (unsigned int)VCHIQ_PORT_FREE);
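A worked example of the packing that the asserts above guard: the message type occupies the top 8 bits and each port 12 bits.

	/* VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7)
	 *	== (5 << 24) | (3 << 12) | 7 == 0x05003007
	 * VCHIQ_MSG_TYPE(0x05003007)	 == 5 (VCHIQ_MSG_DATA)
	 * VCHIQ_MSG_SRCPORT(0x05003007) == 3
	 * VCHIQ_MSG_DSTPORT(0x05003007) == 7
	 */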
+
+#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
+#define VCHIQ_MSGID_CLAIMED 0x40000000
+
+#define VCHIQ_FOURCC_INVALID 0x00000000
+#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
+
+#define VCHIQ_BULK_ACTUAL_ABORTED -1
+
+typedef uint32_t BITSET_T;
+
+vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
+
+#define BITSET_SIZE(b) ((b + 31) >> 5)
+#define BITSET_WORD(b) (b >> 5)
+#define BITSET_BIT(b) (1 << (b & 31))
+#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
+#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
+#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
+#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
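Worked example for the bit-set macros above, e.g. when marking service index 33 in a poll_services-style bit set:

	/* BITSET_WORD(33) == 33 >> 5        == 1
	 * BITSET_BIT(33)  == 1 << (33 & 31) == 0x2
	 * BITSET_SET(bs, 33)    does bs[1] |= 0x2
	 * BITSET_IS_SET(bs, 33) tests that same bit
	 */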
+
+#if VCHIQ_ENABLE_STATS
+#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
+ (service->stats. stat += addend)
+#else
+#define VCHIQ_STATS_INC(state, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
+#endif
+
+enum {
+ DEBUG_ENTRIES,
+#if VCHIQ_ENABLE_DEBUG
+ DEBUG_SLOT_HANDLER_COUNT,
+ DEBUG_SLOT_HANDLER_LINE,
+ DEBUG_PARSE_LINE,
+ DEBUG_PARSE_HEADER,
+ DEBUG_PARSE_MSGID,
+ DEBUG_AWAIT_COMPLETION_LINE,
+ DEBUG_DEQUEUE_MESSAGE_LINE,
+ DEBUG_SERVICE_CALLBACK_LINE,
+ DEBUG_MSG_QUEUE_FULL_COUNT,
+ DEBUG_COMPLETION_QUEUE_FULL_COUNT,
+#endif
+ DEBUG_MAX
+};
+
+#if VCHIQ_ENABLE_DEBUG
+
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
+#define DEBUG_TRACE(d) \
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
+#define DEBUG_VALUE(d, v) \
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
+#define DEBUG_COUNT(d) \
+ do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
+
+#else /* VCHIQ_ENABLE_DEBUG */
+
+#define DEBUG_INITIALISE(local)
+#define DEBUG_TRACE(d)
+#define DEBUG_VALUE(d, v)
+#define DEBUG_COUNT(d)
+
+#endif /* VCHIQ_ENABLE_DEBUG */
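When VCHIQ_ENABLE_DEBUG is set, a hypothetical use inside the slot handler expands as shown in the comments; the resulting counters are what vchiq_dump_shared_state prints under the debug_names table.

	DEBUG_INITIALISE(state->local)	 /* int *debug_ptr = (state->local)->debug; */
	DEBUG_TRACE(PARSE_LINE);	 /* debug_ptr[DEBUG_PARSE_LINE] = __LINE__; dsb(); */
	DEBUG_COUNT(SLOT_HANDLER_COUNT); /* debug_ptr[DEBUG_SLOT_HANDLER_COUNT]++; dsb(); */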
+
+typedef enum {
+ VCHIQ_CONNSTATE_DISCONNECTED,
+ VCHIQ_CONNSTATE_CONNECTING,
+ VCHIQ_CONNSTATE_CONNECTED,
+ VCHIQ_CONNSTATE_PAUSING,
+ VCHIQ_CONNSTATE_PAUSE_SENT,
+ VCHIQ_CONNSTATE_PAUSED,
+ VCHIQ_CONNSTATE_RESUMING,
+ VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
+ VCHIQ_CONNSTATE_RESUME_TIMEOUT
+} VCHIQ_CONNSTATE_T;
+
+enum {
+ VCHIQ_SRVSTATE_FREE,
+ VCHIQ_SRVSTATE_HIDDEN,
+ VCHIQ_SRVSTATE_LISTENING,
+ VCHIQ_SRVSTATE_OPENING,
+ VCHIQ_SRVSTATE_OPEN,
+ VCHIQ_SRVSTATE_OPENSYNC,
+ VCHIQ_SRVSTATE_CLOSESENT,
+ VCHIQ_SRVSTATE_CLOSERECVD,
+ VCHIQ_SRVSTATE_CLOSEWAIT,
+ VCHIQ_SRVSTATE_CLOSED
+};
+
+enum {
+ VCHIQ_POLL_TERMINATE,
+ VCHIQ_POLL_REMOVE,
+ VCHIQ_POLL_TXNOTIFY,
+ VCHIQ_POLL_RXNOTIFY,
+ VCHIQ_POLL_COUNT
+};
+
+typedef enum {
+ VCHIQ_BULK_TRANSMIT,
+ VCHIQ_BULK_RECEIVE
+} VCHIQ_BULK_DIR_T;
+
+typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
+
+typedef struct vchiq_bulk_struct {
+ short mode;
+ short dir;
+ void *userdata;
+ VCHI_MEM_HANDLE_T handle;
+ void *data;
+ int size;
+ void *remote_data;
+ int remote_size;
+ int actual;
+} VCHIQ_BULK_T;
+
+typedef struct vchiq_bulk_queue_struct {
+ int local_insert; /* Where to insert the next local bulk */
+ int remote_insert; /* Where to insert the next remote bulk (master) */
+ int process; /* Bulk to transfer next */
+ int remote_notify; /* Bulk to notify the remote client of next (mstr) */
+ int remove; /* Bulk to notify the local client of, and remove,
+ ** next */
+ VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
+} VCHIQ_BULK_QUEUE_T;
+
+typedef struct remote_event_struct {
+ int armed;
+ int fired;
+ struct semaphore *event;
+} REMOTE_EVENT_T;
+
+typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
+
+typedef struct vchiq_state_struct VCHIQ_STATE_T;
+
+typedef struct vchiq_slot_struct {
+ char data[VCHIQ_SLOT_SIZE];
+} VCHIQ_SLOT_T;
+
+typedef struct vchiq_slot_info_struct {
+ /* Use two counters rather than one to avoid the need for a mutex. */
+ short use_count;
+ short release_count;
+} VCHIQ_SLOT_INFO_T;
+
+typedef struct vchiq_service_struct {
+ VCHIQ_SERVICE_BASE_T base;
+ VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int ref_count;
+ int srvstate;
+ VCHIQ_USERDATA_TERM_T userdata_term;
+ unsigned int localport;
+ unsigned int remoteport;
+ int public_fourcc;
+ int client_id;
+ char auto_close;
+ char sync;
+ char closing;
+ atomic_t poll_flags;
+ short version;
+ short version_min;
+ short peer_version;
+
+ VCHIQ_STATE_T *state;
+ VCHIQ_INSTANCE_T instance;
+
+ int service_use_count;
+
+ VCHIQ_BULK_QUEUE_T bulk_tx;
+ VCHIQ_BULK_QUEUE_T bulk_rx;
+
+ struct semaphore remove_event;
+ struct semaphore bulk_remove_event;
+ struct mutex bulk_mutex;
+
+ struct service_stats_struct {
+ int quota_stalls;
+ int slot_stalls;
+ int bulk_stalls;
+ int error_count;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int bulk_tx_count;
+ int bulk_rx_count;
+ int bulk_aborted_count;
+ uint64_t ctrl_tx_bytes;
+ uint64_t ctrl_rx_bytes;
+ uint64_t bulk_tx_bytes;
+ uint64_t bulk_rx_bytes;
+ } stats;
+} VCHIQ_SERVICE_T;
+
+/* The quota information is outside VCHIQ_SERVICE_T so that it can be
+ statically allocated, since for accounting reasons a service's slot
+ usage is carried over between users of the same port number.
+ */
+typedef struct vchiq_service_quota_struct {
+ unsigned short slot_quota;
+ unsigned short slot_use_count;
+ unsigned short message_quota;
+ unsigned short message_use_count;
+ struct semaphore quota_event;
+ int previous_tx_index;
+} VCHIQ_SERVICE_QUOTA_T;
+
+typedef struct vchiq_shared_state_struct {
+
+ /* A non-zero value here indicates that the content is valid. */
+ int initialised;
+
+ /* The first and last (inclusive) slots allocated to the owner. */
+ int slot_first;
+ int slot_last;
+
+ /* The slot allocated to synchronous messages from the owner. */
+ int slot_sync;
+
+ /* Signalling this event indicates that the owner's slot handler thread
+ ** should run. */
+ REMOTE_EVENT_T trigger;
+
+ /* Indicates the byte position within the stream where the next message
+ ** will be written. The least significant bits are an index into the
+ ** slot. The next bits are the index of the slot in slot_queue. */
+ int tx_pos;
+
+ /* This event should be signalled when a slot is recycled. */
+ REMOTE_EVENT_T recycle;
+
+ /* The slot_queue index where the next recycled slot will be written. */
+ int slot_queue_recycle;
+
+ /* This event should be signalled when a synchronous message is sent. */
+ REMOTE_EVENT_T sync_trigger;
+
+ /* This event should be signalled when a synchronous message has been
+ ** released. */
+ REMOTE_EVENT_T sync_release;
+
+ /* A circular buffer of slot indexes. */
+ int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
+
+ /* Debugging state */
+ int debug[DEBUG_MAX];
+} VCHIQ_SHARED_STATE_T;
+
+typedef struct vchiq_slot_zero_struct {
+ int magic;
+ short version;
+ short version_min;
+ int slot_zero_size;
+ int slot_size;
+ int max_slots;
+ int max_slots_per_side;
+ int platform_data[2];
+ VCHIQ_SHARED_STATE_T master;
+ VCHIQ_SHARED_STATE_T slave;
+ VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
+} VCHIQ_SLOT_ZERO_T;
+
+struct vchiq_state_struct {
+ int id;
+ int initialised;
+ VCHIQ_CONNSTATE_T conn_state;
+ int is_master;
+
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_SLOT_T *slot_data;
+
+ unsigned short default_slot_quota;
+ unsigned short default_message_quota;
+
+ /* Event indicating connect message received */
+ struct semaphore connect;
+
+ /* Mutex protecting services */
+ struct mutex mutex;
+ VCHIQ_INSTANCE_T *instance;
+
+ /* Processes incoming messages */
+ VCHIQ_THREAD_T slot_handler_thread;
+
+ /* Processes recycled slots */
+ VCHIQ_THREAD_T recycle_thread;
+
+ /* Processes synchronous messages */
+ VCHIQ_THREAD_T sync_thread;
+
+ /* Local implementation of the trigger remote event */
+ struct semaphore trigger_event;
+
+ /* Local implementation of the recycle remote event */
+ struct semaphore recycle_event;
+
+ /* Local implementation of the sync trigger remote event */
+ struct semaphore sync_trigger_event;
+
+ /* Local implementation of the sync release remote event */
+ struct semaphore sync_release_event;
+
+ char *tx_data;
+ char *rx_data;
+ VCHIQ_SLOT_INFO_T *rx_info;
+
+ struct mutex slot_mutex;
+
+ struct mutex recycle_mutex;
+
+ struct mutex sync_mutex;
+
+ struct mutex bulk_transfer_mutex;
+
+ /* Indicates the byte position within the stream from where the next
+ ** message will be read. The least significant bits are an index into
+ ** the slot. The next bits are the index of the slot in
+ ** remote->slot_queue. */
+ int rx_pos;
+
+ /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
+ from remote->tx_pos. */
+ int local_tx_pos;
+
+ /* The slot_queue index of the slot to become available next. */
+ int slot_queue_available;
+
+ /* A flag to indicate if any poll has been requested */
+ int poll_needed;
+
+ /* The index of the previous slot used for data messages. */
+ int previous_data_index;
+
+ /* The number of slots occupied by data messages. */
+ unsigned short data_use_count;
+
+ /* The maximum number of slots to be occupied by data messages. */
+ unsigned short data_quota;
+
+ /* An array of bit sets indicating which services must be polled. */
+ atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+
+ /* The number of the first unused service */
+ int unused_service;
+
+ /* Signalled when a free slot becomes available. */
+ struct semaphore slot_available_event;
+
+ struct semaphore slot_remove_event;
+
+ /* Signalled when a free data slot becomes available. */
+ struct semaphore data_quota_event;
+
+ /* Incremented when there are bulk transfers which cannot be processed
+ * whilst paused and must be processed on resume */
+ int deferred_bulks;
+
+ struct state_stats_struct {
+ int slot_stalls;
+ int data_stalls;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int error_count;
+ } stats;
+
+ VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
+ VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
+ VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
+
+ VCHIQ_PLATFORM_STATE_T platform_state;
+};
+
+struct bulk_waiter {
+ VCHIQ_BULK_T *bulk;
+ struct semaphore event;
+ int actual;
+};
+
+extern spinlock_t bulk_waiter_spinlock;
+
+extern int vchiq_core_log_level;
+extern int vchiq_core_msg_log_level;
+extern int vchiq_sync_log_level;
+
+extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
+
+extern const char *
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
+
+extern VCHIQ_SLOT_ZERO_T *
+vchiq_init_slots(void *mem_base, int mem_size);
+
+extern VCHIQ_STATUS_T
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
+ int is_master);
+
+extern VCHIQ_STATUS_T
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_SERVICE_T *
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
+ VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
+
+extern VCHIQ_STATUS_T
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
+
+extern VCHIQ_STATUS_T
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
+
+extern void
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_STATUS_T
+vchiq_pause_internal(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_resume_internal(VCHIQ_STATE_T *state);
+
+extern void
+remote_event_pollall(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
+
+extern void
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_loud_error_header(void);
+
+extern void
+vchiq_loud_error_footer(void);
+
+extern void
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
+
+static inline VCHIQ_SERVICE_T *
+handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
+ (VCHIQ_MAX_STATES - 1)];
+ if (!state)
+ return NULL;
+
+ return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
+}
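Illustrating the handle layout decoded above, with assumed values VCHIQ_MAX_SERVICES == 4096 and VCHIQ_MAX_STATES == 1 (the real constants come from vchiq_cfg.h):

	/* handle 0x00001007:
	 *   state index   = (0x1007 / 4096) & (1 - 1) = 0  -> vchiq_states[0]
	 *   service index =  0x1007 & (4096 - 1)      = 7  -> state->services[7]
	 */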
+
+extern VCHIQ_SERVICE_T *
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_SERVICE_T *
+find_service_by_port(VCHIQ_STATE_T *state, int localport);
+
+extern VCHIQ_SERVICE_T *
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_SERVICE_T *
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
+ int *pidx);
+
+extern void
+lock_service(VCHIQ_SERVICE_T *service);
+
+extern void
+unlock_service(VCHIQ_SERVICE_T *service);
+
+/* The following functions are called from vchiq_core, and external
+** implementations must be provided. */
+
+extern VCHIQ_STATUS_T
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
+
+extern void
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
+
+extern void
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
+
+extern VCHIQ_STATUS_T
+vchiq_copy_from_user(void *dst, const void *src, int size);
+
+extern void
+remote_event_signal(REMOTE_EVENT_T *event);
+
+void
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_paused(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_resume(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_resumed(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump(void *dump_context, const char *str, int len);
+
+extern void
+vchiq_dump_platform_state(void *dump_context);
+
+extern void
+vchiq_dump_platform_instances(void *dump_context);
+
+extern void
+vchiq_dump_platform_service_state(void *dump_context,
+ VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_on_remote_use(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_on_remote_release(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_init_state(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_check_service(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_use(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_release(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
+
+extern void
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
+
+
+extern void
+vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
+ size_t numBytes);
+
+extern void
+vchiq_core_initialize(void);
+
+#endif
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_if.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_if.h
new file mode 100644
index 000000000000..6a95a67a833a
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_if.h
@@ -0,0 +1,188 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_IF_H
+#define VCHIQ_IF_H
+
+#include "interface/vchi/vchi_mh.h"
+
+#define VCHIQ_SERVICE_HANDLE_INVALID 0
+
+#define VCHIQ_SLOT_SIZE 4096
+#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
+#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
+
+#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
+ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
+#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
+#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
+
+typedef enum {
+ VCHIQ_SERVICE_OPENED, /* service, -, - */
+ VCHIQ_SERVICE_CLOSED, /* service, -, - */
+ VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
+ VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
+} VCHIQ_REASON_T;
+
+typedef enum {
+ VCHIQ_ERROR = -1,
+ VCHIQ_SUCCESS = 0,
+ VCHIQ_RETRY = 1
+} VCHIQ_STATUS_T;
+
+typedef enum {
+ VCHIQ_BULK_MODE_CALLBACK,
+ VCHIQ_BULK_MODE_BLOCKING,
+ VCHIQ_BULK_MODE_NOCALLBACK,
+ VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
+} VCHIQ_BULK_MODE_T;
+
+typedef enum {
+ VCHIQ_SERVICE_OPTION_AUTOCLOSE,
+ VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
+ VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
+ VCHIQ_SERVICE_OPTION_SYNCHRONOUS
+} VCHIQ_SERVICE_OPTION_T;
+
+typedef struct vchiq_header_struct {
+ /* The message identifier - opaque to applications. */
+ int msgid;
+
+ /* Size of message data. */
+ unsigned int size;
+
+ char data[0]; /* message */
+} VCHIQ_HEADER_T;
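For orientation (not normative): assuming 4-byte ints and no padding, the header above is 8 bytes, which fixes the per-message payload limit defined earlier in this file.

	/* sizeof(VCHIQ_HEADER_T) == 8 (msgid + size; data[] adds nothing)
	 * VCHIQ_MAX_MSG_SIZE    == VCHIQ_SLOT_SIZE - 8 == 4088 */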
+
+typedef struct {
+ const void *data;
+ unsigned int size;
+} VCHIQ_ELEMENT_T;
+
+typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
+
+typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
+ VCHIQ_SERVICE_HANDLE_T, void *);
+
+typedef struct vchiq_service_base_struct {
+ int fourcc;
+ VCHIQ_CALLBACK_T callback;
+ void *userdata;
+} VCHIQ_SERVICE_BASE_T;
+
+typedef struct vchiq_service_params_struct {
+ int fourcc;
+ VCHIQ_CALLBACK_T callback;
+ void *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+} VCHIQ_SERVICE_PARAMS_T;
+
+typedef struct vchiq_config_struct {
+ unsigned int max_msg_size;
+ unsigned int bulk_threshold; /* The message size above which it
+ is better to use a bulk transfer
+ (<= max_msg_size) */
+ unsigned int max_outstanding_bulks;
+ unsigned int max_services;
+ short version; /* The version of VCHIQ */
+ short version_min; /* The minimum compatible version of VCHIQ */
+} VCHIQ_CONFIG_T;
+
+typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
+typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
+
+extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
+extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
+extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
+extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *pservice);
+extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *pservice);
+extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
+ VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
+
+extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
+ const VCHIQ_ELEMENT_T *elements, unsigned int count);
+extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
+ VCHIQ_HEADER_T *header);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
+ const void *offset, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
+ void *offset, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
+ VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
+ void *userdata, VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
+ VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
+ void *userdata, VCHIQ_BULK_MODE_T mode);
+extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
+extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
+extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
+ int config_size, VCHIQ_CONFIG_T *pconfig);
+extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
+ VCHIQ_SERVICE_OPTION_T option, int value);
+
+extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
+ VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
+extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
+ void *ptr, size_t num_bytes);
+
+extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
+ short *peer_version);
+
+#endif /* VCHIQ_IF_H */
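A condensed, hypothetical kernel-side client of this interface, tying the calls above together (the 'DEMO' fourcc, callback body and payload are placeholders, and error handling is elided; a real client would wait for completions before shutting down):

static VCHIQ_STATUS_T
demo_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
    VCHIQ_SERVICE_HANDLE_T service, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service, header);	/* return the slot */
	return VCHIQ_SUCCESS;
}

static void
demo_open_and_send(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T service;
	static const char hello[] = "hello";
	VCHIQ_ELEMENT_T element = { hello, sizeof(hello) };
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'),
		.callback = demo_callback,
		.userdata = NULL,
		.version = 1,
		.version_min = 1,
	};

	vchiq_initialise(&instance);
	vchiq_connect(instance);
	vchiq_open_service(instance, &params, &service);
	vchiq_queue_message(service, &element, 1);
	vchiq_shutdown(instance);
}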
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_ioctl.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_ioctl.h
new file mode 100644
index 000000000000..13103c5c3632
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_ioctl.h
@@ -0,0 +1,128 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_IOCTLS_H
+#define VCHIQ_IOCTLS_H
+
+#include "vchiq_if.h"
+
+#define VCHIQ_IOC_MAGIC 0xc4
+#define VCHIQ_INVALID_HANDLE (~0)
+
+typedef struct {
+ VCHIQ_SERVICE_PARAMS_T params;
+ int is_open;
+ int is_vchi;
+ unsigned int handle; /* OUT */
+} VCHIQ_CREATE_SERVICE_T;
+
+typedef struct {
+ unsigned int handle;
+ unsigned int count;
+ const VCHIQ_ELEMENT_T *elements;
+} VCHIQ_QUEUE_MESSAGE_T;
+
+typedef struct {
+ unsigned int handle;
+ void *data;
+ unsigned int size;
+ void *userdata;
+ VCHIQ_BULK_MODE_T mode;
+} VCHIQ_QUEUE_BULK_TRANSFER_T;
+
+typedef struct {
+ VCHIQ_REASON_T reason;
+ VCHIQ_HEADER_T *header;
+ void *service_userdata;
+ void *bulk_userdata;
+} VCHIQ_COMPLETION_DATA_T;
+
+typedef struct {
+ unsigned int count;
+ VCHIQ_COMPLETION_DATA_T *buf;
+ unsigned int msgbufsize;
+ unsigned int msgbufcount; /* IN/OUT */
+ void **msgbufs;
+} VCHIQ_AWAIT_COMPLETION_T;
+
+typedef struct {
+ unsigned int handle;
+ int blocking;
+ unsigned int bufsize;
+ void *buf;
+} VCHIQ_DEQUEUE_MESSAGE_T;
+
+typedef struct {
+ unsigned int config_size;
+ VCHIQ_CONFIG_T *pconfig;
+} VCHIQ_GET_CONFIG_T;
+
+typedef struct {
+ unsigned int handle;
+ VCHIQ_SERVICE_OPTION_T option;
+ int value;
+} VCHIQ_SET_SERVICE_OPTION_T;
+
+typedef struct {
+ void *virt_addr;
+ size_t num_bytes;
+} VCHIQ_DUMP_MEM_T;
+
+#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
+#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
+#define VCHIQ_IOC_CREATE_SERVICE \
+ _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
+#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
+#define VCHIQ_IOC_QUEUE_MESSAGE \
+ _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
+ _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
+ _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
+#define VCHIQ_IOC_AWAIT_COMPLETION \
+ _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
+#define VCHIQ_IOC_DEQUEUE_MESSAGE \
+ _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
+#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
+#define VCHIQ_IOC_GET_CONFIG \
+ _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
+#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
+#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
+#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
+#define VCHIQ_IOC_SET_SERVICE_OPTION \
+ _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
+#define VCHIQ_IOC_DUMP_PHYS_MEM \
+ _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
+#define VCHIQ_IOC_MAX 15
+
+#endif
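From userland the same operations go through these ioctls on the VCHIQ character device; a hypothetical sketch (the /dev/vchiq path, 'DEMO' fourcc and error handling are assumptions, not part of this header):

#include <fcntl.h>
#include <sys/ioctl.h>
#include "vchiq_ioctl.h"

static int
demo_userland_service(void)
{
	VCHIQ_CREATE_SERVICE_T create = { 0 };
	int fd;

	fd = open("/dev/vchiq", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return (-1);

	create.params.fourcc = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O');
	create.is_open = 1;

	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0 ||
	    ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &create) != 0)
		return (-1);

	/* create.handle now identifies the service for later ioctls. */
	return (fd);
}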
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c
new file mode 100644
index 000000000000..6edbb67d8e0a
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c
@@ -0,0 +1,461 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#include "vchiq_core.h"
+#include "vchiq_arm.h"
+
+/* ---- Public Variables ------------------------------------------------- */
+
+/* ---- Private Constants and Types -------------------------------------- */
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance_struct {
+ VCHIQ_STATE_T *state;
+
+ int connected;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+};
+
+static VCHIQ_STATUS_T
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, VCHIQ_BULK_DIR_T dir);
+
+/****************************************************************************
+*
+* vchiq_initialise
+*
+***************************************************************************/
+#define VCHIQ_INIT_RETRIES 10
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_STATE_T *state;
+ VCHIQ_INSTANCE_T instance = NULL;
+ int i;
+
+ vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
+
+ /* VideoCore may not be ready due to boot up timing. It may never be
+ ready if kernel and firmware are mismatched, so don't block forever. */
+ for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
+ state = vchiq_get_state();
+ if (state)
+ break;
+ udelay(500);
+ }
+ if (i == VCHIQ_INIT_RETRIES) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s: videocore not initialized\n", __func__);
+ goto failed;
+ } else if (i > 0) {
+ vchiq_log_warning(vchiq_core_log_level,
+ "%s: videocore initialized after %d retries\n", __func__, i);
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s: error allocating vchiq instance\n", __func__);
+ goto failed;
+ }
+
+ instance->connected = 0;
+ instance->state = state;
+ lmutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ *instanceOut = instance;
+
+ status = VCHIQ_SUCCESS;
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_initialise);
+
+/****************************************************************************
+*
+* vchiq_shutdown
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ if (lmutex_lock_interruptible(&state->mutex) != 0)
+ return VCHIQ_RETRY;
+
+ /* Remove all services */
+ status = vchiq_shutdown_internal(state, instance);
+
+ lmutex_unlock(&state->mutex);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ if (status == VCHIQ_SUCCESS) {
+ struct list_head *pos, *next;
+ list_for_each_safe(pos, next,
+ &instance->bulk_waiter_list) {
+ struct bulk_waiter_node *waiter;
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ vchiq_log_info(vchiq_arm_log_level,
+ "bulk_waiter - cleaned up %x "
+ "for pid %d",
+ (unsigned int)waiter, waiter->pid);
+ _sema_destroy(&waiter->bulk_waiter.event);
+
+ kfree(waiter);
+ }
+
+ lmutex_destroy(&instance->bulk_waiter_list_mutex);
+
+ kfree(instance);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_shutdown);
+
+/****************************************************************************
+*
+* vchiq_is_connected
+*
+***************************************************************************/
+
+static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
+{
+ return instance->connected;
+}
+
+/****************************************************************************
+*
+* vchiq_connect
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ if (lmutex_lock_interruptible(&state->mutex) != 0) {
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s: call to lmutex_lock failed", __func__);
+ status = VCHIQ_RETRY;
+ goto failed;
+ }
+ status = vchiq_connect_internal(state, instance);
+
+ if (status == VCHIQ_SUCCESS)
+ instance->connected = 1;
+
+ lmutex_unlock(&state->mutex);
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_connect);
+
+/****************************************************************************
+*
+* vchiq_add_service
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_add_service(
+ VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *phandle)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+ VCHIQ_SERVICE_T *service = NULL;
+ int srvstate;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ srvstate = vchiq_is_connected(instance)
+ ? VCHIQ_SRVSTATE_LISTENING
+ : VCHIQ_SRVSTATE_HIDDEN;
+
+ service = vchiq_add_service_internal(
+ state,
+ params,
+ srvstate,
+ instance,
+ NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ status = VCHIQ_SUCCESS;
+ } else
+ status = VCHIQ_ERROR;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_add_service);
+
+/****************************************************************************
+*
+* vchiq_open_service
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_open_service(
+ VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *phandle)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_STATE_T *state = instance->state;
+ VCHIQ_SERVICE_T *service = NULL;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ if (!vchiq_is_connected(instance))
+ goto failed;
+
+ service = vchiq_add_service_internal(state,
+ params,
+ VCHIQ_SRVSTATE_OPENING,
+ instance,
+ NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ status = vchiq_open_service_internal(service,
+ (uintptr_t)current);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_remove_service(service->handle);
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+ }
+ }
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_open_service);
+
+VCHIQ_STATUS_T
+vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
+ void *data, unsigned int size, void *userdata)
+{
+ return vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
+}
+EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
+
+VCHIQ_STATUS_T
+vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, void *userdata)
+{
+ return vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
+}
+EXPORT_SYMBOL(vchiq_queue_bulk_receive);
+
+VCHIQ_STATUS_T
+vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+{
+ VCHIQ_STATUS_T status;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ status = vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ mode, VCHIQ_BULK_TRANSMIT);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ status = vchiq_blocking_bulk_transfer(handle,
+ data, size, VCHIQ_BULK_TRANSMIT);
+ break;
+ default:
+ return VCHIQ_ERROR;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_bulk_transmit);
+
+VCHIQ_STATUS_T
+vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+{
+ VCHIQ_STATUS_T status;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ status = vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ mode, VCHIQ_BULK_RECEIVE);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ status = vchiq_blocking_bulk_transfer(handle,
+ data, size, VCHIQ_BULK_RECEIVE);
+ break;
+ default:
+ return VCHIQ_ERROR;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_bulk_receive);
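A minimal illustration of the blocking mode dispatched above (handle and buffer are placeholders); the call does not return until the transfer completes, is aborted, or the thread is interrupted:

	char buf[4096];

	if (vchiq_bulk_receive(handle, buf, sizeof(buf), NULL,
	    VCHIQ_BULK_MODE_BLOCKING) != VCHIQ_SUCCESS)
		printf("vchiq: blocking bulk receive failed\n");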
+
+static VCHIQ_STATUS_T
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, VCHIQ_BULK_DIR_T dir)
+{
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_SERVICE_T *service;
+ VCHIQ_STATUS_T status;
+ struct bulk_waiter_node *waiter = NULL;
+ struct list_head *pos;
+
+ service = find_service_by_handle(handle);
+ if (!service)
+ return VCHIQ_ERROR;
+
+ instance = service->instance;
+
+ unlock_service(service);
+
+ lmutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each(pos, &instance->bulk_waiter_list) {
+ if (list_entry(pos, struct bulk_waiter_node,
+ list)->pid == current->p_pid) {
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ break;
+ }
+ }
+ lmutex_unlock(&instance->bulk_waiter_list_mutex);
+
+ if (waiter) {
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+ if (bulk) {
+ /* This thread has an outstanding bulk transfer. */
+ if ((bulk->data != data) ||
+ (bulk->size != size)) {
+ /* This is not a retry of the previous one.
+ ** Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ }
+ }
+
+ if (!waiter) {
+ waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
+ if (!waiter) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s - out of memory", __func__);
+ return VCHIQ_ERROR;
+ }
+ }
+
+ status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
+ data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
+ dir);
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+ if (bulk) {
+ /* Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ _sema_destroy(&waiter->bulk_waiter.event);
+
+ kfree(waiter);
+ } else {
+ waiter->pid = current->p_pid;
+ lmutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ lmutex_unlock(&instance->bulk_waiter_list_mutex);
+ vchiq_log_info(vchiq_arm_log_level,
+ "saved bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->p_pid);
+ }
+
+ return status;
+}
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c
new file mode 100644
index 000000000000..6931c8992241
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2012-2015 Oleksandr Tymoshenko <gonzo@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+#include <sys/watchdog.h>
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/intr.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include "vchiq_arm.h"
+#include "vchiq_2835.h"
+
+#define VCHIQ_LOCK do { \
+ mtx_lock(&bcm_vchiq_sc->lock); \
+} while(0)
+
+#define VCHIQ_UNLOCK do { \
+ mtx_unlock(&bcm_vchiq_sc->lock); \
+} while(0)
+
+#ifdef DEBUG
+#define dprintf(fmt, args...) printf(fmt, ##args)
+#else
+#define dprintf(fmt, args...)
+#endif
+
+struct bcm_vchiq_softc {
+ struct mtx lock;
+ struct resource * mem_res;
+ struct resource * irq_res;
+ void* intr_hl;
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+};
+
+static struct bcm_vchiq_softc *bcm_vchiq_sc = NULL;
+
+#define vchiq_read_4(reg) \
+ bus_space_read_4(bcm_vchiq_sc->bst, bcm_vchiq_sc->bsh, reg)
+#define vchiq_write_4(reg, val) \
+ bus_space_write_4(bcm_vchiq_sc->bst, bcm_vchiq_sc->bsh, reg, val)
+
+/*
+ * Extern functions
+ */
+void vchiq_exit(void);
+int vchiq_init(void);
+
+extern VCHIQ_STATE_T g_state;
+
+static void
+bcm_vchiq_intr(void *arg)
+{
+ VCHIQ_STATE_T *state = &g_state;
+ unsigned int status;
+
+ /* Read (and clear) the doorbell */
+ status = vchiq_read_4(0x40);
+
+ if (status & 0x4) { /* Was the doorbell rung? */
+ remote_event_pollall(state);
+ }
+}
+
+void
+remote_event_signal(REMOTE_EVENT_T *event)
+{
+ event->fired = 1;
+
+ /* The test on the next line also ensures the write on the previous line
+ has completed */
+ if (event->armed) {
+ /* trigger vc interrupt */
+ __asm __volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory");
+ vchiq_write_4(0x48, 0);
+ }
+}
+
+static int
+bcm_vchiq_probe(device_t dev)
+{
+
+ if (ofw_bus_is_compatible(dev, "broadcom,bcm2835-vchiq")) {
+ device_set_desc(dev, "BCM2835 VCHIQ");
+ return(BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+bcm_vchiq_attach(device_t dev)
+{
+ struct bcm_vchiq_softc *sc = device_get_softc(dev);
+ int rid = 0;
+
+ if (bcm_vchiq_sc != NULL)
+ return (EINVAL);
+
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (sc->mem_res == NULL) {
+ device_printf(dev, "could not allocate memory resource\n");
+ return (ENXIO);
+ }
+
+ sc->bst = rman_get_bustag(sc->mem_res);
+ sc->bsh = rman_get_bushandle(sc->mem_res);
+
+ rid = 0;
+ sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (sc->irq_res == NULL) {
+ device_printf(dev, "could not allocate interrupt resource\n");
+ return (ENXIO);
+ }
+
+ vchiq_core_initialize();
+
+ /* Set up and enable the doorbell interrupt */
+ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC,
+ NULL, bcm_vchiq_intr, sc,
+ &sc->intr_hl) != 0) {
+ bus_release_resource(dev, SYS_RES_IRQ, rid,
+ sc->irq_res);
+ device_printf(dev, "Unable to set up the interrupt handler.\n");
+ return (ENXIO);
+ }
+
+ mtx_init(&sc->lock, "vchiq", MTX_DEF, 0);
+ bcm_vchiq_sc = sc;
+
+ vchiq_init();
+
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+
+ return (0);
+}
+
+static int
+bcm_vchiq_detach(device_t dev)
+{
+ struct bcm_vchiq_softc *sc = device_get_softc(dev);
+
+ vchiq_exit();
+
+ if (sc->intr_hl)
+ bus_teardown_intr(dev, sc->irq_res, sc->intr_hl);
+ bus_release_resource(dev, SYS_RES_IRQ, 0,
+ sc->irq_res);
+ bus_release_resource(dev, SYS_RES_MEMORY, 0,
+ sc->mem_res);
+
+ mtx_destroy(&sc->lock);
+
+ return (0);
+}
+
+
+static device_method_t bcm_vchiq_methods[] = {
+ DEVMETHOD(device_probe, bcm_vchiq_probe),
+ DEVMETHOD(device_attach, bcm_vchiq_attach),
+ DEVMETHOD(device_detach, bcm_vchiq_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+
+ { 0, 0 }
+};
+
+static driver_t bcm_vchiq_driver = {
+ "vchiq",
+ bcm_vchiq_methods,
+ sizeof(struct bcm_vchiq_softc),
+};
+
+static devclass_t bcm_vchiq_devclass;
+
+DRIVER_MODULE(vchiq, simplebus, bcm_vchiq_driver, bcm_vchiq_devclass, 0, 0);
+MODULE_VERSION(vchiq, 1);
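bcm_vchiq_intr() and remote_event_signal() above talk to the VideoCore doorbell block through two bare offsets. A sketch with symbolic names for readability; only the 0x40/0x48 offsets and the bit-2 test come from the code, the names themselves are invented:

    /* Hypothetical register names for the doorbell accesses above. */
    #define VCHIQ_REG_BELL0STAT 0x40 /* read (and clear) the ARM-side doorbell */
    #define VCHIQ_REG_BELL2SET  0x48 /* write 0 here to ring the VC-side doorbell */
    #define VCHIQ_BELL_RUNG     0x04 /* status bit tested in the interrupt handler */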
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_memdrv.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_memdrv.h
new file mode 100644
index 000000000000..d02e7764bd0d
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_memdrv.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_MEMDRV_H
+#define VCHIQ_MEMDRV_H
+
+/* ---- Include Files ----------------------------------------------------- */
+
+#include <linux/kernel.h>
+#include "vchiq_if.h"
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+typedef struct {
+ void *armSharedMemVirt;
+ dma_addr_t armSharedMemPhys;
+ size_t armSharedMemSize;
+
+ void *vcSharedMemVirt;
+ dma_addr_t vcSharedMemPhys;
+ size_t vcSharedMemSize;
+} VCHIQ_SHARED_MEM_INFO_T;
+
+/* ---- Variable Externs ------------------------------------------------- */
+
+/* ---- Function Prototypes ---------------------------------------------- */
+
+void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
+
+VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
+
+VCHIQ_STATUS_T vchiq_userdrv_create_instance(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+VCHIQ_STATUS_T vchiq_userdrv_suspend(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+VCHIQ_STATUS_T vchiq_userdrv_resume(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+#endif
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_pagelist.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_pagelist.h
new file mode 100644
index 000000000000..07717d7bf358
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_pagelist.h
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_PAGELIST_H
+#define VCHIQ_PAGELIST_H
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#undef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE 32
+#define PAGELIST_WRITE 0
+#define PAGELIST_READ 1
+#define PAGELIST_READ_WITH_FRAGMENTS 2
+
+typedef struct pagelist_struct {
+ unsigned long length;
+ unsigned short type;
+ unsigned short offset;
+ unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
+ pages at consecutive addresses. */
+} PAGELIST_T;
+
+typedef struct fragments_struct {
+ char headbuf[CACHE_LINE_SIZE];
+ char tailbuf[CACHE_LINE_SIZE];
+} FRAGMENTS_T;
+
+#endif /* VCHIQ_PAGELIST_H */
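Since PAGE_SIZE is 4096 here, each addrs[] entry has 12 low bits to spare; per the comment above they hold the number of following pages in a consecutive run, while the upper bits hold the page-aligned address. A sketch of how an entry could be unpacked, assuming that layout (the helper names are not part of the header):

    /* Hypothetical accessors for one PAGELIST_T addrs[] entry. */
    #define PAGELIST_ENTRY_ADDR(e)   ((e) & ~((unsigned long)PAGE_SIZE - 1))
    #define PAGELIST_ENTRY_NPAGES(e) (((e) & (PAGE_SIZE - 1)) + 1) /* run length, counting the first page */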
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_proc.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_proc.c
new file mode 100644
index 000000000000..863c285dd978
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_proc.c
@@ -0,0 +1,240 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/proc_fs.h>
+#include "vchiq_core.h"
+#include "vchiq_arm.h"
+
+struct vchiq_proc_info {
+ /* Global 'vc' proc entry used by all instances */
+ struct proc_dir_entry *vc_cfg_dir;
+
+ /* one entry per client process */
+ struct proc_dir_entry *clients;
+
+ /* log categories */
+ struct proc_dir_entry *log_categories;
+};
+
+static struct vchiq_proc_info proc_info;
+
+struct proc_dir_entry *vchiq_proc_top(void)
+{
+ BUG_ON(proc_info.vc_cfg_dir == NULL);
+ return proc_info.vc_cfg_dir;
+}
+
+/****************************************************************************
+*
+* log category entries
+*
+***************************************************************************/
+#define PROC_WRITE_BUF_SIZE 256
+
+#define VCHIQ_LOG_ERROR_STR "error"
+#define VCHIQ_LOG_WARNING_STR "warning"
+#define VCHIQ_LOG_INFO_STR "info"
+#define VCHIQ_LOG_TRACE_STR "trace"
+
+static int log_cfg_read(char *buffer,
+ char **start,
+ off_t off,
+ int count,
+ int *eof,
+ void *data)
+{
+ int len = 0;
+ char *log_value = NULL;
+
+ switch (*((int *)data)) {
+ case VCHIQ_LOG_ERROR:
+ log_value = VCHIQ_LOG_ERROR_STR;
+ break;
+ case VCHIQ_LOG_WARNING:
+ log_value = VCHIQ_LOG_WARNING_STR;
+ break;
+ case VCHIQ_LOG_INFO:
+ log_value = VCHIQ_LOG_INFO_STR;
+ break;
+ case VCHIQ_LOG_TRACE:
+ log_value = VCHIQ_LOG_TRACE_STR;
+ break;
+ default:
+ break;
+ }
+
+ len += snprintf(buffer + len, count - len,
+ "%s\n",
+ log_value ? log_value : "(null)");
+
+ return len;
+}
+
+
+static int log_cfg_write(struct file *file,
+ const char __user *buffer,
+ unsigned long count,
+ void *data)
+{
+ int *log_module = data;
+ char kbuf[PROC_WRITE_BUF_SIZE + 1];
+
+ (void)file;
+
+ memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
+ if (count >= PROC_WRITE_BUF_SIZE)
+ count = PROC_WRITE_BUF_SIZE;
+
+ if (copy_from_user(kbuf,
+ buffer,
+ count) != 0)
+ return -EFAULT;
+ kbuf[count - 1] = 0;
+
+ if (strncmp("error", kbuf, strlen("error")) == 0)
+ *log_module = VCHIQ_LOG_ERROR;
+ else if (strncmp("warning", kbuf, strlen("warning")) == 0)
+ *log_module = VCHIQ_LOG_WARNING;
+ else if (strncmp("info", kbuf, strlen("info")) == 0)
+ *log_module = VCHIQ_LOG_INFO;
+ else if (strncmp("trace", kbuf, strlen("trace")) == 0)
+ *log_module = VCHIQ_LOG_TRACE;
+ else
+ *log_module = VCHIQ_LOG_DEFAULT;
+
+ return count;
+}
+
+/* Log category proc entries */
+struct vchiq_proc_log_entry {
+ const char *name;
+ int *plevel;
+ struct proc_dir_entry *dir;
+};
+
+static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
+ { "core", &vchiq_core_log_level },
+ { "msg", &vchiq_core_msg_log_level },
+ { "sync", &vchiq_sync_log_level },
+ { "susp", &vchiq_susp_log_level },
+ { "arm", &vchiq_arm_log_level },
+};
+static int n_log_entries =
+ sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
+
+/* create an entry under /proc/vc/log for each log category */
+static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
+{
+ struct proc_dir_entry *dir;
+ size_t i;
+ int ret = 0;
+
+ dir = proc_mkdir("log", proc_info.vc_cfg_dir);
+ if (!dir)
+ return -ENOMEM;
+ proc_info.log_categories = dir;
+
+ for (i = 0; i < n_log_entries; i++) {
+ dir = create_proc_entry(vchiq_proc_log_entries[i].name,
+ 0644,
+ proc_info.log_categories);
+ if (!dir) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ dir->read_proc = &log_cfg_read;
+ dir->write_proc = &log_cfg_write;
+ dir->data = (void *)vchiq_proc_log_entries[i].plevel;
+
+ vchiq_proc_log_entries[i].dir = dir;
+ }
+ return ret;
+}
+
+
+int vchiq_proc_init(void)
+{
+ BUG_ON(proc_info.vc_cfg_dir != NULL);
+
+ proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
+ if (proc_info.vc_cfg_dir == NULL)
+ goto fail;
+
+ proc_info.clients = proc_mkdir("clients",
+ proc_info.vc_cfg_dir);
+ if (!proc_info.clients)
+ goto fail;
+
+ if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ vchiq_proc_deinit();
+ vchiq_log_error(vchiq_arm_log_level,
+ "%s: failed to create proc directory",
+ __func__);
+
+ return -ENOMEM;
+}
+
+/* remove all the proc entries */
+void vchiq_proc_deinit(void)
+{
+ /* log category entries */
+ if (proc_info.log_categories) {
+ size_t i;
+ for (i = 0; i < n_log_entries; i++)
+ if (vchiq_proc_log_entries[i].dir)
+ remove_proc_entry(
+ vchiq_proc_log_entries[i].name,
+ proc_info.log_categories);
+
+ remove_proc_entry(proc_info.log_categories->name,
+ proc_info.vc_cfg_dir);
+ }
+ if (proc_info.clients)
+ remove_proc_entry(proc_info.clients->name,
+ proc_info.vc_cfg_dir);
+ if (proc_info.vc_cfg_dir)
+ remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
+}
+
+struct proc_dir_entry *vchiq_clients_top(void)
+{
+ return proc_info.clients;
+}
+
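The net effect of log_cfg_read() and log_cfg_write() is that each entry under /proc/vc/log accepts and reports the plain strings "error", "warning", "info" and "trace"; writing "trace" to /proc/vc/log/core, for example, raises vchiq_core_log_level to VCHIQ_LOG_TRACE, and any unrecognised string resets the level to VCHIQ_LOG_DEFAULT.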
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_shim.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_shim.c
new file mode 100644
index 000000000000..94ad46ea833f
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_shim.c
@@ -0,0 +1,830 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <interface/compat/vchi_bsd.h>
+
+#include "interface/vchi/vchi.h"
+#include "vchiq.h"
+#include "vchiq_core.h"
+
+#include "vchiq_util.h"
+
+#define vchiq_status_to_vchi(status) ((int32_t)status)
+
+typedef struct {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ VCHIU_QUEUE_T queue;
+
+ VCHI_CALLBACK_T callback;
+ void *callback_param;
+} SHIM_SERVICE_T;
+
+/* ----------------------------------------------------------------------
+ * return pointer to the mphi message driver function table
+ * -------------------------------------------------------------------- */
+const VCHI_MESSAGE_DRIVER_T *
+vchi_mphi_message_driver_func_table(void)
+{
+ return NULL;
+}
+
+/* ----------------------------------------------------------------------
+ * return a pointer to the 'single' connection driver fops
+ * -------------------------------------------------------------------- */
+const VCHI_CONNECTION_API_T *
+single_get_func_table(void)
+{
+ return NULL;
+}
+
+VCHI_CONNECTION_T *vchi_create_connection(
+ const VCHI_CONNECTION_API_T *function_table,
+ const VCHI_MESSAGE_DRIVER_T *low_level)
+{
+ (void)function_table;
+ (void)low_level;
+ return NULL;
+}
+
+/***********************************************************
+ * Name: vchi_msg_peek
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * void **data,
+ * uint32_t *msg_size,
+ * VCHI_FLAGS_T flags
+ *
+ * Description: Routine to return a pointer to the current message (to allow in
+ * place processing). The message can be removed using
+ * vchi_msg_remove when you're finished
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_peek(&service->queue);
+
+ *data = header->data;
+ *msg_size = header->size;
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_peek);
+
+/***********************************************************
+ * Name: vchi_msg_remove
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ *
+ * Description: Routine to remove a message (after it has been read with
+ * vchi_msg_peek)
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ vchiq_release_message(service->handle, header);
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_remove);
+
+/***********************************************************
+ * Name: vchi_msg_queue
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * const void *data,
+ * uint32_t data_size,
+ * VCHI_FLAGS_T flags,
+ * void *msg_handle,
+ *
+ * Description: Thin wrapper to queue a message onto a connection
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_ELEMENT_T element = {data, data_size};
+ VCHIQ_STATUS_T status;
+
+ (void)msg_handle;
+
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+
+ status = vchiq_queue_message(service->handle, &element, 1);
+
+ /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_queue_message(service->handle, &element, 1);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_msg_queue);
+
+/***********************************************************
+ * Name: vchi_bulk_queue_receive
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void *data_dst,
+ * const uint32_t data_size,
+ * VCHI_FLAGS_T flags
+ * void *bulk_handle
+ *
+ * Description: Routine to set up a receive buffer
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_BULK_MODE_T mode;
+ VCHIQ_STATUS_T status;
+
+ switch ((int)flags) {
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ WARN_ON(!service->callback);
+ mode = VCHIQ_BULK_MODE_CALLBACK;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
+ mode = VCHIQ_BULK_MODE_BLOCKING;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ case VCHI_FLAGS_NONE:
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
+ break;
+ default:
+ WARN(1, "unsupported message\n");
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
+ }
+
+ status = vchiq_bulk_receive(service->handle, data_dst, data_size,
+ bulk_handle, mode);
+
+ /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_bulk_receive(service->handle, data_dst,
+ data_size, bulk_handle, mode);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_bulk_queue_receive);
+
+/***********************************************************
+ * Name: vchi_bulk_queue_transmit
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void *data_src,
+ * uint32_t data_size,
+ * VCHI_FLAGS_T flags,
+ * void *bulk_handle
+ *
+ * Description: Routine to transmit some data
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
+ void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_BULK_MODE_T mode;
+ VCHIQ_STATUS_T status;
+
+ switch ((int)flags) {
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ WARN_ON(!service->callback);
+ mode = VCHIQ_BULK_MODE_CALLBACK;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
+ mode = VCHIQ_BULK_MODE_BLOCKING;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ case VCHI_FLAGS_NONE:
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
+ break;
+ default:
+ WARN(1, "unsupported message\n");
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
+ }
+
+ status = vchiq_bulk_transmit(service->handle, data_src, data_size,
+ bulk_handle, mode);
+
+ /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_bulk_transmit(service->handle, data_src,
+ data_size, bulk_handle, mode);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_bulk_queue_transmit);
+
+/***********************************************************
+ * Name: vchi_msg_dequeue
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void *data,
+ * uint32_t max_data_size_to_read,
+ * uint32_t *actual_msg_size
+ * VCHI_FLAGS_T flags
+ *
+ * Description: Routine to dequeue a message into the supplied buffer
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ memcpy(data, header->data, header->size < max_data_size_to_read ?
+ header->size : max_data_size_to_read);
+
+ *actual_msg_size = header->size;
+
+ vchiq_release_message(service->handle, header);
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_dequeue);
+
+/***********************************************************
+ * Name: vchi_msg_queuev
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * VCHI_MSG_VECTOR_T *vector,
+ * uint32_t count,
+ * VCHI_FLAGS_T flags,
+ * void *msg_handle
+ *
+ * Description: Thin wrapper to queue a message onto a connection
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+
+vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
+ offsetof(VCHIQ_ELEMENT_T, data));
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
+ offsetof(VCHIQ_ELEMENT_T, size));
+
+int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
+ (void)msg_handle;
+
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+
+ return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
+ (const VCHIQ_ELEMENT_T *)vector, count));
+}
+EXPORT_SYMBOL(vchi_msg_queuev);
+
+/***********************************************************
+ * Name: vchi_held_msg_release
+ *
+ * Arguments: VCHI_HELD_MSG_T *message
+ *
+ * Description: Routine to release a held message (after it has been read with
+ * vchi_msg_hold)
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
+{
+ vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
+ (VCHIQ_HEADER_T *)message->message);
+
+ return 0;
+}
+
+/***********************************************************
+ * Name: vchi_msg_hold
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void **data,
+ * uint32_t *msg_size,
+ * VCHI_FLAGS_T flags,
+ * VCHI_HELD_MSG_T *message_handle
+ *
+ * Description: Routine to return a pointer to the current message (to allow
+ * in place processing). The message is dequeued - don't forget
+ * to release the message using vchi_held_msg_release when you're
+ * finished.
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags,
+ VCHI_HELD_MSG_T *message_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ *data = header->data;
+ *msg_size = header->size;
+
+ message_handle->service =
+ (struct opaque_vchi_service_t *)service->handle;
+ message_handle->message = header;
+
+ return 0;
+}
+
+/***********************************************************
+ * Name: vchi_initialise
+ *
+ * Arguments: VCHI_INSTANCE_T *instance_handle
+ * VCHI_CONNECTION_T **connections
+ * const uint32_t num_connections
+ *
+ * Description: Initialises the hardware but does not transmit anything.
+ * When run as a host application this will be called twice, hence the
+ * need to malloc the state information.
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+
+int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
+{
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_STATUS_T status;
+
+ status = vchiq_initialise(&instance);
+
+ *instance_handle = (VCHI_INSTANCE_T)instance;
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_initialise);
+
+/***********************************************************
+ * Name: vchi_connect
+ *
+ * Arguments: VCHI_CONNECTION_T **connections
+ * const uint32_t num_connections
+ * VCHI_INSTANCE_T instance_handle)
+ *
+ * Description: Starts the command service on each connection,
+ * causing INIT messages to be pinged back and forth
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+int32_t vchi_connect(VCHI_CONNECTION_T **connections,
+ const uint32_t num_connections,
+ VCHI_INSTANCE_T instance_handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+
+ (void)connections;
+ (void)num_connections;
+
+ return vchiq_connect(instance);
+}
+EXPORT_SYMBOL(vchi_connect);
+
+
+/***********************************************************
+ * Name: vchi_disconnect
+ *
+ * Arguments: VCHI_INSTANCE_T instance_handle
+ *
+ * Description: Stops the command service on each connection,
+ * causing DE-INIT messages to be pinged back and forth
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ return vchiq_status_to_vchi(vchiq_shutdown(instance));
+}
+EXPORT_SYMBOL(vchi_disconnect);
+
+
+/***********************************************************
+ * Name: vchi_service_open
+ * Name: vchi_service_create
+ *
+ * Arguments: VCHI_INSTANCE_T *instance_handle
+ * SERVICE_CREATION_T *setup,
+ * VCHI_SERVICE_HANDLE_T *handle
+ *
+ * Description: Routine to open a service
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+
+static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
+{
+ SHIM_SERVICE_T *service =
+ (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
+
+ if (!service->callback)
+ goto release;
+
+ switch (reason) {
+ case VCHIQ_MESSAGE_AVAILABLE:
+ vchiu_queue_push(&service->queue, header);
+
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_MSG_AVAILABLE, NULL);
+
+ goto done;
+ break;
+
+ case VCHIQ_BULK_TRANSMIT_DONE:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_SENT, bulk_user);
+ break;
+
+ case VCHIQ_BULK_RECEIVE_DONE:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
+ break;
+
+ case VCHIQ_SERVICE_CLOSED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_SERVICE_CLOSED, NULL);
+ break;
+
+ case VCHIQ_SERVICE_OPENED:
+ /* No equivalent VCHI reason */
+ break;
+
+ case VCHIQ_BULK_TRANSMIT_ABORTED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
+ bulk_user);
+ break;
+
+ case VCHIQ_BULK_RECEIVE_ABORTED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
+ bulk_user);
+ break;
+
+ default:
+ WARN(1, "not supported\n");
+ break;
+ }
+
+release:
+ vchiq_release_message(service->handle, header);
+done:
+ return VCHIQ_SUCCESS;
+}
+
+static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
+ SERVICE_CREATION_T *setup)
+{
+ SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
+
+ (void)instance;
+
+ if (service) {
+ if (vchiu_queue_init(&service->queue, 64)) {
+ service->callback = setup->callback;
+ service->callback_param = setup->callback_param;
+ } else {
+ kfree(service);
+ service = NULL;
+ }
+ }
+
+ return service;
+}
+
+static void service_free(SHIM_SERVICE_T *service)
+{
+ if (service) {
+ vchiu_queue_delete(&service->queue);
+ kfree(service);
+ }
+}
+
+int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
+
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
+
+ if (service) {
+ VCHIQ_SERVICE_PARAMS_T params;
+ VCHIQ_STATUS_T status;
+
+ memset(&params, 0, sizeof(params));
+ params.fourcc = setup->service_id;
+ params.callback = shim_callback;
+ params.userdata = service;
+ params.version = setup->version.version;
+ params.version_min = setup->version.version_min;
+
+ status = vchiq_open_service(instance, &params,
+ &service->handle);
+ if (status != VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ *handle = NULL;
+ }
+ }
+
+ return (service != NULL) ? 0 : -1;
+}
+EXPORT_SYMBOL(vchi_service_open);
+
+int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
+
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
+
+ if (service) {
+ VCHIQ_SERVICE_PARAMS_T params;
+ VCHIQ_STATUS_T status;
+
+ memset(&params, 0, sizeof(params));
+ params.fourcc = setup->service_id;
+ params.callback = shim_callback;
+ params.userdata = service;
+ params.version = setup->version.version;
+ params.version_min = setup->version.version_min;
+ status = vchiq_add_service(instance, &params, &service->handle);
+
+ if (status != VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ *handle = NULL;
+ }
+ }
+
+ return (service != NULL) ? 0 : -1;
+}
+EXPORT_SYMBOL(vchi_service_create);
+
+int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
+ if (status == VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ }
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_close);
+
+int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+ if (status == VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ }
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_destroy);
+
+int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_get_peer_version);
+
+#ifdef notyet
+/* ----------------------------------------------------------------------
+ * read a uint32_t from buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+uint32_t
+vchi_readbuf_uint32(const void *_ptr)
+{
+ const unsigned char *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
+}
+
+/* ----------------------------------------------------------------------
+ * write a uint32_t to buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+void
+vchi_writebuf_uint32(void *_ptr, uint32_t value)
+{
+ unsigned char *ptr = _ptr;
+ ptr[0] = (unsigned char)((value >> 0) & 0xFF);
+ ptr[1] = (unsigned char)((value >> 8) & 0xFF);
+ ptr[2] = (unsigned char)((value >> 16) & 0xFF);
+ ptr[3] = (unsigned char)((value >> 24) & 0xFF);
+}
+
+/* ----------------------------------------------------------------------
+ * read a uint16_t from buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+uint16_t
+vchi_readbuf_uint16(const void *_ptr)
+{
+ const unsigned char *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8);
+}
+
+/* ----------------------------------------------------------------------
+ * write a uint16_t into the buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+void
+vchi_writebuf_uint16(void *_ptr, uint16_t value)
+{
+ unsigned char *ptr = _ptr;
+ ptr[0] = (value >> 0) & 0xFF;
+ ptr[1] = (value >> 8) & 0xFF;
+}
+#endif
+
+/***********************************************************
+ * Name: vchi_service_use
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ *
+ * Description: Routine to increment refcount on a service
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service)
+ ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_use);
+
+/***********************************************************
+ * Name: vchi_service_release
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ *
+ * Description: Routine to decrement refcount on a service
+ *
+ * Returns: void
+ *
+ ***********************************************************/
+int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service)
+ ret = vchiq_status_to_vchi(
+ vchiq_release_service(service->handle));
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_release);
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.c
new file mode 100644
index 000000000000..d972d3b791f4
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.c
@@ -0,0 +1,151 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_util.h"
+
+static inline int is_pow2(int i)
+{
+ return i && !(i & (i - 1));
+}
+
+int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
+{
+ WARN_ON(!is_pow2(size));
+
+ queue->size = size;
+ queue->read = 0;
+ queue->write = 0;
+
+ _sema_init(&queue->pop, 0);
+ _sema_init(&queue->push, 0);
+
+ queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
+ if (queue->storage == NULL) {
+ vchiu_queue_delete(queue);
+ return 0;
+ }
+ return 1;
+}
+
+void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
+{
+ if (queue->storage != NULL)
+ kfree(queue->storage);
+}
+
+int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
+{
+ return queue->read == queue->write;
+}
+
+int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
+{
+ return queue->write == queue->read + queue->size;
+}
+
+void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
+{
+ while (queue->write == queue->read + queue->size) {
+ if (down_interruptible(&queue->pop) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ /*
+ * Write to queue->storage must be visible after read from
+ * queue->read
+ */
+ smp_mb();
+
+ queue->storage[queue->write & (queue->size - 1)] = header;
+
+ /*
+ * Write to queue->storage must be visible before write to
+ * queue->write
+ */
+ smp_wmb();
+
+ queue->write++;
+
+ up(&queue->push);
+}
+
+VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
+{
+ while (queue->write == queue->read) {
+ if (down_interruptible(&queue->push) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ up(&queue->push); // We haven't removed anything from the queue.
+
+ /*
+ * Read from queue->storage must be visible after read from
+ * queue->write
+ */
+ smp_rmb();
+
+ return queue->storage[queue->read & (queue->size - 1)];
+}
+
+VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
+{
+ VCHIQ_HEADER_T *header;
+
+ while (queue->write == queue->read) {
+ if (down_interruptible(&queue->push) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ /*
+ * Read from queue->storage must be visible after read from
+ * queue->write
+ */
+ smp_rmb();
+
+ header = queue->storage[queue->read & (queue->size - 1)];
+
+ /*
+ * Read from queue->storage must be visible before write to
+ * queue->read
+ */
+ smp_mb();
+
+ queue->read++;
+
+ up(&queue->pop);
+
+ return header;
+}
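The queue above depends on size being a power of two: read and write are free-running counters, a slot is selected with (index & (size - 1)), a full queue is simply write == read + size, and the push/pop semaphores only wake blocked peers rather than counting elements. For example, with size = 64, read = 67 and write = 131 the queue holds 131 - 67 = 64 entries (full), and the next pop takes storage[67 & 63], i.e. storage[3].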
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.h b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.h
new file mode 100644
index 000000000000..ce49037f9600
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_util.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_UTIL_H
+#define VCHIQ_UTIL_H
+
+#ifdef __FreeBSD__
+#include <interface/compat/vchi_bsd.h>
+#endif
+
+#include "vchiq_if.h"
+
+typedef struct {
+ int size;
+ int read;
+ int write;
+
+ struct semaphore pop;
+ struct semaphore push;
+
+ VCHIQ_HEADER_T **storage;
+} VCHIU_QUEUE_T;
+
+extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
+extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
+
+extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
+extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
+
+extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
+
+extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
+extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
+
+#endif
+
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_version.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_version.c
new file mode 100644
index 000000000000..b6bfa21155e4
--- /dev/null
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_version.c
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "vchiq_build_info.h"
+#include <linux/broadcom/vc_debug_sym.h>
+
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
+
+const char *vchiq_get_build_hostname( void )
+{
+ return vchiq_build_hostname;
+}
+
+const char *vchiq_get_build_version( void )
+{
+ return vchiq_build_version;
+}
+
+const char *vchiq_get_build_date( void )
+{
+ return vchiq_build_date;
+}
+
+const char *vchiq_get_build_time( void )
+{
+ return vchiq_build_time;
+}
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index a4732c47a647..bf2cc5498b43 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -101,6 +101,7 @@ int acpi_quirks;
/* Supported sleep states. */
static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
+static void acpi_lookup(void *arg, const char *name, device_t *dev);
static int acpi_modevent(struct module *mod, int event, void *junk);
static int acpi_probe(device_t dev);
static int acpi_attach(device_t dev);
@@ -671,8 +672,10 @@ acpi_attach(device_t dev)
/* Register ACPI again to pass the correct argument of pm_func. */
power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
- if (!acpi_disabled("bus"))
+ if (!acpi_disabled("bus")) {
+ EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000);
acpi_probe_children(dev);
+ }
/* Update all GPEs and enable runtime GPEs. */
status = AcpiUpdateAllGpes();
@@ -3401,6 +3404,31 @@ acpi_disabled(char *subsys)
return (0);
}
+static void
+acpi_lookup(void *arg, const char *name, device_t *dev)
+{
+ ACPI_HANDLE handle;
+
+ if (*dev != NULL)
+ return;
+
+ /*
+ * Allow any handle name that is specified as an absolute path and
+ * starts with '\'. We could restrict this to \_SB and friends,
+ * but see acpi_probe_children() for notes on why we scan the entire
+ * namespace for devices.
+ *
+ * XXX: The pathname argument to AcpiGetHandle() should be fixed to
+ * be const.
+ */
+ if (name[0] != '\\')
+ return;
+ if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
+ &handle)))
+ return;
+ *dev = acpi_get_device(handle);
+}
+
/*
* Control interface.
*
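With the acpi_lookup() hook registered above, a dev_lookup consumer can resolve a full ACPI namespace path to its device_t: a lookup of, say, "\_SB_.PCI0" calls AcpiGetHandle() on the absolute path and returns whatever device acpi_get_device() has bound to that handle, while names not starting with '\' are ignored so non-ACPI lookups pass through untouched.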
diff --git a/sys/dev/cxgb/cxgb_osdep.h b/sys/dev/cxgb/cxgb_osdep.h
index 15f7d133f5eb..ddef730426aa 100644
--- a/sys/dev/cxgb/cxgb_osdep.h
+++ b/sys/dev/cxgb/cxgb_osdep.h
@@ -39,6 +39,8 @@ $FreeBSD$
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/kdb.h>
+
#include <dev/mii/mii.h>
#ifndef _CXGB_OSDEP_H_
@@ -128,10 +130,8 @@ void prefetch(void *x)
#define smp_mb() mb()
#define L1_CACHE_BYTES 128
-extern void kdb_backtrace(void);
-
#define WARN_ON(condition) do { \
- if (__predict_false((condition)!=0)) { \
+ if (__predict_false((condition)!=0)) { \
log(LOG_WARNING, "BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
kdb_backtrace(); \
} \
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 62ff9af000aa..31e8f393e0c1 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -192,7 +192,7 @@ enum {
/* INTR_DIRECT = (1 << 2), No longer used. */
MASTER_PF = (1 << 3),
ADAP_SYSCTL_CTX = (1 << 4),
- TOM_INIT_DONE = (1 << 5),
+ /* TOM_INIT_DONE = (1 << 5), No longer used. */
BUF_PACKING_OK = (1 << 6),
CXGBE_BUSY = (1 << 9),
@@ -758,7 +758,8 @@ struct adapter {
uint16_t doorbells;
int open_device_map;
#ifdef TCP_OFFLOAD
- int offload_map;
+ int offload_map; /* ports with IFCAP_TOE enabled */
+ int active_ulds; /* ULDs activated on this adapter */
#endif
int flags;
@@ -812,7 +813,6 @@ struct adapter {
#define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
-/* XXX: not bulletproof, but much better than nothing */
#define ASSERT_SYNCHRONIZED_OP(sc) \
KASSERT(IS_BUSY(sc) && \
(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 2d5d79d752f0..e2efb782e703 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -3111,6 +3111,31 @@ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
&pfmask, 1, A_TP_RSS_PF_MSK);
}
+static void refresh_vlan_pri_map(struct adapter *adap)
+{
+
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ A_TP_VLAN_PRI_MAP);
+
+ /*
+ * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+
+ /*
+ * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+}
+
/**
* t4_set_filter_mode - configure the optional components of filter tuples
* @adap: the adapter
@@ -3134,6 +3159,8 @@ int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
return -EINVAL;
t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
A_TP_VLAN_PRI_MAP);
+ refresh_vlan_pri_map(adap);
+
return 0;
}
@@ -5618,33 +5645,10 @@ int __devinit t4_init_tp_params(struct adapter *adap)
for (chan = 0; chan < NCHAN; chan++)
adap->params.tp.tx_modq[chan] = chan;
- /*
- * Cache the adapter's Compressed Filter Mode and global Incress
- * Configuration.
- */
- t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
- &adap->params.tp.vlan_pri_map, 1,
- A_TP_VLAN_PRI_MAP);
t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
&adap->params.tp.ingress_config, 1,
A_TP_INGRESS_CONFIG);
-
- /*
- * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
- * shift positions of several elements of the Compressed Filter Tuple
- * for this adapter which we need frequently ...
- */
- adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
- adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
-
- /*
- * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
- * represents the presense of an Outer VLAN instead of a VNIC ID.
- */
- if ((adap->params.tp.ingress_config & F_VNIC) == 0)
- adap->params.tp.vnic_shift = -1;
+ refresh_vlan_pri_map(adap);
return 0;
}
diff --git a/sys/dev/cxgbe/if_cxl.c b/sys/dev/cxgbe/if_cxl.c
new file mode 100644
index 000000000000..2b498dc5c449
--- /dev/null
+++ b/sys/dev/cxgbe/if_cxl.c
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2015 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+static int
+mod_event(module_t mod, int cmd, void *arg)
+{
+
+ return (0);
+}
+static moduledata_t if_cxl_mod = {"if_cxl", mod_event};
+DECLARE_MODULE(if_cxl, if_cxl_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_VERSION(if_cxl, 1);
+MODULE_DEPEND(if_cxl, cxl, 1, 1, 1);
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 92a574215b2a..29378ae9d377 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -213,7 +213,7 @@ c4iw_activate(struct adapter *sc)
ASSERT_SYNCHRONIZED_OP(sc);
- if (isset(&sc->offload_map, MAX_NPORTS)) {
+ if (uld_active(sc, ULD_IWARP)) {
KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
return (0);
}
@@ -265,9 +265,9 @@ c4iw_activate_all(struct adapter *sc, void *arg __unused)
if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
return;
- if (!isset(&sc->offload_map, MAX_NPORTS) &&
- t4_activate_uld(sc, ULD_IWARP) == 0)
- setbit(&sc->offload_map, MAX_NPORTS);
+ /* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. */
+ if (sc->offload_map && !uld_active(sc, ULD_IWARP))
+ (void) t4_activate_uld(sc, ULD_IWARP);
end_synchronized_op(sc, 0);
}
@@ -279,9 +279,8 @@ c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
return;
- if (isset(&sc->offload_map, MAX_NPORTS) &&
- t4_deactivate_uld(sc, ULD_IWARP) == 0)
- clrbit(&sc->offload_map, MAX_NPORTS);
+ if (uld_active(sc, ULD_IWARP))
+ (void) t4_deactivate_uld(sc, ULD_IWARP);
end_synchronized_op(sc, 0);
}
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index ea681fee5284..2a1228332b23 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -127,8 +127,10 @@ struct t4_virt_res { /* virtualized HW resources */
#ifdef TCP_OFFLOAD
enum {
- ULD_TOM = 1,
- ULD_IWARP = 2,
+ ULD_TOM = 0,
+ ULD_IWARP,
+ ULD_ISCSI,
+ ULD_MAX = ULD_ISCSI
};
struct adapter;
@@ -155,5 +157,6 @@ int t4_unregister_uld(struct uld_info *);
int t4_activate_uld(struct adapter *, int);
int t4_deactivate_uld(struct adapter *, int);
void t4_iscsi_init(struct ifnet *, unsigned int, const unsigned int *);
+int uld_active(struct adapter *, int);
#endif
#endif
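
As the t4_main.c hunks further down show, each adapter now tracks which ULDs were activated in a small active_ulds bitmap: t4_activate_uld() sets the bit, t4_deactivate_uld() clears it, and uld_active() tests it. A standalone sketch of that bookkeeping using plain bit operations (the driver itself uses the kernel's setbit/clrbit/isset macros):

#include <stdbool.h>

struct adapter_sketch {
	unsigned int active_ulds;	/* one bit per ULD_* id */
};

static void
uld_set_active(struct adapter_sketch *sc, int id, bool active)
{
	if (active)
		sc->active_ulds |= 1u << id;
	else
		sc->active_ulds &= ~(1u << id);
}

static bool
uld_active_sketch(const struct adapter_sketch *sc, int id)
{
	return ((sc->active_ulds & (1u << id)) != 0);
}
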
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 7f59e840de27..94e7ed1fe492 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -3139,6 +3139,9 @@ mcfail:
return (rc);
}
+/*
+ * {begin|end}_synchronized_op must be called from the same thread.
+ */
int
begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
char *wmesg)
@@ -3194,6 +3197,9 @@ done:
return (rc);
}
+/*
+ * {begin|end}_synchronized_op must be called from the same thread.
+ */
void
end_synchronized_op(struct adapter *sc, int flags)
{
@@ -3426,6 +3432,7 @@ adapter_full_init(struct adapter *sc)
{
int rc, i;
+ ASSERT_SYNCHRONIZED_OP(sc);
ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
KASSERT((sc->flags & FULL_INIT_DONE) == 0,
("%s: FULL_INIT_DONE already", __func__));
@@ -7109,10 +7116,9 @@ get_filter_mode(struct adapter *sc, uint32_t *mode)
log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
fconf);
- sc->params.tp.vlan_pri_map = fconf;
}
- *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
+ *mode = fconf_to_mode(fconf);
end_synchronized_op(sc, LOCK_HELD);
return (0);
@@ -7137,20 +7143,13 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
}
#ifdef TCP_OFFLOAD
- if (sc->offload_map) {
+ if (uld_active(sc, ULD_TOM)) {
rc = EBUSY;
goto done;
}
#endif
-#ifdef notyet
rc = -t4_set_filter_mode(sc, fconf);
- if (rc == 0)
- sc->filter_mode = fconf;
-#else
- rc = ENOTSUP;
-#endif
-
done:
end_synchronized_op(sc, LOCK_HELD);
return (rc);
@@ -8215,7 +8214,12 @@ toe_capability(struct port_info *pi, int enable)
return (ENODEV);
if (enable) {
- if (!(sc->flags & FULL_INIT_DONE)) {
+ /*
+ * We need the port's queues around so that we're able to send
+ * and receive CPLs to/from the TOE even if the ifnet for this
+ * port has never been UP'd administratively.
+ */
+ if (!(pi->flags & PORT_INIT_DONE)) {
rc = cxgbe_init_synchronized(pi);
if (rc)
return (rc);
@@ -8224,7 +8228,7 @@ toe_capability(struct port_info *pi, int enable)
if (isset(&sc->offload_map, pi->port_id))
return (0);
- if (!(sc->flags & TOM_INIT_DONE)) {
+ if (!uld_active(sc, ULD_TOM)) {
rc = t4_activate_uld(sc, ULD_TOM);
if (rc == EAGAIN) {
log(LOG_WARNING,
@@ -8235,16 +8239,22 @@ toe_capability(struct port_info *pi, int enable)
return (rc);
KASSERT(sc->tom_softc != NULL,
("%s: TOM activated but softc NULL", __func__));
- KASSERT(sc->flags & TOM_INIT_DONE,
+ KASSERT(uld_active(sc, ULD_TOM),
("%s: TOM activated but flag not set", __func__));
}
+ /* Activate iWARP and iSCSI too, if the modules are loaded. */
+ if (!uld_active(sc, ULD_IWARP))
+ (void) t4_activate_uld(sc, ULD_IWARP);
+ if (!uld_active(sc, ULD_ISCSI))
+ (void) t4_activate_uld(sc, ULD_ISCSI);
+
setbit(&sc->offload_map, pi->port_id);
} else {
if (!isset(&sc->offload_map, pi->port_id))
return (0);
- KASSERT(sc->flags & TOM_INIT_DONE,
+ KASSERT(uld_active(sc, ULD_TOM),
("%s: TOM never initialized?", __func__));
clrbit(&sc->offload_map, pi->port_id);
}
@@ -8304,11 +8314,15 @@ done:
int
t4_activate_uld(struct adapter *sc, int id)
{
- int rc = EAGAIN;
+ int rc;
struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
+ rc = EAGAIN; /* kldload the module with this ULD and try again. */
+
sx_slock(&t4_uld_list_lock);
SLIST_FOREACH(ui, &t4_uld_list, link) {
@@ -8316,16 +8330,18 @@ t4_activate_uld(struct adapter *sc, int id)
if (!(sc->flags & FULL_INIT_DONE)) {
rc = adapter_full_init(sc);
if (rc != 0)
- goto done;
+ break;
}
rc = ui->activate(sc);
- if (rc == 0)
+ if (rc == 0) {
+ setbit(&sc->active_ulds, id);
ui->refcount++;
- goto done;
+ }
+ break;
}
}
-done:
+
sx_sunlock(&t4_uld_list_lock);
return (rc);
@@ -8334,26 +8350,41 @@ done:
int
t4_deactivate_uld(struct adapter *sc, int id)
{
- int rc = EINVAL;
+ int rc;
struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
+ rc = ENXIO;
+
sx_slock(&t4_uld_list_lock);
SLIST_FOREACH(ui, &t4_uld_list, link) {
if (ui->uld_id == id) {
rc = ui->deactivate(sc);
- if (rc == 0)
+ if (rc == 0) {
+ clrbit(&sc->active_ulds, id);
ui->refcount--;
- goto done;
+ }
+ break;
}
}
-done:
+
sx_sunlock(&t4_uld_list_lock);
return (rc);
}
+
+int
+uld_active(struct adapter *sc, int uld_id)
+{
+
+ MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
+
+ return (isset(&sc->active_ulds, uld_id));
+}
#endif
/*
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 4380c9ee92ac..e6cbfe4c9579 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -523,7 +523,7 @@ t4_listen_start(struct toedev *tod, struct tcpcb *tp)
goto done;
}
- KASSERT(sc->flags & TOM_INIT_DONE,
+ KASSERT(uld_active(sc, ULD_TOM),
("%s: TOM not initialized", __func__));
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 71ea1df58857..a980fba80a77 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -746,7 +746,7 @@ update_clip(struct adapter *sc, void *arg __unused)
if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
return;
- if (sc->flags & TOM_INIT_DONE)
+ if (uld_active(sc, ULD_TOM))
update_clip_table(sc, sc->tom_softc);
end_synchronized_op(sc, LOCK_HELD);
@@ -1025,7 +1025,6 @@ t4_tom_activate(struct adapter *sc)
TOEDEV(sc->port[i]->ifp) = &td->tod;
sc->tom_softc = td;
- sc->flags |= TOM_INIT_DONE;
register_toedev(sc->tom_softc);
done:
@@ -1048,6 +1047,9 @@ t4_tom_deactivate(struct adapter *sc)
if (sc->offload_map != 0)
return (EBUSY); /* at least one port has IFCAP_TOE enabled */
+ if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
+ return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */
+
mtx_lock(&td->toep_list_lock);
if (!TAILQ_EMPTY(&td->toep_list))
rc = EBUSY;
@@ -1068,7 +1070,6 @@ t4_tom_deactivate(struct adapter *sc)
unregister_toedev(sc->tom_softc);
free_tom_data(sc, td);
sc->tom_softc = NULL;
- sc->flags &= ~TOM_INIT_DONE;
}
return (rc);
@@ -1122,7 +1123,7 @@ tom_uninit(struct adapter *sc, void *arg __unused)
return;
/* Try to free resources (works only if no port has IFCAP_TOE) */
- if (sc->flags & TOM_INIT_DONE)
+ if (uld_active(sc, ULD_TOM))
t4_deactivate_uld(sc, ULD_TOM);
end_synchronized_op(sc, 0);
diff --git a/sys/dev/drm2/radeon/ni.c b/sys/dev/drm2/radeon/ni.c
index 6f9a9668ba87..1484b3ed59a2 100644
--- a/sys/dev/drm2/radeon/ni.c
+++ b/sys/dev/drm2/radeon/ni.c
@@ -190,23 +190,23 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BARTS:
- io_mc_regs = (const u32 *)&barts_io_mc_regs;
+ io_mc_regs = &barts_io_mc_regs[0][0];
ucode_size = BTC_MC_UCODE_SIZE;
regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_TURKS:
- io_mc_regs = (const u32 *)&turks_io_mc_regs;
+ io_mc_regs = &turks_io_mc_regs[0][0];
ucode_size = BTC_MC_UCODE_SIZE;
regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_CAICOS:
default:
- io_mc_regs = (const u32 *)&caicos_io_mc_regs;
+ io_mc_regs = &caicos_io_mc_regs[0][0];
ucode_size = BTC_MC_UCODE_SIZE;
regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_CAYMAN:
- io_mc_regs = (const u32 *)&cayman_io_mc_regs;
+ io_mc_regs = &cayman_io_mc_regs[0][0];
ucode_size = CAYMAN_MC_UCODE_SIZE;
regs_size = BTC_IO_MC_REGS_SIZE;
break;
diff --git a/sys/dev/drm2/radeon/si.c b/sys/dev/drm2/radeon/si.c
index 7bf7721cfcff..0fdefd2a6b6d 100644
--- a/sys/dev/drm2/radeon/si.c
+++ b/sys/dev/drm2/radeon/si.c
@@ -190,18 +190,18 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_TAHITI:
- io_mc_regs = (const u32 *)&tahiti_io_mc_regs;
+ io_mc_regs = &tahiti_io_mc_regs[0][0];
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
- io_mc_regs = (const u32 *)&pitcairn_io_mc_regs;
+ io_mc_regs = &pitcairn_io_mc_regs[0][0];
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
- io_mc_regs = (const u32 *)&verde_io_mc_regs;
+ io_mc_regs = &verde_io_mc_regs[0][0];
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
diff --git a/sys/dev/ed/if_ed.c b/sys/dev/ed/if_ed.c
index be9d274aadd6..00e785d91c35 100644
--- a/sys/dev/ed/if_ed.c
+++ b/sys/dev/ed/if_ed.c
@@ -976,8 +976,10 @@ edintr(void *arg)
/*
* loop until there are no more new interrupts. When the card goes
* away, the hardware will read back 0xff. Looking at the interrupts,
- * it would appear that 0xff is impossible, or at least extremely
- * unlikely.
+ * it would appear that 0xff is impossible as ED_ISR_RST is normally
+ * clear. ED_ISR_RDC is also normally clear and only set while
+ * we're transferring memory to the card and we're holding the
+ * ED_LOCK (so we can't get into here).
*/
while ((isr = ed_nic_inb(sc, ED_P0_ISR)) != 0 && isr != 0xff) {
diff --git a/sys/dev/hwpmc/hwpmc_armv7.c b/sys/dev/hwpmc/hwpmc_armv7.c
index d58aca5aba0c..d14a990269bc 100644
--- a/sys/dev/hwpmc/hwpmc_armv7.c
+++ b/sys/dev/hwpmc/hwpmc_armv7.c
@@ -555,7 +555,7 @@ armv7_pcpu_init(struct pmc_mdep *md, int cpu)
armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
M_WAITOK|M_ZERO);
- cpuid = cpu_id();
+ cpuid = cpu_ident();
pac->cortex_ver = (cpuid >> CPU_ID_CORTEX_VER_SHIFT) & \
CPU_ID_CORTEX_VER_MASK;
diff --git a/sys/dev/ipmi/ipmi.c b/sys/dev/ipmi/ipmi.c
index 5e30770cb669..a1edbf64524b 100644
--- a/sys/dev/ipmi/ipmi.c
+++ b/sys/dev/ipmi/ipmi.c
@@ -49,6 +49,23 @@ __FBSDID("$FreeBSD$");
#include <dev/ipmi/ipmivars.h>
#endif
+/*
+ * Driver request structures are allocated on the stack via alloca() to
+ * avoid calling malloc(), especially for the watchdog handler.
+ * To avoid too much stack growth, a previously allocated structure can
+ * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
+ * that there is adequate reply/request space in the original allocation.
+ */
+#define IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen) \
+ bzero((req), sizeof(struct ipmi_request)); \
+ ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))
+
+#define IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen) \
+ (req) = __builtin_alloca(sizeof(struct ipmi_request) + \
+ (reqlen) + (replylen)); \
+ IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen), \
+ (replylen))
+
#ifdef IPMB
static int ipmi_ipmb_checksum(u_char, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
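
The two macros are meant to be used in pairs, as the converted callers below illustrate: one IPMI_ALLOC_DRIVER_REQUEST() sized for the largest exchange, followed by IPMI_INIT_DRIVER_REQUEST() to recycle that memory. A condensed sketch of the expected usage (the command choice and the flag byte are illustrative only):

static int
driver_request_sketch(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	/* Single stack allocation: 0-byte request, 15-byte reply. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);
	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error != 0)
		return (error);

	/*
	 * Reuse the same memory; the 1-byte request plus empty reply fits
	 * within the space allocated above.
	 */
	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);
	req->ir_request[0] = 0x01;	/* illustrative flag byte */
	return (ipmi_submit_driver_request(sc, req, 0));
}
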
@@ -181,8 +198,8 @@ ipmi_dtor(void *arg)
*/
dev->ipmi_closing = 1;
while (dev->ipmi_requests > 0) {
- msleep(&dev->ipmi_requests, &sc->ipmi_lock, PWAIT,
- "ipmidrain", 0);
+ msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
+ PWAIT, "ipmidrain", 0);
ipmi_purge_completed_requests(dev);
}
}
@@ -215,7 +232,7 @@ ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
u_char slave_addr = 0x52;
int error;
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_SEND_MSG, data_len + 8, 0);
req->ir_request[0] = channel;
req->ir_request[1] = slave_addr;
@@ -231,7 +248,6 @@ ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
ipmi_submit_driver_request(sc, req);
error = req->ir_error;
- ipmi_free_request(req);
return (error);
}
@@ -243,7 +259,7 @@ ipmi_handle_attn(struct ipmi_softc *sc)
int error;
device_printf(sc->ipmi_dev, "BMC has a message\n");
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_MSG_FLAGS, 0, 1);
ipmi_submit_driver_request(sc, req);
@@ -257,9 +273,7 @@ ipmi_handle_attn(struct ipmi_softc *sc)
"watchdog about to go off");
}
if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
- ipmi_free_request(req);
-
- req = ipmi_alloc_driver_request(
+ IPMI_ALLOC_DRIVER_REQUEST(req,
IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
16);
@@ -268,7 +282,6 @@ ipmi_handle_attn(struct ipmi_softc *sc)
}
}
error = req->ir_error;
- ipmi_free_request(req);
return (error);
}
@@ -478,15 +491,11 @@ ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
* Request management.
*/
-/* Allocate a new request with request and reply buffers. */
-struct ipmi_request *
-ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
- uint8_t command, size_t requestlen, size_t replylen)
+static __inline void
+ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
+ uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{
- struct ipmi_request *req;
- req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
- M_IPMI, M_WAITOK | M_ZERO);
req->ir_owner = dev;
req->ir_msgid = msgid;
req->ir_addr = addr;
@@ -499,6 +508,18 @@ ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
req->ir_reply = (char *)&req[1] + requestlen;
req->ir_replybuflen = replylen;
}
+}
+
+/* Allocate a new request with request and reply buffers. */
+struct ipmi_request *
+ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
+ uint8_t command, size_t requestlen, size_t replylen)
+{
+ struct ipmi_request *req;
+
+ req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
+ M_IPMI, M_WAITOK | M_ZERO);
+ ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
return (req);
}
@@ -533,21 +554,13 @@ ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
}
}
-/* Enqueue an internal driver request and wait until it is completed. */
+/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
int timo)
{
- int error;
- IPMI_LOCK(sc);
- error = sc->ipmi_enqueue_request(sc, req);
- if (error == 0)
- error = msleep(req, &sc->ipmi_lock, 0, "ipmireq", timo);
- if (error == 0)
- error = req->ir_error;
- IPMI_UNLOCK(sc);
- return (error);
+ return (sc->ipmi_driver_request(sc, req, timo));
}
/*
@@ -564,7 +577,7 @@ ipmi_dequeue_request(struct ipmi_softc *sc)
IPMI_LOCK_ASSERT(sc);
while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
- cv_wait(&sc->ipmi_request_added, &sc->ipmi_lock);
+ cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
if (sc->ipmi_detaching)
return (NULL);
@@ -598,7 +611,7 @@ ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
if (sec > 0xffff / 10)
return (EINVAL);
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_SET_WDOG, 6, 0);
if (sec) {
@@ -622,9 +635,7 @@ ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
if (error)
device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
else if (sec) {
- ipmi_free_request(req);
-
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_RESET_WDOG, 0, 0);
error = ipmi_submit_driver_request(sc, req, 0);
@@ -633,7 +644,6 @@ ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
"Failed to reset watchdog\n");
}
- ipmi_free_request(req);
return (error);
/*
dump_watchdog(sc);
@@ -680,7 +690,8 @@ ipmi_startup(void *arg)
dev = sc->ipmi_dev;
/* Initialize interface-independent state. */
- mtx_init(&sc->ipmi_lock, device_get_nameunit(dev), "ipmi", MTX_DEF);
+ mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
+ mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
cv_init(&sc->ipmi_request_added, "ipmireq");
TAILQ_INIT(&sc->ipmi_pending_requests);
@@ -693,28 +704,24 @@ ipmi_startup(void *arg)
}
/* Send a GET_DEVICE_ID request. */
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_DEVICE_ID, 0, 15);
error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
if (error == EWOULDBLOCK) {
device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
- ipmi_free_request(req);
return;
} else if (error) {
device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
- ipmi_free_request(req);
return;
} else if (req->ir_compcode != 0) {
device_printf(dev,
"Bad completion code for GET_DEVICE_ID: %d\n",
req->ir_compcode);
- ipmi_free_request(req);
return;
} else if (req->ir_replylen < 5) {
device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
req->ir_replylen);
- ipmi_free_request(req);
return;
}
@@ -724,9 +731,7 @@ ipmi_startup(void *arg)
req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);
- ipmi_free_request(req);
-
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_CLEAR_FLAGS, 1, 0);
ipmi_submit_driver_request(sc, req, 0);
@@ -738,25 +743,21 @@ ipmi_startup(void *arg)
if (req->ir_compcode == 0xc1) {
device_printf(dev, "Clear flags illegal\n");
}
- ipmi_free_request(req);
for (i = 0; i < 8; i++) {
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_CHANNEL_INFO, 1, 0);
req->ir_request[0] = i;
ipmi_submit_driver_request(sc, req, 0);
- if (req->ir_compcode != 0) {
- ipmi_free_request(req);
+ if (req->ir_compcode != 0)
break;
- }
- ipmi_free_request(req);
}
device_printf(dev, "Number of channels %d\n", i);
/* probe for watchdog */
- req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
+ IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_WDOG, 0, 0);
ipmi_submit_driver_request(sc, req, 0);
@@ -767,7 +768,6 @@ ipmi_startup(void *arg)
sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(watchdog_list,
ipmi_wd_event, sc, 0);
}
- ipmi_free_request(req);
sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
@@ -834,14 +834,16 @@ ipmi_detach(device_t dev)
sc->ipmi_detaching = 1;
if (sc->ipmi_kthread) {
cv_broadcast(&sc->ipmi_request_added);
- msleep(sc->ipmi_kthread, &sc->ipmi_lock, 0, "ipmi_wait", 0);
+ msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
+ "ipmi_wait", 0);
}
IPMI_UNLOCK(sc);
if (sc->ipmi_irq)
bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
ipmi_release_resources(dev);
- mtx_destroy(&sc->ipmi_lock);
+ mtx_destroy(&sc->ipmi_io_lock);
+ mtx_destroy(&sc->ipmi_requests_lock);
return (0);
}
diff --git a/sys/dev/ipmi/ipmi_kcs.c b/sys/dev/ipmi/ipmi_kcs.c
index eb5884a86b57..1c586467667b 100644
--- a/sys/dev/ipmi/ipmi_kcs.c
+++ b/sys/dev/ipmi/ipmi_kcs.c
@@ -321,6 +321,8 @@ kcs_polled_request(struct ipmi_softc *sc, struct ipmi_request *req)
u_char *cp, data;
int i, state;
+ IPMI_IO_LOCK(sc);
+
/* Send the request. */
if (!kcs_start_write(sc)) {
device_printf(sc->ipmi_dev, "KCS: Failed to start write\n");
@@ -444,6 +446,7 @@ kcs_polled_request(struct ipmi_softc *sc, struct ipmi_request *req)
}
i++;
}
+ IPMI_IO_UNLOCK(sc);
req->ir_replylen = i;
#ifdef KCS_DEBUG
device_printf(sc->ipmi_dev, "KCS: READ finished (%d bytes)\n", i);
@@ -457,6 +460,7 @@ kcs_polled_request(struct ipmi_softc *sc, struct ipmi_request *req)
return (1);
fail:
kcs_error(sc);
+ IPMI_IO_UNLOCK(sc);
return (0);
}
@@ -492,6 +496,21 @@ kcs_startup(struct ipmi_softc *sc)
device_get_nameunit(sc->ipmi_dev)));
}
+static int
+kcs_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+{
+ int i, ok;
+
+ ok = 0;
+ for (i = 0; i < 3 && !ok; i++)
+ ok = kcs_polled_request(sc, req);
+ if (ok)
+ req->ir_error = 0;
+ else
+ req->ir_error = EIO;
+ return (req->ir_error);
+}
+
int
ipmi_kcs_attach(struct ipmi_softc *sc)
{
@@ -500,6 +519,7 @@ ipmi_kcs_attach(struct ipmi_softc *sc)
/* Setup function pointers. */
sc->ipmi_startup = kcs_startup;
sc->ipmi_enqueue_request = ipmi_polled_enqueue_request;
+ sc->ipmi_driver_request = kcs_driver_request;
/* See if we can talk to the controller. */
status = INB(sc, KCS_CTL_STS);
diff --git a/sys/dev/ipmi/ipmi_smic.c b/sys/dev/ipmi/ipmi_smic.c
index c79c86d574fb..4e26553e98e7 100644
--- a/sys/dev/ipmi/ipmi_smic.c
+++ b/sys/dev/ipmi/ipmi_smic.c
@@ -364,8 +364,11 @@ smic_loop(void *arg)
while ((req = ipmi_dequeue_request(sc)) != NULL) {
IPMI_UNLOCK(sc);
ok = 0;
- for (i = 0; i < 3 && !ok; i++)
+ for (i = 0; i < 3 && !ok; i++) {
+ IPMI_IO_LOCK(sc);
ok = smic_polled_request(sc, req);
+ IPMI_IO_UNLOCK(sc);
+ }
if (ok)
req->ir_error = 0;
else
@@ -385,6 +388,24 @@ smic_startup(struct ipmi_softc *sc)
"%s: smic", device_get_nameunit(sc->ipmi_dev)));
}
+static int
+smic_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+{
+ int i, ok;
+
+ ok = 0;
+ for (i = 0; i < 3 && !ok; i++) {
+ IPMI_IO_LOCK(sc);
+ ok = smic_polled_request(sc, req);
+ IPMI_IO_UNLOCK(sc);
+ }
+ if (ok)
+ req->ir_error = 0;
+ else
+ req->ir_error = EIO;
+ return (req->ir_error);
+}
+
int
ipmi_smic_attach(struct ipmi_softc *sc)
{
@@ -393,6 +414,7 @@ ipmi_smic_attach(struct ipmi_softc *sc)
/* Setup function pointers. */
sc->ipmi_startup = smic_startup;
sc->ipmi_enqueue_request = ipmi_polled_enqueue_request;
+ sc->ipmi_driver_request = smic_driver_request;
/* See if we can talk to the controller. */
flags = INB(sc, SMIC_FLAGS);
diff --git a/sys/dev/ipmi/ipmi_ssif.c b/sys/dev/ipmi/ipmi_ssif.c
index 2256de16e7ef..e5d5d3aa8cdd 100644
--- a/sys/dev/ipmi/ipmi_ssif.c
+++ b/sys/dev/ipmi/ipmi_ssif.c
@@ -359,6 +359,22 @@ ssif_startup(struct ipmi_softc *sc)
"%s: ssif", device_get_nameunit(sc->ipmi_dev)));
}
+static int
+ssif_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+{
+ int error;
+
+ IPMI_LOCK(sc);
+ error = ipmi_polled_enqueue_request(sc, req);
+ if (error == 0)
+ error = msleep(req, &sc->ipmi_requests_lock, 0, "ipmireq",
+ timo);
+ if (error == 0)
+ error = req->ir_error;
+ IPMI_UNLOCK(sc);
+ return (error);
+}
+
int
ipmi_ssif_attach(struct ipmi_softc *sc, device_t smbus, int smbus_address)
{
@@ -370,6 +386,7 @@ ipmi_ssif_attach(struct ipmi_softc *sc, device_t smbus, int smbus_address)
/* Setup function pointers. */
sc->ipmi_startup = ssif_startup;
sc->ipmi_enqueue_request = ipmi_polled_enqueue_request;
+ sc->ipmi_driver_request = ssif_driver_request;
return (0);
}
diff --git a/sys/dev/ipmi/ipmivars.h b/sys/dev/ipmi/ipmivars.h
index 8e9e130d3ca7..9d7dc3220893 100644
--- a/sys/dev/ipmi/ipmivars.h
+++ b/sys/dev/ipmi/ipmivars.h
@@ -95,6 +95,7 @@ struct ipmi_softc {
} _iface;
int ipmi_io_rid;
int ipmi_io_type;
+ struct mtx ipmi_io_lock;
struct resource *ipmi_io_res[MAX_RES];
int ipmi_io_spacing;
int ipmi_irq_rid;
@@ -107,12 +108,13 @@ struct ipmi_softc {
eventhandler_tag ipmi_watchdog_tag;
int ipmi_watchdog_active;
struct intr_config_hook ipmi_ich;
- struct mtx ipmi_lock;
+ struct mtx ipmi_requests_lock;
struct cv ipmi_request_added;
struct proc *ipmi_kthread;
driver_intr_t *ipmi_intr;
int (*ipmi_startup)(struct ipmi_softc *);
int (*ipmi_enqueue_request)(struct ipmi_softc *, struct ipmi_request *);
+ int (*ipmi_driver_request)(struct ipmi_softc *, struct ipmi_request *, int);
};
#define ipmi_ssif_smbus_address _iface.ssif.smbus_address
@@ -183,12 +185,13 @@ struct ipmi_ipmb {
#define IPMI_ADDR(netfn, lun) ((netfn) << 2 | (lun))
#define IPMI_REPLY_ADDR(addr) ((addr) + 0x4)
-#define IPMI_LOCK(sc) mtx_lock(&(sc)->ipmi_lock)
-#define IPMI_UNLOCK(sc) mtx_unlock(&(sc)->ipmi_lock)
-#define IPMI_LOCK_ASSERT(sc) mtx_assert(&(sc)->ipmi_lock, MA_OWNED)
+#define IPMI_LOCK(sc) mtx_lock(&(sc)->ipmi_requests_lock)
+#define IPMI_UNLOCK(sc) mtx_unlock(&(sc)->ipmi_requests_lock)
+#define IPMI_LOCK_ASSERT(sc) mtx_assert(&(sc)->ipmi_requests_lock, MA_OWNED)
-#define ipmi_alloc_driver_request(addr, cmd, reqlen, replylen) \
- ipmi_alloc_request(NULL, 0, (addr), (cmd), (reqlen), (replylen))
+#define IPMI_IO_LOCK(sc) mtx_lock(&(sc)->ipmi_io_lock)
+#define IPMI_IO_UNLOCK(sc) mtx_unlock(&(sc)->ipmi_io_lock)
+#define IPMI_IO_LOCK_ASSERT(sc) mtx_assert(&(sc)->ipmi_io_lock, MA_OWNED)
#if __FreeBSD_version < 601105
#define bus_read_1(r, o) \
diff --git a/sys/dev/iscsi/icl.c b/sys/dev/iscsi/icl.c
index a60313cf857b..2e92f2a4ed91 100644
--- a/sys/dev/iscsi/icl.c
+++ b/sys/dev/iscsi/icl.c
@@ -96,7 +96,7 @@ icl_find(const char *name)
}
TAILQ_FOREACH(im, &sc->sc_modules, im_next) {
- if (strcmp(im->im_name, name) == 0)
+ if (strcasecmp(im->im_name, name) == 0)
return (im);
}
diff --git a/sys/dev/iscsi/icl.h b/sys/dev/iscsi/icl.h
index f667ad66a899..86dfb9ac460e 100644
--- a/sys/dev/iscsi/icl.h
+++ b/sys/dev/iscsi/icl.h
@@ -113,6 +113,7 @@ struct icl_conn {
bool ic_disconnecting;
bool ic_iser;
const char *ic_name;
+ const char *ic_offload;
void (*ic_receive)(struct icl_pdu *);
void (*ic_error)(struct icl_conn *);
diff --git a/sys/dev/iscsi/icl_conn_if.m b/sys/dev/iscsi/icl_conn_if.m
index d3ac57f0420d..1a52882dd649 100644
--- a/sys/dev/iscsi/icl_conn_if.m
+++ b/sys/dev/iscsi/icl_conn_if.m
@@ -85,3 +85,27 @@ METHOD void close {
METHOD bool connected {
struct icl_conn *_ic;
};
+
+METHOD int task_setup {
+ struct icl_conn *_ic;
+ struct ccb_scsiio *_csio;
+ uint32_t *_task_tag;
+ void **_prvp;
+};
+
+METHOD void task_done {
+ struct icl_conn *_ic;
+ void *_prv;
+};
+
+METHOD int transfer_setup {
+ struct icl_conn *_ic;
+ union ctl_io *_io;
+ uint32_t *_transfer_tag;
+ void **_prvp;
+};
+
+METHOD void transfer_done {
+ struct icl_conn *_ic;
+ void *_prv;
+};
diff --git a/sys/dev/iscsi/icl_soft.c b/sys/dev/iscsi/icl_soft.c
index b0c2d40861e9..2dd5e5c40044 100644
--- a/sys/dev/iscsi/icl_soft.c
+++ b/sys/dev/iscsi/icl_soft.c
@@ -29,8 +29,7 @@
*/
/*
- * iSCSI Common Layer. It's used by both the initiator and target to send
- * and receive iSCSI PDUs.
+ * Software implementation of iSCSI Common Layer kobj(9) interface.
*/
#include <sys/cdefs.h>
@@ -99,6 +98,10 @@ static icl_conn_handoff_t icl_soft_conn_handoff;
static icl_conn_free_t icl_soft_conn_free;
static icl_conn_close_t icl_soft_conn_close;
static icl_conn_connected_t icl_soft_conn_connected;
+static icl_conn_task_setup_t icl_soft_conn_task_setup;
+static icl_conn_task_done_t icl_soft_conn_task_done;
+static icl_conn_transfer_setup_t icl_soft_conn_transfer_setup;
+static icl_conn_transfer_done_t icl_soft_conn_transfer_done;
static kobj_method_t icl_soft_methods[] = {
KOBJMETHOD(icl_conn_new_pdu, icl_soft_conn_new_pdu),
@@ -112,13 +115,15 @@ static kobj_method_t icl_soft_methods[] = {
KOBJMETHOD(icl_conn_free, icl_soft_conn_free),
KOBJMETHOD(icl_conn_close, icl_soft_conn_close),
KOBJMETHOD(icl_conn_connected, icl_soft_conn_connected),
+ KOBJMETHOD(icl_conn_task_setup, icl_soft_conn_task_setup),
+ KOBJMETHOD(icl_conn_task_done, icl_soft_conn_task_done),
+ KOBJMETHOD(icl_conn_transfer_setup, icl_soft_conn_transfer_setup),
+ KOBJMETHOD(icl_conn_transfer_done, icl_soft_conn_transfer_done),
{ 0, 0 }
};
DEFINE_CLASS(icl_soft, icl_soft_methods, sizeof(struct icl_conn));
-static void icl_conn_close(struct icl_conn *ic);
-
static void
icl_conn_fail(struct icl_conn *ic)
{
@@ -201,6 +206,7 @@ icl_pdu_free(struct icl_pdu *ip)
void
icl_soft_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
+
icl_pdu_free(ip);
}
@@ -696,7 +702,7 @@ icl_conn_receive_pdu(struct icl_conn *ic, size_t *availablep)
if (error != 0) {
/*
* Don't free the PDU; it's pointed to by ic->ic_receive_pdu
- * and will get freed in icl_conn_close().
+ * and will get freed in icl_soft_conn_close().
*/
icl_conn_fail(ic);
}
@@ -1185,6 +1191,7 @@ icl_soft_new_conn(const char *name, struct mtx *lock)
#endif
ic->ic_max_data_segment_length = ICL_MAX_DATA_SEGMENT_LENGTH;
ic->ic_name = name;
+ ic->ic_offload = "None";
return (ic);
}
@@ -1247,7 +1254,7 @@ icl_conn_start(struct icl_conn *ic)
error = soreserve(ic->ic_socket, sendspace, recvspace);
if (error != 0) {
ICL_WARN("soreserve failed with error %d", error);
- icl_conn_close(ic);
+ icl_soft_conn_close(ic);
return (error);
}
ic->ic_socket->so_snd.sb_flags |= SB_AUTOSIZE;
@@ -1265,7 +1272,7 @@ icl_conn_start(struct icl_conn *ic)
error = sosetopt(ic->ic_socket, &opt);
if (error != 0) {
ICL_WARN("disabling TCP_NODELAY failed with error %d", error);
- icl_conn_close(ic);
+ icl_soft_conn_close(ic);
return (error);
}
@@ -1276,7 +1283,7 @@ icl_conn_start(struct icl_conn *ic)
ic->ic_name);
if (error != 0) {
ICL_WARN("kthread_add(9) failed with error %d", error);
- icl_conn_close(ic);
+ icl_soft_conn_close(ic);
return (error);
}
@@ -1284,7 +1291,7 @@ icl_conn_start(struct icl_conn *ic)
ic->ic_name);
if (error != 0) {
ICL_WARN("kthread_add(9) failed with error %d", error);
- icl_conn_close(ic);
+ icl_soft_conn_close(ic);
return (error);
}
@@ -1349,7 +1356,7 @@ icl_soft_conn_handoff(struct icl_conn *ic, int fd)
}
void
-icl_conn_close(struct icl_conn *ic)
+icl_soft_conn_close(struct icl_conn *ic)
{
struct icl_pdu *pdu;
@@ -1418,13 +1425,6 @@ icl_conn_close(struct icl_conn *ic)
ICL_CONN_UNLOCK(ic);
}
-void
-icl_soft_conn_close(struct icl_conn *ic)
-{
-
- icl_conn_close(ic);
-}
-
bool
icl_soft_conn_connected(struct icl_conn *ic)
{
@@ -1443,6 +1443,32 @@ icl_soft_conn_connected(struct icl_conn *ic)
return (true);
}
+int
+icl_soft_conn_task_setup(struct icl_conn *ic, struct ccb_scsiio *csio,
+ uint32_t *task_tagp, void **prvp)
+{
+
+ return (0);
+}
+
+void
+icl_soft_conn_task_done(struct icl_conn *ic, void *prv)
+{
+}
+
+int
+icl_soft_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
+ uint32_t *transfer_tag, void **prvp)
+{
+
+ return (0);
+}
+
+void
+icl_soft_conn_transfer_done(struct icl_conn *ic, void *prv)
+{
+}
+
static int
icl_soft_limits(size_t *limitp)
{
diff --git a/sys/dev/iscsi/icl_wrappers.h b/sys/dev/iscsi/icl_wrappers.h
index 22cf0a6e2214..2cf7d96446fc 100644
--- a/sys/dev/iscsi/icl_wrappers.h
+++ b/sys/dev/iscsi/icl_wrappers.h
@@ -112,4 +112,34 @@ icl_conn_connected(struct icl_conn *ic)
return (ICL_CONN_CONNECTED(ic));
}
+static inline int
+icl_conn_task_setup(struct icl_conn *ic, struct ccb_scsiio *csio,
+ uint32_t *task_tagp, void **prvp)
+{
+
+ return (ICL_CONN_TASK_SETUP(ic, csio, task_tagp, prvp));
+}
+
+static inline void
+icl_conn_task_done(struct icl_conn *ic, void *prv)
+{
+
+ ICL_CONN_TASK_DONE(ic, prv);
+}
+
+static inline int
+icl_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
+ uint32_t *transfer_tagp, void **prvp)
+{
+
+ return (ICL_CONN_TRANSFER_SETUP(ic, io, transfer_tagp, prvp));
+}
+
+static inline void
+icl_conn_transfer_done(struct icl_conn *ic, void *prv)
+{
+
+ ICL_CONN_TRANSFER_DONE(ic, prv);
+}
+
#endif /* !ICL_WRAPPERS_H */
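
The initiator-side usage of the new task hooks appears in the iscsi.c diff below; condensed, the flow looks roughly like this (sketch only, with locking, PDU construction, and the session's KASSERTs elided):

static struct iscsi_outstanding *
outstanding_add_sketch(struct iscsi_session *is, union ccb *ccb,
    uint32_t *tagp)
{
	struct iscsi_outstanding *io;

	io = uma_zalloc(iscsi_outstanding_zone, M_NOWAIT | M_ZERO);
	if (io == NULL)
		return (NULL);
	/* Offload-capable backends may rewrite the tag and stash state. */
	if (icl_conn_task_setup(is->is_conn, &ccb->csio, tagp,
	    &io->io_icl_prv) != 0) {
		uma_zfree(iscsi_outstanding_zone, io);
		return (NULL);
	}
	io->io_initiator_task_tag = *tagp;
	io->io_ccb = ccb;
	return (io);
}

/* On completion or abort the backend state is released symmetrically: */
/*	icl_conn_task_done(is->is_conn, io->io_icl_prv);	*/
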
diff --git a/sys/dev/iscsi/iscsi.c b/sys/dev/iscsi/iscsi.c
index 8f07620a697d..5188ac827664 100644
--- a/sys/dev/iscsi/iscsi.c
+++ b/sys/dev/iscsi/iscsi.c
@@ -169,7 +169,7 @@ static void iscsi_poll(struct cam_sim *sim);
static struct iscsi_outstanding *iscsi_outstanding_find(struct iscsi_session *is,
uint32_t initiator_task_tag);
static struct iscsi_outstanding *iscsi_outstanding_add(struct iscsi_session *is,
- uint32_t initiator_task_tag, union ccb *ccb);
+ union ccb *ccb, uint32_t *initiator_task_tagp);
static void iscsi_outstanding_remove(struct iscsi_session *is,
struct iscsi_outstanding *io);
@@ -421,6 +421,7 @@ iscsi_maintenance_thread_terminate(struct iscsi_session *is)
sx_xunlock(&sc->sc_lock);
icl_conn_close(is->is_conn);
+ callout_drain(&is->is_callout);
ISCSI_SESSION_LOCK(is);
@@ -434,8 +435,6 @@ iscsi_maintenance_thread_terminate(struct iscsi_session *is)
cv_signal(&is->is_login_cv);
#endif
- callout_drain(&is->is_callout);
-
iscsi_session_cleanup(is, true);
KASSERT(TAILQ_EMPTY(&is->is_outstanding),
@@ -511,6 +510,7 @@ iscsi_session_reconnect(struct iscsi_session *is)
static void
iscsi_session_terminate(struct iscsi_session *is)
{
+
if (is->is_terminating)
return;
@@ -532,12 +532,14 @@ iscsi_callout(void *context)
is = context;
- if (is->is_terminating)
+ ISCSI_SESSION_LOCK(is);
+ if (is->is_terminating) {
+ ISCSI_SESSION_UNLOCK(is);
return;
+ }
callout_schedule(&is->is_callout, 1 * hz);
- ISCSI_SESSION_LOCK(is);
is->is_timeout++;
if (is->is_waiting_for_iscsid) {
@@ -1306,6 +1308,16 @@ iscsi_ioctl_daemon_wait(struct iscsi_softc *sc,
request->idr_tsih = 0; /* New or reinstated session. */
memcpy(&request->idr_conf, &is->is_conf,
sizeof(request->idr_conf));
+
+ error = icl_limits(is->is_conf.isc_offload,
+ &request->idr_limits.isl_max_data_segment_length);
+ if (error != 0) {
+ ISCSI_SESSION_WARN(is, "icl_limits for offload \"%s\" "
+ "failed with error %d", is->is_conf.isc_offload,
+ error);
+ sx_sunlock(&sc->sc_lock);
+ return (error);
+ }
sx_sunlock(&sc->sc_lock);
return (0);
@@ -1731,7 +1743,13 @@ iscsi_ioctl_session_add(struct iscsi_softc *sc, struct iscsi_session_add *isa)
return (EBUSY);
}
- is->is_conn = icl_new_conn(NULL, "iscsi", &is->is_lock);
+ is->is_conn = icl_new_conn(is->is_conf.isc_offload,
+ "iscsi", &is->is_lock);
+ if (is->is_conn == NULL) {
+ sx_xunlock(&sc->sc_lock);
+ free(is, M_ISCSI);
+ return (EINVAL);
+ }
is->is_conn->ic_receive = iscsi_receive_callback;
is->is_conn->ic_error = iscsi_error_callback;
is->is_conn->ic_prv0 = is;
@@ -1750,15 +1768,17 @@ iscsi_ioctl_session_add(struct iscsi_softc *sc, struct iscsi_session_add *isa)
arc4rand(&is->is_isid[1], 5, 0);
is->is_tsih = 0;
callout_init(&is->is_callout, 1);
- callout_reset(&is->is_callout, 1 * hz, iscsi_callout, is);
- TAILQ_INSERT_TAIL(&sc->sc_sessions, is, is_next);
error = kthread_add(iscsi_maintenance_thread, is, NULL, NULL, 0, 0, "iscsimt");
if (error != 0) {
ISCSI_SESSION_WARN(is, "kthread_add(9) failed with error %d", error);
+ sx_xunlock(&sc->sc_lock);
return (error);
}
+ callout_reset(&is->is_callout, 1 * hz, iscsi_callout, is);
+ TAILQ_INSERT_TAIL(&sc->sc_sessions, is, is_next);
+
/*
* Trigger immediate reconnection.
*/
@@ -1836,6 +1856,7 @@ iscsi_ioctl_session_list(struct iscsi_softc *sc, struct iscsi_session_list *isl)
iss.iss_id = is->is_id;
strlcpy(iss.iss_target_alias, is->is_target_alias, sizeof(iss.iss_target_alias));
strlcpy(iss.iss_reason, is->is_reason, sizeof(iss.iss_reason));
+ strlcpy(iss.iss_offload, is->is_conn->ic_offload, sizeof(iss.iss_offload));
if (is->is_conn->ic_header_crc32c)
iss.iss_header_digest = ISCSI_DIGEST_CRC32C;
@@ -1972,21 +1993,33 @@ iscsi_outstanding_find_ccb(struct iscsi_session *is, union ccb *ccb)
static struct iscsi_outstanding *
iscsi_outstanding_add(struct iscsi_session *is,
- uint32_t initiator_task_tag, union ccb *ccb)
+ union ccb *ccb, uint32_t *initiator_task_tagp)
{
struct iscsi_outstanding *io;
+ int error;
ISCSI_SESSION_LOCK_ASSERT(is);
- KASSERT(iscsi_outstanding_find(is, initiator_task_tag) == NULL,
- ("initiator_task_tag 0x%x already added", initiator_task_tag));
-
io = uma_zalloc(iscsi_outstanding_zone, M_NOWAIT | M_ZERO);
if (io == NULL) {
- ISCSI_SESSION_WARN(is, "failed to allocate %zd bytes", sizeof(*io));
+ ISCSI_SESSION_WARN(is, "failed to allocate %zd bytes",
+ sizeof(*io));
return (NULL);
}
- io->io_initiator_task_tag = initiator_task_tag;
+
+ error = icl_conn_task_setup(is->is_conn, &ccb->csio,
+ initiator_task_tagp, &io->io_icl_prv);
+ if (error != 0) {
+ ISCSI_SESSION_WARN(is,
+ "icl_conn_task_setup() failed with error %d", error);
+ uma_zfree(iscsi_outstanding_zone, io);
+ return (NULL);
+ }
+
+ KASSERT(iscsi_outstanding_find(is, *initiator_task_tagp) == NULL,
+ ("initiator_task_tag 0x%x already added", *initiator_task_tagp));
+
+ io->io_initiator_task_tag = *initiator_task_tagp;
io->io_ccb = ccb;
TAILQ_INSERT_TAIL(&is->is_outstanding, io, io_next);
return (io);
@@ -1998,6 +2031,7 @@ iscsi_outstanding_remove(struct iscsi_session *is, struct iscsi_outstanding *io)
ISCSI_SESSION_LOCK_ASSERT(is);
+ icl_conn_task_done(is->is_conn, io->io_icl_prv);
TAILQ_REMOVE(&is->is_outstanding, io, io_next);
uma_zfree(iscsi_outstanding_zone, io);
}
@@ -2009,6 +2043,7 @@ iscsi_action_abort(struct iscsi_session *is, union ccb *ccb)
struct iscsi_bhs_task_management_request *bhstmr;
struct ccb_abort *cab = &ccb->cab;
struct iscsi_outstanding *io, *aio;
+ uint32_t initiator_task_tag;
ISCSI_SESSION_LOCK_ASSERT(is);
@@ -2036,16 +2071,9 @@ iscsi_action_abort(struct iscsi_session *is, union ccb *ccb)
return;
}
- bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
- bhstmr->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_REQUEST;
- bhstmr->bhstmr_function = 0x80 | BHSTMR_FUNCTION_ABORT_TASK;
-
- bhstmr->bhstmr_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
- bhstmr->bhstmr_initiator_task_tag = is->is_initiator_task_tag;
- is->is_initiator_task_tag++;
- bhstmr->bhstmr_referenced_task_tag = aio->io_initiator_task_tag;
+ initiator_task_tag = is->is_initiator_task_tag++;
- io = iscsi_outstanding_add(is, bhstmr->bhstmr_initiator_task_tag, NULL);
+ io = iscsi_outstanding_add(is, NULL, &initiator_task_tag);
if (io == NULL) {
icl_pdu_free(request);
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
@@ -2053,6 +2081,14 @@ iscsi_action_abort(struct iscsi_session *is, union ccb *ccb)
return;
}
io->io_datasn = aio->io_initiator_task_tag;
+
+ bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
+ bhstmr->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_REQUEST;
+ bhstmr->bhstmr_function = 0x80 | BHSTMR_FUNCTION_ABORT_TASK;
+ bhstmr->bhstmr_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
+ bhstmr->bhstmr_initiator_task_tag = initiator_task_tag;
+ bhstmr->bhstmr_referenced_task_tag = aio->io_initiator_task_tag;
+
iscsi_pdu_queue_locked(request);
}
@@ -2064,6 +2100,7 @@ iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb)
struct ccb_scsiio *csio;
struct iscsi_outstanding *io;
size_t len;
+ uint32_t initiator_task_tag;
int error;
ISCSI_SESSION_LOCK_ASSERT(is);
@@ -2094,6 +2131,19 @@ iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb)
return;
}
+ initiator_task_tag = is->is_initiator_task_tag++;
+ io = iscsi_outstanding_add(is, ccb, &initiator_task_tag);
+ if (io == NULL) {
+ icl_pdu_free(request);
+ if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ ISCSI_SESSION_DEBUG(is, "freezing devq");
+ }
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN;
+ xpt_done(ccb);
+ return;
+ }
+
csio = &ccb->csio;
bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs;
bhssc->bhssc_opcode = ISCSI_BHS_OPCODE_SCSI_COMMAND;
@@ -2127,8 +2177,7 @@ iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb)
bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_UNTAGGED;
bhssc->bhssc_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
- bhssc->bhssc_initiator_task_tag = is->is_initiator_task_tag;
- is->is_initiator_task_tag++;
+ bhssc->bhssc_initiator_task_tag = initiator_task_tag;
bhssc->bhssc_expected_data_transfer_length = htonl(csio->dxfer_len);
KASSERT(csio->cdb_len <= sizeof(bhssc->bhssc_cdb),
("unsupported CDB size %zd", (size_t)csio->cdb_len));
@@ -2138,18 +2187,6 @@ iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb)
else
memcpy(&bhssc->bhssc_cdb, csio->cdb_io.cdb_bytes, csio->cdb_len);
- io = iscsi_outstanding_add(is, bhssc->bhssc_initiator_task_tag, ccb);
- if (io == NULL) {
- icl_pdu_free(request);
- if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
- xpt_freeze_devq(ccb->ccb_h.path, 1);
- ISCSI_SESSION_DEBUG(is, "freezing devq");
- }
- ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN;
- xpt_done(ccb);
- return;
- }
-
if (is->is_immediate_data &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
len = csio->dxfer_len;
diff --git a/sys/dev/iscsi/iscsi.h b/sys/dev/iscsi/iscsi.h
index 0fe1515c2b6d..fd52fa87a954 100644
--- a/sys/dev/iscsi/iscsi.h
+++ b/sys/dev/iscsi/iscsi.h
@@ -45,6 +45,7 @@ struct iscsi_outstanding {
size_t io_received;
uint32_t io_initiator_task_tag;
uint32_t io_datasn;
+ void *io_icl_prv;
};
struct iscsi_session {
diff --git a/sys/dev/iscsi/iscsi_ioctl.h b/sys/dev/iscsi/iscsi_ioctl.h
index b7cb47d4f975..1dd7b0a2d6b2 100644
--- a/sys/dev/iscsi/iscsi_ioctl.h
+++ b/sys/dev/iscsi/iscsi_ioctl.h
@@ -43,6 +43,7 @@
#define ISCSI_ADDR_LEN 47 /* INET6_ADDRSTRLEN + '\0' */
#define ISCSI_ALIAS_LEN 256 /* XXX: Where did it come from? */
#define ISCSI_SECRET_LEN 17 /* 16 + '\0' */
+#define ISCSI_OFFLOAD_LEN 8
#define ISCSI_REASON_LEN 64
#define ISCSI_DIGEST_NONE 0
@@ -65,7 +66,16 @@ struct iscsi_session_conf {
int isc_header_digest;
int isc_data_digest;
int isc_iser;
- int isc_spare[4];
+ char isc_offload[ISCSI_OFFLOAD_LEN];
+ int isc_spare[2];
+};
+
+/*
+ * Additional constraints imposed by the chosen ICL offload module;
+ * iscsid(8) must obey them when negotiating operational parameters.
+ */
+struct iscsi_session_limits {
+ size_t isl_max_data_segment_length;
};
/*
@@ -81,20 +91,21 @@ struct iscsi_session_state {
int iss_immediate_data;
int iss_connected;
char iss_reason[ISCSI_REASON_LEN];
- int iss_spare[4];
+ char iss_offload[ISCSI_OFFLOAD_LEN];
+ int iss_spare[2];
};
/*
- * For use with iscsid(8).
+ * The following ioctls are used by iscsid(8).
*/
-
struct iscsi_daemon_request {
unsigned int idr_session_id;
struct iscsi_session_conf idr_conf;
uint8_t idr_isid[6];
uint16_t idr_tsih;
uint16_t idr_spare_cid;
- int idr_spare[4];
+ struct iscsi_session_limits idr_limits;
+ int idr_spare[2];
};
struct iscsi_daemon_handoff {
@@ -182,9 +193,8 @@ struct iscsi_daemon_receive {
#endif /* ICL_KERNEL_PROXY */
/*
- * For use with iscsictl(8).
+ * The following ioctls are used by iscsictl(8).
*/
-
struct iscsi_session_add {
struct iscsi_session_conf isa_conf;
int isa_spare[4];
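
A sketch of how the userland daemon is expected to consume idr_limits when negotiating (the helper and variable names are hypothetical; the real logic lives in iscsid(8), not in this patch):

static size_t
negotiated_max_data_segment_length(const struct iscsi_daemon_request *idr,
    size_t target_offer)
{
	size_t limit = idr->idr_limits.isl_max_data_segment_length;

	/* Never accept or advertise more than the offload module can handle. */
	return (target_offer < limit ? target_offer : limit);
}
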
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index b7e90e9da4ba..a3aa04eea913 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -165,7 +165,7 @@ static void
malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val)
{
DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n",
- __func__, (intmax_t)off, val);
+ __func__, (uintmax_t)off, val);
bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val);
}
@@ -510,9 +510,10 @@ malo_desc_setup(struct malo_softc *sc, const char *name,
ds = dd->dd_desc;
memset(ds, 0, dd->dd_desc_len);
- DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
+ DPRINTF(sc, MALO_DEBUG_RESET,
+ "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
__func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
- (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
+ (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
return 0;
fail2:
@@ -877,10 +878,9 @@ malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix)
const struct malo_rxdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->status);
- printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
+ printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
" STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x"
- " RATE:%02x QOS:%04x\n",
- ix, ds, (const struct malo_desc *)bf->bf_daddr,
+ " RATE:%02x QOS:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr,
le32toh(ds->physnext), le32toh(ds->physbuffdata),
ds->rxcontrol,
ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ?
@@ -896,8 +896,7 @@ malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix)
uint32_t status = le32toh(ds->status);
printf("Q%u[%3u]", qnum, ix);
- printf(" (DS.V:%p DS.P:%p)\n",
- ds, (const struct malo_txdesc *)bf->bf_daddr);
+ printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
le32toh(ds->physnext),
le32toh(ds->pktptr), le16toh(ds->pktlen), status,
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 3ea24749421b..89a39c073f66 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -2056,9 +2056,10 @@ mwl_desc_setup(struct mwl_softc *sc, const char *name,
ds = dd->dd_desc;
memset(ds, 0, dd->dd_desc_len);
- DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
+ DPRINTF(sc, MWL_DEBUG_RESET,
+ "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
__func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
- (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
+ (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
return 0;
fail2:
@@ -4688,11 +4689,10 @@ mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
const struct mwl_rxdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->Status);
- printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
+ printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
" STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
- ix, ds, (const struct mwl_desc *)bf->bf_daddr,
- le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
- ds->RxControl,
+ ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
+ le32toh(ds->pPhysBuffData), ds->RxControl,
ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
"" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
@@ -4706,8 +4706,7 @@ mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
uint32_t status = le32toh(ds->Status);
printf("Q%u[%3u]", qnum, ix);
- printf(" (DS.V:%p DS.P:%p)\n",
- ds, (const struct mwl_txdesc *)bf->bf_daddr);
+ printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
le32toh(ds->pPhysNext),
le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 8f87851da6ac..263904bc1783 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
+#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
@@ -4824,8 +4825,8 @@ pci_child_location_str_method(device_t dev, device_t child, char *buf,
size_t buflen)
{
- snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
- pci_get_function(child));
+ snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child),
+ pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
return (0);
}
@@ -4855,10 +4856,60 @@ pci_assign_interrupt_method(device_t dev, device_t child)
cfg->intpin));
}
+static void
+pci_lookup(void *arg, const char *name, device_t *dev)
+{
+ long val;
+ char *end;
+ int domain, bus, slot, func;
+
+ if (*dev != NULL)
+ return;
+
+ /*
+ * Accept pciconf-style selectors of either pciD:B:S:F or
+ * pciB:S:F. In the latter case, the domain is assumed to
+ * be zero.
+ */
+ if (strncmp(name, "pci", 3) != 0)
+ return;
+ val = strtol(name + 3, &end, 10);
+ if (val < 0 || val > INT_MAX || *end != ':')
+ return;
+ domain = val;
+ val = strtol(end + 1, &end, 10);
+ if (val < 0 || val > INT_MAX || *end != ':')
+ return;
+ bus = val;
+ val = strtol(end + 1, &end, 10);
+ if (val < 0 || val > INT_MAX)
+ return;
+ slot = val;
+ if (*end == ':') {
+ val = strtol(end + 1, &end, 10);
+ if (val < 0 || val > INT_MAX || *end != '\0')
+ return;
+ func = val;
+ } else if (*end == '\0') {
+ func = slot;
+ slot = bus;
+ bus = domain;
+ domain = 0;
+ } else
+ return;
+
+ if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
+ func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
+ return;
+
+ *dev = pci_find_dbsf(domain, bus, slot, func);
+}
+
static int
pci_modevent(module_t mod, int what, void *arg)
{
static struct cdev *pci_cdev;
+ static eventhandler_tag tag;
switch (what) {
case MOD_LOAD:
@@ -4867,9 +4918,13 @@ pci_modevent(module_t mod, int what, void *arg)
pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
"pci");
pci_load_vendor_data();
+ tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
+ 1000);
break;
case MOD_UNLOAD:
+ if (tag != NULL)
+ EVENTHANDLER_DEREGISTER(dev_lookup, tag);
destroy_dev(pci_cdev);
break;
}
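
To make the two selector forms accepted by pci_lookup() concrete (example values chosen arbitrarily): "pci0:2:0:1" selects domain 0, bus 2, slot 0, function 1, while the three-component "pci2:0:1" selects the same device, since the parsed values are shifted over and the domain defaults to 0.
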
diff --git a/sys/dev/ral/if_ral_pci.c b/sys/dev/ral/if_ral_pci.c
index d3a269210a3f..519b4ca30d78 100644
--- a/sys/dev/ral/if_ral_pci.c
+++ b/sys/dev/ral/if_ral_pci.c
@@ -93,7 +93,10 @@ static const struct ral_pci_ident ral_pci_ids[] = {
{ 0x1814, 0x3562, "Ralink Technology RT3562" },
{ 0x1814, 0x3592, "Ralink Technology RT3592" },
{ 0x1814, 0x3593, "Ralink Technology RT3593" },
+ { 0x1814, 0x5360, "Ralink Technology RT5390" },
+ { 0x1814, 0x5362, "Ralink Technology RT5392" },
{ 0x1814, 0x5390, "Ralink Technology RT5390" },
+ { 0x1814, 0x5392, "Ralink Technology RT5392" },
{ 0x1814, 0x539a, "Ralink Technology RT5390" },
{ 0x1814, 0x539f, "Ralink Technology RT5390" },
{ 0x1a3b, 0x1059, "AWT RT2890" },
diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c
index 2e68c2e505ad..b29a0eb60aaf 100644
--- a/sys/dev/ral/rt2860.c
+++ b/sys/dev/ral/rt2860.c
@@ -21,7 +21,7 @@
__FBSDID("$FreeBSD$");
/*-
- * Ralink Technology RT2860/RT3090/RT3390/RT3562 chipset driver
+ * Ralink Technology RT2860/RT3090/RT3390/RT3562/RT5390/RT5392 chipset driver
* http://www.ralinktech.com/
*/
@@ -142,8 +142,11 @@ static void rt2860_set_channel(struct ieee80211com *);
static void rt2860_select_chan_group(struct rt2860_softc *, int);
static void rt2860_set_chan(struct rt2860_softc *, u_int);
static void rt3090_set_chan(struct rt2860_softc *, u_int);
+static void rt5390_set_chan(struct rt2860_softc *, u_int);
static int rt3090_rf_init(struct rt2860_softc *);
+static void rt5390_rf_init(struct rt2860_softc *);
static void rt3090_rf_wakeup(struct rt2860_softc *);
+static void rt5390_rf_wakeup(struct rt2860_softc *);
static int rt3090_filter_calib(struct rt2860_softc *, uint8_t, uint8_t,
uint8_t *);
static void rt3090_rf_setup(struct rt2860_softc *);
@@ -166,6 +169,7 @@ static const char *rt2860_get_rf(uint8_t);
static int rt2860_read_eeprom(struct rt2860_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static int rt2860_bbp_init(struct rt2860_softc *);
+static void rt5390_bbp_init(struct rt2860_softc *);
static int rt2860_txrx_enable(struct rt2860_softc *);
static void rt2860_init(void *);
static void rt2860_init_locked(struct rt2860_softc *);
@@ -194,6 +198,8 @@ static const struct {
uint8_t val;
} rt2860_def_bbp[] = {
RT2860_DEF_BBP
+}, rt5390_def_bbp[] = {
+ RT5390_DEF_BBP
};
static const struct rfprog {
@@ -212,8 +218,12 @@ struct {
static const struct {
uint8_t reg;
uint8_t val;
-} rt3090_def_rf[] = {
+} rt3090_def_rf[] = {
RT3070_DEF_RF
+}, rt5390_def_rf[] = {
+ RT5390_DEF_RF
+}, rt5392_def_rf[] = {
+ RT5392_DEF_RF
};
int
@@ -264,12 +274,10 @@ rt2860_attach(device_t dev, int id)
/* retrieve RF rev. no and various other things from EEPROM */
rt2860_read_eeprom(sc, macaddr);
- if (bootverbose) {
- device_printf(sc->sc_dev, "MAC/BBP RT%X (rev 0x%04X), "
- "RF %s (MIMO %dT%dR), address %6D\n",
- sc->mac_ver, sc->mac_rev, rt2860_get_rf(sc->rf_rev),
- sc->ntxchains, sc->nrxchains, macaddr, ":");
- }
+ device_printf(sc->sc_dev, "MAC/BBP RT%X (rev 0x%04X), "
+ "RF %s (MIMO %dT%dR), address %6D\n",
+ sc->mac_ver, sc->mac_rev, rt2860_get_rf(sc->rf_rev),
+ sc->ntxchains, sc->nrxchains, macaddr, ":");
/*
* Allocate Tx (4 EDCAs + HCCA + Mgt) and Rx rings.
@@ -2082,7 +2090,7 @@ rt2860_mcu_bbp_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val)
}
if (ntries == 100) {
device_printf(sc->sc_dev,
- "could not write to BBP through MCU\n");
+ "could not write to BBP through MCU\n");
return;
}
@@ -2562,10 +2570,110 @@ rt3090_set_chan(struct rt2860_softc *sc, u_int chan)
rt3090_rf_write(sc, 7, rf | RT3070_TUNE);
}
+static void
+rt5390_set_chan(struct rt2860_softc *sc, u_int chan)
+{
+ uint8_t h20mhz, rf, tmp;
+ int8_t txpow1, txpow2;
+ int i;
+
+ /* RT5390 is 2GHz only */
+ KASSERT(chan >= 1 && chan <= 14, ("chan %d not supported", chan));
+
+ /* find the settings for this channel (we know it exists) */
+ for (i = 0; rt2860_rf2850[i].chan != chan; i++);
+
+ /* use Tx power values from EEPROM */
+ txpow1 = sc->txpow1[i];
+ txpow2 = sc->txpow2[i];
+
+ rt3090_rf_write(sc, 8, rt3090_freqs[i].n);
+ rt3090_rf_write(sc, 9, rt3090_freqs[i].k & 0x0f);
+ rf = rt3090_rf_read(sc, 11);
+ rf = (rf & ~0x03) | (rt3090_freqs[i].r & 0x03);
+ rt3090_rf_write(sc, 11, rf);
+
+ rf = rt3090_rf_read(sc, 49);
+ rf = (rf & ~0x3f) | (txpow1 & 0x3f);
+ /* the valid range of the RF R49 is 0x00~0x27 */
+ if ((rf & 0x3f) > 0x27)
+ rf = (rf & ~0x3f) | 0x27;
+ rt3090_rf_write(sc, 49, rf);
+ if (sc->mac_ver == 0x5392) {
+ rf = rt3090_rf_read(sc, 50);
+ rf = (rf & ~0x3f) | (txpow2 & 0x3f);
+ /* the valid range of the RF R50 is 0x00~0x27 */
+ if ((rf & 0x3f) > 0x27)
+ rf = (rf & ~0x3f) | 0x27;
+ rt3090_rf_write(sc, 50, rf);
+ }
+
+ rf = rt3090_rf_read(sc, 1);
+ rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD;
+ if (sc->mac_ver == 0x5392)
+ rf |= RT3070_RX1_PD | RT3070_TX1_PD;
+ rt3090_rf_write(sc, 1, rf);
+
+ rf = rt3090_rf_read(sc, 2);
+ rt3090_rf_write(sc, 2, rf | RT3593_RESCAL);
+ DELAY(1000);
+ rt3090_rf_write(sc, 2, rf & ~RT3593_RESCAL);
+
+ rf = rt3090_rf_read(sc, 17);
+ tmp = rf;
+ rf = (rf & ~0x7f) | (sc->freq & 0x7f);
+ rf = MIN(rf, 0x5f);
+ if (tmp != rf)
+ rt2860_mcu_cmd(sc, 0x74, (tmp << 8 ) | rf, 0);
+
+ if (sc->mac_ver == 0x5390) {
+ if (chan <= 4)
+ rf = 0x73;
+ else if (chan >= 5 && chan <= 6)
+ rf = 0x63;
+ else if (chan >= 7 && chan <= 10)
+ rf = 0x53;
+ else
+ rf = 0x43;
+ rt3090_rf_write(sc, 55, rf);
+
+ if (chan == 1)
+ rf = 0x0c;
+ else if (chan == 2)
+ rf = 0x0b;
+ else if (chan == 3)
+ rf = 0x0a;
+ else if (chan >= 4 && chan <= 6)
+ rf = 0x09;
+ else if (chan >= 7 && chan <= 12)
+ rf = 0x08;
+ else if (chan == 13)
+ rf = 0x07;
+ else
+ rf = 0x06;
+ rt3090_rf_write(sc, 59, rf);
+ }
+
+ /* Tx/Rx h20M */
+ h20mhz = (sc->rf24_20mhz & 0x20) >> 5;
+ rf = rt3090_rf_read(sc, 30);
+ rf = (rf & ~0x06) | (h20mhz << 1) | (h20mhz << 2);
+ rt3090_rf_write(sc, 30, rf);
+
+ /* Rx BB filter VCM */
+ rf = rt3090_rf_read(sc, 30);
+ rf = (rf & ~0x18) | 0x10;
+ rt3090_rf_write(sc, 30, rf);
+
+ /* Initiate VCO calibration. */
+ rf = rt3090_rf_read(sc, 3);
+ rf |= RT3593_VCOCAL;
+ rt3090_rf_write(sc, 3, rf);
+}
+
static int
rt3090_rf_init(struct rt2860_softc *sc)
{
-#define N(a) (sizeof (a) / sizeof ((a)[0]))
uint32_t tmp;
uint8_t rf, bbp;
int i;
@@ -2589,7 +2697,7 @@ rt3090_rf_init(struct rt2860_softc *sc)
RAL_WRITE(sc, RT3070_GPIO_SWITCH, tmp & ~0x20);
/* initialize RF registers to default value */
- for (i = 0; i < N(rt3090_def_rf); i++) {
+ for (i = 0; i < nitems(rt3090_def_rf); i++) {
rt3090_rf_write(sc, rt3090_def_rf[i].reg,
rt3090_def_rf[i].val);
}
@@ -2668,11 +2776,79 @@ rt3090_rf_init(struct rt2860_softc *sc)
rf = rt3090_rf_read(sc, 21);
rt3090_rf_write(sc, 21, rf & ~RT3070_RX_LO2);
- return 0;
-#undef N
+ return (0);
}
-void
+static void
+rt5390_rf_init(struct rt2860_softc *sc)
+{
+ uint8_t rf, bbp;
+ int i;
+
+ rf = rt3090_rf_read(sc, 2);
+ /* Toggle RF R2 bit 7. */
+ rt3090_rf_write(sc, 2, rf | RT3593_RESCAL);
+ DELAY(1000);
+ rt3090_rf_write(sc, 2, rf & ~RT3593_RESCAL);
+
+ /* Initialize RF registers to default value. */
+ if (sc->mac_ver == 0x5392) {
+ for (i = 0; i < nitems(rt5392_def_rf); i++) {
+ rt3090_rf_write(sc, rt5392_def_rf[i].reg,
+ rt5392_def_rf[i].val);
+ }
+ } else {
+ for (i = 0; i < nitems(rt5390_def_rf); i++) {
+ rt3090_rf_write(sc, rt5390_def_rf[i].reg,
+ rt5390_def_rf[i].val);
+ }
+ }
+
+ sc->rf24_20mhz = 0x1f;
+ sc->rf24_40mhz = 0x2f;
+
+ if (sc->mac_rev < 0x0211)
+ rt3090_rf_write(sc, 27, 0x03);
+
+	/* Set LED open-drain enable. */
+ RAL_WRITE(sc, RT3070_OPT_14, RAL_READ(sc, RT3070_OPT_14) | 1);
+
+ RAL_WRITE(sc, RT2860_TX_SW_CFG1, 0);
+ RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0);
+
+ if (sc->mac_ver == 0x5390)
+ rt3090_set_rx_antenna(sc, 0);
+
+	/* Fix inaccurate RSSI reporting. */
+ rt2860_mcu_bbp_write(sc, 79, 0x13);
+ rt2860_mcu_bbp_write(sc, 80, 0x05);
+ rt2860_mcu_bbp_write(sc, 81, 0x33);
+
+ /* Enable DC filter. */
+ if (sc->mac_rev >= 0x0211)
+ rt2860_mcu_bbp_write(sc, 103, 0xc0);
+
+ bbp = rt2860_mcu_bbp_read(sc, 138);
+ if (sc->ntxchains == 1)
+ bbp |= 0x20; /* Turn off DAC1. */
+ if (sc->nrxchains == 1)
+ bbp &= ~0x02; /* Turn off ADC1. */
+ rt2860_mcu_bbp_write(sc, 138, bbp);
+
+ /* Enable RX LO1 and LO2. */
+ rt3090_rf_write(sc, 38, rt3090_rf_read(sc, 38) & ~RT5390_RX_LO1);
+ rt3090_rf_write(sc, 39, rt3090_rf_read(sc, 39) & ~RT5390_RX_LO2);
+
+	/* Avoid data loss and CRC errors. */
+ rt2860_mcu_bbp_write(sc, 4,
+ rt2860_mcu_bbp_read(sc, 4) | RT5390_MAC_IF_CTRL);
+
+ rf = rt3090_rf_read(sc, 30);
+ rf = (rf & ~0x18) | 0x10;
+ rt3090_rf_write(sc, 30, rf);
+}
+
+static void
rt3090_rf_wakeup(struct rt2860_softc *sc)
{
uint32_t tmp;
@@ -2738,7 +2914,43 @@ rt3090_rf_wakeup(struct rt2860_softc *sc)
}
}
-int
+static void
+rt5390_rf_wakeup(struct rt2860_softc *sc)
+{
+ uint32_t tmp;
+ uint8_t rf;
+
+ rf = rt3090_rf_read(sc, 1);
+ rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD |
+ RT3070_TX0_PD;
+ if (sc->mac_ver == 0x5392)
+ rf |= RT3070_RX1_PD | RT3070_TX1_PD;
+ rt3090_rf_write(sc, 1, rf);
+
+ rf = rt3090_rf_read(sc, 6);
+ rf |= RT3593_VCO_IC | RT3593_VCOCAL;
+ if (sc->mac_ver == 0x5390)
+ rf &= ~RT3593_VCO_IC;
+ rt3090_rf_write(sc, 6, rf);
+
+ rt3090_rf_write(sc, 2, rt3090_rf_read(sc, 2) | RT3593_RESCAL);
+
+ rf = rt3090_rf_read(sc, 22);
+ rf = (rf & ~0xe0) | 0x20;
+ rt3090_rf_write(sc, 22, rf);
+
+ rt3090_rf_write(sc, 42, rt3090_rf_read(sc, 42) | RT5390_RX_CTB);
+ rt3090_rf_write(sc, 20, rt3090_rf_read(sc, 20) & ~0x77);
+ rt3090_rf_write(sc, 3, rt3090_rf_read(sc, 3) | RT3593_VCOCAL);
+
+ if (sc->patch_dac && sc->mac_rev < 0x0211) {
+ tmp = RAL_READ(sc, RT3070_LDO_CFG0);
+ tmp = (tmp & ~0x1f000000) | 0x0d000000;
+ RAL_WRITE(sc, RT3070_LDO_CFG0, tmp);
+ }
+}
+
+static int
rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target,
uint8_t *val)
{
@@ -2767,7 +2979,7 @@ rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target,
break;
}
if (ntries == 100)
- return ETIMEDOUT;
+ return (ETIMEDOUT);
/* set power and frequency of stopband test tone */
rt2860_mcu_bbp_write(sc, 24, 0x06);
@@ -2800,7 +3012,7 @@ rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target,
rf22 = rt3090_rf_read(sc, 22);
rt3090_rf_write(sc, 22, rf22 & ~RT3070_BB_LOOPBACK);
- return 0;
+ return (0);
}
static void
@@ -2826,10 +3038,12 @@ rt3090_rf_setup(struct rt2860_softc *sc)
RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0);
/* initialize RF registers from ROM */
- for (i = 0; i < 10; i++) {
- if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff)
- continue;
- rt3090_rf_write(sc, sc->rf[i].reg, sc->rf[i].val);
+ if (sc->mac_ver < 0x5390) {
+ for (i = 0; i < 10; i++) {
+ if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff)
+ continue;
+ rt3090_rf_write(sc, sc->rf[i].reg, sc->rf[i].val);
+ }
}
}
@@ -3168,6 +3382,7 @@ rt2860_get_rf(uint8_t rev)
case RT3070_RF_3052: return "RT3052";
case RT3070_RF_3320: return "RT3320";
case RT3070_RF_3053: return "RT3053";
+ case RT5390_RF_5390: return "RT5390";
default: return "unknown";
}
}
@@ -3250,7 +3465,12 @@ rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
val = rt2860_srom_read(sc, RT2860_EEPROM_ANTENNA);
if (val == 0xffff) {
DPRINTF(("invalid EEPROM antenna info, using default\n"));
- if (sc->mac_ver == 0x3593) {
+ if (sc->mac_ver >= 0x5390) {
+ /* default to RF5390 */
+ sc->rf_rev = RT5390_RF_5390;
+ sc->ntxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
+ sc->nrxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
+ } else if (sc->mac_ver == 0x3593) {
/* default to RF3053 3T3R */
sc->rf_rev = RT3070_RF_3053;
sc->ntxchains = 3;
@@ -3268,8 +3488,13 @@ rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
}
} else {
sc->rf_rev = (val >> 8) & 0xf;
- sc->ntxchains = (val >> 4) & 0xf;
- sc->nrxchains = val & 0xf;
+ if (sc->mac_ver >= 0x5390) {
+ sc->ntxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
+ sc->nrxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
+ } else {
+ sc->ntxchains = (val >> 4) & 0xf;
+ sc->nrxchains = val & 0xf;
+ }
}
DPRINTF(("EEPROM RF rev=0x%02x chains=%dT%dR\n",
sc->rf_rev, sc->ntxchains, sc->nrxchains));
@@ -3307,17 +3532,23 @@ rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
sc->txpow1[i + 0] = (int8_t)(val & 0xff);
sc->txpow1[i + 1] = (int8_t)(val >> 8);
- val = rt2860_srom_read(sc,
- RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2);
- sc->txpow2[i + 0] = (int8_t)(val & 0xff);
- sc->txpow2[i + 1] = (int8_t)(val >> 8);
+ if (sc->mac_ver != 0x5390) {
+ val = rt2860_srom_read(sc,
+ RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2);
+ sc->txpow2[i + 0] = (int8_t)(val & 0xff);
+ sc->txpow2[i + 1] = (int8_t)(val >> 8);
+ }
}
/* fix broken Tx power entries */
for (i = 0; i < 14; i++) {
- if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31)
+ if (sc->txpow1[i] < 0 ||
+ sc->txpow1[i] > ((sc->mac_ver >= 0x5390) ? 39 : 31))
sc->txpow1[i] = 5;
- if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31)
- sc->txpow2[i] = 5;
+ if (sc->mac_ver != 0x5390) {
+ if (sc->txpow2[i] < 0 ||
+ sc->txpow2[i] > ((sc->mac_ver == 0x5392) ? 39 : 31))
+ sc->txpow2[i] = 5;
+ }
DPRINTF(("chan %d: power1=%d, power2=%d\n",
rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i]));
}
@@ -3485,10 +3716,9 @@ rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
return 0;
}
-int
+static int
rt2860_bbp_init(struct rt2860_softc *sc)
{
-#define N(a) (sizeof (a) / sizeof ((a)[0]))
int i, ntries;
/* wait for BBP to wake up */
@@ -3500,13 +3730,17 @@ rt2860_bbp_init(struct rt2860_softc *sc)
if (ntries == 20) {
device_printf(sc->sc_dev,
"timeout waiting for BBP to wake up\n");
- return ETIMEDOUT;
+ return (ETIMEDOUT);
}
/* initialize BBP registers to default values */
- for (i = 0; i < N(rt2860_def_bbp); i++) {
- rt2860_mcu_bbp_write(sc, rt2860_def_bbp[i].reg,
- rt2860_def_bbp[i].val);
+ if (sc->mac_ver >= 0x5390)
+ rt5390_bbp_init(sc);
+ else {
+ for (i = 0; i < nitems(rt2860_def_bbp); i++) {
+ rt2860_mcu_bbp_write(sc, rt2860_def_bbp[i].reg,
+ rt2860_def_bbp[i].val);
+ }
}
/* fix BBP84 for RT2860E */
@@ -3523,7 +3757,44 @@ rt2860_bbp_init(struct rt2860_softc *sc)
}
return 0;
-#undef N
+}
+
+static void
+rt5390_bbp_init(struct rt2860_softc *sc)
+{
+ uint8_t bbp;
+ int i;
+
+ /* Apply maximum likelihood detection for 2 stream case. */
+ if (sc->nrxchains > 1) {
+ bbp = rt2860_mcu_bbp_read(sc, 105);
+ rt2860_mcu_bbp_write(sc, 105, bbp | RT5390_MLD);
+ }
+
+	/* Avoid data loss and CRC errors. */
+ bbp = rt2860_mcu_bbp_read(sc, 4);
+ rt2860_mcu_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
+
+ for (i = 0; i < nitems(rt5390_def_bbp); i++) {
+ rt2860_mcu_bbp_write(sc, rt5390_def_bbp[i].reg,
+ rt5390_def_bbp[i].val);
+ }
+
+ if (sc->mac_ver == 0x5392) {
+ rt2860_mcu_bbp_write(sc, 84, 0x9a);
+ rt2860_mcu_bbp_write(sc, 95, 0x9a);
+ rt2860_mcu_bbp_write(sc, 98, 0x12);
+ rt2860_mcu_bbp_write(sc, 106, 0x05);
+ rt2860_mcu_bbp_write(sc, 134, 0xd0);
+ rt2860_mcu_bbp_write(sc, 135, 0xf6);
+ }
+
+ bbp = rt2860_mcu_bbp_read(sc, 152);
+ rt2860_mcu_bbp_write(sc, 152, bbp | 0x80);
+
+ /* Disable hardware antenna diversity. */
+ if (sc->mac_ver == 0x5390)
+ rt2860_mcu_bbp_write(sc, 154, 0);
}
static int
@@ -3590,7 +3861,6 @@ rt2860_init(void *arg)
static void
rt2860_init_locked(struct rt2860_softc *sc)
{
-#define N(a) (sizeof (a) / sizeof ((a)[0]))
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
uint32_t tmp;
@@ -3665,9 +3935,11 @@ rt2860_init_locked(struct rt2860_softc *sc)
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0);
- for (i = 0; i < N(rt2860_def_mac); i++)
+ for (i = 0; i < nitems(rt2860_def_mac); i++)
RAL_WRITE(sc, rt2860_def_mac[i].reg, rt2860_def_mac[i].val);
- if (sc->mac_ver >= 0x3071) {
+ if (sc->mac_ver >= 0x5390)
+ RAL_WRITE(sc, RT2860_TX_SW_CFG0, 0x00000404);
+ else if (sc->mac_ver >= 0x3071) {
/* set delay of PA_PE assertion to 1us (unit of 0.25us) */
RAL_WRITE(sc, RT2860_TX_SW_CFG0,
4 << RT2860_DLY_PAPE_EN_SHIFT);
@@ -3762,7 +4034,8 @@ rt2860_init_locked(struct rt2860_softc *sc)
/* select Main antenna for 1T1R devices */
if (sc->rf_rev == RT3070_RF_2020 ||
sc->rf_rev == RT3070_RF_3020 ||
- sc->rf_rev == RT3070_RF_3320)
+ sc->rf_rev == RT3070_RF_3320 ||
+ sc->mac_ver == 0x5390)
rt3090_set_rx_antenna(sc, 0);
/* send LEDs operating mode to microcontroller */
@@ -3770,13 +4043,21 @@ rt2860_init_locked(struct rt2860_softc *sc)
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1], 0);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2], 0);
- if (sc->mac_ver >= 0x3071)
- rt3090_rf_init(sc);
+ if (sc->mac_ver >= 0x5390)
+ rt5390_rf_init(sc);
+ else if (sc->mac_ver >= 0x3071) {
+ if ((error = rt3090_rf_init(sc)) != 0) {
+ rt2860_stop_locked(sc);
+ return;
+ }
+ }
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_SLEEP, 0x02ff, 1);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_WAKEUP, 0, 1);
- if (sc->mac_ver >= 0x3071)
+ if (sc->mac_ver >= 0x5390)
+ rt5390_rf_wakeup(sc);
+ else if (sc->mac_ver >= 0x3071)
rt3090_rf_wakeup(sc);
/* disable non-existing Rx chains */
@@ -3837,7 +4118,6 @@ rt2860_init_locked(struct rt2860_softc *sc)
ifp->if_drv_flags |= IFF_DRV_RUNNING;
callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc);
-#undef N
}
static void
@@ -3988,15 +4268,25 @@ rt3090_set_rx_antenna(struct rt2860_softc *sc, int aux)
uint32_t tmp;
if (aux) {
- tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
- RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp & ~RT2860_C);
- tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
- RAL_WRITE(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08);
+ if (sc->mac_ver == 0x5390) {
+ rt2860_mcu_bbp_write(sc, 152,
+ rt2860_mcu_bbp_read(sc, 152) & ~0x80);
+ } else {
+ tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
+ RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp & ~RT2860_C);
+ tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
+ RAL_WRITE(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08);
+ }
} else {
- tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
- RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp | RT2860_C);
- tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
- RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp & ~0x0808);
+ if (sc->mac_ver == 0x5390) {
+ rt2860_mcu_bbp_write(sc, 152,
+ rt2860_mcu_bbp_read(sc, 152) | 0x80);
+ } else {
+ tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
+ RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp | RT2860_C);
+ tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
+ RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp & ~0x0808);
+ }
}
}
@@ -4011,7 +4301,9 @@ rt2860_switch_chan(struct rt2860_softc *sc, struct ieee80211_channel *c)
if (chan == 0 || chan == IEEE80211_CHAN_ANY)
return;
- if (sc->mac_ver >= 0x3071)
+ if (sc->mac_ver >= 0x5390)
+ rt5390_set_chan(sc, chan);
+ else if (sc->mac_ver >= 0x3071)
rt3090_set_chan(sc, chan);
else
rt2860_set_chan(sc, chan);
@@ -4027,7 +4319,8 @@ rt2860_switch_chan(struct rt2860_softc *sc, struct ieee80211_channel *c)
group = 3;
/* XXX necessary only when group has changed! */
- rt2860_select_chan_group(sc, group);
+ if (sc->mac_ver < 0x5390)
+ rt2860_select_chan_group(sc, group);
DELAY(1000);
}
diff --git a/sys/dev/ral/rt2860reg.h b/sys/dev/ral/rt2860reg.h
index fe9fb4f38998..3d507be43e88 100644
--- a/sys/dev/ral/rt2860reg.h
+++ b/sys/dev/ral/rt2860reg.h
@@ -699,6 +699,7 @@
/* possible flags for RT3020 RF register 1 */
#define RT3070_RF_BLOCK (1 << 0)
+#define RT3070_PLL_PD (1 << 1)
#define RT3070_RX0_PD (1 << 2)
#define RT3070_TX0_PD (1 << 3)
#define RT3070_RX1_PD (1 << 4)
@@ -750,6 +751,22 @@
#define RT3090_DEF_LNA 10
+/* possible flags for RT5390 RF register 38 */
+#define RT5390_RX_LO1 (1 << 5)
+
+/* possible flags for RT5390 RF register 39 */
+#define RT5390_RX_LO2 (1 << 7)
+
+/* possible flags for RT5390 RF register 42 */
+#define RT5390_RX_CTB (1 << 6)
+
+/* possible flags for RT5390 BBP register 4 */
+#define RT5390_MAC_IF_CTRL (1 << 6)
+
+/* possible flags for RT5390 BBP register 105 */
+#define RT5390_MLD (1 << 2)
+#define RT5390_SIG_MODULATION (1 << 3)
+
/* RT2860 TX descriptor */
struct rt2860_txd {
uint32_t sdp0; /* Segment Data Pointer 0 */
@@ -894,6 +911,7 @@ struct rt2860_rxwi {
#define RT3070_RF_3052 9 /* dual-band 2T2R */
#define RT3070_RF_3320 11 /* 1T1R */
#define RT3070_RF_3053 13 /* dual-band 3T3R */
+#define RT5390_RF_5390 15 /* b/g/n */
/* USB commands for RT2870 only */
#define RT2870_RESET 1
@@ -1006,14 +1024,17 @@ static const struct rt2860_rate {
*/
#define RT2860_DEF_MAC \
{ RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \
+ { RT2860_BCN_OFFSET1, 0x6f77d0c8 }, \
{ RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \
{ RT2860_HT_BASIC_RATE, 0x00008003 }, \
{ RT2860_MAC_SYS_CTRL, 0x00000000 }, \
+ { RT2860_RX_FILTR_CFG, 0x00017f97 }, \
{ RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \
{ RT2860_TX_SW_CFG0, 0x00000000 }, \
{ RT2860_TX_SW_CFG1, 0x00080606 }, \
{ RT2860_TX_LINK_CFG, 0x00001020 }, \
{ RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \
+ { RT2860_MAX_LEN_CFG, 0x00001f00 }, \
{ RT2860_LED_CFG, 0x7f031e46 }, \
{ RT2860_WMM_AIFSN_CFG, 0x00002273 }, \
{ RT2860_WMM_CWMIN_CFG, 0x00002344 }, \
@@ -1028,42 +1049,9 @@ static const struct rt2860_rate {
{ RT2860_MM20_PROT_CFG, 0x01744004 }, \
{ RT2860_MM40_PROT_CFG, 0x03f54084 }, \
{ RT2860_TXOP_CTRL_CFG, 0x0000583f }, \
- { RT2860_TXOP_HLDR_ET, 0x00000002 }, \
{ RT2860_TX_RTS_CFG, 0x00092b20 }, \
{ RT2860_EXP_ACK_TIME, 0x002400ca }, \
- { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \
- { RT2860_PWR_PIN_CFG, 0x00000003 }
-
-/* XXX only a few registers differ from above, try to merge? */
-#define RT2870_DEF_MAC \
- { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \
- { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \
- { RT2860_HT_BASIC_RATE, 0x00008003 }, \
- { RT2860_MAC_SYS_CTRL, 0x00000000 }, \
- { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \
- { RT2860_TX_SW_CFG0, 0x00000000 }, \
- { RT2860_TX_SW_CFG1, 0x00080606 }, \
- { RT2860_TX_LINK_CFG, 0x00001020 }, \
- { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \
- { RT2860_LED_CFG, 0x7f031e46 }, \
- { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \
- { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \
- { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \
- { RT2860_MAX_PCNT, 0x1f3fbf9f }, \
- { RT2860_TX_RTY_CFG, 0x47d01f0f }, \
- { RT2860_AUTO_RSP_CFG, 0x00000013 }, \
- { RT2860_CCK_PROT_CFG, 0x05740003 }, \
- { RT2860_OFDM_PROT_CFG, 0x05740003 }, \
- { RT2860_PBF_CFG, 0x00f40006 }, \
- { RT2860_WPDMA_GLO_CFG, 0x00000030 }, \
- { RT2860_GF20_PROT_CFG, 0x01744004 }, \
- { RT2860_GF40_PROT_CFG, 0x03f44084 }, \
- { RT2860_MM20_PROT_CFG, 0x01744004 }, \
- { RT2860_MM40_PROT_CFG, 0x03f44084 }, \
- { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \
{ RT2860_TXOP_HLDR_ET, 0x00000002 }, \
- { RT2860_TX_RTS_CFG, 0x00092b20 }, \
- { RT2860_EXP_ACK_TIME, 0x002400ca }, \
{ RT2860_XIFS_TIME_CFG, 0x33a41010 }, \
{ RT2860_PWR_PIN_CFG, 0x00000003 }
@@ -1073,6 +1061,7 @@ static const struct rt2860_rate {
#define RT2860_DEF_BBP \
{ 65, 0x2c }, \
{ 66, 0x38 }, \
+ { 68, 0x0b }, \
{ 69, 0x12 }, \
{ 70, 0x0a }, \
{ 73, 0x10 }, \
@@ -1087,6 +1076,30 @@ static const struct rt2860_rate {
{ 105, 0x05 }, \
{ 106, 0x35 }
+#define RT5390_DEF_BBP \
+ { 31, 0x08 }, \
+ { 65, 0x2c }, \
+ { 66, 0x38 }, \
+ { 68, 0x0b }, \
+ { 69, 0x12 }, \
+ { 70, 0x0a }, \
+ { 73, 0x13 }, \
+ { 75, 0x46 }, \
+ { 76, 0x28 }, \
+ { 77, 0x59 }, \
+ { 81, 0x37 }, \
+ { 82, 0x62 }, \
+ { 83, 0x7a }, \
+ { 84, 0x19 }, \
+ { 86, 0x38 }, \
+ { 91, 0x04 }, \
+ { 92, 0x02 }, \
+ { 103, 0xc0 }, \
+ { 104, 0x92 }, \
+ { 105, 0x3c }, \
+ { 106, 0x03 }, \
+	{ 128, 0x12 }
+
/*
* Default settings for RF registers; values derived from the reference driver.
*/
@@ -1204,7 +1217,7 @@ static const struct rt2860_rate {
{ 4, 0x40 }, \
{ 5, 0x03 }, \
{ 6, 0x02 }, \
- { 7, 0x70 }, \
+ { 7, 0x60 }, \
{ 9, 0x0f }, \
{ 10, 0x41 }, \
{ 11, 0x21 }, \
@@ -1221,35 +1234,122 @@ static const struct rt2860_rate {
{ 25, 0x01 }, \
{ 29, 0x1f }
-#define RT3572_DEF_RF \
- { 0, 0x70 }, \
- { 1, 0x81 }, \
- { 2, 0xf1 }, \
- { 3, 0x02 }, \
- { 4, 0x4c }, \
- { 5, 0x05 }, \
- { 6, 0x4a }, \
- { 7, 0xd8 }, \
- { 9, 0xc3 }, \
- { 10, 0xf1 }, \
- { 11, 0xb9 }, \
- { 12, 0x70 }, \
- { 13, 0x65 }, \
- { 14, 0xa0 }, \
- { 15, 0x53 }, \
- { 16, 0x4c }, \
- { 17, 0x23 }, \
- { 18, 0xac }, \
- { 19, 0x93 }, \
- { 20, 0xb3 }, \
- { 21, 0xd0 }, \
- { 22, 0x00 }, \
- { 23, 0x3c }, \
- { 24, 0x16 }, \
- { 25, 0x15 }, \
- { 26, 0x85 }, \
- { 27, 0x00 }, \
+#define RT5390_DEF_RF \
+ { 1, 0x0f }, \
+ { 2, 0x80 }, \
+ { 3, 0x88 }, \
+ { 5, 0x10 }, \
+ { 6, 0xe0 }, \
+ { 7, 0x00 }, \
+ { 10, 0x53 }, \
+ { 11, 0x4a }, \
+ { 12, 0x46 }, \
+ { 13, 0x9f }, \
+ { 14, 0x00 }, \
+ { 15, 0x00 }, \
+ { 16, 0x00 }, \
+ { 18, 0x03 }, \
+ { 19, 0x00 }, \
+ { 20, 0x00 }, \
+ { 21, 0x00 }, \
+ { 22, 0x20 }, \
+ { 23, 0x00 }, \
+ { 24, 0x00 }, \
+ { 25, 0x80 }, \
+ { 26, 0x00 }, \
+ { 27, 0x09 }, \
+ { 28, 0x00 }, \
+ { 29, 0x10 }, \
+ { 30, 0x10 }, \
+ { 31, 0x80 }, \
+ { 32, 0x80 }, \
+ { 33, 0x00 }, \
+ { 34, 0x07 }, \
+ { 35, 0x12 }, \
+ { 36, 0x00 }, \
+ { 37, 0x08 }, \
+ { 38, 0x85 }, \
+ { 39, 0x1b }, \
+ { 40, 0x0b }, \
+ { 41, 0xbb }, \
+ { 42, 0xd2 }, \
+ { 43, 0x9a }, \
+ { 44, 0x0e }, \
+ { 45, 0xa2 }, \
+ { 46, 0x73 }, \
+ { 47, 0x00 }, \
+ { 48, 0x10 }, \
+ { 49, 0x94 }, \
+ { 52, 0x38 }, \
+ { 53, 0x00 }, \
+ { 54, 0x78 }, \
+ { 55, 0x23 }, \
+ { 56, 0x22 }, \
+ { 57, 0x80 }, \
+ { 58, 0x7f }, \
+ { 59, 0x07 }, \
+ { 60, 0x45 }, \
+ { 61, 0xd1 }, \
+ { 62, 0x00 }, \
+ { 63, 0x00 }
+
+#define RT5392_DEF_RF \
+ { 1, 0x17 }, \
+ { 2, 0x80 }, \
+ { 3, 0x88 }, \
+ { 5, 0x10 }, \
+ { 6, 0xe0 }, \
+ { 7, 0x00 }, \
+ { 10, 0x53 }, \
+ { 11, 0x4a }, \
+ { 12, 0x46 }, \
+ { 13, 0x9f }, \
+ { 14, 0x00 }, \
+ { 15, 0x00 }, \
+ { 16, 0x00 }, \
+ { 18, 0x03 }, \
+ { 19, 0x4d }, \
+ { 20, 0x00 }, \
+ { 21, 0x8d }, \
+ { 22, 0x20 }, \
+ { 23, 0x0b }, \
+ { 24, 0x44 }, \
+ { 25, 0x80 }, \
+ { 26, 0x82 }, \
+ { 27, 0x09 }, \
{ 28, 0x00 }, \
- { 29, 0x9b }, \
- { 30, 0x09 }, \
- { 31, 0x10 }
+ { 29, 0x10 }, \
+ { 30, 0x10 }, \
+ { 31, 0x80 }, \
+ { 32, 0x80 }, \
+ { 33, 0xc0 }, \
+ { 34, 0x07 }, \
+ { 35, 0x12 }, \
+ { 36, 0x00 }, \
+ { 37, 0x08 }, \
+ { 38, 0x89 }, \
+ { 39, 0x1b }, \
+ { 40, 0x0f }, \
+ { 41, 0xbb }, \
+ { 42, 0xd5 }, \
+ { 43, 0x9b }, \
+ { 44, 0x0e }, \
+ { 45, 0xa2 }, \
+ { 46, 0x73 }, \
+ { 47, 0x0c }, \
+ { 48, 0x10 }, \
+ { 49, 0x94 }, \
+ { 50, 0x94 }, \
+ { 51, 0x3a }, \
+ { 52, 0x48 }, \
+ { 53, 0x44 }, \
+ { 54, 0x38 }, \
+ { 55, 0x43 }, \
+ { 56, 0xa1 }, \
+ { 57, 0x00 }, \
+ { 58, 0x39 }, \
+ { 59, 0x07 }, \
+ { 60, 0x45 }, \
+ { 61, 0x91 }, \
+ { 62, 0x39 }, \
+ { 63, 0x00 }
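These default-register tables are just { reg, val } pairs expanded into arrays. A short sketch of how a table such as RT5390_DEF_RF is defined and applied, assuming the anonymous element type the driver already uses for its other default tables (cf. rt5390_rf_init() earlier in this change):

	static const struct {
		uint8_t	reg;
		uint8_t	val;
	} rt5390_def_rf[] = {
		RT5390_DEF_RF
	};

	static void
	apply_def_rf(struct rt2860_softc *sc)
	{
		int i;

		/* Walk the table and program each RF register in turn. */
		for (i = 0; i < nitems(rt5390_def_rf); i++)
			rt3090_rf_write(sc, rt5390_def_rf[i].reg,
			    rt5390_def_rf[i].val);
	}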
diff --git a/sys/dev/sfxge/common/efsys.h b/sys/dev/sfxge/common/efsys.h
index c91de292ca48..705d060701d2 100644
--- a/sys/dev/sfxge/common/efsys.h
+++ b/sys/dev/sfxge/common/efsys.h
@@ -51,7 +51,11 @@ extern "C" {
#include <machine/endian.h>
#define EFSYS_HAS_UINT64 1
+#if defined(__x86_64__)
+#define EFSYS_USE_UINT64 1
+#else
#define EFSYS_USE_UINT64 0
+#endif
#if _BYTE_ORDER == _BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
@@ -398,6 +402,26 @@ typedef struct efsys_mem_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#if defined(__x86_64__)
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ uint64_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eqp)->eq_u64[0] = *addr; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
do { \
uint32_t *addr; \
@@ -417,7 +441,31 @@ typedef struct efsys_mem_s {
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
+#if defined(__x86_64__)
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ uint64_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eop)->eo_u64[0] = *addr++; \
+ (_eop)->eo_u64[1] = *addr; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
do { \
uint32_t *addr; \
@@ -441,6 +489,7 @@ typedef struct efsys_mem_s {
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
do { \
@@ -460,6 +509,27 @@ typedef struct efsys_mem_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#if defined(__x86_64__)
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ uint64_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ *addr = (_eqp)->eq_u64[0]; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#else
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
do { \
uint32_t *addr; \
@@ -479,7 +549,31 @@ typedef struct efsys_mem_s {
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
+#if defined(__x86_64__)
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ uint64_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ *addr++ = (_eop)->eo_u64[0]; \
+ *addr = (_eop)->eo_u64[1]; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
do { \
uint32_t *addr; \
@@ -503,20 +597,40 @@ typedef struct efsys_mem_s {
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
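On amd64 the variants above move each qword as a single aligned 64-bit access rather than two 32-bit ones. A rough standalone illustration of the difference; the eq_u32[]/eq_u64[] union layout is assumed from the common code's efx_qword_t:

	#include <stdint.h>

	typedef union {
		uint32_t	eq_u32[2];
		uint64_t	eq_u64[1];
	} qword_t;			/* stand-in for efx_qword_t */

	/* Generic path: two separate 32-bit stores (non-__x86_64__ case). */
	static void
	writeq_32(volatile uint32_t *addr, const qword_t *qp)
	{
		addr[0] = qp->eq_u32[0];
		addr[1] = qp->eq_u32[1];
	}

	/* amd64 path: one 64-bit store, as in the new EFSYS_MEM_WRITEQ above. */
	static void
	writeq_64(volatile uint64_t *addr, const qword_t *qp)
	{
		*addr = qp->eq_u64[0];
	}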
#define EFSYS_MEM_ADDR(_esmp) \
((_esmp)->esm_addr)
/* BAR */
+#define SFXGE_LOCK_NAME_MAX 16
+
typedef struct efsys_bar_s {
struct mtx esb_lock;
+ char esb_lock_name[SFXGE_LOCK_NAME_MAX];
bus_space_tag_t esb_tag;
bus_space_handle_t esb_handle;
int esb_rid;
struct resource *esb_res;
} efsys_bar_t;
+#define SFXGE_BAR_LOCK_INIT(_esbp, _ifname) \
+ do { \
+ snprintf((_esbp)->esb_lock_name, \
+ sizeof((_esbp)->esb_lock_name), \
+ "%s:bar", (_ifname)); \
+ mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name, \
+ NULL, MTX_DEF); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#define SFXGE_BAR_LOCK_DESTROY(_esbp) \
+ mtx_destroy(&(_esbp)->esb_lock)
+#define SFXGE_BAR_LOCK(_esbp) \
+ mtx_lock(&(_esbp)->esb_lock)
+#define SFXGE_BAR_UNLOCK(_esbp) \
+ mtx_unlock(&(_esbp)->esb_lock)
+
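A brief usage sketch of the new BAR lock wrappers; it mirrors how sfxge_bar_init() and sfxge_bar_fini() use them later in this change, and the names shown are illustrative:

	/* The BAR lock now carries a per-device name such as "sfxge0:bar". */
	efsys_bar_t *esbp = &sc->bar;

	SFXGE_BAR_LOCK_INIT(esbp, device_get_nameunit(sc->dev));
	/* ... register accesses below take and drop the lock as needed ... */
	SFXGE_BAR_LOCK_DESTROY(esbp);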
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
@@ -525,7 +639,7 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_lock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_LOCK(_esbp); \
\
(_edp)->ed_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset)); \
@@ -535,10 +649,58 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
+ SFXGE_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if defined(__x86_64__)
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ SFXGE_BAR_LOCK(_esbp); \
+ \
+ (_eqp)->eq_u64[0] = bus_space_read_8((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset)); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+		SFXGE_BAR_UNLOCK(_esbp);				\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ (_eop)->eo_u64[0] = bus_space_read_8((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset)); \
+ (_eop)->eo_u64[1] = bus_space_read_8((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset+8)); \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#else
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
do { \
_NOTE(CONSTANTCONDITION) \
@@ -556,7 +718,7 @@ typedef struct efsys_bar_s {
uint32_t, (_eqp)->eq_u32[1], \
uint32_t, (_eqp)->eq_u32[0]); \
\
- mtx_unlock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_UNLOCK(_esbp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
@@ -568,7 +730,7 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_lock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_LOCK(_esbp); \
\
(_eop)->eo_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset)); \
@@ -587,9 +749,10 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_unlock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_UNLOCK(_esbp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
do { \
@@ -599,7 +762,7 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_lock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_LOCK(_esbp); \
\
EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
uint32_t, (_edp)->ed_u32[0]); \
@@ -609,10 +772,30 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_unlock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_UNLOCK(_esbp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#if defined(__x86_64__)
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ SFXGE_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ bus_space_write_8((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset), (_eqp)->eq_u64[0]); \
+ \
+		SFXGE_BAR_UNLOCK(_esbp);				\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
do { \
_NOTE(CONSTANTCONDITION) \
@@ -630,10 +813,40 @@ typedef struct efsys_bar_s {
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset+4), (_eqp)->eq_u32[1]); \
\
- mtx_unlock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_UNLOCK(_esbp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
+#if defined(__x86_64__)
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ SFXGE_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ bus_space_write_8((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset), (_eop)->eo_u64[0]); \
+ bus_space_write_8((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset+8), (_eop)->eo_u64[1]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+			SFXGE_BAR_UNLOCK(_esbp);			\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#else
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
@@ -661,9 +874,10 @@ typedef struct efsys_bar_s {
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
- mtx_unlock(&((_esbp)->esb_lock)); \
+ SFXGE_BAR_UNLOCK(_esbp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+#endif
/* SPIN */
@@ -677,8 +891,7 @@ typedef struct efsys_bar_s {
/* BARRIERS */
-/* Strict ordering guaranteed by devacc.devacc_attr_dataorder */
-#define EFSYS_MEM_READ_BARRIER()
+#define EFSYS_MEM_READ_BARRIER() rmb()
#define EFSYS_PIO_WRITE_BARRIER()
/* TIMESTAMP */
@@ -713,13 +926,35 @@ typedef clock_t efsys_timestamp_t;
/* LOCK */
-typedef struct mtx efsys_lock_t;
+typedef struct efsys_lock_s {
+ struct mtx lock;
+ char lock_name[SFXGE_LOCK_NAME_MAX];
+} efsys_lock_t;
+
+#define SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
+ do { \
+ efsys_lock_t *__eslp = (_eslp); \
+ \
+ snprintf((__eslp)->lock_name, \
+ sizeof((__eslp)->lock_name), \
+ "%s:%s", (_ifname), (_label)); \
+ mtx_init(&(__eslp)->lock, (__eslp)->lock_name, \
+ NULL, MTX_DEF); \
+ } while (B_FALSE)
+#define SFXGE_EFSYS_LOCK_DESTROY(_eslp) \
+ mtx_destroy(&(_eslp)->lock)
+#define SFXGE_EFSYS_LOCK(_eslp) \
+ mtx_lock(&(_eslp)->lock)
+#define SFXGE_EFSYS_UNLOCK(_eslp) \
+ mtx_unlock(&(_eslp)->lock)
+#define SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
+ mtx_assert(&(_eslp)->lock, MA_OWNED)
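EFSYS_LOCK()/EFSYS_UNLOCK() below now sit on top of these wrappers, so the common-code NIC lock gets a readable per-device name. A minimal usage sketch, assuming the driver context shown elsewhere in this change:

	efsys_lock_t *eslp = &sc->enp_lock;
	int state;

	SFXGE_EFSYS_LOCK_INIT(eslp, device_get_nameunit(sc->dev), "nic");
	EFSYS_LOCK(eslp, state);	/* takes the mutex and records the magic */
	/* ... touch common-code state ... */
	EFSYS_UNLOCK(eslp, state);	/* verifies the magic, drops the mutex */
	SFXGE_EFSYS_LOCK_DESTROY(eslp);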
#define EFSYS_LOCK_MAGIC 0x000010c4
#define EFSYS_LOCK(_lockp, _state) \
do { \
- mtx_lock(_lockp); \
+ SFXGE_EFSYS_LOCK(_lockp); \
(_state) = EFSYS_LOCK_MAGIC; \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
@@ -728,7 +963,7 @@ typedef struct mtx efsys_lock_t;
do { \
if ((_state) != EFSYS_LOCK_MAGIC) \
KASSERT(B_FALSE, ("not locked")); \
- mtx_unlock(_lockp); \
+ SFXGE_EFSYS_UNLOCK(_lockp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 7ecc6b4f880c..7f4ce95f4d83 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -95,7 +95,7 @@ sfxge_start(struct sfxge_softc *sc)
{
int rc;
- sx_assert(&sc->softc_lock, LA_XLOCKED);
+ SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
if (sc->init_state == SFXGE_STARTED)
return (0);
@@ -164,15 +164,15 @@ sfxge_if_init(void *arg)
sc = (struct sfxge_softc *)arg;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
(void)sfxge_start(sc);
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
}
static void
sfxge_stop(struct sfxge_softc *sc)
{
- sx_assert(&sc->softc_lock, LA_XLOCKED);
+ SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
if (sc->init_state != SFXGE_STARTED)
return;
@@ -212,7 +212,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
switch (command) {
case SIOCSIFFLAGS:
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if ((ifp->if_flags ^ sc->if_flags) &
@@ -225,7 +225,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
sfxge_stop(sc);
sc->if_flags = ifp->if_flags;
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
break;
case SIOCSIFMTU:
if (ifr->ifr_mtu == ifp->if_mtu) {
@@ -238,11 +238,11 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
error = 0;
} else {
/* Restart required */
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
sfxge_stop(sc);
ifp->if_mtu = ifr->ifr_mtu;
error = sfxge_start(sc);
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
if (error != 0) {
ifp->if_flags &= ~IFF_UP;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
@@ -256,7 +256,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
sfxge_mac_filter_set(sc);
break;
case SIOCSIFCAP:
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
/*
* The networking core already rejects attempts to
@@ -266,7 +266,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
*/
if (~ifr->ifr_reqcap & SFXGE_CAP_FIXED) {
error = EINVAL;
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
break;
}
@@ -280,7 +280,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
else
ifp->if_hwassist &= ~CSUM_TSO;
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
@@ -298,9 +298,9 @@ sfxge_ifnet_fini(struct ifnet *ifp)
{
struct sfxge_softc *sc = ifp->if_softc;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
sfxge_stop(sc);
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
ifmedia_removeall(&sc->media);
ether_ifdetach(ifp);
@@ -338,7 +338,9 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
ifp->if_snd.ifq_drv_maxlen = sc->txq_entries - 1;
IFQ_SET_READY(&ifp->if_snd);
- mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
+ snprintf(sc->tx_lock_name, sizeof(sc->tx_lock_name),
+ "%s:tx", device_get_nameunit(sc->dev));
+ mtx_init(&sc->tx_lock, sc->tx_lock_name, NULL, MTX_DEF);
#endif
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
@@ -376,7 +378,8 @@ sfxge_bar_init(struct sfxge_softc *sc)
}
esbp->esb_tag = rman_get_bustag(esbp->esb_res);
esbp->esb_handle = rman_get_bushandle(esbp->esb_res);
- mtx_init(&esbp->esb_lock, "sfxge_efsys_bar", NULL, MTX_DEF);
+
+ SFXGE_BAR_LOCK_INIT(esbp, device_get_nameunit(sc->dev));
return (0);
}
@@ -388,7 +391,7 @@ sfxge_bar_fini(struct sfxge_softc *sc)
bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
esbp->esb_res);
- mtx_destroy(&esbp->esb_lock);
+ SFXGE_BAR_LOCK_DESTROY(esbp);
}
static int
@@ -401,7 +404,7 @@ sfxge_create(struct sfxge_softc *sc)
dev = sc->dev;
- sx_init(&sc->softc_lock, "sfxge_softc");
+ SFXGE_ADAPTER_LOCK_INIT(sc, device_get_nameunit(sc->dev));
sc->max_rss_channels = 0;
snprintf(rss_param_name, sizeof(rss_param_name),
@@ -435,7 +438,8 @@ sfxge_create(struct sfxge_softc *sc)
KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));
/* Create the common code nic object. */
- mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF);
+ SFXGE_EFSYS_LOCK_INIT(&sc->enp_lock,
+ device_get_nameunit(sc->dev), "nic");
if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
&sc->bar, &sc->enp_lock, &enp)) != 0)
goto fail3;
@@ -537,7 +541,7 @@ fail_tx_ring_entries:
fail_rx_ring_entries:
sc->enp = NULL;
efx_nic_destroy(enp);
- mtx_destroy(&sc->enp_lock);
+ SFXGE_EFSYS_LOCK_DESTROY(&sc->enp_lock);
fail3:
sfxge_bar_fini(sc);
@@ -545,7 +549,7 @@ fail3:
fail:
sc->dev = NULL;
- sx_destroy(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK_DESTROY(sc);
return (error);
}
@@ -594,7 +598,7 @@ sfxge_destroy(struct sfxge_softc *sc)
taskqueue_drain(taskqueue_thread, &sc->task_reset);
/* Destroy the softc lock. */
- sx_destroy(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK_DESTROY(sc);
}
static int
@@ -696,7 +700,7 @@ sfxge_reset(void *arg, int npending)
sc = (struct sfxge_softc *)arg;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
if (sc->init_state != SFXGE_STARTED)
goto done;
@@ -709,7 +713,7 @@ sfxge_reset(void *arg, int npending)
rc);
done:
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
}
void
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index 6157b23f8a3b..1c4e76a426e4 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -122,6 +122,7 @@ struct sfxge_evq {
/* Structure members not used on event processing path */
unsigned int buf_base_id;
unsigned int entries;
+ char lock_name[SFXGE_LOCK_NAME_MAX];
} __aligned(CACHE_LINE_SIZE);
#define SFXGE_NDESCS 1024
@@ -162,6 +163,9 @@ struct sfxge_mcdi {
struct cv cv;
enum sfxge_mcdi_state state;
efx_mcdi_transport_t transport;
+
+ /* Only used in debugging output */
+ char lock_name[SFXGE_LOCK_NAME_MAX];
};
struct sfxge_hw_stats {
@@ -186,6 +190,9 @@ struct sfxge_port {
struct sfxge_hw_stats phy_stats;
struct sfxge_hw_stats mac_stats;
efx_link_mode_t link_mode;
+
+ /* Only used in debugging output */
+ char lock_name[SFXGE_LOCK_NAME_MAX];
};
enum sfxge_softc_state {
@@ -198,6 +205,7 @@ enum sfxge_softc_state {
struct sfxge_softc {
device_t dev;
struct sx softc_lock;
+ char softc_lock_name[SFXGE_LOCK_NAME_MAX];
enum sfxge_softc_state init_state;
struct ifnet *ifnet;
unsigned int if_flags;
@@ -210,7 +218,7 @@ struct sfxge_softc {
caddr_t vpd_data;
size_t vpd_size;
efx_nic_t *enp;
- struct mtx enp_lock;
+ efsys_lock_t enp_lock;
unsigned int rxq_entries;
unsigned int txq_entries;
@@ -249,6 +257,7 @@ struct sfxge_softc {
#ifndef SFXGE_HAVE_MQ
struct mtx tx_lock __aligned(CACHE_LINE_SIZE);
+ char tx_lock_name[SFXGE_LOCK_NAME_MAX];
#endif
};
@@ -314,4 +323,79 @@ extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc);
#define SFXGE_MAX_MTU (9 * 1024)
+#define SFXGE_ADAPTER_LOCK_INIT(_sc, _ifname) \
+ do { \
+ struct sfxge_softc *__sc = (_sc); \
+ \
+ snprintf((__sc)->softc_lock_name, \
+ sizeof((__sc)->softc_lock_name), \
+ "%s:softc", (_ifname)); \
+ sx_init(&(__sc)->softc_lock, (__sc)->softc_lock_name); \
+ } while (B_FALSE)
+#define SFXGE_ADAPTER_LOCK_DESTROY(_sc) \
+ sx_destroy(&(_sc)->softc_lock)
+#define SFXGE_ADAPTER_LOCK(_sc) \
+ sx_xlock(&(_sc)->softc_lock)
+#define SFXGE_ADAPTER_UNLOCK(_sc) \
+ sx_xunlock(&(_sc)->softc_lock)
+#define SFXGE_ADAPTER_LOCK_ASSERT_OWNED(_sc) \
+ sx_assert(&(_sc)->softc_lock, LA_XLOCKED)
+
+#define SFXGE_PORT_LOCK_INIT(_port, _ifname) \
+ do { \
+ struct sfxge_port *__port = (_port); \
+ \
+ snprintf((__port)->lock_name, \
+ sizeof((__port)->lock_name), \
+ "%s:port", (_ifname)); \
+ mtx_init(&(__port)->lock, (__port)->lock_name, \
+ NULL, MTX_DEF); \
+ } while (B_FALSE)
+#define SFXGE_PORT_LOCK_DESTROY(_port) \
+ mtx_destroy(&(_port)->lock)
+#define SFXGE_PORT_LOCK(_port) \
+ mtx_lock(&(_port)->lock)
+#define SFXGE_PORT_UNLOCK(_port) \
+ mtx_unlock(&(_port)->lock)
+#define SFXGE_PORT_LOCK_ASSERT_OWNED(_port) \
+ mtx_assert(&(_port)->lock, MA_OWNED)
+
+#define SFXGE_MCDI_LOCK_INIT(_mcdi, _ifname) \
+ do { \
+ struct sfxge_mcdi *__mcdi = (_mcdi); \
+ \
+ snprintf((__mcdi)->lock_name, \
+ sizeof((__mcdi)->lock_name), \
+ "%s:mcdi", (_ifname)); \
+ mtx_init(&(__mcdi)->lock, (__mcdi)->lock_name, \
+ NULL, MTX_DEF); \
+ } while (B_FALSE)
+#define SFXGE_MCDI_LOCK_DESTROY(_mcdi) \
+ mtx_destroy(&(_mcdi)->lock)
+#define SFXGE_MCDI_LOCK(_mcdi) \
+ mtx_lock(&(_mcdi)->lock)
+#define SFXGE_MCDI_UNLOCK(_mcdi) \
+ mtx_unlock(&(_mcdi)->lock)
+#define SFXGE_MCDI_LOCK_ASSERT_OWNED(_mcdi) \
+ mtx_assert(&(_mcdi)->lock, MA_OWNED)
+
+#define SFXGE_EVQ_LOCK_INIT(_evq, _ifname, _evq_index) \
+ do { \
+ struct sfxge_evq *__evq = (_evq); \
+ \
+ snprintf((__evq)->lock_name, \
+ sizeof((__evq)->lock_name), \
+ "%s:evq%u", (_ifname), (_evq_index)); \
+ mtx_init(&(__evq)->lock, (__evq)->lock_name, \
+ NULL, MTX_DEF); \
+ } while (B_FALSE)
+#define SFXGE_EVQ_LOCK_DESTROY(_evq) \
+ mtx_destroy(&(_evq)->lock)
+#define SFXGE_EVQ_LOCK(_evq) \
+ mtx_lock(&(_evq)->lock)
+#define SFXGE_EVQ_UNLOCK(_evq) \
+ mtx_unlock(&(_evq)->lock)
+#define SFXGE_EVQ_LOCK_ASSERT_OWNED(_evq) \
+ mtx_assert(&(_evq)->lock, MA_OWNED)
+
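All of these wrappers follow the same shape: the *_LOCK_INIT macro formats a per-instance name into the embedded lock_name buffer and passes it to mtx_init() or sx_init(), so debugging output shows which adapter and queue a lock belongs to. A short sketch for an event queue (mirrors sfxge_ev_qinit() later in this change):

	/* evq index 3 on sfxge0 yields a lock named "sfxge0:evq3". */
	SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);

	SFXGE_EVQ_LOCK(evq);
	/* ... process events, refill the Rx ring ... */
	SFXGE_EVQ_UNLOCK(evq);

	SFXGE_EVQ_LOCK_DESTROY(evq);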
#endif /* _SFXGE_H */
diff --git a/sys/dev/sfxge/sfxge_ev.c b/sys/dev/sfxge/sfxge_ev.c
index c3e359b04061..af6fb98aa1c7 100644
--- a/sys/dev/sfxge/sfxge_ev.c
+++ b/sys/dev/sfxge/sfxge_ev.c
@@ -415,7 +415,7 @@ sfxge_ev_stat_update(struct sfxge_softc *sc)
unsigned int index;
clock_t now;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
if (sc->evq[0]->init_state != SFXGE_EVQ_STARTED)
goto out;
@@ -429,12 +429,12 @@ sfxge_ev_stat_update(struct sfxge_softc *sc)
/* Add event counts from each event queue in turn */
for (index = 0; index < sc->intr.n_alloc; index++) {
evq = sc->evq[index];
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
efx_ev_qstats_update(evq->common, sc->ev_stats);
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
}
out:
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
}
static int
@@ -495,7 +495,7 @@ sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
int error;
int index;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
if (req->newptr != NULL) {
if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
@@ -522,7 +522,7 @@ sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
}
out:
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
return (error);
}
@@ -577,7 +577,7 @@ sfxge_ev_qpoll(struct sfxge_evq *evq)
{
int rc;
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
if (evq->init_state != SFXGE_EVQ_STARTING &&
evq->init_state != SFXGE_EVQ_STARTED) {
@@ -607,12 +607,12 @@ sfxge_ev_qpoll(struct sfxge_evq *evq)
if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
goto fail;
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
return (0);
fail:
- mtx_unlock(&(evq->lock));
+ SFXGE_EVQ_UNLOCK(evq);
return (rc);
}
@@ -626,7 +626,7 @@ sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
("evq->init_state != SFXGE_EVQ_STARTED"));
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
evq->init_state = SFXGE_EVQ_INITIALIZED;
evq->read_ptr = 0;
evq->exception = B_FALSE;
@@ -639,7 +639,7 @@ sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
efx_ev_qdestroy(evq->common);
efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
EFX_EVQ_NBUFS(evq->entries));
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
}
static int
@@ -669,7 +669,7 @@ sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
evq->buf_base_id, &evq->common)) != 0)
goto fail;
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
/* Set the default moderation */
(void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
@@ -680,7 +680,7 @@ sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
evq->init_state = SFXGE_EVQ_STARTING;
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
/* Wait for the initialization event */
count = 0;
@@ -701,10 +701,10 @@ done:
return (0);
fail3:
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
evq->init_state = SFXGE_EVQ_INITIALIZED;
fail2:
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
efx_ev_qdestroy(evq->common);
fail:
efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
@@ -785,7 +785,7 @@ sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
sc->evq[index] = NULL;
- mtx_destroy(&evq->lock);
+ SFXGE_EVQ_LOCK_DESTROY(evq);
free(evq, M_SFXGE);
}
@@ -832,7 +832,7 @@ sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
&evq->buf_base_id);
- mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
+ SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);
evq->init_state = SFXGE_EVQ_INITIALIZED;
diff --git a/sys/dev/sfxge/sfxge_mcdi.c b/sys/dev/sfxge/sfxge_mcdi.c
index 6368ab4357b3..a1bc85701bd0 100644
--- a/sys/dev/sfxge/sfxge_mcdi.c
+++ b/sys/dev/sfxge/sfxge_mcdi.c
@@ -52,8 +52,7 @@ __FBSDID("$FreeBSD$");
static void
sfxge_mcdi_acquire(struct sfxge_mcdi *mcdi)
{
-
- mtx_lock(&mcdi->lock);
+ SFXGE_MCDI_LOCK(mcdi);
KASSERT(mcdi->state != SFXGE_MCDI_UNINITIALIZED,
("MCDI not initialized"));
@@ -61,15 +60,14 @@ sfxge_mcdi_acquire(struct sfxge_mcdi *mcdi)
(void)cv_wait_sig(&mcdi->cv, &mcdi->lock);
mcdi->state = SFXGE_MCDI_BUSY;
- mtx_unlock(&mcdi->lock);
+ SFXGE_MCDI_UNLOCK(mcdi);
}
/* Release ownership of MCDI on request completion. */
static void
sfxge_mcdi_release(struct sfxge_mcdi *mcdi)
{
-
- mtx_lock(&mcdi->lock);
+ SFXGE_MCDI_LOCK(mcdi);
KASSERT((mcdi->state == SFXGE_MCDI_BUSY ||
mcdi->state == SFXGE_MCDI_COMPLETED),
("MCDI not busy or task not completed"));
@@ -77,7 +75,7 @@ sfxge_mcdi_release(struct sfxge_mcdi *mcdi)
mcdi->state = SFXGE_MCDI_INITIALIZED;
cv_broadcast(&mcdi->cv);
- mtx_unlock(&mcdi->lock);
+ SFXGE_MCDI_UNLOCK(mcdi);
}
static void
@@ -160,11 +158,11 @@ sfxge_mcdi_ev_cpl(void *arg)
sc = (struct sfxge_softc *)arg;
mcdi = &sc->mcdi;
- mtx_lock(&mcdi->lock);
+ SFXGE_MCDI_LOCK(mcdi);
KASSERT(mcdi->state == SFXGE_MCDI_BUSY, ("MCDI not busy"));
mcdi->state = SFXGE_MCDI_COMPLETED;
cv_broadcast(&mcdi->cv);
- mtx_unlock(&mcdi->lock);
+ SFXGE_MCDI_UNLOCK(mcdi);
}
static void
@@ -203,7 +201,7 @@ sfxge_mcdi_init(struct sfxge_softc *sc)
KASSERT(mcdi->state == SFXGE_MCDI_UNINITIALIZED,
("MCDI already initialized"));
- mtx_init(&mcdi->lock, "sfxge_mcdi", NULL, MTX_DEF);
+ SFXGE_MCDI_LOCK_INIT(mcdi, device_get_nameunit(sc->dev));
mcdi->state = SFXGE_MCDI_INITIALIZED;
@@ -220,7 +218,7 @@ sfxge_mcdi_init(struct sfxge_softc *sc)
return (0);
fail:
- mtx_destroy(&mcdi->lock);
+ SFXGE_MCDI_LOCK_DESTROY(mcdi);
mcdi->state = SFXGE_MCDI_UNINITIALIZED;
return (rc);
}
@@ -236,7 +234,7 @@ sfxge_mcdi_fini(struct sfxge_softc *sc)
mcdi = &sc->mcdi;
emtp = &mcdi->transport;
- mtx_lock(&mcdi->lock);
+ SFXGE_MCDI_LOCK(mcdi);
KASSERT(mcdi->state == SFXGE_MCDI_INITIALIZED,
("MCDI not initialized"));
@@ -244,7 +242,7 @@ sfxge_mcdi_fini(struct sfxge_softc *sc)
bzero(emtp, sizeof(*emtp));
cv_destroy(&mcdi->cv);
- mtx_unlock(&mcdi->lock);
+ SFXGE_MCDI_UNLOCK(mcdi);
- mtx_destroy(&mcdi->lock);
+ SFXGE_MCDI_LOCK_DESTROY(mcdi);
}
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index ae81505e945e..6e211302dc8b 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -48,7 +48,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
unsigned int count;
int rc;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK_ASSERT_OWNED(port);
if (port->init_state != SFXGE_PORT_STARTED) {
rc = 0;
@@ -82,7 +82,6 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
rc = ETIMEDOUT;
out:
- mtx_unlock(&port->lock);
return (rc);
}
@@ -93,12 +92,16 @@ sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS)
unsigned int id = arg2;
int rc;
+ SFXGE_PORT_LOCK(&sc->port);
if ((rc = sfxge_mac_stat_update(sc)) != 0)
- return (rc);
+ goto out;
- return (SYSCTL_OUT(req,
- (uint64_t *)sc->port.mac_stats.decode_buf + id,
- sizeof(uint64_t)));
+ rc = SYSCTL_OUT(req,
+ (uint64_t *)sc->port.mac_stats.decode_buf + id,
+ sizeof(uint64_t));
+out:
+ SFXGE_PORT_UNLOCK(&sc->port);
+ return (rc);
}
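The statistics refactoring above moves locking out of the update routine and into the sysctl handler, so the decode buffer is refreshed and copied out under a single hold of the port lock. A minimal sketch of that contract, with illustrative names only:

	static int
	stats_update(struct sfxge_softc *sc)
	{
		SFXGE_PORT_LOCK_ASSERT_OWNED(&sc->port);	/* caller holds the lock */
		/* ... refresh sc->port.mac_stats.decode_buf ... */
		return (0);
	}

	static int
	stats_handler(struct sfxge_softc *sc, struct sysctl_req *req,
	    unsigned int id)
	{
		int rc;

		SFXGE_PORT_LOCK(&sc->port);
		if ((rc = stats_update(sc)) == 0)
			rc = SYSCTL_OUT(req,
			    (uint64_t *)sc->port.mac_stats.decode_buf + id,
			    sizeof(uint64_t));
		SFXGE_PORT_UNLOCK(&sc->port);
		return (rc);
	}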
static void
@@ -170,7 +173,7 @@ sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS)
sc = arg1;
port = &sc->port;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
if (req->newptr != NULL) {
if ((error = SYSCTL_IN(req, &fcntl, sizeof(fcntl))) != 0)
@@ -191,7 +194,7 @@ sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS)
}
out:
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
return (error);
}
@@ -207,13 +210,13 @@ sfxge_port_link_fc_handler(SYSCTL_HANDLER_ARGS)
sc = arg1;
port = &sc->port;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
if (port->init_state == SFXGE_PORT_STARTED && SFXGE_LINK_UP(sc))
efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc);
else
link_fc = 0;
error = SYSCTL_OUT(req, &link_fc, sizeof(link_fc));
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
return (error);
}
@@ -262,7 +265,7 @@ sfxge_mac_poll_work(void *arg, int npending)
enp = sc->enp;
port = &sc->port;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
if (port->init_state != SFXGE_PORT_STARTED)
goto done;
@@ -272,7 +275,7 @@ sfxge_mac_poll_work(void *arg, int npending)
sfxge_mac_link_update(sc, mode);
done:
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
}
static int
@@ -320,7 +323,7 @@ sfxge_mac_filter_set(struct sfxge_softc *sc)
struct sfxge_port *port = &sc->port;
int rc;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
/*
* The function may be called without softc_lock held in the
* case of SIOCADDMULTI and SIOCDELMULTI ioctls. ioctl handler
@@ -335,7 +338,7 @@ sfxge_mac_filter_set(struct sfxge_softc *sc)
rc = sfxge_mac_filter_set_locked(sc);
else
rc = 0;
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
return (rc);
}
@@ -348,7 +351,7 @@ sfxge_port_stop(struct sfxge_softc *sc)
port = &sc->port;
enp = sc->enp;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
KASSERT(port->init_state == SFXGE_PORT_STARTED,
("port not started"));
@@ -367,7 +370,7 @@ sfxge_port_stop(struct sfxge_softc *sc)
/* Destroy the common code port object. */
efx_port_fini(sc->enp);
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
}
int
@@ -383,7 +386,7 @@ sfxge_port_start(struct sfxge_softc *sc)
port = &sc->port;
enp = sc->enp;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK(port);
KASSERT(port->init_state == SFXGE_PORT_INITIALIZED,
("port not initialized"));
@@ -426,7 +429,7 @@ sfxge_port_start(struct sfxge_softc *sc)
port->init_state = SFXGE_PORT_STARTED;
/* Single poll in case there were missing initial events */
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
sfxge_mac_poll_work(sc, 0);
return (0);
@@ -439,7 +442,7 @@ fail3:
fail2:
efx_port_fini(sc->enp);
fail:
- mtx_unlock(&port->lock);
+ SFXGE_PORT_UNLOCK(port);
return (rc);
}
@@ -453,7 +456,7 @@ sfxge_phy_stat_update(struct sfxge_softc *sc)
unsigned int count;
int rc;
- mtx_lock(&port->lock);
+ SFXGE_PORT_LOCK_ASSERT_OWNED(port);
if (port->init_state != SFXGE_PORT_STARTED) {
rc = 0;
@@ -487,7 +490,6 @@ sfxge_phy_stat_update(struct sfxge_softc *sc)
rc = ETIMEDOUT;
out:
- mtx_unlock(&port->lock);
return (rc);
}
@@ -498,12 +500,16 @@ sfxge_phy_stat_handler(SYSCTL_HANDLER_ARGS)
unsigned int id = arg2;
int rc;
+ SFXGE_PORT_LOCK(&sc->port);
if ((rc = sfxge_phy_stat_update(sc)) != 0)
- return (rc);
+ goto out;
- return (SYSCTL_OUT(req,
- (uint32_t *)sc->port.phy_stats.decode_buf + id,
- sizeof(uint32_t)));
+ rc = SYSCTL_OUT(req,
+ (uint32_t *)sc->port.phy_stats.decode_buf + id,
+ sizeof(uint32_t));
+out:
+ SFXGE_PORT_UNLOCK(&sc->port);
+ return (rc);
}
static void
@@ -554,7 +560,7 @@ sfxge_port_fini(struct sfxge_softc *sc)
sfxge_dma_free(esmp);
free(port->mac_stats.decode_buf, M_SFXGE);
- mtx_destroy(&port->lock);
+ SFXGE_PORT_LOCK_DESTROY(port);
port->sc = NULL;
}
@@ -577,7 +583,7 @@ sfxge_port_init(struct sfxge_softc *sc)
port->sc = sc;
- mtx_init(&port->lock, "sfxge_port", NULL, MTX_DEF);
+ SFXGE_PORT_LOCK_INIT(port, device_get_nameunit(sc->dev));
port->phy_stats.decode_buf = malloc(EFX_PHY_NSTATS * sizeof(uint32_t),
M_SFXGE, M_WAITOK | M_ZERO);
@@ -615,7 +621,7 @@ fail2:
sfxge_dma_free(phy_stats_buf);
fail:
free(port->phy_stats.decode_buf, M_SFXGE);
- (void)mtx_destroy(&port->lock);
+ SFXGE_PORT_LOCK_DESTROY(port);
port->sc = NULL;
return (rc);
}
@@ -655,7 +661,7 @@ sfxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
efx_link_mode_t mode;
sc = ifp->if_softc;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
@@ -669,7 +675,7 @@ sfxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_active |= sfxge_port_link_fc_ifm(sc);
}
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
}
static int
@@ -682,7 +688,7 @@ sfxge_media_change(struct ifnet *ifp)
sc = ifp->if_softc;
ifm = sc->media.ifm_cur;
- sx_xlock(&sc->softc_lock);
+ SFXGE_ADAPTER_LOCK(sc);
if (!SFXGE_RUNNING(sc)) {
rc = 0;
@@ -695,7 +701,7 @@ sfxge_media_change(struct ifnet *ifp)
rc = efx_phy_adv_cap_set(sc->enp, ifm->ifm_data);
out:
- sx_xunlock(&sc->softc_lock);
+ SFXGE_ADAPTER_UNLOCK(sc);
return (rc);
}
diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c
index ccfdfb023d07..0a4b803b3732 100644
--- a/sys/dev/sfxge/sfxge_rx.c
+++ b/sys/dev/sfxge/sfxge_rx.c
@@ -207,7 +207,7 @@ sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
prefetch_read_many(sc->enp);
prefetch_read_many(rxq->common);
- mtx_assert(&evq->lock, MA_OWNED);
+ SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
if (rxq->init_state != SFXGE_RXQ_STARTED)
return;
@@ -749,7 +749,7 @@ sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
index = rxq->index;
evq = sc->evq[index];
- mtx_assert(&evq->lock, MA_OWNED);
+ SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
completed = rxq->completed;
while (completed != rxq->pending) {
@@ -834,7 +834,7 @@ sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index)
rxq = sc->rxq[index];
evq = sc->evq[index];
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
("rxq not started"));
@@ -849,7 +849,7 @@ again:
/* Flush the receive queue */
efx_rx_qflush(rxq->common);
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
count = 0;
do {
@@ -861,7 +861,7 @@ again:
} while (++count < 20);
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
if (rxq->flush_state == SFXGE_FLUSH_FAILED)
goto again;
@@ -885,7 +885,7 @@ again:
efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
EFX_RXQ_NBUFS(sc->rxq_entries));
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
}
static int
@@ -916,7 +916,7 @@ sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
&rxq->common)) != 0)
goto fail;
- mtx_lock(&evq->lock);
+ SFXGE_EVQ_LOCK(evq);
/* Enable the receive queue. */
efx_rx_qenable(rxq->common);
@@ -926,7 +926,7 @@ sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
/* Try to fill the queue from the pool. */
sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);
- mtx_unlock(&evq->lock);
+ SFXGE_EVQ_UNLOCK(evq);
return (0);
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 0a9218c76228..394a751f19a7 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -118,7 +118,7 @@ sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
unsigned int completed;
- mtx_assert(&evq->lock, MA_OWNED);
+ SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
completed = txq->completed;
while (completed != txq->pending) {
@@ -178,7 +178,7 @@ sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
unsigned int count;
unsigned int non_tcp_count;
- mtx_assert(&txq->lock, MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
stdp = &txq->dpl;
@@ -221,7 +221,7 @@ sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
- mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
txq->reaped = txq->completed;
}
@@ -233,7 +233,7 @@ sfxge_tx_qlist_post(struct sfxge_txq *txq)
unsigned int level;
int rc;
- mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
@@ -408,7 +408,7 @@ sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
unsigned int pushed;
int rc;
- mtx_assert(&txq->lock, MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
sc = txq->sc;
stdp = &txq->dpl;
@@ -484,7 +484,7 @@ sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
- mtx_assert(&txq->lock, MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
do {
if (SFXGE_TX_QDPL_PENDING(txq))
@@ -493,9 +493,9 @@ sfxge_tx_qdpl_service(struct sfxge_txq *txq)
if (!txq->blocked)
sfxge_tx_qdpl_drain(txq);
- mtx_unlock(&txq->lock);
+ SFXGE_TXQ_UNLOCK(txq);
} while (SFXGE_TX_QDPL_PENDING(txq) &&
- mtx_trylock(&txq->lock));
+ SFXGE_TXQ_TRYLOCK(txq));
}
/*
@@ -519,7 +519,7 @@ sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));
if (locked) {
- mtx_assert(&txq->lock, MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
sfxge_tx_qdpl_swizzle(txq);
@@ -588,11 +588,11 @@ sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
* the packet will be appended to the "get list" of the deferred
* packet list. Otherwise, it will be pushed on the "put list".
*/
- locked = mtx_trylock(&txq->lock);
+ locked = SFXGE_TXQ_TRYLOCK(txq);
if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
if (locked)
- mtx_unlock(&txq->lock);
+ SFXGE_TXQ_UNLOCK(txq);
rc = ENOBUFS;
goto fail;
}
@@ -605,7 +605,7 @@ sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
* is processing the list.
*/
if (!locked)
- locked = mtx_trylock(&txq->lock);
+ locked = SFXGE_TXQ_TRYLOCK(txq);
if (locked) {
/* Try to service the list. */
@@ -626,7 +626,7 @@ sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
struct sfxge_tx_dpl *stdp = &txq->dpl;
struct mbuf *mbuf, *next;
- mtx_lock(&txq->lock);
+ SFXGE_TXQ_LOCK(txq);
sfxge_tx_qdpl_swizzle(txq);
for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
@@ -638,7 +638,7 @@ sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
stdp->std_get_non_tcp_count = 0;
stdp->std_getp = &stdp->std_get;
- mtx_unlock(&txq->lock);
+ SFXGE_TXQ_UNLOCK(txq);
}
void
@@ -753,21 +753,20 @@ void sfxge_if_start(struct ifnet *ifp)
{
struct sfxge_softc *sc = ifp->if_softc;
- mtx_lock(&sc->tx_lock);
+ SFXGE_TXQ_LOCK(sc->txq[0]);
sfxge_if_start_locked(ifp);
- mtx_unlock(&sc->tx_lock);
+ SFXGE_TXQ_UNLOCK(sc->txq[0]);
}
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
- struct sfxge_softc *sc = txq->sc;
- struct ifnet *ifp = sc->ifnet;
+ struct ifnet *ifp = txq->sc->ifnet;
- mtx_assert(&sc->tx_lock, MA_OWNED);
+ SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sfxge_if_start_locked(ifp);
- mtx_unlock(&sc->tx_lock);
+ SFXGE_TXQ_UNLOCK(txq);
}
#endif /* SFXGE_HAVE_MQ */
@@ -1095,12 +1094,16 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
* roll back the work we have done.
*/
if (txq->n_pend_desc >
- SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
+ SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG)) {
+ txq->tso_pdrop_too_many++;
break;
+ }
next_id = (id + 1) & txq->ptr_mask;
if (__predict_false(tso_start_new_packet(txq, &tso,
- next_id)))
+ next_id))) {
+ txq->tso_pdrop_no_rsrc++;
break;
+ }
id = next_id;
}
}
@@ -1118,12 +1121,12 @@ sfxge_tx_qunblock(struct sfxge_txq *txq)
sc = txq->sc;
evq = sc->evq[txq->evq_index];
- mtx_assert(&evq->lock, MA_OWNED);
+ SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
if (txq->init_state != SFXGE_TXQ_STARTED)
return;
- mtx_lock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_TXQ_LOCK(txq);
if (txq->blocked) {
unsigned int level;
@@ -1154,7 +1157,7 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
txq = sc->txq[index];
evq = sc->evq[txq->evq_index];
- mtx_lock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_TXQ_LOCK(txq);
KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
("txq->init_state != SFXGE_TXQ_STARTED"));
@@ -1165,7 +1168,7 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
/* Flush the transmit queue. */
efx_tx_qflush(txq->common);
- mtx_unlock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_TXQ_UNLOCK(txq);
count = 0;
do {
@@ -1176,8 +1179,8 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
break;
} while (++count < 20);
- mtx_lock(&evq->lock);
- mtx_lock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_EVQ_LOCK(evq);
+ SFXGE_TXQ_LOCK(txq);
KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
("txq->flush_state == SFXGE_FLUSH_FAILED"));
@@ -1207,8 +1210,8 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
EFX_TXQ_NBUFS(sc->txq_entries));
- mtx_unlock(&evq->lock);
- mtx_unlock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_EVQ_UNLOCK(evq);
+ SFXGE_TXQ_UNLOCK(txq);
}
static int
@@ -1257,14 +1260,14 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
&txq->common)) != 0)
goto fail;
- mtx_lock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_TXQ_LOCK(txq);
/* Enable the transmit queue. */
efx_tx_qenable(txq->common);
txq->init_state = SFXGE_TXQ_STARTED;
- mtx_unlock(SFXGE_TXQ_LOCK(txq));
+ SFXGE_TXQ_UNLOCK(txq);
return (0);
@@ -1362,7 +1365,7 @@ sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
sc->txq[index] = NULL;
#ifdef SFXGE_HAVE_MQ
- mtx_destroy(&txq->lock);
+ SFXGE_TXQ_LOCK_DESTROY(txq);
#endif
free(txq, M_SFXGE);
@@ -1468,7 +1471,7 @@ sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
stdp->std_getp = &stdp->std_get;
- mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
+ SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);
SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
SYSCTL_CHILDREN(txq_node), OID_AUTO,
@@ -1517,6 +1520,8 @@ static const struct {
SFXGE_TX_STAT(tso_bursts, tso_bursts),
SFXGE_TX_STAT(tso_packets, tso_packets),
SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
+ SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
+ SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
SFXGE_TX_STAT(tx_collapses, collapses),
SFXGE_TX_STAT(tx_drops, drops),
SFXGE_TX_STAT(tx_get_overflow, get_overflow),
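The sfxge_tx.c changes above are a mechanical switch from raw mtx_*() calls to the new SFXGE_TXQ_*/SFXGE_EVQ_* wrappers, plus two new TSO drop counters; the deferred packet list scheme itself (a "put list" filled by senders that do not hold the queue lock and a "get list" drained by the lock holder) is unchanged. The following is a minimal userspace sketch of that trylock-plus-deferral idea using POSIX threads; the struct and function names are invented for illustration, and a second mutex stands in for whatever lightweight protection the driver gives its put list.

    /*
     * Minimal sketch of a trylock + deferred packet list, loosely modelled
     * on the sfxge TX path.  Names and data structures are illustrative.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { int id; struct pkt *next; };

    static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER; /* queue lock */
    static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER; /* put list lock */
    static struct pkt *put_list;    /* filled without holding txq_lock */
    static struct pkt *get_list;    /* only touched with txq_lock held */

    static void service_locked(void)
    {
        /* Drain the put list into the get list, then "transmit". */
        pthread_mutex_lock(&put_lock);
        struct pkt *p = put_list;
        put_list = NULL;
        pthread_mutex_unlock(&put_lock);

        while (p != NULL) {
            struct pkt *next = p->next;
            p->next = get_list;
            get_list = p;
            p = next;
        }
        while (get_list != NULL) {
            struct pkt *done = get_list;
            get_list = done->next;
            printf("tx pkt %d\n", done->id);
            free(done);
        }
    }

    static void submit(struct pkt *p)
    {
        if (pthread_mutex_trylock(&txq_lock) == 0) {
            /* We own the queue: append to the get list and service it. */
            p->next = get_list;
            get_list = p;
            service_locked();
            pthread_mutex_unlock(&txq_lock);
        } else {
            /* Someone else owns the queue: defer to the put list. */
            pthread_mutex_lock(&put_lock);
            p->next = put_list;
            put_list = p;
            pthread_mutex_unlock(&put_lock);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct pkt *p = malloc(sizeof(*p));
            p->id = i;
            submit(p);
        }
        /* Final drain in case anything was deferred. */
        pthread_mutex_lock(&txq_lock);
        service_locked();
        pthread_mutex_unlock(&txq_lock);
        return 0;
    }

In this single-threaded demo the trylock always succeeds; with concurrent senders the else branch is what keeps them from blocking on the queue lock.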
diff --git a/sys/dev/sfxge/sfxge_tx.h b/sys/dev/sfxge/sfxge_tx.h
index 958dbc3217d5..510cb3fae72e 100644
--- a/sys/dev/sfxge/sfxge_tx.h
+++ b/sys/dev/sfxge/sfxge_tx.h
@@ -123,13 +123,35 @@ enum sfxge_txq_type {
#define SFXGE_TX_BATCH 64
#ifdef SFXGE_HAVE_MQ
-#define SFXGE_TXQ_LOCK(txq) (&(txq)->lock)
+#define SFXGE_TX_LOCK(txq) (&(txq)->lock)
#define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc)
#else
-#define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock)
+#define SFXGE_TX_LOCK(txq) (&(txq)->sc->tx_lock)
#define SFXGE_TX_SCALE(sc) 1
#endif
+#define SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index) \
+ do { \
+ struct sfxge_txq *__txq = (_txq); \
+ \
+ snprintf((__txq)->lock_name, \
+ sizeof((__txq)->lock_name), \
+ "%s:txq%u", (_ifname), (_txq_index)); \
+ mtx_init(&(__txq)->lock, (__txq)->lock_name, \
+ NULL, MTX_DEF); \
+ } while (B_FALSE)
+#define SFXGE_TXQ_LOCK_DESTROY(_txq) \
+ mtx_destroy(&(_txq)->lock)
+#define SFXGE_TXQ_LOCK(_txq) \
+ mtx_lock(SFXGE_TX_LOCK(_txq))
+#define SFXGE_TXQ_TRYLOCK(_txq) \
+ mtx_trylock(SFXGE_TX_LOCK(_txq))
+#define SFXGE_TXQ_UNLOCK(_txq) \
+ mtx_unlock(SFXGE_TX_LOCK(_txq))
+#define SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq) \
+ mtx_assert(SFXGE_TX_LOCK(_txq), MA_OWNED)
+
+
struct sfxge_txq {
/* The following fields should be written very rarely */
struct sfxge_softc *sc;
@@ -150,6 +172,8 @@ struct sfxge_txq {
efsys_mem_t *tsoh_buffer;
+ char lock_name[SFXGE_LOCK_NAME_MAX];
+
/* This field changes more often and is read regularly on both
* the initiation and completion paths
*/
@@ -177,6 +201,8 @@ struct sfxge_txq {
unsigned long get_non_tcp_overflow;
unsigned long put_overflow;
unsigned long netdown_drops;
+ unsigned long tso_pdrop_too_many;
+ unsigned long tso_pdrop_no_rsrc;
/* The following fields change more often, and are used mostly
* on the completion path
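The new SFXGE_TXQ_LOCK_* wrappers above hide the mtx calls behind macros and give every queue lock a per-queue name built with snprintf(), which makes lock diagnostics easier to attribute to a particular queue. Below is a small userspace sketch of the same wrapper-macro shape (a do/while(0) initializer that declares a local, plus one-line lock/unlock wrappers); the pthread calls and the struct layout are stand-ins for illustration only.

    /*
     * Sketch of per-object lock wrappers with a generated lock name,
     * mirroring the SFXGE_TXQ_LOCK_INIT() style of macro.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define LOCK_NAME_MAX   32

    struct txq {
        pthread_mutex_t lock;
        char            lock_name[LOCK_NAME_MAX];
    };

    #define TXQ_LOCK_INIT(_txq, _ifname, _idx)                          \
        do {                                                            \
            struct txq *__txq = (_txq);                                 \
            snprintf(__txq->lock_name, sizeof(__txq->lock_name),        \
                "%s:txq%u", (_ifname), (unsigned)(_idx));               \
            pthread_mutex_init(&__txq->lock, NULL);                     \
        } while (0)

    #define TXQ_LOCK(_txq)          pthread_mutex_lock(&(_txq)->lock)
    #define TXQ_UNLOCK(_txq)        pthread_mutex_unlock(&(_txq)->lock)
    #define TXQ_LOCK_DESTROY(_txq)  pthread_mutex_destroy(&(_txq)->lock)

    int main(void)
    {
        struct txq q;

        TXQ_LOCK_INIT(&q, "sfxge0", 3);
        TXQ_LOCK(&q);
        printf("holding %s\n", q.lock_name);
        TXQ_UNLOCK(&q);
        TXQ_LOCK_DESTROY(&q);
        return 0;
    }

The do { } while (0) body lets the initializer declare a temporary and still expand as a single statement; the driver macro uses while (B_FALSE) for the same effect.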
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 105179719d48..5f11bba4e641 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -111,6 +111,7 @@ SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, default_channels, CTLFLAG_RWTUN,
&uaudio_default_channels, 0, "uaudio default sample channels");
#endif
+#define UAUDIO_IRQS (8000 / UAUDIO_NFRAMES) /* interrupts per second */
#define UAUDIO_NFRAMES 64 /* must be factor of 8 due HS-USB */
#define UAUDIO_NCHANBUFS 2 /* number of outstanding request */
#define UAUDIO_RECURSE_LIMIT 255 /* rounds */
@@ -189,7 +190,6 @@ struct uaudio_chan_alt {
uint8_t iface_index;
uint8_t iface_alt_index;
uint8_t channels;
- uint8_t enable_sync;
};
struct uaudio_chan {
@@ -226,11 +226,12 @@ struct uaudio_chan {
#define CHAN_OP_STOP 2
#define CHAN_OP_DRAIN 3
- uint8_t last_sync_time;
- uint8_t last_sync_state;
-#define UAUDIO_SYNC_NONE 0
-#define UAUDIO_SYNC_MORE 1
-#define UAUDIO_SYNC_LESS 2
+ /* USB audio feedback endpoint state */
+ struct {
+ uint16_t time; /* I/O interrupt count */
+ int16_t constant; /* sample rate adjustment in Hz */
+ int16_t remainder; /* current remainder */
+ } feedback;
};
#define UMIDI_EMB_JACK_MAX 16 /* units */
@@ -1799,14 +1800,6 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
chan_alt->iface_index = curidx;
chan_alt->iface_alt_index = alt_index;
- if (UEP_HAS_SYNCADDR(ed1) && ed1->bSynchAddress != 0) {
- DPRINTF("Sync endpoint will be used, if present\n");
- chan_alt->enable_sync = 1;
- } else {
- DPRINTF("Sync endpoint will not be used\n");
- chan_alt->enable_sync = 0;
- }
-
usbd_set_parent_iface(sc->sc_udev, curidx,
sc->sc_mixer_iface_index);
@@ -2016,29 +2009,44 @@ uaudio_chan_play_sync_callback(struct usb_xfer *xfer, usb_error_t error)
if (temp == 0)
break;
- /* correctly scale value */
-
- temp = (temp * 125ULL) - 64;
+ temp *= 125ULL;
/* auto adjust */
-
while (temp < (sample_rate - (sample_rate / 4)))
temp *= 2;
-
+
while (temp > (sample_rate + (sample_rate / 2)))
temp /= 2;
- /* compare */
-
- DPRINTF("Comparing %d < %d\n",
- (int)temp, (int)sample_rate);
+ /*
+ * Some USB audio devices only report a sample rate
+ * different from the nominal one when they want one
+ * more or less sample. Make sure we catch this case
+ * by pulling the sample rate offset slowly towards
+ * zero if the reported value is equal to the sample
+ * rate.
+ */
+ if (temp > sample_rate)
+ ch->feedback.constant += 1;
+ else if (temp < sample_rate)
+ ch->feedback.constant -= 1;
+ else if (ch->feedback.constant > 0)
+ ch->feedback.constant--;
+ else if (ch->feedback.constant < 0)
+ ch->feedback.constant++;
+
+ DPRINTF("Comparing %d Hz :: %d Hz :: %d samples drift\n",
+ (int)temp, (int)sample_rate, (int)ch->feedback.constant);
- if (temp == sample_rate)
- ch->last_sync_state = UAUDIO_SYNC_NONE;
- else if (temp > sample_rate)
- ch->last_sync_state = UAUDIO_SYNC_MORE;
- else
- ch->last_sync_state = UAUDIO_SYNC_LESS;
+ /*
+ * Range check sync constant. We cannot change the
+ * number of samples per second by more than the value
+ * defined by "UAUDIO_IRQS":
+ */
+ if (ch->feedback.constant > UAUDIO_IRQS)
+ ch->feedback.constant = UAUDIO_IRQS;
+ else if (ch->feedback.constant < -UAUDIO_IRQS)
+ ch->feedback.constant = -UAUDIO_IRQS;
break;
case USB_ST_SETUP:
@@ -2082,10 +2090,10 @@ tr_transferred:
}
chn_intr(ch->pcm_ch);
- /* start SYNC transfer, if any */
- if (ch->usb_alt[ch->cur_alt].enable_sync != 0) {
- if ((ch->last_sync_time++ & 7) == 0)
- usbd_transfer_start(ch->xfer[UAUDIO_NCHANBUFS]);
+ /* start the SYNC transfer one time per second, if any */
+ if (++(ch->feedback.time) >= UAUDIO_IRQS) {
+ ch->feedback.time = 0;
+ usbd_transfer_start(ch->xfer[UAUDIO_NCHANBUFS]);
}
case USB_ST_SETUP:
@@ -2120,21 +2128,22 @@ tr_transferred:
}
if (n == (blockcount - 1)) {
- switch (ch->last_sync_state) {
- case UAUDIO_SYNC_MORE:
+ /*
+ * Update sync remainder and check if
+ * we should transmit more or less
+ * data:
+ */
+ ch->feedback.remainder += ch->feedback.constant;
+ if (ch->feedback.remainder >= UAUDIO_IRQS) {
+ ch->feedback.remainder -= UAUDIO_IRQS;
DPRINTFN(6, "sending one sample more\n");
if ((frame_len + sample_size) <= mfl)
frame_len += sample_size;
- ch->last_sync_state = UAUDIO_SYNC_NONE;
- break;
- case UAUDIO_SYNC_LESS:
+ } else if (ch->feedback.remainder <= -UAUDIO_IRQS) {
+ ch->feedback.remainder += UAUDIO_IRQS;
DPRINTFN(6, "sending one sample less\n");
if (frame_len >= sample_size)
frame_len -= sample_size;
- ch->last_sync_state = UAUDIO_SYNC_NONE;
- break;
- default:
- break;
}
}
@@ -2452,6 +2461,9 @@ uaudio_chan_start(struct uaudio_chan *ch)
}
usb_proc_explore_unlock(sc->sc_udev);
+ /* reset feedback endpoint state */
+ memset(&ch->feedback, 0, sizeof(ch->feedback));
+
if (do_start) {
usbd_transfer_start(ch->xfer[0]);
usbd_transfer_start(ch->xfer[1]);
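The uaudio.c hunks replace the old three-state sync flag with a signed drift value (feedback.constant, clamped to +/-UAUDIO_IRQS samples per second) and a per-interrupt accumulator (feedback.remainder): each playback completion adds the constant to the remainder, and whenever the remainder crosses +/-UAUDIO_IRQS one sample is appended to or dropped from the next frame. The short simulation below shows how a drift of a few samples per second gets spread evenly over the roughly 125 completions in a second; only the UAUDIO_NFRAMES/UAUDIO_IRQS values come from the hunk, the rest is illustrative.

    /*
     * Simulation of the uaudio feedback remainder: a drift of `constant`
     * samples per second is spread over UAUDIO_IRQS interrupts, adding or
     * dropping one sample whenever the accumulator crosses +/-UAUDIO_IRQS.
     */
    #include <stdio.h>

    #define UAUDIO_NFRAMES  64
    #define UAUDIO_IRQS     (8000 / UAUDIO_NFRAMES)   /* 125 per second */

    int main(void)
    {
        int constant = 3;       /* device wants 3 extra samples per second */
        int remainder = 0;
        int adjustments = 0;

        for (int irq = 0; irq < UAUDIO_IRQS; irq++) {   /* one second */
            remainder += constant;
            if (remainder >= UAUDIO_IRQS) {
                remainder -= UAUDIO_IRQS;
                adjustments++;          /* send one sample more */
            } else if (remainder <= -UAUDIO_IRQS) {
                remainder += UAUDIO_IRQS;
                adjustments--;          /* send one sample less */
            }
        }
        printf("net adjustment after one second: %+d samples\n", adjustments);
        return 0;
    }

With constant = 3 the loop reports a net adjustment of +3 samples, matching the drift the device asked for.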
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 7d977a0d00c2..b2975020f268 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -121,6 +121,7 @@ static const struct pci_id pci_ns8250_ids[] = {
{ 0x8086, 0x1c3d, 0xffff, 0, "Intel AMT - KT Controller", 0x10 },
{ 0x8086, 0x1d3d, 0xffff, 0, "Intel C600/X79 Series Chipset KT Controller", 0x10 },
{ 0x8086, 0x2a07, 0xffff, 0, "Intel AMT - PM965/GM965 KT Controller", 0x10 },
+{ 0x8086, 0x2a47, 0xffff, 0, "Mobile 4 Series Chipset KT Controller", 0x10 },
{ 0x8086, 0x2e17, 0xffff, 0, "4 Series Chipset Serial KT Controller", 0x10 },
{ 0x8086, 0x3b67, 0xffff, 0, "5 Series/3400 Series Chipset KT Controller",
0x10 },
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index e9b72a34e97f..2ad79646029a 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -492,7 +492,7 @@ xhci_start_controller(struct xhci_softc *sc)
XWRITE4(sc, runt, XHCI_ERDP_LO(0), (uint32_t)addr);
XWRITE4(sc, runt, XHCI_ERDP_HI(0), (uint32_t)(addr >> 32));
- addr = (uint64_t)buf_res.physaddr;
+ addr = buf_res.physaddr;
DPRINTF("ERSTBA(0)=0x%016llx\n", (unsigned long long)addr);
@@ -1114,7 +1114,7 @@ xhci_interrupt_poll(struct xhci_softc *sc)
* register.
*/
- addr = (uint32_t)buf_res.physaddr;
+ addr = buf_res.physaddr;
addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_events[i];
/* try to clear busy bit */
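The two xhci.c hunks drop explicit casts on buf_res.physaddr: the first cast (to uint64_t) was merely redundant, while the second (to uint32_t) would silently discard the upper 32 bits of the event-ring physical address if the buffer happened to sit above 4 GB. A tiny illustration of that truncation, using a made-up address:

    /* Demonstrates why casting a 64-bit physical address to uint32_t is unsafe. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t physaddr = 0x1234567890ULL;    /* hypothetical address > 4 GB */
        uint64_t truncated = (uint32_t)physaddr;

        printf("full:      0x%016" PRIx64 "\n", physaddr);
        printf("truncated: 0x%016" PRIx64 "\n", truncated);
        printf("low  reg:  0x%08" PRIx32 "\n", (uint32_t)physaddr);
        printf("high reg:  0x%08" PRIx32 "\n", (uint32_t)(physaddr >> 32));
        return 0;
    }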
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index 2de5831cf0c4..c2d65c790cd4 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -399,6 +399,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(QUALCOMM2, AC8700, 0),
U3G_DEV(QUALCOMM2, MF330, 0),
U3G_DEV(QUALCOMM2, SIM5218, 0),
+ U3G_DEV(QUALCOMM2, WM620, 0),
U3G_DEV(QUALCOMM2, VW110L, U3GINIT_SCSIEJECT),
U3G_DEV(QUALCOMM2, GOBI2000_QDL, 0),
U3G_DEV(QUALCOMM2, GOBI2000, 0),
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 53057e02d6a3..a40c913b20ad 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -3577,6 +3577,7 @@ product QUALCOMM2 CDMA_MSM 0x3196 CDMA Technologies MSM modem
product QUALCOMM2 AC8700 0x6000 AC8700
product QUALCOMM2 VW110L 0x1000 Vertex Wireless 110L modem
product QUALCOMM2 SIM5218 0x9000 SIM5218
+product QUALCOMM2 WM620 0x9002 Neoway WM620
product QUALCOMM2 GOBI2000_QDL 0x9204 Qualcomm Gobi 2000 QDL
product QUALCOMM2 GOBI2000 0x9205 Qualcomm Gobi 2000 modem
product QUALCOMM2 VT80N 0x6500 Venus VT80N
diff --git a/sys/dev/vt/hw/vga/vt_vga.c b/sys/dev/vt/hw/vga/vt_vga.c
index d29a5b394481..43bad8e9a91b 100644
--- a/sys/dev/vt/hw/vga/vt_vga.c
+++ b/sys/dev/vt/hw/vga/vt_vga.c
@@ -1263,7 +1263,8 @@ static int
vtvga_probe(device_t dev)
{
- device_set_desc(dev, "vt_vga driver");
+ device_set_desc(dev, "VT VGA driver");
+
return (BUS_PROBE_NOWILDCARD);
}
diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c
index accb46c34bfc..52846ddf3b54 100644
--- a/sys/dev/wpi/if_wpi.c
+++ b/sys/dev/wpi/if_wpi.c
@@ -16,8 +16,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define VERSION "20071127"
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -60,6 +58,7 @@ __FBSDID("$FreeBSD$");
*/
#include "opt_wlan.h"
+#include "opt_wpi.h"
#include <sys/param.h>
#include <sys/sysctl.h>
@@ -93,51 +92,20 @@ __FBSDID("$FreeBSD$");
#include <net/if_media.h>
#include <net/if_types.h>
-#include <net80211/ieee80211_var.h>
-#include <net80211/ieee80211_radiotap.h>
-#include <net80211/ieee80211_regdomain.h>
-#include <net80211/ieee80211_ratectl.h>
-
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
-#include <netinet/ip.h>
#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_regdomain.h>
+#include <net80211/ieee80211_ratectl.h>
#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
-
-#define WPI_DEBUG
-
-#ifdef WPI_DEBUG
-#define DPRINTF(x) do { if (wpi_debug != 0) printf x; } while (0)
-#define DPRINTFN(n, x) do { if (wpi_debug & n) printf x; } while (0)
-#define WPI_DEBUG_SET (wpi_debug != 0)
-
-enum {
- WPI_DEBUG_UNUSED = 0x00000001, /* Unused */
- WPI_DEBUG_HW = 0x00000002, /* Stage 1 (eeprom) debugging */
- WPI_DEBUG_TX = 0x00000004, /* Stage 2 TX intrp debugging*/
- WPI_DEBUG_RX = 0x00000008, /* Stage 2 RX intrp debugging */
- WPI_DEBUG_CMD = 0x00000010, /* Stage 2 CMD intrp debugging*/
- WPI_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */
- WPI_DEBUG_DMA = 0x00000040, /* DMA (de)allocations/syncs */
- WPI_DEBUG_SCANNING = 0x00000080, /* Stage 2 Scanning debugging */
- WPI_DEBUG_NOTIFY = 0x00000100, /* State 2 Noftif intr debug */
- WPI_DEBUG_TEMP = 0x00000200, /* TXPower/Temp Calibration */
- WPI_DEBUG_OPS = 0x00000400, /* wpi_ops taskq debug */
- WPI_DEBUG_WATCHDOG = 0x00000800, /* Watch dog debug */
- WPI_DEBUG_ANY = 0xffffffff
-};
-
-static int wpi_debug;
-SYSCTL_INT(_debug, OID_AUTO, wpi, CTLFLAG_RWTUN, &wpi_debug, 0, "wpi debug level");
-
-#else
-#define DPRINTF(x)
-#define DPRINTFN(n, x)
-#define WPI_DEBUG_SET 0
-#endif
+#include <dev/wpi/if_wpi_debug.h>
struct wpi_ident {
uint16_t vendor;
@@ -158,99 +126,138 @@ static const struct wpi_ident wpi_ident_table[] = {
{ 0, 0, 0, NULL }
};
+static int wpi_probe(device_t);
+static int wpi_attach(device_t);
+static void wpi_radiotap_attach(struct wpi_softc *);
+static void wpi_sysctlattach(struct wpi_softc *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void wpi_vap_delete(struct ieee80211vap *);
+static int wpi_detach(device_t);
+static int wpi_shutdown(device_t);
+static int wpi_suspend(device_t);
+static int wpi_resume(device_t);
+static int wpi_nic_lock(struct wpi_softc *);
+static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
+static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
- void **, bus_size_t, bus_size_t, int);
+ void **, bus_size_t, bus_size_t);
static void wpi_dma_contig_free(struct wpi_dma_info *);
-static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int wpi_alloc_shared(struct wpi_softc *);
static void wpi_free_shared(struct wpi_softc *);
-static int wpi_alloc_rx_ring(struct wpi_softc *, struct wpi_rx_ring *);
-static void wpi_reset_rx_ring(struct wpi_softc *, struct wpi_rx_ring *);
-static void wpi_free_rx_ring(struct wpi_softc *, struct wpi_rx_ring *);
+static int wpi_alloc_fwmem(struct wpi_softc *);
+static void wpi_free_fwmem(struct wpi_softc *);
+static int wpi_alloc_rx_ring(struct wpi_softc *);
+static void wpi_update_rx_ring(struct wpi_softc *);
+static void wpi_reset_rx_ring(struct wpi_softc *);
+static void wpi_free_rx_ring(struct wpi_softc *);
static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
- int, int);
+ int);
+static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
+static int wpi_read_eeprom(struct wpi_softc *,
+ uint8_t macaddr[IEEE80211_ADDR_LEN]);
+static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
+static void wpi_read_eeprom_band(struct wpi_softc *, int);
+static int wpi_read_eeprom_channels(struct wpi_softc *, int);
+static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
+ struct ieee80211_channel *);
+static int wpi_setregdomain(struct ieee80211com *,
+ struct ieee80211_regdomain *, int,
+ struct ieee80211_channel[]);
+static int wpi_read_eeprom_group(struct wpi_softc *, int);
+static void wpi_node_free(struct ieee80211_node *);
+static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
-static void wpi_mem_lock(struct wpi_softc *);
-static void wpi_mem_unlock(struct wpi_softc *);
-static uint32_t wpi_mem_read(struct wpi_softc *, uint16_t);
-static void wpi_mem_write(struct wpi_softc *, uint16_t, uint32_t);
-static void wpi_mem_write_region_4(struct wpi_softc *, uint16_t,
- const uint32_t *, int);
-static uint16_t wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
-static int wpi_alloc_fwmem(struct wpi_softc *);
-static void wpi_free_fwmem(struct wpi_softc *);
-static int wpi_load_firmware(struct wpi_softc *);
-static void wpi_unload_firmware(struct wpi_softc *);
-static int wpi_load_microcode(struct wpi_softc *, const uint8_t *, int);
-static void wpi_rx_intr(struct wpi_softc *, struct wpi_rx_desc *,
+static void wpi_calib_timeout(void *);
+static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
+ struct wpi_rx_data *);
+static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
struct wpi_rx_data *);
-static void wpi_tx_intr(struct wpi_softc *, struct wpi_rx_desc *);
-static void wpi_cmd_intr(struct wpi_softc *, struct wpi_rx_desc *);
+static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
+static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
static void wpi_notif_intr(struct wpi_softc *);
+static void wpi_wakeup_intr(struct wpi_softc *);
+static void wpi_fatal_intr(struct wpi_softc *);
static void wpi_intr(void *);
-static uint8_t wpi_plcp_signal(int);
-static void wpi_watchdog(void *);
+static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
static int wpi_tx_data(struct wpi_softc *, struct mbuf *,
- struct ieee80211_node *, int);
-static void wpi_start(struct ifnet *);
-static void wpi_start_locked(struct ifnet *);
+ struct ieee80211_node *);
+static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
+ struct ieee80211_node *,
+ const struct ieee80211_bpf_params *);
static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void wpi_scan_start(struct ieee80211com *);
-static void wpi_scan_end(struct ieee80211com *);
-static void wpi_set_channel(struct ieee80211com *);
-static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
-static void wpi_scan_mindwell(struct ieee80211_scan_state *);
+static void wpi_start(struct ifnet *);
+static void wpi_start_locked(struct ifnet *);
+static void wpi_watchdog_rfkill(void *);
+static void wpi_watchdog(void *);
static int wpi_ioctl(struct ifnet *, u_long, caddr_t);
-static void wpi_read_eeprom(struct wpi_softc *,
- uint8_t macaddr[IEEE80211_ADDR_LEN]);
-static void wpi_read_eeprom_channels(struct wpi_softc *, int);
-static void wpi_read_eeprom_group(struct wpi_softc *, int);
-static int wpi_cmd(struct wpi_softc *, int, const void *, int, int);
-static int wpi_wme_update(struct ieee80211com *);
+static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int);
static int wpi_mrr_setup(struct wpi_softc *);
+static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
+static int wpi_add_broadcast_node(struct wpi_softc *, int);
+static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
+static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
+static int wpi_updateedca(struct ieee80211com *);
+static void wpi_set_promisc(struct wpi_softc *);
+static void wpi_update_promisc(struct ifnet *);
+static void wpi_update_mcast(struct ifnet *);
static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
-static void wpi_enable_tsf(struct wpi_softc *, struct ieee80211_node *);
-#if 0
-static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
-#endif
+static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
+static void wpi_power_calibration(struct wpi_softc *);
+static int wpi_set_txpower(struct wpi_softc *, int);
+static int wpi_get_power_index(struct wpi_softc *,
+ struct wpi_power_group *, struct ieee80211_channel *, int);
+static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
+static int wpi_send_btcoex(struct wpi_softc *);
+static int wpi_send_rxon(struct wpi_softc *, int, int);
+static int wpi_config(struct wpi_softc *);
+static uint16_t wpi_get_active_dwell_time(struct wpi_softc *,
+ struct ieee80211_channel *, uint8_t);
+static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t);
+static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *,
+ struct ieee80211_channel *);
+static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
static int wpi_auth(struct wpi_softc *, struct ieee80211vap *);
+static void wpi_update_beacon(struct ieee80211vap *, int);
+static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
static int wpi_run(struct wpi_softc *, struct ieee80211vap *);
-static int wpi_scan(struct wpi_softc *);
-static int wpi_config(struct wpi_softc *);
-static void wpi_stop_master(struct wpi_softc *);
-static int wpi_power_up(struct wpi_softc *);
-static int wpi_reset(struct wpi_softc *);
-static void wpi_hwreset(void *, int);
-static void wpi_rfreset(void *, int);
-static void wpi_hw_config(struct wpi_softc *);
+static int wpi_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
+ ieee80211_keyix *, ieee80211_keyix *);
+static int wpi_key_set(struct ieee80211vap *,
+ const struct ieee80211_key *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+static int wpi_key_delete(struct ieee80211vap *,
+ const struct ieee80211_key *);
+static int wpi_post_alive(struct wpi_softc *);
+static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int);
+static int wpi_load_firmware(struct wpi_softc *);
+static int wpi_read_firmware(struct wpi_softc *);
+static void wpi_unload_firmware(struct wpi_softc *);
+static int wpi_clock_wait(struct wpi_softc *);
+static int wpi_apm_init(struct wpi_softc *);
+static void wpi_apm_stop_master(struct wpi_softc *);
+static void wpi_apm_stop(struct wpi_softc *);
+static void wpi_nic_config(struct wpi_softc *);
+static int wpi_hw_init(struct wpi_softc *);
+static void wpi_hw_stop(struct wpi_softc *);
+static void wpi_radio_on(void *, int);
+static void wpi_radio_off(void *, int);
+static void wpi_init_locked(struct wpi_softc *);
static void wpi_init(void *);
-static void wpi_init_locked(struct wpi_softc *, int);
-static void wpi_stop(struct wpi_softc *);
static void wpi_stop_locked(struct wpi_softc *);
-
-static int wpi_set_txpower(struct wpi_softc *, struct ieee80211_channel *,
- int);
-static void wpi_calib_timeout(void *);
-static void wpi_power_calibration(struct wpi_softc *, int);
-static int wpi_get_power_index(struct wpi_softc *,
- struct wpi_power_group *, struct ieee80211_channel *, int);
-#ifdef WPI_DEBUG
-static const char *wpi_cmd_str(int);
-#endif
-static int wpi_probe(device_t);
-static int wpi_attach(device_t);
-static int wpi_detach(device_t);
-static int wpi_shutdown(device_t);
-static int wpi_suspend(device_t);
-static int wpi_resume(device_t);
+static void wpi_stop(struct wpi_softc *);
+static void wpi_scan_start(struct ieee80211com *);
+static void wpi_scan_end(struct ieee80211com *);
+static void wpi_set_channel(struct ieee80211com *);
+static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
+static void wpi_scan_mindwell(struct ieee80211_scan_state *);
+static void wpi_hw_reset(void *, int);
static device_method_t wpi_methods[] = {
/* Device interface */
@@ -269,25 +276,15 @@ static driver_t wpi_driver = {
wpi_methods,
sizeof (struct wpi_softc)
};
-
static devclass_t wpi_devclass;
DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);
MODULE_VERSION(wpi, 1);
-static const uint8_t wpi_ridx_to_plcp[] = {
- /* OFDM: IEEE Std 802.11a-1999, pp. 14 Table 80 */
- /* R1-R4 (ral/ural is R4-R1) */
- 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3,
- /* CCK: device-dependent */
- 10, 20, 55, 110
-};
-
-static const uint8_t wpi_ridx_to_rate[] = {
- 12, 18, 24, 36, 48, 72, 96, 108, /* OFDM */
- 2, 4, 11, 22 /*CCK */
-};
+MODULE_DEPEND(wpi, pci, 1, 1, 1);
+MODULE_DEPEND(wpi, wlan, 1, 1, 1);
+MODULE_DEPEND(wpi, firmware, 1, 1, 1);
static int
wpi_probe(device_t dev)
@@ -304,202 +301,38 @@ wpi_probe(device_t dev)
return ENXIO;
}
-/**
- * Load the firmare image from disk to the allocated dma buffer.
- * we also maintain the reference to the firmware pointer as there
- * is times where we may need to reload the firmware but we are not
- * in a context that can access the filesystem (ie taskq cause by restart)
- *
- * @return 0 on success, an errno on failure
- */
-static int
-wpi_load_firmware(struct wpi_softc *sc)
-{
- const struct firmware *fp;
- struct wpi_dma_info *dma = &sc->fw_dma;
- const struct wpi_firmware_hdr *hdr;
- const uint8_t *itext, *idata, *rtext, *rdata, *btext;
- uint32_t itextsz, idatasz, rtextsz, rdatasz, btextsz;
- int error;
-
- DPRINTFN(WPI_DEBUG_FIRMWARE,
- ("Attempting Loading Firmware from wpi_fw module\n"));
-
- WPI_UNLOCK(sc);
-
- if (sc->fw_fp == NULL && (sc->fw_fp = firmware_get("wpifw")) == NULL) {
- device_printf(sc->sc_dev,
- "could not load firmware image 'wpifw'\n");
- error = ENOENT;
- WPI_LOCK(sc);
- goto fail;
- }
-
- fp = sc->fw_fp;
-
- WPI_LOCK(sc);
-
- /* Validate the firmware is minimum a particular version */
- if (fp->version < WPI_FW_MINVERSION) {
- device_printf(sc->sc_dev,
- "firmware version is too old. Need %d, got %d\n",
- WPI_FW_MINVERSION,
- fp->version);
- error = ENXIO;
- goto fail;
- }
-
- if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
- device_printf(sc->sc_dev,
- "firmware file too short: %zu bytes\n", fp->datasize);
- error = ENXIO;
- goto fail;
- }
-
- hdr = (const struct wpi_firmware_hdr *)fp->data;
-
- /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW |
- |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
-
- rtextsz = le32toh(hdr->rtextsz);
- rdatasz = le32toh(hdr->rdatasz);
- itextsz = le32toh(hdr->itextsz);
- idatasz = le32toh(hdr->idatasz);
- btextsz = le32toh(hdr->btextsz);
-
- /* check that all firmware segments are present */
- if (fp->datasize < sizeof (struct wpi_firmware_hdr) +
- rtextsz + rdatasz + itextsz + idatasz + btextsz) {
- device_printf(sc->sc_dev,
- "firmware file too short: %zu bytes\n", fp->datasize);
- error = ENXIO; /* XXX appropriate error code? */
- goto fail;
- }
-
- /* get pointers to firmware segments */
- rtext = (const uint8_t *)(hdr + 1);
- rdata = rtext + rtextsz;
- itext = rdata + rdatasz;
- idata = itext + itextsz;
- btext = idata + idatasz;
-
- DPRINTFN(WPI_DEBUG_FIRMWARE,
- ("Firmware Version: Major %d, Minor %d, Driver %d, \n"
- "runtime (text: %u, data: %u) init (text: %u, data %u) boot (text %u)\n",
- (le32toh(hdr->version) & 0xff000000) >> 24,
- (le32toh(hdr->version) & 0x00ff0000) >> 16,
- (le32toh(hdr->version) & 0x0000ffff),
- rtextsz, rdatasz,
- itextsz, idatasz, btextsz));
-
- DPRINTFN(WPI_DEBUG_FIRMWARE,("rtext 0x%x\n", *(const uint32_t *)rtext));
- DPRINTFN(WPI_DEBUG_FIRMWARE,("rdata 0x%x\n", *(const uint32_t *)rdata));
- DPRINTFN(WPI_DEBUG_FIRMWARE,("itext 0x%x\n", *(const uint32_t *)itext));
- DPRINTFN(WPI_DEBUG_FIRMWARE,("idata 0x%x\n", *(const uint32_t *)idata));
- DPRINTFN(WPI_DEBUG_FIRMWARE,("btext 0x%x\n", *(const uint32_t *)btext));
-
- /* sanity checks */
- if (rtextsz > WPI_FW_MAIN_TEXT_MAXSZ ||
- rdatasz > WPI_FW_MAIN_DATA_MAXSZ ||
- itextsz > WPI_FW_INIT_TEXT_MAXSZ ||
- idatasz > WPI_FW_INIT_DATA_MAXSZ ||
- btextsz > WPI_FW_BOOT_TEXT_MAXSZ ||
- (btextsz & 3) != 0) {
- device_printf(sc->sc_dev, "firmware invalid\n");
- error = EINVAL;
- goto fail;
- }
-
- /* copy initialization images into pre-allocated DMA-safe memory */
- memcpy(dma->vaddr, idata, idatasz);
- memcpy(dma->vaddr + WPI_FW_INIT_DATA_MAXSZ, itext, itextsz);
-
- bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
-
- /* tell adapter where to find initialization images */
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr);
- wpi_mem_write(sc, WPI_MEM_DATA_SIZE, idatasz);
- wpi_mem_write(sc, WPI_MEM_TEXT_BASE,
- dma->paddr + WPI_FW_INIT_DATA_MAXSZ);
- wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, itextsz);
- wpi_mem_unlock(sc);
-
- /* load firmware boot code */
- if ((error = wpi_load_microcode(sc, btext, btextsz)) != 0) {
- device_printf(sc->sc_dev, "Failed to load microcode\n");
- goto fail;
- }
-
- /* now press "execute" */
- WPI_WRITE(sc, WPI_RESET, 0);
-
- /* wait at most one second for the first alive notification */
- if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
- device_printf(sc->sc_dev,
- "timeout waiting for adapter to initialize\n");
- goto fail;
- }
-
- /* copy runtime images into pre-allocated DMA-sage memory */
- memcpy(dma->vaddr, rdata, rdatasz);
- memcpy(dma->vaddr + WPI_FW_MAIN_DATA_MAXSZ, rtext, rtextsz);
- bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
-
- /* tell adapter where to find runtime images */
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr);
- wpi_mem_write(sc, WPI_MEM_DATA_SIZE, rdatasz);
- wpi_mem_write(sc, WPI_MEM_TEXT_BASE,
- dma->paddr + WPI_FW_MAIN_DATA_MAXSZ);
- wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, WPI_FW_UPDATED | rtextsz);
- wpi_mem_unlock(sc);
-
- /* wait at most one second for the first alive notification */
- if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
- device_printf(sc->sc_dev,
- "timeout waiting for adapter to initialize2\n");
- goto fail;
- }
-
- DPRINTFN(WPI_DEBUG_FIRMWARE,
- ("Firmware loaded to driver successfully\n"));
- return error;
-fail:
- wpi_unload_firmware(sc);
- return error;
-}
-
-/**
- * Free the referenced firmware image
- */
-static void
-wpi_unload_firmware(struct wpi_softc *sc)
-{
-
- if (sc->fw_fp) {
- WPI_UNLOCK(sc);
- firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
- WPI_LOCK(sc);
- sc->fw_fp = NULL;
- }
-}
-
static int
wpi_attach(device_t dev)
{
- struct wpi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp;
+ struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
struct ieee80211com *ic;
- int ac, error, rid, supportsa = 1;
- uint32_t tmp;
+ struct ifnet *ifp;
+ int i, error, rid, supportsa = 1;
const struct wpi_ident *ident;
uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
- if (bootverbose || WPI_DEBUG_SET)
- device_printf(sc->sc_dev,"Driver Revision %s\n", VERSION);
+#ifdef WPI_DEBUG
+ error = resource_int_value(device_get_name(sc->sc_dev),
+ device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
+ if (error != 0)
+ sc->sc_debug = 0;
+#else
+ sc->sc_debug = 0;
+#endif
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ /*
+ * Get the offset of the PCI Express Capability Structure in PCI
+ * Configuration Space.
+ */
+ error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
+ if (error != 0) {
+ device_printf(dev, "PCIe capability structure not found!\n");
+ return error;
+ }
/*
* Some card's only support 802.11b/g not a, check to see if
@@ -514,131 +347,118 @@ wpi_attach(device_t dev)
}
}
- /* Create the tasks that can be queued */
- TASK_INIT(&sc->sc_restarttask, 0, wpi_hwreset, sc);
- TASK_INIT(&sc->sc_radiotask, 0, wpi_rfreset, sc);
-
- WPI_LOCK_INIT(sc);
-
- callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
- callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
-
- /* disable the retry timeout register */
+ /* Clear device-specific "PCI retry timeout" register (41h). */
pci_write_config(dev, 0x41, 0, 1);
- /* enable bus-mastering */
+ /* Enable bus-mastering. */
pci_enable_busmaster(dev);
rid = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem == NULL) {
- device_printf(dev, "could not allocate memory resource\n");
+ device_printf(dev, "can't map mem space\n");
error = ENOMEM;
- goto fail;
+ return error;
}
-
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
+ i = 1;
rid = 0;
- sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_ACTIVE | RF_SHAREABLE);
+ if (pci_alloc_msi(dev, &i) == 0)
+ rid = 1;
+ /* Install interrupt handler. */
+ sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
+ (rid != 0 ? 0 : RF_SHAREABLE));
if (sc->irq == NULL) {
- device_printf(dev, "could not allocate interrupt resource\n");
+ device_printf(dev, "can't map interrupt\n");
error = ENOMEM;
goto fail;
}
- /*
- * Allocate DMA memory for firmware transfers.
- */
- if ((error = wpi_alloc_fwmem(sc)) != 0) {
- printf(": could not allocate firmware memory\n");
- error = ENOMEM;
- goto fail;
- }
+ WPI_LOCK_INIT(sc);
- /*
- * Put adapter into a known state.
- */
- if ((error = wpi_reset(sc)) != 0) {
- device_printf(dev, "could not reset adapter\n");
+ sc->sc_unr = new_unrhdr(WPI_ID_IBSS_MIN, WPI_ID_IBSS_MAX, &sc->sc_mtx);
+
+ /* Allocate DMA memory for firmware transfers. */
+ if ((error = wpi_alloc_fwmem(sc)) != 0) {
+ device_printf(dev,
+ "could not allocate memory for firmware, error %d\n",
+ error);
goto fail;
}
- wpi_mem_lock(sc);
- tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV);
- if (bootverbose || WPI_DEBUG_SET)
- device_printf(sc->sc_dev, "Hardware Revision (0x%X)\n", tmp);
-
- wpi_mem_unlock(sc);
-
- /* Allocate shared page */
+ /* Allocate shared page. */
if ((error = wpi_alloc_shared(sc)) != 0) {
device_printf(dev, "could not allocate shared page\n");
goto fail;
}
- /* tx data queues - 4 for QoS purposes */
- for (ac = 0; ac < WME_NUM_AC; ac++) {
- error = wpi_alloc_tx_ring(sc, &sc->txq[ac], WPI_TX_RING_COUNT, ac);
- if (error != 0) {
- device_printf(dev, "could not allocate Tx ring %d\n",ac);
- goto fail;
+ /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */
+ for (i = 0; i < WPI_NTXQUEUES; i++) {
+ if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
+ device_printf(dev,
+ "could not allocate TX ring %d, error %d\n", i,
+ error);
+ goto fail;
}
}
- /* command queue to talk to the card's firmware */
- error = wpi_alloc_tx_ring(sc, &sc->cmdq, WPI_CMD_RING_COUNT, 4);
- if (error != 0) {
- device_printf(dev, "could not allocate command ring\n");
+ /* Allocate RX ring. */
+ if ((error = wpi_alloc_rx_ring(sc)) != 0) {
+ device_printf(dev, "could not allocate RX ring, error %d\n",
+ error);
goto fail;
}
- /* receive data queue */
- error = wpi_alloc_rx_ring(sc, &sc->rxq);
- if (error != 0) {
- device_printf(dev, "could not allocate Rx ring\n");
- goto fail;
- }
+ /* Clear pending interrupts. */
+ WPI_WRITE(sc, WPI_INT, 0xffffffff);
ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOMEM;
+ device_printf(dev, "can not allocate ifnet structure\n");
goto fail;
}
- ic = ifp->if_l2com;
+ ic = ifp->if_l2com;
ic->ic_ifp = ifp;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
- /* set device capabilities */
+ /* Set device capabilities. */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
+ | IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
+ | IEEE80211_C_AHDEMO /* adhoc demo mode */
+ | IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHSLOT /* short slot time supported */
- | IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_WPA /* 802.11i */
-/* XXX looks like WME is partly supported? */
+ | IEEE80211_C_SHPREAMBLE /* short preamble supported */
#if 0
- | IEEE80211_C_IBSS /* IBSS mode support */
- | IEEE80211_C_BGSCAN /* capable of bg scanning */
- | IEEE80211_C_WME /* 802.11e */
| IEEE80211_C_HOSTAP /* Host access point mode */
#endif
+ | IEEE80211_C_WME /* 802.11e */
+ | IEEE80211_C_PMGT /* Station-side power mgmt */
;
+ ic->ic_cryptocaps =
+ IEEE80211_CRYPTO_AES_CCM;
+
/*
* Read in the eeprom and also setup the channels for
* net80211. We don't set the rates as net80211 does this for us
*/
- wpi_read_eeprom(sc, macaddr);
+ if ((error = wpi_read_eeprom(sc, macaddr)) != 0) {
+ device_printf(dev, "could not read EEPROM, error %d\n",
+ error);
+ goto fail;
+ }
- if (bootverbose || WPI_DEBUG_SET) {
+#ifdef WPI_DEBUG
+ if (bootverbose) {
device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", sc->domain);
device_printf(sc->sc_dev, "Hardware Type: %c\n",
sc->type > 1 ? 'B': '?');
@@ -650,6 +470,7 @@ wpi_attach(device_t dev)
/* XXX hw_config uses the PCIDEV for the Hardware rev. Must check
what sc->rev really represents - benjsc 20070615 */
}
+#endif
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_softc = sc;
@@ -662,43 +483,141 @@ wpi_attach(device_t dev)
IFQ_SET_READY(&ifp->if_snd);
ieee80211_ifattach(ic, macaddr);
- /* override default methods */
+ ic->ic_vap_create = wpi_vap_create;
+ ic->ic_vap_delete = wpi_vap_delete;
ic->ic_raw_xmit = wpi_raw_xmit;
- ic->ic_wme.wme_update = wpi_wme_update;
+ ic->ic_node_alloc = wpi_node_alloc;
+ sc->sc_node_free = ic->ic_node_free;
+ ic->ic_node_free = wpi_node_free;
+ ic->ic_wme.wme_update = wpi_updateedca;
+ ic->ic_update_promisc = wpi_update_promisc;
+ ic->ic_update_mcast = wpi_update_mcast;
ic->ic_scan_start = wpi_scan_start;
ic->ic_scan_end = wpi_scan_end;
ic->ic_set_channel = wpi_set_channel;
+ sc->sc_scan_curchan = ic->ic_scan_curchan;
ic->ic_scan_curchan = wpi_scan_curchan;
ic->ic_scan_mindwell = wpi_scan_mindwell;
+ ic->ic_setregdomain = wpi_setregdomain;
- ic->ic_vap_create = wpi_vap_create;
- ic->ic_vap_delete = wpi_vap_delete;
+ wpi_radiotap_attach(sc);
- ieee80211_radiotap_attach(ic,
- &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
- WPI_TX_RADIOTAP_PRESENT,
- &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
- WPI_RX_RADIOTAP_PRESENT);
+ callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
+ callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
+ callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0);
+ TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc);
+ TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc);
+ TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc);
+
+ wpi_sysctlattach(sc);
/*
* Hook our interrupt after all initialization is complete.
*/
- error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET |INTR_MPSAFE,
+ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, wpi_intr, sc, &sc->sc_ih);
if (error != 0) {
- device_printf(dev, "could not set up interrupt\n");
+ device_printf(dev, "can't establish interrupt, error %d\n",
+ error);
goto fail;
}
if (bootverbose)
ieee80211_announce(ic);
-#ifdef XXX_DEBUG
- ieee80211_announce_channels(ic);
+
+#ifdef WPI_DEBUG
+ if (sc->sc_debug & WPI_DEBUG_HW)
+ ieee80211_announce_channels(ic);
#endif
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
fail: wpi_detach(dev);
- return ENXIO;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+ return error;
+}
+
+/*
+ * Attach the interface to 802.11 radiotap.
+ */
+static void
+wpi_radiotap_attach(struct wpi_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+ ieee80211_radiotap_attach(ic,
+ &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
+ WPI_TX_RADIOTAP_PRESENT,
+ &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
+ WPI_RX_RADIOTAP_PRESENT);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+}
+
+static void
+wpi_sysctlattach(struct wpi_softc *sc)
+{
+#ifdef WPI_DEBUG
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
+
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
+ "control debugging printfs");
+#endif
+}
+
+static struct ieee80211vap *
+wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
+ enum ieee80211_opmode opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct wpi_vap *wvp;
+ struct wpi_buf *bcn;
+ struct ieee80211vap *vap;
+
+ if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
+ return NULL;
+
+ wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap),
+ M_80211_VAP, M_NOWAIT | M_ZERO);
+ if (wvp == NULL)
+ return NULL;
+ vap = &wvp->vap;
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+
+ bcn = &wvp->wv_bcbuf;
+ bcn->data = NULL;
+
+ /* Override with driver methods. */
+ wvp->newstate = vap->iv_newstate;
+ vap->iv_key_alloc = wpi_key_alloc;
+ vap->iv_key_set = wpi_key_set;
+ vap->iv_key_delete = wpi_key_delete;
+ vap->iv_newstate = wpi_newstate;
+ vap->iv_update_beacon = wpi_update_beacon;
+
+ ieee80211_ratectl_init(vap);
+ /* Complete setup. */
+ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ic->ic_opmode = opmode;
+ return vap;
+}
+
+static void
+wpi_vap_delete(struct ieee80211vap *vap)
+{
+ struct wpi_vap *wvp = WPI_VAP(vap);
+ struct wpi_buf *bcn = &wvp->wv_bcbuf;
+
+ ieee80211_ratectl_deinit(vap);
+ ieee80211_vap_detach(vap);
+
+ if (bcn->data != NULL)
+ free(bcn->data, M_DEVBUF);
+ free(wvp, M_80211_VAP);
}
static int
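wpi_vap_create() above follows the usual net80211 interposition pattern: the driver saves the framework's iv_newstate pointer in its own vap structure, installs its wrapper, and the wrapper typically chains to the saved original. A stripped-down sketch of that save-and-override idea follows; the types and names are invented for illustration and do not match the net80211 structures.

    /*
     * Sketch of the "save the original method, install a wrapper" pattern
     * used when a driver overrides vap callbacks such as iv_newstate.
     */
    #include <stdio.h>

    struct vap;
    typedef int (*newstate_fn)(struct vap *, int);

    struct vap {
        newstate_fn     iv_newstate;    /* framework method slot */
    };

    struct drv_vap {
        struct vap      vap;            /* must be first: allows casting */
        newstate_fn     saved_newstate; /* original method, called last */
    };

    static int generic_newstate(struct vap *vap, int state)
    {
        printf("framework: move to state %d\n", state);
        return 0;
    }

    static int drv_newstate(struct vap *vap, int state)
    {
        struct drv_vap *dvp = (struct drv_vap *)vap;

        printf("driver: program hardware for state %d\n", state);
        return dvp->saved_newstate(vap, state); /* chain to the original */
    }

    int main(void)
    {
        struct drv_vap dvp = { .vap = { .iv_newstate = generic_newstate } };

        /* Override with the driver method, remembering the original. */
        dvp.saved_newstate = dvp.vap.iv_newstate;
        dvp.vap.iv_newstate = drv_newstate;

        return dvp.vap.iv_newstate(&dvp.vap, 1);
    }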
@@ -707,43 +626,44 @@ wpi_detach(device_t dev)
struct wpi_softc *sc = device_get_softc(dev);
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic;
- int ac;
+ int qid;
- if (sc->irq != NULL)
- bus_teardown_intr(dev, sc->irq, sc->sc_ih);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
if (ifp != NULL) {
ic = ifp->if_l2com;
- ieee80211_draintask(ic, &sc->sc_restarttask);
- ieee80211_draintask(ic, &sc->sc_radiotask);
+ ieee80211_draintask(ic, &sc->sc_reinittask);
+ ieee80211_draintask(ic, &sc->sc_radiooff_task);
+
wpi_stop(sc);
+
callout_drain(&sc->watchdog_to);
+ callout_drain(&sc->watchdog_rfkill);
callout_drain(&sc->calib_to);
ieee80211_ifdetach(ic);
}
- WPI_LOCK(sc);
+ /* Uninstall interrupt handler. */
+ if (sc->irq != NULL) {
+ bus_teardown_intr(dev, sc->irq, sc->sc_ih);
+ bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
+ sc->irq);
+ pci_release_msi(dev);
+ }
+
if (sc->txq[0].data_dmat) {
- for (ac = 0; ac < WME_NUM_AC; ac++)
- wpi_free_tx_ring(sc, &sc->txq[ac]);
+ /* Free DMA resources. */
+ for (qid = 0; qid < WPI_NTXQUEUES; qid++)
+ wpi_free_tx_ring(sc, &sc->txq[qid]);
- wpi_free_tx_ring(sc, &sc->cmdq);
- wpi_free_rx_ring(sc, &sc->rxq);
+ wpi_free_rx_ring(sc);
wpi_free_shared(sc);
}
- if (sc->fw_fp != NULL) {
- wpi_unload_firmware(sc);
- }
-
if (sc->fw_dma.tag)
wpi_free_fwmem(sc);
- WPI_UNLOCK(sc);
-
- if (sc->irq != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
- sc->irq);
+
if (sc->mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->mem), sc->mem);
@@ -751,47 +671,166 @@ wpi_detach(device_t dev)
if (ifp != NULL)
if_free(ifp);
+ delete_unrhdr(sc->sc_unr);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_LOCK_DESTROY(sc);
+ return 0;
+}
+static int
+wpi_shutdown(device_t dev)
+{
+ struct wpi_softc *sc = device_get_softc(dev);
+
+ wpi_stop(sc);
return 0;
}
-static struct ieee80211vap *
-wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
- enum ieee80211_opmode opmode, int flags,
- const uint8_t bssid[IEEE80211_ADDR_LEN],
- const uint8_t mac[IEEE80211_ADDR_LEN])
+static int
+wpi_suspend(device_t dev)
{
- struct wpi_vap *wvp;
- struct ieee80211vap *vap;
+ struct wpi_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = sc->sc_ifp->if_l2com;
- if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
- return NULL;
- wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (wvp == NULL)
- return NULL;
- vap = &wvp->vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
- /* override with driver methods */
- wvp->newstate = vap->iv_newstate;
- vap->iv_newstate = wpi_newstate;
+ ieee80211_suspend_all(ic);
+ return 0;
+}
- ieee80211_ratectl_init(vap);
- /* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
- ic->ic_opmode = opmode;
- return vap;
+static int
+wpi_resume(device_t dev)
+{
+ struct wpi_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+
+ /* Clear device-specific "PCI retry timeout" register (41h). */
+ pci_write_config(dev, 0x41, 0, 1);
+
+ ieee80211_resume_all(ic);
+ return 0;
}
-static void
-wpi_vap_delete(struct ieee80211vap *vap)
+/*
+ * Grab exclusive access to NIC memory.
+ */
+static int
+wpi_nic_lock(struct wpi_softc *sc)
{
- struct wpi_vap *wvp = WPI_VAP(vap);
+ int ntries;
- ieee80211_ratectl_deinit(vap);
- ieee80211_vap_detach(vap);
- free(wvp, M_80211_VAP);
+ /* Request exclusive access to NIC. */
+ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
+
+ /* Spin until we actually get the lock. */
+ for (ntries = 0; ntries < 1000; ntries++) {
+ if ((WPI_READ(sc, WPI_GP_CNTRL) &
+ (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
+ WPI_GP_CNTRL_MAC_ACCESS_ENA)
+ return 0;
+ DELAY(10);
+ }
+
+ device_printf(sc->sc_dev, "could not lock memory\n");
+
+ return ETIMEDOUT;
+}
+
+/*
+ * Release lock on NIC memory.
+ */
+static __inline void
+wpi_nic_unlock(struct wpi_softc *sc)
+{
+ WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
+}
+
+static __inline uint32_t
+wpi_prph_read(struct wpi_softc *sc, uint32_t addr)
+{
+ WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr);
+ WPI_BARRIER_READ_WRITE(sc);
+ return WPI_READ(sc, WPI_PRPH_RDATA);
+}
+
+static __inline void
+wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data)
+{
+ WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr);
+ WPI_BARRIER_WRITE(sc);
+ WPI_WRITE(sc, WPI_PRPH_WDATA, data);
+}
+
+static __inline void
+wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
+{
+ wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask);
+}
+
+static __inline void
+wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
+{
+ wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask);
+}
+
+static __inline void
+wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr,
+ const uint32_t *data, int count)
+{
+ for (; count > 0; count--, data++, addr += 4)
+ wpi_prph_write(sc, addr, *data);
+}
+
+static __inline uint32_t
+wpi_mem_read(struct wpi_softc *sc, uint32_t addr)
+{
+ WPI_WRITE(sc, WPI_MEM_RADDR, addr);
+ WPI_BARRIER_READ_WRITE(sc);
+ return WPI_READ(sc, WPI_MEM_RDATA);
+}
+
+static __inline void
+wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data,
+ int count)
+{
+ for (; count > 0; count--, addr += 4)
+ *data++ = wpi_mem_read(sc, addr);
+}
+
+static int
+wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count)
+{
+ uint8_t *out = data;
+ uint32_t val;
+ int error, ntries;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+
+ for (; count > 0; count -= 2, addr++) {
+ WPI_WRITE(sc, WPI_EEPROM, addr << 2);
+ for (ntries = 0; ntries < 10; ntries++) {
+ val = WPI_READ(sc, WPI_EEPROM);
+ if (val & WPI_EEPROM_READ_VALID)
+ break;
+ DELAY(5);
+ }
+ if (ntries == 10) {
+ device_printf(sc->sc_dev,
+ "timeout reading ROM at 0x%x\n", addr);
+ return ETIMEDOUT;
+ }
+ *out++= val >> 16;
+ if (count > 1)
+ *out ++= val >> 24;
+ }
+
+ wpi_nic_unlock(sc);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
+ return 0;
}
static void
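wpi_nic_lock() above requests MAC access and then polls WPI_GP_CNTRL up to 1000 times with a 10 microsecond delay before giving up with ETIMEDOUT, and wpi_read_prom_data() applies the same bounded-poll idea to each EEPROM word. The sketch below shows the bare shape of such a bounded polling loop in portable C; the fake status register and usleep() stand in for the device register read and DELAY().

    /*
     * Bare-bones bounded polling loop, in the style of wpi_nic_lock():
     * poll a status bit a fixed number of times with a short delay and
     * fail with a timeout if the bit never appears.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int polls;

    static uint32_t read_status(void)
    {
        /* Pretend the hardware grants access after a few polls. */
        return (++polls >= 5) ? 0x1u : 0x0u;
    }

    static int nic_lock(void)
    {
        for (int ntries = 0; ntries < 1000; ntries++) {
            if (read_status() & 0x1)
                return 0;               /* access granted */
            usleep(10);                 /* stands in for DELAY(10) */
        }
        return ETIMEDOUT;               /* give up, as the driver does */
    }

    int main(void)
    {
        int error = nic_lock();

        printf("%s after %d polls\n", error ? "timed out" : "locked", polls);
        return error;
    }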
@@ -799,116 +838,63 @@ wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
if (error != 0)
return;
-
KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
-
*(bus_addr_t *)arg = segs[0].ds_addr;
}
/*
* Allocates a contiguous block of dma memory of the requested size and
- * alignment. Due to limitations of the FreeBSD dma subsystem as of 20071217,
- * allocations greater than 4096 may fail. Hence if the requested alignment is
- * greater we allocate 'alignment' size extra memory and shift the vaddr and
- * paddr after the dma load. This bypasses the problem at the cost of a little
- * more memory.
+ * alignment.
*/
static int
wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma,
- void **kvap, bus_size_t size, bus_size_t alignment, int flags)
+ void **kvap, bus_size_t size, bus_size_t alignment)
{
int error;
- bus_size_t align;
- bus_size_t reqsize;
- DPRINTFN(WPI_DEBUG_DMA,
- ("Size: %zd - alignment %zd\n", size, alignment));
-
- dma->size = size;
dma->tag = NULL;
+ dma->size = size;
- if (alignment > 4096) {
- align = PAGE_SIZE;
- reqsize = size + alignment;
- } else {
- align = alignment;
- reqsize = size;
- }
- error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), align,
- 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
- NULL, NULL, reqsize,
- 1, reqsize, flags,
- NULL, NULL, &dma->tag);
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not create shared page DMA tag\n");
- goto fail;
- }
- error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr_start,
- flags | BUS_DMA_ZERO, &dma->map);
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not allocate shared page DMA memory\n");
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
+ 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
+ 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
+ if (error != 0)
goto fail;
- }
-
- error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr_start,
- reqsize, wpi_dma_map_addr, &dma->paddr_start, flags);
-
- /* Save the original pointers so we can free all the memory */
- dma->paddr = dma->paddr_start;
- dma->vaddr = dma->vaddr_start;
- /*
- * Check the alignment and increment by 4096 until we get the
- * requested alignment. Fail if can't obtain the alignment
- * we requested.
- */
- if ((dma->paddr & (alignment -1 )) != 0) {
- int i;
-
- for (i = 0; i < alignment / 4096; i++) {
- if ((dma->paddr & (alignment - 1 )) == 0)
- break;
- dma->paddr += 4096;
- dma->vaddr += 4096;
- }
- if (i == alignment / 4096) {
- device_printf(sc->sc_dev,
- "alignment requirement was not satisfied\n");
- goto fail;
- }
- }
+ error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
+ if (error != 0)
+ goto fail;
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not load shared page DMA map\n");
+ error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
+ wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
+ if (error != 0)
goto fail;
- }
+
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
if (kvap != NULL)
*kvap = dma->vaddr;
return 0;
-fail:
- wpi_dma_contig_free(dma);
+fail: wpi_dma_contig_free(dma);
return error;
}
static void
wpi_dma_contig_free(struct wpi_dma_info *dma)
{
- if (dma->tag) {
- if (dma->vaddr_start != NULL) {
- if (dma->paddr_start != 0) {
- bus_dmamap_sync(dma->tag, dma->map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->tag, dma->map);
- }
- bus_dmamem_free(dma->tag, dma->vaddr_start, dma->map);
- }
+ if (dma->vaddr != NULL) {
+ bus_dmamap_sync(dma->tag, dma->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+ dma->vaddr = NULL;
+ }
+ if (dma->tag != NULL) {
bus_dma_tag_destroy(dma->tag);
+ dma->tag = NULL;
}
}
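The rewritten wpi_dma_contig_alloc() above drops the old alignment workaround and becomes a straight tag-create / alloc / load / sync sequence with a single fail: label, while wpi_dma_contig_free() is now safe to call on a partially constructed object because it NULLs each pointer as it releases it. A minimal sketch of that acquire-in-order, single-cleanup-path shape, with plain allocations standing in for the bus_dma calls and invented names:

    /*
     * Shape of the "single fail label" allocation: acquire resources in
     * order, jump to one cleanup path on any error, and make the free
     * routine safe to call on a partially-initialized object.
     */
    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct dma_info {
        void    *tag;           /* stands in for the bus_dma tag */
        void    *vaddr;         /* stands in for the mapped buffer */
        size_t   size;
    };

    static void dma_contig_free(struct dma_info *dma)
    {
        free(dma->vaddr);
        dma->vaddr = NULL;
        free(dma->tag);
        dma->tag = NULL;
    }

    static int dma_contig_alloc(struct dma_info *dma, size_t size)
    {
        int error;

        memset(dma, 0, sizeof(*dma));
        dma->size = size;

        if ((dma->tag = malloc(64)) == NULL) {          /* "tag create" */
            error = ENOMEM;
            goto fail;
        }
        if ((dma->vaddr = calloc(1, size)) == NULL) {   /* "mem alloc" */
            error = ENOMEM;
            goto fail;
        }
        return 0;

    fail:   dma_contig_free(dma);                       /* safe on partial state */
        return error;
    }

    int main(void)
    {
        struct dma_info dma;
        int error = dma_contig_alloc(&dma, 4096);

        if (error == 0)
            dma_contig_free(&dma);
        return error;
    }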
@@ -918,19 +904,9 @@ wpi_dma_contig_free(struct wpi_dma_info *dma)
static int
wpi_alloc_shared(struct wpi_softc *sc)
{
- int error;
-
- error = wpi_dma_contig_alloc(sc, &sc->shared_dma,
- (void **)&sc->shared, sizeof (struct wpi_shared),
- PAGE_SIZE,
- BUS_DMA_NOWAIT);
-
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not allocate shared area DMA memory\n");
- }
-
- return error;
+ /* Shared buffer must be aligned on a 4KB boundary. */
+ return wpi_dma_contig_alloc(sc, &sc->shared_dma,
+ (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
}
static void
@@ -939,114 +915,161 @@ wpi_free_shared(struct wpi_softc *sc)
wpi_dma_contig_free(&sc->shared_dma);
}
+/*
+ * Allocate DMA-safe memory for firmware transfer.
+ */
static int
-wpi_alloc_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
+wpi_alloc_fwmem(struct wpi_softc *sc)
{
+ /* Must be aligned on a 16-byte boundary. */
+ return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
+ WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
+}
+static void
+wpi_free_fwmem(struct wpi_softc *sc)
+{
+ wpi_dma_contig_free(&sc->fw_dma);
+}
+
+static int
+wpi_alloc_rx_ring(struct wpi_softc *sc)
+{
+ struct wpi_rx_ring *ring = &sc->rxq;
+ bus_size_t size;
int i, error;
ring->cur = 0;
+ ring->update = 0;
- error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
- (void **)&ring->desc, WPI_RX_RING_COUNT * sizeof (uint32_t),
- WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+ /* Allocate RX descriptors (16KB aligned.) */
+ size = WPI_RX_RING_COUNT * sizeof (uint32_t);
+ error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
+ (void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
if (error != 0) {
device_printf(sc->sc_dev,
- "%s: could not allocate rx ring DMA memory, error %d\n",
+ "%s: could not allocate RX ring DMA memory, error %d\n",
__func__, error);
goto fail;
}
+ /* Create RX buffer DMA tag. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
- MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL,
+ &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
- "%s: bus_dma_tag_create_failed, error %d\n",
+ "%s: could not create RX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
/*
- * Setup Rx buffers.
+ * Allocate and map RX buffers.
*/
for (i = 0; i < WPI_RX_RING_COUNT; i++) {
struct wpi_rx_data *data = &ring->data[i];
- struct mbuf *m;
bus_addr_t paddr;
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
- "%s: bus_dmamap_create failed, error %d\n",
+ "%s: could not create RX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
- m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
- if (m == NULL) {
+
+ data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
+ if (data->m == NULL) {
device_printf(sc->sc_dev,
- "%s: could not allocate rx mbuf\n", __func__);
- error = ENOMEM;
+ "%s: could not allocate RX mbuf\n", __func__);
+ error = ENOBUFS;
goto fail;
}
- /* map page */
+
error = bus_dmamap_load(ring->data_dmat, data->map,
- mtod(m, caddr_t), MJUMPAGESIZE,
- wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
+ mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
+ &paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
- "%s: bus_dmamap_load failed, error %d\n",
- __func__, error);
- m_freem(m);
- error = ENOMEM; /* XXX unique code */
+ "%s: can't map mbuf (error %d)\n", __func__,
+ error);
goto fail;
}
- bus_dmamap_sync(ring->data_dmat, data->map,
- BUS_DMASYNC_PREWRITE);
- data->m = m;
+ /* Set physical address of RX buffer. */
ring->desc[i] = htole32(paddr);
}
+
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
return 0;
-fail:
- wpi_free_rx_ring(sc, ring);
+
+fail: wpi_free_rx_ring(sc);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+
return error;
}
static void
-wpi_reset_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
+wpi_update_rx_ring(struct wpi_softc *sc)
{
- int ntries;
+ struct wpi_rx_ring *ring = &sc->rxq;
- wpi_mem_lock(sc);
+ if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) {
+ DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n",
+ __func__);
- WPI_WRITE(sc, WPI_RX_CONFIG, 0);
+ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
+ ring->update = 1;
+ } else
+ WPI_WRITE(sc, WPI_FH_RX_WPTR, ring->cur & ~7);
+}
- for (ntries = 0; ntries < 100; ntries++) {
- if (WPI_READ(sc, WPI_RX_STATUS) & WPI_RX_IDLE)
- break;
- DELAY(10);
- }
+static void
+wpi_reset_rx_ring(struct wpi_softc *sc)
+{
+ struct wpi_rx_ring *ring = &sc->rxq;
+ int ntries;
- wpi_mem_unlock(sc);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+ if (wpi_nic_lock(sc) == 0) {
+ WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0);
+ for (ntries = 0; ntries < 1000; ntries++) {
+ if (WPI_READ(sc, WPI_FH_RX_STATUS) &
+ WPI_FH_RX_STATUS_IDLE)
+ break;
+ DELAY(10);
+ }
#ifdef WPI_DEBUG
- if (ntries == 100 && wpi_debug > 0)
- device_printf(sc->sc_dev, "timeout resetting Rx ring\n");
+ if (ntries == 1000) {
+ device_printf(sc->sc_dev,
+ "timeout resetting Rx ring\n");
+ }
#endif
+ wpi_nic_unlock(sc);
+ }
ring->cur = 0;
+ ring->update = 0;
}
static void
-wpi_free_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
+wpi_free_rx_ring(struct wpi_softc *sc)
{
+ struct wpi_rx_ring *ring = &sc->rxq;
int i;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
wpi_dma_contig_free(&ring->desc_dma);
for (i = 0; i < WPI_RX_RING_COUNT; i++) {
@@ -1057,52 +1080,62 @@ wpi_free_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
+ data->m = NULL;
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
+ if (ring->data_dmat != NULL) {
+ bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
}
static int
-wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int count,
- int qid)
+wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid)
{
- struct wpi_tx_data *data;
+ bus_addr_t paddr;
+ bus_size_t size;
int i, error;
ring->qid = qid;
- ring->count = count;
ring->queued = 0;
ring->cur = 0;
- ring->data = NULL;
+ ring->update = 0;
- error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
- (void **)&ring->desc, count * sizeof (struct wpi_tx_desc),
- WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+ /* Allocate TX descriptors (16KB aligned). */
+ size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc);
+ error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
+ size, WPI_RING_DMA_ALIGN);
if (error != 0) {
- device_printf(sc->sc_dev, "could not allocate tx dma memory\n");
- goto fail;
+ device_printf(sc->sc_dev,
+ "%s: could not allocate TX ring DMA memory, error %d\n",
+ __func__, error);
+ goto fail;
}
- /* update shared page with ring's base address */
+ /* Update shared area with ring physical address. */
sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
+ bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
+ BUS_DMASYNC_PREWRITE);
- error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
- count * sizeof (struct wpi_tx_cmd), WPI_RING_DMA_ALIGN,
- BUS_DMA_NOWAIT);
+ /*
+ * We only use rings 0 through 4 (4 EDCA + cmd), so there is no need
+ * to allocate command space for the other rings.
+ * XXX Do we really need to allocate descriptors for other rings?
+ */
+ if (qid > 4)
+ return 0;
+ size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd);
+ error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
+ size, 4);
if (error != 0) {
device_printf(sc->sc_dev,
- "could not allocate tx command DMA memory\n");
- goto fail;
- }
-
- ring->data = malloc(count * sizeof (struct wpi_tx_data), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (ring->data == NULL) {
- device_printf(sc->sc_dev,
- "could not allocate tx data slots\n");
+ "%s: could not allocate TX cmd DMA memory, error %d\n",
+ __func__, error);
goto fail;
}
@@ -1111,128 +1144,390 @@ wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int count,
WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
- device_printf(sc->sc_dev, "could not create data DMA tag\n");
+ device_printf(sc->sc_dev,
+ "%s: could not create TX buf DMA tag, error %d\n",
+ __func__, error);
goto fail;
}
- for (i = 0; i < count; i++) {
- data = &ring->data[i];
+ paddr = ring->cmd_dma.paddr;
+ for (i = 0; i < WPI_TX_RING_COUNT; i++) {
+ struct wpi_tx_data *data = &ring->data[i];
+
+ data->cmd_paddr = paddr;
+ paddr += sizeof (struct wpi_tx_cmd);
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
- "could not create tx buf DMA map\n");
+ "%s: could not create TX buf DMA map, error %d\n",
+ __func__, error);
goto fail;
}
- bus_dmamap_sync(ring->data_dmat, data->map,
- BUS_DMASYNC_PREWRITE);
}
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
return 0;
-fail:
- wpi_free_tx_ring(sc, ring);
+fail: wpi_free_tx_ring(sc, ring);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
static void
-wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
+wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
- struct wpi_tx_data *data;
- int i, ntries;
+ if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) {
+ DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n",
+ __func__, ring->qid);
- wpi_mem_lock(sc);
+ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
+ ring->update = 1;
+ } else
+ WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
+}
- WPI_WRITE(sc, WPI_TX_CONFIG(ring->qid), 0);
- for (ntries = 0; ntries < 100; ntries++) {
- if (WPI_READ(sc, WPI_TX_STATUS) & WPI_TX_IDLE(ring->qid))
- break;
- DELAY(10);
- }
-#ifdef WPI_DEBUG
- if (ntries == 100 && wpi_debug > 0)
- device_printf(sc->sc_dev, "timeout resetting Tx ring %d\n",
- ring->qid);
-#endif
- wpi_mem_unlock(sc);
+static void
+wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
+{
+ int i;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- for (i = 0; i < ring->count; i++) {
- data = &ring->data[i];
+ for (i = 0; i < WPI_TX_RING_COUNT; i++) {
+ struct wpi_tx_data *data = &ring->data[i];
if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
}
-
+ /* Clear TX descriptors. */
+ memset(ring->desc, 0, ring->desc_dma.size);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ sc->qfullmsk &= ~(1 << ring->qid);
ring->queued = 0;
ring->cur = 0;
+ ring->update = 0;
}
static void
wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
- struct wpi_tx_data *data;
int i;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
wpi_dma_contig_free(&ring->desc_dma);
wpi_dma_contig_free(&ring->cmd_dma);
- if (ring->data != NULL) {
- for (i = 0; i < ring->count; i++) {
- data = &ring->data[i];
+ for (i = 0; i < WPI_TX_RING_COUNT; i++) {
+ struct wpi_tx_data *data = &ring->data[i];
- if (data->m != NULL) {
- bus_dmamap_sync(ring->data_dmat, data->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(ring->data_dmat, data->map);
- m_freem(data->m);
- data->m = NULL;
- }
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
}
- free(ring->data, M_DEVBUF);
+ if (data->map != NULL)
+ bus_dmamap_destroy(ring->data_dmat, data->map);
}
-
- if (ring->data_dmat != NULL)
+ if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
+}
+
+/*
+ * Extract various information from EEPROM.
+ */
+static int
+wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+#define WPI_CHK(res) do { \
+ if ((error = res) != 0) \
+ goto fail; \
+} while (0)
+ int error, i;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ /* Adapter has to be powered on for EEPROM access to work. */
+ if ((error = wpi_apm_init(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not power ON adapter, error %d\n", __func__,
+ error);
+ return error;
+ }
+
+ if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) {
+ device_printf(sc->sc_dev, "bad EEPROM signature\n");
+ error = EIO;
+ goto fail;
+ }
+ /* Clear HW ownership of EEPROM. */
+ WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER);
+
+ /* Read the hardware capabilities, revision and SKU type. */
+ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap,
+ sizeof(sc->cap)));
+ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,
+ sizeof(sc->rev)));
+ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type,
+ sizeof(sc->type)));
+
+ DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap,
+ le16toh(sc->rev), sc->type);
+
+ /* Read the regulatory domain (4 ASCII characters). */
+ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain,
+ sizeof(sc->domain)));
+
+ /* Read MAC address. */
+ WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr,
+ IEEE80211_ADDR_LEN));
+
+ /* Read the list of authorized channels. */
+ for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
+ WPI_CHK(wpi_read_eeprom_channels(sc, i));
+
+ /* Read the list of TX power groups. */
+ for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
+ WPI_CHK(wpi_read_eeprom_group(sc, i));
+
+fail: wpi_apm_stop(sc); /* Power OFF adapter. */
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
+ __func__);
+
+ return error;
+#undef WPI_CHK
+}
+
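
The WPI_CHK() wrapper above routes every EEPROM read through one error label so the adapter is always powered back off on the way out. A minimal standalone sketch of the same goto-on-error idiom, using hypothetical step functions in place of the real wpi_read_prom_data() calls:

#include <stdio.h>

/* Same shape as the WPI_CHK() macro defined in wpi_read_eeprom() above. */
#define CHK(expr) do {                          \
        if ((error = (expr)) != 0)              \
                goto fail;                      \
} while (0)

/* Hypothetical stand-ins for the individual EEPROM reads. */
static int step_a(void) { return 0; }
static int step_b(void) { return 5; }   /* pretend this read fails with error 5 */
static int step_c(void) { return 0; }

static int
read_all(void)
{
        int error = 0;

        CHK(step_a());
        CHK(step_b());          /* fails: jumps to fail, step_c() is skipped */
        CHK(step_c());
fail:
        /* Single cleanup path, analogous to wpi_apm_stop() above. */
        return error;
}

int
main(void)
{
        printf("read_all() -> %d\n", read_all());       /* prints 5 */
        return 0;
}
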
+/*
+ * Translate EEPROM flags to net80211.
+ */
+static uint32_t
+wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
+{
+ uint32_t nflags;
+
+ nflags = 0;
+ if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
+ nflags |= IEEE80211_CHAN_PASSIVE;
+ if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
+ nflags |= IEEE80211_CHAN_NOADHOC;
+ if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
+ nflags |= IEEE80211_CHAN_DFS;
+ /* XXX apparently IBSS may still be marked */
+ nflags |= IEEE80211_CHAN_NOADHOC;
+ }
+
+ return nflags;
}
+static void
+wpi_read_eeprom_band(struct wpi_softc *sc, int n)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
+ const struct wpi_chan_band *band = &wpi_bands[n];
+ struct ieee80211_channel *c;
+ uint8_t chan;
+ int i, nflags;
+
+ for (i = 0; i < band->nchan; i++) {
+ if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
+ DPRINTF(sc, WPI_DEBUG_HW,
+ "Channel Not Valid: %d, band %d\n",
+ band->chan[i], n);
+ continue;
+ }
+
+ chan = band->chan[i];
+ nflags = wpi_eeprom_channel_flags(&channels[i]);
+
+ c = &ic->ic_channels[ic->ic_nchans++];
+ c->ic_ieee = chan;
+ c->ic_maxregpower = channels[i].maxpwr;
+ c->ic_maxpower = 2 * c->ic_maxregpower;
+
+ if (n == 0) { /* 2GHz band */
+ c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
+ /* G implies B is supported. */
+ c->ic_flags = IEEE80211_CHAN_B | nflags;
+ c = &ic->ic_channels[ic->ic_nchans++];
+ c[0] = c[-1];
+ c->ic_flags = IEEE80211_CHAN_G | nflags;
+ } else { /* 5GHz band */
+ c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
+ c->ic_flags = IEEE80211_CHAN_A | nflags;
+ }
+
+ /* Save maximum allowed TX power for this channel. */
+ sc->maxpwr[chan] = channels[i].maxpwr;
+
+ DPRINTF(sc, WPI_DEBUG_EEPROM,
+ "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d,"
+ " offset %d\n", chan, c->ic_freq,
+ channels[i].flags, sc->maxpwr[chan],
+ (c->ic_flags & IEEE80211_CHAN_PASSIVE) != 0,
+ ic->ic_nchans);
+ }
+}
+
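
In the 2 GHz case above, each valid EEPROM channel produces two net80211 channel entries: the slot is first filled as an 11b channel, then the next slot is made a copy of it (the c[0] = c[-1] idiom) and reflagged as 11g. A small sketch of that duplication with a hypothetical, flags-only channel record:

#include <stdio.h>

#define CHAN_B  0x1
#define CHAN_G  0x2

struct chan {
        int ieee;
        int flags;
};

int
main(void)
{
        struct chan channels[32];
        int nchans = 0;
        struct chan *c;
        int ieee;

        for (ieee = 1; ieee <= 3; ieee++) {     /* pretend 3 valid EEPROM channels */
                c = &channels[nchans++];
                c->ieee = ieee;
                c->flags = CHAN_B;              /* 11b entry */

                c = &channels[nchans++];
                c[0] = c[-1];                   /* copy the 11b entry just built */
                c->flags = CHAN_G;              /* ...and relabel it 11g */
        }

        printf("%d EEPROM channels -> %d net80211 channels\n", 3, nchans);
        return 0;
}
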
+/**
+ * Read the EEPROM to find out what channels are valid for the given
+ * band and update net80211 with what we find.
+ */
static int
-wpi_shutdown(device_t dev)
+wpi_read_eeprom_channels(struct wpi_softc *sc, int n)
{
- struct wpi_softc *sc = device_get_softc(dev);
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ const struct wpi_chan_band *band = &wpi_bands[n];
+ int error;
- WPI_LOCK(sc);
- wpi_stop_locked(sc);
- wpi_unload_firmware(sc);
- WPI_UNLOCK(sc);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
+ band->nchan * sizeof (struct wpi_eeprom_chan));
+ if (error != 0) {
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+ return error;
+ }
+
+ wpi_read_eeprom_band(sc, n);
+
+ ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
+static struct wpi_eeprom_chan *
+wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
+{
+ int i, j;
+
+ for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
+ for (i = 0; i < wpi_bands[j].nchan; i++)
+ if (wpi_bands[j].chan[i] == c->ic_ieee)
+ return &sc->eeprom_channels[j][i];
+
+ return NULL;
+}
+
+/*
+ * Enforce flags read from EEPROM.
+ */
static int
-wpi_suspend(device_t dev)
+wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
+ int nchan, struct ieee80211_channel chans[])
{
- struct wpi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ifnet *ifp = ic->ic_ifp;
+ struct wpi_softc *sc = ifp->if_softc;
+ int i;
+
+ for (i = 0; i < nchan; i++) {
+ struct ieee80211_channel *c = &chans[i];
+ struct wpi_eeprom_chan *channel;
+
+ channel = wpi_find_eeprom_channel(sc, c);
+ if (channel == NULL) {
+ if_printf(ic->ic_ifp,
+ "%s: invalid channel %u freq %u/0x%x\n",
+ __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
+ return EINVAL;
+ }
+ c->ic_flags |= wpi_eeprom_channel_flags(channel);
+ }
- ieee80211_suspend_all(ic);
return 0;
}
static int
-wpi_resume(device_t dev)
+wpi_read_eeprom_group(struct wpi_softc *sc, int n)
{
- struct wpi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct wpi_power_group *group = &sc->groups[n];
+ struct wpi_eeprom_group rgroup;
+ int i, error;
- pci_write_config(dev, 0x41, 0, 1);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
+ &rgroup, sizeof rgroup)) != 0) {
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+ return error;
+ }
+
+ /* Save TX power group information. */
+ group->chan = rgroup.chan;
+ group->maxpwr = rgroup.maxpwr;
+ /* Retrieve temperature at which the samples were taken. */
+ group->temp = (int16_t)le16toh(rgroup.temp);
+
+ DPRINTF(sc, WPI_DEBUG_EEPROM,
+ "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
+ group->maxpwr, group->temp);
+
+ for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
+ group->samples[i].index = rgroup.samples[i].index;
+ group->samples[i].power = rgroup.samples[i].power;
+
+ DPRINTF(sc, WPI_DEBUG_EEPROM,
+ "\tsample %d: index=%d power=%d\n", i,
+ group->samples[i].index, group->samples[i].power);
+ }
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- ieee80211_resume_all(ic);
return 0;
}
+static struct ieee80211_node *
+wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct wpi_node *wn;
+
+ wn = malloc(sizeof (struct wpi_node), M_80211_NODE,
+ M_NOWAIT | M_ZERO);
+
+ if (wn == NULL)
+ return NULL;
+
+ wn->id = WPI_ID_UNDEFINED;
+
+ return &wn->ni;
+}
+
+static void
+wpi_node_free(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct wpi_softc *sc = ic->ic_ifp->if_softc;
+ struct wpi_node *wn = (struct wpi_node *)ni;
+
+ if (wn->id >= WPI_ID_IBSS_MIN && wn->id <= WPI_ID_IBSS_MAX) {
+ free_unr(sc->sc_unr, wn->id);
+
+ WPI_LOCK(sc);
+ if (sc->rxon.filter & htole32(WPI_FILTER_BSS))
+ wpi_del_node(sc, ni);
+ WPI_UNLOCK(sc);
+ }
+
+ sc->sc_node_free(ni);
+}
+
/**
 * Called by net80211 whenever there is a change to the 802.11 state machine.
*/
@@ -1243,396 +1538,376 @@ wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = ic->ic_ifp;
struct wpi_softc *sc = ifp->if_softc;
- int error;
+ int error = 0;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__,
+ DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
- ieee80211_state_name[nstate], sc->flags));
+ ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
WPI_LOCK(sc);
- if (nstate == IEEE80211_S_SCAN && vap->iv_state != IEEE80211_S_INIT) {
- /*
- * On !INIT -> SCAN transitions, we need to clear any possible
- * knowledge about associations.
- */
- error = wpi_config(sc);
- if (error != 0) {
- device_printf(sc->sc_dev,
- "%s: device config failed, error %d\n",
- __func__, error);
+ switch (nstate) {
+ case IEEE80211_S_SCAN:
+ if ((vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_AHDEMO) &&
+ (sc->rxon.filter & htole32(WPI_FILTER_BSS))) {
+ sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
+ if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not send RXON\n", __func__);
+ }
}
- }
- if (nstate == IEEE80211_S_AUTH ||
- (nstate == IEEE80211_S_ASSOC && vap->iv_state == IEEE80211_S_RUN)) {
+ break;
+
+ case IEEE80211_S_ASSOC:
+ if (vap->iv_state != IEEE80211_S_RUN)
+ break;
+ /* FALLTHROUGH */
+ case IEEE80211_S_AUTH:
/*
* The node must be registered in the firmware before auth.
* Also the associd must be cleared on RUN -> ASSOC
* transitions.
*/
- error = wpi_auth(sc, vap);
- if (error != 0) {
+ if ((error = wpi_auth(sc, vap)) != 0) {
device_printf(sc->sc_dev,
- "%s: could not move to auth state, error %d\n",
+ "%s: could not move to AUTH state, error %d\n",
__func__, error);
}
- }
- if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) {
- error = wpi_run(sc, vap);
- if (error != 0) {
+ break;
+
+ case IEEE80211_S_RUN:
+ /*
+ * RUN -> RUN transition; just restart the timers.
+ */
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ wpi_calib_timeout(sc);
+ break;
+ }
+
+ /*
+ * !RUN -> RUN requires setting the association id
+ * which is done with a firmware cmd. We also defer
+ * starting the timers until that work is done.
+ */
+ if ((error = wpi_run(sc, vap)) != 0) {
device_printf(sc->sc_dev,
- "%s: could not move to run state, error %d\n",
- __func__, error);
+ "%s: could not move to RUN state\n", __func__);
}
- }
- if (nstate == IEEE80211_S_RUN) {
- /* RUN -> RUN transition; just restart the timers */
- wpi_calib_timeout(sc);
- /* XXX split out rate control timer */
+ break;
+
+ default:
+ break;
}
WPI_UNLOCK(sc);
IEEE80211_LOCK(ic);
- return wvp->newstate(vap, nstate, arg);
-}
-
-/*
- * Grab exclusive access to NIC memory.
- */
-static void
-wpi_mem_lock(struct wpi_softc *sc)
-{
- int ntries;
- uint32_t tmp;
+ if (error != 0) {
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+ return error;
+ }
- tmp = WPI_READ(sc, WPI_GPIO_CTL);
- WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_MAC);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- /* spin until we actually get the lock */
- for (ntries = 0; ntries < 100; ntries++) {
- if ((WPI_READ(sc, WPI_GPIO_CTL) &
- (WPI_GPIO_CLOCK | WPI_GPIO_SLEEP)) == WPI_GPIO_CLOCK)
- break;
- DELAY(10);
- }
- if (ntries == 100)
- device_printf(sc->sc_dev, "could not lock memory\n");
+ return wvp->newstate(vap, nstate, arg);
}
-/*
- * Release lock on NIC memory.
- */
static void
-wpi_mem_unlock(struct wpi_softc *sc)
+wpi_calib_timeout(void *arg)
{
- uint32_t tmp = WPI_READ(sc, WPI_GPIO_CTL);
- WPI_WRITE(sc, WPI_GPIO_CTL, tmp & ~WPI_GPIO_MAC);
-}
+ struct wpi_softc *sc = arg;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
-static uint32_t
-wpi_mem_read(struct wpi_softc *sc, uint16_t addr)
-{
- WPI_WRITE(sc, WPI_READ_MEM_ADDR, WPI_MEM_4 | addr);
- return WPI_READ(sc, WPI_READ_MEM_DATA);
-}
+ if (vap->iv_state != IEEE80211_S_RUN)
+ return;
-static void
-wpi_mem_write(struct wpi_softc *sc, uint16_t addr, uint32_t data)
-{
- WPI_WRITE(sc, WPI_WRITE_MEM_ADDR, WPI_MEM_4 | addr);
- WPI_WRITE(sc, WPI_WRITE_MEM_DATA, data);
-}
+ wpi_power_calibration(sc);
-static void
-wpi_mem_write_region_4(struct wpi_softc *sc, uint16_t addr,
- const uint32_t *data, int wlen)
-{
- for (; wlen > 0; wlen--, data++, addr+=4)
- wpi_mem_write(sc, addr, *data);
+ callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
}
-/*
- * Read data from the EEPROM. We access EEPROM through the MAC instead of
- * using the traditional bit-bang method. Data is read up until len bytes have
- * been obtained.
- */
-static uint16_t
-wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int len)
+static __inline uint8_t
+rate2plcp(const uint8_t rate)
{
- int ntries;
- uint32_t val;
- uint8_t *out = data;
-
- wpi_mem_lock(sc);
-
- for (; len > 0; len -= 2, addr++) {
- WPI_WRITE(sc, WPI_EEPROM_CTL, addr << 2);
-
- for (ntries = 0; ntries < 10; ntries++) {
- if ((val = WPI_READ(sc, WPI_EEPROM_CTL)) & WPI_EEPROM_READY)
- break;
- DELAY(5);
- }
-
- if (ntries == 10) {
- device_printf(sc->sc_dev, "could not read EEPROM\n");
- return ETIMEDOUT;
- }
-
- *out++= val >> 16;
- if (len > 1)
- *out ++= val >> 24;
+ switch (rate) {
+ case 12: return 0xd;
+ case 18: return 0xf;
+ case 24: return 0x5;
+ case 36: return 0x7;
+ case 48: return 0x9;
+ case 72: return 0xb;
+ case 96: return 0x1;
+ case 108: return 0x3;
+ case 2: return 10;
+ case 4: return 20;
+ case 11: return 55;
+ case 22: return 110;
+ default: return 0;
}
-
- wpi_mem_unlock(sc);
-
- return 0;
}
-/*
- * The firmware text and data segments are transferred to the NIC using DMA.
- * The driver just copies the firmware into DMA-safe memory and tells the NIC
- * where to find it. Once the NIC has copied the firmware into its internal
- * memory, we can free our local copy in the driver.
- */
-static int
-wpi_load_microcode(struct wpi_softc *sc, const uint8_t *fw, int size)
+static __inline uint8_t
+plcp2rate(const uint8_t plcp)
{
- int error, ntries;
-
- DPRINTFN(WPI_DEBUG_HW,("Loading microcode size 0x%x\n", size));
-
- size /= sizeof(uint32_t);
-
- wpi_mem_lock(sc);
-
- wpi_mem_write_region_4(sc, WPI_MEM_UCODE_BASE,
- (const uint32_t *)fw, size);
-
- wpi_mem_write(sc, WPI_MEM_UCODE_SRC, 0);
- wpi_mem_write(sc, WPI_MEM_UCODE_DST, WPI_FW_TEXT);
- wpi_mem_write(sc, WPI_MEM_UCODE_SIZE, size);
-
- /* run microcode */
- wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_RUN);
-
- /* wait while the adapter is busy copying the firmware */
- for (error = 0, ntries = 0; ntries < 1000; ntries++) {
- uint32_t status = WPI_READ(sc, WPI_TX_STATUS);
- DPRINTFN(WPI_DEBUG_HW,
- ("firmware status=0x%x, val=0x%x, result=0x%x\n", status,
- WPI_TX_IDLE(6), status & WPI_TX_IDLE(6)));
- if (status & WPI_TX_IDLE(6)) {
- DPRINTFN(WPI_DEBUG_HW,
- ("Status Match! - ntries = %d\n", ntries));
- break;
- }
- DELAY(10);
- }
- if (ntries == 1000) {
- device_printf(sc->sc_dev, "timeout transferring firmware\n");
- error = ETIMEDOUT;
+ switch (plcp) {
+ case 0xd: return 12;
+ case 0xf: return 18;
+ case 0x5: return 24;
+ case 0x7: return 36;
+ case 0x9: return 48;
+ case 0xb: return 72;
+ case 0x1: return 96;
+ case 0x3: return 108;
+ case 10: return 2;
+ case 20: return 4;
+ case 55: return 11;
+ case 110: return 22;
+ default: return 0;
}
-
- /* start the microcode executing */
- wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_ENABLE);
-
- wpi_mem_unlock(sc);
-
- return (error);
}
+/* Quickly determine if a given rate is CCK or OFDM. */
+#define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
+
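
net80211 rate codes are expressed in 500 kb/s units, so 2/4/11/22 are the 1/2/5.5/11 Mb/s CCK rates and 12 through 108 are the 6 through 54 Mb/s OFDM rates; 22 (11 Mb/s CCK) is the only code at or above 12 that is not OFDM, which is exactly what the macro above encodes. A small self-contained check of that classification:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RATE_IS_OFDM(rate)      ((rate) >= 12 && (rate) != 22)

int
main(void)
{
        /* net80211 rate codes, in 500 kb/s units. */
        const uint8_t cck[]  = { 2, 4, 11, 22 };        /* 1, 2, 5.5, 11 Mb/s */
        const uint8_t ofdm[] = { 12, 18, 24, 36, 48, 72, 96, 108 }; /* 6..54 Mb/s */
        size_t i;

        for (i = 0; i < sizeof(cck) / sizeof(cck[0]); i++)
                assert(!RATE_IS_OFDM(cck[i]));
        for (i = 0; i < sizeof(ofdm) / sizeof(ofdm[0]); i++)
                assert(RATE_IS_OFDM(ofdm[i]));

        /* 22 (11 Mb/s CCK) is the only code >= 12 that is not OFDM. */
        printf("11 Mb/s CCK classified as OFDM? %d\n", RATE_IS_OFDM(22));
        return 0;
}
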
static void
-wpi_rx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc,
- struct wpi_rx_data *data)
+wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
+ struct wpi_rx_data *data)
{
struct ifnet *ifp = sc->sc_ifp;
+ const struct ieee80211_cipher *cip = NULL;
struct ieee80211com *ic = ifp->if_l2com;
struct wpi_rx_ring *ring = &sc->rxq;
struct wpi_rx_stat *stat;
struct wpi_rx_head *head;
struct wpi_rx_tail *tail;
+ struct ieee80211_frame *wh;
struct ieee80211_node *ni;
- struct mbuf *m, *mnew;
+ struct mbuf *m, *m1;
bus_addr_t paddr;
+ uint32_t flags;
+ uint16_t len;
int error;
stat = (struct wpi_rx_stat *)(desc + 1);
if (stat->len > WPI_STAT_MAXLEN) {
- device_printf(sc->sc_dev, "invalid rx statistic header\n");
+ device_printf(sc->sc_dev, "invalid RX statistic header\n");
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
- tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + le16toh(head->len));
-
- DPRINTFN(WPI_DEBUG_RX, ("rx intr: idx=%d len=%d stat len=%d rssi=%d "
- "rate=%x chan=%d tstamp=%ju\n", ring->cur, le32toh(desc->len),
- le16toh(head->len), (int8_t)stat->rssi, head->rate, head->chan,
- (uintmax_t)le64toh(tail->tstamp)));
-
- /* discard Rx frames with bad CRC early */
- if ((le32toh(tail->flags) & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
- DPRINTFN(WPI_DEBUG_RX, ("%s: rx flags error %x\n", __func__,
- le32toh(tail->flags)));
+ len = le16toh(head->len);
+ tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
+ flags = le32toh(tail->flags);
+
+ DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
+ " rate %x chan %d tstamp %ju\n", __func__, ring->cur,
+ le32toh(desc->len), len, (int8_t)stat->rssi,
+ head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));
+
+ /* Discard frames with a bad FCS early. */
+ if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
+ DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
+ __func__, flags);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
- if (le16toh(head->len) < sizeof (struct ieee80211_frame)) {
- DPRINTFN(WPI_DEBUG_RX, ("%s: frame too short: %d\n", __func__,
- le16toh(head->len)));
+ /* Discard frames that are too short. */
+ if (len < sizeof (*wh)) {
+ DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
+ __func__, len);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
- /* XXX don't need mbuf, just dma buffer */
- mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
- if (mnew == NULL) {
- DPRINTFN(WPI_DEBUG_RX, ("%s: no mbuf to restock ring\n",
- __func__));
+ m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
+ if (m1 == NULL) {
+ DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
+ __func__);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
bus_dmamap_unload(ring->data_dmat, data->map);
- error = bus_dmamap_load(ring->data_dmat, data->map,
- mtod(mnew, caddr_t), MJUMPAGESIZE,
- wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
+ MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: bus_dmamap_load failed, error %d\n", __func__, error);
- m_freem(mnew);
+ m_freem(m1);
+
+ /* Try to reload the old mbuf. */
+ error = bus_dmamap_load(ring->data_dmat, data->map,
+ mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
+ &paddr, BUS_DMA_NOWAIT);
+ if (error != 0 && error != EFBIG) {
+ panic("%s: could not load old RX mbuf", __func__);
+ }
+ /* Physical address may have changed. */
+ ring->desc[ring->cur] = htole32(paddr);
+ bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
return;
}
- bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
- /* finalize mbuf and swap in new one */
m = data->m;
+ data->m = m1;
+ /* Update RX descriptor. */
+ ring->desc[ring->cur] = htole32(paddr);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+
+ /* Finalize mbuf. */
m->m_pkthdr.rcvif = ifp;
m->m_data = (caddr_t)(head + 1);
- m->m_pkthdr.len = m->m_len = le16toh(head->len);
-
- data->m = mnew;
- /* update Rx descriptor */
- ring->desc[ring->cur] = htole32(paddr);
+ m->m_pkthdr.len = m->m_len = len;
+
+ /* Grab a reference to the source node. */
+ wh = mtod(m, struct ieee80211_frame *);
+ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
+
+ if (ni != NULL)
+ cip = ni->ni_ucastkey.wk_cipher;
+ if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
+ !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ cip != NULL && cip->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+ if ((flags & WPI_RX_CIPHER_MASK) != WPI_RX_CIPHER_CCMP) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ m_freem(m);
+ return;
+ }
+ /* Check whether decryption was successful or not. */
+ if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
+ DPRINTF(sc, WPI_DEBUG_RECV,
+ "CCMP decryption failed 0x%x\n", flags);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ m_freem(m);
+ return;
+ }
+ m->m_flags |= M_WEP;
+ }
if (ieee80211_radiotap_active(ic)) {
struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
- tap->wr_chan_freq =
- htole16(ic->ic_channels[head->chan].ic_freq);
- tap->wr_chan_flags =
- htole16(ic->ic_channels[head->chan].ic_flags);
+ if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
+ tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
tap->wr_dbm_antsignal = (int8_t)(stat->rssi - WPI_RSSI_OFFSET);
tap->wr_dbm_antnoise = (int8_t)le16toh(stat->noise);
tap->wr_tsft = tail->tstamp;
tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
- switch (head->rate) {
- /* CCK rates */
- case 10: tap->wr_rate = 2; break;
- case 20: tap->wr_rate = 4; break;
- case 55: tap->wr_rate = 11; break;
- case 110: tap->wr_rate = 22; break;
- /* OFDM rates */
- case 0xd: tap->wr_rate = 12; break;
- case 0xf: tap->wr_rate = 18; break;
- case 0x5: tap->wr_rate = 24; break;
- case 0x7: tap->wr_rate = 36; break;
- case 0x9: tap->wr_rate = 48; break;
- case 0xb: tap->wr_rate = 72; break;
- case 0x1: tap->wr_rate = 96; break;
- case 0x3: tap->wr_rate = 108; break;
- /* unknown rate: should not happen */
- default: tap->wr_rate = 0;
- }
- if (le16toh(head->flags) & 0x4)
- tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+ tap->wr_rate = plcp2rate(head->plcp);
}
WPI_UNLOCK(sc);
- ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *));
+ /* Send the frame to the 802.11 layer. */
if (ni != NULL) {
- (void) ieee80211_input(ni, m, stat->rssi, 0);
+ (void)ieee80211_input(ni, m, stat->rssi, -WPI_RSSI_OFFSET);
+ /* Node is no longer needed. */
ieee80211_free_node(ni);
} else
- (void) ieee80211_input_all(ic, m, stat->rssi, 0);
+ (void)ieee80211_input_all(ic, m, stat->rssi, -WPI_RSSI_OFFSET);
WPI_LOCK(sc);
}
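
wpi_rx_done() above allocates and DMA-maps the replacement mbuf before it hands the filled one to net80211, so a ring slot is never left without a buffer; if the replacement cannot be obtained the frame is simply dropped (and on a map failure the old mbuf is reloaded). A minimal sketch of that ordering, with plain malloc() standing in for the mbuf and busdma machinery:

#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE       2048

/* One RX ring slot; buf must always point at a valid buffer. */
struct slot {
        void *buf;
};

/* Returns the filled buffer (now owned by the caller), or NULL on a drop. */
static void *
receive_from_slot(struct slot *s)
{
        void *fresh, *done;

        /* Get the replacement first; if that fails, keep the old buffer. */
        fresh = malloc(SLOT_SIZE);
        if (fresh == NULL)
                return NULL;            /* frame dropped, slot still stocked */

        done = s->buf;                  /* hand the filled buffer up... */
        s->buf = fresh;                 /* ...only after restocking the slot */
        return done;
}

int
main(void)
{
        struct slot s = { .buf = malloc(SLOT_SIZE) };
        void *frame = receive_from_slot(&s);

        printf("frame delivered: %s, slot stocked: %s\n",
            frame != NULL ? "yes" : "no", s.buf != NULL ? "yes" : "no");
        free(frame);
        free(s.buf);
        return 0;
}
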
static void
-wpi_tx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc)
+wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
+ struct wpi_rx_data *data)
+{
+ /* Ignore */
+}
+
+static void
+wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
struct ifnet *ifp = sc->sc_ifp;
struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
- struct wpi_tx_data *txdata = &ring->data[desc->idx];
+ struct wpi_tx_data *data = &ring->data[desc->idx];
struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
- struct ieee80211_node *ni = txdata->ni;
- struct ieee80211vap *vap = ni->ni_vap;
- int retrycnt = 0;
+ struct mbuf *m;
+ struct ieee80211_node *ni;
+ struct ieee80211vap *vap;
+ int status = le32toh(stat->status);
+
+ KASSERT(data->ni != NULL, ("no node"));
- DPRINTFN(WPI_DEBUG_TX, ("tx done: qid=%d idx=%d retries=%d nkill=%d "
- "rate=%x duration=%d status=%x\n", desc->qid, desc->idx,
- stat->ntries, stat->nkill, stat->rate, le32toh(stat->duration),
- le32toh(stat->status)));
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
+ "qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
+ "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
+ stat->btkillcnt, stat->rate, le32toh(stat->duration), status);
+
+ /* Unmap and free mbuf. */
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m = data->m, data->m = NULL;
+ ni = data->ni, data->ni = NULL;
+ vap = ni->ni_vap;
/*
* Update rate control statistics for the node.
- * XXX we should not count mgmt frames since they're always sent at
- * the lowest available bit-rate.
- * XXX frames w/o ACK shouldn't be used either
*/
- if (stat->ntries > 0) {
- DPRINTFN(WPI_DEBUG_TX, ("%d retries\n", stat->ntries));
- retrycnt = 1;
- }
- ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS,
- &retrycnt, NULL);
-
- /* XXX oerrors should only count errors !maxtries */
- if ((le32toh(stat->status) & 0xff) != 1)
+ WPI_UNLOCK(sc);
+ if ((status & 0xff) != 1) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- else
+ ieee80211_ratectl_tx_complete(vap, ni,
+ IEEE80211_RATECTL_TX_FAILURE, &stat->ackfailcnt, NULL);
+ } else {
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ ieee80211_ratectl_tx_complete(vap, ni,
+ IEEE80211_RATECTL_TX_SUCCESS, &stat->ackfailcnt, NULL);
+ }
- bus_dmamap_sync(ring->data_dmat, txdata->map, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(ring->data_dmat, txdata->map);
- /* XXX handle M_TXCB? */
- m_freem(txdata->m);
- txdata->m = NULL;
- ieee80211_free_node(txdata->ni);
- txdata->ni = NULL;
-
- ring->queued--;
+ ieee80211_tx_complete(ni, m, (status & 0xff) != 1);
+ WPI_LOCK(sc);
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- wpi_start_locked(ifp);
+ if (--ring->queued < WPI_TX_RING_LOMARK) {
+ sc->qfullmsk &= ~(1 << ring->qid);
+ if (sc->qfullmsk == 0 &&
+ (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ wpi_start_locked(ifp);
+ }
+ }
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}
+/*
+ * Process a "command done" firmware notification. This is where we wake up
+ * processes waiting for a synchronous command completion.
+ */
static void
-wpi_cmd_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc)
+wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
- struct wpi_tx_ring *ring = &sc->cmdq;
+ struct wpi_tx_ring *ring = &sc->txq[4];
struct wpi_tx_data *data;
- DPRINTFN(WPI_DEBUG_CMD, ("cmd notification qid=%x idx=%d flags=%x "
- "type=%s len=%d\n", desc->qid, desc->idx,
- desc->flags, wpi_cmd_str(desc->type),
- le32toh(desc->len)));
+ DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid=%x idx=%d flags=%x "
+ "type=%s len=%d\n", desc->qid, desc->idx,
+ desc->flags, wpi_cmd_str(desc->type),
+ le32toh(desc->len));
if ((desc->qid & 7) != 4)
- return; /* not a command ack */
+ return; /* Not a command ack. */
data = &ring->data[desc->idx];
- /* if the command was mapped in a mbuf, free it */
+ /* If the command was mapped in an mbuf, free it. */
if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
@@ -1647,374 +1922,710 @@ wpi_notif_intr(struct wpi_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
- struct wpi_rx_desc *desc;
- struct wpi_rx_data *data;
- uint32_t hw;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ int hw;
bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
BUS_DMASYNC_POSTREAD);
hw = le32toh(sc->shared->next);
- while (sc->rxq.cur != hw) {
- data = &sc->rxq.data[sc->rxq.cur];
+ hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
+
+ if (sc->rxq.cur == hw)
+ return;
+
+ do {
+ sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
+
+ struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+ struct wpi_rx_desc *desc;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
- desc = (void *)data->m->m_ext.ext_buf;
+ desc = mtod(data->m, struct wpi_rx_desc *);
- DPRINTFN(WPI_DEBUG_NOTIFY,
- ("notify qid=%x idx=%d flags=%x type=%d len=%d\n",
- desc->qid,
- desc->idx,
- desc->flags,
- desc->type,
- le32toh(desc->len)));
+ DPRINTF(sc, WPI_DEBUG_NOTIFY,
+ "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
+ __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
+ desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));
- if (!(desc->qid & 0x80)) /* reply to a command */
- wpi_cmd_intr(sc, desc);
+ if (!(desc->qid & 0x80)) /* Reply to a command. */
+ wpi_cmd_done(sc, desc);
switch (desc->type) {
case WPI_RX_DONE:
- /* a 802.11 frame was received */
- wpi_rx_intr(sc, desc, data);
+ /* An 802.11 frame has been received. */
+ wpi_rx_done(sc, desc, data);
break;
case WPI_TX_DONE:
- /* a 802.11 frame has been transmitted */
- wpi_tx_intr(sc, desc);
+ /* An 802.11 frame has been transmitted. */
+ wpi_tx_done(sc, desc);
+ break;
+
+ case WPI_RX_STATISTICS:
+ case WPI_BEACON_STATISTICS:
+ wpi_rx_statistics(sc, desc, data);
break;
+ case WPI_BEACON_MISSED:
+ {
+ struct wpi_beacon_missed *miss =
+ (struct wpi_beacon_missed *)(desc + 1);
+ int misses;
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ misses = le32toh(miss->consecutive);
+
+ DPRINTF(sc, WPI_DEBUG_STATE,
+ "%s: beacons missed %d/%d\n", __func__, misses,
+ le32toh(miss->total));
+
+ if (vap->iv_state == IEEE80211_S_RUN &&
+ (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ if (misses >= vap->iv_bmissthreshold) {
+ WPI_UNLOCK(sc);
+ ieee80211_beacon_miss(ic);
+ WPI_LOCK(sc);
+ }
+ }
+ break;
+ }
case WPI_UC_READY:
{
struct wpi_ucode_info *uc =
- (struct wpi_ucode_info *)(desc + 1);
+ (struct wpi_ucode_info *)(desc + 1);
- /* the microcontroller is ready */
- DPRINTF(("microcode alive notification version %x "
- "alive %x\n", le32toh(uc->version),
- le32toh(uc->valid)));
+ /* The microcontroller is ready. */
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ DPRINTF(sc, WPI_DEBUG_RESET,
+ "microcode alive notification version=%d.%d "
+ "subtype=%x alive=%x\n", uc->major, uc->minor,
+ uc->subtype, le32toh(uc->valid));
if (le32toh(uc->valid) != 1) {
device_printf(sc->sc_dev,
"microcontroller initialization failed\n");
wpi_stop_locked(sc);
}
+ /* Save the address of the error log in SRAM. */
+ sc->errptr = le32toh(uc->errptr);
break;
}
case WPI_STATE_CHANGED:
{
- uint32_t *status = (uint32_t *)(desc + 1);
-
- /* enabled/disabled notification */
- DPRINTF(("state changed to %x\n", le32toh(*status)));
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ uint32_t *status = (uint32_t *)(desc + 1);
+#ifdef WPI_DEBUG
+ DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
+ le32toh(*status));
+#endif
if (le32toh(*status) & 1) {
- device_printf(sc->sc_dev,
- "Radio transmitter is switched off\n");
- sc->flags |= WPI_FLAG_HW_RADIO_OFF;
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- /* Disable firmware commands */
- WPI_WRITE(sc, WPI_UCODE_SET, WPI_DISABLE_CMD);
+ ieee80211_runtask(ic, &sc->sc_radiooff_task);
+ return;
}
break;
}
case WPI_START_SCAN:
{
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
#ifdef WPI_DEBUG
struct wpi_start_scan *scan =
- (struct wpi_start_scan *)(desc + 1);
+ (struct wpi_start_scan *)(desc + 1);
+ DPRINTF(sc, WPI_DEBUG_SCAN,
+ "%s: scanning channel %d status %x\n",
+ __func__, scan->chan, le32toh(scan->status));
#endif
-
- DPRINTFN(WPI_DEBUG_SCANNING,
- ("scanning channel %d status %x\n",
- scan->chan, le32toh(scan->status)));
break;
}
case WPI_STOP_SCAN:
{
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
#ifdef WPI_DEBUG
struct wpi_stop_scan *scan =
- (struct wpi_stop_scan *)(desc + 1);
+ (struct wpi_stop_scan *)(desc + 1);
+ DPRINTF(sc, WPI_DEBUG_SCAN,
+ "scan finished nchan=%d status=%d chan=%d\n",
+ scan->nchan, scan->status, scan->chan);
#endif
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
-
- DPRINTFN(WPI_DEBUG_SCANNING,
- ("scan finished nchan=%d status=%d chan=%d\n",
- scan->nchan, scan->status, scan->chan));
-
sc->sc_scan_timer = 0;
+ WPI_UNLOCK(sc);
ieee80211_scan_next(vap);
- break;
- }
- case WPI_MISSED_BEACON:
- {
- struct wpi_missed_beacon *beacon =
- (struct wpi_missed_beacon *)(desc + 1);
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
-
- if (le32toh(beacon->consecutive) >=
- vap->iv_bmissthreshold) {
- DPRINTF(("Beacon miss: %u >= %u\n",
- le32toh(beacon->consecutive),
- vap->iv_bmissthreshold));
- ieee80211_beacon_miss(ic);
- }
+ WPI_LOCK(sc);
break;
}
}
+ } while (sc->rxq.cur != hw);
- sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
+ /* Tell the firmware what we have processed. */
+ wpi_update_rx_ring(sc);
+}
+
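
Two index details above are easy to miss: the "next" value read from the shared page is rewound by one so the loop stops short of the slot the firmware will fill next, and the write pointer handed back in wpi_update_rx_ring() is rounded down to a multiple of 8 (ring->cur & ~7). A small sketch of that arithmetic, assuming WPI_RX_RING_COUNT is 64:

#include <stdio.h>

#define RX_RING_COUNT   64      /* assumed to match WPI_RX_RING_COUNT */

int
main(void)
{
        unsigned int hw, cur, wptr;

        /* "next" index published by the firmware in the shared page. */
        hw = 19;
        /* Rewind by one (with wrap) so we stop short of the slot in use. */
        hw = (hw == 0) ? RX_RING_COUNT - 1 : hw - 1;

        /* Walk the ring the same way wpi_notif_intr() does. */
        cur = 60;
        while (cur != hw) {
                cur = (cur + 1) % RX_RING_COUNT;
                /* process slot 'cur' ... */
        }

        /* Write pointer given back to the firmware, aligned down to 8. */
        wptr = cur & ~7u;
        printf("stopped at %u, FH write pointer %u\n", cur, wptr);  /* 18, 16 */
        return 0;
}
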
+/*
+ * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
+ * from power-down sleep mode.
+ */
+static void
+wpi_wakeup_intr(struct wpi_softc *sc)
+{
+ int qid;
+
+ DPRINTF(sc, WPI_DEBUG_PWRSAVE,
+ "%s: ucode wakeup from power-down sleep\n", __func__);
+
+ /* Wakeup RX and TX rings. */
+ if (sc->rxq.update) {
+ wpi_update_rx_ring(sc);
+ sc->rxq.update = 0;
}
+ for (qid = 0; qid < WPI_NTXQUEUES; qid++) {
+ struct wpi_tx_ring *ring = &sc->txq[qid];
- /* tell the firmware what we have processed */
- hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
- WPI_WRITE(sc, WPI_RX_WIDX, hw & ~7);
+ if (ring->update) {
+ wpi_update_tx_ring(sc, ring);
+ ring->update = 0;
+ }
+ }
+
+ WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
+}
+
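
wpi_update_{rx,tx}_ring() and wpi_wakeup_intr() together implement a deferred write: if the MAC is in power-save sleep the new write pointer is not pushed immediately; instead an update flag is set, a wakeup is requested, and the pointer is written when the INT_WAKEUP interrupt arrives. A minimal sketch of that handshake, with a boolean standing in for the WPI_UCODE_GP1_MAC_SLEEP bit:

#include <stdbool.h>
#include <stdio.h>

struct ring {
        int cur;        /* producer index maintained by the driver       */
        int hw_wptr;    /* last write pointer actually pushed to the NIC */
        bool update;    /* push deferred until the MAC wakes up          */
};

static bool mac_asleep = true;  /* stands in for WPI_UCODE_GP1_MAC_SLEEP */

static void
update_ring(struct ring *r)
{
        if (mac_asleep) {
                /* Request a wakeup and remember that a push is pending. */
                r->update = true;
                return;
        }
        r->hw_wptr = r->cur;
}

static void
wakeup_intr(struct ring *r)
{
        mac_asleep = false;
        if (r->update) {
                update_ring(r);         /* replay the deferred push */
                r->update = false;
        }
}

int
main(void)
{
        struct ring r = { .cur = 5, .hw_wptr = 0, .update = false };

        update_ring(&r);        /* MAC asleep: deferred       */
        printf("after deferred update: hw_wptr=%d\n", r.hw_wptr);      /* 0 */
        wakeup_intr(&r);        /* INT_WAKEUP: pointer pushed */
        printf("after wakeup: hw_wptr=%d\n", r.hw_wptr);               /* 5 */
        return 0;
}
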
+/*
+ * Dump the firmware error log when a firmware panic occurs. Although
+ * we can't debug the firmware (it is neither open source nor free), the
+ * log can help us identify certain classes of problems.
+ */
+static void
+wpi_fatal_intr(struct wpi_softc *sc)
+{
+ struct wpi_fw_dump dump;
+ uint32_t i, offset, count;
+ const uint32_t size_errmsg =
+ (sizeof (wpi_fw_errmsg) / sizeof ((wpi_fw_errmsg)[0]));
+
+ /* Check that the error log address is valid. */
+ if (sc->errptr < WPI_FW_DATA_BASE ||
+ sc->errptr + sizeof (dump) >
+ WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) {
+ printf("%s: bad firmware error log address 0x%08x\n", __func__,
+ sc->errptr);
+ return;
+ }
+ if (wpi_nic_lock(sc) != 0) {
+ printf("%s: could not read firmware error log\n", __func__);
+ return;
+ }
+ /* Read number of entries in the log. */
+ count = wpi_mem_read(sc, sc->errptr);
+ if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) {
+ printf("%s: invalid count field (count = %u)\n", __func__,
+ count);
+ wpi_nic_unlock(sc);
+ return;
+ }
+ /* Skip "count" field. */
+ offset = sc->errptr + sizeof (uint32_t);
+ printf("firmware error log (count = %u):\n", count);
+ for (i = 0; i < count; i++) {
+ wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump,
+ sizeof (dump) / sizeof (uint32_t));
+
+ printf(" error type = \"%s\" (0x%08X)\n",
+ (dump.desc < size_errmsg) ?
+ wpi_fw_errmsg[dump.desc] : "UNKNOWN",
+ dump.desc);
+ printf(" error data = 0x%08X\n",
+ dump.data);
+ printf(" branch link = 0x%08X%08X\n",
+ dump.blink[0], dump.blink[1]);
+ printf(" interrupt link = 0x%08X%08X\n",
+ dump.ilink[0], dump.ilink[1]);
+ printf(" time = %u\n", dump.time);
+
+ offset += sizeof (dump);
+ }
+ wpi_nic_unlock(sc);
+ /* Dump driver status (TX and RX rings) while we're here. */
+ printf("driver status:\n");
+ for (i = 0; i < WPI_NTXQUEUES; i++) {
+ struct wpi_tx_ring *ring = &sc->txq[i];
+ printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
+ i, ring->qid, ring->cur, ring->queued);
+ }
+ printf(" rx ring: cur=%d\n", sc->rxq.cur);
}
static void
wpi_intr(void *arg)
{
struct wpi_softc *sc = arg;
- uint32_t r;
+ struct ifnet *ifp = sc->sc_ifp;
+ uint32_t r1, r2;
WPI_LOCK(sc);
- r = WPI_READ(sc, WPI_INTR);
- if (r == 0 || r == 0xffffffff) {
+ /* Disable interrupts. */
+ WPI_WRITE(sc, WPI_INT_MASK, 0);
+
+ r1 = WPI_READ(sc, WPI_INT);
+
+ if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
WPI_UNLOCK(sc);
- return;
+ return; /* Hardware gone! */
}
- /* disable interrupts */
- WPI_WRITE(sc, WPI_MASK, 0);
- /* ack interrupts */
- WPI_WRITE(sc, WPI_INTR, r);
+ r2 = WPI_READ(sc, WPI_FH_INT);
+
+ DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__,
+ r1, r2);
+
+ if (r1 == 0 && r2 == 0)
+ goto done; /* Interrupt not for us. */
+
+ /* Acknowledge interrupts. */
+ WPI_WRITE(sc, WPI_INT, r1);
+ WPI_WRITE(sc, WPI_FH_INT, r2);
- if (r & (WPI_SW_ERROR | WPI_HW_ERROR)) {
- struct ifnet *ifp = sc->sc_ifp;
+ if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) {
struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "fatal firmware error\n");
- DPRINTFN(6,("(%s)\n", (r & WPI_SW_ERROR) ? "(Software Error)" :
- "(Hardware Error)"));
- if (vap != NULL)
- ieee80211_cancel_scan(vap);
- ieee80211_runtask(ic, &sc->sc_restarttask);
+ wpi_fatal_intr(sc);
+ DPRINTF(sc, WPI_DEBUG_HW,
+ "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" :
+ "(Hardware Error)");
+ ieee80211_runtask(ic, &sc->sc_reinittask);
sc->flags &= ~WPI_FLAG_BUSY;
WPI_UNLOCK(sc);
return;
}
- if (r & WPI_RX_INTR)
+ if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
+ (r2 & WPI_FH_INT_RX))
wpi_notif_intr(sc);
- if (r & WPI_ALIVE_INTR) /* firmware initialized */
- wakeup(sc);
+ if (r1 & WPI_INT_ALIVE)
+ wakeup(sc); /* Firmware is alive. */
- /* re-enable interrupts */
- if (sc->sc_ifp->if_flags & IFF_UP)
- WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK);
+ if (r1 & WPI_INT_WAKEUP)
+ wpi_wakeup_intr(sc);
+
+done:
+ /* Re-enable interrupts. */
+ if (ifp->if_flags & IFF_UP)
+ WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
WPI_UNLOCK(sc);
}
-static uint8_t
-wpi_plcp_signal(int rate)
+static int
+wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf)
{
- switch (rate) {
- /* CCK rates (returned values are device-dependent) */
- case 2: return 10;
- case 4: return 20;
- case 11: return 55;
- case 22: return 110;
+ struct ieee80211_frame *wh;
+ struct wpi_tx_cmd *cmd;
+ struct wpi_tx_data *data;
+ struct wpi_tx_desc *desc;
+ struct wpi_tx_ring *ring;
+ struct mbuf *m1;
+ bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER];
+ u_int hdrlen;
+ int error, i, nsegs, pad, totlen;
- /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
- /* R1-R4 (ral/ural is R4-R1) */
- case 12: return 0xd;
- case 18: return 0xf;
- case 24: return 0x5;
- case 36: return 0x7;
- case 48: return 0x9;
- case 72: return 0xb;
- case 96: return 0x1;
- case 108: return 0x3;
+ WPI_LOCK_ASSERT(sc);
- /* unsupported rates (should not get there) */
- default: return 0;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ wh = mtod(buf->m, struct ieee80211_frame *);
+ hdrlen = ieee80211_anyhdrsize(wh);
+ totlen = buf->m->m_pkthdr.len;
+
+ if (hdrlen & 3) {
+ /* First segment length must be a multiple of 4. */
+ pad = 4 - (hdrlen & 3);
+ } else
+ pad = 0;
+
+ ring = &sc->txq[buf->ac];
+ desc = &ring->desc[ring->cur];
+ data = &ring->data[ring->cur];
+
+ /* Prepare TX firmware command. */
+ cmd = &ring->cmd[ring->cur];
+ cmd->code = buf->code;
+ cmd->flags = 0;
+ cmd->qid = ring->qid;
+ cmd->idx = ring->cur;
+
+ memcpy(cmd->data, buf->data, buf->size);
+
+ /* Save and trim IEEE802.11 header. */
+ memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen);
+ m_adj(buf->m, hdrlen);
+
+ error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m,
+ segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0 && error != EFBIG) {
+ device_printf(sc->sc_dev,
+ "%s: can't map mbuf (error %d)\n", __func__, error);
+ m_freem(buf->m);
+ return error;
}
-}
+ if (error != 0) {
+ /* Too many DMA segments, linearize mbuf. */
+ m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER);
+ if (m1 == NULL) {
+ device_printf(sc->sc_dev,
+ "%s: could not defrag mbuf\n", __func__);
+ m_freem(buf->m);
+ return ENOBUFS;
+ }
+ buf->m = m1;
-/* quickly determine if a given rate is CCK or OFDM */
-#define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
+ error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
+ buf->m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "%s: can't map mbuf (error %d)\n", __func__, error);
+ m_freem(buf->m);
+ return error;
+ }
+ }
+
+ data->m = buf->m;
+ data->ni = buf->ni;
+
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
+ __func__, ring->qid, ring->cur, totlen, nsegs);
+
+ /* Fill TX descriptor. */
+ desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs);
+ /* First DMA segment is used by the TX command. */
+ desc->segs[0].addr = htole32(data->cmd_paddr);
+ desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad);
+ /* Other DMA segments are for data payload. */
+ seg = &segs[0];
+ for (i = 1; i <= nsegs; i++) {
+ desc->segs[i].addr = htole32(seg->ds_addr);
+ desc->segs[i].len = htole32(seg->ds_len);
+ seg++;
+ }
+
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+
+ /* Kick TX ring. */
+ ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
+ wpi_update_tx_ring(sc, ring);
+
+ /* Mark TX ring as full if we reach a certain threshold. */
+ if (++ring->queued > WPI_TX_RING_HIMARK)
+ sc->qfullmsk |= 1 << ring->qid;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
+ return 0;
+}
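
The desc->nsegs assignment above packs two values into a single byte: WPI_PAD32(totlen + pad) in the high nibble and the segment count (the TX command segment plus the data segments) in the low nibble. A short sketch of that arithmetic, assuming WPI_PAD32(x) evaluates to the pad-byte count ((((x) + 3) & ~3) - (x)):

#include <stdio.h>
#include <stdint.h>

/* Assumed definition: bytes needed to pad x up to the next 4-byte boundary. */
#define PAD32(x)        ((((x) + 3) & ~3) - (x))

int
main(void)
{
        int totlen = 41;        /* frame length before the header was trimmed */
        int pad = 2;            /* header alignment pad picked by wpi_cmd2()  */
        int nsegs = 3;          /* data segments returned by bus_dmamap_load  */
        uint8_t field;

        /* High nibble: pad-byte count; low nibble: command + data segments. */
        field = (uint8_t)(PAD32(totlen + pad) << 4 | (1 + nsegs));

        printf("pad32=%d segs=%d -> nsegs field 0x%02x\n",
            PAD32(totlen + pad), 1 + nsegs, (unsigned)field);  /* 1, 4, 0x14 */
        return 0;
}
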
/*
- * Construct the data packet for a transmit buffer and acutally put
- * the buffer onto the transmit ring, kicking the card to process the
- * the buffer.
+ * Construct the data packet for a transmit buffer.
*/
static int
-wpi_tx_data(struct wpi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
- int ac)
+wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
+ const struct ieee80211_txparam *tp;
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
- struct wpi_tx_ring *ring = &sc->txq[ac];
- struct wpi_tx_desc *desc;
- struct wpi_tx_data *data;
- struct wpi_tx_cmd *cmd;
- struct wpi_cmd_data *tx;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct wpi_node *wn = (void *)ni;
+ struct ieee80211_channel *chan;
struct ieee80211_frame *wh;
- const struct ieee80211_txparam *tp;
- struct ieee80211_key *k;
- struct mbuf *mnew;
- int i, error, nsegs, rate, hdrlen, ismcast;
- bus_dma_segment_t segs[WPI_MAX_SCATTER];
+ struct ieee80211_key *k = NULL;
+ struct wpi_cmd_data tx;
+ struct wpi_buf tx_data;
+ uint32_t flags;
+ uint16_t qos;
+ uint8_t tid, type;
+ int ac, error, rate, ismcast, totlen;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
- desc = &ring->desc[ring->cur];
- data = &ring->data[ring->cur];
+ /* Select EDCA Access Category and TX ring for this frame. */
+ if (IEEE80211_QOS_HAS_SEQ(wh)) {
+ qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
+ tid = qos & IEEE80211_QOS_TID;
+ } else {
+ qos = 0;
+ tid = 0;
+ }
+ ac = M_WME_GETAC(m);
- wh = mtod(m0, struct ieee80211_frame *);
+ chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
+ ni->ni_chan : ic->ic_curchan;
+ tp = &vap->iv_txparms[ieee80211_chan2mode(chan)];
- hdrlen = ieee80211_hdrsize(wh);
- ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
+ /* Choose a TX rate index. */
+ if (type == IEEE80211_FC0_TYPE_MGT)
+ rate = tp->mgmtrate;
+ else if (ismcast)
+ rate = tp->mcastrate;
+ else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
+ rate = tp->ucastrate;
+ else if (m->m_flags & M_EAPOL)
+ rate = tp->mgmtrate;
+ else {
+ /* XXX pass pktlen */
+ (void) ieee80211_ratectl_rate(ni, NULL, 0);
+ rate = ni->ni_txrate;
+ }
+ /* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
- k = ieee80211_crypto_encap(ni, m0);
+ /* Retrieve key for TX. */
+ k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
- m_freem(m0);
- return ENOBUFS;
+ error = ENOBUFS;
+ goto fail;
}
- /* packet header may have moved, reset our local pointer */
- wh = mtod(m0, struct ieee80211_frame *);
+ /* 802.11 header may have moved. */
+ wh = mtod(m, struct ieee80211_frame *);
}
+ totlen = m->m_pkthdr.len;
- cmd = &ring->cmd[ring->cur];
- cmd->code = WPI_CMD_TX_DATA;
- cmd->flags = 0;
- cmd->qid = ring->qid;
- cmd->idx = ring->cur;
+ if (ieee80211_radiotap_active_vap(vap)) {
+ struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
- tx = (struct wpi_cmd_data *)cmd->data;
- tx->flags = htole32(WPI_TX_AUTO_SEQ);
- tx->timeout = htole16(0);
- tx->ofdm_mask = 0xff;
- tx->cck_mask = 0x0f;
- tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
- tx->id = ismcast ? WPI_ID_BROADCAST : WPI_ID_BSS;
- tx->len = htole16(m0->m_pkthdr.len);
+ tap->wt_flags = 0;
+ tap->wt_rate = rate;
+ if (k != NULL)
+ tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
+
+ ieee80211_radiotap_tx(vap, m);
+ }
+ flags = 0;
if (!ismcast) {
- if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0 ||
- !cap->cap_wmeParams[ac].wmep_noackPolicy)
- tx->flags |= htole32(WPI_TX_NEED_ACK);
- if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
- tx->flags |= htole32(WPI_TX_NEED_RTS|WPI_TX_FULL_TXOP);
- tx->rts_ntries = 7;
+ /* Unicast frame, check if an ACK is expected. */
+ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
+ IEEE80211_QOS_ACKPOLICY_NOACK)
+ flags |= WPI_TX_NEED_ACK;
+ }
+
+ /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
+ if (!ismcast) {
+ /* NB: Group frames are sent using CCK in 802.11b/g. */
+ if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
+ flags |= WPI_TX_NEED_RTS;
+ } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
+ WPI_RATE_IS_OFDM(rate)) {
+ if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+ flags |= WPI_TX_NEED_CTS;
+ else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
+ flags |= WPI_TX_NEED_RTS;
}
+
+ if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
+ flags |= WPI_TX_FULL_TXOP;
}
- /* pick a rate */
- tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) {
+
+ memset(&tx, 0, sizeof (struct wpi_cmd_data));
+ if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
- /* tell h/w to set timestamp in probe responses */
+
+ /* Tell HW to set timestamp in probe responses. */
if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
- tx->flags |= htole32(WPI_TX_INSERT_TSTAMP);
+ flags |= WPI_TX_INSERT_TSTAMP;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
- tx->timeout = htole16(3);
+ tx.timeout = htole16(3);
else
- tx->timeout = htole16(2);
- rate = tp->mgmtrate;
- } else if (ismcast) {
- rate = tp->mcastrate;
- } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
- rate = tp->ucastrate;
- } else {
- (void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ tx.timeout = htole16(2);
}
- tx->rate = wpi_plcp_signal(rate);
- /* be very persistant at sending frames out */
-#if 0
- tx->data_ntries = tp->maxretry;
-#else
- tx->data_ntries = 15; /* XXX way too high */
-#endif
+ if (ismcast || type != IEEE80211_FC0_TYPE_DATA)
+ tx.id = WPI_ID_BROADCAST;
+ else {
+ if (wn->id == WPI_ID_UNDEFINED &&
+ (vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_AHDEMO)) {
+ error = wpi_add_ibss_node(sc, ni);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not add IBSS node, error %d\n",
+ __func__, error);
+ goto fail;
+ }
+ }
+
+ if (wn->id == WPI_ID_UNDEFINED) {
+ device_printf(sc->sc_dev,
+ "%s: undefined node id\n", __func__);
+ error = EINVAL;
+ goto fail;
+ }
+
+ tx.id = wn->id;
+ }
+
+ if (type != IEEE80211_FC0_TYPE_MGT)
+ tx.data_ntries = tp->maxretry;
+
+ tx.len = htole16(totlen);
+ tx.flags = htole32(flags);
+ tx.plcp = rate2plcp(rate);
+ tx.tid = tid;
+ tx.lifetime = htole32(WPI_LIFETIME_INFINITE);
+ tx.ofdm_mask = 0xff;
+ tx.cck_mask = 0x0f;
+ tx.rts_ntries = 7;
+
+ if (k != NULL && k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+ if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) {
+ tx.security = WPI_CIPHER_CCMP;
+ memcpy(tx.key, k->wk_key, k->wk_keylen);
+ }
+ }
+
+ tx_data.data = &tx;
+ tx_data.ni = ni;
+ tx_data.m = m;
+ tx_data.size = sizeof(tx);
+ tx_data.code = WPI_CMD_TX_DATA;
+ tx_data.ac = ac;
+
+ return wpi_cmd2(sc, &tx_data);
+
+fail: m_freem(m);
+ return error;
+}
+
+static int
+wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+ const struct ieee80211_bpf_params *params)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame *wh;
+ struct wpi_cmd_data tx;
+ struct wpi_buf tx_data;
+ uint32_t flags;
+ uint8_t type;
+ int ac, rate, totlen;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ totlen = m->m_pkthdr.len;
+
+ ac = params->ibp_pri & 3;
+
+ /* Choose a TX rate index. */
+ rate = params->ibp_rate0;
+
+ flags = 0;
+ if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
+ flags |= WPI_TX_NEED_ACK;
+ if (params->ibp_flags & IEEE80211_BPF_RTS)
+ flags |= WPI_TX_NEED_RTS;
+ if (params->ibp_flags & IEEE80211_BPF_CTS)
+ flags |= WPI_TX_NEED_CTS;
+ if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
+ flags |= WPI_TX_FULL_TXOP;
if (ieee80211_radiotap_active_vap(vap)) {
struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
+
tap->wt_flags = 0;
tap->wt_rate = rate;
- tap->wt_hwqueue = ac;
- if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
- tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
- ieee80211_radiotap_tx(vap, m0);
+ ieee80211_radiotap_tx(vap, m);
}
- /* save and trim IEEE802.11 header */
- m_copydata(m0, 0, hdrlen, (caddr_t)&tx->wh);
- m_adj(m0, hdrlen);
+ memset(&tx, 0, sizeof (struct wpi_cmd_data));
+ if (type == IEEE80211_FC0_TYPE_MGT) {
+ uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
- error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0 && error != EFBIG) {
- device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
- error);
- m_freem(m0);
- return error;
+ /* Tell HW to set timestamp in probe responses. */
+ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+ flags |= WPI_TX_INSERT_TSTAMP;
+ if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
+ subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
+ tx.timeout = htole16(3);
+ else
+ tx.timeout = htole16(2);
}
- if (error != 0) {
- /* XXX use m_collapse */
- mnew = m_defrag(m0, M_NOWAIT);
- if (mnew == NULL) {
- device_printf(sc->sc_dev,
- "could not defragment mbuf\n");
- m_freem(m0);
- return ENOBUFS;
- }
- m0 = mnew;
- error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
- m0, segs, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not map mbuf (error %d)\n", error);
- m_freem(m0);
- return error;
- }
- }
+ tx.len = htole16(totlen);
+ tx.flags = htole32(flags);
+ tx.plcp = rate2plcp(rate);
+ tx.id = WPI_ID_BROADCAST;
+ tx.lifetime = htole32(WPI_LIFETIME_INFINITE);
+ tx.rts_ntries = params->ibp_try1;
+ tx.data_ntries = params->ibp_try0;
- data->m = m0;
- data->ni = ni;
+ tx_data.data = &tx;
+ tx_data.ni = ni;
+ tx_data.m = m;
+ tx_data.size = sizeof(tx);
+ tx_data.code = WPI_CMD_TX_DATA;
+ tx_data.ac = ac;
- DPRINTFN(WPI_DEBUG_TX, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
- ring->qid, ring->cur, m0->m_pkthdr.len, nsegs));
+ return wpi_cmd2(sc, &tx_data);
+}
- /* first scatter/gather segment is used by the tx data command */
- desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 |
- (1 + nsegs) << 24);
- desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
- ring->cur * sizeof (struct wpi_tx_cmd));
- desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_data));
- for (i = 1; i <= nsegs; i++) {
- desc->segs[i].addr = htole32(segs[i - 1].ds_addr);
- desc->segs[i].len = htole32(segs[i - 1].ds_len);
+static int
+wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = ic->ic_ifp;
+ struct wpi_softc *sc = ifp->if_softc;
+ int error = 0;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ ieee80211_free_node(ni);
+ m_freem(m);
+ return ENETDOWN;
}
- bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
- BUS_DMASYNC_PREWRITE);
+ WPI_LOCK(sc);
+ if (params == NULL) {
+ /*
+ * Legacy path; interpret frame contents to decide
+ * precisely how to send the frame.
+ */
+ error = wpi_tx_data(sc, m, ni);
+ } else {
+ /*
+ * Caller supplied explicit parameters to use in
+ * sending the frame.
+ */
+ error = wpi_tx_data_raw(sc, m, ni, params);
+ }
+ WPI_UNLOCK(sc);
- ring->queued++;
+ if (error != 0) {
+ /* NB: m is reclaimed on tx failure */
+ ieee80211_free_node(ni);
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- /* kick ring */
- ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
- WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+
+ return error;
+ }
+
+ sc->sc_tx_timer = 5;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
@@ -2038,72 +2649,86 @@ wpi_start_locked(struct ifnet *ifp)
struct wpi_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- int ac;
WPI_LOCK_ASSERT(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ (ifp->if_drv_flags & IFF_DRV_OACTIVE))
return;
for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- ac = M_WME_GETAC(m);
- if (sc->txq[ac].queued > sc->txq[ac].count - 8) {
- /* there is no place left in this ring */
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ if (sc->qfullmsk != 0) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
- ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
- if (wpi_tx_data(sc, m, ni, ac) != 0) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (wpi_tx_data(sc, m, ni) != 0) {
+ WPI_UNLOCK(sc);
ieee80211_free_node(ni);
+ WPI_LOCK(sc);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- break;
- }
- sc->sc_tx_timer = 5;
+ } else
+ sc->sc_tx_timer = 5;
}
+
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
}
-static int
-wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
- const struct ieee80211_bpf_params *params)
+static void
+wpi_watchdog_rfkill(void *arg)
{
- struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct wpi_softc *sc = ifp->if_softc;
+ struct wpi_softc *sc = arg;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
- /* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- m_freem(m);
- ieee80211_free_node(ni);
- return ENETDOWN;
- }
- WPI_LOCK(sc);
+ DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n");
- /* management frames go into ring 0 */
- if (sc->txq[0].queued > sc->txq[0].count - 8) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- m_freem(m);
- WPI_UNLOCK(sc);
- ieee80211_free_node(ni);
- return ENOBUFS; /* XXX */
+ /* No need to lock firmware memory. */
+ if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) {
+ /* Radio kill switch is still off. */
+ callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
+ sc);
+ } else
+ ieee80211_runtask(ic, &sc->sc_radioon_task);
+}
+
+/**
+ * Called every second by the watchdog timer to check that the card
+ * is still alive.
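 * (A simple sketch of this pattern: decrement a per-device counter once per
 * second and schedule a reinit task when it reaches zero, as done below for
 * both the TX and scan timers.)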
+ */
+static void
+wpi_watchdog(void *arg)
+{
+ struct wpi_softc *sc = arg;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+
+ DPRINTF(sc, WPI_DEBUG_WATCHDOG, "Watchdog: tick\n");
+
+ if (sc->sc_tx_timer > 0) {
+ if (--sc->sc_tx_timer == 0) {
+ if_printf(ifp, "device timeout\n");
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ ieee80211_runtask(ic, &sc->sc_reinittask);
+ }
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- if (wpi_tx_data(sc, m, ni, 0) != 0)
- goto bad;
- sc->sc_tx_timer = 5;
- callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
+ if (sc->sc_scan_timer > 0) {
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ if (--sc->sc_scan_timer == 0 && vap != NULL) {
+ if_printf(ifp, "scan timeout\n");
+ ieee80211_cancel_scan(vap);
+ ieee80211_runtask(ic, &sc->sc_reinittask);
+ }
+ }
- WPI_UNLOCK(sc);
- return 0;
-bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- WPI_UNLOCK(sc);
- ieee80211_free_node(ni);
- return EIO; /* XXX */
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
}
static int
@@ -2111,30 +2736,36 @@ wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct wpi_softc *sc = ifp->if_softc;
struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ int error = 0, startall = 0, stop = 0;
switch (cmd) {
+ case SIOCGIFADDR:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
case SIOCSIFFLAGS:
WPI_LOCK(sc);
- if ((ifp->if_flags & IFF_UP)) {
+ if (ifp->if_flags & IFF_UP) {
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- wpi_init_locked(sc, 0);
- startall = 1;
+ wpi_init_locked(sc);
+ if (WPI_READ(sc, WPI_GP_CNTRL) &
+ WPI_GP_CNTRL_RFKILL)
+ startall = 1;
+ else
+ stop = 1;
}
- } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) ||
- (sc->flags & WPI_FLAG_HW_RADIO_OFF))
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
wpi_stop_locked(sc);
WPI_UNLOCK(sc);
if (startall)
ieee80211_start_all(ic);
+ else if (vap != NULL && stop)
+ ieee80211_stop(vap);
break;
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
default:
error = EINVAL;
break;
@@ -2143,64 +2774,58 @@ wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
/*
- * Extract various information from EEPROM.
- */
-static void
-wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
-{
- int i;
-
- /* read the hardware capabilities, revision and SKU type */
- wpi_read_prom_data(sc, WPI_EEPROM_CAPABILITIES, &sc->cap,1);
- wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,2);
- wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1);
-
- /* read the regulatory domain */
- wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 4);
-
- /* read in the hw MAC address */
- wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 6);
-
- /* read the list of authorized channels */
- for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
- wpi_read_eeprom_channels(sc,i);
-
- /* read the power level calibration info for each group */
- for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
- wpi_read_eeprom_group(sc,i);
-}
-
-/*
* Send a command to the firmware.
*/
static int
-wpi_cmd(struct wpi_softc *sc, int code, const void *buf, int size, int async)
+wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size,
+ int async)
{
- struct wpi_tx_ring *ring = &sc->cmdq;
+ struct wpi_tx_ring *ring = &sc->txq[4];
struct wpi_tx_desc *desc;
+ struct wpi_tx_data *data;
struct wpi_tx_cmd *cmd;
+ struct mbuf *m;
+ bus_addr_t paddr;
+ int totlen, error;
-#ifdef WPI_DEBUG
- if (!async) {
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ if (async == 0)
WPI_LOCK_ASSERT(sc);
- }
-#endif
- DPRINTFN(WPI_DEBUG_CMD,("wpi_cmd %d size %d async %d\n", code, size,
- async));
+ DPRINTF(sc, WPI_DEBUG_CMD, "wpi_cmd %s size %zu async %d\n",
+ wpi_cmd_str(code), size, async);
if (sc->flags & WPI_FLAG_BUSY) {
device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n",
__func__, code);
return EAGAIN;
}
- sc->flags|= WPI_FLAG_BUSY;
-
- KASSERT(size <= sizeof cmd->data, ("command %d too large: %d bytes",
- code, size));
+ sc->flags |= WPI_FLAG_BUSY;
desc = &ring->desc[ring->cur];
- cmd = &ring->cmd[ring->cur];
+ data = &ring->data[ring->cur];
+ totlen = 4 + size;
+
+ if (size > sizeof cmd->data) {
+ /* Command is too large to fit in a descriptor. */
+ if (totlen > MCLBYTES)
+ return EINVAL;
+ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
+ if (m == NULL)
+ return ENOMEM;
+ cmd = mtod(m, struct wpi_tx_cmd *);
+ error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
+ totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_freem(m);
+ return error;
+ }
+ data->m = m;
+ } else {
+ cmd = &ring->cmd[ring->cur];
+ paddr = data->cmd_paddr;
+ }
cmd->code = code;
cmd->flags = 0;
@@ -2208,56 +2833,36 @@ wpi_cmd(struct wpi_softc *sc, int code, const void *buf, int size, int async)
cmd->idx = ring->cur;
memcpy(cmd->data, buf, size);
- desc->flags = htole32(WPI_PAD32(size) << 28 | 1 << 24);
- desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
- ring->cur * sizeof (struct wpi_tx_cmd));
- desc->segs[0].len = htole32(4 + size);
+ desc->nsegs = 1 + (WPI_PAD32(size) << 4);
+ desc->segs[0].addr = htole32(paddr);
+ desc->segs[0].len = htole32(totlen);
- /* kick cmd ring */
- ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
- WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
-
- if (async) {
- sc->flags &= ~ WPI_FLAG_BUSY;
- return 0;
+ if (size > sizeof cmd->data) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
+ BUS_DMASYNC_PREWRITE);
}
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
- return msleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
-}
+ /* Kick command ring. */
+ ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
+ wpi_update_tx_ring(sc, ring);
-static int
-wpi_wme_update(struct ieee80211com *ic)
-{
-#define WPI_EXP2(v) htole16((1 << (v)) - 1)
-#define WPI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v))
- struct wpi_softc *sc = ic->ic_ifp->if_softc;
- const struct wmeParams *wmep;
- struct wpi_wme_setup wme;
- int ac;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- /* don't override default WME values if WME is not actually enabled */
- if (!(ic->ic_flags & IEEE80211_F_WME))
+ if (async) {
+ sc->flags &= ~WPI_FLAG_BUSY;
return 0;
-
- wme.flags = 0;
- for (ac = 0; ac < WME_NUM_AC; ac++) {
- wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
- wme.ac[ac].aifsn = wmep->wmep_aifsn;
- wme.ac[ac].cwmin = WPI_EXP2(wmep->wmep_logcwmin);
- wme.ac[ac].cwmax = WPI_EXP2(wmep->wmep_logcwmax);
- wme.ac[ac].txop = WPI_USEC(wmep->wmep_txopLimit);
-
- DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
- "txop=%d\n", ac, wme.ac[ac].aifsn, wme.ac[ac].cwmin,
- wme.ac[ac].cwmax, wme.ac[ac].txop));
}
- return wpi_cmd(sc, WPI_CMD_SET_WME, &wme, sizeof wme, 1);
-#undef WPI_USEC
-#undef WPI_EXP2
+
+ return msleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
}
/*
- * Configure h/w multi-rate retries.
+ * Configure HW multi-rate retries.
*/
static int
wpi_mrr_setup(struct wpi_softc *sc)
@@ -2267,360 +2872,769 @@ wpi_mrr_setup(struct wpi_softc *sc)
struct wpi_mrr_setup mrr;
int i, error;
- memset(&mrr, 0, sizeof (struct wpi_mrr_setup));
-
- /* CCK rates (not used with 802.11a) */
- for (i = WPI_CCK1; i <= WPI_CCK11; i++) {
+ /* CCK rates (not used with 802.11a). */
+ for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) {
mrr.rates[i].flags = 0;
- mrr.rates[i].signal = wpi_ridx_to_plcp[i];
- /* fallback to the immediate lower CCK rate (if any) */
- mrr.rates[i].next = (i == WPI_CCK1) ? WPI_CCK1 : i - 1;
- /* try one time at this rate before falling back to "next" */
+ mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
+		/* Fall back to the next lower CCK rate (if any). */
+ mrr.rates[i].next =
+ (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1;
+ /* Try one time at this rate before falling back to "next". */
mrr.rates[i].ntries = 1;
}
-
- /* OFDM rates (not used with 802.11b) */
- for (i = WPI_OFDM6; i <= WPI_OFDM54; i++) {
+ /* OFDM rates (not used with 802.11b). */
+ for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) {
mrr.rates[i].flags = 0;
- mrr.rates[i].signal = wpi_ridx_to_plcp[i];
- /* fallback to the immediate lower OFDM rate (if any) */
- /* we allow fallback from OFDM/6 to CCK/2 in 11b/g mode */
- mrr.rates[i].next = (i == WPI_OFDM6) ?
+ mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
+		/* Fall back to the next lower rate (if any). */
+ /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */
+ mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ?
((ic->ic_curmode == IEEE80211_MODE_11A) ?
- WPI_OFDM6 : WPI_CCK2) :
+ WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) :
i - 1;
- /* try one time at this rate before falling back to "next" */
+ /* Try one time at this rate before falling back to "next". */
mrr.rates[i].ntries = 1;
}
-
- /* setup MRR for control frames */
- mrr.which = WPI_MRR_CTL;
+ /* Setup MRR for control frames. */
+ mrr.which = htole32(WPI_MRR_CTL);
error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not setup MRR for control frames\n");
return error;
}
-
- /* setup MRR for data frames */
- mrr.which = WPI_MRR_DATA;
+ /* Setup MRR for data frames. */
+ mrr.which = htole32(WPI_MRR_DATA);
error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not setup MRR for data frames\n");
return error;
}
-
return 0;
}
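
For reference, the fallback chains built above simply step down one rate index at a time, with OFDM/6 falling back to CCK/2 on 2 GHz channels. A small standalone sketch that prints such a chain; the rate index values are illustrative, not the firmware's.

/* Illustrative only -- multi-rate-retry fallback chain. */
#include <stdio.h>

#define RIDX_CCK1	0
#define RIDX_CCK2	1
#define RIDX_CCK11	3
#define RIDX_OFDM6	4
#define RIDX_OFDM54	11

int
main(void)
{
	int next[RIDX_OFDM54 + 1], i, is_11a = 0;

	for (i = RIDX_CCK1; i <= RIDX_CCK11; i++)
		next[i] = (i == RIDX_CCK1) ? RIDX_CCK1 : i - 1;
	for (i = RIDX_OFDM6; i <= RIDX_OFDM54; i++)
		next[i] = (i == RIDX_OFDM6) ?
		    (is_11a ? RIDX_OFDM6 : RIDX_CCK2) : i - 1;

	/* Walk the chain starting from the highest OFDM rate. */
	for (i = RIDX_OFDM54; i != next[i]; i = next[i])
		printf("%d -> ", i);
	printf("%d\n", i);
	return (0);
}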
+static int
+wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct wpi_node *wn = (void *)ni;
+ struct wpi_node_info node;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ if (wn->id == WPI_ID_UNDEFINED)
+ return EINVAL;
+
+ memset(&node, 0, sizeof node);
+ IEEE80211_ADDR_COPY(node.macaddr, ni->ni_bssid);
+ node.id = wn->id;
+ node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
+ wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
+ node.action = htole32(WPI_ACTION_SET_RATE);
+ node.antenna = WPI_ANTENNA_BOTH;
+
+ return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
+}
+
+/*
+ * Broadcast node is used to send group-addressed and management frames.
+ */
+static int
+wpi_add_broadcast_node(struct wpi_softc *sc, int async)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct wpi_node_info node;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ memset(&node, 0, sizeof node);
+ IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
+ node.id = WPI_ID_BROADCAST;
+ node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
+ wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
+ node.action = htole32(WPI_ACTION_SET_RATE);
+ node.antenna = WPI_ANTENNA_BOTH;
+
+ return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async);
+}
+
+static int
+wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni)
+{
+ struct wpi_node *wn = (void *)ni;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ if (wn->id != WPI_ID_UNDEFINED)
+ return EINVAL;
+
+ wn->id = alloc_unrl(sc->sc_unr);
+
+ if (wn->id == (uint8_t)-1)
+ return ENOBUFS;
+
+ return wpi_add_node(sc, ni);
+}
+
+static void
+wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni)
+{
+ struct wpi_node *wn = (void *)ni;
+ struct wpi_cmd_del_node node;
+ int error;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ if (wn->id == WPI_ID_UNDEFINED) {
+ device_printf(sc->sc_dev, "%s: undefined node id passed\n",
+ __func__);
+ return;
+ }
+
+ memset(&node, 0, sizeof node);
+ IEEE80211_ADDR_COPY(node.macaddr, ni->ni_bssid);
+ node.count = 1;
+
+ error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not delete node %u, error %d\n", __func__,
+ wn->id, error);
+ }
+}
+
+static int
+wpi_updateedca(struct ieee80211com *ic)
+{
+#define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
+ struct wpi_softc *sc = ic->ic_ifp->if_softc;
+ struct wpi_edca_params cmd;
+ int aci, error;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ memset(&cmd, 0, sizeof cmd);
+ cmd.flags = htole32(WPI_EDCA_UPDATE);
+ for (aci = 0; aci < WME_NUM_AC; aci++) {
+ const struct wmeParams *ac =
+ &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
+ cmd.ac[aci].aifsn = ac->wmep_aifsn;
+ cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin));
+ cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax));
+ cmd.ac[aci].txoplimit =
+ htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
+
+ DPRINTF(sc, WPI_DEBUG_EDCA,
+ "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
+ "txoplimit=%d\n", aci, cmd.ac[aci].aifsn,
+ cmd.ac[aci].cwmin, cmd.ac[aci].cwmax,
+ cmd.ac[aci].txoplimit);
+ }
+ IEEE80211_UNLOCK(ic);
+ WPI_LOCK(sc);
+ error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
+ WPI_UNLOCK(sc);
+ IEEE80211_LOCK(ic);
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
+ return error;
+#undef WPI_EXP2
+}
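
The WPI_EXP2 macro above converts the logarithmic contention-window fields carried by net80211 (ECWmin/ECWmax) into the linear values the firmware expects, i.e. CW = 2^ECW - 1. A tiny sketch of that conversion for four access categories, using made-up ECW exponents.

/* Illustrative only -- ECW exponents to linear contention windows. */
#include <stdio.h>

#define EXP2(x)	((1 << (x)) - 1)	/* CW = 2^ECW - 1 */

int
main(void)
{
	/* Hypothetical ECWmin/ECWmax pairs for BE, BK, VI, VO. */
	const int ecwmin[4] = { 4, 4, 3, 2 };
	const int ecwmax[4] = { 10, 10, 4, 3 };
	int aci;

	for (aci = 0; aci < 4; aci++)
		printf("AC%d: cwmin=%d cwmax=%d\n",
		    aci, EXP2(ecwmin[aci]), EXP2(ecwmax[aci]));
	return (0);
}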
+
+static void
+wpi_set_promisc(struct wpi_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ uint32_t promisc_filter;
+
+ promisc_filter = WPI_FILTER_PROMISC | WPI_FILTER_CTL;
+
+ if (ifp->if_flags & IFF_PROMISC)
+ sc->rxon.filter |= htole32(promisc_filter);
+ else
+ sc->rxon.filter &= ~htole32(promisc_filter);
+}
+
+static void
+wpi_update_promisc(struct ifnet *ifp)
+{
+ struct wpi_softc *sc = ifp->if_softc;
+
+ wpi_set_promisc(sc);
+
+ WPI_LOCK(sc);
+ if (wpi_send_rxon(sc, 1, 1) != 0) {
+ device_printf(sc->sc_dev, "%s: could not send RXON\n",
+ __func__);
+ }
+ WPI_UNLOCK(sc);
+}
+
+static void
+wpi_update_mcast(struct ifnet *ifp)
+{
+ /* Ignore */
+}
+
static void
wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
struct wpi_cmd_led led;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
led.which = which;
led.unit = htole32(100000); /* on/off in unit of 100ms */
led.off = off;
led.on = on;
-
(void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
}
-static void
-wpi_enable_tsf(struct wpi_softc *sc, struct ieee80211_node *ni)
+static int
+wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
{
- struct wpi_cmd_tsf tsf;
+ struct wpi_cmd_timing cmd;
uint64_t val, mod;
- memset(&tsf, 0, sizeof tsf);
- memcpy(&tsf.tstamp, ni->ni_tstamp.data, 8);
- tsf.bintval = htole16(ni->ni_intval);
- tsf.lintval = htole16(10);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ memset(&cmd, 0, sizeof cmd);
+ memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
+ cmd.bintval = htole16(ni->ni_intval);
+ cmd.lintval = htole16(10);
- /* compute remaining time until next beacon */
- val = (uint64_t)ni->ni_intval * 1024; /* msec -> usec */
- mod = le64toh(tsf.tstamp) % val;
- tsf.binitval = htole32((uint32_t)(val - mod));
+ /* Compute remaining time until next beacon. */
+ val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
+ mod = le64toh(cmd.tstamp) % val;
+ cmd.binitval = htole32((uint32_t)(val - mod));
- if (wpi_cmd(sc, WPI_CMD_TSF, &tsf, sizeof tsf, 1) != 0)
- device_printf(sc->sc_dev, "could not enable TSF\n");
+ DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
+ ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
+
+ return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
+}
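
The binitval computation above derives the time remaining until the next beacon from the AP's TSF timestamp: the beacon interval is converted from TU to microseconds (1 TU = 1024 us) and the TSF modulo that interval gives the offset into the current beacon period. A minimal sketch with hypothetical numbers.

/* Illustrative only -- time remaining until the next beacon. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tstamp = 123456789;	/* hypothetical TSF, in microseconds */
	uint64_t intval = 100;		/* beacon interval, in TU */
	uint64_t val, mod;

	val = intval * 1024;		/* TU -> microseconds */
	mod = tstamp % val;		/* offset into current beacon period */
	printf("next beacon in %" PRIu64 " us\n", val - mod);
	return (0);
}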
+
+/*
+ * This function is called periodically (every 60 seconds) to adjust output
+ * power to temperature changes.
+ */
+static void
+wpi_power_calibration(struct wpi_softc *sc)
+{
+ int temp;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ /* Update sensor data. */
+ temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
+ DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);
+
+ /* Sanity-check read value. */
+ if (temp < -260 || temp > 25) {
+ /* This can't be correct, ignore. */
+ DPRINTF(sc, WPI_DEBUG_TEMP,
+ "out-of-range temperature reported: %d\n", temp);
+ return;
+ }
+
+ DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);
+
+ /* Adjust Tx power if need be. */
+ if (abs(temp - sc->temp) <= 6)
+ return;
+
+ sc->temp = temp;
+
+ if (wpi_set_txpower(sc, 1) != 0) {
+		/* Just warn; too bad for the automatic calibration. */
+		device_printf(sc->sc_dev, "could not adjust Tx power\n");
+ }
}
-#if 0
/*
- * Build a beacon frame that the firmware will broadcast periodically in
- * IBSS or HostAP modes.
+ * Set TX power for current channel.
*/
static int
-wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
+wpi_set_txpower(struct wpi_softc *sc, int async)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
- struct wpi_tx_ring *ring = &sc->cmdq;
- struct wpi_tx_desc *desc;
- struct wpi_tx_data *data;
- struct wpi_tx_cmd *cmd;
- struct wpi_cmd_beacon *bcn;
- struct ieee80211_beacon_offsets bo;
- struct mbuf *m0;
- bus_addr_t physaddr;
- int error;
+ struct ieee80211_channel *ch;
+ struct wpi_power_group *group;
+ struct wpi_cmd_txpower cmd;
+ uint8_t chan;
+ int idx, i;
- desc = &ring->desc[ring->cur];
- data = &ring->data[ring->cur];
+ /* Retrieve current channel from last RXON. */
+ chan = sc->rxon.chan;
+ ch = &ic->ic_channels[chan];
- m0 = ieee80211_beacon_alloc(ic, ni, &bo);
- if (m0 == NULL) {
- device_printf(sc->sc_dev, "could not allocate beacon frame\n");
- return ENOMEM;
+ /* Find the TX power group to which this channel belongs. */
+ if (IEEE80211_IS_CHAN_5GHZ(ch)) {
+ for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
+ if (chan <= group->chan)
+ break;
+ } else
+ group = &sc->groups[0];
+
+ memset(&cmd, 0, sizeof cmd);
+ cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
+ cmd.chan = htole16(chan);
+
+ /* Set TX power for all OFDM and CCK rates. */
+ for (i = 0; i <= WPI_RIDX_MAX ; i++) {
+ /* Retrieve TX power for this channel/rate. */
+ idx = wpi_get_power_index(sc, group, ch, i);
+
+ cmd.rates[i].plcp = wpi_ridx_to_plcp[i];
+
+ if (IEEE80211_IS_CHAN_5GHZ(ch)) {
+ cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
+ cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
+ } else {
+ cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
+ cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
+ }
+ DPRINTF(sc, WPI_DEBUG_TEMP,
+ "chan %d/ridx %d: power index %d\n", chan, i, idx);
}
- cmd = &ring->cmd[ring->cur];
- cmd->code = WPI_CMD_SET_BEACON;
- cmd->flags = 0;
- cmd->qid = ring->qid;
- cmd->idx = ring->cur;
+ return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
+}
- bcn = (struct wpi_cmd_beacon *)cmd->data;
- memset(bcn, 0, sizeof (struct wpi_cmd_beacon));
- bcn->id = WPI_ID_BROADCAST;
- bcn->ofdm_mask = 0xff;
- bcn->cck_mask = 0x0f;
- bcn->lifetime = htole32(WPI_LIFETIME_INFINITE);
- bcn->len = htole16(m0->m_pkthdr.len);
- bcn->rate = (ic->ic_curmode == IEEE80211_MODE_11A) ?
- wpi_plcp_signal(12) : wpi_plcp_signal(2);
- bcn->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP);
-
- /* save and trim IEEE802.11 header */
- m_copydata(m0, 0, sizeof (struct ieee80211_frame), (caddr_t)&bcn->wh);
- m_adj(m0, sizeof (struct ieee80211_frame));
-
- /* assume beacon frame is contiguous */
- error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m0, void *),
- m0->m_pkthdr.len, wpi_dma_map_addr, &physaddr, 0);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not map beacon\n");
- m_freem(m0);
- return error;
+/*
+ * Determine Tx power index for a given channel/rate combination.
+ * This takes into account the regulatory information from EEPROM and the
+ * current temperature.
+ */
+static int
+wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
+ struct ieee80211_channel *c, int ridx)
+{
+/* Fixed-point arithmetic division using an n-bit fractional part. */
+#define fdivround(a, b, n) \
+ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
+
+/* Linear interpolation. */
+#define interpolate(x, x1, y1, x2, y2, n) \
+ ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
+
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct wpi_power_sample *sample;
+ int pwr, idx;
+ u_int chan;
+
+ /* Get channel number. */
+ chan = ieee80211_chan2ieee(ic, c);
+
+ /* Default TX power is group maximum TX power minus 3dB. */
+ pwr = group->maxpwr / 2;
+
+ /* Decrease TX power for highest OFDM rates to reduce distortion. */
+ switch (ridx) {
+ case WPI_RIDX_OFDM36:
+ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 0 : 5;
+ break;
+ case WPI_RIDX_OFDM48:
+ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 7 : 10;
+ break;
+ case WPI_RIDX_OFDM54:
+ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 9 : 12;
+ break;
}
- data->m = m0;
+ /* Never exceed the channel maximum allowed TX power. */
+ pwr = min(pwr, sc->maxpwr[chan]);
- /* first scatter/gather segment is used by the beacon command */
- desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 | 2 << 24);
- desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
- ring->cur * sizeof (struct wpi_tx_cmd));
- desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_beacon));
- desc->segs[1].addr = htole32(physaddr);
- desc->segs[1].len = htole32(m0->m_pkthdr.len);
+ /* Retrieve TX power index into gain tables from samples. */
+ for (sample = group->samples; sample < &group->samples[3]; sample++)
+ if (pwr > sample[1].power)
+ break;
+ /* Fixed-point linear interpolation using a 19-bit fractional part. */
+ idx = interpolate(pwr, sample[0].power, sample[0].index,
+ sample[1].power, sample[1].index, 19);
- /* kick cmd ring */
- ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
- WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
+ /*-
+ * Adjust power index based on current temperature:
+ * - if cooler than factory-calibrated: decrease output power
+ * - if warmer than factory-calibrated: increase output power
+ */
+ idx -= (sc->temp - group->temp) * 11 / 100;
- return 0;
+ /* Decrease TX power for CCK rates (-5dB). */
+ if (ridx >= WPI_RIDX_CCK1)
+ idx += 10;
+
+ /* Make sure idx stays in a valid range. */
+ if (idx < 0)
+ return 0;
+ if (idx > WPI_MAX_PWR_INDEX)
+ return WPI_MAX_PWR_INDEX;
+ return idx;
+
+#undef interpolate
+#undef fdivround
}
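
The interpolate/fdivround pair used above is integer-only linear interpolation: the slope between two calibration samples is evaluated in fixed point with an n-bit fractional part and rounded, which avoids floating point in the kernel. A small userland sketch with made-up power samples.

/* Illustrative only -- fixed-point linear interpolation as used above. */
#include <stdio.h>

#define fdivround(a, b, n) \
	((((1 << (n)) * (a)) / (b) + (1 << (n)) / 2) / (1 << (n)))
#define interpolate(x, x1, y1, x2, y2, n) \
	((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), (n)))

int
main(void)
{
	/* Hypothetical calibration samples: (power, gain index). */
	int p1 = 10, i1 = 60, p2 = 16, i2 = 40;
	int pwr = 13;

	/* 19-bit fractional part, as in the driver. */
	printf("power %d -> index %d\n", pwr,
	    interpolate(pwr, p1, i1, p2, i2, 19));
	return (0);
}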
-#endif
+/*
+ * Set STA mode power saving level (between 0 and 5).
+ * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
+ */
static int
-wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
+wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
+{
+ struct wpi_pmgt_cmd cmd;
+ const struct wpi_pmgt *pmgt;
+ uint32_t max, skip_dtim;
+ uint32_t reg;
+ int i;
+
+ DPRINTF(sc, WPI_DEBUG_PWRSAVE,
+ "%s: dtim=%d, level=%d, async=%d\n",
+ __func__, dtim, level, async);
+
+ /* Select which PS parameters to use. */
+ if (dtim <= 10)
+ pmgt = &wpi_pmgt[0][level];
+ else
+ pmgt = &wpi_pmgt[1][level];
+
+ memset(&cmd, 0, sizeof cmd);
+ if (level != 0) /* not CAM */
+ cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
+ /* Retrieve PCIe Active State Power Management (ASPM). */
+ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
+ if (!(reg & 0x1)) /* L0s Entry disabled. */
+ cmd.flags |= htole16(WPI_PS_PCI_PMGT);
+
+ cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
+ cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);
+
+ if (dtim == 0) {
+ dtim = 1;
+ skip_dtim = 0;
+ } else
+ skip_dtim = pmgt->skip_dtim;
+
+ if (skip_dtim != 0) {
+ cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
+ max = pmgt->intval[4];
+ if (max == (uint32_t)-1)
+ max = dtim * (skip_dtim + 1);
+ else if (max > dtim)
+ max = (max / dtim) * dtim;
+ } else
+ max = dtim;
+
+ for (i = 0; i < 5; i++)
+ cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
+
+ return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
+}
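
The skip_dtim handling above bounds how long the device may sleep: when DTIM skipping is allowed, the maximum interval is either dtim * (skip_dtim + 1) or the table limit rounded down to a multiple of the DTIM period, and each of the five sleep intervals is then clamped to that maximum. A standalone sketch of that clamping with hypothetical table values.

/* Illustrative only -- clamping of power-save sleep intervals. */
#include <stdint.h>
#include <stdio.h>

#define NINTVAL		5
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	/* Hypothetical per-level table: five intervals, in beacon periods. */
	uint32_t intval[NINTVAL] = { 1, 2, 2, 2, 0xffffffff };
	uint32_t dtim = 3, skip_dtim = 1, max, i;

	if (skip_dtim != 0) {
		max = intval[4];
		if (max == 0xffffffff)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = (max / dtim) * dtim;
	} else
		max = dtim;

	for (i = 0; i < NINTVAL; i++)
		printf("intval[%u] = %u\n", i, MIN(max, intval[i]));
	return (0);
}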
+
+static int
+wpi_send_btcoex(struct wpi_softc *sc)
+{
+ struct wpi_bluetooth cmd;
+
+ memset(&cmd, 0, sizeof cmd);
+ cmd.flags = WPI_BT_COEX_MODE_4WIRE;
+ cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
+ cmd.max_kill = WPI_BT_MAX_KILL_DEF;
+ DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
+ __func__);
+ return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
+}
+
+static int
+wpi_send_rxon(struct wpi_softc *sc, int assoc, int async)
{
- struct ieee80211com *ic = vap->iv_ic;
- struct ieee80211_node *ni = vap->iv_bss;
- struct wpi_node_info node;
int error;
+ if (assoc && (sc->rxon.filter & htole32(WPI_FILTER_BSS))) {
+ struct wpi_assoc rxon_assoc;
- /* update adapter's configuration */
- sc->config.associd = 0;
- sc->config.filter &= ~htole32(WPI_FILTER_BSS);
- IEEE80211_ADDR_COPY(sc->config.bssid, ni->ni_bssid);
- sc->config.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
- if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
- sc->config.flags |= htole32(WPI_CONFIG_AUTO |
- WPI_CONFIG_24GHZ);
- } else {
- sc->config.flags &= ~htole32(WPI_CONFIG_AUTO |
- WPI_CONFIG_24GHZ);
- }
- if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
- sc->config.cck_mask = 0;
- sc->config.ofdm_mask = 0x15;
- } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
- sc->config.cck_mask = 0x03;
- sc->config.ofdm_mask = 0;
+ rxon_assoc.flags = sc->rxon.flags;
+ rxon_assoc.filter = sc->rxon.filter;
+ rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
+ rxon_assoc.cck_mask = sc->rxon.cck_mask;
+ rxon_assoc.reserved = 0;
+
+ error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc,
+ sizeof (struct wpi_assoc), async);
} else {
- /* XXX assume 802.11b/g */
- sc->config.cck_mask = 0x0f;
- sc->config.ofdm_mask = 0x15;
+ error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
+ sizeof (struct wpi_rxon), async);
}
-
- DPRINTF(("config chan %d flags %x cck %x ofdm %x\n", sc->config.chan,
- sc->config.flags, sc->config.cck_mask, sc->config.ofdm_mask));
- error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config,
- sizeof (struct wpi_config), 1);
if (error != 0) {
- device_printf(sc->sc_dev, "could not configure\n");
+ device_printf(sc->sc_dev, "RXON command failed, error %d\n",
+ error);
return error;
}
- /* configuration has changed, set Tx power accordingly */
- if ((error = wpi_set_txpower(sc, ni->ni_chan, 1)) != 0) {
- device_printf(sc->sc_dev, "could not set Tx power\n");
+ /* Configuration has changed, set Tx power accordingly. */
+ if ((error = wpi_set_txpower(sc, async)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not set TX power, error %d\n", __func__, error);
return error;
}
- /* add default node */
- memset(&node, 0, sizeof node);
- IEEE80211_ADDR_COPY(node.bssid, ni->ni_bssid);
- node.id = WPI_ID_BSS;
- node.rate = (ic->ic_curmode == IEEE80211_MODE_11A) ?
- wpi_plcp_signal(12) : wpi_plcp_signal(2);
- node.action = htole32(WPI_ACTION_SET_RATE);
- node.antenna = WPI_ANTENNA_BOTH;
- error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
- if (error != 0)
- device_printf(sc->sc_dev, "could not add BSS node\n");
+ if (!(sc->rxon.filter & htole32(WPI_FILTER_BSS))) {
+ /* Add broadcast node. */
+ error = wpi_add_broadcast_node(sc, async);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "could not add broadcast node, error %d\n", error);
+ return error;
+ }
+ }
- return (error);
+ return 0;
}
+/**
+ * Configure the card to listen on a particular channel; this transitions
+ * the card into a state where it can receive frames from remote devices.
+ */
static int
-wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
+wpi_config(struct wpi_softc *sc)
{
- struct ieee80211com *ic = vap->iv_ic;
- struct ieee80211_node *ni = vap->iv_bss;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ uint32_t flags;
int error;
- if (vap->iv_opmode == IEEE80211_M_MONITOR) {
- /* link LED blinks while monitoring */
- wpi_set_led(sc, WPI_LED_LINK, 5, 5);
- return 0;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ /* Set power saving level to CAM during initialization. */
+ if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not set power saving level\n", __func__);
+ return error;
}
- wpi_enable_tsf(sc, ni);
+ /* Configure bluetooth coexistence. */
+ if ((error = wpi_send_btcoex(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "could not configure bluetooth coexistence\n");
+ return error;
+ }
- /* update adapter's configuration */
- sc->config.associd = htole16(ni->ni_associd & ~0xc000);
- /* short preamble/slot time are negotiated when associating */
- sc->config.flags &= ~htole32(WPI_CONFIG_SHPREAMBLE |
- WPI_CONFIG_SHSLOT);
- if (ic->ic_flags & IEEE80211_F_SHSLOT)
- sc->config.flags |= htole32(WPI_CONFIG_SHSLOT);
- if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
- sc->config.flags |= htole32(WPI_CONFIG_SHPREAMBLE);
- sc->config.filter |= htole32(WPI_FILTER_BSS);
+ /* Configure adapter. */
+ memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
+ IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
- /* XXX put somewhere HC_QOS_SUPPORT_ASSOC + HC_IBSS_START */
+ /* Set default channel. */
+ sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
+ sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
+ if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+ sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
- DPRINTF(("config chan %d flags %x\n", sc->config.chan,
- sc->config.flags));
- error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct
- wpi_config), 1);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not update configuration\n");
+ switch (ic->ic_opmode) {
+ case IEEE80211_M_STA:
+ sc->rxon.mode = WPI_MODE_STA;
+ sc->rxon.filter = htole32(WPI_FILTER_MULTICAST);
+ break;
+ case IEEE80211_M_IBSS:
+ sc->rxon.mode = WPI_MODE_IBSS;
+ sc->rxon.filter = htole32(WPI_FILTER_BEACON |
+ WPI_FILTER_MULTICAST);
+ break;
+ /* XXX workaround for passive channels selection */
+ case IEEE80211_M_AHDEMO:
+ sc->rxon.filter = htole32(WPI_FILTER_MULTICAST);
+ /* FALLTHROUGH */
+ case IEEE80211_M_HOSTAP:
+ sc->rxon.mode = WPI_MODE_HOSTAP;
+ break;
+ case IEEE80211_M_MONITOR:
+ sc->rxon.mode = WPI_MODE_MONITOR;
+ sc->rxon.filter = htole32(WPI_FILTER_MULTICAST);
+ break;
+ default:
+ device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode);
+ return EINVAL;
+ }
+ wpi_set_promisc(sc);
+ sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
+ sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
+
+ if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
+ device_printf(sc->sc_dev, "%s: could not send RXON\n",
+ __func__);
+ return error;
+ }
+
+	/* Set up rate scaling. */
+ if ((error = wpi_mrr_setup(sc)) != 0) {
+ device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
+ error);
return error;
}
- error = wpi_set_txpower(sc, ni->ni_chan, 1);
+ /* Disable beacon notifications (unused). */
+ flags = WPI_STATISTICS_BEACON_DISABLE;
+ error = wpi_cmd(sc, WPI_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
if (error != 0) {
- device_printf(sc->sc_dev, "could set txpower\n");
+ device_printf(sc->sc_dev,
+ "could not disable beacon statistics, error %d\n", error);
return error;
}
- /* link LED always on while associated */
- wpi_set_led(sc, WPI_LED_LINK, 0, 1);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- /* start automatic rate control timer */
- callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
+ return 0;
+}
+
+static uint16_t
+wpi_get_active_dwell_time(struct wpi_softc *sc,
+ struct ieee80211_channel *c, uint8_t n_probes)
+{
+ /* No channel? Default to 2GHz settings. */
+ if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
+ return (WPI_ACTIVE_DWELL_TIME_2GHZ +
+ WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
+ }
- return (error);
+ /* 5GHz dwell time. */
+ return (WPI_ACTIVE_DWELL_TIME_5GHZ +
+ WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}
/*
- * Send a scan request to the firmware. Since this command is huge, we map it
- * into a mbufcluster instead of using the pre-allocated set of commands. Note,
- * much of this code is similar to that in wpi_cmd but because we must manually
- * construct the probe & channels, we duplicate what's needed here. XXX In the
- * future, this function should be modified to use wpi_cmd to help cleanup the
- * code base.
+ * Limit the total dwell time to 85% of the beacon interval.
+ *
+ * Returns the dwell time in milliseconds.
+ */
+static uint16_t
+wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time)
+{
+ struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211vap *vap = NULL;
+ int bintval = 0;
+
+	/* bintval is in TU (1.024 ms). */
+ if (! TAILQ_EMPTY(&ic->ic_vaps)) {
+ vap = TAILQ_FIRST(&ic->ic_vaps);
+ bintval = vap->iv_bss->ni_intval;
+ }
+
+ /*
+	 * If bintval is non-zero, return the minimum of
+	 * WPI_PASSIVE_DWELL_BASE and 85% of the beacon interval.
+ *
+	 * XXX The math should take into account that bintval
+	 * is in units of 1.024 ms, not 1 ms.
+ */
+ if (bintval > 0) {
+ DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__,
+ bintval);
+ return (MIN(WPI_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
+ }
+
+ /* No association context? Default. */
+ return (WPI_PASSIVE_DWELL_BASE);
+}
+
+static uint16_t
+wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c)
+{
+ uint16_t passive;
+
+ if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c))
+ passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ;
+ else
+ passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ;
+
+ /* Clamp to the beacon interval if we're associated. */
+ return (wpi_limit_dwell(sc, passive));
+}
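
Taken together, the two helpers above give an active dwell that grows with the number of SSIDs being probed, and a passive dwell that is limited to 85% of the beacon interval when an association exists; the scan code then forces the passive dwell to exceed the active one. A minimal sketch of that arithmetic; the base/factor constants here are placeholders, not the driver's WPI_* values.

/* Illustrative only -- scan dwell time computation, placeholder constants. */
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Placeholder values (ms); the real ones come from the driver's header. */
#define DWELL_TIME_2GHZ		30
#define DWELL_FACTOR_2GHZ	3
#define PASSIVE_DWELL_BASE	100
#define PASSIVE_DWELL_2GHZ	20

int
main(void)
{
	unsigned n_probes = 2, bintval = 100;	/* hypothetical inputs */
	unsigned active, passive, limit;

	active = DWELL_TIME_2GHZ + DWELL_FACTOR_2GHZ * (n_probes + 1);
	passive = PASSIVE_DWELL_BASE + PASSIVE_DWELL_2GHZ;
	limit = bintval > 0 ?
	    MIN(PASSIVE_DWELL_BASE, (bintval * 85) / 100) :
	    PASSIVE_DWELL_BASE;
	if (passive > limit)
		passive = limit;
	if (passive <= active)		/* keep passive > active */
		passive = active + 1;
	printf("active=%u ms passive=%u ms\n", active, passive);
	return (0);
}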
+
+/*
+ * Send a scan request to the firmware.
*/
static int
-wpi_scan(struct wpi_softc *sc)
+wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
struct ieee80211_scan_state *ss = ic->ic_scan;
- struct wpi_tx_ring *ring = &sc->cmdq;
- struct wpi_tx_desc *desc;
- struct wpi_tx_data *data;
- struct wpi_tx_cmd *cmd;
struct wpi_scan_hdr *hdr;
+ struct wpi_cmd_data *tx;
+ struct wpi_scan_essid *essids;
struct wpi_scan_chan *chan;
struct ieee80211_frame *wh;
struct ieee80211_rateset *rs;
- struct ieee80211_channel *c;
- enum ieee80211_phymode mode;
- uint8_t *frm;
- int pktlen, error, i, nssid;
- bus_addr_t physaddr;
+ uint16_t dwell_active, dwell_passive;
+ uint8_t *buf, *frm;
+ int buflen, error, i, nssid;
- desc = &ring->desc[ring->cur];
- data = &ring->data[ring->cur];
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- if (data->m == NULL) {
+ /*
+ * We are absolutely not allowed to send a scan command when another
+ * scan command is pending.
+ */
+ if (sc->sc_scan_timer) {
+ device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
+ __func__);
+ return (EAGAIN);
+ }
+
+ buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (buf == NULL) {
device_printf(sc->sc_dev,
- "could not allocate mbuf for scan command\n");
+ "%s: could not allocate buffer for scan command\n",
+ __func__);
return ENOMEM;
}
-
- cmd = mtod(data->m, struct wpi_tx_cmd *);
- cmd->code = WPI_CMD_SCAN;
- cmd->flags = 0;
- cmd->qid = ring->qid;
- cmd->idx = ring->cur;
-
- hdr = (struct wpi_scan_hdr *)cmd->data;
- memset(hdr, 0, sizeof(struct wpi_scan_hdr));
+ hdr = (struct wpi_scan_hdr *)buf;
/*
- * Move to the next channel if no packets are received within 5 msecs
- * after sending the probe request (this helps to reduce the duration
- * of active scans).
+ * Move to the next channel if no packets are received within 10 msecs
+ * after sending the probe request.
*/
- hdr->quiet = htole16(5);
- hdr->threshold = htole16(1);
+ hdr->quiet_time = htole16(10); /* timeout in milliseconds */
+ hdr->quiet_threshold = htole16(1); /* min # of packets */
+ /*
+	 * The maximum service time must be greater than the active, passive
+	 * and quiet times; note that it is given in microseconds.
+ */
+ hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
+ hdr->pause_svc = htole32((4 << 24) |
+ (100 * IEEE80211_DUR_TU)); /* Hardcode for now */
+ hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);
- if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
- /* send probe requests at 6Mbps */
- hdr->tx.rate = wpi_ridx_to_plcp[WPI_OFDM6];
+ tx = (struct wpi_cmd_data *)(hdr + 1);
+ tx->flags = htole32(WPI_TX_AUTO_SEQ);
+ tx->id = WPI_ID_BROADCAST;
+ tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
- /* Enable crc checking */
- hdr->promotion = htole16(1);
+ if (IEEE80211_IS_CHAN_5GHZ(c)) {
+ /* Send probe requests at 6Mbps. */
+ tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
} else {
- hdr->flags = htole32(WPI_CONFIG_24GHZ | WPI_CONFIG_AUTO);
- /* send probe requests at 1Mbps */
- hdr->tx.rate = wpi_ridx_to_plcp[WPI_CCK1];
+ hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
+ /* Send probe requests at 1Mbps. */
+ tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
}
- hdr->tx.id = WPI_ID_BROADCAST;
- hdr->tx.lifetime = htole32(WPI_LIFETIME_INFINITE);
- hdr->tx.flags = htole32(WPI_TX_AUTO_SEQ);
- memset(hdr->scan_essids, 0, sizeof(hdr->scan_essids));
+ essids = (struct wpi_scan_essid *)(tx + 1);
nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
for (i = 0; i < nssid; i++) {
- hdr->scan_essids[i].id = IEEE80211_ELEMID_SSID;
- hdr->scan_essids[i].esslen = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
- memcpy(hdr->scan_essids[i].essid, ss->ss_ssid[i].ssid,
- hdr->scan_essids[i].esslen);
+ essids[i].id = IEEE80211_ELEMID_SSID;
+ essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
+ memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
#ifdef WPI_DEBUG
- if (wpi_debug & WPI_DEBUG_SCANNING) {
+ if (sc->sc_debug & WPI_DEBUG_SCAN) {
printf("Scanning Essid: ");
- ieee80211_print_essid(hdr->scan_essids[i].essid,
- hdr->scan_essids[i].esslen);
+ ieee80211_print_essid(essids[i].data, essids[i].len);
printf("\n");
}
#endif
@@ -2630,877 +3644,1118 @@ wpi_scan(struct wpi_softc *sc)
* Build a probe request frame. Most of the following code is a
* copy & paste of what is done in net80211.
*/
- wh = (struct ieee80211_frame *)&hdr->scan_essids[WPI_SCAN_MAX_ESSIDS];
+ wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_PROBE_REQ;
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
- *(u_int16_t *)&wh->i_dur[0] = 0; /* filled by h/w */
- *(u_int16_t *)&wh->i_seq[0] = 0; /* filled by h/w */
+ *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */
+ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */
frm = (uint8_t *)(wh + 1);
-
- mode = ieee80211_chan2mode(ic->ic_curchan);
- rs = &ic->ic_sup_rates[mode];
-
frm = ieee80211_add_ssid(frm, NULL, 0);
frm = ieee80211_add_rates(frm, rs);
- frm = ieee80211_add_xrates(frm, rs);
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+ frm = ieee80211_add_xrates(frm, rs);
- /* setup length of probe request */
- hdr->tx.len = htole16(frm - (uint8_t *)wh);
+ /* Set length of probe request. */
+ tx->len = htole16(frm - (uint8_t *)wh);
/*
* Construct information about the channel that we
* want to scan. The firmware expects this to be directly
* after the scan probe request
*/
- c = ic->ic_curchan;
chan = (struct wpi_scan_chan *)frm;
- chan->chan = ieee80211_chan2ieee(ic, c);
+ chan->chan = htole16(ieee80211_chan2ieee(ic, c));
chan->flags = 0;
- if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
+ if (nssid) {
+ hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
+ chan->flags |= WPI_CHAN_NPBREQS(nssid);
+ } else
+ hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;
+
+ if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE))
chan->flags |= WPI_CHAN_ACTIVE;
- if (nssid != 0)
- chan->flags |= WPI_CHAN_DIRECT;
- }
- chan->gain_dsp = 0x6e; /* Default level */
- if (IEEE80211_IS_CHAN_5GHZ(c)) {
- chan->active = htole16(10);
- chan->passive = htole16(ss->ss_maxdwell);
- chan->gain_radio = 0x3b;
- } else {
- chan->active = htole16(20);
- chan->passive = htole16(ss->ss_maxdwell);
- chan->gain_radio = 0x28;
- }
- DPRINTFN(WPI_DEBUG_SCANNING,
- ("Scanning %u Passive: %d\n",
- chan->chan,
- c->ic_flags & IEEE80211_CHAN_PASSIVE));
+ /*
+ * Calculate the active/passive dwell times.
+ */
+
+ dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
+ dwell_passive = wpi_get_passive_dwell_time(sc, c);
+
+ /* Make sure they're valid. */
+ if (dwell_passive <= dwell_active)
+ dwell_passive = dwell_active + 1;
+
+ chan->active = htole16(dwell_active);
+ chan->passive = htole16(dwell_passive);
+
+ chan->dsp_gain = 0x6e; /* Default level */
+
+ if (IEEE80211_IS_CHAN_5GHZ(c))
+ chan->rf_gain = 0x3b;
+ else
+ chan->rf_gain = 0x28;
+
+ DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
+ chan->chan, (c->ic_flags & IEEE80211_CHAN_PASSIVE) ? 1 : 0);
hdr->nchan++;
chan++;
- frm += sizeof (struct wpi_scan_chan);
-#if 0
- // XXX All Channels....
- for (c = &ic->ic_channels[1];
- c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
- if ((c->ic_flags & ic->ic_curchan->ic_flags) != ic->ic_curchan->ic_flags)
- continue;
+ buflen = (uint8_t *)chan - buf;
+ hdr->len = htole16(buflen);
- chan->chan = ieee80211_chan2ieee(ic, c);
- chan->flags = 0;
- if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
- chan->flags |= WPI_CHAN_ACTIVE;
- if (ic->ic_des_ssid[0].len != 0)
- chan->flags |= WPI_CHAN_DIRECT;
- }
- chan->gain_dsp = 0x6e; /* Default level */
- if (IEEE80211_IS_CHAN_5GHZ(c)) {
- chan->active = htole16(10);
- chan->passive = htole16(110);
- chan->gain_radio = 0x3b;
- } else {
- chan->active = htole16(20);
- chan->passive = htole16(120);
- chan->gain_radio = 0x28;
- }
+ DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
+ hdr->nchan);
+ error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
+ free(buf, M_DEVBUF);
- DPRINTFN(WPI_DEBUG_SCANNING,
- ("Scanning %u Passive: %d\n",
- chan->chan,
- c->ic_flags & IEEE80211_CHAN_PASSIVE));
+ sc->sc_scan_timer = 5;
- hdr->nchan++;
- chan++;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- frm += sizeof (struct wpi_scan_chan);
- }
-#endif
+ return error;
+}
- hdr->len = htole16(frm - (uint8_t *)hdr);
- pktlen = frm - (uint8_t *)cmd;
+static int
+wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni = vap->iv_bss;
+ int error;
- error = bus_dmamap_load(ring->data_dmat, data->map, cmd, pktlen,
- wpi_dma_map_addr, &physaddr, BUS_DMA_NOWAIT);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not map scan command\n");
- m_freem(data->m);
- data->m = NULL;
- return error;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ /* Update adapter configuration. */
+ sc->rxon.associd = 0;
+ sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
+ IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
+ sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
+ sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
+ if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
+ sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
+ if (ic->ic_flags & IEEE80211_F_SHSLOT)
+ sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
+ if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
+ if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
+ sc->rxon.cck_mask = 0;
+ sc->rxon.ofdm_mask = 0x15;
+ } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
+ sc->rxon.cck_mask = 0x03;
+ sc->rxon.ofdm_mask = 0;
+ } else {
+ /* Assume 802.11b/g. */
+ sc->rxon.cck_mask = 0x0f;
+ sc->rxon.ofdm_mask = 0x15;
}
- desc->flags = htole32(WPI_PAD32(pktlen) << 28 | 1 << 24);
- desc->segs[0].addr = htole32(physaddr);
- desc->segs[0].len = htole32(pktlen);
+ DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
+ sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
+ sc->rxon.ofdm_mask);
- bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
- BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
+ if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
+ device_printf(sc->sc_dev, "%s: could not send RXON\n",
+ __func__);
+ }
- /* kick cmd ring */
- ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
- WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- sc->sc_scan_timer = 5;
- return 0; /* will be notified async. of failure/success */
+ return error;
}
-/**
- * Configure the card to listen to a particular channel, this transisions the
- * card in to being able to receive frames from remote devices.
- */
static int
-wpi_config(struct wpi_softc *sc)
+wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
- struct wpi_power power;
- struct wpi_bluetooth bluetooth;
- struct wpi_node_info node;
- int error;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct wpi_vap *wvp = WPI_VAP(vap);
+ struct wpi_buf *bcn = &wvp->wv_bcbuf;
+ struct ieee80211_beacon_offsets bo;
+ struct wpi_cmd_beacon *cmd;
+ struct mbuf *m;
+ int totlen;
- /* set power mode */
- memset(&power, 0, sizeof power);
- power.flags = htole32(WPI_POWER_CAM|0x8);
- error = wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &power, sizeof power, 0);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not set power mode\n");
- return error;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ if (ni->ni_chan == IEEE80211_CHAN_ANYC)
+ return EINVAL;
+
+ m = ieee80211_beacon_alloc(ni, &bo);
+ if (m == NULL) {
+ device_printf(sc->sc_dev,
+ "%s: could not allocate beacon frame\n", __func__);
+ return ENOMEM;
}
+ totlen = m->m_pkthdr.len;
- /* configure bluetooth coexistence */
- memset(&bluetooth, 0, sizeof bluetooth);
- bluetooth.flags = 3;
- bluetooth.lead = 0xaa;
- bluetooth.kill = 1;
- error = wpi_cmd(sc, WPI_CMD_BLUETOOTH, &bluetooth, sizeof bluetooth,
- 0);
- if (error != 0) {
+ if (bcn->data == NULL) {
+ cmd = malloc(sizeof(struct wpi_cmd_beacon), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+
+ if (cmd == NULL) {
+ device_printf(sc->sc_dev,
+ "could not allocate buffer for beacon command\n");
+ m_freem(m);
+ return ENOMEM;
+ }
+
+ cmd->id = WPI_ID_BROADCAST;
+ cmd->ofdm_mask = 0xff;
+ cmd->cck_mask = 0x0f;
+ cmd->lifetime = htole32(WPI_LIFETIME_INFINITE);
+ cmd->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP);
+
+ bcn->data = cmd;
+ bcn->ni = NULL;
+ bcn->code = WPI_CMD_SET_BEACON;
+ bcn->ac = 4;
+ bcn->size = sizeof(struct wpi_cmd_beacon);
+ } else
+ cmd = bcn->data;
+
+ cmd->len = htole16(totlen);
+ cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
+ wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
+
+ /* NB: m will be freed in wpi_cmd_done() */
+ bcn->m = m;
+
+ return wpi_cmd2(sc, bcn);
+}
+
+static void
+wpi_update_beacon(struct ieee80211vap *vap, int item)
+{
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct wpi_softc *sc = ifp->if_softc;
+ int error;
+
+ if ((error = wpi_setup_beacon(sc, ni)) != 0) {
device_printf(sc->sc_dev,
- "could not configure bluetooth coexistence\n");
- return error;
+ "%s: could not update beacon frame, error %d", __func__,
+ error);
}
+}
- /* configure adapter */
- memset(&sc->config, 0, sizeof (struct wpi_config));
- IEEE80211_ADDR_COPY(sc->config.myaddr, IF_LLADDR(ifp));
- /*set default channel*/
- sc->config.chan = htole16(ieee80211_chan2ieee(ic, ic->ic_curchan));
- sc->config.flags = htole32(WPI_CONFIG_TSF);
- if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
- sc->config.flags |= htole32(WPI_CONFIG_AUTO |
- WPI_CONFIG_24GHZ);
+static int
+wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni = vap->iv_bss;
+ int error;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+
+ if (vap->iv_opmode == IEEE80211_M_MONITOR) {
+ /* Link LED blinks while monitoring. */
+ wpi_set_led(sc, WPI_LED_LINK, 5, 5);
+ return 0;
}
- sc->config.filter = 0;
- switch (ic->ic_opmode) {
- case IEEE80211_M_STA:
- case IEEE80211_M_WDS: /* No know setup, use STA for now */
- sc->config.mode = WPI_MODE_STA;
- sc->config.filter |= htole32(WPI_FILTER_MULTICAST);
- break;
- case IEEE80211_M_IBSS:
- case IEEE80211_M_AHDEMO:
- sc->config.mode = WPI_MODE_IBSS;
- sc->config.filter |= htole32(WPI_FILTER_BEACON |
- WPI_FILTER_MULTICAST);
- break;
- case IEEE80211_M_HOSTAP:
- sc->config.mode = WPI_MODE_HOSTAP;
- break;
- case IEEE80211_M_MONITOR:
- sc->config.mode = WPI_MODE_MONITOR;
- sc->config.filter |= htole32(WPI_FILTER_MULTICAST |
- WPI_FILTER_CTL | WPI_FILTER_PROMISC);
- break;
- default:
- device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode);
+
+ /* XXX kernel panic workaround */
+ if (ni->ni_chan == IEEE80211_CHAN_ANYC) {
+ device_printf(sc->sc_dev, "%s: incomplete configuration\n",
+ __func__);
return EINVAL;
}
- sc->config.cck_mask = 0x0f; /* not yet negotiated */
- sc->config.ofdm_mask = 0xff; /* not yet negotiated */
- error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config,
- sizeof (struct wpi_config), 0);
- if (error != 0) {
- device_printf(sc->sc_dev, "configure command failed\n");
+
+ if ((error = wpi_set_timing(sc, ni)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not set timing, error %d\n", __func__, error);
return error;
}
- /* configuration has changed, set Tx power accordingly */
- if ((error = wpi_set_txpower(sc, ic->ic_curchan, 0)) != 0) {
- device_printf(sc->sc_dev, "could not set Tx power\n");
- return error;
+ /* Update adapter configuration. */
+ IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
+ sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni));
+ sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
+ sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
+ if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
+ sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
+ /* Short preamble and slot time are negotiated when associating. */
+ sc->rxon.flags &= ~htole32(WPI_RXON_SHPREAMBLE | WPI_RXON_SHSLOT);
+ if (ic->ic_flags & IEEE80211_F_SHSLOT)
+ sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
+ if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
+ if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
+ sc->rxon.cck_mask = 0;
+ sc->rxon.ofdm_mask = 0x15;
+ } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
+ sc->rxon.cck_mask = 0x03;
+ sc->rxon.ofdm_mask = 0;
+ } else {
+ /* Assume 802.11b/g. */
+ sc->rxon.cck_mask = 0x0f;
+ sc->rxon.ofdm_mask = 0x15;
}
+ sc->rxon.filter |= htole32(WPI_FILTER_BSS);
- /* add broadcast node */
- memset(&node, 0, sizeof node);
- IEEE80211_ADDR_COPY(node.bssid, ifp->if_broadcastaddr);
- node.id = WPI_ID_BROADCAST;
- node.rate = wpi_plcp_signal(2);
- error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 0);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not add broadcast node\n");
+ /* XXX put somewhere HC_QOS_SUPPORT_ASSOC + HC_IBSS_START */
+
+ DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n",
+ sc->rxon.chan, sc->rxon.flags);
+
+ if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
+ device_printf(sc->sc_dev, "%s: could not send RXON\n",
+ __func__);
return error;
}
- /* Setup rate scalling */
- error = wpi_mrr_setup(sc);
- if (error != 0) {
- device_printf(sc->sc_dev, "could not setup MRR\n");
- return error;
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ if ((error = wpi_setup_beacon(sc, ni)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not setup beacon, error %d\n", __func__,
+ error);
+ return error;
+ }
}
- return 0;
-}
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ /* Add BSS node. */
+ ((struct wpi_node *)ni)->id = WPI_ID_BSS;
+ if ((error = wpi_add_node(sc, ni)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not add BSS node, error %d\n", __func__,
+ error);
+ return error;
+ }
+ }
-static void
-wpi_stop_master(struct wpi_softc *sc)
-{
- uint32_t tmp;
- int ntries;
+ /* Link LED always on while associated. */
+ wpi_set_led(sc, WPI_LED_LINK, 0, 1);
- DPRINTFN(WPI_DEBUG_HW,("Disabling Firmware execution\n"));
+ /* Start periodic calibration timer. */
+ callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
- tmp = WPI_READ(sc, WPI_RESET);
- WPI_WRITE(sc, WPI_RESET, tmp | WPI_STOP_MASTER | WPI_NEVO_RESET);
+ /* Enable power-saving mode if requested by user. */
+ if (vap->iv_flags & IEEE80211_F_PMGTON)
+ (void)wpi_set_pslevel(sc, 0, 3, 1);
- tmp = WPI_READ(sc, WPI_GPIO_CTL);
- if ((tmp & WPI_GPIO_PWR_STATUS) == WPI_GPIO_PWR_SLEEP)
- return; /* already asleep */
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- for (ntries = 0; ntries < 100; ntries++) {
- if (WPI_READ(sc, WPI_RESET) & WPI_MASTER_DISABLED)
- break;
- DELAY(10);
- }
- if (ntries == 100) {
- device_printf(sc->sc_dev, "timeout waiting for master\n");
- }
+ return 0;
}
static int
-wpi_power_up(struct wpi_softc *sc)
+wpi_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
+ ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
- uint32_t tmp;
- int ntries;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct wpi_softc *sc = ifp->if_softc;
- wpi_mem_lock(sc);
- tmp = wpi_mem_read(sc, WPI_MEM_POWER);
- wpi_mem_write(sc, WPI_MEM_POWER, tmp & ~0x03000000);
- wpi_mem_unlock(sc);
+ if (!(&vap->iv_nw_keys[0] <= k &&
+ k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ /* should not happen */
+ DPRINTF(sc, WPI_DEBUG_KEY, "%s: bogus group key\n",
+ __func__);
+ return 0;
+ }
+ *keyix = 0; /* NB: use key index 0 for ucast key */
+ } else {
+ *keyix = *rxkeyix = k - vap->iv_nw_keys;
- for (ntries = 0; ntries < 5000; ntries++) {
- if (WPI_READ(sc, WPI_GPIO_STATUS) & WPI_POWERED)
- break;
- DELAY(10);
+ if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM)
+ k->wk_flags |= IEEE80211_KEY_SWCRYPT;
}
- if (ntries == 5000) {
- device_printf(sc->sc_dev,
- "timeout waiting for NIC to power up\n");
- return ETIMEDOUT;
- }
- return 0;
+ return 1;
}
static int
-wpi_reset(struct wpi_softc *sc)
+wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
+ const uint8_t mac[IEEE80211_ADDR_LEN])
{
- uint32_t tmp;
- int ntries;
+ const struct ieee80211_cipher *cip = k->wk_cipher;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct wpi_softc *sc = ic->ic_ifp->if_softc;
+ struct wpi_node *wn = (void *)ni;
+ struct wpi_node_info node;
+ uint16_t kflags;
+ int error;
- DPRINTFN(WPI_DEBUG_HW,
- ("Resetting the card - clearing any uploaded firmware\n"));
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- /* clear any pending interrupts */
- WPI_WRITE(sc, WPI_INTR, 0xffffffff);
+ switch (cip->ic_cipher) {
+ case IEEE80211_CIPHER_AES_CCM:
+ if (k->wk_flags & IEEE80211_KEY_GROUP)
+ return 1;
- tmp = WPI_READ(sc, WPI_PLL_CTL);
- WPI_WRITE(sc, WPI_PLL_CTL, tmp | WPI_PLL_INIT);
+ kflags = WPI_KFLAG_CCMP;
+ break;
+ default:
+ /* null_key_set() */
+ return 1;
+ }
- tmp = WPI_READ(sc, WPI_CHICKEN);
- WPI_WRITE(sc, WPI_CHICKEN, tmp | WPI_CHICKEN_RXNOLOS);
+ if (wn->id == WPI_ID_UNDEFINED)
+ return 0;
- tmp = WPI_READ(sc, WPI_GPIO_CTL);
- WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_INIT);
+ kflags |= WPI_KFLAG_KID(k->wk_keyix);
+ if (k->wk_flags & IEEE80211_KEY_GROUP)
+ kflags |= WPI_KFLAG_MULTICAST;
- /* wait for clock stabilization */
- for (ntries = 0; ntries < 25000; ntries++) {
- if (WPI_READ(sc, WPI_GPIO_CTL) & WPI_GPIO_CLOCK)
- break;
- DELAY(10);
- }
- if (ntries == 25000) {
- device_printf(sc->sc_dev,
- "timeout waiting for clock stabilization\n");
- return ETIMEDOUT;
- }
+ memset(&node, 0, sizeof node);
+ node.id = wn->id;
+ node.control = WPI_NODE_UPDATE;
+ node.flags = WPI_FLAG_KEY_SET;
+ node.kflags = htole16(kflags);
+ memcpy(node.key, k->wk_key, k->wk_keylen);
- /* initialize EEPROM */
- tmp = WPI_READ(sc, WPI_EEPROM_STATUS);
+ DPRINTF(sc, WPI_DEBUG_KEY, "set key id=%d for node %d\n", k->wk_keyix,
+ node.id);
- if ((tmp & WPI_EEPROM_VERSION) == 0) {
- device_printf(sc->sc_dev, "EEPROM not found\n");
- return EIO;
+ error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "can't update node info, error %d\n",
+ error);
+ return 0;
}
- WPI_WRITE(sc, WPI_EEPROM_STATUS, tmp & ~WPI_EEPROM_LOCKED);
- return 0;
+ return 1;
}
-static void
-wpi_hw_config(struct wpi_softc *sc)
+static int
+wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
- uint32_t rev, hw;
+ const struct ieee80211_cipher *cip = k->wk_cipher;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct wpi_softc *sc = ic->ic_ifp->if_softc;
+ struct wpi_node *wn = (void *)ni;
+ struct wpi_node_info node;
- /* voodoo from the Linux "driver".. */
- hw = WPI_READ(sc, WPI_HWCONFIG);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
- if ((rev & 0xc0) == 0x40)
- hw |= WPI_HW_ALM_MB;
- else if (!(rev & 0x80))
- hw |= WPI_HW_ALM_MM;
+ switch (cip->ic_cipher) {
+ case IEEE80211_CIPHER_AES_CCM:
+ break;
+ default:
+ /* null_key_delete() */
+ return 1;
+ }
- if (sc->cap == 0x80)
- hw |= WPI_HW_SKU_MRC;
+ if (vap->iv_state != IEEE80211_S_RUN ||
+ (k->wk_flags & IEEE80211_KEY_GROUP))
+ return 1; /* Nothing to do. */
- hw &= ~WPI_HW_REV_D;
- if ((le16toh(sc->rev) & 0xf0) == 0xd0)
- hw |= WPI_HW_REV_D;
+ memset(&node, 0, sizeof node);
+ node.id = wn->id;
+ node.control = WPI_NODE_UPDATE;
+ node.flags = WPI_FLAG_KEY_SET;
- if (sc->type > 1)
- hw |= WPI_HW_TYPE_B;
+ DPRINTF(sc, WPI_DEBUG_KEY, "delete keys for node %d\n", node.id);
+ (void)wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
- WPI_WRITE(sc, WPI_HWCONFIG, hw);
+ return 1;
}
-static void
-wpi_rfkill_resume(struct wpi_softc *sc)
+/*
+ * This function is called after the runtime firmware notifies us of its
+ * readiness (called in a process context).
+ */
+static int
+wpi_post_alive(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- int ntries;
+ int ntries, error;
+
+ /* Check (again) that the radio is not disabled. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
- /* enable firmware again */
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF);
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ /* NB: Runtime firmware must be up and running. */
+ if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
+ device_printf(sc->sc_dev,
+ "RF switch: radio disabled (%s)\n", __func__);
+ wpi_nic_unlock(sc);
+ return EPERM; /* :-) */
+ }
+ wpi_nic_unlock(sc);
- /* wait for thermal sensors to calibrate */
+ /* Wait for thermal sensor to calibrate. */
for (ntries = 0; ntries < 1000; ntries++) {
- if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0)
+ if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
break;
DELAY(10);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
- "timeout waiting for thermal calibration\n");
- return;
- }
- DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp));
+ "timeout waiting for thermal sensor calibration\n");
+ return ETIMEDOUT;
+ }
- if (wpi_config(sc) != 0) {
- device_printf(sc->sc_dev, "device config failed\n");
- return;
- }
+ DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
+ return 0;
+}
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- sc->flags &= ~WPI_FLAG_HW_RADIO_OFF;
+/*
+ * The firmware boot code is small and is intended to be copied directly into
+ * the NIC internal memory (no DMA transfer).
+ */
+static int
+wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size)
+{
+ int error, ntries;
- if (vap != NULL) {
- if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
- if (vap->iv_opmode != IEEE80211_M_MONITOR) {
- ieee80211_beacon_miss(ic);
- wpi_set_led(sc, WPI_LED_LINK, 0, 1);
- } else
- wpi_set_led(sc, WPI_LED_LINK, 5, 5);
- } else {
- ieee80211_scan_next(vap);
- wpi_set_led(sc, WPI_LED_LINK, 20, 2);
+ DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);
+
+ size /= sizeof (uint32_t);
+
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+
+ /* Copy microcode image into NIC memory. */
+ wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
+ (const uint32_t *)ucode, size);
+
+ wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
+ wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
+ wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);
+
+ /* Start boot load now. */
+ wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);
+
+ /* Wait for transfer to complete. */
+ for (ntries = 0; ntries < 1000; ntries++) {
+ uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
+ DPRINTF(sc, WPI_DEBUG_HW,
+ "firmware status=0x%x, val=0x%x, result=0x%x\n", status,
+ WPI_FH_TX_STATUS_IDLE(6),
+ status & WPI_FH_TX_STATUS_IDLE(6));
+ if (status & WPI_FH_TX_STATUS_IDLE(6)) {
+ DPRINTF(sc, WPI_DEBUG_HW,
+ "Status Match! - ntries = %d\n", ntries);
+ break;
}
+ DELAY(10);
+ }
+ if (ntries == 1000) {
+ device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
+ __func__);
+ wpi_nic_unlock(sc);
+ return ETIMEDOUT;
}
- callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
+ /* Enable boot after power up. */
+ wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);
+
+ wpi_nic_unlock(sc);
+ return 0;
}
-static void
-wpi_init_locked(struct wpi_softc *sc, int force)
+static int
+wpi_load_firmware(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- uint32_t tmp;
- int ntries, qid;
+ struct wpi_fw_info *fw = &sc->fw;
+ struct wpi_dma_info *dma = &sc->fw_dma;
+ int error;
- wpi_stop_locked(sc);
- (void)wpi_reset(sc);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_CLOCK1, 0xa00);
- DELAY(20);
- tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV);
- wpi_mem_write(sc, WPI_MEM_PCIDEV, tmp | 0x800);
- wpi_mem_unlock(sc);
+ /* Copy initialization sections into pre-allocated DMA-safe memory. */
+ memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+ memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
- (void)wpi_power_up(sc);
- wpi_hw_config(sc);
+ /* Tell adapter where to find initialization sections. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
+ wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
+ wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
+ dma->paddr + WPI_FW_DATA_MAXSZ);
+ wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
+ wpi_nic_unlock(sc);
+
+ /* Load firmware boot code. */
+ error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
+ __func__);
+ return error;
+ }
- /* init Rx ring */
- wpi_mem_lock(sc);
- WPI_WRITE(sc, WPI_RX_BASE, sc->rxq.desc_dma.paddr);
- WPI_WRITE(sc, WPI_RX_RIDX_PTR, sc->shared_dma.paddr +
- offsetof(struct wpi_shared, next));
- WPI_WRITE(sc, WPI_RX_WIDX, (WPI_RX_RING_COUNT - 1) & ~7);
- WPI_WRITE(sc, WPI_RX_CONFIG, 0xa9601010);
- wpi_mem_unlock(sc);
-
- /* init Tx rings */
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_MODE, 2); /* bypass mode */
- wpi_mem_write(sc, WPI_MEM_RA, 1); /* enable RA0 */
- wpi_mem_write(sc, WPI_MEM_TXCFG, 0x3f); /* enable all 6 Tx rings */
- wpi_mem_write(sc, WPI_MEM_BYPASS1, 0x10000);
- wpi_mem_write(sc, WPI_MEM_BYPASS2, 0x30002);
- wpi_mem_write(sc, WPI_MEM_MAGIC4, 4);
- wpi_mem_write(sc, WPI_MEM_MAGIC5, 5);
-
- WPI_WRITE(sc, WPI_TX_BASE_PTR, sc->shared_dma.paddr);
- WPI_WRITE(sc, WPI_MSG_CONFIG, 0xffff05a5);
-
- for (qid = 0; qid < 6; qid++) {
- WPI_WRITE(sc, WPI_TX_CTL(qid), 0);
- WPI_WRITE(sc, WPI_TX_BASE(qid), 0);
- WPI_WRITE(sc, WPI_TX_CONFIG(qid), 0x80200008);
- }
- wpi_mem_unlock(sc);
-
- /* clear "radio off" and "disable command" bits (reversed logic) */
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF);
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD);
- sc->flags &= ~WPI_FLAG_HW_RADIO_OFF;
-
- /* clear any pending interrupts */
- WPI_WRITE(sc, WPI_INTR, 0xffffffff);
-
- /* enable interrupts */
- WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK);
-
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF);
- WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF);
-
- if ((wpi_load_firmware(sc)) != 0) {
- device_printf(sc->sc_dev,
- "A problem occurred loading the firmware to the driver\n");
- return;
- }
-
- /* At this point the firmware is up and running. If the hardware
- * RF switch is turned off thermal calibration will fail, though
- * the card is still happy to continue to accept commands, catch
- * this case and schedule a task to watch for it to be turned on.
- */
- wpi_mem_lock(sc);
- tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF);
- wpi_mem_unlock(sc);
+ /* Now press "execute". */
+ WPI_WRITE(sc, WPI_RESET, 0);
- if (!(tmp & 0x1)) {
- sc->flags |= WPI_FLAG_HW_RADIO_OFF;
- device_printf(sc->sc_dev,"Radio Transmitter is switched off\n");
- goto out;
+ /* Wait at most one second for first alive notification. */
+ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: timeout waiting for adapter to initialize, error %d\n",
+ __func__, error);
+ return error;
}
- /* wait for thermal sensors to calibrate */
- for (ntries = 0; ntries < 1000; ntries++) {
- if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0)
- break;
- DELAY(10);
+ /* Copy runtime sections into pre-allocated DMA-safe memory. */
+ memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+ memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+
+ /* Tell adapter where to find runtime sections. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
+ wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
+ wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
+ dma->paddr + WPI_FW_DATA_MAXSZ);
+ wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
+ WPI_FW_UPDATED | fw->main.textsz);
+ wpi_nic_unlock(sc);
+
+ return 0;
+}
+
+static int
+wpi_read_firmware(struct wpi_softc *sc)
+{
+ const struct firmware *fp;
+ struct wpi_fw_info *fw = &sc->fw;
+ const struct wpi_firmware_hdr *hdr;
+ int error;
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE,
+ "Attempting Loading Firmware from %s module\n", WPI_FW_NAME);
+
+ WPI_UNLOCK(sc);
+ fp = firmware_get(WPI_FW_NAME);
+ WPI_LOCK(sc);
+
+ if (fp == NULL) {
+ device_printf(sc->sc_dev,
+ "could not load firmware image '%s'\n", WPI_FW_NAME);
+ return EINVAL;
}
- if (ntries == 1000) {
+ sc->fw_fp = fp;
+
+ if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
device_printf(sc->sc_dev,
- "timeout waiting for thermal sensors calibration\n");
- return;
+ "firmware file too short: %zu bytes\n", fp->datasize);
+ error = EINVAL;
+ goto fail;
}
- DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp));
- if (wpi_config(sc) != 0) {
- device_printf(sc->sc_dev, "device config failed\n");
- return;
+ fw->size = fp->datasize;
+ fw->data = (const uint8_t *)fp->data;
+
+ /* Extract firmware header information. */
+ hdr = (const struct wpi_firmware_hdr *)fw->data;
+
+ /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW |
+ |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
+
+ fw->main.textsz = le32toh(hdr->rtextsz);
+ fw->main.datasz = le32toh(hdr->rdatasz);
+ fw->init.textsz = le32toh(hdr->itextsz);
+ fw->init.datasz = le32toh(hdr->idatasz);
+ fw->boot.textsz = le32toh(hdr->btextsz);
+ fw->boot.datasz = 0;
+
+ /* Sanity-check firmware header. */
+ if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
+ fw->main.datasz > WPI_FW_DATA_MAXSZ ||
+ fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
+ fw->init.datasz > WPI_FW_DATA_MAXSZ ||
+ fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
+ (fw->boot.textsz & 3) != 0) {
+ device_printf(sc->sc_dev, "invalid firmware header\n");
+ error = EINVAL;
+ goto fail;
}
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-out:
- callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
+ /* Check that all firmware sections fit. */
+ if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
+ fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
+ device_printf(sc->sc_dev,
+ "firmware file too short: %zu bytes\n", fw->size);
+ error = EINVAL;
+ goto fail;
+ }
+
+ /* Get pointers to firmware sections. */
+ fw->main.text = (const uint8_t *)(hdr + 1);
+ fw->main.data = fw->main.text + fw->main.textsz;
+ fw->init.text = fw->main.data + fw->main.datasz;
+ fw->init.data = fw->init.text + fw->init.textsz;
+ fw->boot.text = fw->init.data + fw->init.datasz;
+
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE,
+ "Firmware Version: Major %d, Minor %d, Driver %d, \n"
+ "runtime (text: %u, data: %u) init (text: %u, data %u) boot (text %u)\n",
+ hdr->major, hdr->minor, le32toh(hdr->driver),
+ fw->main.textsz, fw->main.datasz,
+ fw->init.textsz, fw->init.datasz, fw->boot.textsz);
+
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
+ DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);
+
+ return 0;
+
+fail: wpi_unload_firmware(sc);
+ return error;
}
+/**
+ * Free the referenced firmware image
+ */
static void
-wpi_init(void *arg)
+wpi_unload_firmware(struct wpi_softc *sc)
{
- struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ if (sc->fw_fp != NULL) {
+ firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
+ sc->fw_fp = NULL;
+ }
+}
- WPI_LOCK(sc);
- wpi_init_locked(sc, 0);
- WPI_UNLOCK(sc);
+static int
+wpi_clock_wait(struct wpi_softc *sc)
+{
+ int ntries;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vaps */
+ /* Set "initialization complete" bit. */
+ WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
+
+ /* Wait for clock stabilization. */
+ for (ntries = 0; ntries < 2500; ntries++) {
+ if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
+ return 0;
+ DELAY(100);
+ }
+ device_printf(sc->sc_dev,
+ "%s: timeout waiting for clock stabilization\n", __func__);
+
+ return ETIMEDOUT;
}
-static void
-wpi_stop_locked(struct wpi_softc *sc)
+static int
+wpi_apm_init(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- uint32_t tmp;
- int ac;
+ uint32_t reg;
+ int error;
- sc->sc_tx_timer = 0;
- sc->sc_scan_timer = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- sc->flags &= ~WPI_FLAG_HW_RADIO_OFF;
- callout_stop(&sc->watchdog_to);
- callout_stop(&sc->calib_to);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- /* disable interrupts */
- WPI_WRITE(sc, WPI_MASK, 0);
- WPI_WRITE(sc, WPI_INTR, WPI_INTR_MASK);
- WPI_WRITE(sc, WPI_INTR_STATUS, 0xff);
- WPI_WRITE(sc, WPI_INTR_STATUS, 0x00070000);
+ /* Disable L0s exit timer (NMI bug workaround). */
+ WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
+ /* Don't wait for ICH L0s (ICH bug workaround). */
+ WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_MODE, 0);
- wpi_mem_unlock(sc);
+ /* Set FH wait threshold to max (HW bug under stress workaround). */
+ WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);
- /* reset all Tx rings */
- for (ac = 0; ac < 4; ac++)
- wpi_reset_tx_ring(sc, &sc->txq[ac]);
- wpi_reset_tx_ring(sc, &sc->cmdq);
+ /* Retrieve PCIe Active State Power Management (ASPM). */
+ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
+ /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
+ if (reg & 0x02) /* L1 Entry enabled. */
+ WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
+ else
+ WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
- /* reset Rx ring */
- wpi_reset_rx_ring(sc, &sc->rxq);
+ WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);
- wpi_mem_lock(sc);
- wpi_mem_write(sc, WPI_MEM_CLOCK2, 0x200);
- wpi_mem_unlock(sc);
+ /* Wait for clock stabilization before accessing prph. */
+ if ((error = wpi_clock_wait(sc)) != 0)
+ return error;
- DELAY(5);
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ /* Enable DMA and BSM (Bootstrap State Machine). */
+ wpi_prph_write(sc, WPI_APMG_CLK_EN,
+ WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
+ DELAY(20);
+ /* Disable L1-Active. */
+ wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
+ wpi_nic_unlock(sc);
- wpi_stop_master(sc);
+ return 0;
+}
- tmp = WPI_READ(sc, WPI_RESET);
- WPI_WRITE(sc, WPI_RESET, tmp | WPI_SW_RESET);
- sc->flags &= ~WPI_FLAG_BUSY;
+static void
+wpi_apm_stop_master(struct wpi_softc *sc)
+{
+ int ntries;
+
+ /* Stop busmaster DMA activity. */
+ WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);
+
+ if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
+ WPI_GP_CNTRL_MAC_PS)
+ return; /* Already asleep. */
+
+ for (ntries = 0; ntries < 100; ntries++) {
+ if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
+ return;
+ DELAY(10);
+ }
+ device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}
static void
-wpi_stop(struct wpi_softc *sc)
+wpi_apm_stop(struct wpi_softc *sc)
{
- WPI_LOCK(sc);
- wpi_stop_locked(sc);
- WPI_UNLOCK(sc);
+ wpi_apm_stop_master(sc);
+
+ /* Reset the entire device. */
+ WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
+ DELAY(10);
+ /* Clear "initialization complete" bit. */
+ WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
}
static void
-wpi_calib_timeout(void *arg)
+wpi_nic_config(struct wpi_softc *sc)
{
- struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- int temp;
+ uint32_t rev;
- if (vap->iv_state != IEEE80211_S_RUN)
- return;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- /* update sensor data */
- temp = (int)WPI_READ(sc, WPI_TEMPERATURE);
- DPRINTFN(WPI_DEBUG_TEMP,("Temp in calibration is: %d\n", temp));
+ /* voodoo from the Linux "driver".. */
+ rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
+ if ((rev & 0xc0) == 0x40)
+ WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
+ else if (!(rev & 0x80))
+ WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);
- wpi_power_calibration(sc, temp);
+ if (sc->cap == 0x80)
+ WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);
- callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
+ if ((le16toh(sc->rev) & 0xf0) == 0xd0)
+ WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
+ else
+ WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
+
+ if (sc->type > 1)
+ WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
}
-/*
- * This function is called periodically (every 60 seconds) to adjust output
- * power to temperature changes.
- */
-static void
-wpi_power_calibration(struct wpi_softc *sc, int temp)
+static int
+wpi_hw_init(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ int chnl, ntries, error;
- /* sanity-check read value */
- if (temp < -260 || temp > 25) {
- /* this can't be correct, ignore */
- DPRINTFN(WPI_DEBUG_TEMP,
- ("out-of-range temperature reported: %d\n", temp));
- return;
- }
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- DPRINTFN(WPI_DEBUG_TEMP,("temperature %d->%d\n", sc->temp, temp));
+ /* Clear pending interrupts. */
+ WPI_WRITE(sc, WPI_INT, 0xffffffff);
- /* adjust Tx power if need be */
- if (abs(temp - sc->temp) <= 6)
- return;
+ if ((error = wpi_apm_init(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not power ON adapter, error %d\n", __func__,
+ error);
+ return error;
+ }
- sc->temp = temp;
+ /* Select VMAIN power source. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
+ wpi_nic_unlock(sc);
+ /* Spin until VMAIN gets selected. */
+ for (ntries = 0; ntries < 5000; ntries++) {
+ if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
+ break;
+ DELAY(10);
+ }
+ if (ntries == 5000) {
+ device_printf(sc->sc_dev, "timeout selecting power source\n");
+ return ETIMEDOUT;
+ }
- if (wpi_set_txpower(sc, vap->iv_bss->ni_chan, 1) != 0) {
- /* just warn, too bad for the automatic calibration... */
- device_printf(sc->sc_dev,"could not adjust Tx power\n");
+ /* Perform adapter initialization. */
+ wpi_nic_config(sc);
+
+ /* Initialize RX ring. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ /* Set physical address of RX ring. */
+ WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
+ /* Set physical address of RX read pointer. */
+ WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
+ offsetof(struct wpi_shared, next));
+ WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
+ /* Enable RX. */
+ WPI_WRITE(sc, WPI_FH_RX_CONFIG,
+ WPI_FH_RX_CONFIG_DMA_ENA |
+ WPI_FH_RX_CONFIG_RDRBD_ENA |
+ WPI_FH_RX_CONFIG_WRSTATUS_ENA |
+ WPI_FH_RX_CONFIG_MAXFRAG |
+ WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
+ WPI_FH_RX_CONFIG_IRQ_DST_HOST |
+ WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
+ (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */
+ wpi_nic_unlock(sc);
+ WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);
+
+ /* Initialize TX rings. */
+ if ((error = wpi_nic_lock(sc)) != 0)
+ return error;
+ wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */
+ wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */
+ /* Enable all 6 TX rings. */
+ wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
+ wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
+ wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
+ wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
+ wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
+ /* Set physical address of TX rings. */
+ WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
+ WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);
+
+ /* Enable all DMA channels. */
+ for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
+ WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
+ WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
+ WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
+ }
+ wpi_nic_unlock(sc);
+ (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */
+
+ /* Clear "radio off" and "commands blocked" bits. */
+ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
+ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);
+
+ /* Clear pending interrupts. */
+ WPI_WRITE(sc, WPI_INT, 0xffffffff);
+ /* Enable interrupts. */
+ WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
+
+ /* _Really_ make sure "radio off" bit is cleared! */
+ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
+ WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
+
+ if ((error = wpi_load_firmware(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not load firmware, error %d\n", __func__,
+ error);
+ return error;
}
+ /* Wait at most one second for firmware alive notification. */
+ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: timeout waiting for adapter to initialize, error %d\n",
+ __func__, error);
+ return error;
+ }
+
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
+
+ /* Do post-firmware initialization. */
+ return wpi_post_alive(sc);
}
-/**
- * Read the eeprom to find out what channels are valid for the given
- * band and update net80211 with what we find.
- */
static void
-wpi_read_eeprom_channels(struct wpi_softc *sc, int n)
+wpi_hw_stop(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- const struct wpi_chan_band *band = &wpi_bands[n];
- struct wpi_eeprom_chan channels[WPI_MAX_CHAN_PER_BAND];
- struct ieee80211_channel *c;
- int chan, i, passive;
+ int chnl, qid, ntries;
- wpi_read_prom_data(sc, band->addr, channels,
- band->nchan * sizeof (struct wpi_eeprom_chan));
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- for (i = 0; i < band->nchan; i++) {
- if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
- DPRINTFN(WPI_DEBUG_HW,
- ("Channel Not Valid: %d, band %d\n",
- band->chan[i],n));
- continue;
- }
+ if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
+ wpi_nic_lock(sc);
- passive = 0;
- chan = band->chan[i];
- c = &ic->ic_channels[ic->ic_nchans++];
+ WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);
- /* is active scan allowed on this channel? */
- if (!(channels[i].flags & WPI_EEPROM_CHAN_ACTIVE)) {
- passive = IEEE80211_CHAN_PASSIVE;
- }
+ /* Disable interrupts. */
+ WPI_WRITE(sc, WPI_INT_MASK, 0);
+ WPI_WRITE(sc, WPI_INT, 0xffffffff);
+ WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);
- if (n == 0) { /* 2GHz band */
- c->ic_ieee = chan;
- c->ic_freq = ieee80211_ieee2mhz(chan,
- IEEE80211_CHAN_2GHZ);
- c->ic_flags = IEEE80211_CHAN_B | passive;
+ /* Make sure we no longer hold the NIC lock. */
+ wpi_nic_unlock(sc);
- c = &ic->ic_channels[ic->ic_nchans++];
- c->ic_ieee = chan;
- c->ic_freq = ieee80211_ieee2mhz(chan,
- IEEE80211_CHAN_2GHZ);
- c->ic_flags = IEEE80211_CHAN_G | passive;
+ if (wpi_nic_lock(sc) == 0) {
+ /* Stop TX scheduler. */
+ wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
+ wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);
- } else { /* 5GHz band */
- /*
- * Some 3945ABG adapters support channels 7, 8, 11
- * and 12 in the 2GHz *and* 5GHz bands.
- * Because of limitations in our net80211(9) stack,
- * we can't support these channels in 5GHz band.
- * XXX not true; just need to map to proper frequency
- */
- if (chan <= 14)
- continue;
-
- c->ic_ieee = chan;
- c->ic_freq = ieee80211_ieee2mhz(chan,
- IEEE80211_CHAN_5GHZ);
- c->ic_flags = IEEE80211_CHAN_A | passive;
+ /* Stop all DMA channels. */
+ for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
+ WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
+ for (ntries = 0; ntries < 200; ntries++) {
+ if (WPI_READ(sc, WPI_FH_TX_STATUS) &
+ WPI_FH_TX_STATUS_IDLE(chnl))
+ break;
+ DELAY(10);
+ }
}
+ wpi_nic_unlock(sc);
+ }
- /* save maximum allowed power for this channel */
- sc->maxpwr[chan] = channels[i].maxpwr;
+ /* Stop RX ring. */
+ wpi_reset_rx_ring(sc);
-#if 0
- // XXX We can probably use this an get rid of maxpwr - ben 20070617
- ic->ic_channels[chan].ic_maxpower = channels[i].maxpwr;
- //ic->ic_channels[chan].ic_minpower...
- //ic->ic_channels[chan].ic_maxregtxpower...
-#endif
+ /* Reset all TX rings. */
+ for (qid = 0; qid < WPI_NTXQUEUES; qid++)
+ wpi_reset_tx_ring(sc, &sc->txq[qid]);
- DPRINTF(("adding chan %d (%dMHz) flags=0x%x maxpwr=%d"
- " passive=%d, offset %d\n", chan, c->ic_freq,
- channels[i].flags, sc->maxpwr[chan],
- (c->ic_flags & IEEE80211_CHAN_PASSIVE) != 0,
- ic->ic_nchans));
+ if (wpi_nic_lock(sc) == 0) {
+ wpi_prph_write(sc, WPI_APMG_CLK_DIS,
+ WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
+ wpi_nic_unlock(sc);
}
+ DELAY(5);
+ /* Power OFF adapter. */
+ wpi_apm_stop(sc);
}
static void
-wpi_read_eeprom_group(struct wpi_softc *sc, int n)
+wpi_radio_on(void *arg0, int pending)
{
- struct wpi_power_group *group = &sc->groups[n];
- struct wpi_eeprom_group rgroup;
- int i;
-
- wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, &rgroup,
- sizeof rgroup);
-
- /* save power group information */
- group->chan = rgroup.chan;
- group->maxpwr = rgroup.maxpwr;
- /* temperature at which the samples were taken */
- group->temp = (int16_t)le16toh(rgroup.temp);
+ struct wpi_softc *sc = arg0;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- DPRINTF(("power group %d: chan=%d maxpwr=%d temp=%d\n", n,
- group->chan, group->maxpwr, group->temp));
+ device_printf(sc->sc_dev, "RF switch: radio enabled\n");
- for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
- group->samples[i].index = rgroup.samples[i].index;
- group->samples[i].power = rgroup.samples[i].power;
+ if (vap != NULL) {
+ wpi_init(sc);
+ ieee80211_init(vap);
+ }
- DPRINTF(("\tsample %d: index=%d power=%d\n", i,
- group->samples[i].index, group->samples[i].power));
+ if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) {
+ WPI_LOCK(sc);
+ callout_stop(&sc->watchdog_rfkill);
+ WPI_UNLOCK(sc);
}
}
-/*
- * Update Tx power to match what is defined for channel `c'.
- */
-static int
-wpi_set_txpower(struct wpi_softc *sc, struct ieee80211_channel *c, int async)
+static void
+wpi_radio_off(void *arg0, int pending)
{
+ struct wpi_softc *sc = arg0;
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
- struct wpi_power_group *group;
- struct wpi_cmd_txpower txpower;
- u_int chan;
- int i;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- /* get channel number */
- chan = ieee80211_chan2ieee(ic, c);
+ device_printf(sc->sc_dev, "RF switch: radio disabled\n");
- /* find the power group to which this channel belongs */
- if (IEEE80211_IS_CHAN_5GHZ(c)) {
- for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
- if (chan <= group->chan)
- break;
- } else
- group = &sc->groups[0];
+ wpi_stop(sc);
+ if (vap != NULL)
+ ieee80211_stop(vap);
- memset(&txpower, 0, sizeof txpower);
- txpower.band = IEEE80211_IS_CHAN_5GHZ(c) ? 0 : 1;
- txpower.channel = htole16(chan);
+ callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
+}
- /* set Tx power for all OFDM and CCK rates */
- for (i = 0; i <= 11 ; i++) {
- /* retrieve Tx power for this channel/rate combination */
- int idx = wpi_get_power_index(sc, group, c,
- wpi_ridx_to_rate[i]);
+static void
+wpi_init_locked(struct wpi_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ int error;
- txpower.rates[i].rate = wpi_ridx_to_plcp[i];
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if (IEEE80211_IS_CHAN_5GHZ(c)) {
- txpower.rates[i].gain_radio = wpi_rf_gain_5ghz[idx];
- txpower.rates[i].gain_dsp = wpi_dsp_gain_5ghz[idx];
- } else {
- txpower.rates[i].gain_radio = wpi_rf_gain_2ghz[idx];
- txpower.rates[i].gain_dsp = wpi_dsp_gain_2ghz[idx];
- }
- DPRINTFN(WPI_DEBUG_TEMP,("chan %d/rate %d: power index %d\n",
- chan, wpi_ridx_to_rate[i], idx));
+ WPI_LOCK_ASSERT(sc);
+
+ /* Check that the radio is not disabled by hardware switch. */
+ if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
+ device_printf(sc->sc_dev,
+ "RF switch: radio disabled (%s)\n", __func__);
+ callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
+ sc);
+ return;
}
- return wpi_cmd(sc, WPI_CMD_TXPOWER, &txpower, sizeof txpower, async);
-}
+ /* Read firmware images from the filesystem. */
+ if ((error = wpi_read_firmware(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not read firmware, error %d\n", __func__,
+ error);
+ goto fail;
+ }
-/*
- * Determine Tx power index for a given channel/rate combination.
- * This takes into account the regulatory information from EEPROM and the
- * current temperature.
- */
-static int
-wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
- struct ieee80211_channel *c, int rate)
-{
-/* fixed-point arithmetic division using a n-bit fractional part */
-#define fdivround(a, b, n) \
- ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
+ /* Initialize hardware and upload firmware. */
+ error = wpi_hw_init(sc);
+ wpi_unload_firmware(sc);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not initialize hardware, error %d\n", __func__,
+ error);
+ goto fail;
+ }
-/* linear interpolation */
-#define interpolate(x, x1, y1, x2, y2, n) \
- ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
+ /* Configure adapter now that it is ready. */
+ if ((error = wpi_config(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "%s: could not configure device, error %d\n", __func__,
+ error);
+ goto fail;
+ }
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct wpi_power_sample *sample;
- int pwr, idx;
- u_int chan;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
- /* get channel number */
- chan = ieee80211_chan2ieee(ic, c);
+ callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
- /* default power is group's maximum power - 3dB */
- pwr = group->maxpwr / 2;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
- /* decrease power for highest OFDM rates to reduce distortion */
- switch (rate) {
- case 72: /* 36Mb/s */
- pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 0 : 5;
- break;
- case 96: /* 48Mb/s */
- pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 7 : 10;
- break;
- case 108: /* 54Mb/s */
- pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 9 : 12;
- break;
- }
+ return;
- /* never exceed channel's maximum allowed Tx power */
- pwr = min(pwr, sc->maxpwr[chan]);
+fail: wpi_stop_locked(sc);
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+}
- /* retrieve power index into gain tables from samples */
- for (sample = group->samples; sample < &group->samples[3]; sample++)
- if (pwr > sample[1].power)
- break;
- /* fixed-point linear interpolation using a 19-bit fractional part */
- idx = interpolate(pwr, sample[0].power, sample[0].index,
- sample[1].power, sample[1].index, 19);
+static void
+wpi_init(void *arg)
+{
+ struct wpi_softc *sc = arg;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
- /*
- * Adjust power index based on current temperature
- * - if colder than factory-calibrated: decreate output power
- * - if warmer than factory-calibrated: increase output power
- */
- idx -= (sc->temp - group->temp) * 11 / 100;
+ WPI_LOCK(sc);
+ wpi_init_locked(sc);
+ WPI_UNLOCK(sc);
- /* decrease power for CCK rates (-5dB) */
- if (!WPI_RATE_IS_OFDM(rate))
- idx += 10;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ieee80211_start_all(ic);
+}
- /* keep power index in a valid range */
- if (idx < 0)
- return 0;
- if (idx > WPI_MAX_PWR_INDEX)
- return WPI_MAX_PWR_INDEX;
- return idx;
+static void
+wpi_stop_locked(struct wpi_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
-#undef interpolate
-#undef fdivround
+ WPI_LOCK_ASSERT(sc);
+
+ sc->sc_scan_timer = 0;
+ sc->sc_tx_timer = 0;
+ callout_stop(&sc->watchdog_to);
+ callout_stop(&sc->calib_to);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ /* Power OFF hardware. */
+ wpi_hw_stop(sc);
}
-/**
- * Called by net80211 framework to indicate that a scan
- * is starting. This function doesn't actually do the scan,
- * wpi_scan_curchan starts things off. This function is more
- * of an early warning from the framework we should get ready
- * for the scan.
+static void
+wpi_stop(struct wpi_softc *sc)
+{
+ WPI_LOCK(sc);
+ wpi_stop_locked(sc);
+ WPI_UNLOCK(sc);
+}
+
+/*
+ * Callback from net80211 to start a scan.
*/
static void
wpi_scan_start(struct ieee80211com *ic)
@@ -3513,15 +4768,21 @@ wpi_scan_start(struct ieee80211com *ic)
WPI_UNLOCK(sc);
}
-/**
- * Called by the net80211 framework, indicates that the
- * scan has ended. If there is a scan in progress on the card
- * then it should be aborted.
+/*
+ * Callback from net80211 to terminate a scan.
*/
static void
wpi_scan_end(struct ieee80211com *ic)
{
- /* XXX ignore */
+ struct ifnet *ifp = ic->ic_ifp;
+ struct wpi_softc *sc = ifp->if_softc;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ WPI_LOCK(sc);
+ wpi_set_led(sc, WPI_LED_LINK, 0, 1);
+ WPI_UNLOCK(sc);
+ }
}
/**
@@ -3531,22 +4792,38 @@ wpi_scan_end(struct ieee80211com *ic)
static void
wpi_set_channel(struct ieee80211com *ic)
{
+ const struct ieee80211_channel *c = ic->ic_curchan;
struct ifnet *ifp = ic->ic_ifp;
struct wpi_softc *sc = ifp->if_softc;
int error;
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+
+ WPI_LOCK(sc);
+ sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
+ sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
+ sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
+ sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
+
/*
* Only need to set the channel in Monitor mode. AP scanning and auth
* are already taken care of by their respective firmware commands.
*/
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
- WPI_LOCK(sc);
- error = wpi_config(sc);
- WPI_UNLOCK(sc);
- if (error != 0)
+ sc->rxon.chan = ieee80211_chan2ieee(ic, c);
+ if (IEEE80211_IS_CHAN_2GHZ(c)) {
+ sc->rxon.flags |= htole32(WPI_RXON_AUTO |
+ WPI_RXON_24GHZ);
+ } else {
+ sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
+ WPI_RXON_24GHZ);
+ }
+ if ((error = wpi_send_rxon(sc, 0, 0)) != 0)
device_printf(sc->sc_dev,
- "error %d settting channel\n", error);
+ "%s: error %d settting channel\n", __func__,
+ error);
}
+ WPI_UNLOCK(sc);
}
/**
@@ -3558,13 +4835,21 @@ static void
wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
struct ieee80211vap *vap = ss->ss_vap;
- struct ifnet *ifp = vap->iv_ic->ic_ifp;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = ic->ic_ifp;
struct wpi_softc *sc = ifp->if_softc;
+ int error;
- WPI_LOCK(sc);
- if (wpi_scan(sc))
- ieee80211_cancel_scan(vap);
- WPI_UNLOCK(sc);
+ if (sc->rxon.chan != ieee80211_chan2ieee(ic, ic->ic_curchan)) {
+ WPI_LOCK(sc);
+ error = wpi_scan(sc, ic->ic_curchan);
+ WPI_UNLOCK(sc);
+ if (error != 0)
+ ieee80211_cancel_scan(vap);
+ } else {
+ /* Send probe request when associated. */
+ sc->sc_scan_curchan(ss, maxdwell);
+ }
}
/**
@@ -3580,118 +4865,19 @@ wpi_scan_mindwell(struct ieee80211_scan_state *ss)
}
static void
-wpi_hwreset(void *arg, int pending)
-{
- struct wpi_softc *sc = arg;
-
- WPI_LOCK(sc);
- wpi_init_locked(sc, 0);
- WPI_UNLOCK(sc);
-}
-
-static void
-wpi_rfreset(void *arg, int pending)
-{
- struct wpi_softc *sc = arg;
-
- WPI_LOCK(sc);
- wpi_rfkill_resume(sc);
- WPI_UNLOCK(sc);
-}
-
-/*
- * Allocate DMA-safe memory for firmware transfer.
- */
-static int
-wpi_alloc_fwmem(struct wpi_softc *sc)
-{
- /* allocate enough contiguous space to store text and data */
- return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
- WPI_FW_MAIN_TEXT_MAXSZ + WPI_FW_MAIN_DATA_MAXSZ, 1,
- BUS_DMA_NOWAIT);
-}
-
-static void
-wpi_free_fwmem(struct wpi_softc *sc)
-{
- wpi_dma_contig_free(&sc->fw_dma);
-}
-
-/**
- * Called every second, wpi_watchdog used by the watch dog timer
- * to check that the card is still alive
- */
-static void
-wpi_watchdog(void *arg)
+wpi_hw_reset(void *arg, int pending)
{
struct wpi_softc *sc = arg;
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
- uint32_t tmp;
-
- DPRINTFN(WPI_DEBUG_WATCHDOG,("Watchdog: tick\n"));
-
- if (sc->flags & WPI_FLAG_HW_RADIO_OFF) {
- /* No need to lock firmware memory */
- tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF);
-
- if ((tmp & 0x1) == 0) {
- /* Radio kill switch is still off */
- callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
- return;
- }
-
- device_printf(sc->sc_dev, "Hardware Switch Enabled\n");
- ieee80211_runtask(ic, &sc->sc_radiotask);
- return;
- }
-
- if (sc->sc_tx_timer > 0) {
- if (--sc->sc_tx_timer == 0) {
- device_printf(sc->sc_dev,"device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- ieee80211_runtask(ic, &sc->sc_restarttask);
- }
- }
- if (sc->sc_scan_timer > 0) {
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- if (--sc->sc_scan_timer == 0 && vap != NULL) {
- device_printf(sc->sc_dev,"scan timeout\n");
- ieee80211_cancel_scan(vap);
- ieee80211_runtask(ic, &sc->sc_restarttask);
- }
- }
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc);
-}
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
-#ifdef WPI_DEBUG
-static const char *wpi_cmd_str(int cmd)
-{
- switch (cmd) {
- case WPI_DISABLE_CMD: return "WPI_DISABLE_CMD";
- case WPI_CMD_CONFIGURE: return "WPI_CMD_CONFIGURE";
- case WPI_CMD_ASSOCIATE: return "WPI_CMD_ASSOCIATE";
- case WPI_CMD_SET_WME: return "WPI_CMD_SET_WME";
- case WPI_CMD_TSF: return "WPI_CMD_TSF";
- case WPI_CMD_ADD_NODE: return "WPI_CMD_ADD_NODE";
- case WPI_CMD_TX_DATA: return "WPI_CMD_TX_DATA";
- case WPI_CMD_MRR_SETUP: return "WPI_CMD_MRR_SETUP";
- case WPI_CMD_SET_LED: return "WPI_CMD_SET_LED";
- case WPI_CMD_SET_POWER_MODE: return "WPI_CMD_SET_POWER_MODE";
- case WPI_CMD_SCAN: return "WPI_CMD_SCAN";
- case WPI_CMD_SET_BEACON:return "WPI_CMD_SET_BEACON";
- case WPI_CMD_TXPOWER: return "WPI_CMD_TXPOWER";
- case WPI_CMD_BLUETOOTH: return "WPI_CMD_BLUETOOTH";
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
- default:
- KASSERT(1, ("Unknown Command: %d\n", cmd));
- return "UNKNOWN CMD"; /* Make the compiler happy */
- }
+ wpi_stop(sc);
+ if (vap != NULL)
+ ieee80211_stop(vap);
+ wpi_init(sc);
+ if (vap != NULL)
+ ieee80211_init(vap);
}
-#endif
-
-MODULE_DEPEND(wpi, pci, 1, 1, 1);
-MODULE_DEPEND(wpi, wlan, 1, 1, 1);
-MODULE_DEPEND(wpi, firmware, 1, 1, 1);
diff --git a/sys/dev/wpi/if_wpi_debug.h b/sys/dev/wpi/if_wpi_debug.h
new file mode 100644
index 000000000000..91a7383e348d
--- /dev/null
+++ b/sys/dev/wpi/if_wpi_debug.h
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (c) 2006,2007
+ * Damien Bergamini <damien.bergamini@free.fr>
+ * Benjamin Close <Benjamin.Close@clearchain.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __IF_WPI_DEBUG_H__
+#define __IF_WPI_DEBUG_H__
+
+#ifdef WPI_DEBUG
+enum {
+ WPI_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
+ WPI_DEBUG_RECV = 0x00000002, /* basic recv operation */
+ WPI_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
+ WPI_DEBUG_HW = 0x00000008, /* Stage 1 (eeprom) debugging */
+ WPI_DEBUG_RESET = 0x00000010, /* reset processing */
+ WPI_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */
+ WPI_DEBUG_BEACON = 0x00000040, /* beacon handling */
+ WPI_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
+ WPI_DEBUG_INTR = 0x00000100, /* ISR */
+ WPI_DEBUG_SCAN = 0x00000200, /* Scan related operations */
+ WPI_DEBUG_NOTIFY = 0x00000400, /* Stage 2 notification intr debug */
+ WPI_DEBUG_TEMP = 0x00000800, /* TXPower/Temp Calibration */
+ WPI_DEBUG_CMD = 0x00001000, /* cmd submission */
+ WPI_DEBUG_TRACE = 0x00002000, /* Print begin and end of driver functions */
+ WPI_DEBUG_PWRSAVE = 0x00004000, /* Power save operations */
+ WPI_DEBUG_EEPROM = 0x00008000, /* EEPROM info */
+ WPI_DEBUG_KEY = 0x00010000, /* node key management */
+ WPI_DEBUG_EDCA = 0x00020000, /* WME info */
+ WPI_DEBUG_ANY = 0xffffffff
+};
+
+#define DPRINTF(sc, m, ...) do { \
+ if (sc->sc_debug & (m)) \
+ printf(__VA_ARGS__); \
+} while (0)
+
+#define TRACE_STR_BEGIN "->%s: begin\n"
+#define TRACE_STR_DOING "->Doing %s\n"
+#define TRACE_STR_END "->%s: end\n"
+#define TRACE_STR_END_ERR "->%s: end in error\n"
+
+static const char *wpi_cmd_str(int cmd)
+{
+ switch (cmd) {
+ /* Notifications */
+ case WPI_UC_READY: return "UC_READY";
+ case WPI_RX_DONE: return "RX_DONE";
+ case WPI_START_SCAN: return "START_SCAN";
+ case WPI_SCAN_RESULTS: return "SCAN_RESULTS";
+ case WPI_STOP_SCAN: return "STOP_SCAN";
+ case WPI_BEACON_SENT: return "BEACON_SENT";
+ case WPI_RX_STATISTICS: return "RX_STATS";
+ case WPI_BEACON_STATISTICS: return "BEACON_STATS";
+ case WPI_STATE_CHANGED: return "STATE_CHANGED";
+ case WPI_BEACON_MISSED: return "BEACON_MISSED";
+
+ /* Command notifications */
+ case WPI_CMD_RXON: return "WPI_CMD_RXON";
+ case WPI_CMD_RXON_ASSOC: return "WPI_CMD_RXON_ASSOC";
+ case WPI_CMD_EDCA_PARAMS: return "WPI_CMD_EDCA_PARAMS";
+ case WPI_CMD_TIMING: return "WPI_CMD_TIMING";
+ case WPI_CMD_ADD_NODE: return "WPI_CMD_ADD_NODE";
+ case WPI_CMD_DEL_NODE: return "WPI_CMD_DEL_NODE";
+ case WPI_CMD_TX_DATA: return "WPI_CMD_TX_DATA";
+ case WPI_CMD_MRR_SETUP: return "WPI_CMD_MRR_SETUP";
+ case WPI_CMD_SET_LED: return "WPI_CMD_SET_LED";
+ case WPI_CMD_SET_POWER_MODE: return "WPI_CMD_SET_POWER_MODE";
+ case WPI_CMD_SCAN: return "WPI_CMD_SCAN";
+ case WPI_CMD_SET_BEACON: return "WPI_CMD_SET_BEACON";
+ case WPI_CMD_TXPOWER: return "WPI_CMD_TXPOWER";
+ case WPI_CMD_BT_COEX: return "WPI_CMD_BT_COEX";
+
+ default:
+ KASSERT(1, ("Unknown Command: %d\n", cmd));
+ return "UNKNOWN CMD";
+ }
+}
+
+#else
+#define DPRINTF(sc, m, ...) do { (void) sc; } while (0)
+#endif
+
+#endif /* __IF_WPI_DEBUG_H__ */
diff --git a/sys/dev/wpi/if_wpireg.h b/sys/dev/wpi/if_wpireg.h
index 60d183a6736b..cfd8a09ad282 100644
--- a/sys/dev/wpi/if_wpireg.h
+++ b/sys/dev/wpi/if_wpireg.h
@@ -18,155 +18,207 @@
*/
#define WPI_TX_RING_COUNT 256
-#define WPI_CMD_RING_COUNT 256
-#define WPI_RX_RING_COUNT 64
+#define WPI_TX_RING_LOMARK 192
+#define WPI_TX_RING_HIMARK 224
+#define WPI_RX_RING_COUNT_LOG 6
+#define WPI_RX_RING_COUNT (1 << WPI_RX_RING_COUNT_LOG)
+
+#define WPI_NTXQUEUES 8
+#define WPI_NDMACHNLS 6
+
+/* Maximum scatter/gather. */
+#define WPI_MAX_SCATTER 4
/*
* Rings must be aligned on a 16K boundary.
*/
#define WPI_RING_DMA_ALIGN 0x4000
-/* maximum scatter/gather */
-#define WPI_MAX_SCATTER 4
-
-/* maximum Rx buffer size */
+/* Maximum Rx buffer size. */
#define WPI_RBUF_SIZE ( 3 * 1024 ) /* XXX 3000 but must be aligned */
/*
* Control and status registers.
*/
-#define WPI_HWCONFIG 0x000
-#define WPI_INTR 0x008
-#define WPI_MASK 0x00c
-#define WPI_INTR_STATUS 0x010
-#define WPI_GPIO_STATUS 0x018
+#define WPI_HW_IF_CONFIG 0x000
+#define WPI_INT 0x008
+#define WPI_INT_MASK 0x00c
+#define WPI_FH_INT 0x010
+#define WPI_GPIO_IN 0x018
#define WPI_RESET 0x020
-#define WPI_GPIO_CTL 0x024
-#define WPI_EEPROM_CTL 0x02c
-#define WPI_EEPROM_STATUS 0x030
-#define WPI_UCODE_SET 0x058
-#define WPI_UCODE_CLR 0x05c
-#define WPI_TEMPERATURE 0x060
-#define WPI_CHICKEN 0x100
-#define WPI_PLL_CTL 0x20c
-#define WPI_WRITE_MEM_ADDR 0x444
-#define WPI_READ_MEM_ADDR 0x448
-#define WPI_WRITE_MEM_DATA 0x44c
-#define WPI_READ_MEM_DATA 0x450
-#define WPI_TX_WIDX 0x460
-#define WPI_TX_CTL(qid) (0x940 + (qid) * 8)
-#define WPI_TX_BASE(qid) (0x944 + (qid) * 8)
-#define WPI_TX_DESC(qid) (0x980 + (qid) * 80)
-#define WPI_RX_CONFIG 0xc00
-#define WPI_RX_BASE 0xc04
-#define WPI_RX_WIDX 0xc20
-#define WPI_RX_RIDX_PTR 0xc24
-#define WPI_RX_CTL 0xcc0
-#define WPI_RX_STATUS 0xcc4
-#define WPI_TX_CONFIG(qid) (0xd00 + (qid) * 32)
-#define WPI_TX_CREDIT(qid) (0xd04 + (qid) * 32)
-#define WPI_TX_STATE(qid) (0xd08 + (qid) * 32)
-#define WPI_TX_BASE_PTR 0xe80
-#define WPI_MSG_CONFIG 0xe88
-#define WPI_TX_STATUS 0xe90
+#define WPI_GP_CNTRL 0x024
+#define WPI_EEPROM 0x02c
+#define WPI_EEPROM_GP 0x030
+#define WPI_GIO 0x03c
+#define WPI_UCODE_GP1 0x054
+#define WPI_UCODE_GP1_SET 0x058
+#define WPI_UCODE_GP1_CLR 0x05c
+#define WPI_UCODE_GP2 0x060
+#define WPI_GIO_CHICKEN 0x100
+#define WPI_ANA_PLL 0x20c
+#define WPI_DBG_HPET_MEM 0x240
+#define WPI_MEM_RADDR 0x40c
+#define WPI_MEM_WADDR 0x410
+#define WPI_MEM_WDATA 0x418
+#define WPI_MEM_RDATA 0x41c
+#define WPI_PRPH_WADDR 0x444
+#define WPI_PRPH_RADDR 0x448
+#define WPI_PRPH_WDATA 0x44c
+#define WPI_PRPH_RDATA 0x450
+#define WPI_HBUS_TARG_WRPTR 0x460
+
+/*
+ * Flow-Handler registers.
+ */
+#define WPI_FH_CBBC_CTRL(qid) (0x940 + (qid) * 8)
+#define WPI_FH_CBBC_BASE(qid) (0x944 + (qid) * 8)
+#define WPI_FH_RX_CONFIG 0xc00
+#define WPI_FH_RX_BASE 0xc04
+#define WPI_FH_RX_WPTR 0xc20
+#define WPI_FH_RX_RPTR_ADDR 0xc24
+#define WPI_FH_RSSR_TBL 0xcc0
+#define WPI_FH_RX_STATUS 0xcc4
+#define WPI_FH_TX_CONFIG(qid) (0xd00 + (qid) * 32)
+#define WPI_FH_TX_BASE 0xe80
+#define WPI_FH_MSG_CONFIG 0xe88
+#define WPI_FH_TX_STATUS 0xe90
/*
* NIC internal memory offsets.
*/
-#define WPI_MEM_MODE 0x2e00
-#define WPI_MEM_RA 0x2e04
-#define WPI_MEM_TXCFG 0x2e10
-#define WPI_MEM_MAGIC4 0x2e14
-#define WPI_MEM_MAGIC5 0x2e20
-#define WPI_MEM_BYPASS1 0x2e2c
-#define WPI_MEM_BYPASS2 0x2e30
-#define WPI_MEM_CLOCK1 0x3004
-#define WPI_MEM_CLOCK2 0x3008
-#define WPI_MEM_POWER 0x300c
-#define WPI_MEM_PCIDEV 0x3010
-#define WPI_MEM_HW_RADIO_OFF 0x3014
-#define WPI_MEM_UCODE_CTL 0x3400
-#define WPI_MEM_UCODE_SRC 0x3404
-#define WPI_MEM_UCODE_DST 0x3408
-#define WPI_MEM_UCODE_SIZE 0x340c
-#define WPI_MEM_UCODE_BASE 0x3800
-
-#define WPI_MEM_TEXT_BASE 0x3490
-#define WPI_MEM_TEXT_SIZE 0x3494
-#define WPI_MEM_DATA_BASE 0x3498
-#define WPI_MEM_DATA_SIZE 0x349c
-
-
-/* possible flags for register WPI_HWCONFIG */
-#define WPI_HW_ALM_MB (1 << 8)
-#define WPI_HW_ALM_MM (1 << 9)
-#define WPI_HW_SKU_MRC (1 << 10)
-#define WPI_HW_REV_D (1 << 11)
-#define WPI_HW_TYPE_B (1 << 12)
-
-/* possible flags for registers WPI_READ_MEM_ADDR/WPI_WRITE_MEM_ADDR */
-#define WPI_MEM_4 ((sizeof (uint32_t) - 1) << 24)
-
-/* possible values for WPI_MEM_UCODE_DST */
-#define WPI_FW_TEXT 0x00000000
-
-/* possible flags for WPI_GPIO_STATUS */
-#define WPI_POWERED (1 << 9)
-
-/* possible flags for register WPI_RESET */
-#define WPI_NEVO_RESET (1 << 0)
-#define WPI_SW_RESET (1 << 7)
-#define WPI_MASTER_DISABLED (1 << 8)
-#define WPI_STOP_MASTER (1 << 9)
-
-/* possible flags for register WPI_GPIO_CTL */
-#define WPI_GPIO_CLOCK (1 << 0)
-#define WPI_GPIO_INIT (1 << 2)
-#define WPI_GPIO_MAC (1 << 3)
-#define WPI_GPIO_SLEEP (1 << 4)
-#define WPI_GPIO_PWR_STATUS 0x07000000
-#define WPI_GPIO_PWR_SLEEP (4 << 24)
-
-/* possible flags for register WPI_CHICKEN */
-#define WPI_CHICKEN_RXNOLOS (1 << 23)
-
-/* possible flags for register WPI_PLL_CTL */
-#define WPI_PLL_INIT (1 << 24)
-
-/* possible flags for register WPI_UCODE_CLR */
-#define WPI_RADIO_OFF (1 << 1)
-#define WPI_DISABLE_CMD (1 << 2)
-
-/* possible flags for WPI_RX_STATUS */
-#define WPI_RX_IDLE (1 << 24)
-
-/* possible flags for register WPI_UC_CTL */
-#define WPI_UC_ENABLE (1 << 30)
-#define WPI_UC_RUN (1U << 31)
-
-/* possible flags for register WPI_INTR_CSR */
-#define WPI_ALIVE_INTR (1 << 0)
-#define WPI_WAKEUP_INTR (1 << 1)
-#define WPI_SW_ERROR (1 << 25)
-#define WPI_TX_INTR (1 << 27)
-#define WPI_HW_ERROR (1 << 29)
-#define WPI_RX_INTR (1U << 31)
-
-#define WPI_INTR_MASK \
- (WPI_SW_ERROR | WPI_HW_ERROR | WPI_TX_INTR | WPI_RX_INTR | \
- WPI_ALIVE_INTR | WPI_WAKEUP_INTR)
-
-/* possible flags for register WPI_TX_STATUS */
-#define WPI_TX_IDLE(qid) (1 << ((qid) + 24) | 1 << ((qid) + 16))
-
-/* possible flags for register WPI_EEPROM_CTL */
-#define WPI_EEPROM_READY (1 << 0)
-
-/* possible flags for register WPI_EEPROM_STATUS */
+#define WPI_ALM_SCHED_MODE 0x2e00
+#define WPI_ALM_SCHED_ARASTAT 0x2e04
+#define WPI_ALM_SCHED_TXFACT 0x2e10
+#define WPI_ALM_SCHED_TXF4MF 0x2e14
+#define WPI_ALM_SCHED_TXF5MF 0x2e20
+#define WPI_ALM_SCHED_SBYPASS_MODE1 0x2e2c
+#define WPI_ALM_SCHED_SBYPASS_MODE2 0x2e30
+#define WPI_APMG_CLK_EN 0x3004
+#define WPI_APMG_CLK_DIS 0x3008
+#define WPI_APMG_PS 0x300c
+#define WPI_APMG_PCI_STT 0x3010
+#define WPI_APMG_RFKILL 0x3014
+#define WPI_BSM_WR_CTRL 0x3400
+#define WPI_BSM_WR_MEM_SRC 0x3404
+#define WPI_BSM_WR_MEM_DST 0x3408
+#define WPI_BSM_WR_DWCOUNT 0x340c
+#define WPI_BSM_DRAM_TEXT_ADDR 0x3490
+#define WPI_BSM_DRAM_TEXT_SIZE 0x3494
+#define WPI_BSM_DRAM_DATA_ADDR 0x3498
+#define WPI_BSM_DRAM_DATA_SIZE 0x349c
+#define WPI_BSM_SRAM_BASE 0x3800
+
+
+/* Possible flags for register WPI_HW_IF_CONFIG. */
+#define WPI_HW_IF_CONFIG_ALM_MB (1 << 8)
+#define WPI_HW_IF_CONFIG_ALM_MM (1 << 9)
+#define WPI_HW_IF_CONFIG_SKU_MRC (1 << 10)
+#define WPI_HW_IF_CONFIG_REV_D (1 << 11)
+#define WPI_HW_IF_CONFIG_TYPE_B (1 << 12)
+
+/* Possible flags for registers WPI_PRPH_RADDR/WPI_PRPH_WADDR. */
+#define WPI_PRPH_DWORD ((sizeof (uint32_t) - 1) << 24)
+
+/* Possible values for WPI_BSM_WR_MEM_DST. */
+#define WPI_FW_TEXT_BASE 0x00000000
+#define WPI_FW_DATA_BASE 0x00800000
+
+/* Possible flags for WPI_GPIO_IN. */
+#define WPI_GPIO_IN_VMAIN (1 << 9)
+
+/* Possible flags for register WPI_RESET. */
+#define WPI_RESET_NEVO (1 << 0)
+#define WPI_RESET_SW (1 << 7)
+#define WPI_RESET_MASTER_DISABLED (1 << 8)
+#define WPI_RESET_STOP_MASTER (1 << 9)
+
+/* Possible flags for register WPI_GP_CNTRL. */
+#define WPI_GP_CNTRL_MAC_ACCESS_ENA (1 << 0)
+#define WPI_GP_CNTRL_MAC_CLOCK_READY (1 << 0)
+#define WPI_GP_CNTRL_INIT_DONE (1 << 2)
+#define WPI_GP_CNTRL_MAC_ACCESS_REQ (1 << 3)
+#define WPI_GP_CNTRL_SLEEP (1 << 4)
+#define WPI_GP_CNTRL_PS_MASK (7 << 24)
+#define WPI_GP_CNTRL_MAC_PS (4 << 24)
+#define WPI_GP_CNTRL_RFKILL (1 << 27)
+
+/* Possible flags for register WPI_GIO_CHICKEN. */
+#define WPI_GIO_CHICKEN_L1A_NO_L0S_RX (1 << 23)
+#define WPI_GIO_CHICKEN_DIS_L0S_TIMER (1 << 29)
+
+/* Possible flags for register WPI_GIO. */
+#define WPI_GIO_L0S_ENA (1 << 1)
+
+/* Possible flags for register WPI_FH_RX_CONFIG. */
+#define WPI_FH_RX_CONFIG_DMA_ENA (1U << 31)
+#define WPI_FH_RX_CONFIG_RDRBD_ENA (1 << 29)
+#define WPI_FH_RX_CONFIG_WRSTATUS_ENA (1 << 27)
+#define WPI_FH_RX_CONFIG_MAXFRAG (1 << 24)
+#define WPI_FH_RX_CONFIG_NRBD(x) ((x) << 20)
+#define WPI_FH_RX_CONFIG_IRQ_DST_HOST (1 << 12)
+#define WPI_FH_RX_CONFIG_IRQ_TIMEOUT(x) ((x) << 4)
+
+/* Possible flags for register WPI_ANA_PLL. */
+#define WPI_ANA_PLL_INIT (1 << 24)
+
+/* Possible flags for register WPI_UCODE_GP1*. */
+#define WPI_UCODE_GP1_MAC_SLEEP (1 << 0)
+#define WPI_UCODE_GP1_RFKILL (1 << 1)
+#define WPI_UCODE_GP1_CMD_BLOCKED (1 << 2)
+
+/* Possible flags for register WPI_FH_RX_STATUS. */
+#define WPI_FH_RX_STATUS_IDLE (1 << 24)
+
+/* Possible flags for register WPI_BSM_WR_CTRL. */
+#define WPI_BSM_WR_CTRL_START_EN (1 << 30)
+#define WPI_BSM_WR_CTRL_START (1U << 31)
+
+/* Possible flags for register WPI_INT. */
+#define WPI_INT_ALIVE (1 << 0)
+#define WPI_INT_WAKEUP (1 << 1)
+#define WPI_INT_SW_RX (1 << 3)
+#define WPI_INT_SW_ERR (1 << 25)
+#define WPI_INT_FH_TX (1 << 27)
+#define WPI_INT_HW_ERR (1 << 29)
+#define WPI_INT_FH_RX (1U << 31)
+
+/* Shortcut. */
+#define WPI_INT_MASK_DEF \
+ (WPI_INT_SW_ERR | WPI_INT_HW_ERR | WPI_INT_FH_TX | \
+ WPI_INT_FH_RX | WPI_INT_ALIVE | WPI_INT_WAKEUP | \
+ WPI_INT_SW_RX)
+
+/* Possible flags for register WPI_FH_INT. */
+#define WPI_FH_INT_RX_CHNL(x) (1 << ((x) + 16))
+#define WPI_FH_INT_HI_PRIOR (1 << 30)
+/* Shortcuts for the above. */
+#define WPI_FH_INT_RX \
+ (WPI_FH_INT_RX_CHNL(0) | \
+ WPI_FH_INT_RX_CHNL(1) | \
+ WPI_FH_INT_RX_CHNL(2) | \
+ WPI_FH_INT_HI_PRIOR)
+
+/* Possible flags for register WPI_FH_TX_STATUS. */
+#define WPI_FH_TX_STATUS_IDLE(qid) \
+ (1 << ((qid) + 24) | 1 << ((qid) + 16))
+
+/* Possible flags for register WPI_EEPROM. */
+#define WPI_EEPROM_READ_VALID (1 << 0)
+
+/* Possible flags for register WPI_EEPROM_GP. */
#define WPI_EEPROM_VERSION 0x00000007
-#define WPI_EEPROM_LOCKED 0x00000180
+#define WPI_EEPROM_GP_IF_OWNER 0x00000180
+
+/* Possible flags for register WPI_APMG_PS. */
+#define WPI_APMG_PS_PWR_SRC_MASK (3 << 24)
+/* Possible flags for registers WPI_APMG_CLK_*. */
+#define WPI_APMG_CLK_CTRL_DMA_CLK_RQT (1 << 9)
+#define WPI_APMG_CLK_CTRL_BSM_CLK_RQT (1 << 11)
+
+/* Possible flags for register WPI_APMG_PCI_STT. */
+#define WPI_APMG_PCI_STT_L1A_DIS (1 << 11)
struct wpi_shared {
uint32_t txbase[8];
@@ -176,20 +228,21 @@ struct wpi_shared {
#define WPI_MAX_SEG_LEN 65520
struct wpi_tx_desc {
- uint32_t flags;
+ uint8_t reserved1[3];
+ uint8_t nsegs;
#define WPI_PAD32(x) (roundup2(x, 4) - (x))
struct {
uint32_t addr;
uint32_t len;
- } __attribute__((__packed__)) segs[WPI_MAX_SCATTER];
- uint8_t reserved[28];
+ } __packed segs[WPI_MAX_SCATTER];
+ uint8_t reserved2[28];
} __packed;
struct wpi_tx_stat {
- uint8_t nrts;
- uint8_t ntries;
- uint8_t nkill;
+ uint8_t rtsfailcnt;
+ uint8_t ackfailcnt;
+ uint8_t btkillcnt;
uint8_t rate;
uint32_t duration;
uint32_t status;
@@ -204,8 +257,11 @@ struct wpi_rx_desc {
#define WPI_START_SCAN 130
#define WPI_SCAN_RESULTS 131
#define WPI_STOP_SCAN 132
+#define WPI_BEACON_SENT 144
+#define WPI_RX_STATISTICS 156
+#define WPI_BEACON_STATISTICS 157
#define WPI_STATE_CHANGED 161
-#define WPI_MISSED_BEACON 162
+#define WPI_BEACON_MISSED 162
uint8_t flags;
uint8_t idx;
@@ -228,8 +284,10 @@ struct wpi_rx_stat {
struct wpi_rx_head {
uint16_t chan;
uint16_t flags;
+#define WPI_STAT_FLAG_SHPREAMBLE (1 << 2)
+
uint8_t reserved;
- uint8_t rate;
+ uint8_t plcp;
uint16_t len;
} __packed;
@@ -239,17 +297,23 @@ struct wpi_rx_tail {
#define WPI_RX_NO_OVFL_ERR (1 << 1)
/* shortcut for the above */
#define WPI_RX_NOERROR (WPI_RX_NO_CRC_ERR | WPI_RX_NO_OVFL_ERR)
+#define WPI_RX_CIPHER_MASK (7 << 8)
+#define WPI_RX_CIPHER_CCMP (2 << 8)
+#define WPI_RX_DECRYPT_MASK (3 << 11)
+#define WPI_RX_DECRYPT_OK (3 << 11)
+
uint64_t tstamp;
uint32_t tbeacon;
} __packed;
struct wpi_tx_cmd {
uint8_t code;
-#define WPI_CMD_CONFIGURE 16
-#define WPI_CMD_ASSOCIATE 17
-#define WPI_CMD_SET_WME 19
-#define WPI_CMD_TSF 20
+#define WPI_CMD_RXON 16
+#define WPI_CMD_RXON_ASSOC 17
+#define WPI_CMD_EDCA_PARAMS 19
+#define WPI_CMD_TIMING 20
#define WPI_CMD_ADD_NODE 24
+#define WPI_CMD_DEL_NODE 25
#define WPI_CMD_TX_DATA 28
#define WPI_CMD_MRR_SETUP 71
#define WPI_CMD_SET_LED 72
@@ -257,21 +321,22 @@ struct wpi_tx_cmd {
#define WPI_CMD_SCAN 128
#define WPI_CMD_SET_BEACON 145
#define WPI_CMD_TXPOWER 151
-#define WPI_CMD_BLUETOOTH 155
+#define WPI_CMD_BT_COEX 155
+#define WPI_CMD_GET_STATISTICS 156
uint8_t flags;
uint8_t idx;
uint8_t qid;
- uint8_t data[360];
+ uint8_t data[124];
} __packed;
-/* structure for WPI_CMD_CONFIGURE */
-struct wpi_config {
+/* Structure for command WPI_CMD_RXON. */
+struct wpi_rxon {
uint8_t myaddr[IEEE80211_ADDR_LEN];
uint16_t reserved1;
uint8_t bssid[IEEE80211_ADDR_LEN];
uint16_t reserved2;
- uint8_t wlap_bssid_addr[6];
+ uint8_t wlap[IEEE80211_ADDR_LEN];
uint16_t reserved3;
uint8_t mode;
#define WPI_MODE_HOSTAP 1
@@ -279,21 +344,22 @@ struct wpi_config {
#define WPI_MODE_IBSS 4
#define WPI_MODE_MONITOR 6
- uint8_t air_propogation;
+ uint8_t air;
uint16_t reserved4;
uint8_t ofdm_mask;
uint8_t cck_mask;
uint16_t associd;
uint32_t flags;
-#define WPI_CONFIG_24GHZ (1 << 0)
-#define WPI_CONFIG_CCK (1 << 1)
-#define WPI_CONFIG_AUTO (1 << 2)
-#define WPI_CONFIG_SHSLOT (1 << 4)
-#define WPI_CONFIG_SHPREAMBLE (1 << 5)
-#define WPI_CONFIG_NODIVERSITY (1 << 7)
-#define WPI_CONFIG_ANTENNA_A (1 << 8)
-#define WPI_CONFIG_ANTENNA_B (1 << 9)
-#define WPI_CONFIG_TSF (1 << 15)
+#define WPI_RXON_24GHZ (1 << 0)
+#define WPI_RXON_CCK (1 << 1)
+#define WPI_RXON_AUTO (1 << 2)
+#define WPI_RXON_SHSLOT (1 << 4)
+#define WPI_RXON_SHPREAMBLE (1 << 5)
+#define WPI_RXON_NODIVERSITY (1 << 7)
+#define WPI_RXON_ANTENNA_A (1 << 8)
+#define WPI_RXON_ANTENNA_B (1 << 9)
+#define WPI_RXON_TSF (1 << 15)
+#define WPI_RXON_CTS_TO_SELF (1 << 30)
uint32_t filter;
#define WPI_FILTER_PROMISC (1 << 0)
@@ -304,10 +370,10 @@ struct wpi_config {
#define WPI_FILTER_BEACON (1 << 6)
uint8_t chan;
- uint16_t reserved6;
+ uint16_t reserved5;
} __packed;
-/* structure for command WPI_CMD_ASSOCIATE */
+/* Structure for command WPI_CMD_RXON_ASSOC. */
struct wpi_assoc {
uint32_t flags;
uint32_t filter;
@@ -316,20 +382,22 @@ struct wpi_assoc {
uint16_t reserved;
} __packed;
-/* structure for command WPI_CMD_SET_WME */
-struct wpi_wme_setup {
+/* Structure for command WPI_CMD_EDCA_PARAMS. */
+struct wpi_edca_params {
uint32_t flags;
+#define WPI_EDCA_UPDATE (1 << 0)
+
struct {
uint16_t cwmin;
uint16_t cwmax;
uint8_t aifsn;
uint8_t reserved;
- uint16_t txop;
+ uint16_t txoplimit;
} __packed ac[WME_NUM_AC];
} __packed;
-/* structure for command WPI_CMD_TSF */
-struct wpi_cmd_tsf {
+/* Structure for command WPI_CMD_TIMING. */
+struct wpi_cmd_timing {
uint64_t tstamp;
uint16_t bintval;
uint16_t atim;
@@ -338,41 +406,60 @@ struct wpi_cmd_tsf {
uint16_t reserved;
} __packed;
-/* structure for WPI_CMD_ADD_NODE */
+/* Structure for command WPI_CMD_ADD_NODE. */
struct wpi_node_info {
uint8_t control;
-#define WPI_NODE_UPDATE (1 << 0)
+#define WPI_NODE_UPDATE (1 << 0)
uint8_t reserved1[3];
- uint8_t bssid[IEEE80211_ADDR_LEN];
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
uint16_t reserved2;
uint8_t id;
#define WPI_ID_BSS 0
+#define WPI_ID_IBSS_MIN 2
+#define WPI_ID_IBSS_MAX 23
#define WPI_ID_BROADCAST 24
+#define WPI_ID_UNDEFINED (uint8_t)-1
uint8_t flags;
+#define WPI_FLAG_KEY_SET (1 << 0)
+
uint16_t reserved3;
- uint16_t key_flags;
- uint8_t tkip;
+ uint16_t kflags;
+#define WPI_KFLAG_CCMP (1 << 1)
+#define WPI_KFLAG_KID(kid) ((kid) << 8)
+#define WPI_KFLAG_MULTICAST (1 << 14)
+
+ uint8_t tsc2;
uint8_t reserved4;
uint16_t ttak[5];
uint16_t reserved5;
uint8_t key[IEEE80211_KEYBUF_SIZE];
uint32_t action;
-#define WPI_ACTION_SET_RATE 4
+#define WPI_ACTION_SET_RATE (1 << 2)
+
uint32_t mask;
uint16_t tid;
- uint8_t rate;
+ uint8_t plcp;
uint8_t antenna;
-#define WPI_ANTENNA_A (1<<6)
-#define WPI_ANTENNA_B (1<<7)
-#define WPI_ANTENNA_BOTH (WPI_ANTENNA_A|WPI_ANTENNA_B)
+#define WPI_ANTENNA_A (1 << 6)
+#define WPI_ANTENNA_B (1 << 7)
+#define WPI_ANTENNA_BOTH (WPI_ANTENNA_A | WPI_ANTENNA_B)
+
uint8_t add_imm;
uint8_t del_imm;
uint16_t add_imm_start;
} __packed;
-/* structure for command WPI_CMD_TX_DATA */
+/* Structure for command WPI_CMD_DEL_NODE. */
+struct wpi_cmd_del_node {
+ uint8_t count;
+ uint8_t reserved1[3];
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ uint16_t reserved2;
+} __packed;
+
+/* Structure for command WPI_CMD_TX_DATA. */
struct wpi_cmd_data {
uint16_t len;
uint16_t lnext;
@@ -381,34 +468,39 @@ struct wpi_cmd_data {
#define WPI_TX_NEED_CTS (1 << 2)
#define WPI_TX_NEED_ACK (1 << 3)
#define WPI_TX_FULL_TXOP (1 << 7)
-#define WPI_TX_BT_DISABLE (1 << 12) /* bluetooth coexistence */
+#define WPI_TX_BT_DISABLE (1 << 12) /* bluetooth coexistence */
#define WPI_TX_AUTO_SEQ (1 << 13)
#define WPI_TX_INSERT_TSTAMP (1 << 16)
- uint8_t rate;
+ uint8_t plcp;
uint8_t id;
uint8_t tid;
uint8_t security;
+#define WPI_CIPHER_WEP 1
+#define WPI_CIPHER_CCMP 2
+#define WPI_CIPHER_TKIP 3
+#define WPI_CIPHER_WEP104 9
+
uint8_t key[IEEE80211_KEYBUF_SIZE];
uint8_t tkip[IEEE80211_WEP_MICLEN];
uint32_t fnext;
uint32_t lifetime;
#define WPI_LIFETIME_INFINITE 0xffffffff
+
uint8_t ofdm_mask;
uint8_t cck_mask;
uint8_t rts_ntries;
uint8_t data_ntries;
uint16_t timeout;
uint16_t txop;
- struct ieee80211_frame wh;
} __packed;
-/* structure for command WPI_CMD_SET_BEACON */
+/* Structure for command WPI_CMD_SET_BEACON. */
struct wpi_cmd_beacon {
uint16_t len;
uint16_t reserved1;
uint32_t flags; /* same as wpi_cmd_data */
- uint8_t rate;
+ uint8_t plcp;
uint8_t id;
uint8_t reserved2[30];
uint32_t lifetime;
@@ -418,11 +510,10 @@ struct wpi_cmd_beacon {
uint16_t tim;
uint8_t timsz;
uint8_t reserved4;
- struct ieee80211_frame wh;
} __packed;
-/* structure for notification WPI_MISSED_BEACON */
-struct wpi_missed_beacon {
+/* Structure for notification WPI_BEACON_MISSED. */
+struct wpi_beacon_missed {
uint32_t consecutive;
uint32_t total;
uint32_t expected;
@@ -430,29 +521,22 @@ struct wpi_missed_beacon {
} __packed;
-/* structure for WPI_CMD_MRR_SETUP */
+/* Structure for command WPI_CMD_MRR_SETUP. */
+#define WPI_RIDX_MAX 11
struct wpi_mrr_setup {
- uint8_t which;
+ uint32_t which;
#define WPI_MRR_CTL 0
#define WPI_MRR_DATA 1
- uint8_t reserved[3];
-
struct {
- uint8_t signal;
+ uint8_t plcp;
uint8_t flags;
uint8_t ntries;
uint8_t next;
-#define WPI_OFDM6 0
-#define WPI_OFDM54 7
-#define WPI_CCK1 8
-#define WPI_CCK2 9
-#define WPI_CCK11 11
-
- } __attribute__((__packed__)) rates[WPI_CCK11 + 1];
+ } __packed rates[WPI_RIDX_MAX + 1];
} __packed;
-/* structure for WPI_CMD_SET_LED */
+/* Structure for command WPI_CMD_SET_LED. */
struct wpi_cmd_led {
uint32_t unit; /* multiplier (in usecs) */
uint8_t which;
@@ -464,136 +548,127 @@ struct wpi_cmd_led {
uint8_t reserved;
} __packed;
-/* structure for WPI_CMD_SET_POWER_MODE */
-struct wpi_power {
- uint32_t flags;
-#define WPI_POWER_CAM 0 /* constantly awake mode */
- uint32_t rx_timeout;
- uint32_t tx_timeout;
- uint32_t sleep[5];
+/* Structure for command WPI_CMD_SET_POWER_MODE. */
+struct wpi_pmgt_cmd {
+ uint16_t flags;
+#define WPI_PS_ALLOW_SLEEP (1 << 0)
+#define WPI_PS_NOTIFY (1 << 1)
+#define WPI_PS_SLEEP_OVER_DTIM (1 << 2)
+#define WPI_PS_PCI_PMGT (1 << 3)
+
+ uint8_t reserved[2];
+ uint32_t rxtimeout;
+ uint32_t txtimeout;
+ uint32_t intval[5];
+} __packed;
+
+/* Structures for command WPI_CMD_SCAN. */
+#define WPI_SCAN_MAX_ESSIDS 4
+struct wpi_scan_essid {
+ uint8_t id;
+ uint8_t len;
+ uint8_t data[IEEE80211_NWID_LEN];
} __packed;
-/* structure for command WPI_CMD_SCAN */
struct wpi_scan_hdr {
uint16_t len;
uint8_t reserved1;
uint8_t nchan;
- uint16_t quiet;
- uint16_t threshold;
- uint16_t promotion;
+ uint16_t quiet_time;
+ uint16_t quiet_threshold;
+ uint16_t crc_threshold;
uint16_t reserved2;
- uint32_t maxtimeout;
- uint32_t suspend;
+ uint32_t max_svc; /* background scans */
+ uint32_t pause_svc; /* background scans */
uint32_t flags;
uint32_t filter;
-struct {
- uint16_t len;
- uint16_t lnext;
- uint32_t flags;
- uint8_t rate;
- uint8_t id;
- uint8_t tid;
- uint8_t security;
- uint8_t key[IEEE80211_KEYBUF_SIZE];
- uint8_t tkip[IEEE80211_WEP_MICLEN];
- uint32_t fnext;
- uint32_t lifetime;
- uint8_t ofdm_mask;
- uint8_t cck_mask;
- uint8_t rts_ntries;
- uint8_t data_ntries;
- uint16_t timeout;
- uint16_t txop;
-} tx __attribute__((__packed__));
-
-#define WPI_SCAN_MAX_ESSIDS 4
- struct {
- uint8_t id;
- uint8_t esslen;
- uint8_t essid[IEEE80211_NWID_LEN];
- }scan_essids[WPI_SCAN_MAX_ESSIDS];
- /* followed by probe request body */
- /* followed by nchan x wpi_scan_chan */
+ /* Followed by a struct wpi_cmd_data. */
+ /* Followed by an array of 4 structs wpi_scan_essid. */
+ /* Followed by probe request body. */
+ /* Followed by an array of ``nchan'' structs wpi_scan_chan. */
} __packed;
struct wpi_scan_chan {
uint8_t flags;
+#define WPI_CHAN_ACTIVE (1 << 0)
+#define WPI_CHAN_NPBREQS(x) (((1 << (x)) - 1) << 1)
+
uint8_t chan;
-#define WPI_CHAN_ACTIVE (1 << 0)
-#define WPI_CHAN_DIRECT (1 << 1)
- uint8_t gain_radio;
- uint8_t gain_dsp;
+ uint8_t rf_gain;
+ uint8_t dsp_gain;
uint16_t active; /* msecs */
uint16_t passive; /* msecs */
} __packed;
-/* structure for WPI_CMD_BLUETOOTH */
-struct wpi_bluetooth {
- uint8_t flags;
- uint8_t lead;
- uint8_t kill;
- uint8_t reserved;
- uint32_t ack;
- uint32_t cts;
-} __packed;
+#define WPI_SCAN_CRC_TH_DEFAULT htole16(1)
+#define WPI_SCAN_CRC_TH_NEVER htole16(0xffff)
-/* structure for command WPI_CMD_TXPOWER */
-struct wpi_cmd_txpower {
+/* Maximum size of a scan command. */
+#define WPI_SCAN_MAXSZ (MCLBYTES - 4)
+#define WPI_ACTIVE_DWELL_TIME_2GHZ (30) /* all times in msec */
+#define WPI_ACTIVE_DWELL_TIME_5GHZ (20)
+#define WPI_ACTIVE_DWELL_FACTOR_2GHZ ( 3)
+#define WPI_ACTIVE_DWELL_FACTOR_5GHZ ( 2)
+
+#define WPI_PASSIVE_DWELL_TIME_2GHZ ( 20)
+#define WPI_PASSIVE_DWELL_TIME_5GHZ ( 10)
+#define WPI_PASSIVE_DWELL_BASE (100)
+
+/* Structure for command WPI_CMD_TXPOWER. */
+struct wpi_cmd_txpower {
uint8_t band;
-#define WPI_RATE_5GHZ 0
-#define WPI_RATE_2GHZ 1
+#define WPI_BAND_5GHZ 0
+#define WPI_BAND_2GHZ 1
+
uint8_t reserved;
- uint16_t channel;
+ uint16_t chan;
-#define WPI_RATE_MAPPING_COUNT 12
struct {
- uint8_t rate;
- uint8_t gain_radio;
- uint8_t gain_dsp;
- uint8_t reserved;
- } __packed rates [WPI_RATE_MAPPING_COUNT];
+ uint8_t plcp;
+ uint8_t rf_gain;
+ uint8_t dsp_gain;
+ uint8_t reserved;
+ } __packed rates[WPI_RIDX_MAX + 1];
} __packed;
+/* Structure for command WPI_CMD_BT_COEX. */
+struct wpi_bluetooth {
+ uint8_t flags;
+#define WPI_BT_COEX_DISABLE 0
+#define WPI_BT_COEX_MODE_2WIRE 1
+#define WPI_BT_COEX_MODE_3WIRE 2
+#define WPI_BT_COEX_MODE_4WIRE 3
+ uint8_t lead_time;
+#define WPI_BT_LEAD_TIME_DEF 30
-#define WPI_FW_MAIN_TEXT_MAXSZ (80 * 1024 )
-#define WPI_FW_MAIN_DATA_MAXSZ (32 * 1024 )
-#define WPI_FW_INIT_TEXT_MAXSZ (80 * 1024 )
-#define WPI_FW_INIT_DATA_MAXSZ (32 * 1024 )
-#define WPI_FW_BOOT_TEXT_MAXSZ 1024
-
-#define WPI_FW_UPDATED (1 << 31 )
-
-/* firmware image header */
-struct wpi_firmware_hdr {
-
-#define WPI_FW_MINVERSION 2144
+ uint8_t max_kill;
+#define WPI_BT_MAX_KILL_DEF 5
- uint32_t version;
- uint32_t rtextsz;
- uint32_t rdatasz;
- uint32_t itextsz;
- uint32_t idatasz;
- uint32_t btextsz;
+ uint8_t reserved;
+ uint32_t kill_ack;
+ uint32_t kill_cts;
} __packed;
-/* structure for WPI_UC_READY notification */
+/* Structure for WPI_UC_READY notification. */
struct wpi_ucode_info {
- uint32_t version;
+ uint8_t minor;
+ uint8_t major;
+ uint16_t reserved1;
uint8_t revision[8];
uint8_t type;
uint8_t subtype;
- uint16_t reserved;
+ uint16_t reserved2;
uint32_t logptr;
- uint32_t errorptr;
- uint32_t timestamp;
+ uint32_t errptr;
+ uint32_t tstamp;
uint32_t valid;
} __packed;
-/* structure for WPI_START_SCAN notification */
+/* Structure for WPI_START_SCAN notification. */
struct wpi_start_scan {
uint64_t tstamp;
uint32_t tbeacon;
@@ -603,7 +678,7 @@ struct wpi_start_scan {
uint32_t status;
} __packed;
-/* structure for WPI_STOP_SCAN notification */
+/* Structure for WPI_STOP_SCAN notification. */
struct wpi_stop_scan {
uint8_t nchan;
uint8_t status;
@@ -612,9 +687,114 @@ struct wpi_stop_scan {
uint64_t tsf;
} __packed;
+/* Structures for WPI_{RX,BEACON}_STATISTICS notification. */
+struct wpi_rx_phy_stats {
+ uint32_t ina;
+ uint32_t fina;
+ uint32_t bad_plcp;
+ uint32_t bad_crc32;
+ uint32_t overrun;
+ uint32_t eoverrun;
+ uint32_t good_crc32;
+ uint32_t fa;
+ uint32_t bad_fina_sync;
+ uint32_t sfd_timeout;
+ uint32_t fina_timeout;
+ uint32_t no_rts_ack;
+ uint32_t rxe_limit;
+ uint32_t ack;
+ uint32_t cts;
+} __packed;
+
+struct wpi_rx_general_stats {
+ uint32_t bad_cts;
+ uint32_t bad_ack;
+ uint32_t not_bss;
+ uint32_t filtered;
+ uint32_t bad_chan;
+} __packed;
+
+struct wpi_rx_stats {
+ struct wpi_rx_phy_stats ofdm;
+ struct wpi_rx_phy_stats cck;
+ struct wpi_rx_general_stats general;
+} __packed;
+
+struct wpi_tx_stats {
+ uint32_t preamble;
+ uint32_t rx_detected;
+ uint32_t bt_defer;
+ uint32_t bt_kill;
+ uint32_t short_len;
+ uint32_t cts_timeout;
+ uint32_t ack_timeout;
+ uint32_t exp_ack;
+ uint32_t ack;
+} __packed;
+
+struct wpi_general_stats {
+ uint32_t temp;
+ uint32_t burst_check;
+ uint32_t burst;
+ uint32_t reserved[4];
+ uint32_t sleep;
+ uint32_t slot_out;
+ uint32_t slot_idle;
+ uint32_t ttl_tstamp;
+ uint32_t tx_ant_a;
+ uint32_t tx_ant_b;
+ uint32_t exec;
+ uint32_t probe;
+} __packed;
+
+struct wpi_stats {
+ uint32_t flags;
+ struct wpi_rx_stats rx;
+ struct wpi_tx_stats tx;
+ struct wpi_general_stats general;
+} __packed;
+
+/* Possible flags for command WPI_CMD_GET_STATISTICS. */
+#define WPI_STATISTICS_BEACON_DISABLE (1 << 1)
+
+
+/* Firmware error dump entry. */
+struct wpi_fw_dump {
+ uint32_t desc;
+ uint32_t time;
+ uint32_t blink[2];
+ uint32_t ilink[2];
+ uint32_t data;
+} __packed;
+
+/* Firmware image file header. */
+struct wpi_firmware_hdr {
+
+#define WPI_FW_MINVERSION 2144
+#define WPI_FW_NAME "wpifw"
+
+ uint16_t driver;
+ uint8_t minor;
+ uint8_t major;
+ uint32_t rtextsz;
+ uint32_t rdatasz;
+ uint32_t itextsz;
+ uint32_t idatasz;
+ uint32_t btextsz;
+} __packed;
+
+#define WPI_FW_TEXT_MAXSZ ( 80 * 1024 )
+#define WPI_FW_DATA_MAXSZ ( 32 * 1024 )
+#define WPI_FW_BOOT_TEXT_MAXSZ 1024
+
+#define WPI_FW_UPDATED (1U << 31 )
+
+/*
+ * Offsets into EEPROM.
+ */
#define WPI_EEPROM_MAC 0x015
#define WPI_EEPROM_REVISION 0x035
-#define WPI_EEPROM_CAPABILITIES 0x045
+#define WPI_EEPROM_SKU_CAP 0x045
#define WPI_EEPROM_TYPE 0x04a
#define WPI_EEPROM_DOMAIN 0x060
#define WPI_EEPROM_BAND1 0x063
@@ -626,49 +806,66 @@ struct wpi_stop_scan {
struct wpi_eeprom_chan {
uint8_t flags;
-#define WPI_EEPROM_CHAN_VALID (1<<0)
-#define WPI_EEPROM_CHAN_IBSS (1<<1)
-#define WPI_EEPROM_CHAN_ACTIVE (1<<3)
-#define WPI_EEPROM_CHAN_RADAR (1<<4)
+#define WPI_EEPROM_CHAN_VALID (1 << 0)
+#define WPI_EEPROM_CHAN_IBSS (1 << 1)
+#define WPI_EEPROM_CHAN_ACTIVE (1 << 3)
+#define WPI_EEPROM_CHAN_RADAR (1 << 4)
int8_t maxpwr;
} __packed;
struct wpi_eeprom_sample {
- uint8_t index;
- int8_t power;
- uint16_t volt;
-};
+ uint8_t index;
+ int8_t power;
+ uint16_t volt;
+} __packed;
#define WPI_POWER_GROUPS_COUNT 5
-
struct wpi_eeprom_group {
- struct wpi_eeprom_sample samples[5];
- int32_t coef[5];
- int32_t corr[5];
- int8_t maxpwr;
- uint8_t chan;
- int16_t temp;
+ struct wpi_eeprom_sample samples[5];
+ int32_t coef[5];
+ int32_t corr[5];
+ int8_t maxpwr;
+ uint8_t chan;
+ int16_t temp;
} __packed;
-#define WPI_CHAN_BANDS_COUNT 5
+#define WPI_CHAN_BANDS_COUNT 5
#define WPI_MAX_CHAN_PER_BAND 14
-
static const struct wpi_chan_band {
- uint32_t addr; /* offset in EEPROM */
- uint8_t nchan;
- uint8_t chan[WPI_MAX_CHAN_PER_BAND];
-} wpi_bands[5] = {
- { WPI_EEPROM_BAND1, 14,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }},
- { WPI_EEPROM_BAND2, 13,
- { 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 }},
- { WPI_EEPROM_BAND3, 12,
- { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 }},
- { WPI_EEPROM_BAND4, 11,
- { 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 }},
- { WPI_EEPROM_BAND5, 6,
- { 145, 149, 153, 157, 161, 165 }}
+ uint32_t addr; /* offset in EEPROM */
+ uint8_t nchan;
+ uint8_t chan[WPI_MAX_CHAN_PER_BAND];
+} wpi_bands[] = {
+ /* 20MHz channels, 2GHz band. */
+ { WPI_EEPROM_BAND1, 14,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 } },
+ /* 20MHz channels, 5GHz band. */
+ { WPI_EEPROM_BAND2, 13,
+ { 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 } },
+ { WPI_EEPROM_BAND3, 12,
+ { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 } },
+ { WPI_EEPROM_BAND4, 11,
+ { 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 } },
+ { WPI_EEPROM_BAND5, 6,
+ { 145, 149, 153, 157, 161, 165 } }
+};
+
+/* HW rate indices. */
+#define WPI_RIDX_OFDM6 0
+#define WPI_RIDX_OFDM36 5
+#define WPI_RIDX_OFDM48 6
+#define WPI_RIDX_OFDM54 7
+#define WPI_RIDX_CCK1 8
+#define WPI_RIDX_CCK2 9
+#define WPI_RIDX_CCK11 11
+
+static const uint8_t wpi_ridx_to_plcp[] = {
+ /* OFDM: IEEE Std 802.11a-1999, pp. 14 Table 80 */
+ /* R1-R4 (ral/ural is R4-R1) */
+ 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3,
+ /* CCK: device-dependent */
+ 10, 20, 55, 110
};
#define WPI_MAX_PWR_INDEX 77
@@ -678,25 +875,25 @@ static const struct wpi_chan_band {
* the reference driver.)
*/
static const uint8_t wpi_rf_gain_2ghz[WPI_MAX_PWR_INDEX + 1] = {
- 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xbb, 0xbb, 0xbb,
- 0xbb, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xd3, 0xd3, 0xb3, 0xb3, 0xb3,
- 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x73, 0xeb, 0xeb, 0xeb,
- 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xab, 0xab, 0xab, 0x8b,
- 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xc3, 0xc3, 0xc3, 0xc3, 0xa3,
- 0xa3, 0xa3, 0xa3, 0x83, 0x83, 0x83, 0x83, 0x63, 0x63, 0x63, 0x63,
- 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23, 0x03, 0x03, 0x03,
- 0x03
+ 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xbb, 0xbb, 0xbb,
+ 0xbb, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xd3, 0xd3, 0xb3, 0xb3, 0xb3,
+ 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x73, 0xeb, 0xeb, 0xeb,
+ 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xab, 0xab, 0xab, 0x8b,
+ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xc3, 0xc3, 0xc3, 0xc3, 0xa3,
+ 0xa3, 0xa3, 0xa3, 0x83, 0x83, 0x83, 0x83, 0x63, 0x63, 0x63, 0x63,
+ 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23, 0x03, 0x03, 0x03,
+ 0x03
};
static const uint8_t wpi_rf_gain_5ghz[WPI_MAX_PWR_INDEX + 1] = {
- 0xfb, 0xfb, 0xfb, 0xdb, 0xdb, 0xbb, 0xbb, 0x9b, 0x9b, 0x7b, 0x7b,
- 0x7b, 0x7b, 0x5b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x1b, 0x1b,
- 0x1b, 0x73, 0x73, 0x73, 0x53, 0x53, 0x53, 0x53, 0x53, 0x33, 0x33,
- 0x33, 0x33, 0x13, 0x13, 0x13, 0x13, 0x13, 0xab, 0xab, 0xab, 0x8b,
- 0x8b, 0x8b, 0x8b, 0x6b, 0x6b, 0x6b, 0x6b, 0x4b, 0x4b, 0x4b, 0x4b,
- 0x2b, 0x2b, 0x2b, 0x2b, 0x0b, 0x0b, 0x0b, 0x0b, 0x83, 0x83, 0x63,
- 0x63, 0x63, 0x63, 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23,
- 0x03
+ 0xfb, 0xfb, 0xfb, 0xdb, 0xdb, 0xbb, 0xbb, 0x9b, 0x9b, 0x7b, 0x7b,
+ 0x7b, 0x7b, 0x5b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x1b, 0x1b,
+ 0x1b, 0x73, 0x73, 0x73, 0x53, 0x53, 0x53, 0x53, 0x53, 0x33, 0x33,
+ 0x33, 0x33, 0x13, 0x13, 0x13, 0x13, 0x13, 0xab, 0xab, 0xab, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x6b, 0x6b, 0x6b, 0x6b, 0x4b, 0x4b, 0x4b, 0x4b,
+ 0x2b, 0x2b, 0x2b, 0x2b, 0x0b, 0x0b, 0x0b, 0x0b, 0x83, 0x83, 0x63,
+ 0x63, 0x63, 0x63, 0x43, 0x43, 0x43, 0x43, 0x23, 0x23, 0x23, 0x23,
+ 0x03
};
/*
@@ -704,34 +901,94 @@ static const uint8_t wpi_rf_gain_5ghz[WPI_MAX_PWR_INDEX + 1] = {
* from the reference driver.)
*/
static const uint8_t wpi_dsp_gain_2ghz[WPI_MAX_PWR_INDEX + 1] = {
- 0x7f, 0x7f, 0x7f, 0x7f, 0x7d, 0x6e, 0x69, 0x62, 0x7d, 0x73, 0x6c,
- 0x63, 0x77, 0x6f, 0x69, 0x61, 0x5c, 0x6a, 0x64, 0x78, 0x71, 0x6b,
- 0x7d, 0x77, 0x70, 0x6a, 0x65, 0x61, 0x5b, 0x6b, 0x79, 0x73, 0x6d,
- 0x7f, 0x79, 0x73, 0x6c, 0x66, 0x60, 0x5c, 0x6e, 0x68, 0x62, 0x74,
- 0x7d, 0x77, 0x71, 0x6b, 0x65, 0x60, 0x71, 0x6a, 0x66, 0x5f, 0x71,
- 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f,
- 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66,
- 0x5f
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7d, 0x6e, 0x69, 0x62, 0x7d, 0x73, 0x6c,
+ 0x63, 0x77, 0x6f, 0x69, 0x61, 0x5c, 0x6a, 0x64, 0x78, 0x71, 0x6b,
+ 0x7d, 0x77, 0x70, 0x6a, 0x65, 0x61, 0x5b, 0x6b, 0x79, 0x73, 0x6d,
+ 0x7f, 0x79, 0x73, 0x6c, 0x66, 0x60, 0x5c, 0x6e, 0x68, 0x62, 0x74,
+ 0x7d, 0x77, 0x71, 0x6b, 0x65, 0x60, 0x71, 0x6a, 0x66, 0x5f, 0x71,
+ 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f,
+ 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66, 0x5f, 0x71, 0x6a, 0x66,
+ 0x5f
};
static const uint8_t wpi_dsp_gain_5ghz[WPI_MAX_PWR_INDEX + 1] = {
- 0x7f, 0x78, 0x72, 0x77, 0x65, 0x71, 0x66, 0x72, 0x67, 0x75, 0x6b,
- 0x63, 0x5c, 0x6c, 0x7d, 0x76, 0x6d, 0x66, 0x60, 0x5a, 0x68, 0x62,
- 0x5c, 0x76, 0x6f, 0x68, 0x7e, 0x79, 0x71, 0x69, 0x63, 0x76, 0x6f,
- 0x68, 0x62, 0x74, 0x6d, 0x66, 0x62, 0x5d, 0x71, 0x6b, 0x63, 0x78,
- 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63,
- 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x6b, 0x63, 0x78,
- 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63,
- 0x78
+ 0x7f, 0x78, 0x72, 0x77, 0x65, 0x71, 0x66, 0x72, 0x67, 0x75, 0x6b,
+ 0x63, 0x5c, 0x6c, 0x7d, 0x76, 0x6d, 0x66, 0x60, 0x5a, 0x68, 0x62,
+ 0x5c, 0x76, 0x6f, 0x68, 0x7e, 0x79, 0x71, 0x69, 0x63, 0x76, 0x6f,
+ 0x68, 0x62, 0x74, 0x6d, 0x66, 0x62, 0x5d, 0x71, 0x6b, 0x63, 0x78,
+ 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63,
+ 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x6b, 0x63, 0x78,
+ 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63, 0x78, 0x71, 0x6b, 0x63,
+ 0x78
};
+/*
+ * Power saving settings (values obtained from the reference driver.)
+ */
+#define WPI_NDTIMRANGES 2
+#define WPI_NPOWERLEVELS 6
+static const struct wpi_pmgt {
+ uint32_t rxtimeout;
+ uint32_t txtimeout;
+ uint32_t intval[5];
+ int skip_dtim;
+} wpi_pmgt[WPI_NDTIMRANGES][WPI_NPOWERLEVELS] = {
+ /* DTIM <= 10 */
+ {
+ { 0, 0, { 0, 0, 0, 0, 0 }, 0 }, /* CAM */
+ { 200, 500, { 1, 2, 3, 4, 4 }, 0 }, /* PS level 1 */
+ { 200, 300, { 2, 4, 6, 7, 7 }, 0 }, /* PS level 2 */
+ { 50, 100, { 2, 6, 9, 9, 10 }, 0 }, /* PS level 3 */
+ { 50, 25, { 2, 7, 9, 9, 10 }, 1 }, /* PS level 4 */
+ { 25, 25, { 4, 7, 10, 10, 10 }, 1 } /* PS level 5 */
+ },
+ /* DTIM >= 11 */
+ {
+ { 0, 0, { 0, 0, 0, 0, 0 }, 0 }, /* CAM */
+ { 200, 500, { 1, 2, 3, 4, -1 }, 0 }, /* PS level 1 */
+ { 200, 300, { 2, 4, 6, 7, -1 }, 0 }, /* PS level 2 */
+ { 50, 100, { 2, 6, 9, 9, -1 }, 0 }, /* PS level 3 */
+ { 50, 25, { 2, 7, 9, 9, -1 }, 0 }, /* PS level 4 */
+ { 25, 25, { 4, 7, 10, 10, -1 }, 0 } /* PS level 5 */
+ }
+};
+
+/* Firmware errors. */
+static const char * const wpi_fw_errmsg[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT",
+ "SYSASSERT",
+ "FATAL_ERROR"
+};
+
+/* XXX description for some error codes (error data). */
+/* 0x00000074 - wrong totlen field */
+/* 0x000003B3 - powersave error */
+/* 0x00000447 - wrong channel selected */
#define WPI_READ(sc, reg) \
- bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
+ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define WPI_WRITE(sc, reg, val) \
- bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define WPI_WRITE_REGION_4(sc, offset, datap, count) \
- bus_space_write_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \
- (datap), (count))
+ bus_space_write_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \
+ (datap), (count))
+
+#define WPI_SETBITS(sc, reg, mask) \
+ WPI_WRITE(sc, reg, WPI_READ(sc, reg) | (mask))
+
+#define WPI_CLRBITS(sc, reg, mask) \
+ WPI_WRITE(sc, reg, WPI_READ(sc, reg) & ~(mask))
+
+#define WPI_BARRIER_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_WRITE)
+
+#define WPI_BARRIER_READ_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
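A minimal usage sketch for the WPI_SETBITS/WPI_CLRBITS helpers added above (illustration only, not part of this diff): requesting MAC access and polling until the clock stabilises. WPI_GP_CNTRL as the register offset and the retry/delay values are assumptions; the flag names come from the definitions earlier in this header.

/* Sketch: request NIC access, wait for a stable MAC clock. */
static int
wpi_nic_lock_sketch(struct wpi_softc *sc)
{
	int ntries;

	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);

	/* Poll until the clock is ready and the device is not asleep. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((WPI_READ(sc, WPI_GP_CNTRL) &
		    (WPI_GP_CNTRL_MAC_CLOCK_READY | WPI_GP_CNTRL_SLEEP)) ==
		    WPI_GP_CNTRL_MAC_CLOCK_READY)
			return (0);
		DELAY(10);
	}
	/* Timed out; drop the access request again. */
	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
	return (ETIMEDOUT);
}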
diff --git a/sys/dev/wpi/if_wpivar.h b/sys/dev/wpi/if_wpivar.h
index e579264a49d8..53627271afad 100644
--- a/sys/dev/wpi/if_wpivar.h
+++ b/sys/dev/wpi/if_wpivar.h
@@ -16,8 +16,6 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <net80211/ieee80211_amrr.h>
-
struct wpi_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsft;
@@ -28,7 +26,7 @@ struct wpi_rx_radiotap_header {
int8_t wr_dbm_antsignal;
int8_t wr_dbm_antnoise;
uint8_t wr_antenna;
-};
+} __packed;
#define WPI_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
@@ -45,8 +43,7 @@ struct wpi_tx_radiotap_header {
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
- uint8_t wt_hwqueue;
-};
+} __packed;
#define WPI_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
@@ -56,15 +53,14 @@ struct wpi_tx_radiotap_header {
struct wpi_dma_info {
bus_dma_tag_t tag;
bus_dmamap_t map;
- bus_addr_t paddr; /* aligned p address */
- bus_addr_t paddr_start; /* possibly unaligned p start*/
- caddr_t vaddr; /* aligned v address */
- caddr_t vaddr_start; /* possibly unaligned v start */
+ bus_addr_t paddr;
+ caddr_t vaddr;
bus_size_t size;
};
struct wpi_tx_data {
bus_dmamap_t map;
+ bus_addr_t cmd_paddr;
struct mbuf *m;
struct ieee80211_node *ni;
};
@@ -74,19 +70,17 @@ struct wpi_tx_ring {
struct wpi_dma_info cmd_dma;
struct wpi_tx_desc *desc;
struct wpi_tx_cmd *cmd;
- struct wpi_tx_data *data;
+ struct wpi_tx_data data[WPI_TX_RING_COUNT];
bus_dma_tag_t data_dmat;
int qid;
- int count;
int queued;
int cur;
+ int update;
};
-#define WPI_RBUF_COUNT ( WPI_RX_RING_COUNT + 16 )
-
struct wpi_rx_data {
- bus_dmamap_t map;
- struct mbuf *m;
+ struct mbuf *m;
+ bus_dmamap_t map;
};
struct wpi_rx_ring {
@@ -95,15 +89,12 @@ struct wpi_rx_ring {
struct wpi_rx_data data[WPI_RX_RING_COUNT];
bus_dma_tag_t data_dmat;
int cur;
+ int update;
};
-struct wpi_amrr {
- struct ieee80211_node ni; /* must be the first */
- int txcnt;
- int retrycnt;
- int success;
- int success_threshold;
- int recovery;
+struct wpi_node {
+ struct ieee80211_node ni; /* must be the first */
+ uint8_t id;
};
struct wpi_power_sample {
@@ -119,80 +110,117 @@ struct wpi_power_group {
int16_t temp;
};
+struct wpi_buf {
+ void *data;
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+ size_t size;
+ int code;
+ int ac;
+};
+
struct wpi_vap {
struct ieee80211vap vap;
+ struct wpi_buf wv_bcbuf;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define WPI_VAP(vap) ((struct wpi_vap *)(vap))
+struct wpi_fw_part {
+ const uint8_t *text;
+ uint32_t textsz;
+ const uint8_t *data;
+ uint32_t datasz;
+};
+
+struct wpi_fw_info {
+ const uint8_t *data;
+ size_t size;
+ struct wpi_fw_part init;
+ struct wpi_fw_part main;
+ struct wpi_fw_part boot;
+};
+
struct wpi_softc {
device_t sc_dev;
+
struct ifnet *sc_ifp;
+ int sc_debug;
+
struct mtx sc_mtx;
+ struct unrhdr *sc_unr;
/* Flags indicating the current state the driver
* expects the hardware to be in
*/
uint32_t flags;
-#define WPI_FLAG_HW_RADIO_OFF (1 << 0)
-#define WPI_FLAG_BUSY (1 << 1)
-#define WPI_FLAG_AUTH (1 << 2)
+#define WPI_FLAG_BUSY (1 << 0)
- /* shared area */
+ /* Shared area. */
struct wpi_dma_info shared_dma;
struct wpi_shared *shared;
- struct wpi_tx_ring txq[WME_NUM_AC];
- struct wpi_tx_ring cmdq;
+ struct wpi_tx_ring txq[WPI_NTXQUEUES];
struct wpi_rx_ring rxq;
- /* TX Thermal Callibration */
+ /* TX Thermal Callibration. */
struct callout calib_to;
int calib_cnt;
- /* Watch dog timer */
+ /* Watch dog timers. */
struct callout watchdog_to;
- /* Hardware switch polling timer */
- struct callout hwswitch_to;
+ struct callout watchdog_rfkill;
+
+ /* Firmware image. */
+ struct wpi_fw_info fw;
+ uint32_t errptr;
struct resource *irq;
struct resource *mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
void *sc_ih;
+ bus_size_t sc_sz;
+ int sc_cap_off; /* PCIe Capabilities. */
- struct wpi_config config;
+ struct wpi_rxon rxon;
int temp;
-
+ uint32_t qfullmsk;
int sc_tx_timer;
int sc_scan_timer;
- struct bpf_if *sc_drvbpf;
+ void (*sc_node_free)(struct ieee80211_node *);
+ void (*sc_scan_curchan)(struct ieee80211_scan_state *,
+ unsigned long);
struct wpi_rx_radiotap_header sc_rxtap;
struct wpi_tx_radiotap_header sc_txtap;
- /* firmware image */
+ /* Firmware image. */
const struct firmware *fw_fp;
- /* firmware DMA transfer */
+ /* Firmware DMA transfer. */
struct wpi_dma_info fw_dma;
- /* Tasks used by the driver */
- struct task sc_restarttask; /* reset firmware task */
- struct task sc_radiotask; /* reset rf task */
+ /* Tasks used by the driver. */
+ struct task sc_reinittask;
+ struct task sc_radiooff_task;
+ struct task sc_radioon_task;
- /* Eeprom info */
+ /* Eeprom info. */
uint8_t cap;
uint16_t rev;
uint8_t type;
+ struct wpi_eeprom_chan
+ eeprom_channels[WPI_CHAN_BANDS_COUNT][WPI_MAX_CHAN_PER_BAND];
struct wpi_power_group groups[WPI_POWER_GROUPS_COUNT];
int8_t maxpwr[IEEE80211_CHAN_MAX];
- char domain[4]; /*reglatory domain XXX */
+ char domain[4]; /* Regulatory domain. */
};
+
#define WPI_LOCK_INIT(_sc) \
mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
diff --git a/sys/fs/autofs/autofs.c b/sys/fs/autofs/autofs.c
index 2e08c3d847eb..c4b1ec331dce 100644
--- a/sys/fs/autofs/autofs.c
+++ b/sys/fs/autofs/autofs.c
@@ -584,6 +584,34 @@ autofs_ioctl_request(struct autofs_daemon_request *adr)
}
static int
+autofs_ioctl_done_101(struct autofs_daemon_done_101 *add)
+{
+ struct autofs_request *ar;
+
+ sx_xlock(&autofs_softc->sc_lock);
+ TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
+ if (ar->ar_id == add->add_id)
+ break;
+ }
+
+ if (ar == NULL) {
+ sx_xunlock(&autofs_softc->sc_lock);
+ AUTOFS_DEBUG("id %d not found", add->add_id);
+ return (ESRCH);
+ }
+
+ ar->ar_error = add->add_error;
+ ar->ar_wildcards = true;
+ ar->ar_done = true;
+ ar->ar_in_progress = false;
+ cv_broadcast(&autofs_softc->sc_cv);
+
+ sx_xunlock(&autofs_softc->sc_lock);
+
+ return (0);
+}
+
+static int
autofs_ioctl_done(struct autofs_daemon_done *add)
{
struct autofs_request *ar;
@@ -658,6 +686,9 @@ autofs_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode,
case AUTOFSREQUEST:
return (autofs_ioctl_request(
(struct autofs_daemon_request *)arg));
+ case AUTOFSDONE101:
+ return (autofs_ioctl_done_101(
+ (struct autofs_daemon_done_101 *)arg));
case AUTOFSDONE:
return (autofs_ioctl_done(
(struct autofs_daemon_done *)arg));
diff --git a/sys/fs/autofs/autofs_ioctl.h b/sys/fs/autofs/autofs_ioctl.h
index 328dd9ccd0cc..92d7314f1602 100644
--- a/sys/fs/autofs/autofs_ioctl.h
+++ b/sys/fs/autofs/autofs_ioctl.h
@@ -71,6 +71,21 @@ struct autofs_daemon_request {
char adr_options[MAXPATHLEN];
};
+/*
+ * Compatibility with 10.1-RELEASE automountd(8).
+ */
+struct autofs_daemon_done_101 {
+ /*
+ * Identifier, copied from adr_id.
+ */
+ int add_id;
+
+ /*
+ * Error number, possibly returned to userland.
+ */
+ int add_error;
+};
+
struct autofs_daemon_done {
/*
* Identifier, copied from adr_id.
@@ -87,9 +102,15 @@ struct autofs_daemon_done {
* Error number, possibly returned to userland.
*/
int add_error;
+
+ /*
+ * Reserved for future use.
+ */
+ int add_spare[7];
};
#define AUTOFSREQUEST _IOR('I', 0x01, struct autofs_daemon_request)
-#define AUTOFSDONE _IOW('I', 0x02, struct autofs_daemon_done)
+#define AUTOFSDONE101 _IOW('I', 0x02, struct autofs_daemon_done_101)
+#define AUTOFSDONE _IOW('I', 0x03, struct autofs_daemon_done)
#endif /* !AUTOFS_IOCTL_H */
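A hedged userland sketch of the two completion paths defined above (not part of this diff): a current automountd(8) zeroes the extended structure and issues AUTOFSDONE, while an unmodified 10.1-RELEASE binary keeps sending the old two-field structure, which the kernel now handles as AUTOFSDONE101. The control-device file descriptor and the way this header is included are assumptions.

#include <sys/ioctl.h>
#include <string.h>
/* Assumes autofs_ioctl.h is available for AUTOFSDONE and its structure. */

static int
autofs_done_sketch(int devfd, int id, int error)
{
	struct autofs_daemon_done add;

	memset(&add, 0, sizeof(add));
	add.add_id = id;	/* identifier copied from adr_id */
	add.add_error = error;	/* error number returned to userland */
	return (ioctl(devfd, AUTOFSDONE, &add));
}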
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 3d1999e4a916..7cae22013a4d 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -39,10 +39,27 @@
#include "opt_smp.h"
#include <machine/asmacros.h>
+#include <machine/specialreg.h>
#include <x86/apicreg.h>
#include "assym.s"
+ .text
+ SUPERALIGN_TEXT
+ /* End Of Interrupt to APIC */
+as_lapic_eoi:
+ cmpl $0,x2apic_mode
+ jne 1f
+ movl lapic_map,%eax
+ movl $0,LA_EOI(%eax)
+ ret
+1:
+ movl $MSR_APIC_EOI,%ecx
+ xorl %eax,%eax
+ xorl %edx,%edx
+ wrmsr
+ ret
+
/*
* I/O Interrupt Entry Point. Rather than having one entry point for
* each interrupt source, we use one entry point for each 32-bit word
@@ -58,16 +75,23 @@ IDTVEC(vec_name) ; \
SET_KERNEL_SREGS ; \
cld ; \
FAKE_MCOUNT(TF_EIP(%esp)) ; \
- movl lapic, %edx ; /* pointer to local APIC */ \
+ cmpl $0,x2apic_mode ; \
+ je 1f ; \
+ movl $(MSR_APIC_ISR0 + index),%ecx ; \
+ rdmsr ; \
+ jmp 2f ; \
+1: ; \
+ movl lapic_map, %edx ;/* pointer to local APIC */ \
movl LA_ISR + 16 * (index)(%edx), %eax ; /* load ISR */ \
+2: ; \
bsrl %eax, %eax ; /* index of highest set bit in ISR */ \
- jz 1f ; \
+ jz 3f ; \
addl $(32 * index),%eax ; \
pushl %esp ; \
pushl %eax ; /* pass the IRQ */ \
call lapic_handle_intr ; \
addl $8, %esp ; /* discard parameter */ \
-1: ; \
+3: ; \
MEXITCOUNT ; \
jmp doreti
@@ -164,8 +188,7 @@ IDTVEC(xen_intr_upcall)
.text
SUPERALIGN_TEXT
invltlb_ret:
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
POP_FRAME
iret
@@ -232,8 +255,7 @@ IDTVEC(ipi_intr_bitmap_handler)
SET_KERNEL_SREGS
cld
- movl lapic, %edx
- movl $0, LA_EOI(%edx) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
FAKE_MCOUNT(TF_EIP(%esp))
@@ -251,9 +273,7 @@ IDTVEC(cpustop)
SET_KERNEL_SREGS
cld
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
-
+ call as_lapic_eoi
call cpustop_handler
POP_FRAME
@@ -270,9 +290,7 @@ IDTVEC(cpususpend)
SET_KERNEL_SREGS
cld
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
-
+ call as_lapic_eoi
call cpususpend_handler
POP_FRAME
@@ -298,8 +316,7 @@ IDTVEC(rendezvous)
#endif
call smp_rendezvous_action
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
POP_FRAME
iret
@@ -315,8 +332,7 @@ IDTVEC(lazypmap)
call pmap_lazyfix_action
- movl lapic, %eax
- movl $0, LA_EOI(%eax) /* End Of Interrupt to APIC */
+ call as_lapic_eoi
POP_FRAME
iret
#endif /* SMP */
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index 0994be9b3724..97e2e979bd20 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -219,13 +219,8 @@ ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
ASSYM(PC_PRIVATE_TSS, offsetof(struct pcpu, pc_private_tss));
#ifdef DEV_APIC
-ASSYM(LA_VER, offsetof(struct LAPIC, version));
-ASSYM(LA_TPR, offsetof(struct LAPIC, tpr));
-ASSYM(LA_EOI, offsetof(struct LAPIC, eoi));
-ASSYM(LA_SVR, offsetof(struct LAPIC, svr));
-ASSYM(LA_ICR_LO, offsetof(struct LAPIC, icr_lo));
-ASSYM(LA_ICR_HI, offsetof(struct LAPIC, icr_hi));
-ASSYM(LA_ISR, offsetof(struct LAPIC, isr0));
+ASSYM(LA_EOI, LAPIC_EOI * LAPIC_MEM_MUL);
+ASSYM(LA_ISR, LAPIC_ISR0 * LAPIC_MEM_MUL);
#endif
ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL));
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 288be908d1b9..32b954075d62 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -719,8 +719,11 @@ init_secondary(void)
load_cr0(cr0);
CHECK_WRITE(0x38, 5);
- /* Disable local APIC just to be sure. */
- lapic_disable();
+ /*
+ * On real hardware, switch to x2apic mode if possible.
+ * Disable local APIC until BSP directed APs to run.
+ */
+ lapic_xapic_mode();
/* signal our startup to the BSP. */
mp_naps++;
@@ -1138,14 +1141,27 @@ ipi_startup(int apic_id, int vector)
{
/*
+ * This attempts to follow the algorithm described in the
+ * Intel Multiprocessor Specification v1.4 in section B.4.
+ * For each IPI, we allow the local APIC ~20us to deliver the
+ * IPI. If that times out, we panic.
+ */
+
+ /*
* first we do an INIT IPI: this INIT IPI might be run, resetting
* and running the target CPU. OR this INIT IPI might be latched (P5
* bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
* ignored.
*/
- lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
+ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
- lapic_ipi_wait(-1);
+ lapic_ipi_wait(20);
+
+ /* Explicitly deassert the INIT IPI. */
+ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
+ APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
+ apic_id);
+
DELAY(10000); /* wait ~10mS */
/*
@@ -1157,9 +1173,11 @@ ipi_startup(int apic_id, int vector)
* will run.
*/
lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
- APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
+ APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
vector, apic_id);
- lapic_ipi_wait(-1);
+ if (!lapic_ipi_wait(20))
+ panic("Failed to deliver first STARTUP IPI to APIC %d",
+ apic_id);
DELAY(200); /* wait ~200uS */
/*
@@ -1169,9 +1187,12 @@ ipi_startup(int apic_id, int vector)
* recognized after hardware RESET or INIT IPI.
*/
lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
- APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
+ APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
vector, apic_id);
- lapic_ipi_wait(-1);
+ if (!lapic_ipi_wait(20))
+ panic("Failed to deliver second STARTUP IPI to APIC %d",
+ apic_id);
+
DELAY(200); /* wait ~200uS */
}
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index c625dcb1903f..f80a8983ab29 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -346,6 +346,15 @@ rdmsr(u_int msr)
return (rv);
}
+static __inline uint32_t
+rdmsr32(u_int msr)
+{
+ uint32_t low;
+
+ __asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "edx");
+ return (low);
+}
+
static __inline uint64_t
rdpmc(u_int pmc)
{
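rdmsr32() above returns only %eax, sparing callers the 64-bit shift/merge that rdmsr() performs when just the low word of an MSR is needed. A short sketch of the kind of use intended, reading one in-service word of the local APIC in x2APIC mode; MSR_APIC_ISR0 is the constant referenced in the apic_vector.s changes above, and this wrapper itself is only illustrative.

/* Sketch: fetch a 32-bit in-service word from the x2APIC MSR range. */
static __inline uint32_t
lapic_read_isr_word_sketch(u_int index)
{
	return (rdmsr32(MSR_APIC_ISR0 + index));
}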
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index beb49bc56962..0cab45b05190 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_init_path.h"
+#include "opt_verbose_sysinit.h"
#include <sys/param.h>
#include <sys/kernel.h>
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 6aa35234cb1d..fdf2271bc873 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -410,6 +410,11 @@ initclocks(dummy)
#ifdef SW_WATCHDOG
EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
+ /*
+ * Arrange for ticks to wrap 10 minutes after boot to help catch
+ * sign problems sooner.
+ */
+ ticks = INT_MAX - (hz * 10 * 60);
}
/*
diff --git a/sys/kern/kern_clocksource.c b/sys/kern/kern_clocksource.c
index 4178513fd187..cbbbce445420 100644
--- a/sys/kern/kern_clocksource.c
+++ b/sys/kern/kern_clocksource.c
@@ -908,3 +908,42 @@ sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");
+
+#include "opt_ddb.h"
+
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(clocksource, db_show_clocksource)
+{
+ struct pcpu_state *st;
+ int c;
+
+ CPU_FOREACH(c) {
+ st = DPCPU_ID_PTR(c, timerstate);
+ db_printf(
+ "CPU %2d: action %d handle %d ipi %d idle %d\n"
+ " now %#jx nevent %#jx (%jd)\n"
+ " ntick %#jx (%jd) nhard %#jx (%jd)\n"
+ " nstat %#jx (%jd) nprof %#jx (%jd)\n"
+ " ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
+ c, st->action, st->handle, st->ipi, st->idle,
+ (uintmax_t)st->now,
+ (uintmax_t)st->nextevent,
+ (uintmax_t)(st->nextevent - st->now) / tick_sbt,
+ (uintmax_t)st->nexttick,
+ (uintmax_t)(st->nexttick - st->now) / tick_sbt,
+ (uintmax_t)st->nexthard,
+ (uintmax_t)(st->nexthard - st->now) / tick_sbt,
+ (uintmax_t)st->nextstat,
+ (uintmax_t)(st->nextstat - st->now) / tick_sbt,
+ (uintmax_t)st->nextprof,
+ (uintmax_t)(st->nextprof - st->now) / tick_sbt,
+ (uintmax_t)st->nextcall,
+ (uintmax_t)(st->nextcall - st->now) / tick_sbt,
+ (uintmax_t)st->nextcallopt,
+ (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
+ }
+}
+
+#endif
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 45cb4b95630c..9eb6c0e0d26b 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -42,10 +42,12 @@ __FBSDID("$FreeBSD$");
#include "opt_core.h"
#include <sys/param.h>
+#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
+#include <sys/bus.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/event.h>
@@ -178,6 +180,10 @@ static int set_core_nodump_flag = 0;
SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
0, "Enable setting the NODUMP flag on coredump files");
+static int coredump_devctl = 1;
+SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
+ 0, "Generate a devctl notification when processes coredump");
+
/*
* Signal properties and actions.
* The array below categorizes the signals and their default actions
@@ -3216,6 +3222,24 @@ out:
return (0);
}
+static int
+coredump_sanitise_path(const char *path)
+{
+ size_t i;
+
+ /*
+ * Only send a subset of ASCII to devd(8) because it
+ * might pass these strings to sh -c.
+ */
+ for (i = 0; path[i]; i++)
+ if (!(isalpha(path[i]) || isdigit(path[i])) &&
+ path[i] != '/' && path[i] != '.' &&
+ path[i] != '-')
+ return (0);
+
+ return (1);
+}
+
/*
* Dump a process' core. The main routine does some
* policy checking, and creates the name of the coredump;
@@ -3237,6 +3261,11 @@ coredump(struct thread *td)
void *rl_cookie;
off_t limit;
int compress;
+ char *data = NULL;
+ char *fullpath, *freepath = NULL;
+ size_t len;
+ static const char comm_name[] = "comm=";
+ static const char core_name[] = "core=";
#ifdef COMPRESS_USER_CORES
compress = compress_user_cores;
@@ -3322,9 +3351,39 @@ close:
error1 = vn_close(vp, FWRITE, cred, td);
if (error == 0)
error = error1;
+ else
+ goto out;
+ /*
+ * Notify the userland helper that a process triggered a core dump.
+ * This allows the helper to run an automated debugging session.
+ */
+ if (coredump_devctl == 0)
+ goto out;
+ len = MAXPATHLEN * 2 + sizeof(comm_name) - 1 +
+ sizeof(' ') + sizeof(core_name) - 1;
+ data = malloc(len, M_TEMP, M_WAITOK);
+ if (data == NULL)
+ goto out;
+ if (vn_fullpath_global(td, p->p_textvp, &fullpath, &freepath) != 0)
+ goto out;
+ if (!coredump_sanitise_path(fullpath))
+ goto out;
+ snprintf(data, len, "%s%s ", comm_name, fullpath);
+ free(freepath, M_TEMP);
+ freepath = NULL;
+ if (vn_fullpath_global(td, vp, &fullpath, &freepath) != 0)
+ goto out;
+ if (!coredump_sanitise_path(fullpath))
+ goto out;
+ strlcat(data, core_name, len);
+ strlcat(data, fullpath, len);
+ devctl_notify("kernel", "signal", "coredump", data);
+out:
#ifdef AUDIT
audit_proc_coredump(td, name, error);
#endif
+ free(freepath, M_TEMP);
+ free(data, M_TEMP);
free(name, M_TEMP);
return (error);
}
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 1d5d24faa40b..c8eb6d887b08 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -166,26 +166,16 @@ struct callout_cpu {
char cc_ktr_event_name[20];
};
-#define cc_exec_curr cc_exec_entity[0].cc_curr
-#define cc_exec_next cc_exec_entity[0].cc_next
-#define cc_exec_cancel cc_exec_entity[0].cc_cancel
-#define cc_exec_waiting cc_exec_entity[0].cc_waiting
-#define cc_exec_curr_dir cc_exec_entity[1].cc_curr
-#define cc_exec_next_dir cc_exec_entity[1].cc_next
-#define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel
-#define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting
-
+#define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
+#define cc_exec_next(cc, dir) cc->cc_exec_entity[dir].cc_next
+#define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
+#define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
-#define cc_migration_func cc_exec_entity[0].ce_migration_func
-#define cc_migration_arg cc_exec_entity[0].ce_migration_arg
-#define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu
-#define cc_migration_time cc_exec_entity[0].ce_migration_time
-#define cc_migration_prec cc_exec_entity[0].ce_migration_prec
-#define cc_migration_func_dir cc_exec_entity[1].ce_migration_func
-#define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg
-#define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu
-#define cc_migration_time_dir cc_exec_entity[1].ce_migration_time
-#define cc_migration_prec_dir cc_exec_entity[1].ce_migration_prec
+#define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
+#define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
+#define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
+#define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
+#define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
struct callout_cpu cc_cpu[MAXCPU];
#define CPUBLOCK MAXCPU
@@ -235,16 +225,16 @@ static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{
- cc->cc_exec_entity[direct].cc_curr = NULL;
- cc->cc_exec_entity[direct].cc_next = NULL;
- cc->cc_exec_entity[direct].cc_cancel = false;
- cc->cc_exec_entity[direct].cc_waiting = false;
+ cc_exec_curr(cc, direct) = NULL;
+ cc_exec_next(cc, direct) = NULL;
+ cc_exec_cancel(cc, direct) = false;
+ cc_exec_waiting(cc, direct) = false;
#ifdef SMP
- cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
- cc->cc_exec_entity[direct].ce_migration_time = 0;
- cc->cc_exec_entity[direct].ce_migration_prec = 0;
- cc->cc_exec_entity[direct].ce_migration_func = NULL;
- cc->cc_exec_entity[direct].ce_migration_arg = NULL;
+ cc_migration_cpu(cc, direct) = CPUBLOCK;
+ cc_migration_time(cc, direct) = 0;
+ cc_migration_prec(cc, direct) = 0;
+ cc_migration_func(cc, direct) = NULL;
+ cc_migration_arg(cc, direct) = NULL;
#endif
}
@@ -256,7 +246,7 @@ cc_cce_migrating(struct callout_cpu *cc, int direct)
{
#ifdef SMP
- return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
+ return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
return (0);
#endif
@@ -492,7 +482,7 @@ callout_process(sbintime_t now)
#ifdef CALLOUT_PROFILING
++depth_dir;
#endif
- cc->cc_exec_next_dir =
+ cc_exec_next(cc, 1) =
LIST_NEXT(tmp, c_links.le);
cc->cc_bucket = firstb & callwheelmask;
LIST_REMOVE(tmp, c_links.le);
@@ -501,7 +491,7 @@ callout_process(sbintime_t now)
&mpcalls_dir, &lockcalls_dir, NULL,
#endif
1);
- tmp = cc->cc_exec_next_dir;
+ tmp = cc_exec_next(cc, 1);
} else {
tmpn = LIST_NEXT(tmp, c_links.le);
LIST_REMOVE(tmp, c_links.le);
@@ -585,7 +575,7 @@ callout_lock(struct callout *c)
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
sbintime_t sbt, sbintime_t precision, void (*func)(void *),
- void *arg, int cpu, int flags)
+ void *arg, int cpu, int flags, int direct)
{
int bucket;
@@ -606,7 +596,7 @@ callout_cc_add(struct callout *c, struct callout_cpu *cc,
(u_int)(c->c_precision & 0xffffffff));
LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
if (cc->cc_bucket == bucket)
- cc->cc_exec_next_dir = c;
+ cc_exec_next(cc, direct) = c;
#ifndef NO_EVENTTIMERS
/*
* Inform the eventtimers(4) subsystem there's a new callout
@@ -679,8 +669,9 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
c->c_flags = CALLOUT_LOCAL_ALLOC;
else
c->c_flags &= ~CALLOUT_PENDING;
- cc->cc_exec_entity[direct].cc_curr = c;
- cc->cc_exec_entity[direct].cc_cancel = false;
+
+ cc_exec_curr(cc, direct) = c;
+ cc_exec_cancel(cc, direct) = false;
CC_UNLOCK(cc);
if (c_lock != NULL) {
class->lc_lock(c_lock, lock_status);
@@ -688,12 +679,12 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
* The callout may have been cancelled
* while we switched locks.
*/
- if (cc->cc_exec_entity[direct].cc_cancel) {
+ if (cc_exec_cancel(cc, direct)) {
class->lc_unlock(c_lock);
goto skip;
}
/* The callout cannot be stopped now. */
- cc->cc_exec_entity[direct].cc_cancel = true;
+ cc_exec_cancel(cc, direct) = true;
if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
(*gcalls)++;
@@ -744,9 +735,9 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
class->lc_unlock(c_lock);
skip:
CC_LOCK(cc);
- KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
- cc->cc_exec_entity[direct].cc_curr = NULL;
- if (cc->cc_exec_entity[direct].cc_waiting) {
+ KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
+ cc_exec_curr(cc, direct) = NULL;
+ if (cc_exec_waiting(cc, direct)) {
/*
* There is someone waiting for the
* callout to complete.
@@ -762,9 +753,9 @@ skip:
*/
c->c_flags &= ~CALLOUT_DFRMIGRATION;
}
- cc->cc_exec_entity[direct].cc_waiting = false;
+ cc_exec_waiting(cc, direct) = false;
CC_UNLOCK(cc);
- wakeup(&cc->cc_exec_entity[direct].cc_waiting);
+ wakeup(&cc_exec_waiting(cc, direct));
CC_LOCK(cc);
} else if (cc_cce_migrating(cc, direct)) {
KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
@@ -774,11 +765,11 @@ skip:
* If the callout was scheduled for
* migration just perform it now.
*/
- new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
- new_time = cc->cc_exec_entity[direct].ce_migration_time;
- new_prec = cc->cc_exec_entity[direct].ce_migration_prec;
- new_func = cc->cc_exec_entity[direct].ce_migration_func;
- new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
+ new_cpu = cc_migration_cpu(cc, direct);
+ new_time = cc_migration_time(cc, direct);
+ new_prec = cc_migration_prec(cc, direct);
+ new_func = cc_migration_func(cc, direct);
+ new_arg = cc_migration_arg(cc, direct);
cc_cce_cleanup(cc, direct);
/*
@@ -787,7 +778,7 @@ skip:
*
* As first thing, handle deferred callout stops.
*/
- if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
+ if (!callout_migrating(c)) {
CTR3(KTR_CALLOUT,
"deferred cancelled %p func %p arg %p",
c, new_func, new_arg);
@@ -799,7 +790,7 @@ skip:
new_cc = callout_cpu_switch(c, cc, new_cpu);
flags = (direct) ? C_DIRECT_EXEC : 0;
callout_cc_add(c, new_cc, new_time, new_prec, new_func,
- new_arg, new_cpu, flags);
+ new_arg, new_cpu, flags, direct);
CC_UNLOCK(new_cc);
CC_LOCK(cc);
#else
@@ -1007,15 +998,15 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
KASSERT(!direct || c->c_lock == NULL,
("%s: direct callout %p has lock", __func__, c));
cc = callout_lock(c);
- if (cc->cc_exec_entity[direct].cc_curr == c) {
+ if (cc_exec_curr(cc, direct) == c) {
/*
* We're being asked to reschedule a callout which is
* currently in progress. If there is a lock then we
* can cancel the callout if it has not really started.
*/
- if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
- cancelled = cc->cc_exec_entity[direct].cc_cancel = true;
- if (cc->cc_exec_entity[direct].cc_waiting) {
+ if (c->c_lock != NULL && cc_exec_cancel(cc, direct))
+ cancelled = cc_exec_cancel(cc, direct) = true;
+ if (cc_exec_waiting(cc, direct)) {
/*
* Someone has called callout_drain to kill this
* callout. Don't reschedule.
@@ -1026,11 +1017,30 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
CC_UNLOCK(cc);
return (cancelled);
}
+#ifdef SMP
+ if (callout_migrating(c)) {
+ /*
+ * This only occurs when a second callout_reset_sbt_on
+ * is made after a previous one moved it into
+ * deferred migration (below). Note we do *not* change
+ * the prev_cpu even though the previous target may
+ * be different.
+ */
+ cc_migration_cpu(cc, direct) = cpu;
+ cc_migration_time(cc, direct) = to_sbt;
+ cc_migration_prec(cc, direct) = precision;
+ cc_migration_func(cc, direct) = ftn;
+ cc_migration_arg(cc, direct) = arg;
+ cancelled = 1;
+ CC_UNLOCK(cc);
+ return (cancelled);
+ }
+#endif
}
if (c->c_flags & CALLOUT_PENDING) {
if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
- if (cc->cc_exec_next_dir == c)
- cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
+ if (cc_exec_next(cc, direct) == c)
+ cc_exec_next(cc, direct) = LIST_NEXT(c, c_links.le);
LIST_REMOVE(c, c_links.le);
} else
TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
@@ -1045,15 +1055,29 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
* to a more appropriate moment.
*/
if (c->c_cpu != cpu) {
- if (cc->cc_exec_entity[direct].cc_curr == c) {
- cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
- cc->cc_exec_entity[direct].ce_migration_time
- = to_sbt;
- cc->cc_exec_entity[direct].ce_migration_prec
- = precision;
- cc->cc_exec_entity[direct].ce_migration_func = ftn;
- cc->cc_exec_entity[direct].ce_migration_arg = arg;
- c->c_flags |= CALLOUT_DFRMIGRATION;
+ if (cc_exec_curr(cc, direct) == c) {
+ /*
+ * Pending will have been removed since we are
+ * actually executing the callout on another
+ * CPU. That callout should be waiting on the
+ * lock the caller holds. If we set both
+ * active and pending after we return and the
+ * lock on the executing callout proceeds, it
+ * will then see pending is true and return.
+ * At the return from the actual callout execution
+ * the migration will occur in softclock_call_cc
+ * and this new callout will be placed on the
+ * new CPU via a call to callout_cpu_switch() which
+ * will get the lock on the right CPU followed
+ * by a call to callout_cc_add() which will add it there.
+ * (see above in softclock_call_cc()).
+ */
+ cc_migration_cpu(cc, direct) = cpu;
+ cc_migration_time(cc, direct) = to_sbt;
+ cc_migration_prec(cc, direct) = precision;
+ cc_migration_func(cc, direct) = ftn;
+ cc_migration_arg(cc, direct) = arg;
+ c->c_flags |= (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING);
CTR6(KTR_CALLOUT,
"migration of %p func %p arg %p in %d.%08x to %u deferred",
c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
@@ -1065,7 +1089,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
}
#endif
- callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
+ callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags, direct);
CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
(u_int)(to_sbt & 0xffffffff));
@@ -1095,6 +1119,7 @@ _callout_stop_safe(struct callout *c, int safe)
struct callout_cpu *cc, *old_cc;
struct lock_class *class;
int direct, sq_locked, use_lock;
+ int not_on_a_list;
if (safe)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
@@ -1120,6 +1145,26 @@ _callout_stop_safe(struct callout *c, int safe)
again:
cc = callout_lock(c);
+ if ((c->c_flags & (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING)) ==
+ (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING)) {
+ /*
+ * Special case where this slipped in while we
+ * were migrating *as* the callout is about to
+ * execute. The caller probably holds the lock
+ * the callout wants.
+ *
+ * Get rid of the migration first. Then set
+ * the flag that tells this code *not* to
+ * try to remove it from any lists (it's not
+ * on one yet). When the callout wheel runs,
+ * it will ignore this callout.
+ */
+ c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_ACTIVE);
+ not_on_a_list = 1;
+ } else {
+ not_on_a_list = 0;
+ }
+
/*
* If the callout was migrating while the callout cpu lock was
* dropped, just drop the sleepqueue lock and check the states
@@ -1128,7 +1173,7 @@ again:
if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
CC_UNLOCK(cc);
- sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting);
+ sleepq_release(&cc_exec_waiting(old_cc, direct));
sq_locked = 0;
old_cc = NULL;
goto again;
@@ -1149,13 +1194,12 @@ again:
* If it wasn't on the queue and it isn't the current
* callout, then we can't stop it, so just bail.
*/
- if (cc->cc_exec_entity[direct].cc_curr != c) {
+ if (cc_exec_curr(cc, direct) != c) {
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
CC_UNLOCK(cc);
if (sq_locked)
- sleepq_release(
- &cc->cc_exec_entity[direct].cc_waiting);
+ sleepq_release(&cc_exec_waiting(cc, direct));
return (0);
}
@@ -1166,7 +1210,7 @@ again:
* just wait for the current invocation to
* finish.
*/
- while (cc->cc_exec_entity[direct].cc_curr == c) {
+ while (cc_exec_curr(cc, direct) == c) {
/*
* Use direct calls to sleepqueue interface
* instead of cv/msleep in order to avoid
@@ -1187,7 +1231,7 @@ again:
if (!sq_locked) {
CC_UNLOCK(cc);
sleepq_lock(
- &cc->cc_exec_entity[direct].cc_waiting);
+ &cc_exec_waiting(cc, direct));
sq_locked = 1;
old_cc = cc;
goto again;
@@ -1199,15 +1243,15 @@ again:
* will be packed up, just let softclock()
* take care of it.
*/
- cc->cc_exec_entity[direct].cc_waiting = true;
+ cc_exec_waiting(cc, direct) = true;
DROP_GIANT();
CC_UNLOCK(cc);
sleepq_add(
- &cc->cc_exec_entity[direct].cc_waiting,
+ &cc_exec_waiting(cc, direct),
&cc->cc_lock.lock_object, "codrain",
SLEEPQ_SLEEP, 0);
sleepq_wait(
- &cc->cc_exec_entity[direct].cc_waiting,
+ &cc_exec_waiting(cc, direct),
0);
sq_locked = 0;
old_cc = NULL;
@@ -1217,7 +1261,8 @@ again:
CC_LOCK(cc);
}
} else if (use_lock &&
- !cc->cc_exec_entity[direct].cc_cancel) {
+ !cc_exec_cancel(cc, direct)) {
+
/*
* The current callout is waiting for its
* lock which we hold. Cancel the callout
@@ -1225,7 +1270,7 @@ again:
* lock, the callout will be skipped in
* softclock().
*/
- cc->cc_exec_entity[direct].cc_cancel = true;
+ cc_exec_cancel(cc, direct) = true;
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
KASSERT(!cc_cce_migrating(cc, direct),
@@ -1233,12 +1278,34 @@ again:
CC_UNLOCK(cc);
KASSERT(!sq_locked, ("sleepqueue chain locked"));
return (1);
- } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
+ } else if (callout_migrating(c)) {
+ /*
+ * The callout is currently being serviced
+ * and the "next" callout is scheduled at
+ * its completion with a migration. We remove
+ * the migration flag so it *won't* get rescheduled,
+ * but we can't stop the one that's running so
+ * we return 0.
+ */
c->c_flags &= ~CALLOUT_DFRMIGRATION;
+#ifdef SMP
+ /*
+ * We can't call cc_cce_cleanup here since
+ * if we do, it will remove .ce_curr while
+ * it's still running. This will prevent a
+ * reschedule of the callout when the
+ * execution completes.
+ */
+ cc_migration_cpu(cc, direct) = CPUBLOCK;
+ cc_migration_time(cc, direct) = 0;
+ cc_migration_prec(cc, direct) = 0;
+ cc_migration_func(cc, direct) = NULL;
+ cc_migration_arg(cc, direct) = NULL;
+#endif
CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
c, c->c_func, c->c_arg);
CC_UNLOCK(cc);
- return (1);
+ return (0);
}
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
@@ -1247,20 +1314,21 @@ again:
return (0);
}
if (sq_locked)
- sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);
+ sleepq_release(&cc_exec_waiting(cc, direct));
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
- if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
- if (cc->cc_exec_next_dir == c)
- cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
- LIST_REMOVE(c, c_links.le);
- } else
- TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
+ if (not_on_a_list == 0) {
+ if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
+ if (cc_exec_next(cc, direct) == c)
+ cc_exec_next(cc, direct) = LIST_NEXT(c, c_links.le);
+ LIST_REMOVE(c, c_links.le);
+ } else
+ TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
+ }
callout_cc_del(c, cc);
-
CC_UNLOCK(cc);
return (1);
}
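
The deferred-migration handling added above is visible to callout(9) consumers in two ways: a callout that reschedules itself onto a different CPU from inside its own handler now goes through the CALLOUT_DFRMIGRATION path, and callout_stop() on a callout in that state returns 0, since the invocation already in progress cannot be stopped. The sketch below shows such a consumer; the names my_softc, my_tick and my_start are hypothetical, and only the standard callout(9) and mutex(9) KPI calls are used.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/smp.h>
#include <sys/time.h>

/* Hypothetical driver state: a periodic tick that hops CPUs each run. */
struct my_softc {
	struct mtx	sc_mtx;
	struct callout	sc_tick;
	int		sc_cpu;
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	/* ... periodic work ... */

	/*
	 * Rescheduling onto another CPU while this handler is still
	 * running takes the CALLOUT_DFRMIGRATION path in
	 * softclock_call_cc() above.
	 */
	sc->sc_cpu = (sc->sc_cpu + 1) % mp_ncpus;
	callout_reset_sbt_on(&sc->sc_tick, SBT_1S, 0, my_tick, sc,
	    sc->sc_cpu, 0);
}

static void
my_start(struct my_softc *sc)
{

	mtx_init(&sc->sc_mtx, "my_softc", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset_sbt_on(&sc->sc_tick, SBT_1S, 0, my_tick, sc, 0, 0);
	mtx_unlock(&sc->sc_mtx);
}
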
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index 7f916454c342..1c7b21ca7d9e 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
+#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/queue.h>
@@ -128,15 +129,6 @@ struct device {
device_state_t state; /**< current device state */
uint32_t devflags; /**< api level flags for device_get_flags() */
u_int flags; /**< internal device flags */
-#define DF_ENABLED 0x01 /* device should be probed/attached */
-#define DF_FIXEDCLASS 0x02 /* devclass specified at create time */
-#define DF_WILDCARD 0x04 /* unit was originally wildcard */
-#define DF_DESCMALLOCED 0x08 /* description was malloced */
-#define DF_QUIET 0x10 /* don't print verbose attach message */
-#define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */
-#define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */
-#define DF_REBID 0x80 /* Can rebid after attach */
-#define DF_SUSPENDED 0x100 /* Device is suspended. */
u_int order; /**< order from device_add_child_ordered() */
void *ivars; /**< instance variables */
void *softc; /**< current driver's variables */
@@ -148,6 +140,8 @@ struct device {
static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures");
static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc");
+static void devctl2_init(void);
+
#ifdef BUS_DEBUG
static int bus_debug = 1;
@@ -432,6 +426,7 @@ devinit(void)
cv_init(&devsoftc.cv, "dev cv");
TAILQ_INIT(&devsoftc.devq);
knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx);
+ devctl2_init();
}
static int
@@ -2648,6 +2643,15 @@ device_is_attached(device_t dev)
}
/**
+ * @brief Return non-zero if the device is currently suspended.
+ */
+int
+device_is_suspended(device_t dev)
+{
+ return ((dev->flags & DF_SUSPENDED) != 0);
+}
+
+/**
* @brief Set the devclass of a device
* @see devclass_add_device().
*/
@@ -3653,7 +3657,7 @@ bus_generic_suspend_child(device_t dev, device_t child)
error = DEVICE_SUSPEND(child);
if (error == 0)
- dev->flags |= DF_SUSPENDED;
+ child->flags |= DF_SUSPENDED;
return (error);
}
@@ -3668,7 +3672,7 @@ bus_generic_resume_child(device_t dev, device_t child)
{
DEVICE_RESUME(child);
- dev->flags &= ~DF_SUSPENDED;
+ child->flags &= ~DF_SUSPENDED;
return (0);
}
@@ -5031,3 +5035,253 @@ bus_free_resource(device_t dev, int type, struct resource *r)
return (0);
return (bus_release_resource(dev, type, rman_get_rid(r), r));
}
+
+/*
+ * /dev/devctl2 implementation. The existing /dev/devctl device has
+ * implicit semantics on open, so it could not be reused for this.
+ * Another option would be to call this /dev/bus?
+ */
+static int
+find_device(struct devreq *req, device_t *devp)
+{
+ device_t dev;
+
+ /*
+ * First, ensure that the name is nul terminated.
+ */
+ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL)
+ return (EINVAL);
+
+ /*
+ * Second, try to find an attached device whose name matches
+ * 'name'.
+ */
+ TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
+ if (dev->nameunit != NULL &&
+ strcmp(dev->nameunit, req->dr_name) == 0) {
+ *devp = dev;
+ return (0);
+ }
+ }
+
+ /* Finally, give device enumerators a chance. */
+ dev = NULL;
+ EVENTHANDLER_INVOKE(dev_lookup, req->dr_name, &dev);
+ if (dev == NULL)
+ return (ENOENT);
+ *devp = dev;
+ return (0);
+}
+
+static bool
+driver_exists(struct device *bus, const char *driver)
+{
+ devclass_t dc;
+
+ for (dc = bus->devclass; dc != NULL; dc = dc->parent) {
+ if (devclass_find_driver_internal(dc, driver) != NULL)
+ return (true);
+ }
+ return (false);
+}
+
+static int
+devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct devreq *req;
+ device_t dev;
+ int error, old;
+
+ /* Locate the device to control. */
+ mtx_lock(&Giant);
+ req = (struct devreq *)data;
+ switch (cmd) {
+ case DEV_ATTACH:
+ case DEV_DETACH:
+ case DEV_ENABLE:
+ case DEV_DISABLE:
+ case DEV_SUSPEND:
+ case DEV_RESUME:
+ case DEV_SET_DRIVER:
+ error = priv_check(td, PRIV_DRIVER);
+ if (error == 0)
+ error = find_device(req, &dev);
+ break;
+ default:
+ error = ENOTTY;
+ break;
+ }
+ if (error) {
+ mtx_unlock(&Giant);
+ return (error);
+ }
+
+ /* Perform the requested operation. */
+ switch (cmd) {
+ case DEV_ATTACH:
+ if (device_is_attached(dev) && (dev->flags & DF_REBID) == 0)
+ error = EBUSY;
+ else if (!device_is_enabled(dev))
+ error = ENXIO;
+ else
+ error = device_probe_and_attach(dev);
+ break;
+ case DEV_DETACH:
+ if (!device_is_attached(dev)) {
+ error = ENXIO;
+ break;
+ }
+ if (!(req->dr_flags & DEVF_FORCE_DETACH)) {
+ error = device_quiesce(dev);
+ if (error)
+ break;
+ }
+ error = device_detach(dev);
+ break;
+ case DEV_ENABLE:
+ if (device_is_enabled(dev)) {
+ error = EBUSY;
+ break;
+ }
+
+ /*
+ * If the device has been probed but not attached (e.g.
+ * when it has been disabled by a loader hint), just
+ * attach the device rather than doing a full probe.
+ */
+ device_enable(dev);
+ if (device_is_alive(dev)) {
+ /*
+ * If the device was disabled via a hint, clear
+ * the hint.
+ */
+ if (resource_disabled(dev->driver->name, dev->unit))
+ resource_unset_value(dev->driver->name,
+ dev->unit, "disabled");
+ error = device_attach(dev);
+ } else
+ error = device_probe_and_attach(dev);
+ break;
+ case DEV_DISABLE:
+ if (!device_is_enabled(dev)) {
+ error = ENXIO;
+ break;
+ }
+
+ if (!(req->dr_flags & DEVF_FORCE_DETACH)) {
+ error = device_quiesce(dev);
+ if (error)
+ break;
+ }
+
+ /*
+ * Force DF_FIXEDCLASS on around detach to preserve
+ * the existing name.
+ */
+ old = dev->flags;
+ dev->flags |= DF_FIXEDCLASS;
+ error = device_detach(dev);
+ if (!(old & DF_FIXEDCLASS))
+ dev->flags &= ~DF_FIXEDCLASS;
+ if (error == 0)
+ device_disable(dev);
+ break;
+ case DEV_SUSPEND:
+ if (device_is_suspended(dev)) {
+ error = EBUSY;
+ break;
+ }
+ if (device_get_parent(dev) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev);
+ break;
+ case DEV_RESUME:
+ if (!device_is_suspended(dev)) {
+ error = EINVAL;
+ break;
+ }
+ if (device_get_parent(dev) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ error = BUS_RESUME_CHILD(device_get_parent(dev), dev);
+ break;
+ case DEV_SET_DRIVER: {
+ devclass_t dc;
+ char driver[128];
+
+ error = copyinstr(req->dr_data, driver, sizeof(driver), NULL);
+ if (error)
+ break;
+ if (driver[0] == '\0') {
+ error = EINVAL;
+ break;
+ }
+ if (dev->devclass != NULL &&
+ strcmp(driver, dev->devclass->name) == 0)
+ /* XXX: Could possibly force DF_FIXEDCLASS on? */
+ break;
+
+ /*
+ * Scan drivers for this device's bus looking for at
+ * least one matching driver.
+ */
+ if (dev->parent == NULL) {
+ error = EINVAL;
+ break;
+ }
+ if (!driver_exists(dev->parent, driver)) {
+ error = ENOENT;
+ break;
+ }
+ dc = devclass_create(driver);
+ if (dc == NULL) {
+ error = ENOMEM;
+ break;
+ }
+
+ /* Detach device if necessary. */
+ if (device_is_attached(dev)) {
+ if (req->dr_flags & DEVF_SET_DRIVER_DETACH)
+ error = device_detach(dev);
+ else
+ error = EBUSY;
+ if (error)
+ break;
+ }
+
+ /* Clear any previously-fixed device class and unit. */
+ if (dev->flags & DF_FIXEDCLASS)
+ devclass_delete_device(dev->devclass, dev);
+ dev->flags |= DF_WILDCARD;
+ dev->unit = -1;
+
+ /* Force the new device class. */
+ error = devclass_add_device(dc, dev);
+ if (error)
+ break;
+ dev->flags |= DF_FIXEDCLASS;
+ error = device_probe_and_attach(dev);
+ break;
+ }
+ }
+ mtx_unlock(&Giant);
+ return (error);
+}
+
+static struct cdevsw devctl2_cdevsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = devctl2_ioctl,
+ .d_name = "devctl2",
+};
+
+static void
+devctl2_init(void)
+{
+
+ make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL,
+ UID_ROOT, GID_WHEEL, 0600, "devctl2");
+}
diff --git a/sys/kern/subr_hints.c b/sys/kern/subr_hints.c
index 25838ee34136..00cfbf1bfb88 100644
--- a/sys/kern/subr_hints.c
+++ b/sys/kern/subr_hints.c
@@ -461,3 +461,31 @@ resource_disabled(const char *name, int unit)
return (0);
return (value);
}
+
+/*
+ * Clear a value associated with a device by removing it from
+ * the kernel environment. This only removes a hint for an
+ * exact unit.
+ */
+int
+resource_unset_value(const char *name, int unit, const char *resname)
+{
+ char varname[128];
+ const char *retname, *retvalue;
+ int error, line;
+ size_t len;
+
+ line = 0;
+ error = resource_find(&line, NULL, name, &unit, resname, NULL,
+ &retname, NULL, NULL, NULL, NULL, &retvalue);
+ if (error)
+ return (error);
+
+ retname -= strlen("hint.");
+ len = retvalue - retname - 1;
+ if (len > sizeof(varname) - 1)
+ return (ENAMETOOLONG);
+ memcpy(varname, retname, len);
+ varname[len] = '\0';
+ return (kern_unsetenv(varname));
+}
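
resource_unset_value() is the inverse of setting a loader hint: it deletes the kenv variable "hint.<name>.<unit>.<resname>" that resource_find() located. The hypothetical in-kernel helper below mirrors the DEV_ENABLE path in subr_bus.c above; the helper name and flow are illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

/*
 * Hypothetical helper: if a device was disabled by a loader hint such
 * as hint.uart.1.disabled="1", remove the hint (which deletes the
 * kenv variable "hint.uart.1.disabled") and try to attach it again.
 */
static int
reenable_hinted_device(device_t dev)
{
	int error;

	if (!device_is_enabled(dev)) {
		if (resource_disabled(device_get_name(dev),
		    device_get_unit(dev))) {
			error = resource_unset_value(device_get_name(dev),
			    device_get_unit(dev), "disabled");
			if (error != 0)
				return (error);
		}
		device_enable(dev);
	}
	return (device_probe_and_attach(dev));
}
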
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 7829620a7be6..1df89ad61e6f 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -531,9 +531,10 @@ shm_alloc(struct ucred *ucred, mode_t mode)
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
+ shmfd->shm_object->pg_color = 0;
VM_OBJECT_WLOCK(shmfd->shm_object);
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
- vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
+ vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
VM_OBJECT_WUNLOCK(shmfd->shm_object);
vfs_timestamp(&shmfd->shm_birthtime);
shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index b2b4969975cd..09fa7ed071bf 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -888,12 +888,18 @@ vfs_domount_update(
ASSERT_VOP_ELOCKED(vp, __func__);
KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
+ mp = vp->v_mount;
if ((vp->v_vflag & VV_ROOT) == 0) {
+ if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
+ == 0)
+ error = EXDEV;
+ else
+ error = EINVAL;
vput(vp);
- return (EINVAL);
+ return (error);
}
- mp = vp->v_mount;
+
/*
* We only allow the filesystem to be reloaded if it
* is currently mounted read-only.
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 7498dab0792a..7f917d6e5c6a 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -735,6 +735,8 @@ _zfs= zfs
.endif
+SUBDIR+=${MODULES_EXTRA}
+
.for reject in ${WITHOUT_MODULES}
SUBDIR:= ${SUBDIR:N${reject}}
.endfor
diff --git a/sys/modules/cxgbe/Makefile b/sys/modules/cxgbe/Makefile
index f7862b5cd722..a46850c1afcf 100644
--- a/sys/modules/cxgbe/Makefile
+++ b/sys/modules/cxgbe/Makefile
@@ -6,6 +6,7 @@ SYSDIR?=${.CURDIR}/../..
.include "${SYSDIR}/conf/kern.opts.mk"
SUBDIR= if_cxgbe
+SUBDIR+= if_cxl
SUBDIR+= t4_firmware
SUBDIR+= t5_firmware
SUBDIR+= ${_tom}
diff --git a/sys/modules/cxgbe/if_cxl/Makefile b/sys/modules/cxgbe/if_cxl/Makefile
new file mode 100644
index 000000000000..ec4ff1ed1802
--- /dev/null
+++ b/sys/modules/cxgbe/if_cxl/Makefile
@@ -0,0 +1,11 @@
+#
+# $FreeBSD$
+#
+
+CXGBE= ${.CURDIR}/../../../dev/cxgbe
+.PATH: ${CXGBE}
+
+KMOD= if_cxl
+SRCS= if_cxl.c
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/dtb/rpi/Makefile b/sys/modules/dtb/rpi/Makefile
new file mode 100644
index 000000000000..9ea76638bd52
--- /dev/null
+++ b/sys/modules/dtb/rpi/Makefile
@@ -0,0 +1,5 @@
+# $FreeBSD$
+# DTS files for the Raspberry Pi-B
+DTS=rpi.dts
+
+.include <bsd.dtb.mk>
diff --git a/sys/modules/dtrace/Makefile b/sys/modules/dtrace/Makefile
index 08b6937bb4de..94a7a423dfab 100644
--- a/sys/modules/dtrace/Makefile
+++ b/sys/modules/dtrace/Makefile
@@ -22,5 +22,7 @@ SUBDIR+= fbt fasttrap
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_ARCH} == "powerpc64"
SUBDIR+= systrace_freebsd32
.endif
-
+.if ${MACHINE_CPUARCH} == "arm"
+SUBDIR+= fbt
+.endif
.include <bsd.subdir.mk>
diff --git a/sys/modules/dtrace/dtrace/Makefile b/sys/modules/dtrace/dtrace/Makefile
index 5492c6f2d55a..417266c5641b 100644
--- a/sys/modules/dtrace/dtrace/Makefile
+++ b/sys/modules/dtrace/dtrace/Makefile
@@ -53,6 +53,11 @@ EXPORT_SYMS= dtrace_register \
dtrace_asm.o: assym.s
+.if ${MACHINE_CPUARCH} == "arm"
+assym.o: assym.s
+ ${AS} -meabi=5 -o assym.o assym.s
+.endif
+
.include <bsd.kmod.mk>
CFLAGS+= -include ${SYSDIR}/cddl/compat/opensolaris/sys/debug_compat.h
diff --git a/sys/modules/wpi/Makefile b/sys/modules/wpi/Makefile
index 1e2383ff1ab8..2e0da73b1894 100644
--- a/sys/modules/wpi/Makefile
+++ b/sys/modules/wpi/Makefile
@@ -3,6 +3,6 @@
.PATH: ${.CURDIR}/../../dev/wpi
KMOD = if_wpi
-SRCS = if_wpi.c device_if.h bus_if.h pci_if.h opt_wlan.h
+SRCS = if_wpi.c device_if.h bus_if.h pci_if.h opt_wpi.h opt_wlan.h
.include <bsd.kmod.mk>
diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c
index 5011fc418d83..78ec2f40f781 100644
--- a/sys/netinet/if_ether.c
+++ b/sys/netinet/if_ether.c
@@ -166,10 +166,28 @@ arptimer(void *arg)
struct ifnet *ifp;
if (lle->la_flags & LLE_STATIC) {
- LLE_WUNLOCK(lle);
return;
}
-
+ LLE_WLOCK(lle);
+ if (callout_pending(&lle->la_timer)) {
+ /*
+ * We are a bit odd here in the treatment of
+ * active/pending. If the pending bit is set, it got
+ * rescheduled before I ran. The active
+ * bit we ignore, since if it was stopped
+ * in ll_tablefree() while it was currently running,
+ * the stop would have returned 0, so the code would
+ * not have deleted it (the callout could
+ * not be stopped) and we want to go through
+ * with the delete here now. If the callout
+ * was restarted, the pending bit will be back on and
+ * we just want to bail since the callout_reset would
+ * return 1 and our reference would have been removed
+ * by arpresolve() below.
+ */
+ LLE_WUNLOCK(lle);
+ return;
+ }
ifp = lle->lle_tbl->llt_ifp;
CURVNET_SET(ifp->if_vnet);
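
This hunk, together with the in.c, in6.c and nd6.c hunks below, converts the lltable timers from CALLOUT_RETURNUNLOCKED rw-lock callouts to plain MPSAFE callouts: the handler now takes the entry lock itself and bails out when callout_pending() shows it was rescheduled while waiting to run. A distilled version of that handler pattern, with hypothetical names, is sketched here.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

/* Hypothetical entry protected by its own mutex and expiry timer. */
struct timer_entry {
	struct mtx	te_lock;
	struct callout	te_timer;
};

static void
timer_entry_expire(void *arg)
{
	struct timer_entry *te = arg;

	mtx_lock(&te->te_lock);
	if (callout_pending(&te->te_timer)) {
		/*
		 * Rescheduled after this invocation was already
		 * queued; the newer invocation owns the work.
		 */
		mtx_unlock(&te->te_lock);
		return;
	}
	/* ... expire or free the entry here ... */
	mtx_unlock(&te->te_lock);
}
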
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index 42cf7e607e7b..bfcb33a641ad 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -962,8 +962,7 @@ in_lltable_new(const struct sockaddr *l3addr, u_int flags)
lle->base.lle_refcnt = 1;
lle->base.lle_free = in_lltable_free;
LLE_LOCK_INIT(&lle->base);
- callout_init_rw(&lle->base.la_timer, &lle->base.lle_lock,
- CALLOUT_RETURNUNLOCKED);
+ callout_init(&lle->base.la_timer, 1);
return (&lle->base);
}
diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c
index e22c0bd69ad9..448f8f288060 100644
--- a/sys/netinet6/in6.c
+++ b/sys/netinet6/in6.c
@@ -1333,6 +1333,7 @@ in6_purgeaddr(struct ifaddr *ifa)
static void
in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
{
+ char ip6buf[INET6_ADDRSTRLEN];
IF_ADDR_WLOCK(ifp);
TAILQ_REMOVE(&ifp->if_addrhead, &ia->ia_ifa, ifa_link);
@@ -1356,7 +1357,7 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
if (ia->ia6_ndpr == NULL) {
nd6log((LOG_NOTICE,
"in6_unlink_ifa: autoconf'ed address "
- "%p has no prefix\n", ia));
+ "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia))));
} else {
ia->ia6_ndpr->ndpr_refcnt--;
ia->ia6_ndpr = NULL;
@@ -2046,8 +2047,7 @@ in6_lltable_new(const struct sockaddr *l3addr, u_int flags)
lle->base.lle_refcnt = 1;
lle->base.lle_free = in6_lltable_free;
LLE_LOCK_INIT(&lle->base);
- callout_init_rw(&lle->base.ln_timer_ch, &lle->base.lle_lock,
- CALLOUT_RETURNUNLOCKED);
+ callout_init(&lle->base.ln_timer_ch, 1);
return (&lle->base);
}
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index 8bcca398f09c..de1707fbb308 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -473,9 +473,28 @@ nd6_llinfo_timer(void *arg)
KASSERT(arg != NULL, ("%s: arg NULL", __func__));
ln = (struct llentry *)arg;
- LLE_WLOCK_ASSERT(ln);
+ LLE_WLOCK(ln);
+ if (callout_pending(&ln->la_timer)) {
+ /*
+ * We are a bit odd here in the treatment of
+ * active/pending. If the pending bit is set, it got
+ * rescheduled before I ran. The active
+ * bit we ignore, since if it was stopped
+ * in ll_tablefree() while it was currently running,
+ * the stop would have returned 0, so the code would
+ * not have deleted it (the callout could
+ * not be stopped) and we want to go through
+ * with the delete here now. If the callout
+ * was restarted, the pending bit will be back on and
+ * we just want to bail since the callout_reset would
+ * return 1 and our reference would have been removed
+ * by nd6_llinfo_settimer_locked above since canceled
+ * would have been 1.
+ */
+ LLE_WUNLOCK(ln);
+ return;
+ }
ifp = ln->lle_tbl->llt_ifp;
-
CURVNET_SET(ifp->if_vnet);
if (ln->ln_ntick > 0) {
diff --git a/sys/netpfil/ipfw/ip_fw_iface.c b/sys/netpfil/ipfw/ip_fw_iface.c
index 7e9c992032da..b7c450c6dc46 100644
--- a/sys/netpfil/ipfw/ip_fw_iface.c
+++ b/sys/netpfil/ipfw/ip_fw_iface.c
@@ -24,7 +24,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: projects/ipfw/sys/netpfil/ipfw/ip_fw_iface.c 267384 2014-06-12 09:59:11Z melifaro $");
+__FBSDID("$FreeBSD$");
/*
* Kernel interface tracking API.
@@ -397,20 +397,20 @@ ipfw_iface_del_notify(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
/*
* Unreference interface specified by @ic.
- * Must be called without holding any locks.
+ * Must be called while holding UH lock.
*/
void
ipfw_iface_unref(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{
struct ipfw_iface *iif;
+ IPFW_UH_WLOCK_ASSERT(ch);
+
iif = ic->iface;
ic->iface = NULL;
- IPFW_UH_WLOCK(ch);
iif->no.refcnt--;
/* TODO: check for references & delete */
- IPFW_UH_WUNLOCK(ch);
}
/*
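
With this change ipfw_iface_unref() asserts the UH write lock instead of taking it, so callers must hold that lock across the notifier removal and the unref, as ta_destroy_ifidx() and ta_del_ifidx() do below. A hypothetical helper capturing that contract (the include path is the one used inside sys/netpfil/ipfw):

#include <sys/param.h>
#include <sys/rwlock.h>

#include "ip_fw_private.h"	/* struct ip_fw_chain, IPFW_UH_* macros */

/*
 * Hypothetical helper showing the new contract: ipfw_iface_unref()
 * no longer takes the UH lock itself, so the caller wraps the
 * notifier removal and the unref in one UH write-lock section.
 */
static void
drop_iface_ref(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{

	IPFW_UH_WLOCK(ch);
	ipfw_iface_del_notify(ch, ic);
	ipfw_iface_unref(ch, ic);
	IPFW_UH_WUNLOCK(ch);
}
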
diff --git a/sys/netpfil/ipfw/ip_fw_nat.c b/sys/netpfil/ipfw/ip_fw_nat.c
index 8b97f6531d14..201be2f1e052 100644
--- a/sys/netpfil/ipfw/ip_fw_nat.c
+++ b/sys/netpfil/ipfw/ip_fw_nat.c
@@ -691,7 +691,7 @@ nat44_get_cfg(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
export_nat_cfg(ptr, ucfg);
/* Estimate memory amount */
- sz = sizeof(struct nat44_cfg_nat);
+ sz = sizeof(ipfw_obj_header) + sizeof(struct nat44_cfg_nat);
LIST_FOREACH(r, &ptr->redir_chain, _next) {
sz += sizeof(struct nat44_cfg_redir);
LIST_FOREACH(s, &r->spool_chain, _next)
@@ -699,7 +699,7 @@ nat44_get_cfg(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
}
ucfg->size = sz;
- if (sd->valsize < sz + sizeof(*oh)) {
+ if (sd->valsize < sz) {
/*
* Submitted buffer size is not enough.
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index ddb73e7f155e..3f46ddd48f6c 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -429,6 +429,7 @@ struct ipfw_ifc {
#define IPFW_UH_RLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_RLOCKED)
#define IPFW_UH_WLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_WLOCKED)
+#define IPFW_UH_UNLOCK_ASSERT(_chain) rw_assert(&(_chain)->uh_lock, RA_UNLOCKED)
#define IPFW_UH_RLOCK(p) rw_rlock(&(p)->uh_lock)
#define IPFW_UH_RUNLOCK(p) rw_runlock(&(p)->uh_lock)
diff --git a/sys/netpfil/ipfw/ip_fw_table.c b/sys/netpfil/ipfw/ip_fw_table.c
index 5dc8ddab0e25..4498ace88d40 100644
--- a/sys/netpfil/ipfw/ip_fw_table.c
+++ b/sys/netpfil/ipfw/ip_fw_table.c
@@ -1198,7 +1198,7 @@ flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
void *astate_old, *astate_new;
char algostate[64], *pstate;
struct tableop_state ts;
- int error;
+ int error, need_gc;
uint16_t kidx;
uint8_t tflags;
@@ -1212,6 +1212,9 @@ flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
+ need_gc = 0;
+ astate_new = NULL;
+ memset(&ti_new, 0, sizeof(ti_new));
restart:
/* Set up swap handler */
memset(&ts, 0, sizeof(ts));
@@ -1237,6 +1240,14 @@ restart:
IPFW_UH_WUNLOCK(ch);
/*
+ * Stage 1.5: if this is not the first attempt, destroy previous state
+ */
+ if (need_gc != 0) {
+ ta->destroy(astate_new, &ti_new);
+ need_gc = 0;
+ }
+
+ /*
* Stage 2: allocate new table instance using same algo.
*/
memset(&ti_new, 0, sizeof(struct table_info));
@@ -1262,7 +1273,8 @@ restart:
* complex checks.
*/
if (ts.modified != 0) {
- ta->destroy(astate_new, &ti_new);
+ /* Delay destroying data since we're holding UH lock */
+ need_gc = 1;
goto restart;
}
@@ -3042,6 +3054,7 @@ free_table_config(struct namedobj_instance *ni, struct table_config *tc)
{
KASSERT(tc->linked == 0, ("free() on linked config"));
+ /* UH lock MUST NOT be held */
/*
* We're using ta without any locking/referencing.
diff --git a/sys/netpfil/ipfw/ip_fw_table_algo.c b/sys/netpfil/ipfw/ip_fw_table_algo.c
index 50ef305bcf62..06a46410813a 100644
--- a/sys/netpfil/ipfw/ip_fw_table_algo.c
+++ b/sys/netpfil/ipfw/ip_fw_table_algo.c
@@ -97,7 +97,7 @@ __FBSDID("$FreeBSD$");
*
* -destroy: request to destroy table instance.
* typedef void (ta_destroy)(void *ta_state, struct table_info *ti);
- * MANDATORY, may be locked (UH+WLOCK). (M_NOWAIT).
+ * MANDATORY, unlocked. (M_WAITOK).
*
* Frees all table entries and all tables structures allocated by -init.
*
@@ -2134,6 +2134,7 @@ destroy_ifidx_locked(struct namedobj_instance *ii, struct named_object *no,
ife = (struct ifentry *)no;
ipfw_iface_del_notify(ch, &ife->ic);
+ ipfw_iface_unref(ch, &ife->ic);
free(ife, M_IPFW_TBL);
}
@@ -2153,7 +2154,9 @@ ta_destroy_ifidx(void *ta_state, struct table_info *ti)
if (icfg->main_ptr != NULL)
free(icfg->main_ptr, M_IPFW);
+ IPFW_UH_WLOCK(ch);
ipfw_objhash_foreach(icfg->ii, destroy_ifidx_locked, ch);
+ IPFW_UH_WUNLOCK(ch);
ipfw_objhash_destroy(icfg->ii);
@@ -2333,8 +2336,9 @@ ta_del_ifidx(void *ta_state, struct table_info *ti, struct tentry_info *tei,
/* Unlink from local list */
ipfw_objhash_del(icfg->ii, &ife->no);
- /* Unlink notifier */
+ /* Unlink notifier and deref */
ipfw_iface_del_notify(icfg->ch, &ife->ic);
+ ipfw_iface_unref(icfg->ch, &ife->ic);
icfg->count--;
tei->value = ife->value;
@@ -2357,11 +2361,8 @@ ta_flush_ifidx_entry(struct ip_fw_chain *ch, struct tentry_info *tei,
tb = (struct ta_buf_ifidx *)ta_buf;
- if (tb->ife != NULL) {
- /* Unlink first */
- ipfw_iface_unref(ch, &tb->ife->ic);
+ if (tb->ife != NULL)
free(tb->ife, M_IPFW_TBL);
- }
}
diff --git a/sys/powerpc/aim/locore32.S b/sys/powerpc/aim/locore32.S
index 6e462a05d33b..a2cb1028701a 100644
--- a/sys/powerpc/aim/locore32.S
+++ b/sys/powerpc/aim/locore32.S
@@ -118,9 +118,7 @@ __start:
bdnz 1b
sync
isync
-#ifdef WII
-#include <powerpc/wii/locore.S>
-#endif
+
/* Zero bss, in case we were started by something unhelpful */
li 0,0
lis 8,_edata@ha
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 020c07ab48f1..5d7c784ed60b 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -256,12 +256,10 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
vm_offset_t startkernel, endkernel;
void *generictrap;
size_t trap_offset, trapsize;
+ vm_offset_t trap;
void *kmdp;
char *env;
register_t msr, scratch;
-#ifdef WII
- register_t vers;
-#endif
uint8_t *cache_check;
int cacheline_warn;
#ifndef __powerpc64__
@@ -280,16 +278,6 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
startkernel = __startkernel;
endkernel = __endkernel;
-#ifdef WII
- /*
- * The Wii loader doesn't pass us any environment so, mdp
- * points to garbage at this point. The Wii CPU is a 750CL.
- */
- vers = mfpvr();
- if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
- mdp = NULL;
-#endif
-
/* Check for ePAPR loader, which puts a magic value into r6 */
if (mdp == (void *)0x65504150)
mdp = NULL;
@@ -481,20 +469,6 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
#endif
/*
- * Copy a code snippet to restore 32-bit bridge mode
- * to the top of every non-generic trap handler
- */
-
- trap_offset += (size_t)&restorebridgesize;
- bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
- bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
- bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
- bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
- bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
- bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
- bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
-
- /*
* Set the common trap entry point to the one that
* knows to restore 32-bit operation on execution.
*/
@@ -507,14 +481,35 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
#else /* powerpc64 */
cpu_features |= PPC_FEATURE_64;
generictrap = &trapcode;
-
- /* Set TOC base so that the interrupt code can get at it */
- *((void **)TRAP_GENTRAP) = &trapcode2;
- *((register_t *)TRAP_TOCBASE) = toc;
#endif
trapsize = (size_t)&trapcodeend - (size_t)&trapcode;
+ /*
+ * Copy generic handler into every possible trap. Special cases will get
+ * different ones in a minute.
+ */
+ for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
+ bcopy(generictrap, (void *)trap, trapsize);
+
+ #ifndef __powerpc64__
+ if (cpu_features & PPC_FEATURE_64) {
+ /*
+ * Copy a code snippet to restore 32-bit bridge mode
+ * to the top of every non-generic trap handler
+ */
+
+ trap_offset += (size_t)&restorebridgesize;
+ bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
+ bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
+ }
+ #endif
+
bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
(size_t)&rstcode);
@@ -527,31 +522,20 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
(size_t)&dblow);
bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
(size_t)&dblow);
-#else
- bcopy(generictrap, (void *)EXC_MCHK, trapsize);
- bcopy(generictrap, (void *)EXC_PGM, trapsize);
- bcopy(generictrap, (void *)EXC_TRC, trapsize);
- bcopy(generictrap, (void *)EXC_BPT, trapsize);
#endif
bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
(size_t)&alitrap);
bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
(size_t)&dsitrap);
- bcopy(generictrap, (void *)EXC_ISI, trapsize);
+
#ifdef __powerpc64__
+ /* Set TOC base so that the interrupt code can get at it */
+ *((void **)TRAP_GENTRAP) = &trapcode2;
+ *((register_t *)TRAP_TOCBASE) = toc;
+
bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
- #endif
- bcopy(generictrap, (void *)EXC_EXI, trapsize);
- bcopy(generictrap, (void *)EXC_FPU, trapsize);
- bcopy(generictrap, (void *)EXC_DECR, trapsize);
- bcopy(generictrap, (void *)EXC_SC, trapsize);
- bcopy(generictrap, (void *)EXC_FPA, trapsize);
- bcopy(generictrap, (void *)EXC_VEC, trapsize);
- bcopy(generictrap, (void *)EXC_PERF, trapsize);
- bcopy(generictrap, (void *)EXC_VECAST_G4, trapsize);
- bcopy(generictrap, (void *)EXC_VECAST_G5, trapsize);
- #ifndef __powerpc64__
+ #else
/* G2-specific TLB miss helper handlers */
bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
@@ -563,7 +547,7 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
* Restore MSR
*/
mtmsr(msr);
-
+
/* Warn if cacheline size was not determined */
if (cacheline_warn == 1) {
printf("WARNING: cacheline size undetermined, setting to 32\n");
@@ -572,7 +556,7 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
/*
* Choose a platform module so we can get the physical memory map.
*/
-
+
platform_probe_and_attach();
/*
@@ -701,7 +685,7 @@ int
ptrace_single_step(struct thread *td)
{
struct trapframe *tf;
-
+
tf = td->td_frame;
tf->srr1 |= PSL_SE;
@@ -753,6 +737,7 @@ spinlock_enter(void)
td = curthread;
if (td->td_md.md_spinlock_count == 0) {
+ __asm __volatile("or 2,2,2"); /* Set high thread priority */
msr = intr_disable();
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_msr = msr;
@@ -771,8 +756,10 @@ spinlock_exit(void)
critical_exit();
msr = td->td_md.md_saved_msr;
td->td_md.md_spinlock_count--;
- if (td->td_md.md_spinlock_count == 0)
+ if (td->td_md.md_spinlock_count == 0) {
intr_restore(msr);
+ __asm __volatile("or 6,6,6"); /* Set normal thread priority */
+ }
}
int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
@@ -789,7 +776,7 @@ db_trap_glue(struct trapframe *frame)
int type = frame->exc;
/* Ignore DTrace traps. */
- if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
+ if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
return (0);
if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
type = T_BREAKPOINT;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 96628f71ae5b..12d108b8953d 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -420,7 +420,7 @@ static void
tlbia(void)
{
vm_offset_t va;
-
+
for (va = 0; va < 0x00040000; va += 0x00001000) {
__asm __volatile("tlbie %0" :: "r"(va));
powerpc_sync();
@@ -623,17 +623,8 @@ moea_cpu_bootstrap(mmu_t mmup, int ap)
isync();
}
-#ifdef WII
- /*
- * Special case for the Wii: don't install the PCI BAT.
- */
- if (strcmp(installed_platform(), "wii") != 0) {
-#endif
- __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
- __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
-#ifdef WII
- }
-#endif
+ __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
+ __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
isync();
__asm __volatile("mtibatu 1,%0" :: "r"(0));
@@ -706,15 +697,9 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
:: "r"(battable[0].batu), "r"(battable[0].batl));
mtmsr(msr);
-#ifdef WII
- if (strcmp(installed_platform(), "wii") != 0) {
-#endif
- /* map pci space */
- __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
- __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
-#ifdef WII
- }
-#endif
+ /* map pci space */
+ __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
+ __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
isync();
/* set global direct map flag */
@@ -885,7 +870,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
*/
chosen = OF_finddevice("/chosen");
if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
- (mmu = OF_instance_to_package(mmui)) != -1 &&
+ (mmu = OF_instance_to_package(mmui)) != -1 &&
(sz = OF_getproplen(mmu, "translations")) != -1) {
translations = NULL;
for (i = 0; phys_avail[i] != 0; i += 2) {
@@ -917,7 +902,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
/* Enter the pages */
for (off = 0; off < translations[i].om_len;
off += PAGE_SIZE)
- moea_kenter(mmup, translations[i].om_va + off,
+ moea_kenter(mmup, translations[i].om_va + off,
translations[i].om_pa + off);
}
}
@@ -1488,7 +1473,7 @@ void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
u_int pte_lo;
- int error;
+ int error;
#if 0
if (va < VM_MIN_KERNEL_ADDRESS)
@@ -1637,7 +1622,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
== NULL) {
pmap->pmap_phys = pmap;
}
-
+
mtx_lock(&moea_vsid_mutex);
/*
@@ -1782,7 +1767,7 @@ void
moea_release(mmu_t mmu, pmap_t pmap)
{
int idx, mask;
-
+
/*
* Free segment register's VSID
*/
@@ -1957,7 +1942,7 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
} else {
if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
- moea_bpvo_pool_index, BPVO_POOL_SIZE,
+ moea_bpvo_pool_index, BPVO_POOL_SIZE,
BPVO_POOL_SIZE * sizeof(struct pvo_entry));
}
pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
@@ -2307,7 +2292,7 @@ moea_pte_spillable_ident(u_int ptegidx)
if (!(pt->pte_lo & PTE_REF))
return (pvo_walk);
}
-
+
return (pvo);
}
@@ -2504,7 +2489,7 @@ moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
*/
prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
if (prot != (BAT_I|BAT_G|BAT_PP_RW))
- return (EPERM);
+ return (EPERM);
/*
* The address should be within the BAT range. Assume that the
@@ -2527,7 +2512,7 @@ moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
int i;
/*
- * This currently does not work for entries that
+ * This currently does not work for entries that
* overlap 256M BAT segments.
*/
@@ -2560,7 +2545,7 @@ moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
ppa = trunc_page(pa);
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
-
+
/*
* If the physical address lies within a valid BAT table entry,
* return the 1:1 mapping. This currently doesn't work
diff --git a/sys/powerpc/conf/NOTES b/sys/powerpc/conf/NOTES
index ebfd299c484b..974c91ba00cd 100644
--- a/sys/powerpc/conf/NOTES
+++ b/sys/powerpc/conf/NOTES
@@ -24,7 +24,6 @@ options POWERMAC #NewWorld Apple PowerMacs
#options PS3 #Sony Playstation 3
options PSIM #GDB PSIM ppc simulator
options MAMBO #IBM Mambo Full System Simulator
-#options WII #Nintendo Wii
options SC_OFWFB # OFW frame buffer
diff --git a/sys/powerpc/conf/WII b/sys/powerpc/conf/WII
deleted file mode 100644
index ebfaffc74300..000000000000
--- a/sys/powerpc/conf/WII
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# Custom kernel for the Nintendo Wii.
-#
-# $FreeBSD$
-
-cpu AIM
-ident WII
-machine powerpc powerpc
-
-makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
-
-options WII
-
-options SCHED_ULE #ULE scheduler
-options PREEMPTION #Enable kernel thread preemption
-options INET #InterNETworking
-options INET6 #IPv6 communications protocols
-options SCTP #Stream Control Transmission Protocol
-options FFS #Berkeley Fast Filesystem
-options SOFTUPDATES #Enable FFS soft updates support
-options UFS_ACL #Support for access control lists
-options UFS_DIRHASH #Improve performance on big directories
-options UFS_GJOURNAL #Enable gjournal-based UFS journaling
-options MD_ROOT #MD is a potential root device
-options NFSCL #Network Filesystem Client
-options NFSD #Network Filesystem Server
-options NFSLOCKD #Network Lock Manager
-options NFS_ROOT #NFS usable as root device
-options MSDOSFS #MSDOS Filesystem
-options CD9660 #ISO 9660 Filesystem
-options PROCFS #Process filesystem (requires PSEUDOFS)
-options PSEUDOFS #Pseudo-filesystem framework
-options GEOM_PART_GPT #GUID Partition Tables.
-options GEOM_LABEL #Provides labelization
-options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI
-options KTRACE #ktrace(1) syscall trace support
-options STACK #stack(9) support
-options SYSVSHM #SYSV-style shared memory
-options SYSVMSG #SYSV-style message queues
-options SYSVSEM #SYSV-style semaphores
-options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
-#options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
-options AUDIT # Security event auditing
-options CAPABILITY_MODE # Capsicum capability mode
-options CAPABILITIES # Capsicum capabilities
-options MAC # TrustedBSD MAC Framework
-options INCLUDE_CONFIG_FILE # Include this file in kernel
-
-# Debugging support. Always need this:
-options KDB # Enable kernel debugger support.
-# For minimum debugger support (stable branch) use:
-options KDB_TRACE # Print a stack trace for a panic.
-# For full debugger support use this instead:
-options DDB #Support DDB
-#options DEADLKRES #Enable the deadlock resolver
-options INVARIANTS #Enable calls of extra sanity checking
-options INVARIANT_SUPPORT #Extra sanity checks of internal structures, required by INVARIANTS
-options WITNESS #Enable checks to detect deadlocks and cycles
-options WITNESS_SKIPSPIN #Don't run witness on spinlocks for speed
-options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
-
-# ATA/SCSI peripherals
-device scbus # SCSI bus (required for ATA/SCSI)
-device da # Direct Access (disks)
-
-# syscons is the default console driver, resembling an SCO console
-device sc
-device kbdmux
-options SC_DFLT_FONT # compile font in
-makeoptions SC_DFLT_FONT=cp437
-
-# Pseudo devices.
-device loop # Network loopback
-device random # Entropy device
-device ether # Ethernet support
-device vlan # 802.1Q VLAN support
-device tun # Packet tunnel.
-device md # Memory "disks"
-device gif # IPv6 and IPv4 tunneling
-device firmware # firmware assist module
-
-
-# The `bpf' device enables the Berkeley Packet Filter.
-# Be aware of the administrative consequences of enabling this!
-# Note that 'bpf' is required for DHCP.
-device bpf #Berkeley packet filter
-
-# USB support
-options USB_DEBUG # enable debug msgs
-device uhci # UHCI PCI->USB interface
-device ohci # OHCI PCI->USB interface
-device ehci # EHCI PCI->USB interface
-device usb # USB Bus (required)
-device uhid # "Human Interface Devices"
-device ukbd # Keyboard
-options KBD_INSTALL_CDEV # install a CDEV entry in /dev
-device ulpt # Printer
-device umass # Disks/Mass storage - Requires scbus and da0
-device ums # Mouse
-# USB Ethernet
-device miibus # MII bus support
-device aue # ADMtek USB Ethernet
-device axe # ASIX Electronics USB Ethernet
-device cdce # Generic USB over Ethernet
-device cue # CATC USB Ethernet
-device kue # Kawasaki LSI USB Ethernet
-
-# GPIO
-device gpio
-device wiigpio
diff --git a/sys/powerpc/include/cpu.h b/sys/powerpc/include/cpu.h
index 6ef9882e3b96..65358a6909f5 100644
--- a/sys/powerpc/include/cpu.h
+++ b/sys/powerpc/include/cpu.h
@@ -88,7 +88,7 @@ get_cyclecount(void)
}
#define cpu_getstack(td) ((td)->td_frame->fixreg[1])
-#define cpu_spinwait() /* nothing */
+#define cpu_spinwait() __asm __volatile("or 27,27,27") /* yield */
extern char btext[];
extern char etext[];
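
cpu_spinwait() now emits the PowerPC "or 27,27,27" priority hint instead of doing nothing, so busy-wait loops should call it on every iteration to yield SMT resources. A small, hypothetical example of the intended usage pattern:

#include <machine/cpu.h>

/*
 * Hypothetical bounded busy-wait: poll a flag, yielding SMT thread
 * priority on each spin via cpu_spinwait().  Returns 0 once the flag
 * is set, or 1 if the spin budget is exhausted.
 */
static int
wait_for_flag(volatile int *flag, int spins)
{

	while (*flag == 0) {
		if (spins-- <= 0)
			return (1);
		cpu_spinwait();
	}
	return (0);
}
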
diff --git a/sys/powerpc/ofw/ofw_syscons.c b/sys/powerpc/ofw/ofw_syscons.c
index a8797c2c16e7..7dc51f9888c8 100644
--- a/sys/powerpc/ofw/ofw_syscons.c
+++ b/sys/powerpc/ofw/ofw_syscons.c
@@ -412,7 +412,7 @@ ofwfb_init(int unit, video_adapter_t *adp, int flags)
adp->va_window = (vm_offset_t) ofwfb_static_window;
/*
- * Enable future font-loading and flag color support, as well as
+ * Enable future font-loading and flag color support, as well as
* adding V_ADP_MODECHANGE so that ofwfb_set_mode() gets called
* when the X server shuts down. This enables us to get the console
* back when X disappears.
@@ -874,7 +874,7 @@ ofwfb_putc32(video_adapter_t *adp, vm_offset_t off, uint8_t c, uint8_t a)
addr = (uint32_t *)sc->sc_addr
+ (row + sc->sc_ymargin)*(sc->sc_stride/4)
+ col + sc->sc_xmargin;
-
+
fg = ofwfb_pix32(sc, ofwfb_foreground(a));
bg = ofwfb_pix32(sc, ofwfb_background(a));
@@ -1000,12 +1000,6 @@ ofwfb_scidentify(driver_t *driver, device_t parent)
device_t child;
/*
- * The Nintendo Wii doesn't have open firmware, so don't probe ofwfb
- * because otherwise we will crash.
- */
- if (strcmp(installed_platform(), "wii") == 0)
- return;
- /*
* Add with a priority guaranteed to make it last on
* the device list
*/
@@ -1019,7 +1013,7 @@ ofwfb_scprobe(device_t dev)
device_set_desc(dev, "System console");
- error = sc_probe_unit(device_get_unit(dev),
+ error = sc_probe_unit(device_get_unit(dev),
device_get_flags(dev) | SC_AUTODETECT_KBD);
if (error != 0)
return (error);
diff --git a/sys/powerpc/powerpc/swtch64.S b/sys/powerpc/powerpc/swtch64.S
index 6722bb642135..ffecd1eb7b26 100644
--- a/sys/powerpc/powerpc/swtch64.S
+++ b/sys/powerpc/powerpc/swtch64.S
@@ -72,6 +72,8 @@ TOC_ENTRY(blocked_lock)
*/
ENTRY(cpu_throw)
mr %r13, %r4
+ li %r14,0 /* Tell cpu_switchin not to release a thread */
+
b cpu_switchin
/*
@@ -139,10 +141,7 @@ ENTRY(cpu_switch)
bl pmap_deactivate /* Deactivate the current pmap */
nop
- addi %r1,%r1,48
-
sync /* Make sure all of that finished */
- std %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */
cpu_switchin:
#if defined(SMP) && defined(SCHED_ULE)
@@ -154,14 +153,20 @@ blocked_loop:
beq- blocked_loop
isync
#endif
+
+ ld %r17,TD_PCB(%r13) /* Get new PCB */
+ ld %r1,PCB_SP(%r17) /* Load the stack pointer */
- mfsprg %r7,0 /* Get the pcpu pointer */
+ /* Release old thread now that we have a stack pointer set up */
+ cmpdi %r14,0
+ beq- 1f
+ std %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */
+
+1: mfsprg %r7,0 /* Get the pcpu pointer */
std %r13,PC_CURTHREAD(%r7) /* Store new current thread */
ld %r17,TD_PCB(%r13) /* Store new current PCB */
std %r17,PC_CURPCB(%r7)
- stdu %r1,-48(%r1)
-
mr %r3,%r13 /* Get new thread ptr */
bl pmap_activate /* Activate the new address space */
nop
diff --git a/sys/powerpc/pseries/mmu_phyp.c b/sys/powerpc/pseries/mmu_phyp.c
index cdf5dfefea55..af3b4e598cb2 100644
--- a/sys/powerpc/pseries/mmu_phyp.c
+++ b/sys/powerpc/pseries/mmu_phyp.c
@@ -102,6 +102,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
char buf[8];
uint32_t prop[2];
uint32_t nptlp, shift = 0, slb_encoding = 0;
+ uint32_t lp_size, lp_encoding;
phandle_t dev, node, root;
int idx, len, res;
@@ -148,9 +149,9 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
* We have to use a variable length array on the stack
* since we have very limited stack space.
*/
- cell_t arr[len/sizeof(cell_t)];
- res = OF_getprop(node, "ibm,segment-page-sizes", &arr,
- sizeof(arr));
+ pcell_t arr[len/sizeof(cell_t)];
+ res = OF_getencprop(node, "ibm,segment-page-sizes", arr,
+ sizeof(arr));
len /= 4;
idx = 0;
while (len > 0) {
@@ -160,18 +161,26 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
idx += 3;
len -= 3;
while (len > 0 && nptlp) {
+ lp_size = arr[idx];
+ lp_encoding = arr[idx+1];
+ if (slb_encoding == SLBV_L && lp_encoding == 0)
+ break;
+
idx += 2;
len -= 2;
nptlp--;
}
+ if (nptlp && slb_encoding == SLBV_L && lp_encoding == 0)
+ break;
}
- /* For now we allow shift only to be <= 0x18. */
- if (shift >= 0x18)
- shift = 0x18;
+ if (len == 0)
+ panic("Standard large pages (SLB[L] = 1, PTE[LP] = 0) "
+ "not supported by this system. Please enable huge "
+ "page backing if running under PowerKVM.");
moea64_large_page_shift = shift;
- moea64_large_page_size = 1ULL << shift;
+ moea64_large_page_size = 1ULL << lp_size;
}
moea64_mid_bootstrap(mmup, kernelstart, kernelend);
@@ -231,6 +240,7 @@ mphyp_pte_unset(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
uint64_t junk;
int err;
+ pvo_pt->pte_hi &= ~LPTE_VALID;
err = phyp_pft_hcall(H_REMOVE, 1UL << 31, slot,
pvo_pt->pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
&junk);
@@ -256,6 +266,7 @@ mphyp_pte_change(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
/* XXX: optimization using H_PROTECT for common case? */
mphyp_pte_unset(mmu, slot, pvo_pt, vpn);
+ pvo_pt->pte_hi |= LPTE_VALID;
result = phyp_pft_hcall(H_ENTER, H_EXACT, slot, pvo_pt->pte_hi,
pvo_pt->pte_lo, &index, &evicted.pte_lo, &junk);
if (result != H_SUCCESS)
@@ -277,7 +288,7 @@ mphyp_pte_spillable_ident(u_int ptegidx, struct lpte *to_evict)
phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pt.pte_hi, &pt.pte_lo,
&junk);
- if (pt.pte_hi & LPTE_SWBITS)
+ if (pt.pte_hi & LPTE_WIRED)
continue;
/* This is a candidate, so remember it */
@@ -290,7 +301,10 @@ mphyp_pte_spillable_ident(u_int ptegidx, struct lpte *to_evict)
}
}
- phyp_pft_hcall(H_READ, 0, slot, 0, 0, &to_evict->pte_hi,
+ if (k == -1)
+ return (k);
+
+ phyp_pft_hcall(H_READ, 0, k, 0, 0, &to_evict->pte_hi,
&to_evict->pte_lo, &junk);
return (k);
}
@@ -379,7 +393,7 @@ mphyp_pte_insert(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
}
}
- KASSERT(pvo->pvo_pte.lpte.pte_hi == evicted.pte_hi,
+ KASSERT((pvo->pvo_pte.lpte.pte_hi | LPTE_VALID) == evicted.pte_hi,
("Unable to find PVO for spilled PTE"));
/*
diff --git a/sys/powerpc/pseries/platform_chrp.c b/sys/powerpc/pseries/platform_chrp.c
index 115d062b480d..b1fc948bf541 100644
--- a/sys/powerpc/pseries/platform_chrp.c
+++ b/sys/powerpc/pseries/platform_chrp.c
@@ -58,7 +58,7 @@ extern void *ap_pcpu;
#endif
#ifdef __powerpc64__
-static uint8_t splpar_vpa[640] __aligned(64);
+static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */
#endif
static vm_offset_t realmaxaddr = VM_MAX_ADDRESS;
@@ -125,6 +125,8 @@ static int
chrp_attach(platform_t plat)
{
#ifdef __powerpc64__
+ int i;
+
/* XXX: check for /rtas/ibm,hypertas-functions? */
if (!(mfmsr() & PSL_HV)) {
struct mem_region *phys, *avail;
@@ -136,14 +138,19 @@ chrp_attach(platform_t plat)
cpu_idle_hook = phyp_cpu_idle;
/* Set up important VPA fields */
- bzero(splpar_vpa, sizeof(splpar_vpa));
- splpar_vpa[4] = (uint8_t)((sizeof(splpar_vpa) >> 8) & 0xff);
- splpar_vpa[5] = (uint8_t)(sizeof(splpar_vpa) & 0xff);
- splpar_vpa[0xba] = 1; /* Maintain FPRs */
- splpar_vpa[0xbb] = 1; /* Maintain PMCs */
- splpar_vpa[0xfc] = 0xff; /* Maintain full SLB */
- splpar_vpa[0xfd] = 0xff;
- splpar_vpa[0xff] = 1; /* Maintain Altivec */
+ for (i = 0; i < MAXCPU; i++) {
+ bzero(splpar_vpa[i], sizeof(splpar_vpa));
+ /* First two: VPA size */
+ splpar_vpa[i][4] =
+ (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff);
+ splpar_vpa[i][5] =
+ (uint8_t)(sizeof(splpar_vpa[i]) & 0xff);
+ splpar_vpa[i][0xba] = 1; /* Maintain FPRs */
+ splpar_vpa[i][0xbb] = 1; /* Maintain PMCs */
+ splpar_vpa[i][0xfc] = 0xff; /* Maintain full SLB */
+ splpar_vpa[i][0xfd] = 0xff;
+ splpar_vpa[i][0xff] = 1; /* Maintain Altivec */
+ }
mb();
/* Set up hypervisor CPU stuff */
@@ -492,11 +499,12 @@ static void
chrp_smp_ap_init(platform_t platform)
{
if (!(mfmsr() & PSL_HV)) {
+ /* Register VPA */
+ phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid),
+ splpar_vpa[PCPU_GET(cpuid)]);
+
/* Set interrupt priority */
phyp_hcall(H_CPPR, 0xff);
-
- /* Register VPA */
- phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid), splpar_vpa);
}
}
#else
diff --git a/sys/powerpc/pseries/xics.c b/sys/powerpc/pseries/xics.c
index b09079aaf111..2d012c5faa7b 100644
--- a/sys/powerpc/pseries/xics.c
+++ b/sys/powerpc/pseries/xics.c
@@ -197,16 +197,31 @@ xicp_bind(device_t dev, u_int irq, cpuset_t cpumask)
{
struct xicp_softc *sc = device_get_softc(dev);
cell_t status, cpu;
+ int ncpus, i, error;
/*
- * This doesn't appear to actually support affinity groups, so just
- * use the first CPU.
+ * This doesn't appear to actually support affinity groups, so pick a
+ * random CPU.
*/
+ ncpus = 0;
CPU_FOREACH(cpu)
- if (CPU_ISSET(cpu, &cpumask)) break;
+ if (CPU_ISSET(cpu, &cpumask)) ncpus++;
+
+ i = mftb() % ncpus;
+ ncpus = 0;
+ CPU_FOREACH(cpu) {
+ if (!CPU_ISSET(cpu, &cpumask))
+ continue;
+ if (ncpus == i)
+ break;
+ ncpus++;
+ }
+
- rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu, XICP_PRIORITY,
- &status);
+ error = rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
+ XICP_PRIORITY, &status);
+ if (error < 0)
+ panic("Cannot bind interrupt %d to CPU %d", irq, cpu);
}
static void
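
The selection scheme above (count the eligible CPUs, then walk to the mftb()-chosen member) generalizes to any CPU set. A hypothetical standalone helper, assuming the same PowerPC mftb() timebase read the driver already uses:

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/smp.h>
#include <machine/cpufunc.h>	/* assumed home of mftb() */

/*
 * Hypothetical helper: pick a pseudo-random member of a CPU set,
 * seeded by the timebase, the same way xicp_bind() does above.
 * Returns -1 if the set is empty.
 */
static int
cpuset_pick_random(const cpuset_t *mask)
{
	int cpu, count, target;

	count = 0;
	CPU_FOREACH(cpu)
		if (CPU_ISSET(cpu, mask))
			count++;
	if (count == 0)
		return (-1);
	target = mftb() % count;
	count = 0;
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, mask))
			continue;
		if (count == target)
			return (cpu);
		count++;
	}
	return (-1);
}
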
diff --git a/sys/powerpc/wii/ios_if.m b/sys/powerpc/wii/ios_if.m
deleted file mode 100644
index f5c6a17b9ab1..000000000000
--- a/sys/powerpc/wii/ios_if.m
+++ /dev/null
@@ -1,64 +0,0 @@
-#-
-# Copyright (c) 2013 Rui Paulo
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-# $FreeBSD$
-#
-
-#include <sys/bus.h>
-#include <sys/uio.h>
-
-INTERFACE ios;
-
-METHOD int open {
- device_t dev;
- const char *path;
- int mode;
-};
-
-METHOD int close {
- device_t dev;
- int fd;
-};
-
-METHOD int ioctl {
- device_t dev;
- int fd;
- unsigned int request;
- void *ibuf;
- size_t ilen;
- void *obuf;
- size_t olen;
-};
-
-METHOD int ioctlv {
- device_t dev;
- int fd;
- unsigned int request;
- struct iovec *in;
- size_t ilen;
- struct iovec *out;
- size_t olen;
-};
-
diff --git a/sys/powerpc/wii/locore.S b/sys/powerpc/wii/locore.S
deleted file mode 100644
index 7254bec4a7ea..000000000000
--- a/sys/powerpc/wii/locore.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <machine/bat.h>
-/*
- * When we are invoked from Wii loaders, the state of the MMU and the BAT
- * mappings can vary. In this file we try to reset the MMU to a state
- * that lets us boot FreeBSD.
- *
- * N.B.: keep the BAT0 in sync with mmu_oea.c and never touch BAT1 later.
- *
- * This file is being included from aim/locore32.S.
- */
-
-#define MMU_REALMODE() \
- mfmsr %r12; \
- rlwinm %r12, %r12, 0, ~(PSL_DR|PSL_IR);\
- sync; \
- bl 1f; \
-1: \
- mflr %r11; \
- clrlwi %r11, %r11, 3; /* XXX why? */ \
- addi %r11, %r11, 2f - 1b; \
- mtsrr0 %r11; \
- mtsrr1 %r12; /* Disables the MMU */ \
- isync; \
- rfi; \
-2:
-
-#define MMU_VIRTUALMODE() \
- bl 3f; \
-3: \
- mflr %r11; \
- addi %r11, %r11, 4f - 3b; \
- mfmsr %r12; \
- ori %r12, %r12, PSL_DR|PSL_IR; \
- mtsrr0 %r11; \
- mtsrr1 %r12; /* Enables the MMU */ \
- isync; \
- rfi; \
-4:
-
- MMU_REALMODE()
-
- /* Reset standard BATs */
- li %r11, 0
- mtibatu 0, %r11
- mtibatl 0, %r11
- mtdbatu 0, %r11
- mtdbatl 0, %r11
- mtibatu 1, %r11
- mtibatl 1, %r11
- mtdbatu 1, %r11
- mtdbatl 1, %r11
- mtibatu 2, %r11
- mtibatl 2, %r11
- mtdbatu 2, %r11
- mtdbatl 2, %r11
- mtibatu 3, %r11
- mtibatl 3, %r11
- mtdbatu 3, %r11
- mtdbatl 3, %r11
-
- /* Reset high BATs. IBAT[4-7][UL] + DBAT[4-7][UL] */
- mtspr 560, %r11
- mtspr 561, %r11
- mtspr 562, %r11
- mtspr 563, %r11
- mtspr 564, %r11
- mtspr 565, %r11
- mtspr 566, %r11
- mtspr 567, %r11
- mtspr 568, %r11
- mtspr 569, %r11
- mtspr 570, %r11
- mtspr 571, %r11
- mtspr 572, %r11
- mtspr 573, %r11
- mtspr 574, %r11
- mtspr 575, %r11
-
- /*
- * We need to setup BAT0 as in mmu_oea.c.
- */
- li %r11, BATU(0x00000000, BAT_BL_256M, BAT_Vs)
- li %r12, BATL(0x00000000, BAT_M, BAT_PP_RW)
- mtdbatu 0, %r11
- mtdbatl 0, %r12
- mtibatu 0, %r11
- mtibatl 0, %r12
- isync
-
- /*
- * We use BAT1 to be able to write I/O memory, including the
- * framebuffer registers.
- */
- /* BATU(0x0c000000, BAT_BL_32M, BAT_Vs) */
- lis %r11, 0x0c00
- ori %r11, %r11, BAT_BL_32M|BAT_Vs
- /* BATL(0x0c000000, BAT_I|BAT_G, BAT_PP_RW) */
- lis %r12, 0x0c00
- ori %r12, %r12, BAT_I|BAT_G|BAT_PP_RW
- mtdbatu 1, %r11
- mtdbatl 1, %r12
- isync
-
- MMU_VIRTUALMODE()
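To make the effect of the BAT programming above concrete: BAT0 ends up as a cacheable, memory-coherent, read/write identity map of the first 256MB of RAM (matching mmu_oea.c, per the comment), and BAT1 as a cache-inhibited, guarded, read/write window over 32MB of I/O space at 0x0c000000, which covers the framebuffer and bus CSR registers used elsewhere in this diff. A small host-side sketch, not kernel code, that just prints those two ranges:

```c
/* Illustrative only: summarize the BAT windows set up by the deleted locore.S. */
#include <stdio.h>

int
main(void)
{
	/* BAT0: identity map of low RAM, cacheable + coherent, RW. */
	unsigned int bat0_base = 0x00000000, bat0_len = 256 * 1024 * 1024;
	/* BAT1: I/O window, cache-inhibited + guarded, RW. */
	unsigned int bat1_base = 0x0c000000, bat1_len = 32 * 1024 * 1024;

	printf("BAT0: %#010x-%#010x (RAM, coherent, RW)\n",
	    bat0_base, bat0_base + bat0_len - 1);
	printf("BAT1: %#010x-%#010x (I/O, inhibited+guarded, RW)\n",
	    bat1_base, bat1_base + bat1_len - 1);
	return (0);
}
```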
diff --git a/sys/powerpc/wii/platform_wii.c b/sys/powerpc/wii/platform_wii.c
deleted file mode 100644
index c5709f4c8a5f..000000000000
--- a/sys/powerpc/wii/platform_wii.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/bus.h>
-#include <sys/pcpu.h>
-#include <sys/proc.h>
-#include <sys/reboot.h>
-#include <sys/smp.h>
-#include <sys/fbio.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
-#include <machine/bus.h>
-#include <machine/cpu.h>
-#include <machine/hid.h>
-#include <machine/platform.h>
-#include <machine/platformvar.h>
-#include <machine/pmap.h>
-#include <machine/smp.h>
-#include <machine/spr.h>
-#include <machine/vmparam.h>
-
-#include <powerpc/wii/wii_fbreg.h>
-#include <powerpc/wii/wii_ipcreg.h>
-
-#include "platform_if.h"
-
-static int wii_probe(platform_t);
-static int wii_attach(platform_t);
-static void wii_mem_regions(platform_t, struct mem_region *,
- int *, struct mem_region *, int *);
-static unsigned long wii_timebase_freq(platform_t, struct cpuref *);
-static void wii_reset(platform_t);
-static void wii_cpu_idle(sbintime_t);
-
-extern void wiibus_reset_system(void);
-
-static platform_method_t wii_methods[] = {
- PLATFORMMETHOD(platform_probe, wii_probe),
- PLATFORMMETHOD(platform_attach, wii_attach),
- PLATFORMMETHOD(platform_mem_regions, wii_mem_regions),
- PLATFORMMETHOD(platform_timebase_freq, wii_timebase_freq),
- PLATFORMMETHOD(platform_reset, wii_reset),
-
- PLATFORMMETHOD_END
-};
-
-static platform_def_t wii_platform = {
- "wii",
- wii_methods,
- 0
-};
-
-PLATFORM_DEF(wii_platform);
-
-static int
-wii_probe(platform_t plat)
-{
- register_t vers = mfpvr();
-
- /*
- * The Wii includes a PowerPC 750CL with custom modifications
- * ("Broadway").
- * For now, we just assume that if we are running on a
- * PowerPC 750CL, then this platform is a Nintendo Wii.
- */
- if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
- return (BUS_PROBE_SPECIFIC);
-
- return (ENXIO);
-}
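The probe above keys off the PVR: the upper 16 bits carry the processor version (the MPC750 constant from machine/spr.h) and the low 16 bits the revision, with the mask 0xfffff0e0 ignoring the minor stepping bits before comparing against the 750CL pattern. A hedged sketch of that decoding; the sample PVR value below is hypothetical, not taken from real Broadway silicon:

```c
/* Hedged sketch of the PVR decode used by wii_probe(); sample value is made up. */
#include <stdio.h>

int
main(void)
{
	unsigned int pvr = 0x00087200;		/* hypothetical sample */
	unsigned int version = pvr >> 16;	/* processor family */
	unsigned int revision = pvr & 0xffff;	/* stepping/revision */

	printf("version=%#06x revision=%#06x masked=%#010x\n",
	    version, revision, pvr & 0xfffff0e0);
	return (0);
}
```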
-
-static int
-wii_attach(platform_t plat)
-{
- cpu_idle_hook = wii_cpu_idle;
-
- return (0);
-}
-
-static void
-wii_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
- struct mem_region *avail_regions, int *availsz)
-{
- /* 24MB 1T-SRAM */
- avail_regions[0].mr_start = 0x00000000;
- avail_regions[0].mr_size = 0x01800000;
-
- /*
- * Reserve space for the framebuffer which is located
- * at the end of this 24MB memory region. See wii_fbreg.h.
- */
- avail_regions[0].mr_size -= WIIFB_FB_LEN;
-
- /* 64MB GDDR3 SDRAM */
- avail_regions[1].mr_start = 0x10000000;
- avail_regions[1].mr_size = 0x04000000;
-
- /*
- * Reserve space for the DSP.
- */
- avail_regions[1].mr_start += 0x4000;
- avail_regions[1].mr_size -= 0x4000;
-
- /*
- * Reserve space for the IOS I/O memory.
- */
- avail_regions[1].mr_size -= WIIIPC_IOH_LEN + 1;
-
- memcpy(phys, avail_regions, 2*sizeof(*avail_regions));
- *physsz = *availsz = 2;
-}
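The region 0 reservation above can be sanity-checked against wii_fbreg.h later in this diff: trimming WIIFB_FB_LEN (0x168000) from the 24MB 1T-SRAM leaves the region ending exactly at WIIFB_FB_ADDR (0x01698000), i.e. the framebuffer really does sit at the end of that memory. A tiny standalone check; the DSP and IOS reservations are omitted because WIIIPC_IOH_LEN comes from a header not shown here:

```c
/* Standalone arithmetic check of the region 0 reservation above. */
#include <assert.h>
#include <stdio.h>

#define WIIFB_FB_ADDR	0x01698000	/* from wii_fbreg.h in this diff */
#define WIIFB_FB_LEN	0x168000	/* from wii_fbreg.h in this diff */

int
main(void)
{
	unsigned int start = 0x00000000;	/* 24MB 1T-SRAM */
	unsigned int size = 0x01800000;

	size -= WIIFB_FB_LEN;			/* reserve the framebuffer */
	assert(start + size == WIIFB_FB_ADDR);	/* region ends where the FB begins */
	printf("region 0: %#010x-%#010x\n", start, start + size - 1);
	return (0);
}
```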
-
-static u_long
-wii_timebase_freq(platform_t plat, struct cpuref *cpuref)
-{
-
- /* Bus Frequency (243MHz) / 4 */
- return (60750000);
-}
-
-static void
-wii_reset(platform_t plat __unused)
-{
-
- wiibus_reset_system();
-}
-
-static void
-wii_cpu_idle(sbintime_t sbt)
-{
-}
diff --git a/sys/powerpc/wii/wii_bus.c b/sys/powerpc/wii/wii_bus.c
deleted file mode 100644
index e836529176c5..000000000000
--- a/sys/powerpc/wii/wii_bus.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/bus.h>
-#include <sys/clock.h>
-#include <sys/cpu.h>
-#include <sys/resource.h>
-#include <sys/rman.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
-#include <machine/bus.h>
-#include <machine/platform.h>
-#include <machine/pmap.h>
-#include <machine/resource.h>
-#include <machine/platformvar.h>
-
-#include <powerpc/wii/wii_picreg.h>
-#include <powerpc/wii/wii_fbreg.h>
-#include <powerpc/wii/wii_exireg.h>
-#include <powerpc/wii/wii_ipcreg.h>
-#include <powerpc/wii/wii_gpioreg.h>
-
-#define WIIBUS_CSR_ADDR 0x0d800100
-#define WIIBUS_CSR_LEN 0x300
-#define WIIBUS_CSR_RESET 0x94
-
-struct wiibus_softc {
- device_t sc_dev;
- struct rman sc_rman;
- bus_space_tag_t sc_tag;
- bus_space_handle_t sc_handle;
-};
-
-static struct wiibus_softc *wiibus_sc = NULL;
-
-static uint32_t wiibus_csr_read(struct wiibus_softc *, uint16_t);
-static void wiibus_csr_write(struct wiibus_softc *, uint16_t, uint32_t);
-static void wiibus_identify(driver_t *, device_t);
-static int wiibus_probe(device_t);
-static int wiibus_attach(device_t);
-static int wiibus_print_child(device_t, device_t);
-static struct resource *
- wiibus_alloc_resource(device_t, device_t, int, int *,
- unsigned long, unsigned long, unsigned long,
- unsigned int);
-static int wiibus_activate_resource(device_t, device_t, int, int,
- struct resource *);
- void wiibus_reset_system(void);
-
-static device_method_t wiibus_methods[] = {
- /* Device interface */
- DEVMETHOD(device_identify, wiibus_identify),
- DEVMETHOD(device_probe, wiibus_probe),
- DEVMETHOD(device_attach, wiibus_attach),
-
- /* Bus interface */
- DEVMETHOD(bus_add_child, bus_generic_add_child),
- DEVMETHOD(bus_print_child, wiibus_print_child),
- DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
- DEVMETHOD(bus_alloc_resource, wiibus_alloc_resource),
- DEVMETHOD(bus_activate_resource,wiibus_activate_resource),
-
- DEVMETHOD_END
-};
-
-static MALLOC_DEFINE(M_WIIBUS, "wiibus", "Nintendo Wii system bus");
-
-struct wiibus_devinfo {
- struct resource_list di_resources;
- uint8_t di_init;
-};
-
-static driver_t wiibus_driver = {
- "wiibus",
- wiibus_methods,
- sizeof(struct wiibus_softc)
-};
-
-static devclass_t wiibus_devclass;
-
-DRIVER_MODULE(wiibus, nexus, wiibus_driver, wiibus_devclass, 0, 0);
-
-static uint32_t
-wiibus_csr_read(struct wiibus_softc *sc, uint16_t reg)
-{
-
- return (bus_space_read_4(sc->sc_tag, sc->sc_handle, reg));
-}
-
-static void
-wiibus_csr_write(struct wiibus_softc *sc, uint16_t reg,
- uint32_t val)
-{
-
- bus_space_write_4(sc->sc_tag, sc->sc_handle, reg, val);
-}
-
-static void
-wiibus_identify(driver_t *driver, device_t parent)
-{
-
- if (strcmp(installed_platform(), "wii") != 0)
- return;
-
- if (device_find_child(parent, "wiibus", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "wiibus", 0);
-}
-
-static int
-wiibus_probe(device_t dev)
-{
-
- device_set_desc(dev, "Nintendo Wii System Bus");
-
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static void
-wiibus_init_device_resources(struct rman *rm, struct wiibus_devinfo *dinfo,
- unsigned int rid, uintptr_t addr, size_t len, unsigned int irq)
-
-{
-
- if (!dinfo->di_init) {
- resource_list_init(&dinfo->di_resources);
- dinfo->di_init++;
- }
- if (addr) {
- rman_manage_region(rm, addr, addr + len - 1);
- resource_list_add(&dinfo->di_resources, SYS_RES_MEMORY, rid,
- addr, addr + len, len);
- }
- if (irq)
- resource_list_add(&dinfo->di_resources, SYS_RES_IRQ, rid,
- irq, irq, 1);
-}
-
-static int
-wiibus_attach(device_t self)
-{
- struct wiibus_softc *sc;
- struct wiibus_devinfo *dinfo;
- device_t cdev;
-
- sc = device_get_softc(self);
- sc->sc_rman.rm_type = RMAN_ARRAY;
- sc->sc_rman.rm_descr = "Wii Bus Memory Mapped I/O";
- rman_init(&sc->sc_rman);
- KASSERT(wiibus_sc == NULL, ("wiibus_sc already initialised"));
- wiibus_sc = sc;
-
- /* Nintendo PIC */
- dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 0, WIIPIC_REG_ADDR,
- WIIPIC_REG_LEN, 1);
- cdev = BUS_ADD_CHILD(self, 0, "wiipic", 0);
- device_set_ivars(cdev, dinfo);
-
- /* Framebuffer */
- dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 0, WIIFB_REG_ADDR,
- WIIFB_REG_LEN, 8);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 1, WIIFB_FB_ADDR,
- WIIFB_FB_LEN, 0);
- cdev = BUS_ADD_CHILD(self, 0, "wiifb", 0);
- device_set_ivars(cdev, dinfo);
-
- /* External Interface Bus */
- dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 0, WIIEXI_REG_ADDR,
- WIIEXI_REG_LEN, 4);
- cdev = BUS_ADD_CHILD(self, 0, "wiiexi", 0);
- device_set_ivars(cdev, dinfo);
-
- /* Nintendo IOS IPC */
- dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 0, WIIIPC_REG_ADDR,
- WIIIPC_REG_LEN, 14);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 1, WIIIPC_IOH_ADDR,
- WIIIPC_IOH_LEN, 0);
- cdev = BUS_ADD_CHILD(self, 0, "wiiipc", 0);
- device_set_ivars(cdev, dinfo);
-
- /* GPIO */
- dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
- wiibus_init_device_resources(&sc->sc_rman, dinfo, 0, WIIGPIO_REG_ADDR,
- WIIGPIO_REG_LEN, 0);
- cdev = BUS_ADD_CHILD(self, 0, "wiigpio", 0);
- device_set_ivars(cdev, dinfo);
-
- /* The control registers */
- sc->sc_tag = &bs_be_tag;
- sc->sc_handle = (bus_space_handle_t)pmap_mapdev(WIIBUS_CSR_ADDR,
- WIIBUS_CSR_LEN);
-
- return (bus_generic_attach(self));
-}
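The children added above find their register windows through the resource lists stashed as ivars, so a child driver would typically obtain and map its rid-0 window through the stock newbus calls, which route back into wiibus_alloc_resource() and wiibus_activate_resource(). A hedged sketch of that consumer side; the API calls are standard FreeBSD, but this attach body is not code from the diff:

```c
/* Hedged sketch: how a wiibus child could map its rid-0 register window. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>

static int
wiichild_attach(device_t dev)
{
	struct resource *res;
	int rid = 0;

	/* Routed to wiibus_alloc_resource()/wiibus_activate_resource(). */
	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return (ENXIO);

	/* Registers are now accessible via bus_read_4()/bus_write_4(). */
	device_printf(dev, "regs at %#jx\n", (uintmax_t)rman_get_start(res));
	return (0);
}
```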
-
-static int
-wiibus_print_child(device_t dev, device_t child)
-{
- struct wiibus_devinfo *dinfo = device_get_ivars(child);
- int retval = 0;
-
- retval += bus_print_child_header(dev, child);
- retval += resource_list_print_type(&dinfo->di_resources, "mem",
- SYS_RES_MEMORY, "%#lx");
- retval += resource_list_print_type(&dinfo->di_resources, "irq",
- SYS_RES_IRQ, "%ld");
- retval += bus_print_child_footer(dev, child);
-
- return (retval);
-}
-
-static struct resource *
-wiibus_alloc_resource(device_t bus, device_t child, int type,
- int *rid, unsigned long start, unsigned long end,
- unsigned long count, unsigned int flags)
-{
- struct wiibus_softc *sc;
- struct wiibus_devinfo *dinfo;
- struct resource_list_entry *rle;
- struct resource *rv;
- int needactivate;
-
- sc = device_get_softc(bus);
- dinfo = device_get_ivars(child);
- needactivate = flags & RF_ACTIVE;
- flags &= ~RF_ACTIVE;
-
- switch (type) {
- case SYS_RES_MEMORY:
- rle = resource_list_find(&dinfo->di_resources, SYS_RES_MEMORY,
- *rid);
- if (rle == NULL) {
- device_printf(bus, "no res entry for %s memory 0x%x\n",
- device_get_nameunit(child), *rid);
- return (NULL);
- }
- rv = rman_reserve_resource(&sc->sc_rman, rle->start, rle->end,
- rle->count, flags, child);
- if (rv == NULL) {
- device_printf(bus,
- "failed to reserve resource for %s\n",
- device_get_nameunit(child));
- return (NULL);
- }
- rman_set_rid(rv, *rid);
- break;
- case SYS_RES_IRQ:
- return (resource_list_alloc(&dinfo->di_resources, bus, child,
- type, rid, start, end, count, flags));
- default:
- device_printf(bus, "unknown resource request from %s\n",
- device_get_nameunit(child));
- return (NULL);
- }
-
- if (needactivate) {
- if (bus_activate_resource(child, type, *rid, rv) != 0) {
- device_printf(bus,
- "failed to activate resource for %s\n",
- device_get_nameunit(child));
- return (NULL);
- }
- }
-
- return (rv);
-}
-
-static int
-wiibus_activate_resource(device_t bus, device_t child, int type, int rid,
- struct resource *res)
-{
- void *p;
-
- switch (type) {
- case SYS_RES_MEMORY:
- p = pmap_mapdev(rman_get_start(res), rman_get_size(res));
- if (p == NULL)
- return (ENOMEM);
- rman_set_virtual(res, p);
- rman_set_bustag(res, &bs_be_tag);
- rman_set_bushandle(res, (unsigned long)p);
- break;
- case SYS_RES_IRQ:
- return (bus_activate_resource(bus, type, rid, res));
- default:
- device_printf(bus,
- "unknown activate resource request from %s\n",
- device_get_nameunit(child));
- return (ENXIO);
- }
-
- return (rman_activate_resource(res));
-}
-
-void
-wiibus_reset_system(void)
-{
- uint32_t r;
-
- r = wiibus_csr_read(wiibus_sc, WIIBUS_CSR_RESET);
- r &= ~1;
- wiibus_csr_write(wiibus_sc, WIIBUS_CSR_RESET, r);
-}
diff --git a/sys/powerpc/wii/wii_exireg.h b/sys/powerpc/wii/wii_exireg.h
deleted file mode 100644
index 34a6bf61a883..000000000000
--- a/sys/powerpc/wii/wii_exireg.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WII_EXIREG_H
-#define _POWERPC_WII_WII_EXIREG_H
-
-#define WIIEXI_REG_ADDR 0x0d006800
-#define WIIEXI_REG_LEN 0x40
-
-#endif /* _POWERPC_WII_WII_EXIREG_H */
diff --git a/sys/powerpc/wii/wii_fb.c b/sys/powerpc/wii/wii_fb.c
deleted file mode 100644
index c32ab769eff6..000000000000
--- a/sys/powerpc/wii/wii_fb.c
+++ /dev/null
@@ -1,885 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * Copyright (c) 2003 Peter Grehan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/module.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/sysctl.h>
-#include <sys/limits.h>
-#include <sys/conf.h>
-#include <sys/cons.h>
-#include <sys/proc.h>
-#include <sys/fcntl.h>
-#include <sys/malloc.h>
-#include <sys/fbio.h>
-#include <sys/consio.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
-#include <machine/bus.h>
-#include <machine/sc_machdep.h>
-#include <machine/platform.h>
-#include <machine/pmap.h>
-
-#include <sys/rman.h>
-
-#include <dev/fb/fbreg.h>
-#include <dev/syscons/syscons.h>
-
-#include <powerpc/wii/wii_fbreg.h>
-#include <powerpc/wii/wii_fbvar.h>
-
-/*
- * Driver for the Nintendo Wii's framebuffer. Based on Linux's gcnfb.c.
- */
-
-/*
- * Syscons glue.
- */
-static int wiifb_scprobe(device_t);
-static int wiifb_scattach(device_t);
-
-static device_method_t wiifb_sc_methods[] = {
- DEVMETHOD(device_probe, wiifb_scprobe),
- DEVMETHOD(device_attach, wiifb_scattach),
-
- DEVMETHOD_END
-};
-
-static driver_t wiifb_sc_driver = {
- "wiifb",
- wiifb_sc_methods,
- sizeof(sc_softc_t),
-};
-
-static devclass_t sc_devclass;
-
-DRIVER_MODULE(sc, wiibus, wiifb_sc_driver, sc_devclass, 0, 0);
-
-static int
-wiifb_scprobe(device_t dev)
-{
- int error;
-
-	device_set_desc(dev, "Nintendo Wii framebuffer");
-
- error = sc_probe_unit(device_get_unit(dev),
- device_get_flags(dev) | SC_AUTODETECT_KBD);
- if (error != 0)
- return (error);
-
- /* This is a fake device, so make sure we added it ourselves */
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-wiifb_scattach(device_t dev)
-{
-
- return (sc_attach_unit(device_get_unit(dev),
- device_get_flags(dev) | SC_AUTODETECT_KBD));
-}
-
-/*
- * Video driver routines and glue.
- */
-static void wiifb_reset_video(struct wiifb_softc *);
-static void wiifb_enable_interrupts(struct wiifb_softc *);
-static void wiifb_configure_tv_mode(struct wiifb_softc *);
-static void wiifb_setup_framebuffer(struct wiifb_softc *);
-static int wiifb_configure(int);
-static vi_probe_t wiifb_probe;
-static vi_init_t wiifb_init;
-static vi_get_info_t wiifb_get_info;
-static vi_query_mode_t wiifb_query_mode;
-static vi_set_mode_t wiifb_set_mode;
-static vi_save_font_t wiifb_save_font;
-static vi_load_font_t wiifb_load_font;
-static vi_show_font_t wiifb_show_font;
-static vi_save_palette_t wiifb_save_palette;
-static vi_load_palette_t wiifb_load_palette;
-static vi_set_border_t wiifb_set_border;
-static vi_save_state_t wiifb_save_state;
-static vi_load_state_t wiifb_load_state;
-static vi_set_win_org_t wiifb_set_win_org;
-static vi_read_hw_cursor_t wiifb_read_hw_cursor;
-static vi_set_hw_cursor_t wiifb_set_hw_cursor;
-static vi_set_hw_cursor_shape_t wiifb_set_hw_cursor_shape;
-static vi_blank_display_t wiifb_blank_display;
-static vi_mmap_t wiifb_mmap;
-static vi_ioctl_t wiifb_ioctl;
-static vi_clear_t wiifb_clear;
-static vi_fill_rect_t wiifb_fill_rect;
-static vi_bitblt_t wiifb_bitblt;
-static vi_diag_t wiifb_diag;
-static vi_save_cursor_palette_t wiifb_save_cursor_palette;
-static vi_load_cursor_palette_t wiifb_load_cursor_palette;
-static vi_copy_t wiifb_copy;
-static vi_putp_t wiifb_putp;
-static vi_putc_t wiifb_putc;
-static vi_puts_t wiifb_puts;
-static vi_putm_t wiifb_putm;
-
-static video_switch_t wiifbvidsw = {
- .probe = wiifb_probe,
- .init = wiifb_init,
- .get_info = wiifb_get_info,
- .query_mode = wiifb_query_mode,
- .set_mode = wiifb_set_mode,
- .save_font = wiifb_save_font,
- .load_font = wiifb_load_font,
- .show_font = wiifb_show_font,
- .save_palette = wiifb_save_palette,
- .load_palette = wiifb_load_palette,
- .set_border = wiifb_set_border,
- .save_state = wiifb_save_state,
- .load_state = wiifb_load_state,
- .set_win_org = wiifb_set_win_org,
- .read_hw_cursor = wiifb_read_hw_cursor,
- .set_hw_cursor = wiifb_set_hw_cursor,
- .set_hw_cursor_shape = wiifb_set_hw_cursor_shape,
- .blank_display = wiifb_blank_display,
- .mmap = wiifb_mmap,
- .ioctl = wiifb_ioctl,
- .clear = wiifb_clear,
- .fill_rect = wiifb_fill_rect,
- .bitblt = wiifb_bitblt,
- .diag = wiifb_diag,
- .save_cursor_palette = wiifb_save_cursor_palette,
- .load_cursor_palette = wiifb_load_cursor_palette,
- .copy = wiifb_copy,
- .putp = wiifb_putp,
- .putc = wiifb_putc,
- .puts = wiifb_puts,
- .putm = wiifb_putm,
-};
-
-VIDEO_DRIVER(wiifb, wiifbvidsw, wiifb_configure);
-
-extern sc_rndr_sw_t txtrndrsw;
-RENDERER(wiifb, 0, txtrndrsw, gfb_set);
-RENDERER_MODULE(wiifb, gfb_set);
-
-static struct wiifb_softc wiifb_softc;
-static uint16_t wiifb_static_window[ROW*COL];
-extern u_char dflt_font_8[];
-
-/*
- * Map the syscons colors to YUY2 (Y'UV422).
- * Some colours are approximations.
- *
- * The Wii has a 16 bit pixel, so each 32 bit DWORD encodes
- * two pixels. The upper 16 bits is for pixel 0 (left hand pixel
- * in a pair), the lower 16 bits is for pixel 1.
- *
- * For now, we're going to ignore that entirely and just use the
- * lower 16 bits for each pixel. We'll take the upper value into
- * account later.
- */
-static uint32_t wiifb_cmap[16] = {
- 0x00800080, /* Black */
- 0x1dff1d6b, /* Blue */
- 0x4b554b4a, /* Green */
- 0x80808080, /* Cyan */
- 0x4c544cff, /* Red */
- 0x3aaa34b5, /* Magenta */
- 0x7140718a, /* Brown */
- 0xff80ff80, /* White */
- 0x80808080, /* Gray */
- 0xc399c36a, /* Bright Blue */
- 0xd076d074, /* Bright Green */
- 0x80808080, /* Bright Cyan */
- 0x4c544cff, /* Bright Red */
- 0x3aaa34b5, /* Bright Magenta */
- 0xe100e194, /* Bright Yellow */
- 0xff80ff80 /* Bright White */
-};
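Each table entry above is a complete Y'UV422 pixel pair for a solid colour, which is what lets the blanking loop later in the file do a plain 32-bit fill. The byte order the rest of the driver appears to assume is Y0 U Y1 V, big endian; that reading is inferred from the merge masks in wiifb_putc() rather than stated anywhere in the original. A small hedged decoder for one entry:

```c
/* Hedged sketch: split one wiifb_cmap entry into its assumed Y0/U/Y1/V bytes. */
#include <stdio.h>

int
main(void)
{
	unsigned int white = 0xff80ff80;	/* "White" entry from the table */
	unsigned int y0 = (white >> 24) & 0xff;
	unsigned int u  = (white >> 16) & 0xff;
	unsigned int y1 = (white >>  8) & 0xff;
	unsigned int v  = white & 0xff;

	/* Full luma, neutral chroma: white for both pixels of the pair. */
	printf("Y0=%#x U=%#x Y1=%#x V=%#x\n", y0, u, y1, v);
	return (0);
}
```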
-
-static struct wiifb_mode_desc wiifb_modes[] = {
- [WIIFB_MODE_NTSC_480i] = {
- "NTSC 480i",
- 640, 480,
- 525,
- WIIFB_MODE_FLAG_INTERLACED,
- },
- [WIIFB_MODE_NTSC_480p] = {
- "NTSC 480p",
- 640, 480,
- 525,
- WIIFB_MODE_FLAG_PROGRESSIVE,
- },
- [WIIFB_MODE_PAL_576i] = {
- "PAL 576i (50Hz)",
- 640, 574,
- 625,
- WIIFB_MODE_FLAG_INTERLACED,
- },
- [WIIFB_MODE_PAL_480i] = {
- "PAL 480i (60Hz)",
- 640, 480,
- 525,
- WIIFB_MODE_FLAG_INTERLACED,
- },
- [WIIFB_MODE_PAL_480p] = {
- "PAL 480p",
- 640, 480,
- 525,
- WIIFB_MODE_FLAG_PROGRESSIVE,
- },
-};
-
-static const uint32_t wiifb_filter_coeft[] = {
- 0x1ae771f0, 0x0db4a574, 0x00c1188e, 0xc4c0cbe2, 0xfcecdecf,
- 0x13130f08, 0x00080c0f
-};
-
-static __inline int
-wiifb_background(uint8_t attr)
-{
-
- return (attr >> 4);
-}
-
-static __inline int
-wiifb_foreground(uint8_t attr)
-{
-
- return (attr & 0x0f);
-}
-
-static void
-wiifb_reset_video(struct wiifb_softc *sc)
-{
- struct wiifb_dispcfg dc;
-
- wiifb_dispcfg_read(sc, &dc);
- dc.dc_reset = 1;
- wiifb_dispcfg_write(sc, &dc);
- dc.dc_reset = 0;
- wiifb_dispcfg_write(sc, &dc);
-}
-
-static void
-wiifb_enable_interrupts(struct wiifb_softc *sc)
-{
- struct wiifb_dispint di;
-
-#ifdef notyet
- /*
- * Display Interrupt 0
- */
- di.di_htiming = 1;
- di.di_vtiming = 1;
- di.di_enable = 1;
- di.di_irq = 1;
- wiifb_dispint_write(sc, 0, &di);
-
- /*
- * Display Interrupt 1
- */
- di.di_htiming = sc->sc_format == WIIFB_FORMAT_PAL ? 433 : 430;
- di.di_vtiming = sc->sc_mode->fd_lines;
- di.di_enable = 1;
- di.di_irq = 1;
- if (sc->sc_mode->fd_flags & WIIFB_MODE_FLAG_INTERLACED)
- di.di_vtiming /= 2;
- wiifb_dispint_write(sc, 1, &di);
-
- /*
- * Display Interrupts 2 and 3 are not used.
- */
- memset(&di, 0, sizeof(di));
- wiifb_dispint_write(sc, 2, &di);
- wiifb_dispint_write(sc, 3, &di);
-#else
- memset(&di, 0, sizeof(di));
- wiifb_dispint_write(sc, 0, &di);
- wiifb_dispint_write(sc, 1, &di);
- wiifb_dispint_write(sc, 2, &di);
- wiifb_dispint_write(sc, 3, &di);
-#endif
-}
-
-/*
- * Refer to gcnfb.c for an in-depth explanation.
- * XXX only works with NTSC.
- */
-static void
-wiifb_configure_tv_mode(struct wiifb_softc *sc)
-{
- struct wiifb_vtiming vt;
- struct wiifb_hscaling hs;
- struct wiifb_htiming0 ht0;
- struct wiifb_htiming1 ht1;
- struct wiifb_vtimingodd vto;
- struct wiifb_vtimingeven vte;
- struct wiifb_burstblankodd bbo;
- struct wiifb_burstblankeven bbe;
- struct wiifb_picconf pc;
- struct wiifb_mode_desc *mode = sc->sc_mode;
- unsigned int height = mode->fd_height;
- unsigned int width = mode->fd_width;
- unsigned int eqpulse, interlacebias, shift;
- const unsigned int maxwidth = 714;
- unsigned int hblanking = maxwidth - width;
- unsigned int hmargin = hblanking / 2;
- unsigned int A = 20 + hmargin, C = 60 + hblanking - hmargin;
- unsigned int maxheight = 484;
- unsigned int P = 2 * (20 - 10 + 1);
- unsigned int Q = 1;
- unsigned int vblanking = maxheight - height;
- unsigned int vmargin = vblanking / 2;
- unsigned int prb = vmargin;
- unsigned int psb = vblanking - vmargin;
- int i;
-
- /*
- * Vertical timing.
- */
- if (mode->fd_flags & WIIFB_MODE_FLAG_INTERLACED) {
- vt.vt_actvideo = height / 2;
- interlacebias = 1;
- shift = 0;
- } else {
- vt.vt_actvideo = height;
- interlacebias = 0;
- shift = 1;
- }
- /* Lines of equalization */
- if (mode->fd_lines == 625)
- eqpulse = 2 * 2.5;
- else
- eqpulse = 2 * 3;
- vt.vt_eqpulse = eqpulse << shift;
- wiifb_vtiming_write(sc, &vt);
-
- /*
- * Horizontal timings.
- */
- ht0.ht0_hlinew = 858 / 2;
- ht1.ht1_hsyncw = 64;
- ht0.ht0_hcolourstart = 71;
- ht0.ht0_hcolourend = 71 + 34;
- ht1.ht1_hblankstart = (858 / 2) - A;
- ht1.ht1_hblankend = 64 + C;
- wiifb_htiming0_write(sc, &ht0);
- wiifb_htiming1_write(sc, &ht1);
-
- /*
- * Vertical timing odd/even.
- */
- if (vmargin & 1) {
- vto.vto_preb = (P + interlacebias + prb) << shift;
- vto.vto_postb = (Q - interlacebias + psb) << shift;
- vte.vte_preb = (P + prb) << shift;
- vte.vte_postb = (Q - psb) << shift;
- } else {
- /* XXX if this isn't 0, it doesn't work? */
- prb = 0;
- psb = 0;
- vte.vte_preb = (P + interlacebias + prb) << shift;
- vte.vte_postb = (Q - interlacebias + psb) << shift;
- vto.vto_preb = (P + prb) << shift;
- vto.vto_postb = (Q - psb) << shift;
- }
- wiifb_vtimingodd_write(sc, &vto);
- wiifb_vtimingeven_write(sc, &vte);
-
- /*
- * Burst blanking odd/even interval.
- */
- bbo.bbo_bs1 = 2 * (18 - 7 + 1);
- bbe.bbe_bs2 = bbo.bbo_bs3 = bbe.bbe_bs4 = bbo.bbo_bs1;
- bbo.bbo_be1 = 2 * (525 - 7 + 1);
- bbe.bbe_be2 = bbo.bbo_be3 = bbe.bbe_be4 = bbo.bbo_be1;
- wiifb_burstblankodd_write(sc, &bbo);
- wiifb_burstblankeven_write(sc, &bbe);
-
- /*
- * Picture configuration.
- */
- pc.pc_strides = (mode->fd_width * 2) / 32;
- if (mode->fd_flags & WIIFB_MODE_FLAG_INTERLACED)
- pc.pc_strides *= 2;
- pc.pc_reads = (mode->fd_width * 2) / 32;
- wiifb_picconf_write(sc, &pc);
-
- /*
- * Horizontal scaling disabled.
- */
- hs.hs_enable = 0;
- hs.hs_step = 256;
- wiifb_hscaling_write(sc, &hs);
-
- /*
-	 * Filter coefficient table.
- */
- for (i = 0; i < 7; i++)
- wiifb_filtcoeft_write(sc, i, wiifb_filter_coeft[i]);
-
- /*
- * Anti alias.
- */
- wiifb_antialias_write(sc, 0x00ff0000);
-
- /*
- * Video clock.
- */
- wiifb_videoclk_write(sc,
- mode->fd_flags & WIIFB_MODE_FLAG_INTERLACED ? 0 : 1);
-
- /*
- * Disable horizontal scaling width.
- */
- wiifb_hscalingw_write(sc, mode->fd_width);
-
- /*
- * DEBUG mode borders. Not used.
- */
- wiifb_hborderend_write(sc, 0);
- wiifb_hborderstart_write(sc, 0);
-
- /*
- * XXX unknown registers.
- */
- wiifb_unknown1_write(sc, 0x00ff);
- wiifb_unknown2_write(sc, 0x00ff00ff);
- wiifb_unknown3_write(sc, 0x00ff00ff);
-}
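Plugging the NTSC 480i mode (640x480, 525 lines, interlaced) into the arithmetic above gives concrete figures: hblanking = 714 - 640 = 74, hmargin = 37, A = 57, C = 97, the half-line width is 858 / 2 = 429, and vmargin = (484 - 480) / 2 = 2, which is even, so the prb/psb terms are forced to 0. A short hedged recomputation of those values:

```c
/* Hedged recomputation of the NTSC 480i timing figures derived above. */
#include <stdio.h>

int
main(void)
{
	unsigned int width = 640, height = 480;		/* WIIFB_MODE_NTSC_480i */
	unsigned int maxwidth = 714, maxheight = 484;
	unsigned int hblanking = maxwidth - width;	/* 74 */
	unsigned int hmargin = hblanking / 2;		/* 37 */
	unsigned int A = 20 + hmargin;			/* 57 */
	unsigned int C = 60 + hblanking - hmargin;	/* 97 */
	unsigned int vmargin = (maxheight - height) / 2;/* 2: even, so prb = psb = 0 */

	printf("actvideo=%u halflinew=%u hblankstart=%u hblankend=%u vmargin=%u\n",
	    height / 2, 858 / 2, (858 / 2) - A, 64 + C, vmargin);
	return (0);
}
```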
-
-static void
-wiifb_setup_framebuffer(struct wiifb_softc *sc)
-{
- intptr_t addr = sc->sc_fb_addr;
- struct wiifb_topfieldbasel tfbl;
- struct wiifb_bottomfieldbasel bfbl;
- struct wiifb_topfieldbaser tfbr;
- struct wiifb_bottomfieldbaser bfbr;
-
- tfbl.tfbl_fbaddr = addr >> 5;
- tfbl.tfbl_xoffset = (addr / 2) & 0xf;
- tfbl.tfbl_pageoffbit = 1;
- wiifb_topfieldbasel_write(sc, &tfbl);
-
- if (sc->sc_mode->fd_flags & WIIFB_MODE_FLAG_INTERLACED)
- addr += sc->sc_mode->fd_width * 2;
- bfbl.bfbl_fbaddr = addr >> 5;
- bfbl.bfbl_xoffset = (addr / 2) & 0xf;
- bfbl.bfbl_pageoffbit = 1;
- wiifb_bottomfieldbasel_write(sc, &bfbl);
-
- /*
-	 * Only used for 3D.
- */
- memset(&tfbr, 0, sizeof(tfbr));
- memset(&bfbr, 0, sizeof(bfbr));
- wiifb_topfieldbaser_write(sc, &tfbr);
- wiifb_bottomfieldbaser_write(sc, &bfbr);
-}
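With the framebuffer at WIIFB_FB_ADDR the field-base encoding above works out to a 32-byte-aligned base and a zero x-offset: 0x01698000 >> 5 = 0xb4c00 and (0x01698000 / 2) & 0xf = 0. For the interlaced modes the bottom field simply starts one line (width * 2 bytes) further on. A quick hedged check of both fields:

```c
/* Hedged check of the field base register fields for WIIFB_FB_ADDR. */
#include <stdio.h>

int
main(void)
{
	unsigned int fb = 0x01698000;		/* WIIFB_FB_ADDR */
	unsigned int stridebytes = 640 * 2;	/* one NTSC 480i line */

	printf("top:    fbaddr=%#x xoffset=%#x\n", fb >> 5, (fb / 2) & 0xf);
	fb += stridebytes;			/* bottom field, interlaced case */
	printf("bottom: fbaddr=%#x xoffset=%#x\n", fb >> 5, (fb / 2) & 0xf);
	return (0);
}
```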
-
-static int
-wiifb_configure(int flags)
-{
- struct wiifb_softc *sc;
- struct wiifb_dispcfg dc;
- int progressive;
-
- sc = &wiifb_softc;
- if (sc->sc_initialized) {
- /* XXX We should instead use bus_space */
- sc->sc_fb_addr = (intptr_t)pmap_mapdev(WIIFB_FB_ADDR, WIIFB_FB_LEN);
- sc->sc_reg_addr = (intptr_t)pmap_mapdev(WIIFB_REG_ADDR, WIIFB_REG_LEN);
-		return (0);
- }
-
- sc->sc_console = 1;
-
- sc->sc_fb_addr = WIIFB_FB_ADDR;
- sc->sc_fb_size = WIIFB_FB_LEN;
-
- sc->sc_reg_addr = WIIFB_REG_ADDR;
- sc->sc_reg_size = WIIFB_REG_LEN;
-
- wiifb_reset_video(sc);
- wiifb_dispcfg_read(sc, &dc);
- sc->sc_format = dc.dc_format;
- sc->sc_component = wiifb_component_enabled(sc);
- progressive = dc.dc_noninterlaced;
- switch (sc->sc_format) {
- case WIIFB_FORMAT_MPAL:
- case WIIFB_FORMAT_DEBUG:
- case WIIFB_FORMAT_NTSC:
- sc->sc_mode = progressive ?
- &wiifb_modes[WIIFB_MODE_NTSC_480p] :
- &wiifb_modes[WIIFB_MODE_NTSC_480i];
- break;
- case WIIFB_FORMAT_PAL:
- sc->sc_mode = progressive ?
- &wiifb_modes[WIIFB_MODE_PAL_480p] :
- &wiifb_modes[WIIFB_MODE_PAL_480i];
- break;
- }
- sc->sc_height = sc->sc_mode->fd_height;
- sc->sc_width = sc->sc_mode->fd_width;
- /* Usually we multiply by 4, but I think this looks better. */
- sc->sc_stride = sc->sc_width * 2;
-
- wiifb_init(0, &sc->sc_va, 0);
-
- sc->sc_initialized = 1;
-
- return (0);
-}
-
-static int
-wiifb_probe(int unit, video_adapter_t **adp, void *arg, int flags)
-{
-
- return (0);
-}
-
-static int
-wiifb_init(int unit, video_adapter_t *adp, int flags)
-{
- struct wiifb_softc *sc;
- video_info_t *vi;
-
- sc = (struct wiifb_softc *)adp;
- vi = &adp->va_info;
-
- vid_init_struct(adp, "wiifb", -1, unit);
-
- sc->sc_font = dflt_font_8;
- vi->vi_cheight = WIIFB_FONT_HEIGHT;
- vi->vi_width = sc->sc_width/8;
- vi->vi_height = sc->sc_height/vi->vi_cheight;
- vi->vi_cwidth = 8;
-
- /*
- * Clamp width/height to syscons maximums
- */
- if (vi->vi_width > COL)
- vi->vi_width = COL;
- if (vi->vi_height > ROW)
- vi->vi_height = ROW;
-
- sc->sc_xmargin = (sc->sc_width - (vi->vi_width * vi->vi_cwidth)) / 2;
- sc->sc_ymargin = (sc->sc_height - (vi->vi_height * vi->vi_cheight))/2;
-
- adp->va_window = (vm_offset_t) wiifb_static_window;
- /* XXX no colour support */
- adp->va_flags |= V_ADP_FONT | /*V_ADP_COLOR |*/ V_ADP_MODECHANGE;
-
- vid_register(&sc->sc_va);
-
- wiifb_configure_tv_mode(sc);
- wiifb_setup_framebuffer(sc);
- wiifb_enable_interrupts(sc);
- wiifb_clear(adp);
-
- return (0);
-}
-
-static int
-wiifb_get_info(video_adapter_t *adp, int mode, video_info_t *info)
-{
-
- bcopy(&adp->va_info, info, sizeof(*info));
- return (0);
-}
-
-static int
-wiifb_query_mode(video_adapter_t *adp, video_info_t *info)
-{
-
- return (0);
-}
-
-static int
-wiifb_set_mode(video_adapter_t *adp, int mode)
-{
-
- return (0);
-}
-
-static int
-wiifb_save_font(video_adapter_t *adp, int page, int size, int width,
- u_char *data, int c, int count)
-{
-
- return (0);
-}
-
-static int
-wiifb_load_font(video_adapter_t *adp, int page, int size, int width,
- u_char *data, int c, int count)
-{
- struct wiifb_softc *sc = (struct wiifb_softc *)adp;
-
- sc->sc_font = data;
-
- return (0);
-}
-
-static int
-wiifb_show_font(video_adapter_t *adp, int page)
-{
-
- return (0);
-}
-
-static int
-wiifb_save_palette(video_adapter_t *adp, u_char *palette)
-{
-
- return (0);
-}
-
-static int
-wiifb_load_palette(video_adapter_t *adp, u_char *palette)
-{
-
- return (0);
-}
-
-static int
-wiifb_set_border(video_adapter_t *adp, int border)
-{
-
- return (wiifb_blank_display(adp, border));
-}
-
-static int
-wiifb_save_state(video_adapter_t *adp, void *p, size_t size)
-{
-
- return (0);
-}
-
-static int
-wiifb_load_state(video_adapter_t *adp, void *p)
-{
-
- return (0);
-}
-
-static int
-wiifb_set_win_org(video_adapter_t *adp, off_t offset)
-{
-
- return (0);
-}
-
-static int
-wiifb_read_hw_cursor(video_adapter_t *adp, int *col, int *row)
-{
-
- *col = *row = 0;
-
- return (0);
-}
-
-static int
-wiifb_set_hw_cursor(video_adapter_t *adp, int col, int row)
-{
-
- return (0);
-}
-
-static int
-wiifb_set_hw_cursor_shape(video_adapter_t *adp, int base, int height,
- int celsize, int blink)
-{
-
- return (0);
-}
-
-static int
-wiifb_blank_display(video_adapter_t *adp, int mode)
-{
- struct wiifb_softc *sc = (struct wiifb_softc *)adp;
- uint32_t *p;
-
- for (p = (uint32_t *)sc->sc_fb_addr;
- p < (uint32_t *)(sc->sc_fb_addr + sc->sc_fb_size);
- p++)
- *p = wiifb_cmap[wiifb_background(SC_NORM_ATTR)];
-
- return (0);
-}
-
-static int
-wiifb_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
-{
- struct wiifb_softc *sc;
-
- sc = (struct wiifb_softc *)adp;
-
- /*
- * This might be a legacy VGA mem request: if so, just point it at the
- * framebuffer, since it shouldn't be touched
- */
- if (offset < sc->sc_stride*sc->sc_height) {
- *paddr = sc->sc_fb_addr + offset;
- return (0);
- }
-
- return (EINVAL);
-}
-
-static int
-wiifb_ioctl(video_adapter_t *adp, u_long cmd, caddr_t data)
-{
-
- return (0);
-}
-
-static int
-wiifb_clear(video_adapter_t *adp)
-{
-
- return (wiifb_blank_display(adp, 0));
-}
-
-static int
-wiifb_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy)
-{
-
- return (0);
-}
-
-static int
-wiifb_bitblt(video_adapter_t *adp, ...)
-{
-
- return (0);
-}
-
-static int
-wiifb_diag(video_adapter_t *adp, int level)
-{
-
- return (0);
-}
-
-static int
-wiifb_save_cursor_palette(video_adapter_t *adp, u_char *palette)
-{
-
- return (0);
-}
-
-static int
-wiifb_load_cursor_palette(video_adapter_t *adp, u_char *palette)
-{
-
- return (0);
-}
-
-static int
-wiifb_copy(video_adapter_t *adp, vm_offset_t src, vm_offset_t dst, int n)
-{
-
- return (0);
-}
-
-static int
-wiifb_putp(video_adapter_t *adp, vm_offset_t off, uint32_t p, uint32_t a,
- int size, int bpp, int bit_ltor, int byte_ltor)
-{
-
- return (0);
-}
-
-static int
-wiifb_putc(video_adapter_t *adp, vm_offset_t off, uint8_t c, uint8_t a)
-{
- struct wiifb_softc *sc;
- int row;
- int col;
- int i, j, k;
- uint32_t *addr;
- u_char *p;
- uint32_t fg, bg;
- unsigned long pixel[2];
-
- sc = (struct wiifb_softc *)adp;
- row = (off / adp->va_info.vi_width) * adp->va_info.vi_cheight;
- col = (off % adp->va_info.vi_width) * adp->va_info.vi_cwidth / 2;
- p = sc->sc_font + c*WIIFB_FONT_HEIGHT;
- addr = (uint32_t *)sc->sc_fb_addr
- + (row + sc->sc_ymargin)*(sc->sc_stride/4)
- + col + sc->sc_xmargin;
-
- bg = wiifb_cmap[wiifb_background(a)];
- fg = wiifb_cmap[wiifb_foreground(a)];
-
- for (i = 0; i < WIIFB_FONT_HEIGHT; i++) {
- for (j = 0, k = 7; j < 4; j++, k--) {
- if ((p[i] & (1 << k)) == 0)
- pixel[0] = bg;
- else
- pixel[0] = fg;
- k--;
- if ((p[i] & (1 << k)) == 0)
- pixel[1] = bg;
- else
- pixel[1] = fg;
-
- addr[j] = (pixel[0] & 0xffff00ff) |
- (pixel[1] & 0x0000ff00);
- }
- addr += (sc->sc_stride/4);
- }
-
- return (0);
-}
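The merge at the bottom of wiifb_putc() is what ties the two-pixels-per-word layout to the one-colour-per-entry palette: 0xffff00ff keeps Y0, U and V from the left pixel's colour while 0x0000ff00 takes only Y1, the right pixel's luma, so both pixels of the pair share the left pixel's chroma. A hedged sketch of that merge in isolation; the interpretation is inferred from the masks, not stated in the original:

```c
/* Hedged sketch of the pixel-pair merge used in wiifb_putc(). */
#include <stdio.h>

int
main(void)
{
	unsigned int white = 0xff80ff80;	/* foreground colour pair */
	unsigned int black = 0x00800080;	/* background colour pair */

	/* Left pixel white, right pixel black: white's Y0/U/V, black's Y1. */
	unsigned int word = (white & 0xffff00ff) | (black & 0x0000ff00);

	printf("merged word = %#010x\n", word);	/* 0xff800080 */
	return (0);
}
```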
-
-static int
-wiifb_puts(video_adapter_t *adp, vm_offset_t off, u_int16_t *s, int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- wiifb_putc(adp, off + i, s[i] & 0xff, (s[i] & 0xff00) >> 8);
-
- return (0);
-}
-
-static int
-wiifb_putm(video_adapter_t *adp, int x, int y, uint8_t *pixel_image,
- uint32_t pixel_mask, int size, int width)
-{
-
- return (0);
-}
diff --git a/sys/powerpc/wii/wii_fbreg.h b/sys/powerpc/wii/wii_fbreg.h
deleted file mode 100644
index 24a7cdf63486..000000000000
--- a/sys/powerpc/wii/wii_fbreg.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WII_FBREG_H
-#define _POWERPC_WII_WII_FBREG_H
-
-/*
- * Memory addresses for the I/O and the framebuffer.
- */
-#define WIIFB_REG_ADDR 0x0c002000
-#define WIIFB_REG_LEN 0x100
-#define WIIFB_FB_ADDR 0x01698000 /* at the end of 1T SRAM */
-#define WIIFB_FB_LEN 0x168000
-
-#endif /* _POWERPC_WII_WII_FBREG_H */
diff --git a/sys/powerpc/wii/wii_fbvar.h b/sys/powerpc/wii/wii_fbvar.h
deleted file mode 100644
index 53e71dfa5b81..000000000000
--- a/sys/powerpc/wii/wii_fbvar.h
+++ /dev/null
@@ -1,857 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WIIFB_H
-#define _POWERPC_WII_WIIFB_H
-
-#define WIIFB_FONT_HEIGHT 8
-
-enum wiifb_format {
- WIIFB_FORMAT_NTSC = 0,
- WIIFB_FORMAT_PAL = 1,
- WIIFB_FORMAT_MPAL = 2,
- WIIFB_FORMAT_DEBUG = 3
-};
-
-enum wiifb_mode {
- WIIFB_MODE_NTSC_480i = 0,
- WIIFB_MODE_NTSC_480p = 1,
- WIIFB_MODE_PAL_576i = 2,
- WIIFB_MODE_PAL_480i = 3,
- WIIFB_MODE_PAL_480p = 4
-};
-
-struct wiifb_mode_desc {
- const char *fd_name;
- unsigned int fd_width;
- unsigned int fd_height;
- unsigned int fd_lines;
- uint8_t fd_flags;
-#define WIIFB_MODE_FLAG_PROGRESSIVE 0x00
-#define WIIFB_MODE_FLAG_INTERLACED 0x01
-};
-
-struct wiifb_softc {
- video_adapter_t sc_va;
- struct cdev *sc_si;
- int sc_console;
-
- intptr_t sc_reg_addr;
- unsigned int sc_reg_size;
-
- intptr_t sc_fb_addr;
- unsigned int sc_fb_size;
-
- unsigned int sc_height;
- unsigned int sc_width;
- unsigned int sc_stride;
-
- unsigned int sc_xmargin;
- unsigned int sc_ymargin;
-
- boolean_t sc_component;
- enum wiifb_format sc_format;
- struct wiifb_mode_desc *sc_mode;
-
- unsigned int sc_vtiming;
- unsigned int sc_htiming;
-
- unsigned char *sc_font;
- int sc_initialized;
- int sc_rrid;
-};
-
-/*
- * Vertical timing
- * 16 bit
- */
-#define WIIFB_REG_VTIMING 0x00
-struct wiifb_vtiming {
- uint8_t vt_eqpulse;
- uint16_t vt_actvideo;
-};
-
-static __inline void
-wiifb_vtiming_read(struct wiifb_softc *sc, struct wiifb_vtiming *vt)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMING);
-
- vt->vt_eqpulse = *reg & 0xf;
- vt->vt_actvideo = (*reg >> 4) & 0x3ff;
-}
-
-static __inline void
-wiifb_vtiming_write(struct wiifb_softc *sc, struct wiifb_vtiming *vt)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMING);
-
- *reg = ((vt->vt_actvideo & 0x3ff) << 4) |
- (vt->vt_eqpulse & 0xf);
- powerpc_sync();
-}
-
-/*
- * Display configuration
- * 16 bit
- */
-#define WIIFB_REG_DISPCFG 0x02
-struct wiifb_dispcfg {
- uint8_t dc_enable;
- uint8_t dc_reset;
- uint8_t dc_noninterlaced;
- uint8_t dc_3dmode;
- uint8_t dc_latchenb0;
- uint8_t dc_latchenb1;
- enum wiifb_format dc_format;
-};
-
-static __inline void
-wiifb_dispcfg_read(struct wiifb_softc *sc, struct wiifb_dispcfg *dc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPCFG);
-
- dc->dc_enable = *reg & 0x1;
- dc->dc_reset = (*reg >> 1) & 0x1;
- dc->dc_noninterlaced = (*reg >> 2) & 0x1;
- dc->dc_3dmode = (*reg >> 3) & 0x1;
- dc->dc_latchenb0 = (*reg >> 4) & 0x3;
- dc->dc_latchenb1 = (*reg >> 6) & 0x3;
- dc->dc_format = (*reg >> 8) & 0x3;
-}
-
-static __inline void
-wiifb_dispcfg_write(struct wiifb_softc *sc, struct wiifb_dispcfg *dc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPCFG);
-
- *reg = ((dc->dc_format & 0x3) << 8) |
- ((dc->dc_latchenb1 & 0x3) << 6) |
- ((dc->dc_latchenb0 & 0x3) << 4) |
- ((dc->dc_3dmode & 0x1) << 3) |
- ((dc->dc_noninterlaced & 0x1) << 2) |
- ((dc->dc_reset & 0x1) << 1) |
- (dc->dc_enable & 0x1);
- powerpc_sync();
-}
-
-/*
- * Horizontal Timing 0
- * 32 bit
- */
-#define WIIFB_REG_HTIMING0 0x04
-struct wiifb_htiming0 {
- uint16_t ht0_hlinew; /* half line width */
- uint8_t ht0_hcolourend;
- uint8_t ht0_hcolourstart;
-};
-
-static __inline void
-wiifb_htiming0_read(struct wiifb_softc *sc, struct wiifb_htiming0 *ht0)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_HTIMING0);
-
- ht0->ht0_hlinew = *reg & 0x1ff;
- ht0->ht0_hcolourend = (*reg >> 16) & 0x7f;
- ht0->ht0_hcolourstart = (*reg >> 24) & 0x7f;
-}
-
-static __inline void
-wiifb_htiming0_write(struct wiifb_softc *sc, struct wiifb_htiming0 *ht0)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_HTIMING0);
-
- *reg = ((ht0->ht0_hcolourstart & 0x7f) << 24) |
- ((ht0->ht0_hcolourend & 0x7f) << 16) |
- (ht0->ht0_hlinew & 0x1ff);
- powerpc_sync();
-}
-
-/*
- * Horizontal Timing 1
- * 32 bit
- */
-#define WIIFB_REG_HTIMING1 0x08
-struct wiifb_htiming1 {
- uint8_t ht1_hsyncw;
- uint16_t ht1_hblankend;
- uint16_t ht1_hblankstart;
-};
-
-static __inline void
-wiifb_htiming1_read(struct wiifb_softc *sc, struct wiifb_htiming1 *ht1)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_HTIMING1);
-
- ht1->ht1_hsyncw = *reg & 0x7f;
- ht1->ht1_hblankend = (*reg >> 7) & 0x3ff;
- ht1->ht1_hblankstart = (*reg >> 17) & 0x3ff;
-}
-
-static __inline void
-wiifb_htiming1_write(struct wiifb_softc *sc, struct wiifb_htiming1 *ht1)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_HTIMING1);
-
- *reg = ((ht1->ht1_hblankstart & 0x3ff) << 17) |
- ((ht1->ht1_hblankend & 0x3ff) << 7) |
- (ht1->ht1_hsyncw & 0x7f);
- powerpc_sync();
-}
-
-/*
- * Vertical Timing Odd
- * 32 bit
- */
-#define WIIFB_REG_VTIMINGODD 0x0c
-struct wiifb_vtimingodd {
- uint16_t vto_preb; /* pre blanking */
- uint16_t vto_postb; /* post blanking */
-};
-
-static __inline void
-wiifb_vtimingodd_read(struct wiifb_softc *sc, struct wiifb_vtimingodd *vto)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMINGODD);
-
- vto->vto_preb = *reg & 0x3ff;
- vto->vto_postb = (*reg >> 16) & 0x3ff;
-}
-
-static __inline void
-wiifb_vtimingodd_write(struct wiifb_softc *sc, struct wiifb_vtimingodd *vto)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMINGODD);
-
- *reg = ((vto->vto_postb & 0x3ff) << 16) |
- (vto->vto_preb & 0x3ff);
- powerpc_sync();
-}
-
-/*
- * Vertical Timing Even
- * 32 bit
- */
-#define WIIFB_REG_VTIMINGEVEN 0x10
-struct wiifb_vtimingeven {
- uint16_t vte_preb; /* pre blanking */
- uint16_t vte_postb; /* post blanking */
-};
-
-static __inline void
-wiifb_vtimingeven_read(struct wiifb_softc *sc, struct wiifb_vtimingeven *vte)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMINGEVEN);
-
- vte->vte_preb = *reg & 0x3ff;
- vte->vte_postb = (*reg >> 16) & 0x3ff;
-}
-
-static __inline void
-wiifb_vtimingeven_write(struct wiifb_softc *sc, struct wiifb_vtimingeven *vte)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_VTIMINGEVEN);
-
- *reg = ((vte->vte_postb & 0x3ff) << 16) |
- (vte->vte_preb & 0x3ff);
- powerpc_sync();
-}
-
-/*
- * Burst Blanking Odd Interval
- * 32 bit
- */
-#define WIIFB_REG_BURSTBLANKODD 0x14
-struct wiifb_burstblankodd {
- uint8_t bbo_bs1;
- uint16_t bbo_be1;
- uint8_t bbo_bs3;
- uint16_t bbo_be3;
-};
-
-static __inline void
-wiifb_burstblankodd_read(struct wiifb_softc *sc,
- struct wiifb_burstblankodd *bbo)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BURSTBLANKODD);
-
- bbo->bbo_bs1 = *reg & 0x1f;
- bbo->bbo_be1 = (*reg >> 5) & 0x7ff;
- bbo->bbo_bs3 = (*reg >> 16) & 0x1f;
- bbo->bbo_be3 = (*reg >> 21) & 0x7ff;
-}
-
-static __inline void
-wiifb_burstblankodd_write(struct wiifb_softc *sc,
- struct wiifb_burstblankodd *bbo)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BURSTBLANKODD);
-
- *reg = ((bbo->bbo_be3 & 0x7ff) << 21) |
- ((bbo->bbo_bs3 & 0x1f) << 16) |
- ((bbo->bbo_be1 & 0x7ff) << 5) |
- (bbo->bbo_bs1 & 0x1f);
- powerpc_sync();
-}
-
-/*
- * Burst Blanking Even Interval
- * 32 bit
- */
-#define WIIFB_REG_BURSTBLANKEVEN 0x18
-struct wiifb_burstblankeven {
- uint8_t bbe_bs2;
- uint16_t bbe_be2;
- uint8_t bbe_bs4;
- uint16_t bbe_be4;
-};
-
-static __inline void
-wiifb_burstblankeven_read(struct wiifb_softc *sc,
- struct wiifb_burstblankeven *bbe)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BURSTBLANKEVEN);
-
- bbe->bbe_bs2 = *reg & 0x1f;
- bbe->bbe_be2 = (*reg >> 5) & 0x7ff;
- bbe->bbe_bs4 = (*reg >> 16) & 0x1f;
- bbe->bbe_be4 = (*reg >> 21) & 0x7ff;
-}
-
-static __inline void
-wiifb_burstblankeven_write(struct wiifb_softc *sc,
- struct wiifb_burstblankeven *bbe)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BURSTBLANKEVEN);
-
- *reg = ((bbe->bbe_be4 & 0x7ff) << 21) |
- ((bbe->bbe_bs4 & 0x1f) << 16) |
- ((bbe->bbe_be2 & 0x7ff) << 5) |
- (bbe->bbe_bs2 & 0x1f);
- powerpc_sync();
-}
-
-/*
- * Top Field Base Left
- * 32 bit
- */
-#define WIIFB_REG_TOPFIELDBASEL 0x1c
-struct wiifb_topfieldbasel {
- uint32_t tfbl_fbaddr;
- uint8_t tfbl_xoffset;
- uint8_t tfbl_pageoffbit;
-};
-
-static __inline void
-wiifb_topfieldbasel_read(struct wiifb_softc *sc,
- struct wiifb_topfieldbasel *tfbl)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_TOPFIELDBASEL);
-
- tfbl->tfbl_fbaddr = *reg & 0xffffff;
- tfbl->tfbl_xoffset = (*reg >> 24) & 0xf;
- tfbl->tfbl_pageoffbit = (*reg >> 28) & 0x1;
-}
-
-static __inline void
-wiifb_topfieldbasel_write(struct wiifb_softc *sc,
- struct wiifb_topfieldbasel *tfbl)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_TOPFIELDBASEL);
-
- *reg = ((tfbl->tfbl_pageoffbit & 0x1) << 28) |
- ((tfbl->tfbl_xoffset & 0xf) << 24) |
- (tfbl->tfbl_fbaddr & 0xffffff);
- powerpc_sync();
-}
-
-/*
- * Top Field Base Right
- * 32 bit
- */
-#define WIIFB_REG_TOPFIELDBASER 0x20
-struct wiifb_topfieldbaser {
- uint32_t tfbr_fbaddr;
- uint8_t tfbr_pageoffbit;
-};
-
-static __inline void
-wiifb_topfieldbaser_read(struct wiifb_softc *sc,
- struct wiifb_topfieldbaser *tfbr)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_TOPFIELDBASER);
-
- tfbr->tfbr_fbaddr = *reg & 0xffffff;
- tfbr->tfbr_pageoffbit = (*reg >> 28) & 0x1;
-}
-
-static __inline void
-wiifb_topfieldbaser_write(struct wiifb_softc *sc,
- struct wiifb_topfieldbaser *tfbr)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_TOPFIELDBASER);
-
- *reg = ((tfbr->tfbr_pageoffbit & 0x1) << 28) |
- (tfbr->tfbr_fbaddr & 0xffffff);
- powerpc_sync();
-}
-
-/*
- * Bottom Field Base Left
- * 32 bit
- */
-#define WIIFB_REG_BOTTOMFIELDBASEL 0x24
-struct wiifb_bottomfieldbasel {
- uint32_t bfbl_fbaddr;
- uint8_t bfbl_xoffset;
- uint8_t bfbl_pageoffbit;
-};
-
-static __inline void
-wiifb_bottomfieldbasel_read(struct wiifb_softc *sc,
- struct wiifb_bottomfieldbasel *bfbl)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BOTTOMFIELDBASEL);
-
- bfbl->bfbl_fbaddr = *reg & 0xffffff;
- bfbl->bfbl_xoffset = (*reg >> 24) & 0xf;
- bfbl->bfbl_pageoffbit = (*reg >> 28) & 0x1;
-}
-
-static __inline void
-wiifb_bottomfieldbasel_write(struct wiifb_softc *sc,
- struct wiifb_bottomfieldbasel *bfbl)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BOTTOMFIELDBASEL);
-
- *reg = ((bfbl->bfbl_pageoffbit & 0x1) << 28) |
- ((bfbl->bfbl_xoffset & 0xf) << 24) |
- (bfbl->bfbl_fbaddr & 0xffffff);
- powerpc_sync();
-}
-
-/*
- * Bottom Field Base Right
- * 32 bit
- */
-#define WIIFB_REG_BOTTOMFIELDBASER 0x28
-struct wiifb_bottomfieldbaser {
- uint32_t bfbr_fbaddr;
- uint8_t bfbr_pageoffbit;
-};
-
-static __inline void
-wiifb_bottomfieldbaser_read(struct wiifb_softc *sc,
- struct wiifb_bottomfieldbaser *bfbr)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BOTTOMFIELDBASER);
-
- bfbr->bfbr_fbaddr = *reg & 0xffffff;
- bfbr->bfbr_pageoffbit = (*reg >> 28) & 0x1;
-}
-
-static __inline void
-wiifb_bottomfieldbaser_write(struct wiifb_softc *sc,
- struct wiifb_bottomfieldbaser *bfbr)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_BOTTOMFIELDBASER);
-
- *reg = ((bfbr->bfbr_pageoffbit & 0x1) << 28) |
- (bfbr->bfbr_fbaddr & 0xffffff);
- powerpc_sync();
-}
-
-/*
- * Display Position Vertical
- * 16 bit
- */
-#define WIIFB_REG_DISPPOSV 0x2c
-static __inline uint16_t
-wiifb_dispposv_read(struct wiifb_softc *sc)
-{
-	volatile uint16_t *reg =
-	    (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPPOSV);
-
- return (*reg & 0x7ff);
-}
-
-static __inline void
-wiifb_dispposv_write(struct wiifb_softc *sc, uint16_t val)
-{
-	volatile uint16_t *reg =
-	    (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPPOSV);
-
- *reg = val & 0x7ff;
- powerpc_sync();
-}
-
-/*
- * Display Position Horizontal
- * 16 bit
- */
-#define WIIFB_REG_DISPPOSH 0x2e
-static __inline uint16_t
-wiifb_dispposh_read(struct wiifb_softc *sc)
-{
-	volatile uint16_t *reg =
-	    (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPPOSH);
-
- return (*reg & 0x7ff);
-}
-
-static __inline void
-wiifb_dispposh_write(struct wiifb_softc *sc, uint16_t val)
-{
-	volatile uint16_t *reg =
-	    (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DISPPOSH);
-
- *reg = val & 0x7ff;
- powerpc_sync();
-}
-
-/*
- * Display Interrupts.
- * There are 4 display interrupt registers, all 32 bit.
- */
-#define WIIFB_REG_DISPINT0 0x30
-#define WIIFB_REG_DISPINT1 0x34
-#define WIIFB_REG_DISPINT2 0x38
-#define WIIFB_REG_DISPINT3 0x3c
-struct wiifb_dispint {
- uint16_t di_htiming;
- uint16_t di_vtiming;
- uint8_t di_enable;
- uint8_t di_irq;
-};
-
-static __inline void
-wiifb_dispint_read(struct wiifb_softc *sc, int regno, struct wiifb_dispint *di)
-{
- volatile uint32_t *reg = (uint32_t *)(sc->sc_reg_addr +
- WIIFB_REG_DISPINT0 + regno * 4);
-
- di->di_htiming = *reg & 0x3ff;
- di->di_vtiming = (*reg >> 16) & 0x3ff;
- di->di_enable = (*reg >> 28) & 0x1;
- di->di_irq = (*reg >> 31) & 0x1;
-}
-
-static __inline void
-wiifb_dispint_write(struct wiifb_softc *sc, int regno, struct wiifb_dispint *di)
-{
- volatile uint32_t *reg = (uint32_t *)(sc->sc_reg_addr +
- WIIFB_REG_DISPINT0 + regno * 4);
-
- *reg = ((di->di_irq & 0x1) << 31) |
- ((di->di_enable & 0x1) << 28) |
- ((di->di_vtiming & 0x3ff) << 16) |
- (di->di_htiming & 0x3ff);
- powerpc_sync();
-}
-
-/*
- * Display Latch 0
- * 32 bit
- */
-#define WIIFB_REG_DISPLAYTCH0 0x40
-
-/*
- * Display Latch 1
- * 32 bit
- */
-#define WIIFB_REG_DISPLAYTCH1 0x44
-
-/*
- * Picture Configuration
- * 16 bit
- */
-#define WIIFB_REG_PICCONF 0x48
-struct wiifb_picconf {
- uint8_t pc_strides; /* strides per line (words) */
-	uint8_t		pc_reads;	/* reads per line (words) */
-};
-
-static __inline void
-wiifb_picconf_read(struct wiifb_softc *sc, struct wiifb_picconf *pc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_PICCONF);
-
- pc->pc_strides = *reg & 0xff;
- pc->pc_reads = (*reg >> 8) & 0xff;
-}
-
-static __inline void
-wiifb_picconf_write(struct wiifb_softc *sc, struct wiifb_picconf *pc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_PICCONF);
-
- *reg = ((pc->pc_reads & 0xff) << 8) |
- (pc->pc_strides & 0xff);
- powerpc_sync();
-}
-
-/*
- * Horizontal Scaling
- * 16 bit
- */
-#define WIIFB_REG_HSCALING 0x4a
-struct wiifb_hscaling {
- uint16_t hs_step;
- uint8_t hs_enable;
-};
-
-static __inline void
-wiifb_hscaling_read(struct wiifb_softc *sc, struct wiifb_hscaling *hs)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HSCALING);
-
- hs->hs_step = *reg & 0x1ff;
- hs->hs_enable = (*reg >> 12) & 0x1;
-}
-
-static __inline void
-wiifb_hscaling_write(struct wiifb_softc *sc, struct wiifb_hscaling *hs)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HSCALING);
-
- *reg = ((hs->hs_step & 0x1ff) << 12) |
- (hs->hs_enable & 0x1);
- powerpc_sync();
-}
-
-/*
- * Filter Coefficient Table 0-6
- * 32 bit
- */
-#define WIIFB_REG_FILTCOEFT0 0x4c
-#define WIIFB_REG_FILTCOEFT1 0x50
-#define WIIFB_REG_FILTCOEFT2 0x54
-#define WIIFB_REG_FILTCOEFT3 0x58
-#define WIIFB_REG_FILTCOEFT4 0x5c
-#define WIIFB_REG_FILTCOEFT5 0x60
-#define WIIFB_REG_FILTCOEFT6 0x64
-static __inline void
-wiifb_filtcoeft_write(struct wiifb_softc *sc, unsigned int regno,
- uint32_t coeft)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_FILTCOEFT0 + 4 * regno);
-
- *reg = coeft;
- powerpc_sync();
-}
-
-/*
- * Anti-aliasing
- * 32 bit
- */
-#define WIIFB_REG_ANTIALIAS 0x68
-static __inline void
-wiifb_antialias_write(struct wiifb_softc *sc, uint32_t antialias)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_ANTIALIAS);
-
- *reg = antialias;
- powerpc_sync();
-}
-
-/*
- * Video Clock
- * 16 bit
- */
-#define WIIFB_REG_VIDEOCLK 0x6c
-static __inline uint8_t
-wiifb_videoclk_read(struct wiifb_softc *sc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_VIDEOCLK);
-
- return (*reg & 0x1);
-}
-
-static __inline void
-wiifb_videoclk_write(struct wiifb_softc *sc, uint16_t clk54mhz)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_VIDEOCLK);
-
- *reg = clk54mhz & 0x1;
- powerpc_sync();
-}
-
-/*
- * DTV Status
- * 16 bit
- *
- * DTV is another name for the Component Cable output.
- */
-#define WIIFB_REG_DTVSTATUS 0x6e
-static __inline uint16_t
-wiifb_dtvstatus_read(struct wiifb_softc *sc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_DTVSTATUS);
-
- return (*reg & 0x1);
-}
-
-static __inline uint16_t
-wiifb_component_enabled(struct wiifb_softc *sc)
-{
-
- return wiifb_dtvstatus_read(sc);
-}
-
-/*
- * Horizontal Scaling Width
- * 16 bit
- */
-#define WIIFB_REG_HSCALINGW 0x70
-static __inline uint16_t
-wiifb_hscalingw_read(struct wiifb_softc *sc)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HSCALINGW);
-
- return (*reg & 0x3ff);
-}
-
-static __inline void
-wiifb_hscalingw_write(struct wiifb_softc *sc, uint16_t width)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HSCALINGW);
-
- *reg = width & 0x3ff;
- powerpc_sync();
-}
-
-/*
- * Horizontal Border End
- * For debug mode only. Not used by this driver.
- * 16 bit
- */
-#define WIIFB_REG_HBORDEREND 0x72
-static __inline void
-wiifb_hborderend_write(struct wiifb_softc *sc, uint16_t border)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HBORDEREND);
-
- *reg = border;
- powerpc_sync();
-}
-
-/*
- * Horizontal Border Start
- * 16 bit
- */
-#define WIIFB_REG_HBORDERSTART 0x74
-static __inline void
-wiifb_hborderstart_write(struct wiifb_softc *sc, uint16_t border)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_HBORDERSTART);
-
- *reg = border;
- powerpc_sync();
-}
-
-/*
- * Unknown register
- * 16 bit
- */
-#define WIIFB_REG_UNKNOWN1 0x76
-static __inline void
-wiifb_unknown1_write(struct wiifb_softc *sc, uint16_t unknown)
-{
- volatile uint16_t *reg =
- (uint16_t *)(sc->sc_reg_addr + WIIFB_REG_UNKNOWN1);
-
- *reg = unknown;
- powerpc_sync();
-}
-
-/*
- * Unknown register
- * 32 bit
- */
-#define WIIFB_REG_UNKNOWN2 0x78
-static __inline void
-wiifb_unknown2_write(struct wiifb_softc *sc, uint32_t unknown)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_UNKNOWN2);
-
- *reg = unknown;
- powerpc_sync();
-}
-
-/*
- * Unknown register
- * 32 bit
- */
-#define WIIFB_REG_UNKNOWN3 0x7c
-static __inline void
-wiifb_unknown3_write(struct wiifb_softc *sc, uint32_t unknown)
-{
- volatile uint32_t *reg =
- (uint32_t *)(sc->sc_reg_addr + WIIFB_REG_UNKNOWN3);
-
- *reg = unknown;
- powerpc_sync();
-}
-
-#endif /* _POWERPC_WII_WIIFB_H */
diff --git a/sys/powerpc/wii/wii_gpio.c b/sys/powerpc/wii/wii_gpio.c
deleted file mode 100644
index e6d76b8c2ab1..000000000000
--- a/sys/powerpc/wii/wii_gpio.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/module.h>
-#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/gpio.h>
-#include <sys/reboot.h>
-
-#include <machine/bus.h>
-#include <machine/platform.h>
-#include <machine/intr_machdep.h>
-#include <machine/resource.h>
-
-#include <powerpc/wii/wii_gpioreg.h>
-
-#include "gpio_if.h"
-
-struct wiigpio_softc {
- device_t sc_dev;
- struct resource *sc_rres;
- bus_space_tag_t sc_bt;
- bus_space_handle_t sc_bh;
- int sc_rrid;
- struct mtx sc_mtx;
- struct gpio_pin sc_pins[WIIGPIO_NPINS];
-};
-
-
-#define WIIGPIO_PINBANK(_p) ((_p) / (WIIGPIO_NPINS / 2))
-#define WIIGPIO_PINMASK(_p) (1 << ((_p) % (WIIGPIO_NPINS / 2)))
-#define WIIGPIO_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
-#define WIIGPIO_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
-
-static int wiigpio_probe(device_t);
-static int wiigpio_attach(device_t);
-static int wiigpio_detach(device_t);
-static int wiigpio_pin_max(device_t, int *);
-static int wiigpio_pin_getname(device_t, uint32_t, char *);
-static int wiigpio_pin_getflags(device_t, uint32_t, uint32_t *);
-static int wiigpio_pin_setflags(device_t, uint32_t, uint32_t);
-static int wiigpio_pin_getcaps(device_t, uint32_t, uint32_t *);
-static int wiigpio_pin_get(device_t, uint32_t, unsigned int *);
-static int wiigpio_pin_set(device_t, uint32_t, unsigned int);
-static int wiigpio_pin_toggle(device_t, uint32_t);
-static void wiigpio_shutdown(void *, int);
-
-static device_method_t wiigpio_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, wiigpio_probe),
- DEVMETHOD(device_attach, wiigpio_attach),
- DEVMETHOD(device_detach, wiigpio_detach),
-
- /* GPIO protocol */
- DEVMETHOD(gpio_pin_max, wiigpio_pin_max),
- DEVMETHOD(gpio_pin_getname, wiigpio_pin_getname),
- DEVMETHOD(gpio_pin_getflags, wiigpio_pin_getflags),
- DEVMETHOD(gpio_pin_setflags, wiigpio_pin_setflags),
- DEVMETHOD(gpio_pin_getcaps, wiigpio_pin_getcaps),
- DEVMETHOD(gpio_pin_get, wiigpio_pin_get),
- DEVMETHOD(gpio_pin_set, wiigpio_pin_set),
- DEVMETHOD(gpio_pin_toggle, wiigpio_pin_toggle),
-
- DEVMETHOD_END
-};
-
-static driver_t wiigpio_driver = {
- "wiigpio",
- wiigpio_methods,
- sizeof(struct wiigpio_softc)
-};
-
-static devclass_t wiigpio_devclass;
-
-DRIVER_MODULE(wiigpio, wiibus, wiigpio_driver, wiigpio_devclass, 0, 0);
-
-static __inline uint32_t
-wiigpio_read(struct wiigpio_softc *sc, int n)
-{
-
- return (bus_space_read_4(sc->sc_bt, sc->sc_bh, n * 0x20));
-}
-
-static __inline void
-wiigpio_write(struct wiigpio_softc *sc, int n, uint32_t reg)
-{
-
- bus_space_write_4(sc->sc_bt, sc->sc_bh, n * 0x20, reg);
-}
-
-static __inline uint32_t
-wiigpio_dir_read(struct wiigpio_softc *sc, int n)
-{
-
- return (bus_space_read_4(sc->sc_bt, sc->sc_bh, n * 0x20 + 4));
-}
-
-static __inline void
-wiigpio_dir_write(struct wiigpio_softc *sc, int n, uint32_t reg)
-{
-
- bus_space_write_4(sc->sc_bt, sc->sc_bh, n * 0x20 + 4, reg);
-}
-
-static int
-wiigpio_probe(device_t dev)
-{
- device_set_desc(dev, "Nintendo Wii GPIO");
-
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-wiigpio_attach(device_t dev)
-{
- struct wiigpio_softc *sc;
- int i;
- uint32_t d;
-
- sc = device_get_softc(dev);
- sc->sc_dev = dev;
- sc->sc_rrid = 0;
- sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->sc_rrid, RF_ACTIVE);
- if (sc->sc_rres == NULL) {
- device_printf(dev, "could not alloc mem resource\n");
- return (ENXIO);
- }
- sc->sc_bt = rman_get_bustag(sc->sc_rres);
- sc->sc_bh = rman_get_bushandle(sc->sc_rres);
- mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
-#ifdef WIIGPIO_DEBUG
- device_printf(dev, "dir bank0=0x%08x bank1=0x%08x\n",
- wiigpio_dir_read(sc, 0), wiigpio_dir_read(sc, 1));
- device_printf(dev, "val bank0=0x%08x bank1=0x%08x\n",
- wiigpio_read(sc, 0), wiigpio_read(sc, 1));
-#endif
- for (i = 0; i < WIIGPIO_NPINS; i++) {
- sc->sc_pins[i].gp_caps = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
- sc->sc_pins[i].gp_pin = i;
- d = wiigpio_dir_read(sc, WIIGPIO_PINBANK(i));
- if (d & WIIGPIO_PINMASK(i))
- sc->sc_pins[i].gp_flags = GPIO_PIN_OUTPUT;
- else
- sc->sc_pins[i].gp_flags = GPIO_PIN_INPUT;
- snprintf(sc->sc_pins[i].gp_name, GPIOMAXNAME, "PIN %d", i);
-#ifdef WIIGPIO_DEBUG
- device_printf(dev, "PIN %d state %d flag %s\n", i,
- wiigpio_read(sc, WIIGPIO_PINBANK(i)) >>
- (i % (WIIGPIO_NPINS / 2)) & 1,
- sc->sc_pins[i].gp_flags == GPIO_PIN_INPUT ?
- "GPIO_PIN_INPUT" : "GPIO_PIN_OUTPUT");
-#endif
- }
- device_add_child(dev, "gpioc", -1);
- device_add_child(dev, "gpiobus", -1);
- /*
- * We will be responsible for powering off the system.
- */
- EVENTHANDLER_REGISTER(shutdown_final, wiigpio_shutdown, dev,
- SHUTDOWN_PRI_LAST);
-
- return (bus_generic_attach(dev));
-}
-
-static int
-wiigpio_detach(device_t dev)
-{
- struct wiigpio_softc *sc;
-
- sc = device_get_softc(dev);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
- mtx_destroy(&sc->sc_mtx);
-
- return (0);
-}
-
-static int
-wiigpio_pin_max(device_t dev, int *maxpin)
-{
-
- *maxpin = WIIGPIO_NPINS - 1;
-
- return (0);
-}
-
-static int
-wiigpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
-{
- struct wiigpio_softc *sc;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- *caps = sc->sc_pins[pin].gp_caps;
-
- return (0);
-}
-
-static int
-wiigpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
-{
- struct wiigpio_softc *sc;
- uint32_t reg;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- WIIGPIO_LOCK(sc);
- reg = wiigpio_read(sc, WIIGPIO_PINBANK(pin));
- *val = !!(reg & WIIGPIO_PINMASK(pin));
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-wiigpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
-{
- struct wiigpio_softc *sc;
- uint32_t reg, pinbank, pinmask;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- pinbank = WIIGPIO_PINBANK(pin);
- pinmask = WIIGPIO_PINMASK(pin);
- WIIGPIO_LOCK(sc);
- reg = wiigpio_read(sc, pinbank) & ~pinmask;
- if (value)
- reg |= pinmask;
- wiigpio_write(sc, pinbank, reg);
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-wiigpio_pin_toggle(device_t dev, uint32_t pin)
-{
- struct wiigpio_softc *sc;
- uint32_t val, pinbank, pinmask;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- pinbank = WIIGPIO_PINBANK(pin);
- pinmask = WIIGPIO_PINMASK(pin);
- WIIGPIO_LOCK(sc);
- val = wiigpio_read(sc, pinbank);
- if (val & pinmask)
- wiigpio_write(sc, pinbank, val & ~pinmask);
- else
- wiigpio_write(sc, pinbank, val | pinmask);
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-wiigpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
-{
- struct wiigpio_softc *sc;
- uint32_t reg, pinbank, pinmask;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- pinbank = WIIGPIO_PINBANK(pin);
- pinmask = WIIGPIO_PINMASK(pin);
- WIIGPIO_LOCK(sc);
- reg = wiigpio_dir_read(sc, WIIGPIO_PINBANK(pin));
- if (flags & GPIO_PIN_OUTPUT)
- wiigpio_dir_write(sc, pinbank, reg | pinmask);
- else
- wiigpio_dir_write(sc, pinbank, reg & ~pinmask);
- sc->sc_pins[pin].gp_flags = flags;
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-wiigpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
-{
- struct wiigpio_softc *sc;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- WIIGPIO_LOCK(sc);
- *flags = sc->sc_pins[pin].gp_flags;
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-wiigpio_pin_getname(device_t dev, uint32_t pin, char *name)
-{
- struct wiigpio_softc *sc;
-
- if (pin >= WIIGPIO_NPINS)
- return (EINVAL);
- sc = device_get_softc(dev);
- WIIGPIO_LOCK(sc);
- memcpy(name, sc->sc_pins[pin].gp_name, GPIOMAXNAME);
- WIIGPIO_UNLOCK(sc);
-
- return (0);
-}
-
-static void
-wiigpio_shutdown(void *xdev, int howto)
-{
- device_t dev;
-
- if (!(howto & RB_POWEROFF))
- return;
- dev = (device_t)xdev;
- wiigpio_pin_setflags(dev, WIIGPIO_POWEROFF_PIN, GPIO_PIN_OUTPUT);
- wiigpio_pin_set(dev, WIIGPIO_POWEROFF_PIN, 1);
-}
diff --git a/sys/powerpc/wii/wii_gpioreg.h b/sys/powerpc/wii/wii_gpioreg.h
deleted file mode 100644
index 45527f5ab603..000000000000
--- a/sys/powerpc/wii/wii_gpioreg.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WII_GPIOREG_H
-#define _POWERPC_WII_WII_GPIOREG_H
-
-#define WIIGPIO_NPINS 64
-#define WIIGPIO_POWEROFF_PIN 33
-
-#define WIIGPIO_REG_ADDR 0x0d8000c0
-#define WIIGPIO_REG_LEN 0x40
-
-#endif /* _POWERPC_WII_WII_GPIOREG_H */
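The removed wiigpio driver splits its 64 pins across two 32-bit banks via the WIIGPIO_PINBANK()/WIIGPIO_PINMASK() macros in wii_gpio.c above. A worked example for the power-off pin declared here (the arithmetic only restates the deleted macros):

	WIIGPIO_PINBANK(33)  ==  33 / 32         ==  1     /* second bank */
	WIIGPIO_PINMASK(33)  ==  1 << (33 % 32)  ==  0x2   /* bit 1 of that bank */
	/* bank n is read/written at bus-space offset n * 0x20 (see wiigpio_read()) */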
diff --git a/sys/powerpc/wii/wii_ipc.c b/sys/powerpc/wii/wii_ipc.c
deleted file mode 100644
index 7f49d31f586a..000000000000
--- a/sys/powerpc/wii/wii_ipc.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/module.h>
-#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-
-#include <machine/bus.h>
-#include <machine/platform.h>
-#include <machine/intr_machdep.h>
-#include <machine/resource.h>
-
-#include <powerpc/wii/wii_ipcreg.h>
-
-/*
- * Driver to interface with the Wii's IOS. IOS is a family of small
- * "microkernels" that run on the ARM core inside the Hollywood chip and
- * provide access to system services such as USB.
- */
-static int wiiipc_probe(device_t);
-static int wiiipc_attach(device_t);
-
-struct wiiipc_softc {
-};
-
-static device_method_t wiiipc_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, wiiipc_probe),
- DEVMETHOD(device_attach, wiiipc_attach),
-
- DEVMETHOD_END
-};
-
-static driver_t wiiipc_driver = {
- "wiiipc",
- wiiipc_methods,
- sizeof(struct wiiipc_softc)
-};
-
-static devclass_t wiiipc_devclass;
-
-DRIVER_MODULE(wiiipc, wiibus, wiiipc_driver, wiiipc_devclass, 0, 0);
-
-static int
-wiiipc_probe(device_t dev)
-{
- device_set_desc(dev, "Nintendo Wii IOS IPC");
-
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-wiiipc_attach(device_t dev)
-{
- struct wiiipc_softc *sc;
-
- sc = device_get_softc(dev);
-#ifdef notyet
- sc->sc_dev = dev;
-
- sc->sc_rrid = 0;
- sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->sc_rrid, RF_ACTIVE);
- if (sc->sc_rres == NULL) {
- device_printf(dev, "could not alloc mem resource\n");
- return (ENXIO);
- }
- sc->sc_bt = rman_get_bustag(sc->sc_rres);
- sc->sc_bh = rman_get_bushandle(sc->sc_rres);
-#endif
-
- return (0);
-}
diff --git a/sys/powerpc/wii/wii_ipcreg.h b/sys/powerpc/wii/wii_ipcreg.h
deleted file mode 100644
index 86b3e1f49598..000000000000
--- a/sys/powerpc/wii/wii_ipcreg.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WII_IPCREG_H
-#define _POWERPC_WII_WII_IPCREG_H
-
-#define WIIIPC_REG_ADDR 0x0d000000
-#define WIIIPC_REG_LEN 0x40
-#define WIIIPC_IOH_ADDR 0x133e0000
-#define WIIIPC_IOH_LEN 0xc20000
-
-#define WIIIPC_TXBUF 0x00
-#define WIIIPC_CSR 0x04
-#define WIIIPC_CSR_TXSTART 0x01
-#define WIIIPC_CSR_TBEI 0x02
-#define WIIIPC_CSR_RBFI 0x04
-#define WIIIPC_CSR_RXREADY 0x08
-#define WIIIPC_CSR_RBFIMASK 0x10
-#define WIIIPC_CSR_TBEIMASK 0x20
-#define WIIIPC_RXBUF 0x08
-#define WIIIPC_ISR 0x30
-#define WIIIPC_ISR_MAGIC 0x40000000
-
-enum wiiipc_cmd {
- WIIIPC_CMD_OPEN = 1,
- WIIIPC_CMD_CLOSE = 2,
- WIIIPC_CMD_READ = 3,
- WIIIPC_CMD_WRITE = 4,
- WIIIPC_CMD_SEEK = 5,
- WIIIPC_CMD_IOCTL = 6,
- WIIIPC_CMD_IOCTLV = 7,
- WIIIPC_CMD_ASYNCRESP = 8
-};
-
-struct wiiipc_ipc_msg {
- uint32_t ipc_cmd;
- int32_t ipc_result;
- int32_t ipc_fd; /* WIIIPC_CMD_ASYNCRESP - the original cmd */
- union {
- struct {
- intptr_t pathname;
- uint32_t mode;
- } _ipc_open;
- struct {
- intptr_t data;
- uint32_t len;
- } _ipc_read, _ipc_write;
- struct {
- int32_t offset;
- int32_t whence;
- } _ipc_seek;
- struct {
- uint32_t request;
- intptr_t ibuf;
- uint32_t ilen;
- intptr_t obuf;
- uint32_t olen;
- } _ipc_ioctl;
- struct {
- uint32_t request;
- uint32_t argin;
- uint32_t argout;
- intptr_t iovec;
- } _ipc_ioctlv;
- uint32_t _ipc_argv[5];
- } args;
-} __attribute__((packed));
-
-CTASSERT(sizeof(struct wiiipc_ipc_msg) == 32);
-
-#define ipc_open args._ipc_open
-#define ipc_read args._ipc_read
-#define ipc_write args._ipc_write
-#define ipc_ioctl args._ipc_ioctl
-#define ipc_ioctlv args._ipc_ioctlv
-
-#endif /* _POWERPC_WII_WII_IPCREG_H */
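The removed wii_ipc driver never got past attach (its register setup is still under #ifdef notyet), but the packed 32-byte wiiipc_ipc_msg above shows how a request would have been marshalled. A purely hypothetical WIIIPC_CMD_OPEN sketch; whether pathname must hold a physical address, and how the doorbell sequence works, is not established by this header:

	struct wiiipc_ipc_msg msg = { 0 };

	msg.ipc_cmd = WIIIPC_CMD_OPEN;
	msg.ipc_open.pathname = (intptr_t)path_addr;	/* hypothetical buffer address */
	msg.ipc_open.mode = mode;			/* open mode; semantics not defined here */
	/* presumably: write the message's address to WIIIPC_TXBUF, then set WIIIPC_CSR_TXSTART */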
diff --git a/sys/powerpc/wii/wii_pic.c b/sys/powerpc/wii/wii_pic.c
deleted file mode 100644
index 0844a00d3627..000000000000
--- a/sys/powerpc/wii/wii_pic.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/module.h>
-#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/reboot.h>
-
-#include <machine/bus.h>
-#include <machine/platform.h>
-#include <machine/intr_machdep.h>
-#include <machine/resource.h>
-
-#include <powerpc/wii/wii_picreg.h>
-
-#include "pic_if.h"
-
-static int wiipic_probe(device_t);
-static int wiipic_attach(device_t);
-static void wiipic_dispatch(device_t, struct trapframe *);
-static void wiipic_enable(device_t, unsigned int, unsigned int);
-static void wiipic_eoi(device_t, unsigned int);
-static void wiipic_mask(device_t, unsigned int);
-static void wiipic_unmask(device_t, unsigned int);
-static void wiipic_intr(void *);
-
-struct wiipic_softc {
- device_t sc_dev;
- struct resource *sc_rres;
- bus_space_tag_t sc_bt;
- bus_space_handle_t sc_bh;
- int sc_rrid;
- int sc_irqid;
- struct resource *sc_irq;
- void *sc_irqctx;
- int sc_vector[WIIPIC_NIRQ];
-};
-
-static device_method_t wiipic_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, wiipic_probe),
- DEVMETHOD(device_attach, wiipic_attach),
-
- /* PIC interface */
- DEVMETHOD(pic_dispatch, wiipic_dispatch),
- DEVMETHOD(pic_enable, wiipic_enable),
- DEVMETHOD(pic_eoi, wiipic_eoi),
- DEVMETHOD(pic_mask, wiipic_mask),
- DEVMETHOD(pic_unmask, wiipic_unmask),
-
- DEVMETHOD_END
-};
-
-static driver_t wiipic_driver = {
- "wiipic",
- wiipic_methods,
- sizeof(struct wiipic_softc)
-};
-
-static devclass_t wiipic_devclass;
-
-DRIVER_MODULE(wiipic, wiibus, wiipic_driver, wiipic_devclass, 0, 0);
-
-static __inline uint32_t
-wiipic_imr_read(struct wiipic_softc *sc)
-{
-
- return (bus_space_read_4(sc->sc_bt, sc->sc_bh, WIIPIC_IMR));
-}
-
-static __inline void
-wiipic_imr_write(struct wiipic_softc *sc, uint32_t imr)
-{
-
- bus_space_write_4(sc->sc_bt, sc->sc_bh, WIIPIC_IMR, imr);
-}
-
-static __inline uint32_t
-wiipic_icr_read(struct wiipic_softc *sc)
-{
-
- return (bus_space_read_4(sc->sc_bt, sc->sc_bh, WIIPIC_ICR));
-}
-
-static __inline void
-wiipic_icr_write(struct wiipic_softc *sc, uint32_t icr)
-{
-
- bus_space_write_4(sc->sc_bt, sc->sc_bh, WIIPIC_ICR, icr);
-}
-
-static int
-wiipic_probe(device_t dev)
-{
- device_set_desc(dev, "Nintendo Wii PIC");
-
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-wiipic_attach(device_t dev)
-{
- struct wiipic_softc *sc;
-
- sc = device_get_softc(dev);
- sc->sc_dev = dev;
-
- sc->sc_rrid = 0;
- sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->sc_rrid, RF_ACTIVE);
- if (sc->sc_rres == NULL) {
- device_printf(dev, "could not alloc mem resource\n");
- return (ENXIO);
- }
- sc->sc_bt = rman_get_bustag(sc->sc_rres);
- sc->sc_bh = rman_get_bushandle(sc->sc_rres);
-
- /* Turn off all interrupts */
- wiipic_imr_write(sc, 0x00000000);
- wiipic_icr_write(sc, 0xffffffff);
-
- powerpc_register_pic(dev, 0, WIIPIC_NIRQ, 0, FALSE);
-
- /*
- * Setup the interrupt handler.
- */
- sc->sc_irqid = 0;
- sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
- RF_ACTIVE);
- if (sc->sc_irq == NULL) {
- device_printf(dev, "could not alloc IRQ resource\n");
- return (ENXIO);
- }
- bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, wiipic_intr, sc, &sc->sc_irqctx);
-
- return (0);
-}
-
-static void
-wiipic_dispatch(device_t dev, struct trapframe *tf)
-{
- struct wiipic_softc *sc;
- uint32_t irq;
-
- sc = device_get_softc(dev);
- irq = wiipic_icr_read(sc) & wiipic_imr_read(sc);
- if (irq == 0)
- return;
- irq = ffs(irq) - 1;
- KASSERT(irq < WIIPIC_NIRQ, ("bogus irq %d", irq));
- powerpc_dispatch_intr(sc->sc_vector[irq], tf);
-}
-
-static void
-wiipic_enable(device_t dev, unsigned int irq, unsigned int vector)
-{
- struct wiipic_softc *sc;
-
- KASSERT(irq < WIIPIC_NIRQ, ("bogus irq %d", irq));
- sc = device_get_softc(dev);
- sc->sc_vector[irq] = vector;
- wiipic_unmask(dev, irq);
-}
-
-static void
-wiipic_eoi(device_t dev, unsigned int irq)
-{
- struct wiipic_softc *sc;
- uint32_t icr;
-
- sc = device_get_softc(dev);
- icr = wiipic_icr_read(sc);
- icr |= (1 << irq);
- wiipic_icr_write(sc, icr);
-}
-
-static void
-wiipic_mask(device_t dev, unsigned int irq)
-{
- struct wiipic_softc *sc;
- uint32_t imr;
-
- sc = device_get_softc(dev);
- imr = wiipic_imr_read(sc);
- imr &= ~(1 << irq);
- wiipic_imr_write(sc, imr);
-}
-
-static void
-wiipic_unmask(device_t dev, unsigned int irq)
-{
- struct wiipic_softc *sc;
- uint32_t imr;
-
- sc = device_get_softc(dev);
- imr = wiipic_imr_read(sc);
- imr |= (1 << irq);
- wiipic_imr_write(sc, imr);
-}
-
-/*
- * Reset button interrupt.
- */
-static void
-wiipic_intr(void *xsc)
-{
- struct wiipic_softc *sc;
-
- sc = (struct wiipic_softc *)xsc;
- if (wiipic_icr_read(sc) & WIIPIC_RBS)
- shutdown_nice(RB_AUTOBOOT);
-}
-
diff --git a/sys/powerpc/wii/wii_picreg.h b/sys/powerpc/wii/wii_picreg.h
deleted file mode 100644
index 9e0a7e256d2a..000000000000
--- a/sys/powerpc/wii/wii_picreg.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*-
- * Copyright (C) 2012 Margarida Gouveia
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _POWERPC_WII_WII_PICREG_H
-#define _POWERPC_WII_WII_PICREG_H
-
-#define WIIPIC_REG_ADDR 0x0c003000
-#define WIIPIC_REG_LEN 0x28
-
-#define WIIPIC_ICR 0x00
-#define WIIPIC_RBS 0x10000
-#define WIIPIC_IMR 0x04
-#define WIIPIC_RESET 0x24
-
-#define WIIPIC_NIRQ 32
-
-#endif /* _POWERPC_WII_WII_PICREG_H */
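wiipic_dispatch() in wii_pic.c above converts the pending-and-unmasked mask into an IRQ number with ffs(). A worked example using the reset-button source defined in this header:

	icr & imr == WIIPIC_RBS == 0x10000	/* bit 16 pending and unmasked */
	ffs(0x10000) == 17			/* ffs() is 1-based */
	irq = 17 - 1 == 16			/* vector slot looked up in sc_vector[] */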
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
index e6c4dc374a3d..5ad28d399128 100644
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -176,4 +176,14 @@
__bit; \
})
+#define BIT_COUNT(_s, p) __extension__ ({ \
+ __size_t __i; \
+ int __count; \
+ \
+ __count = 0; \
+ for (__i = 0; __i < __bitset_words((_s)); __i++) \
+ __count += __builtin_popcountl((p)->__bits[__i]); \
+ __count; \
+})
+
#endif /* !_SYS_BITSET_H_ */
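The new BIT_COUNT() simply sums the population count of each word backing the set; CPU_COUNT() added to sys/sys/cpuset.h below is a thin wrapper around it. A minimal open-coded equivalent of the macro body (set and setsize are illustrative names, not part of the change):

	int count = 0;
	for (__size_t i = 0; i < __bitset_words(setsize); i++)
		count += __builtin_popcountl(set->__bits[i]);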
diff --git a/sys/sys/bus.h b/sys/sys/bus.h
index b15a5568200b..d6dc53585eba 100644
--- a/sys/sys/bus.h
+++ b/sys/sys/bus.h
@@ -31,6 +31,7 @@
#include <machine/_limits.h>
#include <sys/_bus_dma.h>
+#include <sys/ioccom.h>
/**
* @defgroup NEWBUS newbus - a generic framework for managing devices
@@ -70,14 +71,61 @@ struct u_device {
char dv_pnpinfo[128]; /**< @brief Plug and play info */
char dv_location[128]; /**< @brief Where is the device? */
uint32_t dv_devflags; /**< @brief API Flags for device */
- uint16_t dv_flags; /**< @brief flags for dev date */
+ uint16_t dv_flags; /**< @brief flags for dev state */
device_state_t dv_state; /**< @brief State of attachment */
/* XXX more driver info? */
};
+/* Flags exported via dv_flags. */
+#define DF_ENABLED 0x01 /* device should be probed/attached */
+#define DF_FIXEDCLASS 0x02 /* devclass specified at create time */
+#define DF_WILDCARD 0x04 /* unit was originally wildcard */
+#define DF_DESCMALLOCED 0x08 /* description was malloced */
+#define DF_QUIET 0x10 /* don't print verbose attach message */
+#define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */
+#define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */
+#define DF_REBID 0x80 /* Can rebid after attach */
+#define DF_SUSPENDED 0x100 /* Device is suspended. */
+
+/**
+ * @brief Device request structure used for ioctls.
+ *
+ * Used for ioctls on /dev/devctl2. All device ioctls
+ * must have parameter definitions that begin with dr_name.
+ */
+struct devreq_buffer {
+ void *buffer;
+ size_t length;
+};
+
+struct devreq {
+ char dr_name[128];
+ int dr_flags; /* request-specific flags */
+ union {
+ struct devreq_buffer dru_buffer;
+ void *dru_data;
+ } dr_dru;
+#define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */
+#define dr_data dr_dru.dru_data /* fixed-size buffer */
+};
+
+#define DEV_ATTACH _IOW('D', 1, struct devreq)
+#define DEV_DETACH _IOW('D', 2, struct devreq)
+#define DEV_ENABLE _IOW('D', 3, struct devreq)
+#define DEV_DISABLE _IOW('D', 4, struct devreq)
+#define DEV_SUSPEND _IOW('D', 5, struct devreq)
+#define DEV_RESUME _IOW('D', 6, struct devreq)
+#define DEV_SET_DRIVER _IOW('D', 7, struct devreq)
+
+/* Flags for DEV_DETACH and DEV_DISABLE. */
+#define DEVF_FORCE_DETACH 0x0000001
+
+/* Flags for DEV_SET_DRIVER. */
+#define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */
+
#ifdef _KERNEL
-#include <sys/queue.h>
+#include <sys/eventhandler.h>
#include <sys/kobj.h>
/**
@@ -94,6 +142,14 @@ void devctl_queue_data_f(char *__data, int __flags);
void devctl_queue_data(char *__data);
/**
+ * Device name parsers. Hook to allow device enumerators to map
+ * scheme-specific names to a device.
+ */
+typedef void (*dev_lookup_fn)(void *arg, const char *name,
+ device_t *result);
+EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn);
+
+/**
* @brief A device driver (included mainly for compatibility with
* FreeBSD 4.x).
*/
@@ -454,6 +510,7 @@ struct sysctl_oid *device_get_sysctl_tree(device_t dev);
int device_is_alive(device_t dev); /* did probe succeed? */
int device_is_attached(device_t dev); /* did attach succeed? */
int device_is_enabled(device_t dev);
+int device_is_suspended(device_t dev);
int device_is_quiet(device_t dev);
int device_print_prettyname(device_t dev);
int device_printf(device_t dev, const char *, ...) __printflike(2, 3);
@@ -517,6 +574,8 @@ int resource_set_long(const char *name, int unit, const char *resname,
long value);
int resource_set_string(const char *name, int unit, const char *resname,
const char *value);
+int resource_unset_value(const char *name, int unit, const char *resname);
+
/*
* Functions for maintaining and checking consistency of
* bus information exported to userspace.
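The devreq additions above define the request format for the new /dev/devctl2 ioctls. A minimal userland sketch, assuming the node can simply be open(2)'d and using a made-up device name; error handling omitted:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/bus.h>
	#include <fcntl.h>
	#include <string.h>

	int
	main(void)
	{
		struct devreq req;
		int fd;

		fd = open("/dev/devctl2", O_RDWR);	/* open flags are an assumption */
		memset(&req, 0, sizeof(req));
		strlcpy(req.dr_name, "pci0", sizeof(req.dr_name)); /* hypothetical device */
		return (ioctl(fd, DEV_DISABLE, &req));	/* or DEV_ATTACH, DEV_SUSPEND, ... */
	}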
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index 1096cb26ff92..910d652f7dd1 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -64,6 +64,7 @@ struct callout_handle {
#ifdef _KERNEL
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
+#define callout_migrating(c) ((c)->c_flags & CALLOUT_DFRMIGRATION)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
#define callout_drain(c) _callout_stop_safe(c, 1)
void callout_init(struct callout *, int);
diff --git a/sys/sys/cdefs.h b/sys/sys/cdefs.h
index b17e415eb5e2..25ba218cca87 100644
--- a/sys/sys/cdefs.h
+++ b/sys/sys/cdefs.h
@@ -293,7 +293,8 @@
#elif defined(__COUNTER__)
#define _Static_assert(x, y) __Static_assert(x, __COUNTER__)
#define __Static_assert(x, y) ___Static_assert(x, y)
-#define ___Static_assert(x, y) typedef char __assert_ ## y[(x) ? 1 : -1]
+#define ___Static_assert(x, y) typedef char __assert_ ## y[(x) ? 1 : -1] \
+ __unused
#else
#define _Static_assert(x, y) struct __hack
#endif
diff --git a/sys/sys/copyright.h b/sys/sys/copyright.h
index 3e1b2b5687e8..6e47358ac14b 100644
--- a/sys/sys/copyright.h
+++ b/sys/sys/copyright.h
@@ -28,6 +28,10 @@
/* Copyrights macros */
+/* Add a FreeBSD vendor copyright here */
+#define COPYRIGHT_Vendor \
+ ""
+
/* FreeBSD */
#define COPYRIGHT_FreeBSD \
"Copyright (c) 1992-2015 The FreeBSD Project.\n"
@@ -48,5 +52,6 @@
#define COPYRIGHT_PC98
#endif
-char copyright[] = COPYRIGHT_FreeBSD COPYRIGHT_PC98 COPYRIGHT_UCB;
+char copyright[] = COPYRIGHT_Vendor COPYRIGHT_FreeBSD COPYRIGHT_PC98 \
+ COPYRIGHT_UCB;
char trademark[] = TRADEMARK_Foundation;
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index d3d60e773ec3..9ccba5890216 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -60,6 +60,7 @@
#define CPU_OR_ATOMIC(d, s) BIT_OR_ATOMIC(CPU_SETSIZE, d, s)
#define CPU_COPY_STORE_REL(f, t) BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
#define CPU_FFS(p) BIT_FFS(CPU_SETSIZE, p)
+#define CPU_COUNT(p) BIT_COUNT(CPU_SETSIZE, p)
/*
* Valid cpulevel_t values.
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 64e46d9e6499..aba65aca1972 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1100057 /* Master, propagated to newvers */
+#define __FreeBSD_version 1100059 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 5434d5ac7d62..3763e5ca05ce 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -357,7 +357,6 @@ static __inline intrmask_t splhigh(void) { return 0; }
static __inline intrmask_t splimp(void) { return 0; }
static __inline intrmask_t splnet(void) { return 0; }
static __inline intrmask_t spltty(void) { return 0; }
-static __inline intrmask_t splvm(void) { return 0; }
static __inline void splx(intrmask_t ipl __unused) { return; }
/*
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 79783c8efaf4..700854e9466f 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -1393,7 +1393,7 @@ softdep_flush(addr)
VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
kthread_suspend_check();
ACQUIRE_LOCK(ump);
- while ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
+ if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
"sdflush", hz / 2);
ump->softdep_flags &= ~FLUSH_CLEANUP;
@@ -1423,10 +1423,9 @@ worklist_speedup(mp)
ump = VFSTOUFS(mp);
LOCK_OWNED(ump);
- if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) {
+ if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
ump->softdep_flags |= FLUSH_CLEANUP;
- wakeup(&ump->softdep_flushtd);
- }
+ wakeup(&ump->softdep_flushtd);
}
static int
@@ -1471,11 +1470,10 @@ softdep_speedup(ump)
TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
FREE_GBLLOCK(&lk);
if ((altump->softdep_flags &
- (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) {
+ (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
altump->softdep_flags |= FLUSH_CLEANUP;
- altump->um_softdep->sd_cleanups++;
- wakeup(&altump->softdep_flushtd);
- }
+ altump->um_softdep->sd_cleanups++;
+ wakeup(&altump->softdep_flushtd);
FREE_LOCK(altump);
}
}
diff --git a/sys/x86/acpica/madt.c b/sys/x86/acpica/madt.c
index b9cc895fbd06..f20b73521e9c 100644
--- a/sys/x86/acpica/madt.c
+++ b/sys/x86/acpica/madt.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <x86/apicreg.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
+#include <machine/md_var.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/actables.h>
@@ -127,8 +128,27 @@ madt_probe_cpus(void)
static int
madt_setup_local(void)
{
+ ACPI_TABLE_DMAR *dmartbl;
+ vm_paddr_t dmartbl_physaddr;
madt = pmap_mapbios(madt_physaddr, madt_length);
+ if ((cpu_feature2 & CPUID2_X2APIC) != 0) {
+ x2apic_mode = 1;
+ dmartbl_physaddr = acpi_find_table(ACPI_SIG_DMAR);
+ if (dmartbl_physaddr != 0) {
+ dmartbl = acpi_map_table(dmartbl_physaddr,
+ ACPI_SIG_DMAR);
+ if ((dmartbl->Flags & ACPI_DMAR_X2APIC_OPT_OUT) != 0) {
+ x2apic_mode = 0;
+ if (bootverbose)
+ printf(
+ "x2APIC available but disabled by DMAR table\n");
+ }
+ acpi_unmap_table(dmartbl);
+ }
+ TUNABLE_INT_FETCH("hw.x2apic_enable", &x2apic_mode);
+ }
+
lapic_init(madt->Address);
printf("ACPI APIC Table: <%.*s %.*s>\n",
(int)sizeof(madt->Header.OemId), madt->Header.OemId,
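With this change x2APIC is enabled whenever CPUID advertises it, unless the firmware's DMAR table opts out; the trailing TUNABLE_INT_FETCH() lets the result be overridden from the loader. For example, in loader.conf (the tunable name is taken verbatim from the code above):

	hw.x2apic_enable=0	# stay in legacy xAPIC mode even if x2APIC is available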
diff --git a/sys/x86/include/apicreg.h b/sys/x86/include/apicreg.h
index 283d50e72226..35630c7d677a 100644
--- a/sys/x86/include/apicreg.h
+++ b/sys/x86/include/apicreg.h
@@ -193,6 +193,66 @@ struct LAPIC {
typedef struct LAPIC lapic_t;
+enum LAPIC_REGISTERS {
+ LAPIC_ID = 0x2,
+ LAPIC_VERSION = 0x3,
+ LAPIC_TPR = 0x8,
+ LAPIC_APR = 0x9,
+ LAPIC_PPR = 0xa,
+ LAPIC_EOI = 0xb,
+ LAPIC_LDR = 0xd,
+ LAPIC_DFR = 0xe, /* Not in x2APIC */
+ LAPIC_SVR = 0xf,
+ LAPIC_ISR0 = 0x10,
+ LAPIC_ISR1 = 0x11,
+ LAPIC_ISR2 = 0x12,
+ LAPIC_ISR3 = 0x13,
+ LAPIC_ISR4 = 0x14,
+ LAPIC_ISR5 = 0x15,
+ LAPIC_ISR6 = 0x16,
+ LAPIC_ISR7 = 0x17,
+ LAPIC_TMR0 = 0x18,
+ LAPIC_TMR1 = 0x19,
+ LAPIC_TMR2 = 0x1a,
+ LAPIC_TMR3 = 0x1b,
+ LAPIC_TMR4 = 0x1c,
+ LAPIC_TMR5 = 0x1d,
+ LAPIC_TMR6 = 0x1e,
+ LAPIC_TMR7 = 0x1f,
+ LAPIC_IRR0 = 0x20,
+ LAPIC_IRR1 = 0x21,
+ LAPIC_IRR2 = 0x22,
+ LAPIC_IRR3 = 0x23,
+ LAPIC_IRR4 = 0x24,
+ LAPIC_IRR5 = 0x25,
+ LAPIC_IRR6 = 0x26,
+ LAPIC_IRR7 = 0x27,
+ LAPIC_ESR = 0x28,
+ LAPIC_LVT_CMCI = 0x2f,
+ LAPIC_ICR_LO = 0x30,
+ LAPIC_ICR_HI = 0x31, /* Not in x2APIC */
+ LAPIC_LVT_TIMER = 0x32,
+ LAPIC_LVT_THERMAL = 0x33,
+ LAPIC_LVT_PCINT = 0x34,
+ LAPIC_LVT_LINT0 = 0x35,
+ LAPIC_LVT_LINT1 = 0x36,
+ LAPIC_LVT_ERROR = 0x37,
+ LAPIC_ICR_TIMER = 0x38,
+ LAPIC_CCR_TIMER = 0x39,
+ LAPIC_DCR_TIMER = 0x3e,
+ LAPIC_SELF_IPI = 0x3f, /* Only in x2APIC */
+};
+
+/*
+ * The LAPIC_SELF_IPI register exists only in x2APIC mode. The
+ * formula below is used only to size the reserved memory region,
+ * i.e. for xAPIC mode, where LAPIC_SELF_IPI conveniently serves as
+ * the offset just past the end of the region.
+ */
+#define LAPIC_MEM_REGION (LAPIC_SELF_IPI * 0x10)
+
+#define LAPIC_MEM_MUL 0x10
+
/******************************************************************************
* I/O APIC structure
*/
diff --git a/sys/x86/include/apicvar.h b/sys/x86/include/apicvar.h
index 35603e8ce190..f7dfec816f12 100644
--- a/sys/x86/include/apicvar.h
+++ b/sys/x86/include/apicvar.h
@@ -189,6 +189,7 @@ int ioapic_set_smi(void *cookie, u_int pin);
struct apic_ops {
void (*create)(u_int, int);
void (*init)(vm_paddr_t);
+ void (*xapic_mode)(void);
void (*setup)(int);
void (*dump)(const char *);
void (*disable)(void);
@@ -243,6 +244,13 @@ lapic_init(vm_paddr_t addr)
}
static inline void
+lapic_xapic_mode(void)
+{
+
+ apic_ops.xapic_mode();
+}
+
+static inline void
lapic_setup(int boot)
{
@@ -417,5 +425,11 @@ void lapic_handle_intr(int vector, struct trapframe *frame);
void lapic_handle_timer(struct trapframe *frame);
void xen_intr_handle_upcall(struct trapframe *frame);
+extern int x2apic_mode;
+
+#ifdef _SYS_SYSCTL_H_
+SYSCTL_DECL(_hw_apic);
+#endif
+
#endif /* !LOCORE */
#endif /* _X86_APICVAR_H_ */
diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h
index 4d1086eeae60..60f46fb68e59 100644
--- a/sys/x86/include/specialreg.h
+++ b/sys/x86/include/specialreg.h
@@ -470,6 +470,7 @@
/*
* X2APIC MSRs
*/
+#define MSR_APIC_000 0x800
#define MSR_APIC_ID 0x802
#define MSR_APIC_VERSION 0x803
#define MSR_APIC_TPR 0x808
diff --git a/sys/x86/x86/io_apic.c b/sys/x86/x86/io_apic.c
index 5667120661b4..ec0a71ec615b 100644
--- a/sys/x86/x86/io_apic.c
+++ b/sys/x86/x86/io_apic.c
@@ -130,7 +130,6 @@ struct pic ioapic_template = { ioapic_enable_source, ioapic_disable_source,
static int next_ioapic_base;
static u_int next_id;
-static SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD, 0, "APIC options");
static int enable_extint;
SYSCTL_INT(_hw_apic, OID_AUTO, enable_extint, CTLFLAG_RDTUN, &enable_extint, 0,
"Enable the ExtINT pin in the first I/O APIC");
@@ -896,7 +895,7 @@ apic_attach(device_t dev)
int i;
/* Reserve the local APIC. */
- apic_add_resource(dev, 0, lapic_paddr, sizeof(lapic_t));
+ apic_add_resource(dev, 0, lapic_paddr, LAPIC_MEM_REGION);
i = 1;
STAILQ_FOREACH(io, &ioapic_list, io_next) {
apic_add_resource(dev, i, io->io_paddr, IOAPIC_MEM_REGION);
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index e3228ce34952..1809fa6da9d4 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -49,12 +49,14 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
+#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <x86/apicreg.h>
+#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
@@ -154,11 +156,99 @@ static u_int32_t lapic_timer_divisors[] = {
extern inthand_t IDTVEC(rsvd);
-volatile lapic_t *lapic;
+volatile char *lapic_map;
vm_paddr_t lapic_paddr;
+int x2apic_mode;
static u_long lapic_timer_divisor;
static struct eventtimer lapic_et;
+SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD, 0, "APIC options");
+SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
+
+static uint32_t
+lapic_read32(enum LAPIC_REGISTERS reg)
+{
+ uint32_t res;
+
+ if (x2apic_mode) {
+ res = rdmsr32(MSR_APIC_000 + reg);
+ } else {
+ res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
+ }
+ return (res);
+}
+
+static void
+lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
+{
+
+ if (x2apic_mode) {
+ mfence();
+ wrmsr(MSR_APIC_000 + reg, val);
+ } else {
+ *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
+ }
+}
+
+static void
+lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
+{
+
+ if (x2apic_mode) {
+ wrmsr(MSR_APIC_000 + reg, val);
+ } else {
+ *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
+ }
+}
+
+static uint64_t
+lapic_read_icr(void)
+{
+ uint64_t v;
+ uint32_t vhi, vlo;
+
+ if (x2apic_mode) {
+ v = rdmsr(MSR_APIC_000 + LAPIC_ICR_LO);
+ } else {
+ vhi = lapic_read32(LAPIC_ICR_HI);
+ vlo = lapic_read32(LAPIC_ICR_LO);
+ v = ((uint64_t)vhi << 32) | vlo;
+ }
+ return (v);
+}
+
+static uint64_t
+lapic_read_icr_lo(void)
+{
+
+ return (lapic_read32(LAPIC_ICR_LO));
+}
+
+static void
+lapic_write_icr(uint32_t vhi, uint32_t vlo)
+{
+ uint64_t v;
+
+ if (x2apic_mode) {
+ v = ((uint64_t)vhi << 32) | vlo;
+ mfence();
+ wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
+ } else {
+ lapic_write32(LAPIC_ICR_HI, vhi);
+ lapic_write32(LAPIC_ICR_LO, vlo);
+ }
+}
+
+static void
+native_lapic_enable_x2apic(void)
+{
+ uint64_t apic_base;
+
+ apic_base = rdmsr(MSR_APICBASE);
+ apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
+ wrmsr(MSR_APICBASE, apic_base);
+}
+
static void lapic_enable(void);
static void lapic_resume(struct pic *pic, bool suspend_cancelled);
static void lapic_timer_oneshot(struct lapic *,
@@ -179,6 +269,7 @@ struct pic lapic_pic = { .pic_resume = lapic_resume };
/* Forward declarations for apic_ops */
static void native_lapic_create(u_int apic_id, int boot_cpu);
static void native_lapic_init(vm_paddr_t addr);
+static void native_lapic_xapic_mode(void);
static void native_lapic_setup(int boot);
static void native_lapic_dump(const char *str);
static void native_lapic_disable(void);
@@ -213,6 +304,7 @@ static int native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
struct apic_ops apic_ops = {
.create = native_lapic_create,
.init = native_lapic_init,
+ .xapic_mode = native_lapic_xapic_mode,
.setup = native_lapic_setup,
.dump = native_lapic_dump,
.disable = native_lapic_disable,
@@ -291,11 +383,20 @@ native_lapic_init(vm_paddr_t addr)
u_int regs[4];
int i, arat;
- /* Map the local APIC and setup the spurious interrupt handler. */
+ /*
+ * Enable x2APIC mode if possible, otherwise map the local
+ * APIC registers page.
+ */
KASSERT(trunc_page(addr) == addr,
("local APIC not aligned on a page boundary"));
- lapic_paddr = addr;
- lapic = pmap_mapdev(addr, sizeof(lapic_t));
+ if (x2apic_mode) {
+ native_lapic_enable_x2apic();
+ } else {
+ lapic_paddr = addr;
+ lapic_map = pmap_mapdev(addr, PAGE_SIZE);
+ }
+
+ /* Setup the spurious interrupt handler. */
setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
GSEL_APIC);
@@ -398,33 +499,51 @@ native_lapic_dump(const char* str)
{
uint32_t maxlvt;
- maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
- printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
- lapic->id, lapic->version, lapic->ldr, lapic->dfr);
- printf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
- lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
+ printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
+ lapic_read32(LAPIC_ID), lapic_read32(LAPIC_VERSION),
+ lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
+ if ((cpu_feature2 & CPUID2_X2APIC) != 0)
+ printf(" x2APIC: %d", x2apic_mode);
+ printf("\n lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
+ lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
+ lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x",
- lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error);
+ lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
+ lapic_read32(LAPIC_LVT_ERROR));
if (maxlvt >= APIC_LVT_PMC)
- printf(" pmc: 0x%08x", lapic->lvt_pcint);
+ printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
printf("\n");
if (maxlvt >= APIC_LVT_CMCI)
- printf(" cmci: 0x%08x\n", lapic->lvt_cmci);
+ printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
+}
+
+static void
+native_lapic_xapic_mode(void)
+{
+ register_t saveintr;
+
+ saveintr = intr_disable();
+ if (x2apic_mode)
+ native_lapic_enable_x2apic();
+ native_lapic_disable();
+ intr_restore(saveintr);
}
static void
native_lapic_setup(int boot)
{
struct lapic *la;
- u_int32_t maxlvt;
+ uint32_t maxlvt;
register_t saveintr;
char buf[MAXCOMLEN + 1];
+ saveintr = intr_disable();
+
la = &lapics[lapic_id()];
KASSERT(la->la_present, ("missing APIC structure"));
- saveintr = intr_disable();
- maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
/* Initialize the TPR to allow all interrupts. */
lapic_set_tpr(0);
@@ -433,16 +552,21 @@ native_lapic_setup(int boot)
lapic_enable();
/* Program LINT[01] LVT entries. */
- lapic->lvt_lint0 = lvt_mode(la, APIC_LVT_LINT0, lapic->lvt_lint0);
- lapic->lvt_lint1 = lvt_mode(la, APIC_LVT_LINT1, lapic->lvt_lint1);
+ lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
+ lapic_read32(LAPIC_LVT_LINT0)));
+ lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
+ lapic_read32(LAPIC_LVT_LINT1)));
/* Program the PMC LVT entry if present. */
- if (maxlvt >= APIC_LVT_PMC)
- lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);
+ if (maxlvt >= APIC_LVT_PMC) {
+ lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
+ LAPIC_LVT_PCINT));
+ }
/* Program timer LVT and setup handler. */
- la->lvt_timer_cache = lapic->lvt_timer =
- lvt_mode(la, APIC_LVT_TIMER, lapic->lvt_timer);
+ la->lvt_timer_cache = lvt_mode(la, APIC_LVT_TIMER,
+ lapic_read32(LAPIC_LVT_TIMER));
+ lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_cache);
if (boot) {
snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
intrcnt_add(buf, &la->la_timer_count);
@@ -460,14 +584,17 @@ native_lapic_setup(int boot)
}
/* Program error LVT and clear any existing errors. */
- lapic->lvt_error = lvt_mode(la, APIC_LVT_ERROR, lapic->lvt_error);
- lapic->esr = 0;
+ lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
+ lapic_read32(LAPIC_LVT_ERROR)));
+ lapic_write32(LAPIC_ESR, 0);
/* XXX: Thermal LVT */
/* Program the CMCI LVT entry if present. */
- if (maxlvt >= APIC_LVT_CMCI)
- lapic->lvt_cmci = lvt_mode(la, APIC_LVT_CMCI, lapic->lvt_cmci);
+ if (maxlvt >= APIC_LVT_CMCI) {
+ lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
+ lapic_read32(LAPIC_LVT_CMCI)));
+ }
intr_restore(saveintr);
}
@@ -478,9 +605,9 @@ native_lapic_reenable_pmc(void)
#ifdef HWPMC_HOOKS
uint32_t value;
- value = lapic->lvt_pcint;
+ value = lapic_read32(LAPIC_LVT_PCINT);
value &= ~APIC_LVT_M;
- lapic->lvt_pcint = value;
+ lapic_write32(LAPIC_LVT_PCINT, value);
#endif
}
@@ -491,7 +618,8 @@ lapic_update_pmc(void *dummy)
struct lapic *la;
la = &lapics[lapic_id()];
- lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);
+ lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
+ lapic_read32(LAPIC_LVT_PCINT)));
}
#endif
@@ -502,11 +630,11 @@ native_lapic_enable_pmc(void)
u_int32_t maxlvt;
/* Fail if the local APIC is not present. */
- if (lapic == NULL)
+ if (!x2apic_mode && lapic_map == NULL)
return (0);
/* Fail if the PMC LVT is not present. */
- maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
if (maxlvt < APIC_LVT_PMC)
return (0);
@@ -536,11 +664,11 @@ native_lapic_disable_pmc(void)
u_int32_t maxlvt;
/* Fail if the local APIC is not present. */
- if (lapic == NULL)
+ if (!x2apic_mode && lapic_map == NULL)
return;
/* Fail if the PMC LVT is not present. */
- maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
+ maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
if (maxlvt < APIC_LVT_PMC)
return;
@@ -569,7 +697,8 @@ lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
lapic_timer_set_divisor(lapic_timer_divisor);
lapic_timer_oneshot(la, APIC_TIMER_MAX_COUNT, 0);
DELAY(1000000);
- value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
+ value = APIC_TIMER_MAX_COUNT -
+ lapic_read32(LAPIC_CCR_TIMER);
if (value != APIC_TIMER_MAX_COUNT)
break;
lapic_timer_divisor <<= 1;
@@ -613,21 +742,21 @@ native_lapic_disable(void)
uint32_t value;
/* Software disable the local APIC. */
- value = lapic->svr;
+ value = lapic_read32(LAPIC_SVR);
value &= ~APIC_SVR_SWEN;
- lapic->svr = value;
+ lapic_write32(LAPIC_SVR, value);
}
static void
lapic_enable(void)
{
- u_int32_t value;
+ uint32_t value;
/* Program the spurious vector to enable the local APIC. */
- value = lapic->svr;
+ value = lapic_read32(LAPIC_SVR);
value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
- value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
- lapic->svr = value;
+ value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
+ lapic_write32(LAPIC_SVR, value);
}
/* Reset the local APIC on the BSP during resume. */
@@ -641,27 +770,29 @@ lapic_resume(struct pic *pic, bool suspend_cancelled)
static int
native_lapic_id(void)
{
+ uint32_t v;
- KASSERT(lapic != NULL, ("local APIC is not mapped"));
- return (lapic->id >> APIC_ID_SHIFT);
+ KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
+ v = lapic_read32(LAPIC_ID);
+ if (!x2apic_mode)
+ v >>= APIC_ID_SHIFT;
+ return (v);
}
static int
native_lapic_intr_pending(u_int vector)
{
- volatile u_int32_t *irr;
+ uint32_t irr;
/*
- * The IRR registers are an array of 128-bit registers each of
- * which only describes 32 interrupts in the low 32 bits.. Thus,
- * we divide the vector by 32 to get the 128-bit index. We then
- * multiply that index by 4 to get the equivalent index from
- * treating the IRR as an array of 32-bit registers. Finally, we
- * modulus the vector by 32 to determine the individual bit to
- * test.
+ * The IRR registers are an array of registers each of which
+ * only describes 32 interrupts in the low 32 bits. Thus, we
+ * divide the vector by 32 to get the register index.
+	 * We then take the vector modulo 32 to determine the
+	 * individual bit to test.
*/
- irr = &lapic->irr0;
- return (irr[(vector / 32) * 4] & 1 << (vector % 32));
+ irr = lapic_read32(LAPIC_IRR0 + vector / 32);
+ return (irr & 1 << (vector % 32));
}
static void
@@ -818,13 +949,13 @@ static void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
- lapic->tpr = vector;
+ lapic_write32(LAPIC_TPR, vector);
#else
- u_int32_t tpr;
+ uint32_t tpr;
- tpr = lapic->tpr & ~APIC_TPR_PRIO;
+ tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
tpr |= vector;
- lapic->tpr = tpr;
+ lapic_write32(LAPIC_TPR, tpr);
#endif
}
@@ -832,7 +963,7 @@ static void
native_lapic_eoi(void)
{
- lapic->eoi = 0;
+ lapic_write32_nofence(LAPIC_EOI, 0);
}
void
@@ -894,46 +1025,46 @@ lapic_timer_set_divisor(u_int divisor)
KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
- lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
+ lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
}
static void
lapic_timer_oneshot(struct lapic *la, u_int count, int enable_int)
{
- u_int32_t value;
+ uint32_t value;
value = la->lvt_timer_cache;
value &= ~APIC_LVTT_TM;
value |= APIC_LVTT_TM_ONE_SHOT;
if (enable_int)
value &= ~APIC_LVT_M;
- lapic->lvt_timer = value;
- lapic->icr_timer = count;
+ lapic_write32(LAPIC_LVT_TIMER, value);
+ lapic_write32(LAPIC_ICR_TIMER, count);
}
static void
lapic_timer_periodic(struct lapic *la, u_int count, int enable_int)
{
- u_int32_t value;
+ uint32_t value;
value = la->lvt_timer_cache;
value &= ~APIC_LVTT_TM;
value |= APIC_LVTT_TM_PERIODIC;
if (enable_int)
value &= ~APIC_LVT_M;
- lapic->lvt_timer = value;
- lapic->icr_timer = count;
+ lapic_write32(LAPIC_LVT_TIMER, value);
+ lapic_write32(LAPIC_ICR_TIMER, count);
}
static void
lapic_timer_stop(struct lapic *la)
{
- u_int32_t value;
+ uint32_t value;
value = la->lvt_timer_cache;
value &= ~APIC_LVTT_TM;
value |= APIC_LVT_M;
- lapic->lvt_timer = value;
+ lapic_write32(LAPIC_LVT_TIMER, value);
}
void
@@ -956,7 +1087,7 @@ native_lapic_enable_cmc(void)
u_int apic_id;
#ifdef DEV_ATPIC
- if (lapic == NULL)
+ if (!x2apic_mode && lapic_map == NULL)
return;
#endif
apic_id = PCPU_GET(apic_id);
@@ -971,7 +1102,7 @@ native_lapic_enable_cmc(void)
void
lapic_handle_error(void)
{
- u_int32_t esr;
+ uint32_t esr;
/*
* Read the contents of the error status register. Write to
@@ -979,8 +1110,8 @@ lapic_handle_error(void)
* to update its value to indicate any errors that have
* occurred since the previous write to the register.
*/
- lapic->esr = 0;
- esr = lapic->esr;
+ lapic_write32(LAPIC_ESR, 0);
+ esr = lapic_read32(LAPIC_ESR);
printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
lapic_eoi();
@@ -1252,48 +1383,49 @@ DB_SHOW_COMMAND(lapic, db_show_lapic)
uint32_t v;
db_printf("lapic ID = %d\n", lapic_id());
- v = lapic->version;
+ v = lapic_read32(LAPIC_VERSION);
db_printf("version = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
v & 0xf);
db_printf("max LVT = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
- v = lapic->svr;
+ v = lapic_read32(LAPIC_SVR);
db_printf("SVR = %02x (%s)\n", v & APIC_SVR_VECTOR,
v & APIC_SVR_ENABLE ? "enabled" : "disabled");
- db_printf("TPR = %02x\n", lapic->tpr);
+ db_printf("TPR = %02x\n", lapic_read32(LAPIC_TPR));
-#define dump_field(prefix, index) \
- dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index, \
+#define dump_field(prefix, regn, index) \
+ dump_mask(__XSTRING(prefix ## index), \
+ lapic_read32(LAPIC_ ## regn ## index), \
index * 32)
db_printf("In-service Interrupts:\n");
- dump_field(isr, 0);
- dump_field(isr, 1);
- dump_field(isr, 2);
- dump_field(isr, 3);
- dump_field(isr, 4);
- dump_field(isr, 5);
- dump_field(isr, 6);
- dump_field(isr, 7);
+ dump_field(isr, ISR, 0);
+ dump_field(isr, ISR, 1);
+ dump_field(isr, ISR, 2);
+ dump_field(isr, ISR, 3);
+ dump_field(isr, ISR, 4);
+ dump_field(isr, ISR, 5);
+ dump_field(isr, ISR, 6);
+ dump_field(isr, ISR, 7);
db_printf("TMR Interrupts:\n");
- dump_field(tmr, 0);
- dump_field(tmr, 1);
- dump_field(tmr, 2);
- dump_field(tmr, 3);
- dump_field(tmr, 4);
- dump_field(tmr, 5);
- dump_field(tmr, 6);
- dump_field(tmr, 7);
+ dump_field(tmr, TMR, 0);
+ dump_field(tmr, TMR, 1);
+ dump_field(tmr, TMR, 2);
+ dump_field(tmr, TMR, 3);
+ dump_field(tmr, TMR, 4);
+ dump_field(tmr, TMR, 5);
+ dump_field(tmr, TMR, 6);
+ dump_field(tmr, TMR, 7);
db_printf("IRR Interrupts:\n");
- dump_field(irr, 0);
- dump_field(irr, 1);
- dump_field(irr, 2);
- dump_field(irr, 3);
- dump_field(irr, 4);
- dump_field(irr, 5);
- dump_field(irr, 6);
- dump_field(irr, 7);
+ dump_field(irr, IRR, 0);
+ dump_field(irr, IRR, 1);
+ dump_field(irr, IRR, 2);
+ dump_field(irr, IRR, 3);
+ dump_field(irr, IRR, 4);
+ dump_field(irr, IRR, 5);
+ dump_field(irr, IRR, 6);
+ dump_field(irr, IRR, 7);
#undef dump_field
}
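For reference, the new regn argument supplies the uppercase register-name spelling so the macro can build both the lowercase label and the LAPIC_* register name from the same index. A call such as dump_field(irr, IRR, 3) therefore expands to:

	dump_mask("irr3", lapic_read32(LAPIC_IRR3), 3 * 32);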
@@ -1452,22 +1584,28 @@ SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);
static int
native_lapic_ipi_wait(int delay)
{
- int x, incr;
+ int x;
+
+ /* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode. */
+ if (x2apic_mode)
+ return (1);
/*
- * Wait delay loops for IPI to be sent. This is highly bogus
- * since this is sensitive to CPU clock speed. If delay is
+ * Wait delay microseconds for IPI to be sent. If delay is
* -1, we wait forever.
*/
if (delay == -1) {
- incr = 0;
- delay = 1;
- } else
- incr = 1;
- for (x = 0; x < delay; x += incr) {
- if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
+ while ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) !=
+ APIC_DELSTAT_IDLE)
+ ia32_pause();
+ return (1);
+ }
+
+ for (x = 0; x < delay; x += 5) {
+ if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
+ APIC_DELSTAT_IDLE)
return (1);
- ia32_pause();
+ DELAY(5);
}
return (0);
}
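Because the rewritten loop advances in DELAY(5) steps, the delay argument is now a budget in microseconds rather than a raw iteration count; that is also why the BEFORE_SPIN and AFTER_SPIN constants shrink further down in this diff. A hypothetical caller, with the panic text as an illustration only:

	/* Give a previously sent IPI up to 50 ms (50000 us) to be delivered. */
	if (!native_lapic_ipi_wait(BEFORE_SPIN))
		panic("APIC: previous IPI is stuck");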
@@ -1475,35 +1613,51 @@ native_lapic_ipi_wait(int delay)
static void
native_lapic_ipi_raw(register_t icrlo, u_int dest)
{
- register_t value, saveintr;
+ uint64_t icr;
+ uint32_t vhi, vlo;
+ register_t saveintr;
/* XXX: Need more sanity checking of icrlo? */
- KASSERT(lapic != NULL, ("%s called too early", __func__));
- KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
+ KASSERT(x2apic_mode || lapic_map != NULL,
+ ("%s called too early", __func__));
+ KASSERT(x2apic_mode ||
+ (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
("%s: invalid dest field", __func__));
KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
("%s: reserved bits set in ICR LO register", __func__));
/* Set destination in ICR HI register if it is being used. */
saveintr = intr_disable();
+ if (!x2apic_mode)
+ icr = lapic_read_icr();
+
if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
- value = lapic->icr_hi;
- value &= ~APIC_ID_MASK;
- value |= dest << APIC_ID_SHIFT;
- lapic->icr_hi = value;
+ if (x2apic_mode) {
+ vhi = dest;
+ } else {
+ vhi = icr >> 32;
+ vhi &= ~APIC_ID_MASK;
+ vhi |= dest << APIC_ID_SHIFT;
+ }
+ } else {
+ vhi = 0;
}
/* Program the contents of the IPI and dispatch it. */
- value = lapic->icr_lo;
- value &= APIC_ICRLO_RESV_MASK;
- value |= icrlo;
- lapic->icr_lo = value;
+ if (x2apic_mode) {
+ vlo = icrlo;
+ } else {
+ vlo = icr;
+ vlo &= APIC_ICRLO_RESV_MASK;
+ vlo |= icrlo;
+ }
+ lapic_write_icr(vhi, vlo);
intr_restore(saveintr);
}
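The vhi/vlo split above exists because the two modes dispatch an IPI differently: the xAPIC takes two 32-bit MMIO writes (ICR_HI, then ICR_LO, which triggers the send), while the x2APIC takes the whole 64-bit ICR in a single MSR write. A sketch of the lapic_write_icr() helper this code calls, with the LAPIC_ICR_HI/LAPIC_ICR_LO register names and the MSR arithmetic as assumptions:

static void
lapic_write_icr(uint32_t vhi, uint32_t vlo)
{
	uint64_t v;

	if (x2apic_mode) {
		v = (uint64_t)vhi << 32 | vlo;
		/* Make earlier stores visible before the IPI goes out. */
		mfence();
		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
	} else {
		lapic_write32(LAPIC_ICR_HI, vhi);
		lapic_write32(LAPIC_ICR_LO, vlo);	/* dispatches the IPI */
	}
}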
-#define BEFORE_SPIN 1000000
+#define BEFORE_SPIN 50000
#ifdef DETECT_DEADLOCK
-#define AFTER_SPIN 1000
+#define AFTER_SPIN 50
#endif
static void
@@ -1514,7 +1668,7 @@ native_lapic_ipi_vectored(u_int vector, int dest)
KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
("%s: invalid vector %d", __func__, vector));
- icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE;
+ icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
/*
* IPI_STOP_HARD is just a "fake" vector used to send an NMI.
@@ -1522,9 +1676,9 @@ native_lapic_ipi_vectored(u_int vector, int dest)
* the vector.
*/
if (vector == IPI_STOP_HARD)
- icrlo |= APIC_DELMODE_NMI | APIC_LEVEL_ASSERT;
+ icrlo |= APIC_DELMODE_NMI;
else
- icrlo |= vector | APIC_DELMODE_FIXED | APIC_LEVEL_DEASSERT;
+ icrlo |= vector | APIC_DELMODE_FIXED;
destfield = 0;
switch (dest) {
case APIC_IPI_DEST_SELF:
@@ -1537,7 +1691,8 @@ native_lapic_ipi_vectored(u_int vector, int dest)
icrlo |= APIC_DEST_ALLESELF;
break;
default:
- KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
+ KASSERT(x2apic_mode ||
+ (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
("%s: invalid destination 0x%x", __func__, dest));
destfield = dest;
}
@@ -1574,7 +1729,7 @@ native_lapic_ipi_vectored(u_int vector, int dest)
printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
/* Wait until the message is sent without a timeout. */
- while (lapic->icr_lo & APIC_DELSTAT_PEND)
+ while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
ia32_pause();
#endif /* needsattention */
}
diff --git a/sys/x86/xen/xen_apic.c b/sys/x86/xen/xen_apic.c
index 53083ac56ffd..ed86734f888c 100644
--- a/sys/x86/xen/xen_apic.c
+++ b/sys/x86/xen/xen_apic.c
@@ -350,6 +350,7 @@ xen_pv_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
struct apic_ops xen_apic_ops = {
.create = xen_pv_lapic_create,
.init = xen_pv_lapic_init,
+ .xapic_mode = xen_pv_lapic_disable,
.setup = xen_pv_lapic_setup,
.dump = xen_pv_lapic_dump,
.disable = xen_pv_lapic_disable,