Diffstat (limited to 'sys/dev/drm2/i915/i915_drv.h')
-rw-r--r--  sys/dev/drm2/i915/i915_drv.h | 1456
1 file changed, 820 insertions(+), 636 deletions(-)
diff --git a/sys/dev/drm2/i915/i915_drv.h b/sys/dev/drm2/i915/i915_drv.h
index ffdbc1878d34..2dff53c0886a 100644
--- a/sys/dev/drm2/i915/i915_drv.h
+++ b/sys/dev/drm2/i915/i915_drv.h
@@ -57,7 +57,14 @@ enum pipe {
I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')
-#define I915_NUM_PIPE 2
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
enum plane {
PLANE_A = 0,
@@ -78,9 +85,12 @@ enum port {
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
+
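(Illustrative usage, not part of this patch: the new for_each_encoder_on_crtc() macro walks dev->mode_config.encoder_list and yields only the encoders whose base.crtc matches the given CRTC. A hypothetical caller would look like the sketch below; handle_encoder() is an invented placeholder.)

	struct intel_encoder *intel_encoder;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		/* visited only when intel_encoder->base.crtc == crtc */
		handle_encoder(intel_encoder);
	}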
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
@@ -91,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
/* Interface history:
*
* 1.1: Original.
@@ -106,12 +122,8 @@ struct intel_pch_pll {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
-#define WATCH_EXEC 0
-#define WATCH_LRU 0
-#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
-#define WATCH_PWRITE 0
+#define WATCH_LISTS 0
+#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -124,10 +136,120 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
struct drm_i915_private;
+struct intel_opregion {
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
+ u32 __iomem *lid_state;
+};
+#define OPREGION_SIZE (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+struct drm_i915_master_private {
+ drm_local_map_t *sarea;
+ struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
+
+struct drm_i915_fence_reg {
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ int pin_count;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+};
+
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+ u_int ref;
+ u32 eir;
+ u32 pgtbl_er;
+ u32 ier;
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ bool waiting[I915_NUM_RINGS];
+ u32 pipestat[I915_MAX_PIPES];
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u32 seqno[I915_NUM_RINGS];
+ u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct timeval time;
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 rseqno, wseqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ s32 ring:4;
+ u32 cache_level:2;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+};
+
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
@@ -136,25 +258,24 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
- void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
/* clock updates for mode set */
@@ -164,9 +285,39 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
+#define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_llc)
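(Illustrative usage, not part of this patch: DEV_INFO_FLAGS is an X-macro; a consumer defines DEV_INFO_FLAG and DEV_INFO_SEP before expanding it, for example to print every flag of a "const struct intel_device_info *info". The printf() sink below is only a stand-in for whatever output routine the caller actually uses.)

	#define DEV_INFO_FLAG(name) printf(#name ": %s\n", info->name ? "yes" : "no")
	#define DEV_INFO_SEP ;
		DEV_INFO_FLAGS;
	#undef DEV_INFO_FLAG
	#undef DEV_INFO_SEP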
+
struct intel_device_info {
u8 gen;
- u8 not_supported:1;
u8 is_mobile:1;
u8 is_i85x:1;
u8 is_i915g:1;
@@ -179,7 +330,7 @@ struct intel_device_info {
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
- u8 has_pch_split:1;
+ u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
@@ -196,6 +347,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
vm_page_t *pt_pages;
uint32_t pd_offset;
@@ -225,59 +377,18 @@ enum no_fbc_reason {
FBC_MODULE_PARAM,
};
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
- u32 *lid_state;
-};
-#define OPREGION_SIZE (8*1024)
-
-struct drm_i915_master_private {
- drm_local_map_t *sarea;
- struct _drm_i915_sarea *sarea_priv;
-};
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 16
-/* 16 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 5
-
-struct drm_i915_fence_reg {
- struct list_head lru_list;
- struct drm_i915_gem_object *obj;
- int pin_count;
-};
-
-struct sdvo_device_mapping {
- u8 initialized;
- u8 dvo_port;
- u8 slave_addr;
- u8 dvo_wiring;
- u8 i2c_pin;
- u8 ddc_pin;
-};
-
enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -285,133 +396,22 @@ enum intel_pch {
struct intel_fbdev;
struct intel_fbc_work;
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
- device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
- device_t gmbus[GMBUS_NUM_PORTS + 1];
- device_t bbbus[GMBUS_NUM_PORTS + 1];
- /** gmbus_sx protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct sx gmbus_sx;
- uint32_t gpio_mmio_base;
-
- int relative_constants_mode;
-
- drm_local_map_t *mmio_map;
-
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct mtx gt_lock;
-
- /* drm_i915_ring_buffer_t ring; */
- struct intel_ring_buffer rings[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
- uint32_t counter;
- unsigned int status_gfx_addr;
- struct drm_gem_object *hws_obj;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- atomic_t irq_received;
- u32 trace_irq_seqno;
-
- /** Cached value of IER to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
- struct mtx irq_lock;
-
- struct mtx dpio_lock;
-
- u32 hotplug_supported_mask;
-
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t last_instdone;
- uint32_t last_instdone1;
-
- unsigned int stop_rings;
-
- struct intel_opregion opregion;
-
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- /* PCH chipset type */
- enum intel_pch pch_type;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- unsigned long quirks;
+struct intel_gmbus {
+ device_t gmbus_bridge;
+ device_t gmbus;
+ device_t bbbus_bridge;
+ device_t bbbus;
+ u32 force_bit;
+ u32 reg0;
+ u32 gpio_reg;
+ struct drm_i915_private *dev_priv;
+};
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -557,27 +557,243 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct task work;
+ u32 pm_iir;
+	/* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ struct mtx lock;
+
+	/* The below variables and all the rps hw state are protected by
+	 * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct timeout_task delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct sx hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct task error_work;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ /* FIXME Linux<->FreeBSD: "void *regs" on Linux. */
+ drm_local_map_t *mmio_map;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct mtx gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct sx gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ device_t bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ int mch_res_rid;
+ struct resource *mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ struct mtx irq_lock;
+
+ /* DPIO indirect register protection */
+ struct sx dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct task hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct callout hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ struct mtx error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct task error_work;
+ struct completion error_completion;
+ struct taskqueue *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
+
+ struct {
+ /** Bridge to intel-gtt-ko */
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
- struct list_head gtt_list;
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+#ifdef __linux__
+ struct io_mapping *gtt_mapping;
+#endif
+ vm_paddr_t gtt_base_addr;
+ int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
+ eventhandler_tag inactive_shrinker;
+ bool shrinker_no_lock_stealing;
+
/**
- * List of objects currently involved in rendering from the
- * ringbuffer.
+ * List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
@@ -588,15 +804,6 @@ typedef struct drm_i915_private {
struct list_head active_list;
/**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -618,26 +825,14 @@ typedef struct drm_i915_private {
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
- struct timeout_task retire_task;
+ struct timeout_task retire_work;
- /**
+ /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
- uint32_t next_gem_seqno;
-
- /**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
@@ -652,10 +847,10 @@ typedef struct drm_i915_private {
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
- * It prevents command submission from occuring and makes
+ * It prevents command submission from occurring and makes
* every pending request fail
*/
- int wedged;
+ atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -670,20 +865,8 @@ typedef struct drm_i915_private {
size_t mappable_gtt_total;
size_t object_memory;
u32 object_count;
-
- struct intel_gtt gtt;
- eventhandler_tag i915_lowmem;
} mm;
- const struct intel_device_info *info;
-
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct {
- unsigned allow_batchbuffer : 1;
- u32 *gfx_hws_cpu_addr;
- } dri1;
-
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -694,88 +877,68 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
- /* wait_queue_head_t pending_flip_queue; XXXKIB */
+ wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct task idle_task;
- struct callout idle_callout;
- bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
- device_t bridge_dev;
bool mchbar_need_disable;
- int mch_res_rid;
- struct resource *mch_res;
- struct mtx rps_lock;
- u32 pm_iir;
- struct task rps_task;
+ struct intel_l3_parity l3_parity;
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
+ /* gen6+ rps state */
+ struct intel_gen6_power_mgmt rps;
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
- struct mtx *mchdev_lock;
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
- unsigned long cfb_size;
- unsigned int cfb_fb;
- int cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- struct taskqueue *tq;
- struct task error_task;
- struct task hotplug_task;
- int error_completion;
- struct mtx error_completion_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct mtx error_lock;
- struct callout hangcheck_timer;
-
unsigned long last_gpu_reset;
+	/* fbdev registered on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console may be contended at resume, but we don't
+ * want it to block on it.
+ */
+ struct task console_resume_work;
+
+ struct backlight_device *backlight;
+
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
bool hw_contexts_disabled;
uint32_t hw_context_size;
+
+ u32 fdi_rx_config;
+
+ struct i915_suspend_saved_registers regfile;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
- if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
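(Illustrative usage, not part of this patch: for_each_ring() only visits rings for which intel_ring_initialized() is true, so callers need not special-case devices that lack the BSD or BLT rings.)

	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);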
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -785,37 +948,48 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
- I915_CACHE_NONE,
+ I915_CACHE_NONE = 0,
I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+ */
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
-enum intel_chip_family {
- CHIP_I8XX = 0x01,
- CHIP_I9XX = 0x02,
- CHIP_I915 = 0x04,
- CHIP_I965 = 0x08,
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+	 * of pages before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
};
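(Illustrative sketch, not part of this patch: a backing-storage implementation fills in the two hooks and hands the table to i915_gem_object_init(), which is declared further below; the "my_" names are hypothetical.)

	static int
	my_get_pages(struct drm_i915_gem_object *obj)
	{
		/* allocate/pin the backing pages and make them available */
		return (0);
	}

	static void
	my_put_pages(struct drm_i915_gem_object *obj)
	{
		/* release the backing pages once they are no longer needed */
	}

	static const struct drm_i915_gem_object_ops my_gem_object_ops = {
		.get_pages = my_get_pages,
		.put_pages = my_put_pages,
	};

	/* then, at object-creation time: */
	i915_gem_object_init(obj, &my_gem_object_ops);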
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
- /** This object's place on the active/flushing/inactive lists */
+
+ /** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
*/
unsigned int active:1;
@@ -826,12 +1000,6 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
- /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -893,19 +1061,15 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
vm_page_t *pages;
int pages_pin_count;
/**
- * DMAR support
- */
- struct sglist *sg_list;
-
- /**
* Used for performing relocations during execbuffer insertion.
*/
- LIST_ENTRY(drm_i915_gem_object) exec_node;
+ struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
@@ -919,7 +1083,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
@@ -941,10 +1106,11 @@ struct drm_i915_gem_object {
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
*/
- int pending_flip;
+ atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
-#define to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -979,72 +1145,110 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
+ struct mtx lock;
struct list_head request_list;
- struct mtx lck;
} mm;
struct drm_gem_names context_idr;
};
-struct drm_i915_error_state {
- u_int ref;
- u32 eir;
- u32 pgtbl_er;
- u32 ier;
- bool waiting[I915_NUM_RINGS];
- u32 pipestat[I915_MAX_PIPES];
- u32 tail[I915_NUM_RINGS];
- u32 head[I915_NUM_RINGS];
- u32 ipeir[I915_NUM_RINGS];
- u32 ipehr[I915_NUM_RINGS];
- u32 instdone[I915_NUM_RINGS];
- u32 acthd[I915_NUM_RINGS];
- u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head[I915_NUM_RINGS];
- u32 cpu_ring_tail[I915_NUM_RINGS];
- u32 error; /* gen6+ */
- u32 instpm[I915_NUM_RINGS];
- u32 instps[I915_NUM_RINGS];
- u32 instdone1;
- u32 seqno[I915_NUM_RINGS];
- u64 bbaddr;
- u32 fault_reg[I915_NUM_RINGS];
- u32 done_reg;
- u32 faddr[I915_NUM_RINGS];
- u64 fence[I915_MAX_NUM_FENCES];
- struct timeval time;
- struct drm_i915_error_ring {
- struct drm_i915_error_object {
- int page_count;
- u32 gtt_offset;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer;
- struct drm_i915_error_request {
- long jiffies;
- u32 seqno;
- u32 tail;
- } *requests;
- int num_requests;
- } ring[I915_NUM_RINGS];
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u32 seqno;
- u32 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- s32 pinned:2;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- s32 ring:4;
- u32 cache_level:2;
- } *active_bo, *pinned_bo;
- u32 active_bo_count, pinned_bo_count;
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
-};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
/**
 * RC6 is a special power stage which allows the GPU to enter a very
@@ -1067,45 +1271,39 @@ struct drm_i915_error_state {
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
+extern int i915_enable_fbc __read_mostly;
+extern int i915_enable_hangcheck __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_preliminary_hw_support __read_mostly;
+
extern struct drm_driver i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern int i915_panel_invert_brightness;
-extern unsigned int i915_powersave;
-extern int i915_prefault_disable;
-extern int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern int i915_lvds_channel_mode;
-extern int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern int i915_enable_rc6;
-extern int i915_enable_fbc;
-extern int i915_enable_ppgtt;
-extern int i915_enable_hangcheck;
+extern int intel_iommu_gfx_mapped;
const struct intel_device_info *i915_get_device_id(int device);
-int i915_reset(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_device *dev);
-
/* i915_debug.c */
int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
struct sysctl_oid *top);
void i915_sysctl_cleanup(struct drm_device *dev);
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
-int i915_batchbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_cmdbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
@@ -1117,35 +1315,54 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-unsigned long i915_read_mch_val(void);
-bool i915_gpu_raise(void);
-bool i915_gpu_lower(void);
-bool i915_gpu_busy(void);
-bool i915_gpu_turbo_disable(void);
+extern int intel_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-/* i915_irq.c */
+extern int i915_batchbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_cmdbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_irq_init(struct drm_device *dev);
+extern int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
-void intel_enable_asle(struct drm_device *dev);
-void i915_hangcheck_elapsed(void *context);
+extern void intel_console_resume(void *context, int pending);
+
+/* i915_irq.c */
+void i915_hangcheck_elapsed(void *data);
void i915_handle_error(struct drm_device *dev, bool wedged);
+
+extern void intel_irq_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
+
void i915_error_state_free(struct drm_i915_error_state *error);
-void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void i915_destroy_error_state(struct drm_device *dev);
+void intel_enable_asle(struct drm_device *dev);
+
+//#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+//#else
+//#define i915_destroy_error_state(x)
+//#endif
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1167,13 +1384,17 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1188,16 +1409,64 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
- bool map_and_fenceable);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
+ obj->pages_pin_count++;
+}
+
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
+ obj->pages_pin_count--;
+}
+
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
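(Worked example: the signed cast makes the test wrap-safe. With seq1 = 0x00000002 and seq2 = 0xfffffffe, seq1 - seq2 is 0x00000004 in 32-bit arithmetic, and (int32_t)0x00000004 >= 0, so a sequence number that has wrapped past zero is still correctly reported as having passed an older one.)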
+
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
@@ -1221,46 +1490,66 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible);
+
+void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
-uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size, int tiling_mode);
-int i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
- bool write);
-int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment, struct intel_ring_buffer *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains, uint32_t flush_domains);
-void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to);
-int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-int i915_gem_idle(struct drm_device *dev);
-int i915_gem_init(struct drm_device *dev);
-int i915_gem_init_hw(struct drm_device *dev);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gpu_idle(struct drm_device *dev);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring, uint32_t seqno);
-int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
- struct drm_i915_gem_request *request);
-int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-void i915_gem_reset(struct drm_device *dev);
-int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
-int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ u32 *seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
uint64_t *phys);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ int id,
+ int align);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
+
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level);
+
+#ifdef FREEBSD_WIP
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+#endif /* FREEBSD_WIP */
+
+int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@@ -1273,18 +1562,44 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj, int id, int align);
+/* i915_gem_gtt.c */
+int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
-int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1293,56 +1608,62 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
struct vm_page *m);
-/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
-
-/* i915_gem_stolen.c */
-int i915_gem_init_stolen(struct drm_device *dev);
-void i915_gem_cleanup_stolen(struct drm_device *dev);
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
+#else
+#define i915_verify_lists(dev) 0
+#endif
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+ const char *where, uint32_t mark);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* intel_iic.c */
+/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
-extern void intel_gmbus_set_speed(device_t idev, int speed);
-extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
-extern void intel_iic_reset(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
-extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
- unsigned port);
+
+extern device_t intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
+extern void intel_gmbus_set_speed(device_t idev, int speed);
+extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
+extern bool intel_gmbus_is_forced_bit(device_t adapter);
+extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
-int intel_opregion_setup(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_device *dev);
+#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
+#else
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
+#endif
-/* i915_gem_gtt.c */
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
-
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
@@ -1350,34 +1671,31 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
+extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
-extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
+extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
-
-extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
-extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
-
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
- struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct sbuf *m,
- struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(
- struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* overlay */
+//#ifdef CONFIG_DEBUG_FS
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
- struct drm_device *dev, struct intel_display_error_state *error);
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
+//#endif
static inline void
trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
@@ -1394,6 +1712,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@@ -1431,143 +1752,6 @@ __i915_write(64, 64)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-#define I915_VERBOSE 0
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define I915_GEM_HWS_INDEX 0x20
-
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-/* XXXKIB LEGACY */
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
-
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
-/* XXXKIB LEGACY END */
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-
-#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-
- return ((int32_t)(seq1 - seq2) >= 0);
-}
-
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- intel_gtt_chipset_flush();
-}
-
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
- obj->pages_pin_count++;
-}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
- obj->pages_pin_count--;
-}
-
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
#endif