/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: release/9.1.0/sys/dev/drm2/i915/i915_drv.h 235783 2012-05-22 11:07:44Z kib $");

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <dev/agp/agp_i810.h>
#include <dev/drm2/drm_mm.h>
#include <dev/drm2/i915/i915_reg.h>
#include <dev/drm2/i915/intel_ringbuffer.h>
#include <dev/drm2/i915/intel_bios.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

MALLOC_DECLARE(DRM_I915_GEM);

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')
#define I915_NUM_PIPE	2

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
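/*
 * Illustrative sketch only (not part of the original header): typical use
 * of for_each_pipe() together with pipe_name().  Like the macro itself,
 * it assumes a variable named dev_priv is in scope; the function name is
 * hypothetical.
 */
#if 0
static void
example_log_pipes(struct drm_i915_private *dev_priv)
{
	int pipe;

	for_each_pipe(pipe)
		DRM_DEBUG("pipe %c is present\n", pipe_name(pipe));
}
#endif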
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0
#define WATCH_INACTIVE	0
#define WATCH_PWRITE	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct drm_i915_private;

struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
	    uint32_t sprite_width, int pixel_size);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
	    struct drm_display_mode *mode,
	    struct drm_display_mode *adjusted_mode,
	    int x, int y, struct drm_framebuffer *old_fb);
	void (*write_eld)(struct drm_connector *connector,
	    struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	void (*init_pch_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
	    struct drm_framebuffer *fb, struct drm_i915_gem_object *obj);
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	    int x, int y);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct intel_device_info {
	u8 gen;
	u8 is_mobile:1;
	u8 is_i85x:1;
	u8 is_i915g:1;
	u8 is_i945gm:1;
	u8 is_g33:1;
	u8 need_gfx_hws:1;
	u8 is_g4x:1;
	u8 is_pineview:1;
	u8 is_broadwater:1;
	u8 is_crestline:1;
	u8 is_ivybridge:1;
	u8 has_fbc:1;
	u8 has_pipe_cxsr:1;
	u8 has_hotplug:1;
	u8 cursor_needs_physical:1;
	u8 has_overlay:1;
	u8 overlay_needs_physical:1;
	u8 supports_tv:1;
	u8 has_bsd_ring:1;
	u8 has_blt_ring:1;
	u8 has_llc:1;
};

#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
	unsigned num_pd_entries;
	vm_page_t *pt_pages;
	uint32_t pd_offset;
	vm_paddr_t *pt_dma_addr;
	vm_paddr_t scratch_page_dma_addr;
};
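/*
 * Worked example (editorial note, not in the original header): with 512
 * page-directory entries, each referring to a page table of 1024 entries,
 * and 4 KB per mapped page, a full PPGTT covers
 * 512 * 1024 * 4096 bytes = 2 GB of address space.
 */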
enum no_fbc_reason {
	FBC_NO_OUTPUT,		/* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL,	/* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE,	/* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE,	/* mode too large for compression */
	FBC_BAD_PLANE,		/* fbc not supported on plane */
	FBC_NOT_TILED,		/* buffer not tiled */
	FBC_MULTIPLE_PIPES,	/* more than one pipe active */
	FBC_MODULE_PARAM,
};

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;
	int size;
	struct drm_file *file_priv;	/* NULL: free, -1: heap, other: real files */
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	struct opregion_asle *asle;
	void *vbt;
	u32 *lid_state;
};
#define OPREGION_SIZE	(8*1024)

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	uint32_t setup_seqno;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

enum intel_pch {
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)

struct intel_fbdev;
struct intel_fbc_work;

typedef struct drm_i915_private {
	struct drm_device *dev;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	device_t *gmbus;
	device_t *bbbus;
	/** gmbus_sx protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct sx gmbus_sx;

	int has_gem;
	int relative_constants_mode;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;

	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	struct mtx gt_lock;

	drm_i915_sarea_t *sarea_priv;
	/* drm_i915_ring_buffer_t ring; */
	struct intel_ring_buffer rings[I915_NUM_RINGS];
	uint32_t next_seqno;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	atomic_t irq_received;
	u32 trace_irq_seqno;

	/** Cached value of IER to avoid reads in updating the bitfield */
	u32 pipestat[2];
	u32 irq_mask;
	u32 gt_irq_mask;
	u32 pch_irq_mask;
	struct mtx irq_lock;

	u32 hotplug_supported_mask;

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;
	int num_pipe;

	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
	int hangcheck_count;
	uint32_t last_acthd;
	uint32_t last_acthd_bsd;
	uint32_t last_acthd_blt;
	uint32_t last_instdone;
	uint32_t last_instdone1;

	struct intel_opregion opregion;

	/* overlay */
	struct intel_overlay *overlay;
	bool sprite_scaling_enabled;

	/* LVDS info */
	int backlight_level;		/* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode;	/* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode;	/* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	int lvds_ssc_freq;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;

		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;
	bool no_aux_handshake;

	int crt_ddc_pin;
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start;	/* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs;	/* 8 on pre-965, 16 otherwise */

	/* PCH chipset type */
	enum intel_pch pch_type;

	/* Display functions */
	struct drm_i915_display_funcs display;

	unsigned long quirks;

	/* Register state */
	bool modeset_on_lid;
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;

	struct {
		/** Memory allocator for GTT stolen memory */
		struct drm_mm stolen;
		/** Memory allocator for GTT */
		struct drm_mm gtt_space;
		/** List of all objects in gtt_space. Used to restore gtt
		 * mappings on resume */
		struct list_head gtt_list;

		/** Usable portion of the GTT for GEM */
		unsigned long gtt_start;
		unsigned long gtt_mappable_end;
		unsigned long gtt_end;

		/** PPGTT used for aliasing the PPGTT with the GTT */
		struct i915_hw_ppgtt *aliasing_ppgtt;

		/**
		 * List of objects currently involved in rendering from the
		 * ringbuffer.
		 *
		 * Includes buffers having the contents of their GPU caches
		 * flushed, not necessarily primitives. last_rendering_seqno
		 * represents when the rendering involved will be completed.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head active_list;
		/**
		 * List of objects which are not in the ringbuffer but which
		 * still have a write_domain which needs to be flushed before
		 * unbinding.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head flushing_list;

		/**
		 * LRU list of objects which are not in the ringbuffer and
		 * are ready to unbind, but are still in the GTT.
		 *
		 * last_rendering_seqno is 0 while an object is in this list.
		 *
		 * A reference is not held on the buffer while on this list,
		 * as merely being GTT-bound shouldn't prevent its being
		 * freed, and we'll pull it off the list in the free path.
		 */
		struct list_head inactive_list;

		/**
		 * LRU list of objects which are not in the ringbuffer but
		 * are still pinned in the GTT.
		 */
		struct list_head pinned_list;

		/** LRU list of objects with fence regs on them. */
		struct list_head fence_list;

		/**
		 * List of objects currently pending being freed.
		 *
		 * These objects are no longer in use, but due to a signal
		 * we were prevented from freeing them at the appointed time.
		 */
		struct list_head deferred_free_list;

		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct timeout_task retire_task;

		/**
		 * Are we in a non-interruptible section of code like
		 * modesetting?
		 */
		bool interruptible;

		uint32_t next_gem_seqno;

		/**
		 * Waiting sequence number, if any
		 */
		uint32_t waiting_gem_seqno;

		/**
		 * Last seq seen at irq time
		 */
		uint32_t irq_gem_seqno;

		/**
		 * Flag if the X Server, and thus DRM, is not currently in
		 * control of the device.
		 *
		 * This is set between LeaveVT and EnterVT. It needs to be
		 * replaced with a semaphore. It also needs to be
		 * transitioned away from for kernel modesetting.
		 */
		int suspended;

		/**
		 * Flag if the hardware appears to be wedged.
		 *
		 * This is set when attempts to idle the device timeout.
		 * It prevents command submission from occurring and makes
		 * every pending request fail.
		 */
		int wedged;
		/** Bit 6 swizzling required for X tiling */
		uint32_t bit_6_swizzle_x;
		/** Bit 6 swizzling required for Y tiling */
		uint32_t bit_6_swizzle_y;

		/* storage for physical objects */
		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

		/* accounting, useful for userland debugging */
		size_t gtt_total;
		size_t mappable_gtt_total;
		size_t object_memory;
		u32 object_count;

		struct intel_gtt gtt;
		eventhandler_tag i915_lowmem;
	} mm;

	const struct intel_device_info *info;

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicate whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	/* wait_queue_head_t pending_flip_queue; XXXKIB */
	bool flip_pending_is_done;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	struct task idle_task;
	struct callout idle_callout;
	bool busy;
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;
	struct drm_connector *int_lvds_connector;
	struct drm_connector *int_edp_connector;

	device_t bridge_dev;
	bool mchbar_need_disable;
	int mch_res_rid;
	struct resource *mch_res;

	struct mtx rps_lock;
	u32 pm_iir;
	struct task rps_task;

	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	int c_m;
	int r_t;
	u8 corr;
	struct mtx *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	int cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct taskqueue *tq;
	struct task error_task;
	struct task hotplug_task;
	int error_completion;
	struct mtx error_completion_lock;
	struct drm_i915_error_state *first_error;
	struct mtx error_lock;
	struct callout hangcheck_timer;

	unsigned long last_gpu_reset;

	struct intel_fbdev *fbdev;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;
} drm_i915_private_t;

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

enum i915_cache_level {
	I915_CACHE_NONE,
	I915_CACHE_LLC,
	I915_CACHE_LLC_MLC,	/* gen6+ */
};

enum intel_chip_family {
	CHIP_I8XX = 0x01,
	CHIP_I9XX = 0x02,
	CHIP_I915 = 0x04,
	CHIP_I965 = 0x08,
};

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object base;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	struct list_head gtt_list;

	/** This object's place on the active/flushing/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	 * to be unbound).
	 */
	unsigned int active:1;
	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * This is set if the object has been written to since the last
	 * GPU flush.
	 */
	unsigned int pending_gpu_write:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	unsigned int tiling_changed:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separate for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;

	vm_page_t *pages;

	/**
	 * DMAR support
	 */
	struct sglist *sg_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	LIST_ENTRY(drm_i915_gem_object) exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
	struct intel_ring_buffer *last_fenced_ring;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/**
	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
	 * flags which individual pages are valid.
	 */
	uint8_t *page_cpu_valid;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	/**
	 * Number of crtcs where this object is currently the fb, but
	 * will be page flipped away on the next vblank. When it
	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
	 */
	int pending_flip;
};

#define to_intel_bo(x) member2struct(drm_i915_gem_object, base, (x))
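/*
 * Illustrative sketch only (not in the original header): GEM entry points
 * receive a plain struct drm_gem_object; to_intel_bo() recovers the
 * containing i915 object.  The lookup call and variable names here are
 * assumptions for illustration.
 */
#if 0
	struct drm_gem_object *gem_obj =
	    drm_gem_object_lookup(dev, file_priv, handle);
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
#endif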
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		struct list_head request_list;
		struct mtx lck;
	} mm;
};

struct drm_i915_error_state {
	u32 eir;
	u32 pgtbl_er;
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 instdone1;
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 seqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is deepest RC6.  Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform.  RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
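/*
 * Illustrative sketch only (not in the original header): the RC6 flags form
 * a bitmask, so allowing plain RC6 and deep RC6 while keeping the deepest
 * state disabled would look like this (i915_enable_rc6 is the module
 * tunable declared below).
 */
#if 0
	i915_enable_rc6 = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
#endif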
extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
extern struct drm_driver_info i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern int i915_enable_rc6;
extern int i915_enable_fbc;
extern int i915_enable_ppgtt;
extern int i915_enable_hangcheck;

const struct intel_device_info *i915_get_device_id(int device);

int i915_reset(struct drm_device *dev, u8 flags);

/* i915_debug.c */
int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top);
void i915_sysctl_cleanup(struct drm_device *dev);

/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev,
    struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
    unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
    struct drm_clip_rect __user *boxes, int i, int DR1, int DR4);
int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4);

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
void i915_update_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_read_mch_val(void);
bool i915_gpu_raise(void);
bool i915_gpu_lower(void);
bool i915_gpu_busy(void);
bool i915_gpu_turbo_disable(void);

/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
    struct drm_file *file_priv);

extern void intel_irq_init(struct drm_device *dev);

extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void intel_enable_asle(struct drm_device *dev);
void i915_hangcheck_elapsed(void *context);
void i915_handle_error(struct drm_device *dev, bool wedged);

void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void i915_destroy_error_state(struct drm_device *dev);

/* i915_gem.c */
int i915_gem_create(struct drm_file *file, struct drm_device *dev,
    uint64_t size, uint32_t *handle_p);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
    bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);

static inline void
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
	}
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
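/*
 * Illustrative sketch only (not in the original header): fence pinning is
 * a bare counter, so every i915_gem_object_pin_fence() must be balanced by
 * an i915_gem_object_unpin_fence() once the access that depended on the
 * fence register has finished.
 */
#if 0
	i915_gem_object_pin_fence(obj);
	/* ... access that relies on the object's fence register ... */
	i915_gem_object_unpin_fence(obj);
#endif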
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
    size_t size);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
    unsigned long mappable_end, unsigned long end);
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
    uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
    bool write);
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
    u32 alignment, struct intel_ring_buffer *pipelined);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
    uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gpu_idle(struct drm_device *dev, bool do_retire);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
    struct drm_i915_gem_request *request);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *pipelined);
void i915_gem_reset(struct drm_device *dev);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
    bool do_retire);
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
    uint64_t *phys);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);

void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_detach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj, int id, int align);

int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
    struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
    unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* intel_iic.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);

/* intel_opregion.c */
int intel_opregion_setup(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);

/* i915_gem_gtt.c */
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);

/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);

extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);

extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
    struct drm_device *dev);
extern void intel_overlay_print_error_state(struct sbuf *m,
    struct intel_overlay_error_state *error);
extern struct intel_display_error_state *intel_display_capture_error_state(
    struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
    struct drm_device *dev, struct intel_display_error_state *error);

static inline void
trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
{
	/* %c expects a character, not a string literal. */
	CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? 'w' : 'r', val);
}
/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	(((dev_priv)->info->gen >= 6) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))

#define __i915_read(x, y) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read

#define __i915_write(x, y) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write

#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16_NOTRACE(reg, val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE_NOTRACE(reg, val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

#define I915_VERBOSE 0

#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
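/*
 * Illustrative sketch only (not in the original header): a common pattern
 * with these accessors is to post a register write with a read back so the
 * write reaches the hardware before the code proceeds.  DEIMR is used here
 * purely as an example register; a dev_priv variable is assumed in scope.
 */
#if 0
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	POSTING_READ(DEIMR);
#endif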
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)

/* XXXKIB LEGACY */
#define IS_I965G(dev)	((dev)->pci_device == 0x2972 || \
			 (dev)->pci_device == 0x2982 || \
			 (dev)->pci_device == 0x2992 || \
			 (dev)->pci_device == 0x29A2 || \
			 (dev)->pci_device == 0x2A02 || \
			 (dev)->pci_device == 0x2A12 || \
			 (dev)->pci_device == 0x2A42 || \
			 (dev)->pci_device == 0x2E02 || \
			 (dev)->pci_device == 0x2E12 || \
			 (dev)->pci_device == 0x2E22 || \
			 (dev)->pci_device == 0x2E32)

#define IS_I965GM(dev)	((dev)->pci_device == 0x2A02)

#define IS_IGDG(dev)	((dev)->pci_device == 0xa001)
#define IS_IGDGM(dev)	((dev)->pci_device == 0xa011)
#define IS_IGD(dev)	(IS_IGDG(dev) || IS_IGDGM(dev))

#define IS_I9XX(dev)	(IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
			 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
/* XXXKIB LEGACY END */

#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)	(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)	(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)	(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
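/*
 * Illustrative sketch only (not in the original header): the breadcrumb
 * dword in the hardware status page can be polled against a request's
 * sequence number, with i915_seqno_passed() (defined below) providing the
 * wraparound-safe comparison.
 */
#if 0
	if (i915_seqno_passed(READ_BREADCRUMB(dev_priv), seqno))
		/* the request has completed */;
#endif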
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev)		(IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)

#define HAS_PCH_SPLIT(dev)	(IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_PIPE_CONTROL(dev)	(INTEL_INFO(dev)->gen >= 5)

#define INTEL_PCH_TYPE(dev)	(((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)

#define PRIMARY_RINGBUFFER_SIZE		(128*1024)

static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{

	return ((int32_t)(seq1 - seq2) >= 0);
}

u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);

#endif
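/*
 * Worked example (editorial note, not in the original header):
 * i915_seqno_passed() stays correct across 32-bit sequence-number
 * wraparound because it compares the signed difference:
 *
 *	i915_seqno_passed(5, 2)          -> true   ((int32_t)3 >= 0)
 *	i915_seqno_passed(2, 5)          -> false  ((int32_t)-3 < 0)
 *	i915_seqno_passed(3, 0xfffffffe) -> true   ((int32_t)5 >= 0; wrapped)
 */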