This article collects typical usage examples of the INTEL_INFO function in C++. If you have been struggling with questions like: what exactly does C++ INTEL_INFO do? How is INTEL_INFO used? Where can I find examples of INTEL_INFO in use? Then congratulations: the curated code examples here may be just the help you need.
A total of 15 INTEL_INFO code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
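A note before the examples: despite the "C++" label, INTEL_INFO is a C preprocessor macro from the Linux i915 DRM driver, and its exact definition varies across kernel versions (older trees take a struct drm_device *, newer ones a struct drm_i915_private *). A hedged sketch of the form most of the examples below assume, plus the dominant usage pattern (the helper name at_least_gen is hypothetical):

/*
 * Sketch of the macro, not a copy of any one kernel version: it
 * resolves to a pointer to the per-device capability structure.
 */
#define INTEL_INFO(dev) \
	(&((struct drm_i915_private *)(dev)->dev_private)->info)

/* The dominant pattern below: gate a code path on the GPU generation. */
static inline bool at_least_gen(struct drm_device *dev, int gen)
{
	return INTEL_INFO(dev)->gen >= gen;
}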
Example 1: i915_reg_read_ioctl
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
int i;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
switch (entry->size) {
case 8:
reg->val = I915_READ64(reg->offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
break;
default:
WARN_ON(1);
return -EINVAL;
}
return 0;
}
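For context, user space reaches this handler through the DRM_IOCTL_I915_REG_READ ioctl. A hedged sketch of a caller (the helper name read_i915_reg is hypothetical; error handling is minimal):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical user-space helper: read one whitelisted register. */
static int read_i915_reg(int drm_fd, __u64 offset, __u64 *val)
{
	struct drm_i915_reg_read reg = { .offset = offset };

	if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &reg))
		return -1;	/* errno carries the kernel's -EINVAL etc. */
	*val = reg.val;
	return 0;
}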
Example 2: get_context_size
static int get_context_size(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
switch (INTEL_INFO(dev)->gen) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
}
return ret;
}
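The * 64 scales the register's count of 64-byte cache lines into bytes. A hedged sketch of how the driver consumed this value at init time (reconstructed from the era's i915_gem_context_init; treat the rounding and the sanity limit as illustrative):

/* Sketch: the context image is rounded up to page size, and an
 * implausibly large value disables hardware contexts entirely. */
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1 << 20))
	dev_priv->hw_context_size = 0;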
Example 3: intel_disable_shared_dpll
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
if (INTEL_INFO(dev)->gen < 5)
return;
if (pll == NULL)
return;
mutex_lock(&dev_priv->dpll_lock);
if (WARN_ON(!(pll->active_mask & crtc_mask)))
goto out;
DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
pll->name, pll->active_mask, pll->on,
crtc->base.base.id);
assert_shared_dpll_enabled(dev_priv, pll);
WARN_ON(!pll->on);
pll->active_mask &= ~crtc_mask;
if (pll->active_mask)
goto out;
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->funcs.disable(dev_priv, pll);
pll->on = false;
out:
mutex_unlock(&dev_priv->dpll_lock);
}
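For symmetry, a hedged sketch of the enable path's core, mirroring the refcount-by-bitmask scheme above (a reconstruction, not a verbatim copy of intel_enable_shared_dpll):

/* Sketch: each CRTC sets its bit; only the first user powers the PLL on. */
pll->active_mask |= crtc_mask;
if (!pll->on) {
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->funcs.enable(dev_priv, pll);
	pll->on = true;
}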
Example 4: intel_plane_init
int
intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
int ret;
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
intel_plane->update_colorkey = ilk_update_colorkey;
intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
plane_formats = ilk_plane_formats;
num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
break;
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
break;
default:
kfree(intel_plane);
return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
false);
if (ret)
kfree(intel_plane);
return ret;
}
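A hedged sketch of the call site, assuming the usual modeset-init loop that creates one sprite plane per display pipe (the field name num_pipes matches the era's intel_device_info, but take the loop itself as illustrative):

/* Sketch: probe one sprite plane per pipe; failures are non-fatal. */
int pipe, ret;

for (pipe = 0; pipe < INTEL_INFO(dev)->num_pipes; pipe++) {
	ret = intel_plane_init(dev, pipe);
	if (ret)
		DRM_DEBUG_KMS("sprite init failed on pipe %d\n", pipe);
}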
Example 5: intel_fbc_can_activate
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
(cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
fbc->no_fbc_reason = "incompatible mode";
return false;
}
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
fbc->no_fbc_reason = "mode too large for compression";
return false;
}
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != BIT(DRM_ROTATE_0)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
if (!stride_is_valid(dev_priv, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
fbc->no_fbc_reason = "pixel format is invalid";
return false;
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
fbc->no_fbc_reason = "pixel rate is too big";
return false;
}
/* It is possible for the required CFB size to change without a
* crtc->disable + crtc->enable since it is possible to change the
* stride without triggering a full modeset. Since we try to
* over-allocate the CFB, there's a chance we may keep FBC enabled even
* if this happens, but if we exceed the current CFB size we'll have to
* disable FBC. Notice that it would be possible to disable FBC, wait
* for a frame, free the stolen node, then try to reenable FBC in case
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
fbc->compressed_fb.size * fbc->threshold) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
return true;
}
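A hedged sketch of the consumer: the activation path simply bails out when this check fails, leaving fbc->no_fbc_reason set for debugfs to report (function names follow the era's intel_fbc.c but are not guaranteed verbatim):

/* Sketch: caller side of the predicate above. */
if (!intel_fbc_can_activate(crtc))
	return;		/* no_fbc_reason explains why */
intel_fbc_schedule_activation(crtc);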
Example 6: fbc_on_plane_a_only
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen < 4;
}
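A hedged sketch of how such a predicate is typically used when choosing a CRTC for FBC: pre-gen4 hardware can only compress plane A, so other planes are rejected early (a reconstruction of the caller, not verbatim):

/* Sketch: caller side of fbc_on_plane_a_only(). */
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
	return false;	/* pre-gen4 FBC only tracks plane A */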
Example 7: i915_reset
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool simulated;
int ret;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
simulated = dev_priv->gpu_error.stop_rings != 0;
ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
}
if (i915_stop_ring_allow_warn(dev_priv))
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
ret = i915_gem_init_hw(dev);
dev_priv->gpu_error.reload_in_reset = false;
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
return ret;
}
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev);
return 0;
}
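A hedged sketch of the recovery path that ends up calling i915_reset(): the hangcheck timer declares a hang, an error-handling worker performs the reset, and user space is notified once it is done (names follow the era's i915_irq.c, where reset_done_event is the "ERROR=0" uevent payload; treat the kdev dereference as illustrative, as its type changed between kernel versions):

/* Sketch: inside the error worker, after the hang has been handled. */
if (i915_reset(dev) == 0)
	kobject_uevent_env(&dev->primary->kdev->kobj,
			   KOBJ_CHANGE, reset_done_event);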
Example 8: igt_guc_doorbells
/*
* Create as many clients as there are doorbells. Note that there are already
* client(s)/doorbell(s) created during driver load, but this test creates
* its own and does not interact with the existing ones.
*/
static int igt_guc_doorbells(void *arg)
{
struct drm_i915_private *dev_priv = arg;
struct intel_guc *guc;
int i, err = 0;
u16 db_id;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
pr_err("No guc object!\n");
err = -EINVAL;
goto unlock;
}
err = check_all_doorbells(guc);
if (err)
goto unlock;
for (i = 0; i < ATTEMPTS; i++) {
clients[i] = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
i % GUC_CLIENT_PRIORITY_NUM,
dev_priv->kernel_context);
if (!clients[i]) {
pr_err("[%d] No guc client\n", i);
err = -EINVAL;
goto out;
}
if (IS_ERR(clients[i])) {
if (PTR_ERR(clients[i]) != -ENOSPC) {
pr_err("[%d] unexpected error\n", i);
err = PTR_ERR(clients[i]);
goto out;
}
if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
pr_err("[%d] non-db related alloc fail\n", i);
err = -EINVAL;
goto out;
}
/* expected, ran out of dbs for this client type */
continue;
}
/*
* The check below is only valid because we keep a doorbell
* assigned during the whole life of the client.
*/
if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
pr_err("[%d] more clients than doorbells (%d >= %d)\n",
i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
err = -EINVAL;
goto out;
}
err = validate_client(clients[i],
i % GUC_CLIENT_PRIORITY_NUM, false);
if (err) {
pr_err("[%d] client_alloc sanity check failed!\n", i);
err = -EINVAL;
goto out;
}
db_id = clients[i]->doorbell_id;
err = __guc_client_enable(clients[i]);
if (err) {
pr_err("[%d] Failed to create a doorbell\n", i);
goto out;
}
/* doorbell id shouldn't change, we are holding the mutex */
if (db_id != clients[i]->doorbell_id) {
pr_err("[%d] doorbell id changed (%d != %d)\n",
i, db_id, clients[i]->doorbell_id);
err = -EINVAL;
goto out;
}
err = check_all_doorbells(guc);
if (err)
goto out;
err = ring_doorbell_nop(clients[i]);
if (err)
goto out;
}
//......... part of the code omitted here .........
Example 9: i915_tiling_ok
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width, tile_height;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
return true;
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
if (INTEL_INFO(dev)->gen >= 4) {
/* i965 stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
if (IS_GEN3(dev)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
if (size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_height = 32;
else
tile_height = 8;
/* i8xx is strange: it has 2 interleaved rows of tiles, so it needs an even
* number of tile rows. */
if (IS_GEN2(dev))
tile_height *= 2;
/* Size needs to be aligned to a full tile row */
if (size & (tile_height * stride - 1))
return false;
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
if (stride < tile_width)
return false;
if (stride & (stride - 1))
return false;
return true;
}
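A hedged sketch of the main caller, the set-tiling ioctl, which rejects a request up front when this check fails (close to the historical i915_gem_set_tiling(), but not guaranteed verbatim):

/* Sketch: caller side in the set-tiling ioctl. */
if (!i915_tiling_ok(dev, args->stride, obj->base.size, args->tiling_mode)) {
	drm_gem_object_unreference_unlocked(&obj->base);
	return -EINVAL;
}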
Example 10: i915_restore_display
static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode; don't restore these registers in KMS mode */
i915_restore_modeset_reg(dev);
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
}
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(DP_B, dev_priv->saveDP_B);
I915_WRITE(DP_C, dev_priv->saveDP_C);
I915_WRITE(DP_D, dev_priv->saveDP_D);
}
/* FIXME: restore TV & SDVO state */
/* Only restore FBC info on platforms that support FBC */
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
POSTING_READ(VGA_PD);
DRM_UDELAY(150);
i915_restore_vga(dev);
}
Example 11: i915_reset
int i915_reset(struct drm_device *dev, u8 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool need_display = true;
int ret;
if (!i915_try_reset)
return 0;
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
i915_gem_reset(dev);
ret = -ENODEV;
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
ret = gen6_do_reset(dev, flags);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
case 4:
ret = i965_do_reset(dev, flags);
break;
case 2:
ret = i8xx_do_reset(dev, flags);
break;
}
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
}
mutex_unlock(&dev->struct_mutex);
if (need_display) {
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
}
return 0;
}
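For one branch of the switch above, a hedged sketch of what a per-generation reset helper looks like: poke the GDRST register, then poll for the hardware to clear it (loosely modeled on the era's gen6_do_reset(); the exact locking and retry details are omitted):

/* Sketch: full-GPU domain reset on gen6/gen7. */
static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
	return wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
			500);
}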
Example 12: i915_gem_detect_bit_6_swizzle
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
//......... part of the code omitted here .........
Example 13: intel_uncore_init
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
gen6_force_wake_work);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
/* A small trick here - if the bios hasn't configured
* MT forcewake, and if the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero. Which will be
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
switch (INTEL_INFO(dev)->gen) {
default:
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
dev_priv->uncore.funcs.mmio_writew = hsw_write16;
dev_priv->uncore.funcs.mmio_writel = hsw_write32;
dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
} else {
dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
dev_priv->uncore.funcs.mmio_writew = gen6_write16;
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
}
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_readb = vlv_read8;
dev_priv->uncore.funcs.mmio_readw = vlv_read16;
dev_priv->uncore.funcs.mmio_readl = vlv_read32;
dev_priv->uncore.funcs.mmio_readq = vlv_read64;
} else {
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
}
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
dev_priv->uncore.funcs.mmio_writew = gen5_write16;
dev_priv->uncore.funcs.mmio_writel = gen5_write32;
dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
dev_priv->uncore.funcs.mmio_readb = gen5_read8;
dev_priv->uncore.funcs.mmio_readw = gen5_read16;
dev_priv->uncore.funcs.mmio_readl = gen5_read32;
dev_priv->uncore.funcs.mmio_readq = gen5_read64;
break;
case 4:
case 3:
//......... part of the code omitted here .........
Example 14: create_hw_context
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
int ret, id;
ctx = kmalloc(sizeof(*ctx), DRM_I915_GEM, M_WAITOK | M_ZERO);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx, DRM_I915_GEM);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
return ERR_PTR(-ENOMEM);
}
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
if (ret)
goto err_out;
}
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
ctx->file_priv = file_priv;
again:
if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
DRM_DEBUG_DRIVER("idr allocation failed\n");
goto err_out;
}
ret = idr_get_new_above(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_ID + 1, &id);
if (ret == 0)
ctx->id = id;
if (ret == -EAGAIN)
goto again;
else if (ret)
goto err_out;
return ctx;
err_out:
do_destroy(ctx);
return ERR_PTR(ret);
}
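Example 15 below is the same function ported to a BSD tree, where the Linux idr is replaced by a splay tree of handles. For completeness, a hedged sketch of the Linux lookup side that pairs with the idr allocation above (matching the shape of the historical helper):

/* Sketch: fetch a context previously stored in the per-file idr. */
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}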
Example 15: create_hw_context
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
struct i915_ctx_handle *han;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
return ERR_PTR(-ENOMEM);
}
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
if (ret)
goto err_out;
}
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
ctx->file_priv = file_priv;
han = malloc(sizeof(*han), M_DRM, M_WAITOK | M_CANFAIL | M_ZERO);
if (han == NULL) {
ret = -ENOMEM;
DRM_DEBUG_DRIVER("idr allocation failed\n");
goto err_out;
}
han->ctx = ctx;
again:
han->handle = ++file_priv->ctx_id;
if (han->handle <= DEFAULT_CONTEXT_ID + 1 || SPLAY_INSERT(i915_ctx_tree,
&file_priv->ctx_tree, han))
goto again;
ctx->id = han->handle;
return ctx;
err_out:
do_destroy(ctx);
return ERR_PTR(ret);
}