This article collects typical usage examples of the C++ IS_HASWELL function. If you have been wondering what exactly IS_HASWELL does, how to call it, or what real call sites look like, the curated examples below should help.
A total of 15 IS_HASWELL code examples are shown below, sorted by popularity by default.
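IS_HASWELL is the platform-check predicate used throughout Intel's i915 graphics driver and related tools: depending on the codebase it is applied to a drm_device pointer, a drm_i915_private pointer, or a raw PCI device ID, and it evaluates true when the hardware is a Haswell GPU, so that Haswell-specific register programming and workarounds can be gated on it. Before the examples, here is a minimal, self-contained sketch of that gating pattern; my_device_info, my_is_haswell, and my_configure are illustrative placeholders, not the driver's actual definitions:

#include <stdbool.h>
#include <stdio.h>

/* All names below are hypothetical placeholders for this sketch. */
struct my_device_info {
    unsigned short device_id; /* PCI device ID, e.g. 0x0416 for a Haswell GT2 part */
    bool is_haswell;          /* decided once, when the device is probed */
};

/* Stand-in for the driver's IS_HASWELL() check. */
static bool my_is_haswell(const struct my_device_info *info)
{
    return info->is_haswell;
}

static void my_configure(const struct my_device_info *info)
{
    if (my_is_haswell(info)) {
        /* Haswell-only path: eLLC detection, PC8, CRC workarounds, ... */
        printf("device 0x%04x: applying Haswell-specific programming\n",
               info->device_id);
    } else {
        printf("device 0x%04x: generic path\n", info->device_id);
    }
}

int main(void)
{
    struct my_device_info info = { .device_id = 0x0416, .is_haswell = true };
    my_configure(&info);
    return 0;
}

In the kernel driver of this era the predicate is a macro over the device's static info table (INTEL_INFO(dev)->is_haswell) rather than a runtime function, but the call sites read the same way.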
Example 1: intel_uncore_early_sanitize
void intel_uncore_early_sanitize(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (HAS_FPGA_DBG_UNCLAIMED(dev))
        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

    if (IS_HASWELL(dev) &&
        (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
        /* The docs do not explain exactly how the calculation can be
         * made. It is somewhat guessable, but for now, it's always
         * 128MB.
         * NB: We can't write IDICR yet because we do not have gt funcs
         * set up */
        dev_priv->ellc_size = 128;
        DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
    }

    /* clear out old GT FIFO errors */
    if (IS_GEN6(dev) || IS_GEN7(dev))
        __raw_i915_write32(dev_priv, GTFIFODBG,
                           __raw_i915_read32(dev_priv, GTFIFODBG));

    intel_uncore_forcewake_reset(dev);
}
Example 2: Calc_CSC_Param
int Calc_CSC_Param(struct CSCCoeff_Matrix *CSC_Matrix,
                   struct csc_coeff *CSC_Coeff_t, int devid)
{
    struct drm_intel_csc_params input_csc_params;
    unsigned short Hsw_Vlv_CSC_Coeff[CSC_MAX_COEFF_COUNT];

    memcpy(input_csc_params.m_CSCCoeff, CSC_Matrix->CoeffMatrix,
           sizeof(float) * CSC_MAX_COEFF_COUNT);

    Convert_Coeff_ToBinary(&input_csc_params, Hsw_Vlv_CSC_Coeff, devid);
    Convert_Coeff_ToBSpecFormat(CSC_Coeff_t->csc_coeff,
                                Hsw_Vlv_CSC_Coeff, devid);

    if (IS_HASWELL(devid) || IS_BROADWELL(devid)) {
        if (CSC_Matrix->param_valid & CSC_OFFSET_VALID_MASK)
            Convert_CSC_Offset_ToBSpecFormat(CSC_Matrix, CSC_Coeff_t);

        if (CSC_Matrix->param_valid & CSC_MODE_VALID_MASK) {
            if (CSC_Matrix->CSCMode == 0x1)
                CSC_Coeff_t->csc_mode = 0x2; /* CSC is before Gamma */
            else
                CSC_Coeff_t->csc_mode = 0;
        }

        CSC_Coeff_t->param_valid = CSC_Matrix->param_valid;
    }

    return 0;
}
Example 3: get_context_size
static int get_context_size(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;
    u32 reg;

    switch (INTEL_INFO(dev)->gen) {
    case 6:
        reg = I915_READ(CXT_SIZE);
        ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
        break;
    case 7:
        reg = I915_READ(GEN7_CXT_SIZE);
#ifdef FREEBSD_WIP
        if (IS_HASWELL(dev))
            ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
        else
#endif
            ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
        break;
    default:
        panic("i915_gem_context: Unsupported Intel GPU generation %d",
              INTEL_INFO(dev)->gen);
    }

    return ret;
}
Example 4: get_context_size
static int get_context_size(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;
    u32 reg;

    switch (INTEL_INFO(dev)->gen) {
    case 6:
        reg = I915_READ(CXT_SIZE);
        ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
        break;
    case 7:
        reg = I915_READ(GEN7_CXT_SIZE);
        if (IS_HASWELL(dev))
            ret = HSW_CXT_TOTAL_SIZE;
        else
            ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
        break;
    case 8:
        ret = GEN8_CXT_TOTAL_SIZE;
        break;
    default:
        BUG();
    }

    return ret;
}
Example 5: i915_pm_suspend_late
static int i915_pm_suspend_late(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    struct drm_i915_private *dev_priv = drm_dev->dev_private;

    /*
     * We have a suspend ordering issue with the snd-hda driver also
     * requiring our device to be powered up. Due to the lack of a
     * parent/child relationship we currently solve this with a late
     * suspend hook.
     *
     * FIXME: This should be solved with a special hdmi sink device or
     * similar so that power domains can be employed.
     */
    if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
        hsw_enable_pc8(dev_priv);

    pci_disable_device(pdev);
    pci_set_power_state(pdev, PCI_D3hot);

    return 0;
}
Example 6: gputop_perf_initialize
bool
gputop_perf_initialize(void)
{
    if (intel_dev.device)
        return true;

    drm_fd = open_render_node(&intel_dev);
    if (drm_fd < 0) {
        gputop_log(GPUTOP_LOG_LEVEL_HIGH, "Failed to open render node", -1);
        return false;
    }

    /* NB: eu_count needs to be initialized before declaring counters */
    init_dev_info(drm_fd, intel_dev.device);
    page_size = sysconf(_SC_PAGE_SIZE);

    if (IS_HASWELL(intel_dev.device)) {
        gputop_oa_add_render_basic_counter_query_hsw(&gputop_devinfo);
        gputop_oa_add_compute_basic_counter_query_hsw(&gputop_devinfo);
        gputop_oa_add_compute_extended_counter_query_hsw(&gputop_devinfo);
        gputop_oa_add_memory_reads_counter_query_hsw(&gputop_devinfo);
        gputop_oa_add_memory_writes_counter_query_hsw(&gputop_devinfo);
        gputop_oa_add_sampler_balance_counter_query_hsw(&gputop_devinfo);
    } else if (IS_BROADWELL(intel_dev.device)) {
        gputop_oa_add_render_basic_counter_query_bdw(&gputop_devinfo);
    } else if (IS_CHERRYVIEW(intel_dev.device)) {
        gputop_oa_add_render_basic_counter_query_chv(&gputop_devinfo);
    } else if (IS_SKYLAKE(intel_dev.device)) {
        gputop_oa_add_render_basic_counter_query_skl(&gputop_devinfo);
    } else
        assert(0);

    return true;
}
Example 7: ivb_pipe_crc_ctl_reg
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
{
    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
        *source = INTEL_PIPE_CRC_SOURCE_PF;

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PLANE1:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_PLANE2:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_PF:
        if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
            hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);

        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
Example 8: __gen6_gt_force_wake_mt_get
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
    u32 forcewake_ack;

    if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
        forcewake_ack = FORCEWAKE_ACK_HSW;
    else
        forcewake_ack = FORCEWAKE_MT_ACK;

    if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                        FORCEWAKE_ACK_TIMEOUT_MS))
        DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

    __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                       _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

    /* something from same cacheline, but !FORCEWAKE_MT */
    __raw_posting_read(dev_priv, ECOBUS);

    if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
                        FORCEWAKE_ACK_TIMEOUT_MS))
        DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

    /* WaRsForcewakeWaitTC0:ivb,hsw */
    if (INTEL_INFO(dev_priv->dev)->gen < 8)
        __gen6_gt_wait_for_thread_c0(dev_priv);
}
Example 9: i915_check_vgpu
/**
 * i915_check_vgpu - detect virtual GPU
 * @dev_priv: i915 device private
 *
 * This function is called at the initialization stage, to detect whether
 * we are running on a vGPU.
 */
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
    uint64_t magic;
    uint32_t version;

    BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

    if (!IS_HASWELL(dev_priv))
        return;

    magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
    if (magic != VGT_MAGIC)
        return;

    version = INTEL_VGT_IF_VERSION_ENCODE(
        __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
        __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
    if (version != INTEL_VGT_IF_VERSION) {
        DRM_INFO("VGT interface version mismatch!\n");
        return;
    }

    dev_priv->vgpu.active = true;
    DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
Example 10: intel_runtime_resume
static int intel_runtime_resume(struct device *device)
{
    struct pci_dev *pdev = to_pci_dev(device);
    struct drm_device *dev = pci_get_drvdata(pdev);
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = 0;

    if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
        return -ENODEV;

    DRM_DEBUG_KMS("Resuming device\n");

    WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
    disable_rpm_wakeref_asserts(dev_priv);

    intel_opregion_notify_adapter(dev, PCI_D0);
    dev_priv->pm.suspended = false;
    if (intel_uncore_unclaimed_mmio(dev_priv))
        DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

    intel_guc_resume(dev);

    if (IS_GEN6(dev_priv))
        intel_init_pch_refclk(dev);

    if (IS_BROXTON(dev))
        ret = bxt_resume_prepare(dev_priv);
    else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
        hsw_disable_pc8(dev_priv);
    else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
        ret = vlv_resume_prepare(dev_priv, true);

    /*
     * No point in rolling back things in case of an error, as the best
     * we can do is to hope that things will still work (and disable RPM).
     */
    i915_gem_init_swizzling(dev);
    gen6_update_ring_freq(dev);

    intel_runtime_pm_enable_interrupts(dev_priv);

    /*
     * On VLV/CHV display interrupts are part of the display
     * power well, so hpd is reinitialized from there. For
     * everyone else do it here.
     */
    if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
        intel_hpd_init(dev_priv);

    intel_enable_gt_powersave(dev);

    enable_rpm_wakeref_asserts(dev_priv);

    if (ret)
        DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
    else
        DRM_DEBUG_KMS("Device resumed\n");

    return ret;
}
Example 11: intel_virt_detect_pch
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
    enum intel_pch ret = PCH_NOP;

    /*
     * In a virtualized passthrough environment we can be in a
     * setup where the ISA bridge is not able to be passed through.
     * In this case, a south bridge can be emulated and we have to
     * make an educated guess as to which PCH is really there.
     */
    if (IS_GEN5(dev)) {
        ret = PCH_IBX;
        DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
    } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
        ret = PCH_CPT;
        DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
    } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
        ret = PCH_LPT;
        DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
    } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
        ret = PCH_SPT;
        DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
    }

    return ret;
}
Example 12: intel_crtc_set_crc_source
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
                              size_t *values_cnt)
{
    struct drm_i915_private *dev_priv = crtc->dev->dev_private;
    struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    enum intel_display_power_domain power_domain;
    enum intel_pipe_crc_source source;
    u32 val = 0; /* shut up gcc */
    int ret = 0;

    if (display_crc_ctl_parse_source(source_name, &source) < 0) {
        DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
        return -EINVAL;
    }

    power_domain = POWER_DOMAIN_PIPE(crtc->index);
    if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
        DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
        return -EIO;
    }

    ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
    if (ret != 0)
        goto out;

    if (source) {
        /*
         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
         * enabled and disabled dynamically based on package C states,
         * user space can't make reliable use of the CRCs, so let's just
         * completely disable it.
         */
        hsw_disable_ips(intel_crtc);
    }

    I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
    POSTING_READ(PIPE_CRC_CTL(crtc->index));

    if (!source) {
        if (IS_G4X(dev_priv))
            g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
            vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
        else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
            hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

        hsw_enable_ips(intel_crtc);
    }

    pipe_crc->skipped = 0;
    *values_cnt = 5;

out:
    intel_display_power_put(dev_priv, power_domain);

    return ret;
}
Example 13: intel_uncore_forcewake_reset
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long irqflags;

    if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
        gen6_force_wake_timer((unsigned long)dev_priv);

    /* Hold uncore.lock across reset to prevent any register access
     * with forcewake not set correctly
     */
    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

    if (IS_VALLEYVIEW(dev))
        vlv_force_wake_reset(dev_priv);
    else if (IS_GEN6(dev) || IS_GEN7(dev))
        __gen6_gt_force_wake_reset(dev_priv);

    if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
        __gen7_gt_force_wake_mt_reset(dev_priv);

    if (IS_GEN9(dev))
        __gen9_gt_force_wake_mt_reset(dev_priv);

    if (restore) { /* If reset with a user forcewake, try to restore */
        unsigned fw = 0;

        if (IS_VALLEYVIEW(dev)) {
            if (dev_priv->uncore.fw_rendercount)
                fw |= FORCEWAKE_RENDER;
            if (dev_priv->uncore.fw_mediacount)
                fw |= FORCEWAKE_MEDIA;
        } else if (IS_GEN9(dev)) {
            if (dev_priv->uncore.fw_rendercount)
                fw |= FORCEWAKE_RENDER;
            if (dev_priv->uncore.fw_mediacount)
                fw |= FORCEWAKE_MEDIA;
            if (dev_priv->uncore.fw_blittercount)
                fw |= FORCEWAKE_BLITTER;
        } else {
            if (dev_priv->uncore.forcewake_count)
                fw = FORCEWAKE_ALL;
        }

        if (fw)
            dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

        if (IS_GEN6(dev) || IS_GEN7(dev))
            dev_priv->uncore.fifo_count =
                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                GT_FIFO_FREE_ENTRIES_MASK;
    }

    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example 14: intel_prepare_ddi_buffers
/*
 * Starting with Haswell, DDI port buffers must be programmed with correct
 * values in advance. The buffer values are different for FDI and DP modes,
 * but the HDMI/DVI fields are shared among those. So we program the DDI
 * in either FDI or DP modes only, as HDMI connections will work with both
 * of those.
 */
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 reg;
    int i;
    int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
    const u32 *ddi_translations_fdi;
    const u32 *ddi_translations_dp;
    const u32 *ddi_translations_edp;
    const u32 *ddi_translations;

    if (IS_BROADWELL(dev)) {
        ddi_translations_fdi = bdw_ddi_translations_fdi;
        ddi_translations_dp = bdw_ddi_translations_dp;
        ddi_translations_edp = bdw_ddi_translations_edp;
    } else if (IS_HASWELL(dev)) {
        ddi_translations_fdi = hsw_ddi_translations_fdi;
        ddi_translations_dp = hsw_ddi_translations_dp;
        ddi_translations_edp = hsw_ddi_translations_dp;
    } else {
        WARN(1, "ddi translation table missing\n");
        ddi_translations_edp = bdw_ddi_translations_dp;
        ddi_translations_fdi = bdw_ddi_translations_fdi;
        ddi_translations_dp = bdw_ddi_translations_dp;
    }

    switch (port) {
    case PORT_A:
        ddi_translations = ddi_translations_edp;
        break;
    case PORT_B:
    case PORT_C:
        ddi_translations = ddi_translations_dp;
        break;
    case PORT_D:
        if (intel_dp_is_edp(dev, PORT_D))
            ddi_translations = ddi_translations_edp;
        else
            ddi_translations = ddi_translations_dp;
        break;
    case PORT_E:
        ddi_translations = ddi_translations_fdi;
        break;
    default:
        BUG();
    }

    for (i = 0, reg = DDI_BUF_TRANS(port);
         i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
        I915_WRITE(reg, ddi_translations[i]);
        reg += 4;
    }

    /* Entry 9 is for HDMI: */
    for (i = 0; i < 2; i++) {
        I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
        reg += 4;
    }
}
Example 15: gen7_fbc_enable
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
    struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
    struct drm_framebuffer *fb = crtc->base.primary->fb;
    struct drm_i915_gem_object *obj = intel_fb_obj(fb);
    u32 dpfc_ctl;
    int threshold = dev_priv->fbc.threshold;

    dev_priv->fbc.enabled = true;

    dpfc_ctl = 0;
    if (IS_IVYBRIDGE(dev_priv))
        dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

    if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
        threshold++;

    switch (threshold) {
    case 4:
    case 3:
        dpfc_ctl |= DPFC_CTL_LIMIT_4X;
        break;
    case 2:
        dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        break;
    case 1:
        dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        break;
    }

    dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

    if (dev_priv->fbc.false_color)
        dpfc_ctl |= FBC_CTL_FALSE_COLOR;

    if (IS_IVYBRIDGE(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:ivb */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS);
    } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
        I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
                   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
                   HSW_FBCQ_DIS);
    }

    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
    I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

    intel_fbc_nuke(dev_priv);

    DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}