This article collects typical usage examples of the IS_ERR_OR_NULL function in C/C++ (the samples below are drawn from Linux kernel code). If you have been wondering what exactly IS_ERR_OR_NULL does, how to call it, or what real uses of it look like, the hand-picked examples here should help.
Fifteen code examples of IS_ERR_OR_NULL are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
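Before the examples, a quick refresher on the idiom itself: IS_ERR_OR_NULL() (from <linux/err.h>) is true when a pointer is either NULL or an error code encoded as a pointer (the ERR_PTR()/IS_ERR() convention). The sketch below is a minimal illustration and is not taken from any of the examples; demo_find_resource() and demo_attach() are hypothetical names used only to show the calling pattern. One pitfall worth noting: PTR_ERR(NULL) is 0, so the NULL case must be mapped to a real error code by hand.

#include <linux/err.h>		/* IS_ERR_OR_NULL(), PTR_ERR(), ERR_PTR() */
#include <linux/errno.h>	/* error codes such as ENODEV */

/* Hypothetical lookup: may return a valid pointer, NULL, or ERR_PTR(-E...). */
extern struct demo_resource *demo_find_resource(int id);

static int demo_attach(int id)
{
	struct demo_resource *res = demo_find_resource(id);

	if (IS_ERR_OR_NULL(res)) {
		/* PTR_ERR(NULL) is 0, so map NULL to a real errno */
		return res ? PTR_ERR(res) : -ENODEV;
	}

	/* ... use res ... */
	return 0;
}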
Example 1: mini_isp_debug_load_cfg
int mini_isp_debug_load_cfg(char *cfg_file, char *key_name, u8 *key_var)
{
struct kstat stat;
mm_segment_t fs;
struct file *fp = NULL;
int file_flag = O_RDONLY;
ssize_t ret = 0;
char temp_array[64] = {0};
char temp;
int cnt=0;
bool bRegStart = false;
bool bKeyFound = false;
bool bKeyCfg = false;
print_debug("enter %s", __func__);
if (NULL == cfg_file) {
print_error("%s cfg_file null ptr.", __func__);
return -EINVAL;
}
if (NULL == key_name) {
print_error("%s key_name null ptr.", __func__);
return -EINVAL;
}
if (NULL == key_var) {
print_error("%s key_var null ptr.", __func__);
return -EINVAL;
}
/* must have the following 2 statement */
fs = get_fs();
set_fs(KERNEL_DS);
fp = filp_open(cfg_file, file_flag, 0666);
if (IS_ERR_OR_NULL(fp)) {
print_debug("no debug configuration file(%s) - do nothing, just skip it!\n", cfg_file);
set_fs(fs); /* restore the original address limit before the early return */
return -1;
}
if (0 != vfs_stat(cfg_file, &stat)) {
print_error("failed to get file %s state!",cfg_file);
goto ERROR;
}
print_debug("%s size : %d",cfg_file, (u32)stat.size);
while (0 < vfs_read(fp, &temp, 1, &fp->f_pos)) {
switch (temp) {
case '{':
bRegStart = true;
cnt = 0;
bKeyFound = false;
memset(temp_array, 0, sizeof(temp_array)); /* zero the token buffer */
break;
case '}':
bRegStart = false;
if(bKeyFound)
{
*key_var = mini_atoi16(temp_array);
bKeyCfg = true;
print_debug("%s:0x%x",key_name,*key_var);
}
break;
case '=':
if (bRegStart)
{
bKeyFound = false;
if (0 == strncmp(key_name,temp_array,strlen(key_name)))
{
bKeyFound = true;
}
cnt = 0;
}
break;
default:
if (bRegStart){
if (cnt >= 64)
{
bRegStart = false;
}
else
{
temp_array[cnt] = temp;
cnt=cnt+1;
}
}
break;
}
if (bKeyCfg)
{
break;
}
}
/* must have the following 1 statement */
set_fs(fs);
//......... part of the code is omitted here .........
Example 2: therm_est_probe
static int __devinit therm_est_probe(struct platform_device *pdev)
{
int i;
struct therm_estimator *est;
struct therm_est_data *data;
est = kzalloc(sizeof(struct therm_estimator), GFP_KERNEL);
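/* kzalloc() returns NULL on failure (never an ERR_PTR), so this check is effectively a NULL test */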
if (IS_ERR_OR_NULL(est))
return -ENOMEM;
platform_set_drvdata(pdev, est);
data = therm_est_get_pdata(&pdev->dev);
est->devs = data->devs;
est->ndevs = data->ndevs;
est->toffset = data->toffset;
est->polling_period = data->polling_period;
est->tc1 = data->tc1;
est->tc2 = data->tc2;
est->cur_temp = DEFAULT_TEMP;
est->ntemp = HIST_UNINIT;
/* initialize timer trips */
est->num_timer_trips = data->num_timer_trips;
est->timer_trips = data->timer_trips;
therm_est_init_timer_trips(est);
mutex_init(&est->timer_trip_lock);
INIT_DELAYED_WORK(&est->timer_trip_work,
therm_est_timer_trip_work_func);
est->workqueue = alloc_workqueue(dev_name(&pdev->dev),
WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
if (!est->workqueue)
goto err;
INIT_DELAYED_WORK(&est->therm_est_work, therm_est_work_func);
queue_delayed_work(est->workqueue,
&est->therm_est_work,
msecs_to_jiffies(est->polling_period));
est->num_trips = data->num_trips;
est->trips = data->trips;
est->tzp = data->tzp;
est->thz = thermal_zone_device_register(dev_name(&pdev->dev),
est->num_trips,
(1 << est->num_trips) - 1,
est,
&therm_est_ops,
est->tzp,
data->passive_delay,
0);
if (IS_ERR_OR_NULL(est->thz))
goto err;
for (i = 0; i < ARRAY_SIZE(therm_est_nodes); i++)
device_create_file(&pdev->dev, &therm_est_nodes[i].dev_attr);
#ifdef CONFIG_PM
est->pm_nb.notifier_call = therm_est_pm_notify;
register_pm_notifier(&est->pm_nb);
#endif
return 0;
err:
cancel_delayed_work_sync(&est->therm_est_work);
if (est->workqueue)
destroy_workqueue(est->workqueue);
kfree(est);
return -EINVAL;
}
Example 3: res_trk_pmem_alloc
static int res_trk_pmem_alloc
(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
u32 alloc_size;
struct ddl_context *ddl_context;
unsigned long fw_addr;
int rc = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
rc = -EINVAL;
goto bail_out;
}
ddl_context = ddl_get_context();
res_trk_set_mem_type(addr->mem_type);
alloc_size = (sz + alignment);
if (res_trk_get_enable_ion()) {
if (!res_trk_is_cp_enabled() ||
!res_trk_check_for_sec_session()) {
if (!ddl_context->video_ion_client)
ddl_context->video_ion_client =
res_trk_get_ion_client();
if (!ddl_context->video_ion_client) {
DDL_MSG_ERROR(
"%s() :DDL ION Client Invalid handle\n",
__func__);
rc = -ENOMEM;
goto bail_out;
}
alloc_size = (alloc_size+4095) & ~4095;
addr->alloc_handle = ion_alloc(
ddl_context->video_ion_client,
alloc_size, SZ_4K,
res_trk_get_mem_type(),
res_trk_get_ion_flags());
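/* ion_alloc() reports failure via ERR_PTR(), so check for both an error and a NULL handle */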
if (IS_ERR_OR_NULL(addr->alloc_handle)) {
DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
__func__);
rc = -ENOMEM;
goto bail_out;
}
} else {
fw_addr = resource_context.vidc_platform_data->fw_addr;
addr->alloc_handle = NULL;
addr->alloced_phys_addr = fw_addr;
addr->buffer_size = sz;
}
} else {
addr->alloced_phys_addr = (phys_addr_t)
allocate_contiguous_memory_nomap(alloc_size,
res_trk_get_mem_type(), SZ_4K);
if (!addr->alloced_phys_addr) {
DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
__func__, alloc_size);
rc = -ENOMEM;
goto bail_out;
}
addr->buffer_size = sz;
return rc;
}
bail_out:
return rc;
}
Example 4: msm_pmem_table_add
static int msm_pmem_table_add(struct hlist_head *ptype,
struct msm_pmem_info *info, struct ion_client *client, int domain_num)
{
unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
unsigned long kvstart;
struct file *file;
#endif
int rc = -ENOMEM;
unsigned long len;
struct msm_pmem_region *region;
region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
if (!region)
goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
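/* ion_import_dma_buf() hands back an ERR_PTR on failure, hence the combined check below */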
region->handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(region->handle))
goto out1;
if (ion_map_iommu(client, region->handle, domain_num, 0,
SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
goto out2;
#elif defined(CONFIG_ANDROID_PMEM)
rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
if (rc < 0) {
pr_err("%s: get_pmem_file fd %d error %d\n",
__func__, info->fd, rc);
goto out1;
}
region->file = file;
#else
paddr = 0;
file = NULL;
kvstart = 0;
#endif
if (!info->len)
info->len = len;
rc = check_pmem_info(info, len);
if (rc < 0)
goto out3;
paddr += info->offset;
len = info->len;
if (check_overlap(ptype, paddr, len) < 0) {
rc = -EINVAL;
goto out3;
}
CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
__func__, info->type, info->active, paddr,
(unsigned long)info->vaddr);
INIT_HLIST_NODE(®ion->list);
region->paddr = paddr;
region->len = len;
memcpy(®ion->info, info, sizeof(region->info));
D("%s Adding region to list with type %d\n", __func__,
region->info.type);
D("%s pmem_stats address is 0x%p\n", __func__, ptype);
hlist_add_head(&(region->list), ptype);
return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
ion_unmap_iommu(client, region->handle, domain_num, 0);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
out2:
ion_free(client, region->handle);
#elif defined(CONFIG_ANDROID_PMEM)
put_pmem_file(region->file);
#endif
out1:
kfree(region);
out:
return rc;
}
Example 5: sr_classp5_disable
/**
* sr_classp5_disable() - disable for a voltage domain
* @sr: SmartReflex module, which need to be disabled
* @is_volt_reset: reset the voltage?
*
* This function either disables SR alone, or disables SR and resets the
* voltage to an appropriate level, depending on the is_volt_reset parameter.
*
* NOTE: Appropriate locks must be held by calling path to ensure mutual
* exclusivity
*/
static int sr_classp5_disable(struct omap_sr *sr, int is_volt_reset)
{
struct voltagedomain *voltdm = NULL;
struct omap_volt_data *volt_data = NULL;
struct sr_classp5_calib_data *work_data = NULL;
if (IS_ERR_OR_NULL(sr) || IS_ERR_OR_NULL(sr->voltdm)) {
pr_err("%s: bad parameters!\n", __func__);
return -EINVAL;
}
work_data = (struct sr_classp5_calib_data *)sr->voltdm_cdata;
if (IS_ERR_OR_NULL(work_data)) {
pr_err("%s: bad work data %s\n", __func__, sr->name);
return -EINVAL;
}
if (is_idle_task(current)) {
/*
* we should not have seen this path if calibration !complete
* pm_qos constraint is already released after voltage
* calibration work is finished
*/
WARN_ON(work_data->work_active);
return 0;
}
/* Rest is regular DVFS path */
voltdm = sr->voltdm;
volt_data = omap_voltage_get_curr_vdata(voltdm);
if (IS_ERR_OR_NULL(volt_data)) {
pr_warning("%s: Voltage data is NULL. Cannot disable %s\n",
__func__, sr->name);
return -ENODATA;
}
/* need to do rest of code ONLY if required */
if (volt_data->volt_calibrated && !work_data->work_active) {
/*
* We are going OFF - disable clocks manually to allow OFF-mode.
*/
if (sr->suspended)
sr->ops->put(sr);
return 0;
}
if (work_data->work_active) {
/* flag work is dead and remove the old work */
work_data->work_active = false;
cancel_delayed_work_sync(&work_data->work);
sr_notifier_control(sr, false);
}
sr_classp5_stop_hw_loop(sr);
if (is_volt_reset)
voltdm_reset(sr->voltdm);
/* Canceled SR, so no more need to keep request */
pm_qos_update_request(&work_data->qos, PM_QOS_DEFAULT_VALUE);
/*
* We are going OFF - disable clocks manually to allow OFF-mode.
*/
if (sr->suspended) {
/* !!! Should never ever be here - no guarantee to recover !!!*/
WARN(true, "Trying to go OFF with invalid AVS state\n");
sr->ops->put(sr);
}
return 0;
}
Example 6: pil_q6v5_init
struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
{
struct q6v5_data *drv;
struct resource *res;
struct pil_desc *desc;
int ret;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return ERR_PTR(-ENOMEM);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
drv->reg_base = devm_request_and_ioremap(&pdev->dev, res);
if (!drv->reg_base)
return ERR_PTR(-ENOMEM);
desc = &drv->desc;
ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
&desc->name);
if (ret)
return ERR_PTR(ret);
desc->dev = &pdev->dev;
drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
"qcom,pil-femto-modem");
if (drv->qdsp6v5_2_0)
return drv;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!drv->axi_halt_base)
return ERR_PTR(-ENOMEM);
drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
"qcom,pil-q6v55-mss");
drv->qdsp6v55 |= of_device_is_compatible(pdev->dev.of_node,
"qcom,pil-q6v55-lpass");
drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
"qcom,pil-q6v56-mss");
drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
"qcom,mba-image-is-not-elf");
drv->xo = devm_clk_get(&pdev->dev, "xo");
if (IS_ERR(drv->xo))
return ERR_CAST(drv->xo);
drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
if (IS_ERR(drv->vreg_cx))
return ERR_CAST(drv->vreg_cx);
drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
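/* the PLL supply is optional: NULL or an ERR_PTR here simply means "not provided" */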
if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
int voltage;
ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
&voltage);
if (ret) {
dev_err(&pdev->dev, "Failed to find vdd_pll voltage.\n");
return ERR_PTR(ret);
}
ret = regulator_set_voltage(drv->vreg_pll, voltage, voltage);
if (ret) {
dev_err(&pdev->dev, "Failed to request vdd_pll voltage.\n");
return ERR_PTR(ret);
}
ret = regulator_set_optimum_mode(drv->vreg_pll, 10000);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
return ERR_PTR(ret);
}
} else {
drv->vreg_pll = NULL;
}
return drv;
}
Example 7: tdmb_fc8080_spi_write_read
//......... part of the code is omitted here .........
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
struct tdmb_fc8080_ctrl_blk* fc8080_info_p;
fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
{
if (fc8080_info_p->spi_irq_status)
{
printk("######### spi read function is so late skip ignore #########\n");
return IRQ_HANDLED;
}
tunerbb_drv_fc8080_isr_control(0);
fc8080_info_p->spi_irq_status = TRUE;
broadcast_fc8080_drv_if_isr();
fc8080_info_p->spi_irq_status = FALSE;
tunerbb_drv_fc8080_isr_control(1);
}
else
{
printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
}
return IRQ_HANDLED;
}
#endif
#ifdef FEATURE_DMB_USE_PINCTRL
static int tdmb_pinctrl_init(void)
{
struct pinctrl *tdmb_pinctrl;
struct pinctrl_state *gpio_state_suspend;
tdmb_pinctrl = devm_pinctrl_get(&(fc8080_ctrl_info.pdev->dev));
if(IS_ERR_OR_NULL(tdmb_pinctrl)) {
pr_err("%s: Getting pinctrl handle failed\n", __func__);
return -EINVAL;
}
gpio_state_suspend
= pinctrl_lookup_state(tdmb_pinctrl, "gpio_tdmb_suspend");
if(IS_ERR_OR_NULL(gpio_state_suspend)) {
pr_err("%s: Failed to get the suspend state pinctrl handle\n", __func__);
return -EINVAL;
}
if(pinctrl_select_state(tdmb_pinctrl, gpio_state_suspend)) {
pr_err("%s: error on pinctrl_select_state for tdmb enable and irq pin\n", __func__);
return -EINVAL;
}
else {
printk("%s: success to set pinctrl_select_state for tdmb enable and irq pin\n", __func__);
}
return 0;
}
#endif
static int tdmb_configure_gpios(void)
{
int rc = OK;
int err_count = 0;
fc8080_ctrl_info.dmb_en = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,en-gpio",0);
rc = gpio_request(fc8080_ctrl_info.dmb_en, "DMB_EN");
if (rc < 0) {
err_count++;
printk("%s:Failed GPIO DMB_EN request!!!\n",__func__);
}
fc8080_ctrl_info.dmb_irq = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,irq-gpio",0);
rc = gpio_request(fc8080_ctrl_info.dmb_irq, "DMB_INT_N");
if (rc < 0) {
err_count++;
printk("%s:Failed GPIO DMB_INT_N request!!!\n",__func__);
}
#if defined(CONFIG_MACH_MSM8926_VFP_KR) || defined(CONFIG_MACH_MSM8916_YG_SKT_KR)
fc8080_ctrl_info.dmb_ant = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,ant-gpio",0);
rc = gpio_request(fc8080_ctrl_info.dmb_ant, "DMB_ANT");
if (rc < 0) {
err_count++;
printk("%s:Failed GPIO DMB_ANT request!!!\n",__func__);
}
gpio_direction_output(fc8080_ctrl_info.dmb_ant,0);
#endif
gpio_direction_output(fc8080_ctrl_info.dmb_en, 0);
gpio_direction_input(fc8080_ctrl_info.dmb_irq);
if(err_count > 0) rc = -EINVAL;
return rc;
}
Example 8: exynos_dmabuf_prime_import
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
/* is this one of own objects? */
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
exynos_gem_obj = dma_buf->priv;
obj = &exynos_gem_obj->base;
/* is it from our device? */
if (obj->dev == drm_dev) {
/*
* Importing a dmabuf exported from our own gem increases the
* refcount on the gem itself instead of the f_count of the dmabuf.
*/
drm_gem_object_reference(obj);
return obj;
}
}
attach = dma_buf_attach(dma_buf, drm_dev->dev);
if (IS_ERR(attach))
return ERR_PTR(-EINVAL);
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
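/* dma_buf_map_attachment() returns an ERR_PTR on failure; note that PTR_ERR(NULL) would be 0 */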
if (IS_ERR_OR_NULL(sgt)) {
ret = PTR_ERR(sgt);
goto err_buf_detach;
}
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
}
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_free_buffer;
}
sgl = sgt->sgl;
buffer->size = dma_buf->size;
buffer->dma_addr = sg_dma_address(sgl);
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
/*
* this case could be CONTIG or NONCONTIG type but for now
* sets NONCONTIG.
* TODO. we have to find a way that exporter can notify
* the type of its own buffer to importer.
*/
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
exynos_gem_obj->buffer = buffer;
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
err_free_buffer:
kfree(buffer);
buffer = NULL;
err_unmap_attach:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}
Example 9: userptr_mn_invalidate_range_start
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
end = range->end - 1;
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
struct drm_i915_gem_object *obj;
if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
break;
}
/*
* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
* since our serialisation is via the spinlock and not
* the struct_mutex - and consequently use it after it
* is freed and then double free it. To prevent that
* use-after-free we only acquire a reference on the
* object if it is not in the process of being destroyed.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!kref_get_unless_zero(&obj->base.refcount)) {
it = interval_tree_iter_next(it, range->start, end);
continue;
}
spin_unlock(&mn->lock);
if (!unlock) {
unlock = &mn->mm->i915->drm.struct_mutex;
switch (mutex_trylock_recursive(unlock)) {
default:
case MUTEX_TRYLOCK_FAILED:
if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
i915_gem_object_put(obj);
return -EINTR;
}
/* fall through */
case MUTEX_TRYLOCK_SUCCESS:
break;
case MUTEX_TRYLOCK_RECURSIVE:
unlock = ERR_PTR(-EEXIST);
break;
}
}
ret = i915_gem_object_unbind(obj);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
goto unlock;
spin_lock(&mn->lock);
/*
* As we do not (yet) protect the mmu from concurrent insertion
* over this range, there is no guarantee that this search will
* terminate given a pathologic workload.
*/
it = interval_tree_iter_first(&mn->objects, range->start, end);
}
spin_unlock(&mn->lock);
unlock:
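/* unlock is NULL if the mutex was never taken, ERR_PTR(-EEXIST) if it was already held */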
if (!IS_ERR_OR_NULL(unlock))
mutex_unlock(unlock);
return ret;
}
Example 10: mei_txe_probe
//......... part of the code is omitted here .........
dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
err = -ENOMEM;
goto free_device;
}
}
pci_enable_msi(pdev);
/* clear spurious interrupts */
mei_clear_interrupts(dev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_txe_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_txe_irq_quick_handler,
mei_txe_irq_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto free_device;
}
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
}
err = mei_txe_setup_satt2(dev,
dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
if (err)
goto release_irq;
err = mei_register(dev);
if (err)
goto release_irq;
pci_set_drvdata(pdev, dev);
hw->mdev = mei_mm_init(&dev->pdev->dev,
hw->pool_vaddr, hw->pool_paddr, hw->pool_size);
if (IS_ERR_OR_NULL(hw->mdev))
goto deregister_mei;
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
/*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
* Use domain runtime pm callbacks instead.
*/
if (!pci_dev_run_wake(pdev))
mei_txe_set_pm_domain(dev);
pm_runtime_put_noidle(&pdev->dev);
if (!nopg)
pm_runtime_allow(&pdev->dev);
return 0;
deregister_mei:
mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
free_device:
if (hw->pool_release)
hw->pool_release(hw);
mei_txe_pci_iounmap(pdev, hw);
kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
}
Example 11: sptlrpc_lproc_fini
void sptlrpc_lproc_fini(void)
{
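/* the debugfs dir may be NULL (never created) or an ERR_PTR (creation failed) */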
if (!IS_ERR_OR_NULL(sptlrpc_debugfs_dir))
ldebugfs_remove(&sptlrpc_debugfs_dir);
}
Example 12: mdp4_dsi_video_on
int mdp4_dsi_video_on(struct platform_device *pdev)
{
int dsi_width;
int dsi_height;
int dsi_bpp;
int dsi_border_clr;
int dsi_underflow_clr;
int dsi_hsync_skew;
int hsync_period;
int hsync_ctrl;
int vsync_period;
int display_hctl;
int display_v_start;
int display_v_end;
int active_hctl;
int active_h_start;
int active_h_end;
int active_v_start;
int active_v_end;
int ctrl_polarity;
int h_back_porch;
int h_front_porch;
int v_back_porch;
int v_front_porch;
int hsync_pulse_width;
int vsync_pulse_width;
int hsync_polarity;
int vsync_polarity;
int data_en_polarity;
int hsync_start_x;
int hsync_end_x;
uint8 *buf;
unsigned int buf_offset;
int bpp;
struct fb_info *fbi;
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
int ret = 0;
int cndx = 0;
struct vsycn_ctrl *vctrl;
struct msm_panel_info *pinfo;
vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
/* only dereference mfd after the NULL check above */
pinfo = &mfd->panel_info;
mutex_lock(&mfd->dma->ov_mutex);
vctrl->mfd = mfd;
vctrl->dev = mfd->fbi->dev;
vctrl->blt_ctrl = pinfo->lcd.blt_ctrl;
vctrl->vsync_irq_enabled = 0;
vsync_irq_cnt = 0;
/* mdp clock on */
mdp_clk_ctrl(1);
fbi = mfd->fbi;
var = &fbi->var;
pipe = mdp4_dsi_video_alloc_base_pipe();
if (IS_ERR_OR_NULL(pipe)) {
mutex_unlock(&mfd->dma->ov_mutex);
return -EPERM;
}
if (mfd->panel_info.pdest == DISPLAY_4)
mdp4_overlay_panel_mode(MDP4_PANEL_DSI_VIDEO_DMA_S,
pipe->mixer_num);
else
mdp4_overlay_panel_mode(MDP4_PANEL_DSI_VIDEO, pipe->mixer_num);
bpp = fbi->var.bits_per_pixel / 8;
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
atomic_set(&vctrl->suspend, 0);
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
pipe->srcp0_ystride = fbi->fix.line_length;
pipe->bpp = bpp;
if (mfd->display_iova)
pipe->srcp0_addr = mfd->display_iova + buf_offset;
else
//......... part of the code is omitted here .........
Example 13: mini_isp_debug_load_reg
int mini_isp_debug_load_reg(char *reg_file, u32 *reg_key, u32 reg_max, u32 *reg_cnt)
{
struct kstat stat;
mm_segment_t fs;
struct file *fp = NULL;
int file_flag = O_RDONLY;
ssize_t ret = 0;
u32 addr = 0;
char addr_array[8] = {0};
char temp;
bool bRegStart = false;
if (NULL == reg_file) {
print_error("%s param error", __func__);
return -EINVAL;
}
print_debug("enter %s", __func__);
/* must have the following 2 statement */
fs = get_fs();
set_fs(KERNEL_DS);
fp = filp_open(reg_file, file_flag, 0666);
if (IS_ERR_OR_NULL(fp)) {
print_debug("no debug configuration file(%s) - do nothing, just skip it!\n", reg_file);
set_fs(fs); /* restore the original address limit before the early return */
return -1;
}
if (0 != vfs_stat(reg_file, &stat)) {
print_error("failed to get file state!");
goto ERROR;
}
*reg_cnt = 0;
print_debug("file size : %d", (u32) stat.size);
while (0 < vfs_read(fp, &temp, 1, &fp->f_pos)) {
switch (temp) {
case '{':
bRegStart = true;
if (0 == vfs_read(fp, addr_array, 7, &fp->f_pos))
goto ERROR;
addr = mini_atoi16(addr_array);
if (*reg_cnt < reg_max){
reg_key[*reg_cnt]=addr;
*reg_cnt=*reg_cnt+1;
}
break;
case '}':
bRegStart = false;
break;
default:
break;
}
}
ERROR:
/* must have the following 1 statement; after the label, so error paths restore the limit too */
set_fs(fs);
if (NULL != fp)
filp_close(fp, 0);
return ret;
}
Example 14: SysDvfsInitialize
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
IMG_INT32 opp_count;
IMG_UINT32 i, *freq_list;
struct opp *opp;
unsigned long freq;
/**
* We query and store the list of SGX frequencies just this once under the
* assumption that they are unchanging, e.g. no disabling of high frequency
* option for thermal management. This is currently valid for 4430 and 4460.
*/
rcu_read_lock();
opp_count = opp_get_opp_count(&gpsPVRLDMDev->dev);
if (opp_count < 1)
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
return PVRSRV_ERROR_NOT_SUPPORTED;
}
/**
* Allocate the frequency list with a slot for each available frequency plus
* one additional slot to hold a designated frequency value to assume when in
* an unknown frequency state.
*/
freq_list = kmalloc((opp_count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
if (!freq_list)
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
/**
* Fill in frequency list from lowest to highest then finally the "unknown"
* frequency value. We use the highest available frequency as our assumed value
* when in an unknown state, because it is safer for APM and hardware recovery
* timers to be longer than intended rather than shorter.
*/
freq = 0;
for (i = 0; i < opp_count; i++)
{
opp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &freq);
if (IS_ERR_OR_NULL(opp))
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", i));
kfree(freq_list);
return PVRSRV_ERROR_NOT_SUPPORTED;
}
freq_list[i] = (IMG_UINT32)freq;
freq++; /* step past the OPP just found so the next ceil lookup returns the next higher one */
}
rcu_read_unlock();
freq_list[opp_count] = freq_list[opp_count - 1];
psSysSpecificData->ui32SGXFreqListSize = opp_count + 1;
psSysSpecificData->pui32SGXFreqList = freq_list;
/* Start in unknown state - no frequency request to DVFS yet made */
psSysSpecificData->ui32SGXFreqListIndex = opp_count;
return PVRSRV_OK;
}
Example 15: register_memory
static int register_memory(void)
{
int result;
unsigned long paddr;
void *kvptr;
unsigned long kvaddr;
unsigned long mem_len;
mutex_lock(&acdb_data.acdb_mutex);
acdb_data.ion_client =
msm_ion_client_create(UINT_MAX, "audio_acdb_client");
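/* msm_ion_client_create() returns an ERR_PTR on failure */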
if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
pr_err("%s: Could not register ION client!!!\n", __func__);
result = PTR_ERR(acdb_data.ion_client);
goto err;
}
acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
atomic_read(&acdb_data.map_handle));
if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
pr_err("%s: Could not import map handle!!!\n", __func__);
result = PTR_ERR(acdb_data.ion_handle);
goto err_ion_client;
}
result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
&paddr, (size_t *)&mem_len);
if (result != 0) {
pr_err("%s: Could not get phys addr!!!\n", __func__);
goto err_ion_handle;
}
kvptr = ion_map_kernel(acdb_data.ion_client,
acdb_data.ion_handle, 0);
if (IS_ERR_OR_NULL(kvptr)) {
pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
result = PTR_ERR(kvptr);
goto err_ion_handle;
}
kvaddr = (unsigned long)kvptr;
atomic64_set(&acdb_data.paddr, paddr);
atomic64_set(&acdb_data.kvaddr, kvaddr);
atomic64_set(&acdb_data.mem_len, mem_len);
mutex_unlock(&acdb_data.acdb_mutex);
pr_debug("%s done! paddr = 0x%lx, "
"kvaddr = 0x%lx, len = x%lx\n",
__func__,
(long)atomic64_read(&acdb_data.paddr),
(long)atomic64_read(&acdb_data.kvaddr),
(long)atomic64_read(&acdb_data.mem_len));
return result;
err_ion_handle:
ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
ion_client_destroy(acdb_data.ion_client);
err:
atomic64_set(&acdb_data.mem_len, 0);
mutex_unlock(&acdb_data.acdb_mutex);
return result;
}