This article collects typical usage examples of the ATOMIC_INIT function in C++. If you are struggling with questions such as what exactly C++ ATOMIC_INIT does, how to use it, or what it looks like in practice, the curated code examples below may help.
Fifteen code examples of ATOMIC_INIT are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
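Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: a static atomic_t is initialized at compile time with ATOMIC_INIT and then updated with the atomic_* helpers. The names demo_open_count, demo_open and demo_release, and the single-user policy, are purely illustrative and do not come from any example below.

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t demo_open_count = ATOMIC_INIT(0);	/* starts life at 0 */

static int demo_open(void)
{
	if (atomic_inc_return(&demo_open_count) > 1) {	/* allow only one user */
		atomic_dec(&demo_open_count);
		return -EBUSY;
	}
	return 0;
}

static void demo_release(void)
{
	atomic_dec(&demo_open_count);
}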
Example 1: DECLARE_WAIT_QUEUE_HEAD
};
static unsigned char key_val;
// interrupt flag: the interrupt service routine sets it to 1, key_int_drv_read clears it
static volatile int ev_press = 0;
static struct fasync_struct *button_async;
static DECLARE_WAIT_QUEUE_HEAD(button_waitq);
static DECLARE_MUTEX(button_lock);
#ifdef __ATOMIC__
static atomic_t canopen = ATOMIC_INIT(1); // define an atomic variable and initialize it to 1
#endif
struct pin_desc pins_desc[4] = {
{S3C2410_GPF0, 0x01},
{S3C2410_GPF2, 0x02},
{S3C2410_GPG3, 0x03},
{S3C2410_GPG11, 0x04},
};
static irqreturn_t key_int_irq(int irq, void *dev_id)
{
struct pin_desc *pindesc = (struct pin_desc *)dev_id;
unsigned int pinval;
pinval = s3c2410_gpio_getpin(pindesc->pin);
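The canopen atomic defined above is the usual building block of an exclusive-open device: open() claims the single available token with atomic_dec_and_test() and release() returns it. The excerpt is truncated before those functions, so the sketch below is illustrative only; the function names are made up and not part of the original driver.

static int key_demo_open(struct inode *inode, struct file *file)
{
	if (!atomic_dec_and_test(&canopen)) {	/* token already taken */
		atomic_inc(&canopen);		/* undo our decrement */
		return -EBUSY;
	}
	return 0;
}

static int key_demo_release(struct inode *inode, struct file *file)
{
	atomic_inc(&canopen);			/* hand the token back */
	return 0;
}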
Example 2: DEFINE_MUTEX
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>
char iommu_dummy[2*SZ_64K-4];
struct msm_iova_data {
struct rb_node node;
struct mem_pool *pools;
int npools;
struct iommu_domain *domain;
int domain_num;
};
static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);
int msm_use_iommu()
{
return iommu_present(&platform_bus_type);
}
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
unsigned long page_size,
int cached)
{
int ret = 0;
int i = 0;
unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
Example 3: ATOMIC_INIT
#include <linux/clk.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/arch/dma.h>
#include <asm/delay.h>
#include <asm/atomic.h>
/*!
* This variable is used to control the DMA clock.
* It counts the number of active channels.
*/
static atomic_t g_dma_actived = ATOMIC_INIT(0);
/*!
* This variable points to a proc file which contains the information
* of DMA channels
*/
static struct proc_dir_entry *g_proc_dir;
/*!
* The dma channels
*/
static mxc_dma_channel_t g_dma_channels[MAX_DMA_CHANNELS];
static mx2_dma_priv_t g_dma_privates[MXC_DMA_CHANNELS];
static mx2_dma_bd_t g_dma_bd_table[MXC_DMA_CHANNELS][MAX_BD_SIZE];
static DEFINE_SPINLOCK(dma_list_lock);
Example 4: DEFINE_MUTEX
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
/* User keyring creation semaphore */
static DEFINE_MUTEX(key_user_keyring_mutex);
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = ATOMIC_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
.uid = 0,
.user_ns = &init_user_ns,
};
/*
* Install the user and user session keyrings for the current process's UID.
*/
int install_user_keyrings(void)
{
struct user_struct *user;
const struct cred *cred;
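Example 4 shows the other common place ATOMIC_INIT appears: inside a designated initializer for a statically defined object whose reference counts must already be non-zero at boot. A minimal sketch of that idiom, using made-up struct and field names rather than the key_user structure above:

struct demo_object {
	atomic_t refcount;	/* how many users hold a reference */
	atomic_t nchildren;	/* how many children are attached */
};

static struct demo_object demo_root = {
	.refcount  = ATOMIC_INIT(1),	/* the static object owns one reference */
	.nchildren = ATOMIC_INIT(0),
};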
Example 5: __cfs_fail_check_set
int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
static atomic_t cfs_fail_count = ATOMIC_INIT(0);
LASSERT(!(id & CFS_FAIL_ONCE));
if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
(CFS_FAILED | CFS_FAIL_ONCE)) {
atomic_set(&cfs_fail_count, 0); /* paranoia */
return 0;
}
/* Fail 1/cfs_fail_val times */
if (cfs_fail_loc & CFS_FAIL_RAND) {
if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
return 0;
}
/* Skip the first cfs_fail_val, then fail */
if (cfs_fail_loc & CFS_FAIL_SKIP) {
if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
return 0;
}
/* check cfs_fail_val... */
if (set == CFS_FAIL_LOC_VALUE) {
if (cfs_fail_val != -1 && cfs_fail_val != value)
return 0;
}
/* Fail cfs_fail_val times, overridden by FAIL_ONCE */
if (cfs_fail_loc & CFS_FAIL_SOME &&
(!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
int count = atomic_inc_return(&cfs_fail_count);
if (count >= cfs_fail_val) {
set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
atomic_set(&cfs_fail_count, 0);
/* we lost the race to increment */
if (count > cfs_fail_val)
return 0;
}
}
if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
(value & CFS_FAIL_ONCE))
set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
/* Lost race to set CFS_FAILED_BIT. */
if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
/* If CFS_FAIL_ONCE is set, only one process can fail;
* otherwise multiple processes can fail at the same time. */
if (cfs_fail_loc & CFS_FAIL_ONCE)
return 0;
}
switch (set) {
case CFS_FAIL_LOC_NOSET:
case CFS_FAIL_LOC_VALUE:
break;
case CFS_FAIL_LOC_ORSET:
cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
break;
case CFS_FAIL_LOC_RESET:
cfs_fail_loc = value;
break;
default:
LASSERTF(0, "called with bad set %u\n", set);
break;
}
return 1;
}
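The CFS_FAIL_SKIP branch above ("skip the first cfs_fail_val calls, then fail") works because atomic_inc_return() hands every concurrent caller a distinct, monotonically increasing count. A stripped-down sketch of just that counting pattern, with illustrative names that are not part of libcfs:

static atomic_t demo_hit_count = ATOMIC_INIT(0);

/* Return non-zero once more than 'skip' calls have been made; the count is
 * taken atomically, so concurrent callers never observe the same value. */
static int demo_should_fail(int skip)
{
	return atomic_inc_return(&demo_hit_count) > skip;
}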
Example 6: ATOMIC_INIT
/* Random magic number */
#define SYSFS_MAGIC 0x62656572
static struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
struct kmem_cache *sysfs_dir_cachep;
static const struct super_operations sysfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
struct sysfs_dirent sysfs_root = {
.s_name = "",
.s_count = ATOMIC_INIT(1),
.s_flags = SYSFS_DIR,
.s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct dentry *root;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = SYSFS_MAGIC;
sb->s_op = &sysfs_ops;
sb->s_time_gran = 1;
Example 7: ATOMIC_INIT
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#ifdef CONFIG_POWERSUSPEND
#include <linux/powersuspend.h>
#endif
#include <asm/cputime.h>
static atomic_t active_count = ATOMIC_INIT(0);
static unsigned long stored_timer_rate;
struct cpufreq_interactivex_cpuinfo {
struct timer_list cpu_timer;
int timer_idlecancel;
u64 time_in_idle;
u64 idle_exit_time;
u64 timer_run_time;
int idling;
u64 target_set_time;
u64 target_set_time_in_idle;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
unsigned int target_freq;
int governor_enabled;
Example 8: ATOMIC_INIT
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include "common.h"
#include "cpuidle.h"
#include "hardware.h"
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
if (atomic_inc_return(&master) == num_online_cpus()) {
/*
* With this lock, we prevent another CPU from exiting and
* re-entering this function and becoming the master.
*/
if (!spin_trylock(&master_lock))
goto idle;
imx6q_set_lpm(WAIT_UNCLOCKED);
cpu_do_idle();
imx6q_set_lpm(WAIT_CLOCKED);
Example 9: DEFINE_MUTEX
* request probing to be deferred by returning -EPROBE_DEFER from its probe hook
*
* Deferred probe maintains two lists of devices, a pending list and an active
* list. A driver returning -EPROBE_DEFER causes the device to be added to the
* pending list. A successful driver probe will trigger moving all devices
* from the pending to the active list so that the workqueue will eventually
* retry them.
*
* The deferred_probe_mutex must be held any time the deferred_probe_*_list
* of the (struct device*)->p->deferred_probe pointers are manipulated
*/
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
{
struct device *dev;
struct device_private *private;
/*
* This block processes every device in the deferred 'active' list.
* Each device is removed from the active list and passed to
* bus_probe_device() to re-attempt the probe. The loop continues
* until every device in the active list is removed and retried.
*
* Note: Once the device is removed from the list and the mutex is
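The deferred-probe lists described in the header comment of Example 9 are fed from the other side by drivers whose probe() returns -EPROBE_DEFER when a resource is not yet available. A hedged, consumer-side sketch of such a probe; the driver name and the clock lookup are illustrative and not part of the file excerpted above:

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* may be -EPROBE_DEFER; the driver core
					 * then places the device on the pending
					 * list and retries it later */
	return 0;
}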
Example 10: oz_send_isoc_frame
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*
* Counts the uncompleted isoc frames submitted to the network card.
*/
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
*/
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
{oz_usb_init,
oz_usb_term,
oz_usb_start,
oz_usb_stop,
oz_usb_rx,
oz_usb_heartbeat,
oz_usb_farewell,
OZ_APPID_USB},
{oz_def_app_init,
oz_def_app_term,
Example 11: mpic_setup_this_cpu
{
mpic_setup_this_cpu();
if (cpu_has_feature(CPU_FTR_DBELL))
doorbell_setup_this_cpu();
}
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_KEXEC
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
#endif
};
#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
local_irq_disable();
if (secondary) {
atomic_inc(&kexec_down_cpus);
/* loop forever */
while (1);
}
}
static void mpc85xx_smp_kexec_down(void *arg)
{
if (ppc_md.kexec_cpu_down)
Example 12: int
int cx18_first_minor;
/* Callback for registering extensions */
int (*cx18_ext_init)(struct cx18 *);
EXPORT_SYMBOL(cx18_ext_init);
/* add your revision and whatnot here */
static struct pci_device_id cx18_pci_tbl[] = {
{PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
static atomic_t cx18_instance = ATOMIC_INIT(0);
/* Parameter declarations */
static int cardtype[CX18_MAX_CARDS];
static int tuner[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static unsigned cardtype_c = 1;
static unsigned tuner_c = 1;
static unsigned radio_c = 1;
static char pal[] = "--";
Example 13: ATOMIC_INIT
CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
CHARGERFAULT_INTR_OFFSET, /* Bit 21 EXT_CHRG */
CHARGERFAULT_INTR_OFFSET, /* Bit 22 INT_CHRG */
RSV_INTR_OFFSET, /* Bit 23 Reserved */
};
static int *twl6030_interrupt_mapping = twl6030_interrupt_mapping_table;
/*----------------------------------------------------------------------*/
static unsigned twl6030_irq_base, twl6030_irq_end;
static int twl_irq;
static bool twl_irq_wake_enabled;
static struct task_struct *task;
static struct completion irq_event;
static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
static u8 vbatmin_hi_threshold;
static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
int chained_wakeups;
switch (pm_event) {
case PM_SUSPEND_PREPARE:
chained_wakeups = atomic_read(&twl6030_wakeirqs);
if (chained_wakeups && !twl_irq_wake_enabled) {
if (enable_irq_wake(twl_irq))
pr_err("twl6030 IRQ wake enable failed\n");
Example 14: DEFINE_SPINLOCK
static smd_channel_t *smd_channel;
static int initialized;
static wait_queue_head_t newserver_wait;
static wait_queue_head_t smd_wait;
static wait_queue_head_t init_wait;
static int smd_wait_count; /* odd while waiting */
static DEFINE_SPINLOCK(local_endpoints_lock);
static DEFINE_SPINLOCK(remote_endpoints_lock);
static DEFINE_SPINLOCK(server_list_lock);
static struct workqueue_struct *rpcrouter_workqueue;
static struct wake_lock rpcrouter_wake_lock;
static int rpcrouter_need_len;
static atomic_t next_xid = ATOMIC_INIT(1);
static atomic_t next_mid = ATOMIC_INIT(0);
static void do_read_data(struct work_struct *work);
static void do_create_pdevs(struct work_struct *work);
static void do_create_rpcrouter_pdev(struct work_struct *work);
static DECLARE_WORK(work_read_data, do_read_data);
static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
static atomic_t rpcrouter_pdev_created = ATOMIC_INIT(0);
#define RR_STATE_IDLE 0
#define RR_STATE_HEADER 1
#define RR_STATE_BODY 2
#define RR_STATE_ERROR 3
Example 15: D
#include <mach/debug_mm.h>
#include <mach/custmproc.h>
#include <mach/smem_pc_oem_cmd.h>
#include <linux/sched.h>
#define DEBUG
#ifdef DEBUG
#define D(fmt, args...) printk(KERN_INFO "tid = %d: " fmt , (int)current->pid , ##args)
#else
#define D(fmt, args...) do {} while (0)
#endif
#define BUFSZ (0)
static DEFINE_MUTEX(voice_lock);
atomic_t voice_started = ATOMIC_INIT(0);
static struct audio_client *voc_tx_clnt;
static struct audio_client *voc_rx_clnt;
static int q6_voice_start(void)
{
int rc = 0;
mutex_lock(&voice_lock);
if (atomic_read(&voice_started)) {
pr_err("[%s:%s] busy\n", __MM_FILE__, __func__);
rc = -EBUSY;
goto done;
}