This article collects typical usage examples of the C++ CAST_DOWN function (in the code shown here it is actually a C macro). If you are wondering what exactly CAST_DOWN does, how to call it, or what real-world uses look like, the curated code examples below should help.
A total of 15 CAST_DOWN code examples are shown below, sorted by popularity by default.
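Before the examples, a short note on what CAST_DOWN usually is: a macro that downcasts an opaque base pointer to the concrete struct that wraps it, in some codebases after checking a type tag. The exact definition differs between the projects quoted below; examples such as iter2_conjgrad and prox_zero_fun use it as a struct downcast to a specific config/data type, while the kernel examples such as ureadc and mach_msg_receive use it to reinterpret an address-sized value as another pointer or integer type. The following is only a minimal sketch of the tag-checked flavor, assuming a hypothetical layout; CAST_DOWN_SKETCH, typeid_tag, base, and conf are invented names, not the real definition.

/* Minimal sketch only -- not the actual CAST_DOWN definition used by the examples below. */
#include <assert.h>

struct base { const void* typeid_tag; };               /* hypothetical common header */

#define CAST_DOWN_SKETCH(T, x) \
    (assert((x)->typeid_tag == &typeid_##T), (struct T*)(x))

static const int typeid_conf;                          /* unique address used as a type tag */
struct conf { struct base super; int maxiter; };       /* hypothetical derived struct */

static int get_maxiter(struct base* generic)
{
    /* checked downcast: trips the assert if 'generic' does not wrap a struct conf */
    struct conf* c = CAST_DOWN_SKETCH(conf, generic);
    return c->maxiter;
}

The create side would set typeid_tag to &typeid_conf before handing out the base pointer, so the downcast can verify it is landing on the type it expects.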
Example 1: iter2_conjgrad
void iter2_conjgrad(iter_conf* _conf,
        const struct operator_s* normaleq_op,
        unsigned int D,
        const struct operator_p_s* prox_ops[D],
        const struct linop_s* ops[D],
        const float* biases[D],
        const struct operator_p_s* xupdate_op,
        long size, float* image, const float* image_adj,
        struct iter_monitor_s* monitor)
{
    assert(0 == D);
    assert(NULL == prox_ops);
    assert(NULL == ops);
    assert(NULL == biases);
    UNUSED(xupdate_op);

    auto conf = CAST_DOWN(iter_conjgrad_conf, _conf);

    float eps = md_norm(1, MD_DIMS(size), image_adj);

    if (checkeps(eps))
        goto cleanup;

    conjgrad(conf->maxiter, conf->l2lambda, eps * conf->tol, size, select_vecops(image_adj),
            OPERATOR2ITOP(normaleq_op), image, image_adj, monitor);

cleanup:
    ;
}
Example 2: iter2_fista
void iter2_fista(iter_conf* _conf,
        const struct operator_s* normaleq_op,
        unsigned int D,
        const struct operator_p_s* prox_ops[D],
        const struct linop_s* ops[D],
        const float* biases[D],
        const struct operator_p_s* xupdate_op,
        long size, float* image, const float* image_adj,
        struct iter_monitor_s* monitor)
{
    assert(D == 1);
    assert(NULL == biases);
#if 0
    assert(NULL == ops);
#else
    UNUSED(ops);
#endif
    UNUSED(xupdate_op);

    auto conf = CAST_DOWN(iter_fista_conf, _conf);

    float eps = md_norm(1, MD_DIMS(size), image_adj);

    if (checkeps(eps))
        goto cleanup;

    assert((conf->continuation >= 0.) && (conf->continuation <= 1.));

    fista(conf->maxiter, eps * conf->tol, conf->step, conf->continuation, conf->hogwild, size, select_vecops(image_adj),
            OPERATOR2ITOP(normaleq_op), OPERATOR_P2ITOP(prox_ops[0]), image, image_adj, monitor);

cleanup:
    ;
}
Example 3: wavelet_inverse
static void wavelet_inverse(const linop_data_t* _data, data_t* out, const data_t* _in)
{
    struct wavelet_plan_s* plan = CAST_DOWN(wavelet_data_s, _data)->plan;
    data_t* in = (data_t*)_in;

    int numdims_tr = plan->numdims_tr;
    int numPixel_tr = plan->numPixel_tr;
    int numCoeff_tr = plan->numCoeff_tr;
    int b;

    for (b = 0; b < plan->batchSize; b++)
    {
        if (numdims_tr == 2)
        {
            if (plan->use_gpu == 0)
                iwt2_cpu(plan, out + b * numPixel_tr, in + b * numCoeff_tr);
#ifdef USE_CUDA
            if (plan->use_gpu == 1)
                iwt2_gpu(plan, out + b * numPixel_tr, in + b * numCoeff_tr);
#endif
        }

        if (numdims_tr == 3)
        {
            if (plan->use_gpu == 0)
                iwt3_cpu(plan, out + b * numPixel_tr, in + b * numCoeff_tr);
#ifdef USE_CUDA
            if (plan->use_gpu == 1)
                iwt3_gpu(plan, out + b * numPixel_tr, in + b * numCoeff_tr);
#endif
        }
    }
}
Example 4: grad_op_free
static void grad_op_free(const linop_data_t* _data)
{
    const struct grad_s* data = CAST_DOWN(grad_s, _data);

    free(data->dims);
    free((void*)data);
}
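The free callbacks in these examples (grad_op_free above, and rvc_free and wavelet3_thresh_del below) all follow the same pattern: downcast the opaque handle, free the owned members, then free the struct itself. A self-contained sketch of that allocate/downcast/free pairing, with every name (handle, grad_like, grad_like_create, grad_like_free) invented for illustration:

/* Sketch of the allocate/downcast/free pattern; all names are illustrative. */
#include <stdlib.h>

struct handle { int tag; };                            /* hypothetical opaque base */
struct grad_like { struct handle base; long* dims; };  /* hypothetical concrete data */

static struct handle* grad_like_create(int N)
{
    struct grad_like* data = malloc(sizeof *data);
    data->base.tag = 1;                                /* mark the concrete type */
    data->dims = calloc(N, sizeof *data->dims);
    return &data->base;                                /* hand out only the base pointer */
}

static void grad_like_free(struct handle* h)
{
    struct grad_like* data = (struct grad_like*)h;     /* the CAST_DOWN step */
    free(data->dims);
    free(data);
}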
Example 5: ureadc
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
    if (uio_resid(uio) <= 0)
        panic("ureadc: non-positive resid");
    uio_update(uio, 0);
    if (uio->uio_iovcnt == 0)
        panic("ureadc: non-positive iovcnt");
    if (uio_curriovlen(uio) <= 0)
        panic("ureadc: non-positive iovlen");

    switch ((int) uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
    case UIO_USERISPACE32:
    case UIO_USERISPACE:
    case UIO_USERSPACE64:
    case UIO_USERISPACE64:
        if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0)
            return (EFAULT);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        *(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c;
        break;

    default:
        break;
    }

    uio_update(uio, 1);
    return (0);
}
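The kernel code above uses a different flavor of CAST_DOWN: it narrows an address held in a 64-bit-clean field (iov_base in this example) to a native pointer type, with no type tag involved. A minimal sketch of that flavor, where CAST_DOWN_ADDR and store_byte are invented names rather than the kernel's actual macro:

/* Sketch of the address-narrowing flavor; not the kernel's real definition. */
#include <stdint.h>

#define CAST_DOWN_ADDR(type, addr)  ((type)(uintptr_t)(addr))

static void store_byte(uint64_t iov_base, int c)
{
    /* narrow the 64-bit address to a native pointer before writing through it */
    char* p = CAST_DOWN_ADDR(char*, iov_base);
    *p = (char)c;
}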
Example 6: wavelet3_thresh_del
static void wavelet3_thresh_del(const operator_data_t* _data)
{
    const struct wavelet3_thresh_s* data = CAST_DOWN(wavelet3_thresh_s, _data);

    free((void*)data->dims);
    free((void*)data->minsize);
    free((void*)data);
}
Example 7: prox_zero_fun
/**
 * Proximal function for f(z) = 0
 * Solution is z = x_plus_u
 *
 * @param prox_data should be of type prox_zero_data
 * @param mu proximal penalty
 * @param z output
 * @param x_plus_u input
 */
static void prox_zero_fun(const operator_data_t* prox_data, float mu, float* z, const float* x_plus_u)
{
    UNUSED(mu);
    struct prox_zero_data* pdata = CAST_DOWN(prox_zero_data, prox_data);

    md_copy(1, MD_DIMS(pdata->size), z, x_plus_u, FL_SIZE);
}
Example 8: linop_matrix_apply_normal
static void linop_matrix_apply_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
    struct operator_matrix_s* data = CAST_DOWN(operator_matrix_s, _data);

    if (NULL == data->mat_gram) {

        complex float* tmp = md_alloc_sameplace(data->N, data->out_dims, CFL_SIZE, src);

        linop_matrix_apply(_data, tmp, src);
        linop_matrix_apply_adjoint(_data, dst, tmp);

        md_free(tmp);

    } else {

        const complex float* mat_gram = data->mat_gram;

#ifdef USE_CUDA
        if (cuda_ondevice(src)) {

            if (NULL == data->mat_gram_gpu)
                data->mat_gram_gpu = md_gpu_move(2 * data->N, data->grm_dims, data->mat_gram, CFL_SIZE);

            mat_gram = data->mat_gram_gpu;
        }
#endif
        md_ztenmul(2 * data->N, data->gout_dims, dst, data->gin_dims, src, data->grm_dims, mat_gram);
    }
}
Example 9: rvc_free
static void rvc_free(const linop_data_t* _data)
{
    const struct rvc_s* data = CAST_DOWN(rvc_s, _data);

    free((void*)data->dims);
    free((void*)data);
}
Example 10: linop_matrix_apply_adjoint
static void linop_matrix_apply_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
    const struct operator_matrix_s* data = CAST_DOWN(operator_matrix_s, _data);
    unsigned int N = data->mat_iovec->N;

    //debug_printf(DP_DEBUG1, "compute adjoint\n");

    md_clear2(N, data->domain_iovec->dims, data->domain_iovec->strs, dst, CFL_SIZE);

    // FIXME check all the cases where computation can be done with blas

    if (cgemm_forward_standard(data)) {

        long L = md_calc_size(data->T_dim, data->domain_iovec->dims);

        blas_cgemm('N', 'N', L, data->K, data->T, 1.,
                L, (const complex float (*)[])src,
                data->T, (const complex float (*)[])data->mat_conj,
                0., L, (complex float (*)[])dst);

    } else {

        md_zfmacc2(N, data->max_dims, data->domain_iovec->strs, dst, data->codomain_iovec->strs, src, data->mat_iovec->strs, data->mat);
    }
}
Example 11: wait_queue_assert_wait
/*
 * Routine: wait_queue_assert_wait
 * Purpose:
 *   Insert the current thread into the supplied wait queue,
 *   waiting for a particular event to be posted to that queue.
 * Conditions:
 *   nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
    wait_queue_t wq,
    event_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline)
{
    spl_t s;
    wait_result_t ret;
    thread_t thread = current_thread();

    /* If it is an invalid wait queue, you can't wait on it */
    if (!wait_queue_is_valid(wq))
        return (thread->wait_result = THREAD_RESTART);

    s = splsched();
    wait_queue_lock(wq);
    thread_lock(thread);

    ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t, event),
                                          interruptible, deadline, thread);

    thread_unlock(thread);
    wait_queue_unlock(wq);
    splx(s);

    return (ret);
}
Example 12: wait_queue_wakeup_all
/*
 * Routine: wait_queue_wakeup_all
 * Purpose:
 *   Wake up some number of threads that are in the specified
 *   wait queue and waiting on the specified event.
 * Conditions:
 *   Nothing locked
 * Returns:
 *   KERN_SUCCESS - Threads were woken up
 *   KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
    wait_queue_t wq,
    event_t event,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
//  if (!wq->wq_interlock.lock_data) {      /* (BRINGUP) */
//      panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);  /* (BRINGUP) */
//  }
    ret = wait_queue_wakeup64_all_locked(
            wq, CAST_DOWN(event64_t, event),
            result, TRUE);
    /* lock released */
    splx(s);

    return ret;
}
Example 13: wait_queue_wakeup_thread
/*
 * Routine: wait_queue_wakeup_thread
 * Purpose:
 *   Wake up the particular thread that was specified, if and only
 *   if it was in this wait queue (or one of its set queues)
 *   and waiting on the specified event.
 *
 *   This is much safer than just removing the thread from
 *   whatever wait queue it happens to be on. For instance, it
 *   may have already been awoken from the wait you intended to
 *   interrupt and waited on something else (like another
 *   semaphore).
 * Conditions:
 *   nothing of interest locked
 *   we need to assume spl needs to be raised
 * Returns:
 *   KERN_SUCCESS - the thread was found waiting and awakened
 *   KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
    wait_queue_t wq,
    event_t event,
    thread_t thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t, event), thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }

    splx(s);
    return KERN_NOT_WAITING;
}
Example 14: mach_msg_receive
/*
 * Routine: mach_msg_receive [Kernel Internal]
 * Purpose:
 *   Routine for kernel-task threads to actively receive a message.
 *
 *   Unlike being dispatched to by ipc_kobject_server() or the
 *   reply part of mach_msg_rpc_from_kernel(), this routine
 *   looks up the receive port name in the kernel's port
 *   namespace and copies out received port rights to that namespace
 *   as well. Out-of-line memory is copied out to the kernel's
 *   address space (rather than just providing the vm_map_copy_t).
 * Conditions:
 *   Nothing locked.
 * Returns:
 *   MACH_MSG_SUCCESS - Received a message.
 *   See <mach/message.h> for the list of MACH_RCV_XXX errors.
 */
mach_msg_return_t
mach_msg_receive(
    mach_msg_header_t *msg,
    mach_msg_option_t option,
    mach_msg_size_t rcv_size,
    mach_port_name_t rcv_name,
    mach_msg_timeout_t rcv_timeout,
    void (*continuation)(mach_msg_return_t),
    __unused mach_msg_size_t slist_size)
{
    thread_t self = current_thread();
    ipc_space_t space = current_space();
    ipc_object_t object;
    ipc_mqueue_t mqueue;
    mach_msg_return_t mr;

    mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
    if (mr != MACH_MSG_SUCCESS) {
        return mr;
    }
    /* hold ref for object */

    self->ith_msg_addr = CAST_DOWN(mach_vm_address_t, msg);
    self->ith_object = object;
    self->ith_msize = rcv_size;
    self->ith_option = option;
    self->ith_continuation = continuation;

    ipc_mqueue_receive(mqueue, option, rcv_size, rcv_timeout, THREAD_ABORTSAFE);
    if ((option & MACH_RCV_TIMEOUT) && rcv_timeout == 0)
        thread_poll_yield(self);

    return mach_msg_receive_results();
}
Example 15: prox_rvc_apply
static void prox_rvc_apply(const operator_data_t* _data, float mu, complex float* dst, const complex float* src)
{
    UNUSED(mu);
    struct prox_rvc_data* pdata = CAST_DOWN(prox_rvc_data, _data);

    md_zreal(1, MD_DIMS(pdata->size), dst, src);
}