This page collects typical usage examples of the C++ function THArgCheck. If you have been wondering what THArgCheck does, how to call it, or where to find real-world uses of it, the curated examples below should help.
15 code examples of THArgCheck are shown below, sorted by popularity by default.
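All 15 examples lean on the same contract, so a quick recap first: THArgCheck(condition, argNumber, fmt, ...) validates one argument of the enclosing function and, when the condition is false, raises an error that names that argument by position. Below is a minimal standalone re-implementation of that contract, a sketch only; the real macro in THGeneral.h additionally records __FILE__/__LINE__ and dispatches to a replaceable error handler.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the THArgCheck contract (not the TH source): if `condition`
 * is false, report which (1-based) argument was invalid using the
 * printf-style message, then abort. */
static void arg_check_sketch(int condition, int arg_number, const char *fmt, ...)
{
  if (condition)
    return;
  va_list ap;
  va_start(ap, fmt);
  fprintf(stderr, "invalid argument %d: ", arg_number);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
  fputc('\n', stderr);
  abort();
}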
Example 1: THNN_
void THNN_(SoftMax_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int64_t dim) {
  THArgCheck(dim >= 0 && dim < input->nDimension, 4,
             "dim out of range (got %d, but input has %d dims)",
             (int)dim, input->nDimension);

  uint64_t outer_size = 1;
  uint64_t dim_size = input->size[dim];
  uint64_t inner_size = 1;
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= input->size[i];
  for (int64_t i = dim + 1; i < input->nDimension; ++i)
    inner_size *= input->size[i];

  input = THTensor_(newContiguous)(input);
  THTensor_(resizeAs)(output, input);

  real *input_data_base = THTensor_(data)(input);
  real *output_data_base = THTensor_(data)(output);

  uint64_t dim_stride = inner_size;
  uint64_t outer_stride = dim_size * dim_stride;

  /* SOFTMAX_SIZE_TYPE / SOFTMAX_CAST_TYPE are helper macros defined elsewhere
     in the file; they pick a loop-index type that OpenMP accepts. */
  SOFTMAX_SIZE_TYPE i, d;
  #pragma omp parallel for private(i, d)
  for (i = 0; i < SOFTMAX_CAST_TYPE (outer_size * inner_size); i++) {
    uint64_t outer_idx = i / inner_size;
    uint64_t inner_idx = i % inner_size;
    real *input_data = input_data_base + outer_idx * outer_stride + inner_idx;
    real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;

    /* max-shift for numerical stability: exp() never sees a positive argument */
    real input_max = -THInf;
    for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
      if (input_data[d * dim_stride] >= input_max)
        input_max = input_data[d * dim_stride];
    }

    accreal sum = 0;
    for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
      real z = exp(input_data[d * dim_stride] - input_max);
      output_data[d * dim_stride] = z;
      sum += z;
    }

    real invsum = 1 / sum; // NOTE: truncate sum to real once
    for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) {
      output_data[d * dim_stride] *= invsum;
    }
  }

  THTensor_(free)(input);
}
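The kernel above flattens an N-dimensional softmax into an outer x dim x inner walk over contiguous memory: elements that differ only along `dim` sit `inner_size` apart. A self-contained sketch of the same indexing and max-shift trick on a plain float buffer (hypothetical helper, not the TH code):

#include <math.h>
#include <stddef.h>

/* Softmax over the middle axis of a contiguous [outer][dim][inner] array. */
static void softmax_mid(const float *in, float *out,
                        size_t outer, size_t dim, size_t inner)
{
  for (size_t o = 0; o < outer; o++) {
    for (size_t i = 0; i < inner; i++) {
      const float *x = in  + o * dim * inner + i;
      float       *y = out + o * dim * inner + i;

      float m = -INFINITY;                 /* running max for stability */
      for (size_t d = 0; d < dim; d++)
        if (x[d * inner] > m) m = x[d * inner];

      double s = 0;                        /* wide accumulator, like accreal */
      for (size_t d = 0; d < dim; d++) {
        y[d * inner] = expf(x[d * inner] - m);
        s += y[d * inner];
      }

      for (size_t d = 0; d < dim; d++)     /* normalize so the slice sums to 1 */
        y[d * inner] = (float)(y[d * inner] / s);
    }
  }
}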
Example 2: THCudaTensor_select
void THCudaTensor_select(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension, long sliceIndex)
{
  int d;

  if(!src)
    src = self;

  THArgCheck(src->nDimension > 1, 1, "cannot select on a vector");
  THArgCheck((dimension >= 0) && (dimension < src->nDimension), 3, "out of range");
  THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size[dimension]), 4, "out of range");

  THCudaTensor_set(state, self, src);
  THCudaTensor_narrow(state, self, NULL, dimension, sliceIndex, 1);
  for(d = dimension; d < self->nDimension-1; d++)
  {
    self->size[d] = self->size[d+1];
    self->stride[d] = self->stride[d+1];
  }
  self->nDimension--;
}
Example 3: THGPUTensor_setStorage
void THGPUTensor_setStorage(THGPUTensor *self, THGPUStorage *storage_, long storageOffset_, THLongStorage *size_, THLongStorage *stride_)
{
  if (size_ && stride_)
    THArgCheck(size_->size == stride_->size, 5, "inconsistent size/stride sizes");

  THGPUTensor_rawSet(self,
                     storage_,
                     storageOffset_,
                     (size_ ? size_->size : (stride_ ? stride_->size : 0)),
                     (size_ ? size_->data : NULL),
                     (stride_ ? stride_->data : NULL));
}
Example 4: THNN_
static inline void THNN_(VolumetricUpSamplingNearest_shapeCheck)
     (THTensor *input, THTensor *gradOutput,
      int scale_factor) {
  THArgCheck(input != NULL, 2, "5D input tensor expected but got NULL");
  THArgCheck(scale_factor > 1, 4,
             "scale_factor must be greater than 1, but got: %d", scale_factor);
  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
                "4D or 5D input tensor expected but got: %s");

  if (input->nDimension == 4) {
    int nChannels    = THTensor_(size)(input, 0);
    int inputDepth   = THTensor_(size)(input, 1);
    int inputHeight  = THTensor_(size)(input, 2);
    int inputWidth   = THTensor_(size)(input, 3);
    int outputDepth  = inputDepth * scale_factor;
    int outputHeight = inputHeight * scale_factor;
    int outputWidth  = inputWidth * scale_factor;
    if (gradOutput != NULL) {
      THNN_CHECK_DIM_SIZE(gradOutput, 4, 0, nChannels);
      THNN_CHECK_DIM_SIZE(gradOutput, 4, 1, outputDepth);
      THNN_CHECK_DIM_SIZE(gradOutput, 4, 2, outputHeight);
      THNN_CHECK_DIM_SIZE(gradOutput, 4, 3, outputWidth);
    }
  } else {
    int nBatch       = THTensor_(size)(input, 0);
    int nChannels    = THTensor_(size)(input, 1);
    int inputDepth   = THTensor_(size)(input, 2);
    int inputHeight  = THTensor_(size)(input, 3);
    int inputWidth   = THTensor_(size)(input, 4);
    int outputDepth  = inputDepth * scale_factor;
    int outputHeight = inputHeight * scale_factor;
    int outputWidth  = inputWidth * scale_factor;
    if (gradOutput != NULL) {
      THNN_CHECK_DIM_SIZE(gradOutput, 5, 0, nBatch);
      THNN_CHECK_DIM_SIZE(gradOutput, 5, 1, nChannels);
      THNN_CHECK_DIM_SIZE(gradOutput, 5, 2, outputDepth);
      THNN_CHECK_DIM_SIZE(gradOutput, 5, 3, outputHeight);
      THNN_CHECK_DIM_SIZE(gradOutput, 5, 4, outputWidth);
    }
  }
}
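Nearest-neighbor upsampling replicates each input voxel scale_factor times along every spatial axis, so each output extent is exactly the input extent times scale_factor; the THNN_CHECK_DIM_SIZE calls above verify gradOutput against precisely those products. A 1-D standalone sketch of the replication rule (hypothetical helper, not the TH kernel):

/* out must hold in_n * scale elements; out[i] = in[i / scale] replicates
 * each input sample `scale` times, which is why output size = input size
 * times scale. */
static void upsample_nearest_1d(const float *in, int in_n, float *out, int scale)
{
  for (int i = 0; i < in_n * scale; i++)
    out[i] = in[i / scale];
}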
Example 5: THApkFile_seekEnd
static void THApkFile_seekEnd(THFile *self)
{
  THApkFile *dfself = (THApkFile*)(self);

  THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file");

  if(fseek(dfself->handle, 0L, SEEK_END) < 0)
  {
    dfself->file.hasError = 1;
    if(!dfself->file.isQuiet)
      THError("unable to seek at end of file");
  }
}
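The file examples here share a two-tier error protocol: misuse of the API (a closed handle) is a hard argument error via THArgCheck, while genuine I/O failures set file.hasError and only raise through THError when the file is not in quiet mode. A self-contained sketch of that split, with a hypothetical struct and names:

#include <assert.h>
#include <stdio.h>

struct my_file { FILE *handle; int has_error, is_quiet; };

/* Sketch: programmer errors fail hard; runtime I/O errors set a flag
 * and report only when the file is not quiet. */
static void my_seek_end(struct my_file *f)
{
  assert(f->handle != NULL && "attempt to use a closed file");
  if (fseek(f->handle, 0L, SEEK_END) < 0) {
    f->has_error = 1;                       /* record failure for the caller */
    if (!f->is_quiet)
      fprintf(stderr, "unable to seek at end of file\n");
  }
}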
Example 6: THDiskFile_seek
static void THDiskFile_seek(THFile *self, ssize_t position)
{
  THDiskFile *dfself = (THDiskFile*)(self);

  THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file");

#if defined(_WIN64)
  THArgCheck(position <= INT64_MAX, 2, "position must be smaller than INT64_MAX");
  if(_fseeki64(dfself->handle, (int64_t)position, SEEK_SET) < 0)
#elif defined(_WIN32)
  THArgCheck(position <= LONG_MAX, 2, "position must be smaller than LONG_MAX");
  if(fseek(dfself->handle, (long)position, SEEK_SET) < 0)
#else
  THArgCheck(position <= LLONG_MAX, 2, "position must be smaller than LLONG_MAX");
  if(fseeko(dfself->handle, (off_t)position, SEEK_SET) < 0)
#endif
  {
    dfself->file.hasError = 1;
    if(!dfself->file.isQuiet)
      THError("unable to seek to position %zd", position);
  }
}
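The three preprocessor branches exist because the widest portable seek primitive differs per platform: fseek takes a long (only 32 bits on Windows), _fseeki64 handles 64-bit offsets on Win64, and POSIX fseeko takes an off_t; in each branch, THArgCheck rejects positions that the cast could not represent. A hypothetical portable wrapper mirroring that structure:

#include <stdio.h>
#include <limits.h>

/* Sketch: pick the widest seek primitive the platform offers and guard
 * any narrowing cast. Returns 0 on success, -1 on failure. */
static int portable_seek(FILE *f, long long pos)
{
#if defined(_WIN64)
  return _fseeki64(f, pos, SEEK_SET) == 0 ? 0 : -1;
#elif defined(_WIN32)
  if (pos > LONG_MAX)
    return -1;                   /* fseek's offset is a 32-bit long here */
  return fseek(f, (long)pos, SEEK_SET) == 0 ? 0 : -1;
#else
  return fseeko(f, (off_t)pos, SEEK_SET) == 0 ? 0 : -1;
#endif
}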
Example 7: THCudaTensor_transpose
void THCudaTensor_transpose(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension1, int dimension2)
{
  long z;

  if(!src)
    src = self;

  THArgCheck( (dimension1 >= 0) && (dimension1 < src->nDimension), 1, "out of range");
  THArgCheck( (dimension2 >= 0) && (dimension2 < src->nDimension), 2, "out of range");

  THCudaTensor_set(state, self, src);

  if(dimension1 == dimension2)
    return;

  z = self->stride[dimension1];
  self->stride[dimension1] = self->stride[dimension2];
  self->stride[dimension2] = z;
  z = self->size[dimension1];
  self->size[dimension1] = self->size[dimension2];
  self->size[dimension2] = z;
}
Example 8: THApkFile_position
static size_t THApkFile_position(THFile *self)
{
  THApkFile *dfself = (THApkFile*)(self);

  THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file");

  long offset = ftell(dfself->handle);
  if (offset > -1)
    return (size_t)offset;
  else if(!dfself->file.isQuiet)
    THError("unable to obtain disk file offset (maybe a long overflow occurred)");

  return 0;
}
Example 9: nn_
static int nn_(SpatialConvolutionMM_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  int padH = luaT_getfieldcheckint(L, 1, "padH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *fgradInput = luaT_getfieldcheckudata(L, 1, "fgradInput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  THTensor_(transpose)(weight, weight, 0, 1);

  if(input->nDimension == 3)
  {
    nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, weight, fgradInput, kW, kH, dW, dH, padW, padH);
  }
  else
  {
    long T = input->size[0];
    long t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, weight, fgradInput_t, kW, kH, dW, dH, padW, padH);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(transpose)(weight, weight, 0, 1);

  return 1;
}
Example 10: THCudaTensor_copyFloat
void THCudaTensor_copyFloat(THCudaTensor *self, struct THFloatTensor *src)
{
  THArgCheck(THCudaTensor_nElement(self) == THFloatTensor_nElement(src), 2, "sizes do not match");

  {
    THCudaTensor *selfc = THCudaTensor_newContiguous(self);
    src = THFloatTensor_newContiguous(src);

    THCudaCheck(cudaMemcpy(selfc->storage->data + selfc->storageOffset,
                           src->storage->data + src->storageOffset,
                           THFloatTensor_nElement(src) * sizeof(float),
                           cudaMemcpyHostToDevice));

    THFloatTensor_free(src);
    THCudaTensor_freeCopyTo(selfc, self);
  }
}
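The copy works because both sides are first made contiguous, which lets a single bulk transfer of nElement * sizeof(float) bytes cover the whole tensor; the THArgCheck (argNumber 2, pointing at src) guards the only precondition, matching element counts. A plain-C sketch of the pattern, with memcpy standing in for the host-to-device cudaMemcpy and hypothetical names:

#include <string.h>

/* Sketch: check element counts, then move everything in one bulk copy.
 * The real code densifies both buffers with newContiguous first and uses
 * cudaMemcpy(..., cudaMemcpyHostToDevice). */
static int copy_dense(float *dst, size_t dst_n, const float *src, size_t src_n)
{
  if (dst_n != src_n)
    return -1;                          /* "sizes do not match" */
  memcpy(dst, src, src_n * sizeof(float));
  return 0;
}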
Example 11: THNN_
void THNN_(LogSoftMax_updateOutput)(THNNState *state, THTensor *input, THTensor *output)
{
  real *input_data, *output_data;
  long nframe = 0, dim = 0;
  long t, d;

  if (input->nDimension == 1)
  {
    nframe = 1;
    dim = input->size[0];
  }
  else if (input->nDimension == 2)
  {
    nframe = input->size[0];
    dim = input->size[1];
  }
  else
  {
    /* constant-false condition: the TH idiom for an unconditional
       argument error that still names argument 2 */
    THArgCheck(0, 2, "vector or matrix expected");
  }

  input = THTensor_(newContiguous)(input);
  THTensor_(resizeAs)(output, input);

  real *input_data0 = THTensor_(data)(input);
  real *output_data0 = THTensor_(data)(output);

  accreal logsum;
  real maxInput;
  #pragma omp parallel for private(t, d, maxInput, logsum, input_data, output_data)
  for (t = 0; t < nframe; t++)
  {
    logsum = 0;
    maxInput = -THInf;
    input_data = input_data0 + dim*t;
    output_data = output_data0 + dim*t;

    for (d = 0; d < dim; d++)
      maxInput = THMax(maxInput, input_data[d]);

    for (d = 0; d < dim; d++)
      logsum += THExpMinusApprox(maxInput-input_data[d]);
    logsum = maxInput + log(logsum);

    for (d = 0; d < dim; d++)
      output_data[d] = input_data[d] - logsum;
  }

  THTensor_(free)(input);
}
Example 12: nn_
static int nn_(LogSoftMax_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  real *input_data, *output_data;
  long nframe = 0, dim = 0;
  long t, d;

  if(input->nDimension == 1)
  {
    nframe = 1;
    dim = input->size[0];
  }
  else if(input->nDimension == 2)
  {
    nframe = input->size[0];
    dim = input->size[1];
  }
  else
    THArgCheck(0, 2, "vector or matrix expected");

  input = THTensor_(newContiguous)(input);
  THTensor_(resizeAs)(output, input);

  input_data = THTensor_(data)(input);
  output_data = THTensor_(data)(output);
  for(t = 0; t < nframe; t++)
  {
    accreal logsum = 0;
    real maxInput = -THInf;

    for(d = 0; d < dim; d++)
      maxInput = THMax(maxInput, input_data[d]);

    for(d = 0; d < dim; d++)
      logsum += THExpMinusApprox(maxInput-input_data[d]);
    logsum = maxInput + log(logsum);

    for(d = 0; d < dim; d++)
      output_data[d] = input_data[d] - logsum;

    input_data += dim;
    output_data += dim;
  }

  THTensor_(free)(input);

  return 1;
}
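Both LogSoftMax versions above compute the shifted log-sum-exp: with m = max_d x_d, log(sum_d exp(x_d)) = m + log(sum_d exp(x_d - m)), so every exponent is non-positive and cannot overflow; the output is then each x_d minus that log-sum. (THExpMinusApprox(z) is TH's fast approximation of exp(-z), which is why the argument order is maxInput - input_data[d].) A standalone sketch of the identity:

#include <math.h>

/* Numerically stable log(sum(exp(x[i]))) via the max-shift identity used
 * by both LogSoftMax implementations above. */
static double log_sum_exp(const double *x, int n)
{
  double m = x[0];
  for (int i = 1; i < n; i++)
    if (x[i] > m) m = x[i];
  double s = 0;
  for (int i = 0; i < n; i++)
    s += exp(x[i] - m);       /* every exponent is <= 0: no overflow */
  return m + log(s);
}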
Example 13: THCSTensor_
// directly assign without cloning or retaining (internal method)
THCSTensor* THCSTensor_(_move)(THCState *state, THCSTensor *self, THCIndexTensor *indices, THCTensor *values) {
  int empty = THCTensor_(_nDimension)(state, values) == 0;
  if (!empty) {
    THArgCheck(THCIndexTensor_(_nDimension)(state, indices) == 2, 2,
               "indices must be nDim x nnz");
    THArgCheck(THCIndexTensor_(size)(state, indices, 1) == THCTensor_(size)(state, values, 0), 2,
               "indices and values must have same nnz");
    THArgCheck(THCIndexTensor_(size)(state, indices, 0) == self->nDimensionI, 2,
               "indices has incorrect first dimension, expected %d, got %d",
               self->nDimensionI, THCIndexTensor_(size)(state, indices, 0));
    THArgCheck(THCTensor_(_nDimension)(state, values) == self->nDimensionV + 1, 3,
               "values has incorrect number of dimensions, expected %d, got %d",
               self->nDimensionV + 1, THCTensor_(_nDimension)(state, values));
  } else {
    THArgCheck(THCIndexTensor_(_nDimension)(state, indices) == 0, 2,
               "if values is empty, indices must be empty too");
  }
  THCIndexTensor_(free)(state, self->indices);
  THCTensor_(free)(state, self->values);
  self->indices = indices;
  self->values = values;
  self->nnz = empty ? 0 : THCTensor_(size)(state, values, 0);
  self->coalesced = 0;
  return self;
}
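The four checks encode the COO invariants of a sparse tensor: indices is a 2-D matrix with one row per sparse dimension and one column per stored element (nnz), and values carries nnz as its leading dimension followed by the dense dimensions. A plain-C restatement of those invariants (hypothetical struct, illustration only, not the THCS layout):

/* Field names are illustrative. */
struct coo_meta {
  int  ndim_sparse, ndim_dense;     /* self->nDimensionI / nDimensionV */
  int  indices_ndim, values_ndim;
  long indices_rows, indices_cols;  /* indices is indices_rows x indices_cols */
  long values_dim0;                 /* leading dimension of values */
};

static int coo_is_valid(const struct coo_meta *t)
{
  return t->indices_ndim == 2                  /* indices must be nDim x nnz   */
      && t->indices_cols == t->values_dim0     /* same nnz on both sides       */
      && t->indices_rows == t->ndim_sparse     /* one index row per sparse dim */
      && t->values_ndim  == t->ndim_dense + 1; /* nnz + dense dimensions       */
}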
Example 14: THNN_
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *gradInput,
          bool sizeAverage,
          THTensor *weights,
          THTensor *total_weight)
{
  INITIAL_CHECK;
  THArgCheck(THTensor_(isContiguous)(gradInput), 4,
             "gradInput must be contiguous");

  real *total_weight_data = THTensor_(data)(total_weight);
  if (*total_weight_data <= 0)
    return;

  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *gradInput_data = THTensor_(data)(gradInput);

  long batch_size = THTensor_(size)(input, 0);
  long n_classes = THTensor_(size)(input, 1);
  long map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
  long sample_size = map_size * n_classes;

  real normalize = sizeAverage ? *total_weight_data : 1.0f;

  int b;
  #pragma omp parallel for
  for (b = 0; b < batch_size; b++) {
    int elem;
    for (elem = 0; elem < map_size; elem++) {
      int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      gradInput_data[b * sample_size + cur_target * map_size + elem] =
        -(weights ? weights_data[cur_target] : 1.0f) / normalize;
    }
  }

  THIndexTensor_(free)(target);
  if (weights)
    THTensor_(free)(weights);
}
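For each spatial location, the gradient of the weighted NLL loss with respect to the input log-probabilities is -w[target] / normalize at the target class and zero elsewhere, which is exactly the single assignment in the inner loop. A standalone sketch for one location (hypothetical helper; it zeroes explicitly, whereas the kernel above assumes gradInput starts zeroed):

/* Per-location NLL gradient w.r.t. log-probabilities. */
static void nll_grad_one(float *grad, long n_classes, long target,
                         const float *weights, float normalize)
{
  for (long c = 0; c < n_classes; c++)
    grad[c] = 0.0f;
  grad[target] = -(weights ? weights[target] : 1.0f) / normalize;
}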
Example 15: THArgCheck
/* Storage init */
THGPUTensor *THGPUTensor_newWithStorage(THGPUStorage *storage, long storageOffset, THLongStorage *size, THLongStorage *stride)
{
  THGPUTensor *self = (THGPUTensor*)THAlloc(sizeof(THGPUTensor));
  if (size && stride)
    THArgCheck(size->size == stride->size, 4, "inconsistent size");

  THGPUTensor_rawInit(self);
  THGPUTensor_rawSet(self,
                     storage,
                     storageOffset,
                     (size ? size->size : (stride ? stride->size : 0)),
                     (size ? size->data : NULL),
                     (stride ? stride->data : NULL));

  return self;
}