This article collects typical usage examples of the C++ function THError. If you are unsure what THError does, how to call it, or simply want to see it used in real code, the hand-picked examples below may help. A total of 15 THError code examples are shown, sorted by popularity by default.
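As the examples below illustrate, THError is TH/Torch's printf-style fatal-error routine: it formats its arguments and hands the message to the installed Torch error handler (by default the message is printed and the process aborts; under Lua a Torch error is raised), so it does not return to the caller. A minimal sketch of the calling convention, assuming a Torch7/TH installation with the TH headers on the include path; the check_positive helper and its message are invented for illustration:

#include <TH/TH.h>   /* umbrella TH header: declares THError, THArgCheck, THAlloc, ... */

/* Hypothetical helper, for illustration only: reject non-positive sizes. */
static void check_positive(long n)
{
  if (n <= 0)
    THError("expected a positive size, got %ld", n);   /* does not return to the caller */
}

int main(void)
{
  check_positive(16);   /* passes silently */
  check_positive(-1);   /* THError fires: "expected a positive size, got -1" */
  return 0;
}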
Example 1: THLab_
TH_API void THLab_(syev)(THTensor *a_, THTensor *w_, const char *jobz, const char *uplo)
{
  int n, lda, lwork, info;
  THTensor *A;
  THTensor *work;
  real wkopt;

  THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");

  A = THTensor_(newContiguous)(a_);
  n = A->size[1];
  lda = n;
  THTensor_(resize1d)(w_, n);

  // get optimal workspace size (lwork = -1 is the LAPACK workspace query)
  THLapack_(syev)(jobz[0], uplo[0], n, THTensor_(data)(A), lda,
                  THTensor_(data)(w_), &wkopt, -1, &info);
  lwork = (int)wkopt;
  work = THTensor_(newWithSize1d)(lwork);

  // actual eigendecomposition
  THLapack_(syev)(jobz[0], uplo[0], n, THTensor_(data)(A), lda,
                  THTensor_(data)(w_), THTensor_(data)(work), lwork, &info);

  if (info > 0)
  {
    THError("Lapack syev : Failed to converge. %d off-diagonal elements of an intermediate tridiagonal form didn't converge to zero", info);
  }
  else if (info < 0)
  {
    THError("Lapack syev : Argument %d : illegal value", -info);
  }

  THTensor_(free)(A);
  THTensor_(free)(work);
}
Example 2: THFloatTensor_addr
void THFloatTensor_addr(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *vec1, THFloatTensor *vec2)
{
  /* computes r_ = beta*t + alpha * (vec1 outer vec2); only the in-place case r_ == t is supported */
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension);

  if(t->nDimension != 2)
    THError("expected matrix, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
    THError("size mismatch, t: %ld, vec1: %ld, t: %ld, vec2: %ld", t->size[0], vec1->size[0], t->size[1], vec2->size[0]);

  if(r_ != t)
    THError("r_ != t not implemented");

  if(beta != 1)
    THFloatTensor_mul(r_, r_, beta);

  /* column-major layout: rank-1 update via a single BLAS ger call */
  if(r_->stride[0] == 1)
  {
    THBlas_ger(vec1->size[0], vec2->size[0],
               alpha, THFloatTensor_data(vec1), vec1->stride[0],
               THFloatTensor_data(vec2), vec2->stride[0],
               THFloatTensor_data(r_), r_->stride[1]);
  }
  /* row-major layout: swap the vectors to apply the transposed update with ger */
  else if(r_->stride[1] == 1)
  {
    THBlas_ger(vec2->size[0], vec1->size[0],
               alpha, THFloatTensor_data(vec2), vec2->stride[0],
               THFloatTensor_data(vec1), vec1->stride[0],
               THFloatTensor_data(r_), r_->stride[0]);
  }
  else
    THError("addr for non-contiguous not implemented");
}
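A brief usage sketch showing how the first check above is hit in practice. It assumes the standard TH constructors (THFloatTensor_newWithSize1d, THFloatTensor_newWithSize2d, THFloatTensor_fill), which are not part of the example itself, and the exact error text may vary between TH versions:

#include <TH/TH.h>

int main(void)
{
  THFloatTensor *t    = THFloatTensor_newWithSize2d(3, 4);
  THFloatTensor *vec1 = THFloatTensor_newWithSize1d(3);
  THFloatTensor *vec2 = THFloatTensor_newWithSize2d(4, 1);  /* 2D on purpose */

  THFloatTensor_fill(t, 0);
  THFloatTensor_fill(vec1, 1);
  THFloatTensor_fill(vec2, 1);

  /* vec2 is 2-dimensional, so this raises
     "vector and vector expected, got 1D, 2D tensors" via THError */
  THFloatTensor_addr(t, 1, t, 1, vec1, vec2);

  THFloatTensor_free(vec2);
  THFloatTensor_free(vec1);
  THFloatTensor_free(t);
  return 0;
}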
Example 3: THRealloc
void* THRealloc(void *ptr, ptrdiff_t size)
{
  if(!ptr)
    return(THAlloc(size));

  if(size == 0)
  {
    THFree(ptr);
    return NULL;
  }

  if(size < 0)
    THError("$ Torch: invalid memory size -- maybe an overflow?");

  ptrdiff_t oldSize = -getAllocSize(ptr);
  void *newptr = realloc(ptr, size);

  /* if realloc failed, run the Torch garbage collector and retry once */
  if(!newptr && torchGCFunction) {
    torchGCFunction(torchGCData);
    newptr = realloc(ptr, size);
  }

  if(!newptr)
    THError("$ Torch: not enough memory: you tried to reallocate %dGB. Buy new RAM!", size/1073741824);

  // update heapSize only after successfully reallocated
  THHeapUpdate(oldSize + getAllocSize(newptr));
  return newptr;
}
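The TH heap functions shown here are used like their libc counterparts. A minimal sketch of the public API (THAlloc/THRealloc/THFree from THGeneral.h), assuming no custom Torch error handler is installed, so an allocation failure would end up in THError:

#include <TH/TH.h>

int main(void)
{
  /* THAlloc/THRealloc/THFree mirror malloc/realloc/free but keep the
     Torch heap counters up to date and report failures through THError. */
  double *buf = THAlloc(100 * sizeof(double));
  buf = THRealloc(buf, 200 * sizeof(double));  /* grow, possibly in place */
  THFree(buf);
  return 0;
}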
Example 4: checkAndCountListOfStreams
int checkAndCountListOfStreams(lua_State *L, THCState *state, int arg,
                               int device)
{
  if (!lua_istable(L, arg)) {
    THError("expecting table of device streams");
  }

  /* Push table to top */
  lua_pushvalue(L, arg);

  /* Check that all values in the table are numeric and in bounds */
  int streams = 0;
  lua_pushnil(L);
  while (lua_next(L, -2)) {
    if (!lua_isnumber(L, -1)) {
      THError("streamWaitFor: list of streams must be numeric");
    }
    int streamId = (int) lua_tonumber(L, -1);

    /* This will error out if the stream is not in bounds */
    THCState_getDeviceStream(state, device, streamId);

    ++streams;
    lua_pop(L, 1);
  }

  /* Pop table from top */
  lua_pop(L, 1);
  return streams;
}
Example 5: THNN_
void THNN_(ClassNLLCriterion_updateOutput)(THNNState *state, THTensor *input,
                                           THIndexTensor *target,
                                           THTensor *output, bool sizeAverage,
                                           THTensor *weights,
                                           THTensor *total_weight)
{
  int n_dims = THTensor_(nDimension)(input);
  int n_classes = THTensor_(size)(input, n_dims - 1);

  if (THIndexTensor_(nDimension)(target) > 1) {
    THError("multi-target not supported");
  }
  if (THTensor_(nDimension)(input) > 2) {
    THError("input tensor should be 1D or 2D");
  }

  input = THTensor_(newContiguous)(input);
  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  real *input_data = THTensor_(data)(input);
  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *output_data = THTensor_(data)(output);
  real *total_weight_data = THTensor_(data)(total_weight);

  output_data[0] = total_weight_data[0] = 0.0;

  if (THTensor_(nDimension)(input) == 1) {
    int cur_target = target_data[0] - 1;  /* targets are 1-based on the Lua side */
    THAssert(cur_target >= 0 && cur_target < n_classes);

    total_weight_data[0] = weights ? weights_data[cur_target] : 1.0f;
    output_data[0] = -input_data[cur_target] * total_weight_data[0];
  } else if (THTensor_(nDimension)(input) == 2) {
    int batch_size = THTensor_(size)(input, 0);
    THAssert(THIndexTensor_(size)(target, 0) == batch_size);

    int n_target = THTensor_(size)(input, 1);

    int i;
    for (i = 0; i < batch_size; i++) {
      int cur_target = target_data[i] - 1;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      real cur_weight = weights ? weights_data[cur_target] : 1.0f;
      total_weight_data[0] += cur_weight;
      output_data[0] -= input_data[i * n_target + cur_target] * cur_weight;
    }
  }

  if (sizeAverage && total_weight_data[0]) {
    output_data[0] /= total_weight_data[0];
  }

  if (weights) {
    THTensor_(free)(weights);
  }
  THTensor_(free)(input);
  THIndexTensor_(free)(target);
}
Example 6: THMapAllocator_free
static void THMapAllocator_free(void* ctx_, void* data) {
  THMapAllocatorContext *ctx = ctx_;

#ifdef _WIN32
  if(UnmapViewOfFile(data) == 0)
    THError("could not unmap the shared memory file");
#else /* _WIN32 */
  if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD) {
    if (close(ctx->fd) == -1)
      THError("could not close file descriptor %d", ctx->fd);
  }

  if (munmap(data, ctx->size))
    THError("could not unmap the shared memory file");

  if (!(ctx->flags & (TH_ALLOCATOR_MAPPED_FROMFD | TH_ALLOCATOR_MAPPED_UNLINK)))
  {
    if (ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM)
    {
#ifdef HAVE_SHM_UNLINK
      if (shm_unlink(ctx->filename) == -1)
        THError("could not unlink the shared memory file %s", ctx->filename);
#else
      THError("could not unlink the shared memory file %s, shm_unlink not available on platform", ctx->filename);
#endif
    }
  }
#endif /* _WIN32 */

  THMapAllocatorContext_free(ctx);
}
Example 7: THRealloc
void* THRealloc(void *ptr, long size)
{
  if(!ptr)
    return(THAlloc(size));

  if(size == 0)
  {
    THFree(ptr);
    return NULL;
  }

  if(size < 0)
    THError("$ Torch: invalid memory size -- maybe an overflow?");

  THHeapUpdate(-getAllocSize(ptr));
  void *newptr = realloc(ptr, size);

  if(!newptr && torchGCFunction) {
    torchGCFunction(torchGCData);
    newptr = realloc(ptr, size);
  }
  THHeapUpdate(getAllocSize(newptr ? newptr : ptr));

  if(!newptr)
    THError("$ Torch: not enough memory: you tried to reallocate %dGB. Buy new RAM!", size/1073741824);

  return newptr;
}
Example 8: THNN_
void THNN_(LookupTable_renorm)(
          THNNState *state,
          THIndexTensor *idx,
          THTensor *weight,
          real maxNorm,
          real normType)
{
  if (!THTensor_(isContiguous)(weight))
    THError("weight must be contiguous");
  if (!THIndexTensor_(isContiguous)(idx))
    THError("input must be contiguous");
  if (THIndexTensor_(nDimension)(idx) != 1)
    THError("idx must be a vector");
  if (normType <= 0)
    THError("non-positive-norm not supported");

  long i;
  THIndex_t *row_idx = THIndexTensor_(data)(idx);
  long numel = THIndexTensor_(nElement)(idx);

  long numw = THTensor_(size)(weight, 0);
  long stride = THTensor_(stride)(weight, 0);
  real *gw = THTensor_(data)(weight);
  for (i=0; i<numel; i++)
    if (row_idx[i] < 1 || row_idx[i] > numw)
      THError("input out of range");

  // get unique indices
  qsort(row_idx, numel, sizeof(THIndex_t), THNN_(compare_THIndex));
  long ptr = 0;
  for (i=0; i<numel; i++)
    if (i == 0 || row_idx[i] != row_idx[i-1])
      row_idx[ptr++] = row_idx[i];
  numel = ptr;

#ifdef _OPENMP
  if (numel > 1000)
  {
    // The strategy is to parallelize over the rows that appear in
    // row_idx, so that thread 1 handles the rows in row_idx[0..numel/nThreads].
    // This distributes the work evenly to each thread.
    #pragma omp parallel for private(i)
    for (i=0; i<numel; i++)
    {
      long k = row_idx[i] - 1;
      THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
    }
    return;
  }
#endif

  for (i=0; i<numel; i++)
  {
    long k = row_idx[i] - 1;
    THNN_(LookupTable_renormRow)(gw + k*stride, stride, maxNorm, normType);
  }
}
Example 9: applyHeapDelta
static ptrdiff_t applyHeapDelta() {
  ptrdiff_t oldHeapSize = THAtomicAddPtrdiff(&heapSize, heapDelta);
#ifdef DEBUG
  if (heapDelta > 0 && oldHeapSize > PTRDIFF_MAX - heapDelta)
    THError("applyHeapDelta: heapSize(%td) + increased(%td) > PTRDIFF_MAX, heapSize overflow!", oldHeapSize, heapDelta);
  if (heapDelta < 0 && oldHeapSize < PTRDIFF_MIN - heapDelta)
    THError("applyHeapDelta: heapSize(%td) + decreased(%td) < PTRDIFF_MIN, heapSize underflow!", oldHeapSize, heapDelta);
#endif
  ptrdiff_t newHeapSize = oldHeapSize + heapDelta;
  heapDelta = 0;
  return newHeapSize;
}
Example 10: THCState_getPeerToPeerAccess
int THCState_getPeerToPeerAccess(THCState* state, int dev, int devToAccess)
{
  if (dev < 0 || dev >= state->numDevices) {
    THError("%d is not a device", dev);
  }
  if (devToAccess < 0 || devToAccess >= state->numDevices) {
    THError("%d is not a device", devToAccess);
  }
  return state->p2pAccessEnabled[dev][devToAccess];
}
Example 11: THCState_getPeerToPeerAccess
int THCState_getPeerToPeerAccess(THCState* state, int dev, int devToAccess)
{
  int numDevices = 0;
  THCudaCheck(cudaGetDeviceCount(&numDevices));
  if (dev < 0 || dev >= numDevices) {
    THError("%d is not a device", dev);
  }
  if (devToAccess < 0 || devToAccess >= numDevices) {
    THError("%d is not a device", devToAccess);
  }
  return state->p2pAccessEnabled[dev][devToAccess];
}
Example 12: THCState_getCurrentBlasHandle
cublasHandle_t THCState_getCurrentBlasHandle(THCState *state)
{
  /* This is called at the point of kernel execution.
     For some debugging code or improperly instrumented kernels,
     `state` is null */
  if (state) {
    if (state->currentBlasHandle <= 0) {
      THError("%d is not a valid handle, valid range is: (1, %d)",
              state->currentBlasHandle, state->numUserBlasHandles);
    }
    return state->currentBlasHandle;
  }
  THError("THCState and blasHandles must be set as there is no default blasHandle");
  return NULL;
}
Example 13: THCudaTensor_rawSet
static void THCudaTensor_rawSet(THCState *state, THCudaTensor *self, THCudaStorage *storage, long storageOffset, int nDimension, long *size, long *stride)
{
  THAssert(self->storage != NULL);

  /* storage */
  if(self->storage != storage)
  {
    if(self->storage)
      THCudaStorage_free(state, self->storage);

    if(storage)
    {
      self->storage = storage;
      THCudaStorage_retain(state, self->storage);
    }
    else
      self->storage = THCudaStorage_new(state);
  }

  /* storageOffset */
  if(storageOffset < 0)
    THError("Tensor: invalid storage offset");
  self->storageOffset = storageOffset;

  /* size and stride */
  THCudaTensor_rawResize(state, self, nDimension, size, stride);
}
Example 14: THNN_
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p,
                                                             THIndex_t *ind_p,
                                                             int nslices,
                                                             int iwidth, int iheight,
                                                             int owidth, int oheight)
{
  at::parallel_for(0, nslices, 0, [&](int64_t start, int64_t end) {
    for (auto k = start; k < end; k++)
    {
      scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight;
      scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
      THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

      int i, j;
      THIndex_t maxp;
      for(i = 0; i < iheight; i++)
      {
        for(j = 0; j < iwidth; j++)
        {
          maxp = ind_p_k[i*iwidth + j]; /* retrieve position of max */
          if(maxp < 0 || maxp >= owidth * oheight) {
            THError("invalid max index %ld, owidth= %d, oheight= %d", maxp, owidth, oheight);
          }
          gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
        }
      }
    }
  });
}
Example 15: THFloatTensor_resize4d
THFloatTensor *nn_SpatialConvolutionMM_updateOutput(struct module *module, THFloatTensor *input)
{
  int kW = module->SpatialConvolution.kW;
  int kH = module->SpatialConvolution.kH;
  int dW = module->SpatialConvolution.dW;
  int dH = module->SpatialConvolution.dH;
  int padW = module->SpatialConvolution.padW;
  int padH = module->SpatialConvolution.padH;

  THFloatTensor *finput = module->SpatialConvolution.finput;
  THFloatTensor *weight = module->SpatialConvolution.weight;
  THFloatTensor *bias = module->SpatialConvolution.bias;
  THFloatTensor *output = module->output;

  int batch = 1;
  if (input->nDimension == 3) {
    /* add a singleton batch dimension so the batched path below can be reused */
    batch = 0;
    THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
  }

  long batchSize = input->size[0];
  long nInputPlane = module->SpatialConvolution.nInputPlane;
  long nOutputPlane = module->SpatialConvolution.nOutputPlane;
  long inputWidth = input->size[3];
  long inputHeight = input->size[2];
  long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
  long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;

  if (outputWidth < 1 || outputHeight < 1)
    THError("Given input size: (%ldx%ldx%ld). Calculated output size: (%ldx%ldx%ld). Output size is too small",
            nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, outputWidth);

  THFloatTensor_resize3d(finput, batchSize, kW*kH*nInputPlane, outputHeight*outputWidth);
  THFloatTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth);

  long t;
#pragma omp parallel for if(batchSize >= 4) private(t)
  for (t = 0; t < batchSize; t++) {
    THFloatTensor *input_t = THFloatTensor_newSelect(input, 0, t);
    THFloatTensor *output_t = THFloatTensor_newSelect(output, 0, t);
    THFloatTensor *finput_t = THFloatTensor_newSelect(finput, 0, t);

    nn_SpatialConvolutionMM_updateOutput_frame(input_t, output_t, weight, bias, finput_t,
                                               kW, kH, dW, dH, padW, padH,
                                               nInputPlane, inputWidth, inputHeight,
                                               nOutputPlane, outputWidth, outputHeight);

    THFloatTensor_free(input_t);
    THFloatTensor_free(output_t);
    THFloatTensor_free(finput_t);
  }

  if (batch == 0) {
    /* drop the singleton batch dimension again */
    THFloatTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth);
    THFloatTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
  }

  return output;
}