当前位置: 首页>>代码示例>>Java>>正文


Java NativeOpsHolder类代码示例

本文整理汇总了Java中org.nd4j.nativeblas.NativeOpsHolder的典型用法代码示例。如果您正苦于以下问题:Java NativeOpsHolder类的具体用法?Java NativeOpsHolder怎么用?Java NativeOpsHolder使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


NativeOpsHolder类属于org.nd4j.nativeblas包,在下文中一共展示了NativeOpsHolder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: applyConfiguration

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Pushes the values held in the current {@code configuration} down to the native
 * ops layer: debug/verbose flags, cross-device (P2P) access, grid size limit and
 * OMP thread bounds.
 *
 * NOTE(review): each setting is forwarded independently; call order is assumed
 * not to matter — confirm against the native implementation.
 */
public void applyConfiguration() {
    CudaEnvironment.getInstance().notifyConfigurationApplied();

    NativeOpsHolder.getInstance().getDeviceNativeOps().enableDebugMode(configuration.isDebug());
    NativeOpsHolder.getInstance().getDeviceNativeOps().enableVerboseMode(configuration.isVerbose());
    NativeOpsHolder.getInstance().getDeviceNativeOps().enableP2P(configuration.isCrossDeviceAccessAllowed());
    NativeOpsHolder.getInstance().getDeviceNativeOps().setGridLimit(configuration.getMaximumGridSize());
    NativeOpsHolder.getInstance().getDeviceNativeOps().setOmpNumThreads(configuration.getMaximumBlockSize());
    NativeOpsHolder.getInstance().getDeviceNativeOps().setOmpMinThreads(configuration.getMinimumBlockSize());
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:24,代码来源:AtomicAllocator.java

示例2: replicateToDevice

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Replicates the given DataBuffer onto the specified device.
 *
 * If the calling thread is currently attached to a different device, it is
 * temporarily switched to {@code deviceId} for the copy and switched back
 * afterwards.
 *
 * @param deviceId target device id
 * @param buffer   source buffer; may be {@code null}
 * @return a freshly allocated copy of {@code buffer}, or {@code null} when the
 *         input buffer is {@code null}
 */
@Override
public DataBuffer replicateToDevice(Integer deviceId, DataBuffer buffer) {
    if (buffer == null)
        return null;

    // Remember where this thread currently lives so we can restore it later.
    int previousDeviceId = AtomicAllocator.getInstance().getDeviceId();
    boolean switchRequired = previousDeviceId != deviceId;

    if (switchRequired) {
        NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(deviceId));
        Nd4j.getAffinityManager().attachThreadToDevice(Thread.currentThread().getId(), deviceId);
    }

    DataBuffer replica = Nd4j.createBuffer(buffer.length(), false);
    AtomicAllocator.getInstance().memcpy(replica, buffer);

    if (switchRequired) {
        NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(previousDeviceId));
        Nd4j.getAffinityManager().attachThreadToDevice(Thread.currentThread().getId(), previousDeviceId);
    }

    return replica;
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:29,代码来源:CudaAffinityManager.java

示例3: destroyWorkspace

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Releases the host and device memory backing this workspace and resets its
 * internal state so it can no longer be used without re-initialization.
 *
 * @param extended when {@code true}, external allocations are cleared as well
 */
@Override
public synchronized void destroyWorkspace(boolean extended) {
    currentSize.set(0);
    reset();

    if (extended) {
        clearExternalAllocations();
    }
    clearPinnedAllocations(extended);

    // Free the backing memory, if any was ever allocated.
    if (workspace.getHostPointer() != null) {
        NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(workspace.getHostPointer());
    }
    if (workspace.getDevicePointer() != null) {
        NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(workspace.getDevicePointer(), null);
    }

    // Null out the pointers so stale references can never be reused.
    workspace.setDevicePointer(null);
    workspace.setHostPointer(null);
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:21,代码来源:CudaWorkspace.java

示例4: sortCooIndices

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Sorts the indices (and the matching values) of a COO sparse array in place
 * via the native sorter.
 *
 * @param x sparse array in COO format
 * @return the same array instance, with its indices/values sorted
 * @throws UnsupportedOperationException when {@code x} is not COO or its
 *         element type is neither FLOAT nor DOUBLE
 */
@Override
public INDArray sortCooIndices(INDArray x) {
    if (x.getFormat() != SparseFormat.COO) {
        throw new UnsupportedOperationException("Not a COO ndarray");
    }

    BaseSparseNDArrayCOO cooArray = (BaseSparseNDArrayCOO) x;
    DataBuffer values = cooArray.getValues();
    DataBuffer indices = cooArray.getIndices();
    long numValues = values.length();
    int rank = cooArray.underlyingRank();

    // Dispatch to the native sorter matching the element type.
    switch (values.dataType()) {
        case FLOAT:
            NativeOpsHolder.getInstance().getDeviceNativeOps().sortCooIndicesFloat(null,
                            (IntPointer) indices.addressPointer(), (FloatPointer) values.addressPointer(),
                            numValues, rank);
            break;
        case DOUBLE:
            NativeOpsHolder.getInstance().getDeviceNativeOps().sortCooIndicesDouble(null,
                            (IntPointer) indices.addressPointer(), (DoublePointer) values.addressPointer(),
                            numValues, rank);
            break;
        default:
            throw new UnsupportedOperationException("Unknown datatype " + x.data().dataType());
    }

    return cooArray;
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:25,代码来源:CpuSparseNDArrayFactory.java

示例5: init

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Allocates the backing host memory for this workspace, either in plain RAM or
 * as a memory-mapped file, depending on the configured location policy.
 */
@Override
protected void init() {
    super.init();

    LocationPolicy policy = workspaceConfiguration.getPolicyLocation();

    if (policy == LocationPolicy.RAM) {
        // Plain host RAM allocation; skipped entirely for zero-sized workspaces.
        if (currentSize.get() > 0) {
            isInit.set(true);

            if (isDebug.get())
                log.info("Allocating [{}] workspace of {} bytes...", id, currentSize.get());

            workspace.setHostPointer(new PagedPointer(
                            memoryManager.allocate(currentSize.get() + SAFETY_OFFSET, MemoryKind.HOST, true)));
        }
    } else if (policy == LocationPolicy.MMAP) {
        // Map the pre-created temp file into memory and use it as the workspace.
        long fileLength = tempFile.length();
        mmap = NativeOpsHolder.getInstance().getDeviceNativeOps().mmapFile(null, tempFile.getAbsolutePath(), fileLength);

        if (mmap == null)
            throw new RuntimeException("MMAP failed");

        workspace.setHostPointer(new PagedPointer(mmap.get(0)));
    }
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:26,代码来源:CpuWorkspace.java

示例6: destroyWorkspace

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Tears down this CPU workspace: resets all counters, clears pinned/external
 * allocations, and releases the backing memory according to the location
 * policy (plain free for RAM, munmap for memory-mapped workspaces).
 *
 * @param extended when {@code true}, external allocations are cleared as well
 */
@Override
public synchronized void destroyWorkspace(boolean extended) {
    if (isDebug.get())
        log.info("Destroying workspace...");

    currentSize.set(0);
    hostOffset.set(0);
    deviceOffset.set(0);

    if (extended)
        clearExternalAllocations();

    clearPinnedAllocations(extended);

    LocationPolicy policy = workspaceConfiguration.getPolicyLocation();
    if (policy == LocationPolicy.RAM) {
        if (workspace.getHostPointer() != null)
            NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(workspace.getHostPointer());
    } else if (policy == LocationPolicy.MMAP) {
        if (workspace.getHostPointer() != null)
            NativeOpsHolder.getInstance().getDeviceNativeOps().munmapFile(null, mmap, tempFile.length());
    }

    workspace.setDevicePointer(null);
    workspace.setHostPointer(null);
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:26,代码来源:CpuWorkspace.java

示例7: sortSparseCooIndicesSort2

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Verifies that the native COO index sort orders index tuples lexicographically
 * and permutes the value buffer accordingly.
 *
 * Fix: the original built the value buffer from a double[] but invoked the
 * FLOAT native sorter through a (FloatPointer) cast, so the pointer's element
 * size disagreed with the buffer's actual element type. The buffer is now
 * float-typed end to end, matching sortCooIndicesFloat.
 */
@Test
public void sortSparseCooIndicesSort2() throws Exception {
    // FIXME: we don't want this test running on cuda for now
    if (Nd4j.getExecutioner().getClass().getCanonicalName().toLowerCase().contains("cuda"))
        return;

    // 3 entries of rank 3: (0,0,0), (2,2,2), (1,1,1)
    int indices[] = new int[] {0, 0, 0, 2, 2, 2, 1, 1, 1};
    float values[] = new float[] {2, 1, 3};

    // After sorting, entries come back as (0,0,0), (1,1,1), (2,2,2),
    // carrying their values along: 2, 3, 1.
    int expIndices[] = new int[] {0, 0, 0, 1, 1, 1, 2, 2, 2};
    float expValues[] = new float[] {2, 3, 1};

    DataBuffer idx = Nd4j.getDataBufferFactory().createInt(indices);
    DataBuffer val = Nd4j.createBuffer(values);

    NativeOpsHolder.getInstance().getDeviceNativeOps().sortCooIndicesFloat(null, (IntPointer) idx.addressPointer(),
                    (FloatPointer) val.addressPointer(), 3, 3);

    assertArrayEquals(expIndices, idx.asInt());
    assertArrayEquals(expValues, val.asFloat(), 1e-5f);
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:23,代码来源:SortCooTests.java

示例8: Workspace

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Wraps a freshly malloc'ed device memory chunk of {@code size} bytes and
 * registers a deallocator that frees the chunk when this pointer is released.
 *
 * NOTE(review): mallocDevice is called with a null device pointer and flags 0 —
 * presumably "current device, default flags"; confirm against the NativeOps
 * implementation.
 *
 * @param size number of bytes to allocate on the device
 */
public Workspace(long size) {
    super(NativeOpsHolder.getInstance().getDeviceNativeOps().mallocDevice(size, null, 0));
    deallocator(new Deallocator() {
        @Override
        public void deallocate() {
            // Frees the device memory this Workspace pointer wraps.
            NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(Workspace.this, null);
        }
    });
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:10,代码来源:JcublasLapack.java

示例9: ensureMaps

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Lazily initializes the per-device bookkeeping (buffer cache, constant-space
 * offset counter, device lock, constant-space base address) for the given
 * device, exactly once per device id.
 *
 * Uses a check / synchronize / re-check sequence so the common
 * already-initialized path stays lock-free.
 *
 * @param deviceId device to initialize bookkeeping for
 */
private void ensureMaps(Integer deviceId) {
    if (!buffersCache.containsKey(deviceId)) {
        if (flowController == null)
            flowController = AtomicAllocator.getInstance().getFlowController();

        try {
            synchronized (this) {
                // Re-check under the lock: another thread may have won the race.
                if (!buffersCache.containsKey(deviceId)) {

                    // TODO: this op call should be checked
                    //nativeOps.setDevice(new CudaPointer(deviceId));

                    buffersCache.put(deviceId, new ConcurrentHashMap<ArrayDescriptor, DataBuffer>());
                    constantOffsets.put(deviceId, new AtomicLong(0));
                    deviceLocks.put(deviceId, new Semaphore(1));

                    // Query the constant memory space base address once per device.
                    Pointer cAddr = NativeOpsHolder.getInstance().getDeviceNativeOps().getConstantSpace();
                    //                    logger.info("constant pointer: {}", cAddr.address() );

                    deviceAddresses.put(deviceId, cAddr);
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:28,代码来源:ProtectedCudaConstantHandler.java

示例10: fillPoolWithResources

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Pre-populates the context pool with {@code numResources} CUDA contexts per
 * available device. All contexts created for a given device share one cuBLAS
 * handle and one cuSOLVER handle.
 *
 * @param numResources  number of contexts to create per device
 * @param restoreDevice when {@code true}, the thread's original device binding
 *                      is restored after the pool has been filled
 */
protected synchronized void fillPoolWithResources(int numResources, boolean restoreDevice) {
    List<Integer> availableDevices = CudaEnvironment.getInstance().getConfiguration().getAvailableDevices();

    int originalDevice = 0;
    if (restoreDevice)
        originalDevice = AtomicAllocator.getInstance().getDeviceId();

    NativeOps nativeOps = NativeOpsHolder.getInstance().getDeviceNativeOps();

    for (Integer device : availableDevices) {
        // Bind subsequent allocations/handle creation to this device.
        nativeOps.setDevice(new CudaPointer(device));
        pool.put(device, new LinkedBlockingQueue<CudaContext>());

        // One cuBLAS + one cuSOLVER handle shared by every context of this device.
        cublasHandle_t handle = createNewCublasHandle();
        cusolverDnHandle_t solverHandle = createNewSolverHandle();

        for (int i = 0; i < numResources; i++) {
            CudaContext context = createNewStream(device);
            context.initOldStream();
            getDeviceBuffers(context, device);
            context.setHandle(handle);
            context.setSolverHandle(solverHandle);
            context.syncOldStream();

            pool.get(device).add(context);
        }
    }

    if (restoreDevice)
        nativeOps.setDevice(new CudaPointer(originalDevice));
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:34,代码来源:LimitedContextPool.java

示例11: getDeviceBuffers

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Allocates the scratch buffers a CudaContext needs and attaches them to it:
 * a device-side reduction buffer, a device-side allocation buffer, a host-side
 * scalar buffer, and a device-side "special" buffer.
 *
 * @param context  context to attach the freshly allocated buffers to
 * @param deviceId device the device-side buffers are allocated on
 * @throws IllegalStateException when any of the native allocations fails
 */
protected void getDeviceBuffers(CudaContext context, int deviceId) {
    NativeOps nativeOps = NativeOpsHolder.getInstance().getDeviceNativeOps(); //((CudaExecutioner) Nd4j.getExecutioner()).getNativeOps();

    // we hardcode sizeOf to sizeOf(double)
    int sizeOf = 8;

    // Reduction buffer, zeroed asynchronously before first use.
    Pointer reductionPointer = nativeOps.mallocDevice(16385 * sizeOf * 2, new CudaPointer(deviceId), 0);
    if (reductionPointer == null)
        throw new IllegalStateException("Can't allocate [DEVICE] reduction buffer memory!");

    nativeOps.memsetAsync(reductionPointer, 0, 16385 * sizeOf * 2, 0, context.getOldStream());

    // Wait for the async memset to finish before the buffer is handed out.
    context.syncOldStream();

    Pointer allocationPointer = nativeOps.mallocDevice(1024 * 1024, new CudaPointer(deviceId), 0);
    if (allocationPointer == null)
        throw new IllegalStateException("Can't allocate [DEVICE] allocation buffer memory!");

    // Single host-side scalar slot (one sizeOf-sized element).
    Pointer scalarPointer = nativeOps.mallocHost(1 * sizeOf, 0);
    if (scalarPointer == null)
        throw new IllegalStateException("Can't allocate [HOST] scalar buffer memory!");

    context.setBufferScalar(scalarPointer);
    context.setBufferAllocation(allocationPointer);
    context.setBufferReduction(reductionPointer);

    // NOTE(review): 1024*1024*sizeOf bytes are allocated here but only
    // 65536*sizeOf bytes are zeroed below — confirm whether the partial
    // memset is intentional.
    Pointer specialPointer = nativeOps.mallocDevice(1024 * 1024 * sizeOf, new CudaPointer(deviceId), 0);
    if (specialPointer == null)
        throw new IllegalStateException("Can't allocate [DEVICE] special buffer memory!");

    nativeOps.memsetAsync(specialPointer, 0, 65536 * sizeOf, 0, context.getOldStream());

    context.setBufferSpecial(specialPointer);
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:40,代码来源:BasicContextPool.java

示例12: synchronize

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Blocks until all work queued on this CUDA stream has completed.
 *
 * @return the native result code (non-zero on success)
 * @throws ND4JException when the native call reports failure (result code 0)
 */
public int synchronize() {
    int result = NativeOpsHolder.getInstance().getDeviceNativeOps().streamSynchronize(this);
    if (result == 0)
        throw new ND4JException("CUDA exception happened. Terminating. Last op: [" + Nd4j.getExecutioner().getLastOp() + "]");

    return result;
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:9,代码来源:cudaStream_t.java

示例13: synchronize

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Blocks until this CUDA event has completed. Does nothing when the event has
 * already been destroyed.
 *
 * @throws ND4JException when the native call reports failure (result code 0)
 */
public void synchronize() {
    if (isDestroyed())
        return;

    int status = NativeOpsHolder.getInstance().getDeviceNativeOps().eventSynchronize(this);
    if (status == 0)
        throw new ND4JException("CUDA exception happened. Terminating. Last op: [" + Nd4j.getExecutioner().getLastOp() + "]");
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:8,代码来源:cudaEvent_t.java

示例14: register

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Records this event on the given CUDA stream. Does nothing when the event has
 * already been destroyed.
 *
 * @param stream stream to record this event on
 * @throws ND4JException when the native call reports failure (result code 0)
 */
public void register(cudaStream_t stream) {
    if (isDestroyed())
        return;

    int status = NativeOpsHolder.getInstance().getDeviceNativeOps().registerEvent(this, stream);
    if (status == 0)
        throw new ND4JException("CUDA exception happened. Terminating. Last op: [" + Nd4j.getExecutioner().getLastOp() + "]");
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:8,代码来源:cudaEvent_t.java

示例15: release

import org.nd4j.nativeblas.NativeOpsHolder; //导入依赖的package包/类
/**
 * Releases a previously allocated memory chunk.
 *
 * Kinds other than DEVICE and HOST are silently ignored, matching the
 * original behavior.
 *
 * @param pointer memory chunk to release
 * @param kind    where the chunk lives (device or host)
 */
@Override
public void release(Pointer pointer, MemoryKind kind) {
    switch (kind) {
        case DEVICE:
            NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(pointer, null);
            break;
        case HOST:
            NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(pointer);
            break;
        default:
            // Other memory kinds fall through without action.
            break;
    }
}
 
开发者ID:deeplearning4j,项目名称:nd4j,代码行数:16,代码来源:CudaMemoryManager.java


注:本文中的org.nd4j.nativeblas.NativeOpsHolder类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。