

TypeScript tfjs-core.relu Function Code Examples

This article collects typical usage examples of the @tensorflow/tfjs-core.relu function in TypeScript. If you are wondering how the relu function is used in TypeScript, how to call it, or what real-world examples look like, the hand-picked code samples below may help.


A total of six relu code examples are shown below, sorted by popularity by default.
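
Before the project examples, a minimal sketch of calling tf.relu directly may be helpful; it assumes only that @tensorflow/tfjs-core is installed, and the input values are purely illustrative:

import * as tf from '@tensorflow/tfjs-core'

// relu replaces every negative element with 0 and leaves non-negative elements unchanged
const x = tf.tensor1d([-2, -0.5, 0, 1.5, 3])
const y = tf.relu(x)
y.print() // [0, 0, 0, 1.5, 3]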

Example 1: residual

export function residual(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {
  let out = conv(x, params.conv1)
  out = convNoRelu(out, params.conv2)
  out = tf.add(out, x)
  out = tf.relu(out)
  return out
}
Developer ID: BakirDiyar, Project: face-api.js, Lines of code: 7, Source file: residualLayer.ts

Example 2: residualDown

export function residualDown(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {
  let out = convDown(x, params.conv1)
  out = convNoRelu(out, params.conv2)

  let pooled = tf.avgPool(x, 2, 2, 'valid') as tf.Tensor4D
  const zeros = tf.zeros<tf.Rank.R4>(pooled.shape)
  const isPad = pooled.shape[3] !== out.shape[3]
  const isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2]

  if (isAdjustShape) {
    const padShapeX = [...out.shape] as [number, number, number, number]
    padShapeX[1] = 1
    const zerosW = tf.zeros<tf.Rank.R4>(padShapeX)
    out = tf.concat([out, zerosW], 1)

    const padShapeY = [...out.shape] as [number, number, number, number]
    padShapeY[2] = 1
    const zerosH = tf.zeros<tf.Rank.R4>(padShapeY)
    out = tf.concat([out, zerosH], 2)
  }

  pooled = isPad ? tf.concat([pooled, zeros], 3) : pooled
  out = tf.add(pooled, out) as tf.Tensor4D

  out = tf.relu(out)
  return out
}
Developer ID: BakirDiyar, Project: face-api.js, Lines of code: 27, Source file: residualLayer.ts
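
The trickiest part of residualDown is aligning the average-pooled shortcut with the downsampled convolution output: zeros are concatenated along whichever spatial or channel dimensions disagree, the two branches are added, and only then is relu applied. A minimal sketch of that zero-concatenation trick in isolation, with made-up shapes that are not taken from face-api.js:

import * as tf from '@tensorflow/tfjs-core'

// grow the channel dimension from 4 to 8 by concatenating zeros along axis 3,
// mirroring the isPad branch above (axis 3 is the channel axis in NHWC layout)
const a = tf.ones<tf.Rank.R4>([1, 3, 3, 4])
const padded = tf.concat([a, tf.zeros<tf.Rank.R4>([1, 3, 3, 4])], 3)
console.log(padded.shape) // [1, 3, 3, 8]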

Example 3: getImageTensor

    const outTensor = tf.tidy(() => {
      const params = this._params

      let imgTensor = getImageTensor(netInput)
      const [height, width] = imgTensor.shape.slice(1)
      imageDimensions = { width, height }


      // work with 128 x 128 sized face images
      if (imgTensor.shape[1] !== 128 || imgTensor.shape[2] !== 128) {
        imgTensor = tf.image.resizeBilinear(imgTensor, [128, 128])
      }

      let out = conv(imgTensor, params.conv0_params)
      out = maxPool(out)
      out = conv(out, params.conv1_params)
      out = conv(out, params.conv2_params)
      out = maxPool(out)
      out = conv(out, params.conv3_params)
      out = conv(out, params.conv4_params)
      out = maxPool(out)
      out = conv(out, params.conv5_params)
      out = conv(out, params.conv6_params)
      out = maxPool(out, [1, 1])
      out = conv(out, params.conv7_params)
      const fc0 = tf.relu(fullyConnectedLayer(out.as2D(out.shape[0], -1), params.fc0_params))
      const fc1 = fullyConnectedLayer(fc0, params.fc1_params)

      return fc1
    })
Developer ID: BakirDiyar, Project: face-api.js, Lines of code: 30, Source file: FaceLandmarkNet.ts
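
In example 3, relu is applied only to the first fully connected layer (fc0), while the final landmark output fc1 stays linear. fullyConnectedLayer is a face-api.js helper that is not reproduced here; a minimal sketch of the same matmul + bias + relu idea, with made-up shapes, might look like this:

import * as tf from '@tensorflow/tfjs-core'

// x: [batch, features], weights: [features, units], bias: [units]; all shapes are illustrative
const x = tf.zeros<tf.Rank.R2>([2, 64])
const weights = tf.zeros<tf.Rank.R2>([64, 32])
const bias = tf.zeros<tf.Rank.R1>([32])

// dense layer followed by the relu activation, as done for fc0 above
const fc = tf.relu(tf.add(tf.matMul(x, weights), bias))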

Example 4:

  return tf.tidy(() => {
    const out = tf.add(
      tf.conv2d(x, params.filters, [1, 1], padding),
      params.bias
    ) as tf.Tensor4D

    return withRelu ? tf.relu(out) : out
  })
Developer ID: BakirDiyar, Project: face-api.js, Lines of code: 8, Source file: convLayer.ts

Example 5: convLayer

function convLayer(
  x: tf.Tensor4D,
  params: ConvLayerParams,
  strides: [number, number],
  withRelu: boolean,
  padding: 'valid' | 'same' = 'same'
): tf.Tensor4D {
  const { filters, bias } = params.conv

  let out = tf.conv2d(x, filters, strides, padding)
  out = tf.add(out, bias)
  out = scale(out, params.scale)
  return withRelu ? tf.relu(out) : out
}
Developer ID: BakirDiyar, Project: face-api.js, Lines of code: 14, Source file: convLayer.ts
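
Examples 4 and 5 both follow the conv2d -> add bias -> (optional scale) -> relu pattern, with the withRelu flag deciding whether the activation is applied. A self-contained sketch of that pattern with made-up filter and bias shapes (not the actual face-api.js parameters):

import * as tf from '@tensorflow/tfjs-core'

// dummy NHWC input and a 3x3 convolution from 3 to 16 channels; all shapes are illustrative
const x = tf.zeros<tf.Rank.R4>([1, 8, 8, 3])
const filters = tf.zeros<tf.Rank.R4>([3, 3, 3, 16]) // [filterHeight, filterWidth, inChannels, outChannels]
const bias = tf.zeros<tf.Rank.R1>([16])

const out = tf.relu(tf.add(tf.conv2d(x, filters, [1, 1], 'same'), bias)) as tf.Tensor4D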

Example 6: switch

export let executeOp: OpExecutor = (node: Node, tensorMap: NamedTensorsMap,
                                    context: ExecutionContext):
                                       tfc.Tensor[] => {
  switch (node.op) {
    case 'abs':
      return [tfc.abs(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'acos':
      return [tfc.acos(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'acosh':
      return [tfc.acosh(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'asin':
      return [tfc.asin(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'asinh':
      return [tfc.asinh(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'atan':
      return [tfc.atan(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'atanh':
      return [tfc.atanh(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'ceil':
      return [tfc.ceil(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'cos':
      return [tfc.cos(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'cosh':
      return [tfc.cosh(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'elu':
      return [tfc.elu(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'erf':
      return [tfc.erf(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'exp':
      return [tfc.exp(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'expm1': {
      return [tfc.expm1(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'floor':
      return [tfc.floor(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'log':
      return [tfc.log(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'log1p': {
      return [tfc.log1p(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'neg':
      return [tfc.neg(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'reciprocal': {
      return [tfc.reciprocal(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'relu':
      return [tfc.relu(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'round': {
      return [tfc.round(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'selu':
      return [tfc.selu(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'sigmoid':
      return [tfc.sigmoid(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'sin':
      return [tfc.sin(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    case 'sign': {
      return [tfc.sign(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'sinh': {
      return [tfc.sinh(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'softplus': {
      return [tfc.softplus(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'sqrt': {
      return [tfc.sqrt(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
    case 'square': {
      return [tfc.square(
          getParamValue('x', node, tensorMap, context) as tfc.Tensor)];
    }
//......... the rest of the code is omitted here .........
Developer ID: oveddan, Project: tfjs-converter, Lines of code: 101, Source file: basic_math_executor.ts
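
The switch in example 6 maps each unary op name in the graph to the matching @tensorflow/tfjs-core function applied to the node's 'x' input. The same dispatch idea can be sketched with a lookup table; this is not how tfjs-converter is written, and the names below are illustrative:

import * as tfc from '@tensorflow/tfjs-core'

// hypothetical table-driven version of the unary-op dispatch shown above
const unaryOps: {[op: string]: (x: tfc.Tensor) => tfc.Tensor} = {
  abs: tfc.abs,
  relu: tfc.relu,
  sigmoid: tfc.sigmoid,
  sqrt: tfc.sqrt,
}

function runUnaryOp(op: string, x: tfc.Tensor): tfc.Tensor[] {
  const fn = unaryOps[op]
  if (!fn) throw new Error(`Unsupported unary op: ${op}`)
  return [fn(x)]
}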


Note: The @tensorflow/tfjs-core.relu function examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.