

C++ std::log1p Method Code Examples

This article collects typical usage examples of the std::log1p method in C++. If you are wondering what std::log1p does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples from std, the namespace this method belongs to.


The following presents 4 code examples of the std::log1p method, sorted by popularity by default.
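Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of why std::log1p is preferred over the naive std::log(1.0 + x) when x is very small:

#include <cmath>
#include <cstdio>

int main() {
    double x = 1e-16;
    // Naive form: 1.0 + x rounds to exactly 1.0 in double precision,
    // so the logarithm collapses to 0.
    double naive  = std::log(1.0 + x);
    // std::log1p(x) computes log(1 + x) accurately even for tiny x.
    double stable = std::log1p(x);
    std::printf("naive = %.17g, log1p = %.17g\n", naive, stable);
    return 0;
}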

Example 1: logpmf_dlaplace

inline double logpmf_dlaplace(double x, double p, double mu,
                              bool& throw_warning) {
#ifdef IEEE_754
  if (ISNAN(x) || ISNAN(p) || ISNAN(mu))
    return x+p+mu;
#endif
  if (p <= 0.0 || p >= 1.0) {
    throw_warning = true;
    return NAN;
  }
  if (!isInteger(x))
    return R_NegInf;
  // (1.0-p)/(1.0+p) * pow(p, abs(x-mu));
  return log1p(-p) - log1p(p) + log(p) * abs(x-mu);
} 
Developer ID: cran, Project: extraDistr, Lines of code: 15, Source file: discrete-laplace-distribution.cpp
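The formula above is log((1-p)/(1+p) * p^|x-mu|), the PMF of the discrete Laplace distribution. As a cross-check, the following standalone sketch computes the same quantity using only the standard library; the R/Rcpp helpers ISNAN, isInteger and R_NegInf are replaced with <cmath> equivalents, and this is an illustration, not part of the extraDistr API:

#include <cmath>
#include <limits>

// Discrete Laplace log-PMF: log( (1-p)/(1+p) * p^|x - mu| ),
// evaluated with log1p/log for numerical stability.
inline double logpmf_dlaplace_sketch(double x, double p, double mu) {
    if (std::isnan(x) || std::isnan(p) || std::isnan(mu) ||
        p <= 0.0 || p >= 1.0)
        return std::numeric_limits<double>::quiet_NaN();
    if (std::floor(x) != x)  // the support is the integers
        return -std::numeric_limits<double>::infinity();
    return std::log1p(-p) - std::log1p(p) + std::log(p) * std::fabs(x - mu);
}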

Example 2: cdf_dlaplace

inline double cdf_dlaplace(double x, double p, double mu,
                           bool& throw_warning) {
#ifdef IEEE_754
  if (ISNAN(x) || ISNAN(p) || ISNAN(mu))
    return x+p+mu;
#endif
  if (p <= 0.0 || p >= 1.0) {
    throw_warning = true;
    return NAN;
  }
  if (x < mu) {
    // pow(p, -floor(x-mu))/(1.0+p);
    return exp( (log(p) * -floor(x-mu)) - log1p(p) );
  } else {
    // 1.0 - (pow(p, floor(x-mu)+1.0)/(1.0+p))
    return 1.0 - exp( log(p) * (floor(x-mu)+1.0) - log1p(p) );
  }
} 
Developer ID: cran, Project: extraDistr, Lines of code: 18, Source file: discrete-laplace-distribution.cpp
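As a sanity check on the branch point (a standalone sketch, not part of extraDistr): the two expressions meet at x = mu, and the jump of the CDF there equals the PMF at mu, (1-p)/(1+p):

#include <cmath>
#include <cstdio>

int main() {
    double p = 0.4, mu = 3.0;
    // First branch at x = mu - 1: floor(x - mu) = -1, so F = p / (1+p).
    double f_below = std::exp(std::log(p) * -std::floor((mu - 1.0) - mu) - std::log1p(p));
    // Second branch at x = mu: floor(x - mu) = 0, so F = 1 - p / (1+p).
    double f_at_mu = 1.0 - std::exp(std::log(p) * (std::floor(mu - mu) + 1.0) - std::log1p(p));
    std::printf("F(mu-1) = %.6f  F(mu) = %.6f  jump = %.6f  pmf(mu) = %.6f\n",
                f_below, f_at_mu, f_at_mu - f_below, (1.0 - p) / (1.0 + p));
    return 0;
}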

Example 3: DeepData::split

bool
DeepData::split (int pixel, float depth)
{
#if OIIO_CPLUSPLUS_VERSION >= 11
    using std::log1p;
    using std::expm1;
#endif
    bool splits_occurred = false;
    int zchan = m_impl->m_z_channel;
    int zbackchan = m_impl->m_zback_channel;
    if (zchan < 0)
        return false;   // No channel labeled Z -- we don't know what to do
    if (zbackchan < 0)
        return false;   // The samples are not extended -- nothing to split
    int nchans = channels();
    for (int s = 0; s < samples(pixel); ++s) {
        float zf = deep_value (pixel, zchan, s);     // z front
        float zb = deep_value (pixel, zbackchan, s); // z back
        if (zf < depth && zb > depth) {
            // The sample spans depth, so split it.
            // See http://www.openexr.com/InterpretingDeepPixels.pdf
            splits_occurred = true;
            insert_samples (pixel, s+1);
            copy_deep_sample (pixel, s+1, *this, pixel, s);
            set_deep_value (pixel, zbackchan, s,   depth);
            set_deep_value (pixel, zchan,     s+1, depth);
            // We have to proceed in two passes: the color adjustment
            // below reads the alpha values, so we can't overwrite the
            // alphas until the colors are done.
            for (int c = 0; c < nchans; ++c) {
                int alphachan = m_impl->m_myalphachannel[c];
                if (alphachan < 0   // No alpha
                      || alphachan == c)  // This is an alpha!
                    continue;
                float a = clamp (deep_value (pixel, alphachan, s), 0.0f, 1.0f);
                if (a == 1.0f) // Opaque or channels without alpha, we're done.
                    continue;
                float xf = (depth - zf) / (zb - zf);
                float xb = (zb - depth) / (zb - zf);
                if (a > std::numeric_limits<float>::min()) {
                    float af = -expm1 (xf * log1p (-a));
                    float ab = -expm1 (xb * log1p (-a));
                    float val = deep_value (pixel, c, s);
                    set_deep_value (pixel, c, s,   (af/a) * val);
                    set_deep_value (pixel, c, s+1, (ab/a) * val);
                } else {
                    float val = deep_value (pixel, c, s);
                    set_deep_value (pixel, c, s,   val * xf);
                    set_deep_value (pixel, c, s+1, val * xb);
                }
            }
            // Now that we've adjusted the colors, do the alphas
            for (int c = 0; c < nchans; ++c) {
                int alphachan = m_impl->m_myalphachannel[c];
                if (alphachan != c)
                    continue;  // skip if not an alpha
                float a = clamp (deep_value (pixel, alphachan, s), 0.0f, 1.0f);
                if (a == 1.0f) // Opaque or channels without alpha, we're done.
                    continue;
                float xf = (depth - zf) / (zb - zf);
                float xb = (zb - depth) / (zb - zf);
                if (a > std::numeric_limits<float>::min()) {
                    float af = -expm1 (xf * log1p (-a));
                    float ab = -expm1 (xb * log1p (-a));
                    set_deep_value (pixel, c, s,   af);
                    set_deep_value (pixel, c, s+1, ab);
                } else {
                    set_deep_value (pixel, c, s,   a * xf);
                    set_deep_value (pixel, c, s+1, a * xb);
                }
            }
        }
    }
    return splits_occurred;
}
Developer ID: KelSolaar, Project: oiio, Lines of code: 74, Source file: deepdata.cpp
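The key numerical step in split() is computing the partial opacity 1 - (1-a)^x without cancellation, via -expm1(x * log1p(-a)). A small standalone sketch (illustrative only; partial_alpha is not an OIIO function) comparing it with the naive form:

#include <cmath>
#include <cstdio>

// Opacity of a fraction x of a deep sample whose total opacity is a:
// a_part = 1 - (1-a)^x, evaluated without catastrophic cancellation.
static float partial_alpha(float a, float x) {
    return -std::expm1(x * std::log1p(-a));
}

int main() {
    float a = 1e-6f, x = 0.25f;
    float stable = partial_alpha(a, x);
    float naive  = 1.0f - std::pow(1.0f - a, x);  // loses precision for tiny a
    std::printf("stable = %.9g  naive = %.9g\n", stable, naive);
    return 0;
}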

Example 4: DeepData::merge_overlaps

void
DeepData::merge_overlaps (int pixel)
{
#if OIIO_CPLUSPLUS_VERSION >= 11
    using std::log1p;
#endif
    int zchan = m_impl->m_z_channel;
    int zbackchan = m_impl->m_zback_channel;
    if (zchan < 0)
        return;   // No channel labeled Z -- we don't know what to do
    if (zbackchan < 0)
        zbackchan = zchan;  // Missing Zback -- use Z
    int nchans = channels();
    for (int s = 1 /* YES, 1 */; s < samples(pixel); ++s) {
        float zf = deep_value (pixel, zchan, s);     // z front
        float zb = deep_value (pixel, zbackchan, s); // z back
        if (zf == deep_value (pixel, zchan, s-1) &&
            zb == deep_value (pixel, zbackchan, s-1)) {
            // The samples overlap exactly; merge them following
            // http://www.openexr.com/InterpretingDeepPixels.pdf
            for (int c = 0; c < nchans; ++c) {  // set the colors
                int alphachan = m_impl->m_myalphachannel[c];
                if (alphachan < 0)
                    continue;    // Not color or alpha
                if (alphachan == c)
                    continue;  // Adjust the alphas in a second pass below
                float a1 = (alphachan < 0) ? 1.0f :
                           clamp (deep_value (pixel, alphachan, s-1), 0.0f, 1.0f);
                float a2 = (alphachan < 0) ? 1.0f :
                           clamp (deep_value (pixel, alphachan, s), 0.0f, 1.0f);
                float c1 = deep_value (pixel, c, s-1);
                float c2 = deep_value (pixel, c, s);
                float am = a1 + a2 - a1 * a2;
                float cm;
                if (a1 == 1.0f && a2 == 1.0f)
                    cm = (c1 + c2) / 2.0f;
                else if (a1 == 1.0f)
                    cm = c1;
                else if (a2 == 1.0f)
                    cm = c2;
                else {
                    static const float MAX = std::numeric_limits<float>::max();
                    float u1 = -log1p (-a1);
                    float v1 = (u1 < a1 * MAX)? u1 / a1: 1.0f;
                    float u2 = -log1p (-a2);
                    float v2 = (u2 < a2 * MAX)? u2 / a2: 1.0f;
                    float u = u1 + u2;
                    float w = (u > 1.0f || am < u * MAX)? am / u: 1.0f;
                    cm = (c1 * v1 + c2 * v2) * w;
                }
                set_deep_value (pixel, c, s-1, cm); // setting color
            }
            for (int c = 0; c < nchans; ++c) {  // set the alphas
                int alphachan = m_impl->m_myalphachannel[c];
                if (alphachan != c)
                    continue;    // This pass is only for alphas
                float a1 = (alphachan < 0) ? 1.0f :
                           clamp (deep_value (pixel, alphachan, s-1), 0.0f, 1.0f);
                float a2 = (alphachan < 0) ? 1.0f :
                           clamp (deep_value (pixel, alphachan, s), 0.0f, 1.0f);
                float am = a1 + a2 - a1 * a2;
                set_deep_value (pixel, c, s-1, am); // setting alpha
            }
            // Now eliminate sample s and revisit again
            erase_samples (pixel, s, 1);
            --s;
        }
    }
}
Developer ID: KelSolaar, Project: oiio, Lines of code: 69, Source file: deepdata.cpp
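For exactly overlapping samples the merged alpha is am = a1 + a2 - a1*a2, and the color weights come from the optical thickness u = -log(1-a), computed here as -log1p(-a). A tiny standalone sketch of the general case (both alphas strictly between 0 and 1, without the overflow guards of the code above; not part of the OIIO API):

#include <cmath>
#include <cstdio>

int main() {
    float a1 = 0.3f, a2 = 0.5f;    // alphas of two exactly overlapping samples
    float c1 = 0.2f, c2 = 0.6f;    // premultiplied color in one channel
    float am = a1 + a2 - a1 * a2;  // merged alpha
    float u1 = -std::log1p(-a1);   // optical thickness of sample 1
    float u2 = -std::log1p(-a2);   // optical thickness of sample 2
    float w  = am / (u1 + u2);
    float cm = (c1 * (u1 / a1) + c2 * (u2 / a2)) * w;  // merged color
    std::printf("merged alpha = %.4f  merged color = %.4f\n", am, cm);
    return 0;
}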


Note: The std::log1p examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use of the code should follow the corresponding project's license. Do not reproduce this article without permission.