

C++ MyMatrix::rows Method Code Examples

This article collects typical usage examples of the C++ MyMatrix::rows method, gathered from open-source projects. If you are looking for concrete examples of how MyMatrix::rows is called in practice, the selected snippets below may help. You can also explore further usage examples of the MyMatrix class to which this method belongs.


Six code examples of the MyMatrix::rows method are shown below, sorted by popularity by default.
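Before the examples: all of the projects below call MyMatrix::rows() (together with columns() or cols()) to query matrix dimensions and drive element-wise loops. As a frame of reference, here is a minimal, self-contained sketch of that interface; MyMatrixLike and sumElements are stand-ins written for this page, not types or functions from the cited projects.

// Minimal stand-in (not the real MyMatrix) illustrating the rows()/columns()
// interface the examples below depend on.
#include <cstddef>
#include <vector>

class MyMatrixLike {
public:
    MyMatrixLike(std::size_t rows, std::size_t cols)
        : rows_(rows), cols_(cols), data_(rows * cols, 0.0) {}

    std::size_t rows() const { return rows_; }
    std::size_t columns() const { return cols_; }

    double& operator()(std::size_t i, std::size_t j) { return data_[i * cols_ + j]; }
    double operator()(std::size_t i, std::size_t j) const { return data_[i * cols_ + j]; }

private:
    std::size_t rows_, cols_;
    std::vector<double> data_;
};

// Typical element-wise traversal driven by rows()/columns(), as in Example 1.
double sumElements(const MyMatrixLike& m) {
    double total = 0.0;
    for (std::size_t i = 0; i < m.rows(); ++i)
        for (std::size_t j = 0; j < m.columns(); ++j)
            total += m(i, j);
    return total;
}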

Example 1: defaultCellMatrixImpl

// Construct a CellMatrix from a MyMatrix by copying it element by element,
// sized via data.rows() and data.columns().
CellMatrix::CellMatrix(const MyMatrix& data)
	: pimpl(new defaultCellMatrixImpl(data.rows(), data.columns()))
{
	for (size_t i(0); i < data.rows(); ++i)
	{
		for (size_t j(0); j < data.columns(); ++j)
		{
			(*pimpl)(i, j) = data(i, j);
		}
	}
}
Developer: Laeeth, Project: d_excelsdk, Lines of code: 12, Source file: CellMatrix.cpp

Example 2: BSDeltaWithParamsFD

// Black-Scholes delta computed by finite differences; the parameters arrive
// as a 1 x 6 row: Spot, Strike, r, d, vol, expiry.
double BSDeltaWithParamsFD(const MyMatrix& parametersMatrix, double epsilon) {
	if (parametersMatrix.columns() != 6 || parametersMatrix.rows() != 1) {
		throw("Input matrix should be 1 x 6");
	}
	double Spot   = parametersMatrix(0,0);
	double Strike = parametersMatrix(0,1);
	double r      = parametersMatrix(0,2);
	double d      = parametersMatrix(0,3);
	double vol    = parametersMatrix(0,4);
	double expiry = parametersMatrix(0,5);
	return BlackScholesDeltaFD(Spot, Strike, r, d, vol, expiry, epsilon);
}
Developer: quee0849, Project: XLLProject7, Lines of code: 11, Source file: source.cpp
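For orientation, a hypothetical call site for the function above. It assumes MyMatrix can be constructed with (rows, columns) dimensions and filled through operator(), which this snippet does not confirm; the parameter order (Spot, Strike, r, d, vol, expiry) is taken from the indices read above.

// Hypothetical usage sketch (not from the project): pack the six inputs into a
// 1 x 6 row in the order the function reads them, then request the delta.
MyMatrix params(1, 6);            // assumed (rows, columns) constructor
params(0, 0) = 100.0;             // Spot
params(0, 1) = 105.0;             // Strike
params(0, 2) = 0.05;              // r (risk-free rate)
params(0, 3) = 0.0;               // d (dividend yield)
params(0, 4) = 0.2;               // vol
params(0, 5) = 1.0;               // expiry in years
double delta = BSDeltaWithParamsFD(params, 1e-4);  // epsilon = finite-difference bump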

Example 3: BSZeroCouponBondWithParams

// Black-Scholes zero-coupon bond price; same 1 x 6 parameter layout as above.
double BSZeroCouponBondWithParams(const MyMatrix& parametersMatrix) {
	if (parametersMatrix.columns() != 6 || parametersMatrix.rows() != 1) {
		throw("Input matrix should be 1 x 6");
	}
	double Spot   = parametersMatrix(0,0);
	double Strike = parametersMatrix(0,1);
	double r      = parametersMatrix(0,2);
	double d      = parametersMatrix(0,3);
	double vol    = parametersMatrix(0,4);
	double expiry = parametersMatrix(0,5);
	return BlackScholesZeroCouponBond(Spot, Strike, r, d, vol, expiry);
}
Developer: quee0849, Project: XLLProject7, Lines of code: 11, Source file: source.cpp

Example 4: resultMatrix

// Black-Scholes Greeks by finite differences, returned as a 1 x 5 CellMatrix
// holding (Delta, Gamma, Vega, Rho, Theta); same 1 x 6 parameter layout as above.
CellMatrix BSGreeksFD(const MyMatrix& parametersMatrix, double epsilon) {
	if (parametersMatrix.columns() != 6 || parametersMatrix.rows() != 1) {
		throw("Input matrix should be 1 x 6");
	}
	double Spot   = parametersMatrix(0,0);
	double Strike = parametersMatrix(0,1);
	double r      = parametersMatrix(0,2);
	double d      = parametersMatrix(0,3);
	double vol    = parametersMatrix(0,4);
	double expiry = parametersMatrix(0,5);
	CellMatrix resultMatrix(1,5); 
	resultMatrix(0,0) = BlackScholesDeltaFD(Spot,Strike,r,d,vol,expiry,epsilon);
	resultMatrix(0,1) = BlackScholesGammaFD(Spot,Strike,r,d,vol,expiry,epsilon);
	resultMatrix(0,2) = BlackScholesVegaFD(Spot,Strike,r,d,vol,expiry,epsilon);
	resultMatrix(0,3) = BlackScholesRhoFD(Spot,Strike,r,d,vol,expiry,epsilon);
	resultMatrix(0,4) = BlackScholesThetaFD(Spot,Strike,r,d,vol,expiry,epsilon);
	return resultMatrix;
}
Developer: quee0849, Project: XLLProject7, Lines of code: 18, Source file: source.cpp

Example 5: return

// Two matrices are equal when their dimensions match and their element-wise
// difference is the zero matrix.
bool MyMatrix::operator==(const MyMatrix& a) const
{
  if (this->rows() != a.rows()) return false;
  if (this->columns() != a.columns()) return false;
  return ((EigenMatrix)(*this) - (EigenMatrix)a).isApproxToConstant(0.0);
}
Developer: FalkorSystems, Project: hector_common, Lines of code: 6, Source file: matrix_EIGEN.cpp
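A side note on this example: Eigen's relative comparisons are not meaningful against a zero reference (the Eigen documentation recommends isZero() or isMuchSmallerThan() for that case), so isApproxToConstant(0.0) ends up behaving like an exact element-wise comparison. If a tolerance-based comparison is intended, a sketch such as the following may be closer to that intent; approxEqual is an alternative written for this page, not the project's code, and it reuses the MyMatrix/EigenMatrix types from Example 5.

// Hypothetical tolerance-based variant of the comparison above.
// isApprox performs Eigen's relative (norm-based) comparison of the two matrices.
bool approxEqual(const MyMatrix& a, const MyMatrix& b, double prec = 1e-12)
{
  if (a.rows() != b.rows() || a.columns() != b.columns()) return false;
  return ((EigenMatrix)a).isApprox((EigenMatrix)b, prec);
}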

Example 6: main

int main(int argc, char *argv[]){
  
	Params params;
  
	std::map<std::string, std::string> args;
	readArgs(argc, argv, args);
	if(args.find("algo")!=args.end()){
		params.algo = args["algo"];
	}else{
		params.algo = "qdMCNat";
	}

	if(args.find("inst_file")!=args.end())
		setParamsFromFile(args["inst_file"], args, params);
	else   
		setParams(params.algo, args, params);
  
	createLogDir(params.dir_path);
  
	gen.seed(params.seed);

	// Load the dataset
	MyMatrix X_train, X_valid;
	VectorXd Y_train, Y_valid;
	loadMnist(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
	//loadCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
	//loadLightCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
  
	// ConvNet parameters
	std::vector<ConvLayerParams> conv_params;
	ConvLayerParams conv_params1;
	conv_params1.Hf = 5;
	conv_params1.stride = 1;
	conv_params1.n_filter = 20;
	conv_params1.padding = 0;
	conv_params.push_back(conv_params1);
  
	ConvLayerParams conv_params2;
	conv_params2.Hf = 5;
	conv_params2.stride = 1;
	conv_params2.n_filter = 50;
	conv_params2.padding = 0;
	conv_params.push_back(conv_params2);

	std::vector<PoolLayerParams> pool_params;
	PoolLayerParams pool_params1;
	pool_params1.Hf = 2;
	pool_params1.stride = 2;
	pool_params.push_back(pool_params1);

	PoolLayerParams pool_params2;
	pool_params2.Hf = 2;
	pool_params2.stride = 2;
	pool_params.push_back(pool_params2);
  
	const unsigned n_conv_layer = conv_params.size();
  
	for(unsigned l = 0; l < conv_params.size(); l++){

		if(l==0){
			conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * params.img_depth;
			conv_params[l].N = (params.img_width - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
		}
		else{
			conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * conv_params[l-1].n_filter;
			conv_params[l].N = (pool_params[l-1].N - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
		}
		pool_params[l].N = (conv_params[l].N - pool_params[l].Hf)/pool_params[l].stride + 1;
	}
  
	// Neural Network parameters
	const unsigned n_training = X_train.rows();
	const unsigned n_valid = X_valid.rows();
	const unsigned n_feature = X_train.cols();
	const unsigned n_label = Y_train.maxCoeff() + 1;
  
	params.nn_arch.insert(params.nn_arch.begin(),conv_params[n_conv_layer-1].n_filter * pool_params[n_conv_layer-1].N * pool_params[n_conv_layer-1].N);
	params.nn_arch.push_back(n_label);
	const unsigned n_layers = params.nn_arch.size();
  
	// Optimization parameter
	const int n_train_batch = ceil(n_training/(float)params.train_minibatch_size);
	const int n_valid_batch = ceil(n_valid/(float)params.valid_minibatch_size);
	double prev_loss = std::numeric_limits<double>::max();
	double eta = params.eta;

	// Create the convolutional layer
	std::vector<MyMatrix> conv_W(n_conv_layer);
	std::vector<MyMatrix> conv_W_T(n_conv_layer);
	std::vector<MyVector> conv_B(n_conv_layer);
  
	// Create the neural network
	MyMatrix W_out(params.nn_arch[n_layers-2],n_label);
	std::vector<MySpMatrix> W(n_layers-2);
	std::vector<MySpMatrix> Wt(n_layers-2);
	std::vector<MyVector> B(n_layers-1);

	double init_sigma = 0.;
	ActivationFunction act_func;
	ActivationFunction eval_act_func;
//......... part of this code omitted .........
Developer: gmarceaucaron, Project: DNN, Lines of code: 101, Source file: convnet_sp.cpp


Note: The MyMatrix::rows method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code. Do not reproduce without permission.