This article collects typical usage examples of the Python method feature_extractor.FeatureExtractor.transform. If you are looking for concrete examples of how FeatureExtractor.transform is called in practice, the code samples selected here may help. You can also look further into other usage examples of the containing class, feature_extractor.FeatureExtractor.
Four code examples of FeatureExtractor.transform are shown below, ordered by popularity.
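The feature_extractor module itself is not shown on this page and its implementation differs between projects. As a point of reference only, here is a minimal, hypothetical sketch of the fit/transform contract that the tabular examples below (Examples 3 and 4) rely on; everything in it is an assumption rather than the real class:

import numpy as np

class FeatureExtractor(object):
    """Hypothetical sketch: turn a pandas DataFrame into a numeric feature matrix."""

    def fit(self, X_df, y_array=None):
        # remember the numeric columns seen at fit time so transform() is deterministic
        self.columns_ = X_df.select_dtypes(include=[np.number]).columns
        return self

    def transform(self, X_df):
        # return a plain 2-D ndarray, one row per input row
        return X_df[self.columns_].values.astype(np.float64)

The image examples (Examples 1 and 2), by contrast, wrap a pre-trained Caffe network and return one fixed-length feature vector per input image.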
Example 1: main
# Required import: from feature_extractor import FeatureExtractor
# Or: from feature_extractor.FeatureExtractor import transform
# (this snippet also uses os, cv2, and pyprind)
def main():
    dataset_path = "/path/to/Caltech-101"
    modelzoo_path = "/path/to/VGG16"
    # create an instance
    convnet = FeatureExtractor(
        prototxt_path=os.path.join(modelzoo_path, "vgg16_deploy.prototxt"),
        caffemodel_path=os.path.join(modelzoo_path, "vgg16.caffemodel"),
        target_layer_name="fc7",
        image_size=224,
        mean_values=[103.939, 116.779, 123.68])
    # header: file path followed by the 4096 fc7 feature columns
    f = open("caltech101_vggnet_fc7_features.csv", "w")
    header = ["filepath"]
    for i in range(4096):
        header.append("feat%d" % (i + 1))
    header = ",".join(header) + "\n"
    f.write(header)
    # extract features, one image at a time, category by category
    categories = os.listdir(dataset_path)
    for category in pyprind.prog_bar(categories):
        file_names = os.listdir(os.path.join(dataset_path, category))
        for file_name in file_names:
            img = cv2.imread(os.path.join(dataset_path, category, file_name))
            feat = convnet.transform(img)
            feat_str = [os.path.join(category, file_name)]
            for value in feat:
                feat_str.append(str(value))
            row = ",".join(feat_str)
            f.write("%s\n" % row)
            f.flush()
    f.close()
Developer: norikinishida; project: image-feature-extraction-via-convnet; source file: extract_features_caltech101.py
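The CSV written above can then be loaded for downstream use. A minimal read-back sketch (assuming pandas is available; the column layout is exactly the one produced by the example):

import pandas as pd

df = pd.read_csv("caltech101_vggnet_fc7_features.csv")
filepaths = df["filepath"].values              # "<category>/<file_name>" strings
features = df.drop("filepath", axis=1).values  # array of shape (num_images, 4096)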
Example 2: main
# Required import: from feature_extractor import FeatureExtractor
# Or: from feature_extractor.FeatureExtractor import transform
# (this snippet also uses os, sys, cv2, and numpy as np)
def main():
    caffe_alexnet_path = "/path/to/caffe-modelzoo/AlexNet"
    caffe_vgg16_path = "/path/to/caffe-modelzoo/VGG16"
    caffe_googlenet_path = "/path/to/caffe-modelzoo/GoogleNet"
    keys_path = "/path/to/dataset/keys.txt"
    data_path = "/path/to/dataset/images"
    dst_path = "/path/to/dataset/features.npy"
    modelname = "VGG16"
    # load the chosen pre-trained model
    if modelname == "AlexNet":
        if not os.path.exists(os.path.join(caffe_alexnet_path, "imagenet_mean.npy")):
            convert_mean_file(caffe_alexnet_path)
        convnet = FeatureExtractor(
            prototxt_path=os.path.join(caffe_alexnet_path, "alexnet_deploy.prototxt"),
            caffemodel_path=os.path.join(caffe_alexnet_path, "alexnet.caffemodel"),
            target_layer_name="fc6",
            image_size=227,
            mean_path=os.path.join(caffe_alexnet_path, "imagenet_mean.npy"))
    elif modelname == "VGG16":
        convnet = FeatureExtractor(
            prototxt_path=os.path.join(caffe_vgg16_path, "vgg16_deploy.prototxt"),
            caffemodel_path=os.path.join(caffe_vgg16_path, "vgg16.caffemodel"),
            target_layer_name="fc6",
            image_size=224,
            mean_values=[103.939, 116.779, 123.68])
    elif modelname == "GoogleNet":
        convnet = FeatureExtractor(
            prototxt_path=os.path.join(caffe_googlenet_path, "googlenet_deploy.prototxt"),
            caffemodel_path=os.path.join(caffe_googlenet_path, "googlenet.caffemodel"),
            target_layer_name="pool5/7x7_s1",
            image_size=224,
            mean_values=[104.0, 117.0, 123.0])
    else:
        print("Unknown model name: %s" % modelname)
        sys.exit(-1)
    # data list
    keys = load_keys(keys_path)
    # feature extraction
    feats = []
    for key in keys:
        img = cv2.imread(os.path.join(data_path, key))
        assert img is not None
        feat = convnet.transform(img)
        feats.append(feat)
    feats = np.asarray(feats)
    np.save(dst_path, feats)
    print("Done.")
Example 3: train_model
# Required import: from feature_extractor import FeatureExtractor
# Or: from feature_extractor.FeatureExtractor import transform
# (this snippet also uses numpy as np and a project-local Regressor)
def train_model(X_df, y_array, skf_is):
    fe = FeatureExtractor()
    fe.fit(X_df, y_array)
    X_array = fe.transform(X_df)
    # Regression
    train_is, _ = skf_is
    X_train_array = np.array([X_array[i] for i in train_is])
    y_train_array = np.array([y_array[i] for i in train_is])
    reg = Regressor()
    reg.fit(X_train_array, y_train_array)
    return fe, reg
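train_model expects skf_is to be a single (train_indices, test_indices) pair. The surrounding evaluation loop is not part of the snippet; a hedged sketch of one possible call site, assuming X_df and y_array already exist and using scikit-learn's ShuffleSplit:

from sklearn.model_selection import ShuffleSplit

cv = ShuffleSplit(n_splits=8, test_size=0.2, random_state=57)
for skf_is in cv.split(X_df, y_array):
    fe, reg = train_model(X_df, y_array, skf_is)
    _, test_is = skf_is
    # score the held-out fold with the fitted extractor and regressor
    y_pred = reg.predict(fe.transform(X_df)[test_is])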
Example 4: train_test_split
# Required import: from feature_extractor import FeatureExtractor
# Or: from feature_extractor.FeatureExtractor import transform
# (this snippet also uses numpy as np, pandas as pd, and sklearn's train_test_split / mean_squared_error)
df = df.drop(df_tmp.index)
from regressor import Regressor
from feature_extractor import FeatureExtractor
# split into features and target, then into train / test halves
df_features = df.drop('target', axis=1)
y = df.target.values
df_train, df_test, y_train, y_test = train_test_split(
    df_features, y, test_size=0.5, random_state=42)
feature_extractor = FeatureExtractor()
model = Regressor()
X_train = feature_extractor.transform(df_train)
model.fit(X_train, y_train)
X_test = feature_extractor.transform(df_test)
y_pred = model.predict(X_test)
print('RMSE = ', np.sqrt(mean_squared_error(y_test, y_pred)))
# look up which columns survived the imputer step inside the fitted pipeline
imputer = model.clf.named_steps['imputer']
valid_idx = imputer.transform(np.arange(df_train.shape[1])).astype(int)
et = model.clf.named_steps['extratreesregressor']
feature_importances = pd.DataFrame(data=et.feature_importances_,
                                   index=df_train.columns[valid_idx][0])
feature_importances['counts'] = df_train.count()[valid_idx][0]
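As an illustrative follow-up (not part of the original snippet), the resulting DataFrame can be renamed and sorted to list the most important surviving columns:

feature_importances.columns = ['importance', 'counts']
print(feature_importances.sort_values('importance', ascending=False).head(10))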