本文整理汇总了Python中DataModel类的典型用法代码示例。如果您正苦于以下问题:Python DataModel类的具体用法?Python DataModel怎么用?Python DataModel使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DataModel类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: returnCheckerBoardFunction
def returnCheckerBoardFunction(self, Ndata: int, nx: int, ny: int, delta: float):
    """Generate two entangled 2-D classes laid out like a checkerboard.

    One class occupies the "black" squares of an nx-by-ny board and the
    other the "white" squares; every sample is jittered by up to +/-delta
    in each coordinate.

    Parameters:
        Ndata: number of instances generated per quadrant.
        nx: number of quadrants in x.
        ny: number of quadrants in y.
        delta: scramble factor; a lower delta means more entangled classes.

    Returns:
        (cls1, cls2): DataModel.clsData objects encapsulating the data of
        each sub-class.

    Example:
        import LoadData
        import matplotlib.pyplot as plt
        myData = LoadData.clsWorkingData()
        cls1, cls2 = myData.returnCheckerBoardFunction(50, 2, 2, 0.5)
        plt.plot(cls1.data[:, 0], cls1.data[:, 1], 'g*')
        plt.plot(cls2.data[:, 0], cls2.data[:, 1], 'rd')
        plt.show()

    Modified:
        (LEO) : 17/01/2016
    Version:
        v0.1
    """
    # NOTE(fix): docstring previously referred to 'initCheckerBoardFunction'
    # although the method is named returnCheckerBoardFunction; jitter used
    # an inconsistent mix of 2 and 2.0, and the duplicated dx/dy code in
    # both branches is now hoisted (same random.random() call order).
    cls1 = DataModel.clsData(1, 1, 0)
    cls2 = DataModel.clsData(0, 1, 0)
    data1 = []
    data2 = []
    for _ in range(Ndata):
        for i in range(nx):
            for j in range(ny):
                # Uniform jitter in [-delta, +delta] for both coordinates.
                dx = -delta + 2.0 * delta * random.random()
                dy = -delta + 2.0 * delta * random.random()
                # Parity of i+j selects the checkerboard colour (class).
                if (i + j) % 2 == 1:
                    data1.append([i + dx, j + dy])
                else:
                    data2.append([i + dx, j + dy])
    cls1.setData(np.matrix(data1))
    cls2.setData(np.matrix(data2))
    return cls1, cls2
示例2: load_team_info
def load_team_info( global_config, name=None):
    """Load FRC team info from The Blue Alliance into the database.

    When name is None, pages through every team TBA knows about and
    stores each one; otherwise only logs the request for the named team
    (the single-team path is currently disabled).
    """
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    if name is None:
        global_config['logger'].debug( 'Loading Team Info For All FRC Teams' )
        page = 0
        while True:
            # TBA pages teams; an empty page marks the end of the list.
            teams_data = TbaIntf.get_from_tba_parsed('/api/v2/teams/%d' % (page))
            if len(teams_data) == 0:
                break
            for team_data in teams_data:
                DataModel.setTeamInfoFromTba(session, team_data)
            page += 1
    else:
        global_config['logger'].debug( 'Loading Team Info For FRC Team %s' % name )
        # Single-team refresh is currently disabled:
        # url_str = '/api/v2/team/frc%s/%s' % (team_str, query_str)
        # for page in range(0, 14):
        #     teams_data = TbaIntf.get_from_tba_parsed(url_str)
        #     for team_data in teams_data:
        #         setTeamInfoFromTba(session, team_data)
    session.remove()
示例3: compareImplementations
def compareImplementations():
    """Cross-check SimpleNN cost/gradient against the NN_1HL reference.

    Evaluates both implementations on the same 500-sample training subset
    with identical random initial thetas and prints each cost and gradient
    checksum for manual comparison.
    """
    (x, y) = DataModel.loadData("..\\train.csv")
    y = y.astype(int)
    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)
    subset_x = x_train[:500, :]
    subset_y = y_train[:500]

    mine = SimpleNN.SimpleNN([784, 70, 10])
    ref = NN_1HL.NN_1HL(reg_lambda=1, opti_method='CG')

    # Fixed seed so both implementations see the same initial thetas.
    np.random.seed(123)
    thetas = [ref.rand_init(784, 70), ref.rand_init(70, 10)]

    cost_ref = ref.function(ref.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, subset_x, subset_y, 10)
    grad_ref = ref.function_prime(ref.pack_thetas(thetas[0], thetas[1]), 784, 70, 10, subset_x, subset_y, 10)
    print(cost_ref, np.sum(grad_ref))

    cost_mine = mine.computeCost(mine.combineTheta(thetas.copy()), subset_x, subset_y, 10)
    grad_mine = mine.computeGrad(mine.combineTheta(thetas), subset_x, subset_y, 10)
    print(cost_mine, np.sum(grad_mine))
示例4: process_delete_attr_form
def process_delete_attr_form(global_config, form):
    """Handle the 'delete scouting attribute value' web form.

    Reads season/competition/team/attribute fields from the submitted
    form, deletes old_value from the named attribute, and returns a
    human-readable result string describing success or failure.
    """
    global_config['logger'].debug( 'Process Attribute Delete Form' )

    season = form[attr_delete_season_label].value
    comp = form[attr_delete_comp_label].value
    team = form[attr_delete_team_number_label].value
    attr_name = form[attr_delete_attribute_name_label].value
    old_value = form[attr_delete_old_value_label].value

    # Initialize the database session connection
    db_name = global_config['db_name'] + global_config['this_season']
    session = DbSession.open_db_session(db_name)

    # BUG FIX: 'result' was previously undefined when no attribute
    # definition file exists for this competition, raising NameError
    # at the return statement below.
    result = 'Error Removing Scouting Data Attribute Value %s From %s: No Attribute Definitions Found' % (old_value, attr_name)

    attrdef_filename = WebCommonUtils.get_attrdef_filename(short_comp=comp)
    if attrdef_filename is not None:
        attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
        attr_definitions.parse(attrdef_filename)
        attr_def = attr_definitions.get_definition(attr_name)
        try:
            DataModel.deleteAttributeValue(session, team, comp+season, attr_name, old_value, attr_def)
            result = 'Scouting Data Attribute Value %s Successfully Removed From %s' % (old_value,attr_name)
            session.commit()
        except ValueError as reason:
            result = 'Error Removing Scouting Data Attribute Value %s From %s: %s' % (old_value,attr_name,reason)

    session.remove()
    return result
示例5: set_team_geo_location
def set_team_geo_location(global_config, team_key=None):
    """Resolve and store the geographic location for one team (or all).

    Opens a session against the current season's database, delegates the
    work to DataModel.setTeamGeoLocation, then releases the session.
    """
    db_name = global_config['db_name'] + global_config['this_season']
    session = DbSession.open_db_session(db_name)
    DataModel.setTeamGeoLocation(session, team_key)
    session.remove()
示例6: process_form
def process_form(global_config, form):
    """Handle the 'modify scouting attribute value' web form.

    Reads season/competition/team/attribute fields from the submitted
    form, replaces old_value with new_value for the named attribute, and
    returns a human-readable result string describing success or failure.
    """
    global_config['logger'].debug( 'Process Attribute Modify Form' )

    season = form[attr_modify_season_label].value
    comp = form[attr_modify_comp_label].value
    team = form[attr_modify_team_number_label].value
    attr_name = form[attr_modify_attribute_name_label].value
    old_value = form[attr_modify_old_value_label].value
    new_value = form[attr_modify_new_value_label].value

    # Initialize the database session connection
    db_name = global_config['db_name'] + global_config['this_season']
    session = DbSession.open_db_session(db_name)

    # BUG FIX: 'result' was previously undefined when no attribute
    # definition file exists for this competition, raising NameError
    # at the return statement below.
    result = 'Error Modifying Scouting Attribute %s For Team %s: No Attribute Definitions Found' % (attr_name, team)

    attrdef_filename = WebCommonUtils.get_attrdef_filename(short_comp=comp)
    if attrdef_filename is not None:
        attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
        attr_definitions.parse(attrdef_filename)
        attr_def = attr_definitions.get_definition(attr_name)
        try:
            DataModel.modifyAttributeValue(session, team, comp+season, attr_name, old_value, new_value, attr_def)
            result = 'Attribute %s Modified From %s to %s For Team %s' % (attr_name,old_value,new_value,team)
            session.commit()
        except ValueError as reason:
            # Typo fix: 'Addribute' -> 'Attribute' in the error message.
            result = 'Error Modifying Scouting Attribute %s For Team %s: %s' % (attr_name,team,reason)

    session.remove()
    return result
示例7: compareImplementations2
def compareImplementations2():
    """Cross-check SimpleNN2 cost and gradients against the NN_1HL reference.

    Evaluates both implementations on the same 500-sample training subset
    with identical random initial thetas and prints the costs and gradient
    sums for manual comparison.
    """
    (x, y) = DataModel.loadData("..\\train.csv")
    y = y.astype(int)
    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)
    subset_x = x_train[:500, :]
    subset_y = y_train[:500]

    mine = SimpleNN2.NeuralNetConfig(784, 70, 10)
    ref = NN_1HL.NN_1HL(reg_lambda=10, opti_method='CG')

    # Fixed seed so both implementations see the same initial thetas.
    np.random.seed(123)
    thetas = [ref.rand_init(784, 70), ref.rand_init(70, 10)]

    # Check costs
    cost_ref = ref.function(ref.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, subset_x, subset_y, 10)
    print("Cost test: ", cost_ref)
    cost_mine = SimpleNN2.computeCost(mine, thetas[0], thetas[1], subset_x, subset_y, 10)
    print("Cost my: ", cost_mine)

    # Check gradients
    grad_ref = ref.function_prime(ref.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, subset_x, subset_y, 10)
    print("Grad sum test: ", np.sum(grad_ref))
    grad_mine1, grad_mine2 = SimpleNN2.computeGrad(mine, thetas[0], thetas[1], subset_x, subset_y, 10)
    print("Grad sum my: ", np.sum(grad_mine1) + np.sum(grad_mine2))
示例8: process_files
def process_files(global_config, attr_definitions, input_dir, recursive=True):
    # Scan input_dir for scouting data files named Team*.txt, parse each
    # one into the database, and mark every file as processed so it is not
    # picked up again on the next run.
    # NOTE(review): this snippet is Python 2 (print statement,
    # `except Exception, e`) — keep it py2 or port consistently.
    start_time = datetime.datetime.now()

    # Initialize the database session connection
    db_name = global_config['db_name']
    session = DbSession.open_db_session(db_name)

    some_files_processed = False

    # The following regular expression will select all files that conform to
    # the file naming format Team*.txt. Build a list of all datafiles that match
    # the naming format within the directory passed in via command line
    # arguments.
    file_regex = re.compile('Team[a-zA-Z0-9_]+.txt')
    files = get_files(global_config, session, db_name, input_dir, file_regex, recursive)
    print 'files retrieved, elapsed time - %s' % (str(datetime.datetime.now()-start_time))

    # Process data files
    for data_filename in files:
        try:
            process_file( global_config, session, attr_definitions, data_filename)
        except Exception, e:
            # log the exception but continue processing other files
            log_exception(global_config['logger'], e)

        # add the file to the set of processed files so that we don't process it again. Do it outside the
        # try/except block so that we don't try to process a bogus file over and over again.
        DataModel.addProcessedFile(session, data_filename)
        some_files_processed = True

    # Commit all updates to the database
    session.commit()
示例9: load_event_info
def load_event_info(global_config, year_str):
    # Load FRC event info from The Blue Alliance (TBA) into the database.
    # year_str is either a single year string or 'all' to walk every
    # season since FRC began (1992).
    # NOTE(review): Python 2 snippet (print statement on the single-year path).
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    if year_str.lower() == 'all':
        # get all events since the beginning of time
        year = 1992
        done = False
        while not done:
            url_str = '/api/v2/events/%d' % year
            events_data = TbaIntf.get_from_tba_parsed(url_str)
            if len(events_data) == 0:
                # An empty response means we have walked past the latest season.
                done = True
            else:
                for event_data in events_data:
                    #print 'Event: %s' % event_data['key']
                    DataModel.addOrUpdateEventInfo(session, event_data)
                year += 1
    else:
        url_str = '/api/v2/events/%s' % year_str
        events_data = TbaIntf.get_from_tba_parsed(url_str)
        for event_data in events_data:
            print 'Event: %s' % event_data['key']
            DataModel.addOrUpdateEventInfo(session, event_data)
    session.commit()
    session.remove()
示例10: get_event_geo_location
def get_event_geo_location(global_config, event_key=None):
    """Resolve and store geo-location info for one event (or all events).

    Opens a session on the current season's database, delegates the work
    to DataModel.setEventsGeoLocation, then releases the session.
    """
    db_name = global_config['db_name'] + global_config['this_season']
    session = DbSession.open_db_session(db_name)
    DataModel.setEventsGeoLocation(session, event_key)
    session.remove()
示例11: get_team_list_json
def get_team_list_json(global_config, comp):
    """Return the team list for a competition as a JSON string.

    Looks the teams up in the local database; if the database has no
    teams for this competition, falls back to fetching the list from
    The Blue Alliance via get_team_list_json_from_tba().
    """
    import json

    global team_info_dict
    global_config['logger'].debug( 'GET Team List For Competition %s', comp )
    session = DbSession.open_db_session(global_config['db_name'])
    web.header('Content-Type', 'application/json')

    team_list = DataModel.getTeamsInNumericOrder(session, comp)
    if len(team_list) == 0:
        return get_team_list_json_from_tba(global_config, comp)

    entries = []
    for team in team_list:
        team_info = None
        # TODO - Remove this hardcoded number for the valid team number. This check prevents
        # requesting information for invalid team numbers, which has been known to happen when
        # tablet operators enter bogus team numbers by mistake
        if team.team < 10000:
            team_info = DataModel.getTeamInfo(session, int(team.team))
        if team_info:
            # BUG FIX: json.dumps escapes quotes/backslashes in nicknames;
            # the previous raw %s interpolation produced invalid JSON for
            # nicknames containing those characters.
            entries.append(' { "team_number": %s, "nickname": %s }'
                           % (json.dumps(str(team.team)), json.dumps(team_info.nickname)))

    # BUG FIX: when no team had info, the old code stripped the opening
    # '{ "teams" : [' via result[:-1], returning invalid JSON. Joining
    # the entries keeps the envelope intact for zero entries too.
    # NOTE(review): session is never removed here, unlike sibling helpers
    # — confirm whether that is intentional before changing it.
    return ''.join(['{ "teams" : [\n', ',\n'.join(entries), ' ] }\n'])
示例12: process_files
def process_files(global_config, attr_definitions, input_dir, recursive=True):
    # Scan input_dir for Team*.txt scouting data files and ingest each new
    # file into the current season's database. Files listed in
    # IgnoreFiles.txt (quarantined) or already processed are skipped.
    # NOTE(review): Python 2 snippet (print statement, `except Exception, e`).
    start_time = datetime.datetime.now()

    # Initialize the database session connection
    db_name = global_config["db_name"] + global_config["this_season"]
    session = DbSession.open_db_session(db_name)

    some_files_processed = False

    # read the ignore file list config each time through the loop. Any files
    # in the ignore list will be skipped
    ignore_filelist = read_ignore_filelist_cfg(input_dir + "IgnoreFiles.txt")

    # The following regular expression will select all files that conform to
    # the file naming format Team*.txt. Build a list of all datafiles that match
    # the naming format within the directory passed in via command line
    # arguments.
    file_regex = re.compile("Team[a-zA-Z0-9_]+.txt")
    files = get_files(global_config, session, db_name, input_dir, file_regex, recursive)

    if len(files) > 0:
        log_msg = "files retrieved, elapsed time - %s" % (str(datetime.datetime.now() - start_time))
        print log_msg
        global_config["logger"].debug("%s - %s" % (process_files.__name__, log_msg))
        global_config["logger"].debug("%s - %d Files to be processed" % (process_files.__name__, len(files)))

    # Process data files
    for data_filename in files:
        # If the file is on the ignore list (quarantined), then skip it
        if data_filename.split("/")[-1] in ignore_filelist:
            global_config["logger"].debug("%s - Ignoring file: %s" % (process_files.__name__, data_filename))
            continue

        # Make sure that the data file has not already been processed. We have seen cases
        # where the data file gets inserted into the list of files to be processed more than
        # once.
        file_processed = isFileProcessed(global_config, session, db_name, data_filename)
        if not file_processed:
            try:
                global_config["logger"].debug("%s - Processing file: %s" % (process_files.__name__, data_filename))
                process_file(global_config, session, attr_definitions, data_filename)
            except Exception, e:
                global_config["logger"].debug(
                    "%s - Error processing file: %s" % (process_files.__name__, data_filename)
                )
                # log the exception but continue processing other files
                log_exception(global_config["logger"], e)

            # add the file to the set of processed files so that we don't process it again. Do it outside the
            # try/except block so that we don't try to process a bogus file over and over again.
            DataModel.addProcessedFile(session, data_filename)
            some_files_processed = True
        else:
            global_config["logger"].debug(
                "%s - Skipping file: %s, already processed" % (process_files.__name__, data_filename)
            )

    # Commit all updates to the database
    session.commit()
示例13: test_new_table
def test_new_table(self):
    """new_table() returns True on first creation, False when it already exists."""
    table_name = 'password_db'
    table_model = DataModel(table_name, PasswordData)
    try:
        # First creation must succeed...
        self.assertTrue(table_model.new_table())
        # ...and a second attempt must report that the table already exists.
        self.assertFalse(table_model.new_table())
    finally:
        # BUG FIX: previously os.remove ran only after both assertions, so
        # a failing assertion leaked the db file and broke subsequent runs.
        if os.path.exists("./password_db.db"):
            os.remove("./password_db.db")
示例14: test_create
def test_create(self):
    """create() stores one PasswordData row and reports 1 row affected."""
    table_name = 'password_db'
    table_model = DataModel(table_name, PasswordData)
    try:
        table_model.new_table()
        vo = PasswordData("your", "your_password")
        # Exactly one row should be inserted.
        self.assertEqual(table_model.create(vo), 1)
    finally:
        # BUG FIX: previously os.remove ran only after the assertion, so a
        # failing assertion leaked the db file and broke subsequent runs.
        if os.path.exists("./password_db.db"):
            os.remove("./password_db.db")
示例15: test1
def test1():
    """Train SimpleNN with the SciPy optimizer on a 500-sample subset and print CV accuracy."""
    (x, y) = DataModel.loadData("..\\train.csv")
    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)
    x_sub = x_train[:500, :]
    y_sub = y_train[:500]
    s = SimpleNN.SimpleNN([784, 70, 10])
    #s = Train.trainGradientDescent(s, x_sub, y_sub, 5)
    s = Train.trainSciPy(s, x_sub, y_sub, 5)
    acc_cv = accuracy_score(y_cv, [s.predictClass(w) for w in x_cv])
    # BUG FIX: the placeholder string was passed as a separate print()
    # argument ("...{0}" , acc), so the literal '{0}' was printed instead
    # of being substituted with the accuracy value.
    print("Accuracy on CV set: {0}".format(acc_cv))