function | repo_name | features
---|---|---
def _require(self):
"""Flag this task as required.
If this task was started with a call to lazyStart/lazyStartAfter()
and has not yet been required by some other Task then this will
cause this task and all of its dependencies to become required.
"""
if self.required:
return
| lewissbaker/cake | [16, 7, 16, 11, 1377774432] |
def _startAfterCallback(self, task):
"""Callback that is called by each task we must start after.
"""
callbacks = None
| lewissbaker/cake | [16, 7, 16, 11, 1377774432] |
def _execute(self):
"""Actually execute this task. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def completeAfter(self, other):
"""Make sure this task doesn't complete until other tasks have completed. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def _completeAfterCallback(self, task):
"""Callback that is called by each task we must complete after.
"""
callbacks = None
| lewissbaker/cake | [16, 7, 16, 11, 1377774432] |
def cancel(self):
"""Cancel this task if it hasn't already started. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def addCallback(self, callback):
"""Register a callback to be run when this task is complete. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def doneCb(status, result):
if 0: print ''
elif status == GoalStatus.PENDING : state='PENDING'
elif status == GoalStatus.ACTIVE : state='ACTIVE'
elif status == GoalStatus.PREEMPTED : state='PREEMPTED'
elif status == GoalStatus.SUCCEEDED : state='SUCCEEDED'
elif status == GoalStatus.ABORTED : state='ABORTED'
elif status == GoalStatus.REJECTED : state='REJECTED'
elif status == GoalStatus.PREEMPTING: state='PREEMPTING'
elif status == GoalStatus.RECALLING : state='RECALLING'
elif status == GoalStatus.RECALLED : state='RECALLED'
elif status == GoalStatus.LOST : state='LOST'
# Print state of action server
print 'Result - [ActionServer: ' + state + ']: ' + result.text
| CARMinesDouai/MultiRobotExplorationPackages | [5, 2, 5, 2, 1465571655] |
def feedbackCb(feedback):
# Print state of dock_drive module (or node.)
print 'Feedback: [DockDrive: ' + feedback.state + ']: ' + feedback.text
| CARMinesDouai/MultiRobotExplorationPackages | [5, 2, 5, 2, 1465571655] |
def dock_drive_client():
# add timeout setting
client = actionlib.SimpleActionClient('dock_drive_action', AutoDockingAction)
while not client.wait_for_server(rospy.Duration(5.0)):
if rospy.is_shutdown(): return
print 'Action server is not connected yet. still waiting...'
goal = AutoDockingGoal();
client.send_goal(goal, doneCb, activeCb, feedbackCb)
print 'Goal: Sent.'
rospy.on_shutdown(client.cancel_goal)
client.wait_for_result()
#print ' - status:', client.get_goal_status_text()
return client.get_result()
| CARMinesDouai/MultiRobotExplorationPackages | [5, 2, 5, 2, 1465571655] |
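A minimal driver for the client above, sketched under two assumptions: the node name is hypothetical, and activeCb (referenced by send_goal but not included in this dump) is stubbed as a no-op. The module's existing rospy/actionlib imports are assumed.

def activeCb():
    # Called once when the goal transitions to active; a no-op stub is assumed.
    pass

if __name__ == '__main__':
    try:
        rospy.init_node('dock_drive_client_py', anonymous=True)
        result = dock_drive_client()
        if result:
            print 'Docking result: ' + str(result)
    except rospy.ROSInterruptException:
        print 'Program interrupted before completion.'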
def run_simulation(force_constant):
assert(os.path.exists(folder_to_store_output_files))
input_pdb_file_of_molecule = args.starting_pdb_file
force_field_file = 'amber99sb.xml'
water_field_file = 'tip3p.xml'
pdb_reporter_file = '%s/output_fc_%f_pc_%s.pdb' %(folder_to_store_output_files, force_constant, str(potential_center).replace(' ',''))
if args.out_traj is not None:
pdb_reporter_file = args.out_traj
state_data_reporter_file = pdb_reporter_file.replace('output_fc', 'report_fc').replace('.pdb', '.txt')
# back up these files if they already exist
for item_filename in [pdb_reporter_file, state_data_reporter_file]:
Helper_func.backup_rename_file_if_exists(item_filename)
index_of_backbone_atoms = CONFIG_57[0]
flag_random_seed = 0 # whether we need to fix this random seed
simulation_temperature = args.temperature
time_step = CONFIG_22 # simulation time step, in ps
pdb = PDBFile(input_pdb_file_of_molecule)
modeller = Modeller(pdb.topology, pdb.getPositions(frame=args.starting_frame))
solvent_opt = 'no_water'
if solvent_opt == 'explicit':
forcefield = ForceField(force_field_file, water_field_file)
modeller.addSolvent(forcefield, model=water_field_file.split('.xml')[0], boxSize=Vec3(3, 3, 3) * nanometers,
ionicStrength=0 * molar)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1.0 * nanometers,
constraints=AllBonds, ewaldErrorTolerance=0.0005)
else:
forcefield = ForceField(force_field_file)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, constraints=AllBonds)
if args.bias_method == "US":
if float(force_constant) != 0:
force = ANN_Force()
force.set_layer_types(layer_types)
force.set_data_type_in_input_layer(args.data_type_in_input_layer)
force.set_list_of_index_of_atoms_forming_dihedrals_from_index_of_backbone_atoms(index_of_backbone_atoms)
force.set_index_of_backbone_atoms(index_of_backbone_atoms)
if args.data_type_in_input_layer == 2:
force.set_list_of_pair_index_for_distances(CONFIG_80)
force.set_num_of_nodes(num_of_nodes)
force.set_potential_center(potential_center)
force.set_force_constant(float(force_constant))
unit_scaling = 1.0 # TODO: check unit scaling
force.set_scaling_factor(float(scaling_factor) / unit_scaling) # since default unit is nm in OpenMM
# TODO: need to fix following for multi-hidden layer cases
temp_coeffs, temp_bias = np.load(autoencoder_info_file)
for item_layer_index in [0, 1]:
assert (len(temp_coeffs[item_layer_index]) ==
num_of_nodes[item_layer_index] * num_of_nodes[item_layer_index + 1]), (len(temp_coeffs[item_layer_index]),
(num_of_nodes[item_layer_index], num_of_nodes[item_layer_index + 1]))
assert (len(temp_bias[item_layer_index]) == num_of_nodes[item_layer_index + 1]), (len(temp_bias[item_layer_index]), num_of_nodes[item_layer_index + 1])
# need tolist() since C++ only accepts Python list
force.set_coeffients_of_connections([item_w.tolist() for item_w in temp_coeffs])
force.set_values_of_biased_nodes([item_w.tolist() for item_w in temp_bias])
system.addForce(force)
elif args.bias_method == "US_on_phipsi":
from openmmplumed import PlumedForce
kappa_string = ','.join([str(force_constant) for _ in potential_center])
plumed_force_string = """ | weiHelloWorld/accelerated_sampling_with_autoencoder | [
22,
8,
22,
1,
1449148947
] |
def get_distance_between_data_cloud_center_and_potential_center(pdb_file):
coor_file = Alanine_dipeptide().generate_coordinates_from_pdb_files(pdb_file)[0]
temp_network = autoencoder.load_from_pkl_file(args.autoencoder_file)
this_simulation_data = single_biased_simulation_data(temp_network, coor_file)
offset = this_simulation_data.get_offset_between_potential_center_and_data_cloud_center(input_data_type)
if layer_types[1] == "Circular":
offset = [min(abs(item), abs(item + 2 * np.pi), abs(item - 2 * np.pi)) for item in offset]
print("circular offset")
print('offset = %s' % str(offset))
distance = sqrt(sum([item * item for item in offset]))
return distance
| weiHelloWorld/accelerated_sampling_with_autoencoder | [22, 8, 22, 1, 1449148947] |
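A quick standalone check of the circular wrapping above: for an offset near a full turn, the true angular distance is the short way around the circle.

import numpy as np

raw = 6.0                        # nearly a full turn of 2*pi ~ 6.2832
wrapped = min(abs(raw), abs(raw + 2 * np.pi), abs(raw - 2 * np.pi))
print(wrapped)                   # ~0.2832, the short-way-around distance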
def truncate_graph_dist(G, source_node, max_dist=1000, weight="length", retain_all=False):
"""
Remove every node farther than some network distance from source_node.
This function can be slow for large graphs, as it must calculate shortest
path distances between source_node and every other graph node.
Parameters
----------
G : networkx.MultiDiGraph
input graph
source_node : int
the node in the graph from which to measure network distances to other
nodes
max_dist : int
remove every node in the graph greater than this distance from the
source_node (along the network)
weight : string
how to weight the graph when measuring distance (default 'length' is
how many meters long the edge is)
retain_all : bool
if True, return the entire graph even if it is not connected.
otherwise, retain only the largest weakly connected component.
Returns
-------
G : networkx.MultiDiGraph
the truncated graph
"""
# get the shortest distance between the node and every other node
distances = nx.shortest_path_length(G, source=source_node, weight=weight)
# then identify every node further than max_dist away
distant_nodes = {k for k, v in distances.items() if v > max_dist}
unreachable_nodes = G.nodes - distances.keys()
# make a copy to not mutate original graph object caller passed in
G = G.copy()
G.remove_nodes_from(distant_nodes | unreachable_nodes)
# remove any isolated nodes and retain only the largest component (if
# retain_all is False)
if not retain_all:
G = utils_graph.remove_isolated_nodes(G)
G = utils_graph.get_largest_component(G)
utils.log(f"Truncated graph by {weight}-weighted network distance")
return G
| gboeing/osmnx | [4088, 747, 4088, 9, 1469329067] |
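A usage sketch for truncate_graph_dist, assuming the public osmnx API (graph_from_point, distance.nearest_nodes, and the truncate module) is available; the coordinates are placeholders.

import osmnx as ox

# build a drivable street network around a point, then keep only nodes
# within 500 network-meters of the node nearest that point
G = ox.graph_from_point((37.79, -122.41), dist=1500, network_type="drive")
source = ox.distance.nearest_nodes(G, X=-122.41, Y=37.79)
G_small = ox.truncate.truncate_graph_dist(G, source, max_dist=500)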
def truncate_graph_polygon(
G, polygon, retain_all=False, truncate_by_edge=False, quadrat_width=0.05, min_num=3
| gboeing/osmnx | [4088, 747, 4088, 9, 1469329067] |
def showHelp():
print\
'''
This program updates MAF by adding a column of uniprot id list
Usage: %s
Parameter Description
-gene2refseq gene id to refseq id mapping file
-refseq2uniprot refseq id to uniprot id mapping file
-output Output file
(eg. %s -gene2refseq <<absolute gene2refseq file path>> -refseq2uniprot <<absolute refseq2uniprot mapping file path>> \
-output <<absolute output file path>> )
''' % (sys.argv[0], sys.argv[0])
| cancerregulome/gidget | [6, 3, 6, 11, 1383861509] |
def merge(output, gene2refseq):
gene2uniprotHandle = open(output, "w")
gene2refseqHandle = open(gene2refseq, "r")
for line in gene2refseqHandle:
line = line.rstrip("\n")
pAssession = line.split("\t")[0].strip()
if pAssession in refseq2uniprotMapping:
line += "\t" + refseq2uniprotMapping[pAssession]
gene2uniprotHandle.write(line + "\n")
gene2uniprotHandle.close()
gene2refseqHandle.close()
| cancerregulome/gidget | [6, 3, 6, 11, 1383861509] |
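merge() relies on a module-level refseq2uniprotMapping dict built from the -refseq2uniprot file; a minimal loader sketch, assuming a two-column tab-separated format (filename is a placeholder):

refseq2uniprotMapping = {}
with open("refseq2uniprot.txt") as handle:
    for line in handle:
        fields = line.rstrip("\n").split("\t")
        if len(fields) >= 2:
            refseq2uniprotMapping[fields[0].strip()] = fields[1].strip()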
def emit(self, record):
pass
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
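The no-op emit() above is the classic null-handler pattern for silencing a library's logger; since Python 2.7 the standard library ships it directly, so an equivalent is (the logger name here is illustrative):

import logging

# attach a do-nothing handler so library log records are swallowed silently
logging.getLogger("echomesh").addHandler(logging.NullHandler())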
def _extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian // 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
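A standalone sanity check of the Julian-day arithmetic above: day-of-year 100 in the leap year 2004 is April 9 (31 + 29 + 31 + 9 = 100).

import datetime

assert datetime.date(2004, 4, 9).timetuple().tm_yday == 100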
def _extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def _parse_date_w3dtf(dateString):
# the __extract_date and __extract_time methods were
# copied-out so they could be used by my code --bear
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = (r'(?P<year>\d\d\d\d)'
r'(?:(?P<dsep>-|)'
r'(?:(?P<julian>\d\d\d)'
r'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = r'(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = (r'(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
r'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
return _extract_date(m) + _extract_time(m) + (0, 0, 0)
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
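Expected behavior of the W3C-DTF parser above, assuming the module-level helpers are in scope; note that this variant computes a timezone designator offset but never applies it.

print(_parse_date_w3dtf('2003-12-31T10:14:55Z'))
# -> (2003, 12, 31, 10, 14, 55, 0, 0, 0)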
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
return email.utils.parsedate_tz(dateString)
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
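An example round-trip for the RFC 822 parser above; the final element is the timezone offset in seconds, as returned by email.utils.parsedate_tz.

print(_parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT'))
# -> (2004, 1, 1, 19, 48, 21, 0, 1, -1, 0)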
def __init__(self, constants=None):
"""
Default constructor for the L{Calendar} class.
@type constants: object
@param constants: Instance of the class L{parsedatetime_consts.Constants}
@rtype: object
@return: L{Calendar} instance
"""
# if a constants reference is not included, use default
if constants is None:
self.ptc = Constants()
else:
self.ptc = constants
self.weekdyFlag = False # monday/tuesday/...
self.dateStdFlag = False # 07/21/06
self.dateStrFlag = False # July 21st, 2006
self.timeStdFlag = False # 5:50
self.meridianFlag = False # am/pm
self.dayStrFlag = False # tomorrow/yesterday/today/..
self.timeStrFlag = False # lunch/noon/breakfast/...
self.modifierFlag = False # after/before/prev/next/..
self.modifier2Flag = False # after/before/prev/next/..
self.unitsFlag = False # hrs/weeks/yrs/min/..
self.qunitsFlag = False # h/m/t/d..
self.timeFlag = 0
self.dateFlag = 0
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
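A usage sketch for the constructor above, passing an explicit Constants instance (its signature appears later in this dump; 'en_US' ships as the fallback locale, and PyICU is disabled so no ICU install is needed):

c = Constants(localeID='en_US', usePyICU=False)
cal = Calendar(c)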
def _buildTime(self, source, quantity, modifier, units):
"""
Take C{quantity}, C{modifier} and C{units} strings and convert them into values.
After converting, calculate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time
"""
log.debug('_buildTime: [%s][%s][%s]' % (quantity, modifier, units))
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
if len(quantity) == 0:
qty = 1
else:
try:
qty = int(quantity)
except ValueError:
qty = 0
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
# plurals are handled by regex's (could be a bug tho)
(yr, mth, dy, hr, mn, sec, _, _, _) = source
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start
if units.startswith('y'):
target = self.inc(start, year=qty)
self.dateFlag = 1
elif units.endswith('th') or units.endswith('ths'):
target = self.inc(start, month=qty)
self.dateFlag = 1
else:
if units.startswith('d'):
target = start + datetime.timedelta(days=qty)
self.dateFlag = 1
elif units.startswith('h'):
target = start + datetime.timedelta(hours=qty)
self.timeFlag = 2
elif units.startswith('m'):
target = start + datetime.timedelta(minutes=qty)
self.timeFlag = 2
elif units.startswith('s'):
target = start + datetime.timedelta(seconds=qty)
self.timeFlag = 2
elif units.startswith('w'):
target = start + datetime.timedelta(weeks=qty)
self.dateFlag = 1
return target.timetuple()
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += self.ptc.YearParseStyle
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indicating current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurrence of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurrence of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurrence of the DOW in the current week is calculated
diff = wkdy - wd
log.debug("wd %s, wkdy %s, offset %d, style %d" % (wd, wkdy, offset, style))
return diff
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
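A worked example of the offset == 1 ("next") branch above: if today is Thursday (wd=3) and the parsed day is Tuesday (wkdy=1), the result lands on next week's Tuesday.

wd, wkdy = 3, 1          # Thursday -> Tuesday
diff = 7 - wd + wkdy     # the offset == 1 branch
assert diff == 5         # five days ahead is next Tuesday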
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
"""
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to C{sourceTime}
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
"""
offset = self.ptc.Modifiers[modifier]
digit = r'\d+'
self.modifier2Flag = False
# If the string after the negative modifier starts with digits,
# then it is likely that the string is similar to ' before 3 days'
# or 'evening prior to 3 days'.
# In this case, the total time is calculated by subtracting '3 days'
# from the current date.
# So, we have to identify the quantity and negate it before parsing
# the string.
# This is not required for strings not starting with digits since the
# string is enough to calculate the sourceTime
if chunk2 != '':
currDOWParseStyle = self.ptc.DOWParseStyle
if offset < 0:
m = re.match(digit, chunk2.strip())
if m is not None:
qty = int(m.group()) * -1
chunk2 = chunk2[m.end():]
chunk2 = '%d%s' % (qty, chunk2)
else:
# enforce selection of the previous period
# driven by DOWParseStyle and CurrentDOWParseStyle
# FIXME: this is not threadsafe!
self.ptc.DOWParseStyle = -1
sourceTime, flag1 = self.parse(chunk2, sourceTime)
# restore DOWParseStyle setting
self.ptc.DOWParseStyle = currDOWParseStyle
if flag1 == 0:
flag1 = True
else:
flag1 = False
flag2 = False
else:
flag1 = False
if chunk1 != '':
if offset < 0:
m = re.search(digit, chunk1.strip())
if m is not None:
qty = int(m.group()) * -1
chunk1 = chunk1[m.end():]
chunk1 = '%d%s' % (qty, chunk1)
tempDateFlag = self.dateFlag
tempTimeFlag = self.timeFlag
sourceTime2, flag2 = self.parse(chunk1, sourceTime)
else:
return sourceTime, (flag1 and flag2)
# if chunk1 is not a datetime and chunk2 is then do not use datetime
# value returned by parsing chunk1
if not (flag1 == False and flag2 == 0):
sourceTime = sourceTime2
else:
self.timeFlag = tempTimeFlag
self.dateFlag = tempDateFlag
return sourceTime, (flag1 and flag2)
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def parse(self, datetimeString, sourceTime=None):
"""
Splits the given C{datetimeString} into tokens, finds the regex
patterns that match and then calculates a C{struct_time} value from
the chunks.
If C{sourceTime} is given then the C{struct_time} value will be
calculated from that value, otherwise from the current date/time.
If the C{datetimeString} is parsed and date/time value found then
the second item of the returned tuple will be a flag to let you know
what kind of C{struct_time} value is being returned::
0 = not parsed at all
1 = parsed as a C{date}
2 = parsed as a C{time}
3 = parsed as a C{datetime}
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: modified C{sourceTime} and the result flag
"""
datetimeString = re.sub(r'(\w)(\.)(\s)', r'\1\3', datetimeString)
if sourceTime:
if isinstance(sourceTime, datetime.datetime):
log.debug('coercing datetime to timetuple')
sourceTime = sourceTime.timetuple()
else:
if not isinstance(sourceTime, time.struct_time) and \
not isinstance(sourceTime, tuple):
raise Exception('sourceTime is not a struct_time')
s = datetimeString.strip().lower()
parseStr = ''
totalTime = sourceTime
if s == '' :
if sourceTime is not None:
return (sourceTime, self.dateFlag + self.timeFlag)
else:
return (time.localtime(), 0)
self.timeFlag = 0
self.dateFlag = 0
while len(s) > 0:
flag = False
chunk1 = ''
chunk2 = ''
log.debug('parse (top of loop): [%s][%s]' % (s, parseStr))
if parseStr == '':
# Modifier like next\prev..
m = self.ptc.CRE_MODIFIER.search(s)
if m is not None:
self.modifierFlag = True
if (m.group('modifier') != s):
# capture remaining string
parseStr = m.group('modifier')
chunk1 = s[:m.start('modifier')].strip()
chunk2 = s[m.end('modifier'):].strip()
flag = True
else:
parseStr = s
if parseStr == '':
# Modifier like from\after\prior..
m = self.ptc.CRE_MODIFIER2.search(s)
if m is not None:
self.modifier2Flag = True
if (m.group('modifier') != s):
# capture remaining string
parseStr = m.group('modifier')
chunk1 = s[:m.start('modifier')].strip()
chunk2 = s[m.end('modifier'):].strip()
flag = True
else:
parseStr = s
if parseStr == '':
valid_date = False
for match in self.ptc.CRE_DATE3.finditer(s):
# to prevent "HH:MM(:SS) time strings" expressions from triggering
# this regex, we checks if the month field exists in the searched
# expression, if it doesn't exist, the date field is not valid
if match.group('mthname'):
m = self.ptc.CRE_DATE3.search(s, match.start())
valid_date = True
break
# String date format
if valid_date:
self.dateStrFlag = True
self.dateFlag = 1
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Standard date format
m = self.ptc.CRE_DATE.search(s)
if m is not None:
self.dateStdFlag = True
self.dateFlag = 1
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Natural language day strings
m = self.ptc.CRE_DAY.search(s)
if m is not None:
self.dayStrFlag = True
self.dateFlag = 1
if (m.group('day') != s):
# capture remaining string
parseStr = m.group('day')
chunk1 = s[:m.start('day')]
chunk2 = s[m.end('day'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Quantity + Units
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
self.unitsFlag = True
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(s)
if m is not None:
self.qunitsFlag = True
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Weekday
m = self.ptc.CRE_WEEKDAY.search(s)
if m is not None:
gv = m.group('weekday')
if s not in self.ptc.dayOffsets:
self.weekdyFlag = True
self.dateFlag = 1
if (gv != s):
# capture remaining string
parseStr = gv
chunk1 = s[:m.start('weekday')]
chunk2 = s[m.end('weekday'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Natural language time strings
m = self.ptc.CRE_TIME.search(s)
if m is not None:
self.timeStrFlag = True
self.timeFlag = 2
if (m.group('time') != s):
# capture remaining string
parseStr = m.group('time')
chunk1 = s[:m.start('time')]
chunk2 = s[m.end('time'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
self.meridianFlag = True
self.timeFlag = 2
if m.group('minutes') is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s %s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'),
m.group('meridian'))
else:
parseStr = '%s:%s %s' % (m.group('hours'),
m.group('minutes'),
m.group('meridian'))
else:
parseStr = '%s %s' % (m.group('hours'),
m.group('meridian'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('meridian'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
if parseStr == '':
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
self.timeStdFlag = True
self.timeFlag = 2
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('seconds'):]
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('minutes'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
# if string does not match any regex, empty string to
# come out of the while loop
if not flag:
s = ''
log.debug('parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2))
log.debug('weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
(self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag))
log.debug('dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
(self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag))
# evaluate the matched string
if parseStr != '':
if self.modifierFlag == True:
t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
# t is the unparsed part of the chunks.
# If it is not date/time, return current
# totalTime as it is; else return the output
# after parsing t.
if (t != '') and (t != None):
tempDateFlag = self.dateFlag
tempTimeFlag = self.timeFlag
(totalTime2, flag) = self.parse(t, totalTime)
if flag == 0 and totalTime is not None:
self.timeFlag = tempTimeFlag
self.dateFlag = tempDateFlag
return (totalTime, self.dateFlag + self.timeFlag)
else:
return (totalTime2, self.dateFlag + self.timeFlag)
elif self.modifier2Flag == True:
totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
if invalidFlag == True:
self.dateFlag = 0
self.timeFlag = 0
else:
totalTime = self._evalString(parseStr, totalTime)
parseStr = ''
# String is not parsed at all
if totalTime is None or totalTime == sourceTime:
totalTime = time.localtime()
self.dateFlag = 0
self.timeFlag = 0
return (totalTime, self.dateFlag + self.timeFlag)
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
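A usage sketch of parse() and its result flag (0 = not parsed, 1 = date, 2 = time, 3 = datetime), assuming a Calendar built with the defaults:

cal = Calendar()
result, flag = cal.parse('tomorrow at 6pm')
if flag == 3:
    # both a date ('tomorrow') and a time ('6pm') were recognized
    print(result)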
def _initSymbols(ptc):
"""
Initialize symbols and single character constants.
"""
# build am and pm lists to contain
# original case, lowercase and first-char
# versions of the meridian text
if len(ptc.locale.meridian) > 0:
am = ptc.locale.meridian[0]
ptc.am = [ am ]
if len(am) > 0:
ptc.am.append(am[0])
am = am.lower()
ptc.am.append(am)
ptc.am.append(am[0])
else:
am = ''
ptc.am = [ '', '' ]
if len(ptc.locale.meridian) > 1:
pm = ptc.locale.meridian[1]
ptc.pm = [ pm ]
if len(pm) > 0:
ptc.pm.append(pm[0])
pm = pm.lower()
ptc.pm.append(pm)
ptc.pm.append(pm[0])
else:
pm = ''
ptc.pm = [ '', '' ]
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
2080, 2084, 2088, 2092, 2096 ]
self.Second = 1
self.Minute = 60 * self.Second
self.Hour = 60 * self.Minute
self.Day = 24 * self.Hour
self.Week = 7 * self.Day
self.Month = 30 * self.Day
self.Year = 365 * self.Day
self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
self.rangeSep = '-'
self.BirthdayEpoch = 50
# YearParseStyle controls how we parse "Jun 12", i.e. dates that do
# not have a year present. The default is to compare the date given
# to the current date, and if prior, then assume the next year.
# Setting this to 0 will prevent that.
self.YearParseStyle = 1
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
if self.usePyICU:
self.locale = pdtLocales['icu'](self.localeID)
if self.locale.icu is None:
self.usePyICU = False
self.locale = None
if self.locale is None:
if not self.localeID in pdtLocales:
for id in range(0, len(self.fallbackLocales)):
self.localeID = self.fallbackLocales[id]
if self.localeID in pdtLocales:
break
self.locale = pdtLocales[self.localeID]()
if self.locale is not None:
# escape any regex special characters that may be found
wd = tuple(map(re.escape, self.locale.Weekdays))
swd = tuple(map(re.escape, self.locale.shortWeekdays))
mth = tuple(map(re.escape, self.locale.Months))
smth = tuple(map(re.escape, self.locale.shortMonths))
self.locale.re_values['months'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
self.locale.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
self.locale.re_values['days'] = '%s|%s|%s|%s|%s|%s|%s' % wd
self.locale.re_values['shortdays'] = '%s|%s|%s|%s|%s|%s|%s' % swd
l = []
for s in self.locale.units:
l = l + self.locale.units[s]
self.locale.re_values['units'] = '|'.join(tuple(map(re.escape, l)))
l = []
lbefore = []
lafter = []
for s in self.locale.Modifiers:
l.append(s)
if self.locale.Modifiers[s] < 0:
lbefore.append(s)
elif self.locale.Modifiers[s] > 0:
lafter.append(s)
self.locale.re_values['modifiers'] = '|'.join(tuple(map(re.escape, l)))
self.locale.re_values['modifiers-before'] = '|'.join(tuple(map(re.escape, lbefore)))
self.locale.re_values['modifiers-after'] = '|'.join(tuple(map(re.escape, lafter)))
l = []
for s in self.locale.re_sources:
l.append(s)
self.locale.re_values['sources'] = '|'.join(tuple(map(re.escape, l)))
# build weekday offsets - yes, it assumes the Weekday and shortWeekday
# lists are in the same order and Mon..Sun (Python style)
o = 0
for key in self.locale.Weekdays:
self.locale.WeekdayOffsets[key] = o
o += 1
o = 0
for key in self.locale.shortWeekdays:
self.locale.WeekdayOffsets[key] = o
o += 1
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
o = 1
for key in self.locale.Months:
self.locale.MonthOffsets[key] = o
o += 1
o = 1
for key in self.locale.shortMonths:
self.locale.MonthOffsets[key] = o
o += 1
# self.locale.DaySuffixes = self.locale.re_values['daysuffix'].split('|')
_initSymbols(self)
# TODO add code to parse the date formats and build the regexes up from sub-parts
# TODO find all hard-coded uses of date/time separators
self.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
(?P<mthname>(%(months)s|%(shortmonths)s))\s?
(?P<year>\d\d(\d\d)?)?
)
)''' % self.locale.re_values
# I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
# I suspect the final line was for a trailing time - but testing shows it's not needed
# ptc.RE_DATE3 = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
# ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
# (,\s?(?P<year>\d\d(\d\d)?))?))
# (\s?|$|[^0-9a-zA-Z])''' % ptc.locale.re_values
self.RE_DATE3 = r'''(?P<date>(
(((?P<mthname>(%(months)s|%(shortmonths)s))|
((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
((,)?(\s)?(?P<year>\d\d(\d\d)?))?
)
)''' % self.locale.re_values
self.RE_MONTH = r'''(\s?|^)
(?P<month>(
(?P<mthname>(%(months)s|%(shortmonths)s))
(\s?(?P<year>(\d\d\d\d)))?
))
(\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_WEEKDAY = r'''(\s?|^)
(?P<weekday>(%(days)s|%(shortdays)s))
(\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % self.locale.re_values
self.RE_UNITS = r'''(?P<qty>(-?\d+\s*
(?P<units>((%(units)s)s?))
))''' % self.locale.re_values
self.RE_QUNITS = r'''(?P<qty>(-?\d+\s?
(?P<qunits>%(qunits)s)
(\s?|,|$)
))''' % self.locale.re_values
# self.RE_MODIFIER = r'''(\s?|^)
# (?P<modifier>
# (previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % self.locale.re_values
# self.RE_MODIFIER2 = r'''(\s?|^)
# (?P<modifier>
# (from|before|after|ago|prior))
# (\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_MODIFIER = r'''(\s?|^)
(?P<modifier>
(%(modifiers-after)s))''' % self.locale.re_values
self.RE_MODIFIER2 = r'''(\s?|^)
(?P<modifier>
(%(modifiers-before)s))
(\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_TIMEHMS = r'''(\s?|^)
(?P<hours>\d\d?)
(?P<tsep>%(timeseperator)s|)
(?P<minutes>\d\d)
(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % self.locale.re_values
self.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?))
((?P<tsep>%(timeseperator)s|)
(?P<minutes>(\d\d?))
(?:(?P=tsep)
(?P<seconds>\d\d?
(?:[.,]\d+)?))?)?''' % self.locale.re_values
if 'meridian' in self.locale.re_values:
self.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % self.locale.re_values
dateSeps = ''.join(self.locale.dateSep) + '.'
self.RE_DATE = r'''(\s?|^)
(?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
(\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)
self.RE_DATE2 = r'[%s]' % dateSeps
self.RE_DAY = r'''(\s?|^)
(?P<day>(today|tomorrow|yesterday))
(\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
''' % self.locale.re_values
# self.RE_TIME = r'''(\s?|^)
# (?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
# (\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_TIME = r'''(\s?|^)
(?P<time>(%(sources)s))
(\s?|$|[^0-9a-zA-Z])''' % self.locale.re_values
self.RE_REMAINING = r'\s+'
# Regex for date/time ranges
self.RE_RTIMEHMS = r'''(\s?|^)
(\d\d?)%(timeseperator)s
(\d\d)
(%(timeseperator)s(\d\d))?
(\s?|$)''' % self.locale.re_values
self.RE_RTIMEHMS2 = r'''(\s?|^)
(\d\d?)
(%(timeseperator)s(\d\d?))?
(%(timeseperator)s(\d\d?))?''' % self.locale.re_values
if 'meridian' in self.locale.re_values:
self.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % self.locale.re_values
self.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
self.RE_RDATE3 = r'''((((%(months)s))\s?
((\d\d?)
(\s?|%(daysuffix)s|$)+)?
(,\s?\d\d\d\d)?))''' % self.locale.re_values
# "06/07/06 - 08/09/06"
self.DATERNG1 = self.RE_RDATE + r'\s?%(rangeseperator)s\s?' + self.RE_RDATE
self.DATERNG1 = self.DATERNG1 % self.locale.re_values
# "march 31 - june 1st, 2006"
self.DATERNG2 = self.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + self.RE_RDATE3
self.DATERNG2 = self.DATERNG2 % self.locale.re_values
# "march 1rd -13th"
self.DATERNG3 = self.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
self.DATERNG3 = self.DATERNG3 % self.locale.re_values
# "4:00:55 pm - 5:90:44 am", '4p-5p'
self.TIMERNG1 = self.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + self.RE_RTIMEHMS2
self.TIMERNG1 = self.TIMERNG1 % self.locale.re_values
# "4:00 - 5:90 ", "4:55:55-3:44:55"
self.TIMERNG2 = self.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + self.RE_RTIMEHMS
self.TIMERNG2 = self.TIMERNG2 % self.locale.re_values
# "4-5pm "
self.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + self.RE_RTIMEHMS2
self.TIMERNG3 = self.TIMERNG3 % self.locale.re_values
# "4:30-5pm "
self.TIMERNG4 = self.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + self.RE_RTIMEHMS2
self.TIMERNG4 = self.TIMERNG4 % self.locale.re_values
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = { 'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_UNITS': self.RE_UNITS,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_MODIFIER2': self.RE_MODIFIER2,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
}
self.cre_keys = list(self.cre_source.keys())
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
def daysInMonth(self, month, year):
"""
Take the given month (1-12) and year (4-digit) and return the
number of days in the month, adjusting for leap years as needed
"""
result = None
log.debug('daysInMonth(%s, %s)' % (month, year))
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
| rec/echomesh | [36, 23, 36, 124, 1347643383] |
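Expected behavior of daysInMonth above, with usePyICU disabled so no ICU locale is needed: 2000 is in the seeded leap-year list, while 1900 (divisible by 100 but not by 400) is not a leap year.

c = Constants(usePyICU=False)
assert c.daysInMonth(2, 2000) == 29
assert c.daysInMonth(2, 1900) == 28
assert c.daysInMonth(4, 2000) == 30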
def get_corpora_list(config):
with CorpusContext(config) as c:
statement = '''MATCH (n:Corpus) RETURN n.name as name ORDER BY name'''
results = c.execute_cypher(statement)
return [x['name'] for x in results]
| PhonologicalCorpusTools/PyAnnotationGraph | [27, 13, 27, 30, 1426715229] |
def test_is_yaml(self):
res = Check("hello").is_yaml()
self.assertIsInstance(res, Check)
try:
Check(123).is_yaml()
self.fail()
except CheckError:
pass
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_not_yaml(self):
res = Check("xxx: {").is_not_yaml()
self.assertIsInstance(res, Check)
try:
Check("valid_yaml").is_not_yaml()
self.fail()
except CheckError:
pass
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_xml(self):
obj = """<Agenda>
<type>gardening</type>
<Activity>
<type>cooking</type>
</Activity>
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_not_xml(self):
res = Check('[123]').is_not_xml()
self.assertIsInstance(res, Check)
try:
Check("<Agenda>ok</Agenda>").is_not_xml()
self.fail()
except CheckError:
pass
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_json_pass(self):
obj = '{"name": "pass"}'
self.assertIsInstance(Check(obj).is_json(), Check)
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_json_fail(self):
obj = "goodbye"
with self.assertRaises(CheckError):
Check(obj).is_json()
| csparpa/check | [81, 7, 81, 5, 1497260401] |
def test_is_not_json_pass(self):
obj = "Hello world"
self.assertIsInstance(Check(obj).is_not_json(), Check)
| csparpa/check | [81, 7, 81, 5, 1497260401] |
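The tests above exercise chainable assertions: each passing check returns the Check instance, so checks compose, while a failing one raises CheckError. A hedged sketch using only the methods shown here:

Check('{"name": "pass"}').is_json()        # passes and returns the Check
Check('hello').is_yaml().is_not_json()     # a bare word is YAML but not JSON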
def testStripAttributes(self):
html = (u"<a href=\"foobar\" name=\"hello\""
u"title=\"View foobar\" onclick=\"malicious()\">hello!</a>")
self.assertEqual(clean_word_text(html),
u"<a href=\"foobar\" name=\"hello\" "
"title=\"View foobar\">"
"hello!</a>") | dominicrodger/django-magazine | [
12,
1,
12,
2,
1315947986
] |
def testStyleStripped(self):
html = u'<style>foobar</style><p>hello!</p>'
self.assertEqual(clean_word_text(html), u'<p>hello!</p>')
# Check we're not reliant on the <style> tag looking a
# particular way
html = u""" | dominicrodger/django-magazine | [
12,
1,
12,
2,
1315947986
] |
def testStyleStrippedEmptyTag(self):
# Check we don't do much other than strip the style tag
# for empty style tags
html = u""" | dominicrodger/django-magazine | [
12,
1,
12,
2,
1315947986
] |
def is_hex(s):
try:
int(s, 16)
return True
except ValueError:
return False
| etherex/pyepm | [29, 15, 29, 1, 1419229262] |
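A few expected outcomes for is_hex above; note that int() with base 16 also accepts an optional '0x' prefix.

assert is_hex('deadBEEF')
assert is_hex('0x1a')        # the 0x prefix is allowed when base is 16
assert not is_hex('xyz')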
def tapering_window(time, D, mywindow):
| guillaumelenoir/WAVEPAL | [21, 7, 21, 5, 1472117364] |
def example01():
'''Clip 2 seconds out of the middle of a video.'''
| digitalmacgyver/vedit | [12, 3, 12, 1, 1475905690] |
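The body of example01 is truncated in this dump; a minimal sketch consistent with the API used in the later examples, assuming the source video is longer than six seconds (file path and timestamps are placeholders):

source = vedit.Video( "./examples/i030.mp4" )
# take the two seconds from 0:04 to 0:06
clip = vedit.Clip( video=source, start=4, end=6 )
output_file = "./example_output/example01.mp4"
window = vedit.Window( clips=[ clip ], output_file=output_file )
window.render()
log.info( "Clipped output file at: %s" % ( output_file ) )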
def example02():
'''Resize an existing video a few different ways.'''
# Turning a 1280x720 16:9 input video into a 640x480 4:3 video.
source = vedit.Video( "./examples/d005.mp4" )
clip = vedit.Clip( video=source )
# Since the input and output aspect ratios don't match, pad the input onto a blue background.
pad_output = "./example_output/example02-pad.mp4"
pad_display = vedit.Display( display_style=vedit.PAD, pad_bgcolor="Blue" )
window = vedit.Window( width=640, height=480,
display=pad_display,
output_file=pad_output )
window.clips = [ clip ]
window.render()
log.info( "Pad output file at: %s" % ( pad_output ) )
# Render a cropped version as well. Note the watermark is getting cropped out on the right.
crop_output = "./example_output/example02-crop.mp4"
crop_display = vedit.Display( display_style=vedit.CROP )
window = vedit.Window( width=640, height=480,
display=crop_display,
output_file=crop_output )
window.clips = [ clip ]
window.render()
log.info( "Crop output file at: %s" % ( crop_output ) )
# Render a version where we pan over the input image as it plays as well. Note the watermark moves from left to right.
pan_output = "./example_output/example02-pan.mp4"
pan_display = vedit.Display( display_style=vedit.PAN )
window = vedit.Window( width=640, height=480,
display=pan_display,
output_file=pan_output )
window.clips = [ clip ]
window.render()
log.info( "Pan output file at: %s" % ( pan_output ) ) | digitalmacgyver/vedit | [
12,
3,
12,
1,
1475905690
] |
def example03():
'''Put two videos next to each other.'''
# Lets set up some source videos, and some clips for use below.
video_1 = vedit.Video( "./examples/i030.mp4" )
# Put two clips from video 1 side by side, with audio from the
# left clip only, ending after 8 seconds (we could also use clips
# from different videos).
clip_1_0_5 = vedit.Clip( video=video_1, start=0, end=5 )
clip_1_10_20 = vedit.Clip( video=video_1, start=10, end=20,
display=vedit.Display( include_audio=False ) )
# Set up two windows, one for each clip, and one to hold the other two, and set the duration.
#
# Since clip 1 is 5 seconds long and we are making an 8 second
# video, there will be time when clip 1 is not playing - set the
# background color to green during this time.
output_file = "./example_output/example03.mp4"
base_window = vedit.Window( width=1280*2, height=720, duration=8, bgcolor='Green',
output_file=output_file )
# Set the x, y coordinates of this window inside its parent, as
# measure from the top right.
#
# Here we are putting the videos flush side by side, but they
# could be on top of each other, overlapping, centered in a much
# larger base_window, etc., etc..
clip_1_window = vedit.Window( width=1280, height=720, x=0, y=0, clips=[ clip_1_0_5 ] )
clip_2_window = vedit.Window( width=1280, height=720, x=1280, y=0, clips=[ clip_1_10_20 ] )
base_window.windows = [ clip_1_window, clip_2_window ]
base_window.render()
log.info( "Side by side output is at: %s" % ( output_file ) )
return
| digitalmacgyver/vedit | [12, 3, 12, 1, 1475905690] |
def example05():
'''Overlay videos on top of other videos.'''
# Let's overlay two smaller windows on top of a base video.
base_video = vedit.Video( "./examples/i030.mp4" )
base_clip = vedit.Clip( video=base_video )
output_file = "./example_output/example05.mp4"
# Use the default width, height, and display parameters:
# 1280x1024, which happens to be the size of this input.
base_window = vedit.Window( clips = [ base_clip ],
output_file=output_file )
# We'll create two smaller windows, each 1/3 the size of the
# base_window, and position them towards the top left, and bottom
# right of the base window.
overlay_window1 = vedit.Window( width=base_window.width/3, height=base_window.height/3,
x=base_window.width/12, y=base_window.height/12 )
overlay_window2 = vedit.Window( width=base_window.width/3, height=base_window.height/3,
x=7*base_window.width/12, y=7*base_window.height/12 )
| digitalmacgyver/vedit | [12, 3, 12, 1, 1475905690] |
def example06():
'''Cascade overlaid videos and images on top of a base video or image.'''
# The OVERLAY display_style when applied to a clip in the window
# makes it shrink a random amount and be played while it scrolls
# across the base window.
#
# Let's use that to combine several things together and make a
# huge mess!
output_file = "./example_output/example06.mp4"
base_video = vedit.Video( "./examples/i030.mp4" )
# Let's use a different audio track for this.
base_clip = vedit.Clip( video=base_video, display=vedit.Display( include_audio=False ) )
base_window = vedit.Window( clips = [ base_clip ],
output_file=output_file,
duration=30,
audio_file="./examples/a2.mp4" )
# Turn our cat images into clips of random length between 3 and 6
# seconds and have them cascade across the screen from left to
# right.
cat_display = vedit.Display( display_style=vedit.OVERLAY,
overlay_direction=vedit.RIGHT,
include_audio=False,
overlay_concurrency=4,
overlay_min_gap=0.8 )
cat_clips = []
for cat_pic in glob.glob( "./examples/cat*jpg" ):
cat_video_file = vedit.gen_background_video( bgimage_file=cat_pic,
duration=random.randint( 3, 6 ) )
cat_video = vedit.Video( cat_video_file )
cat_clips.append( vedit.Clip( video=cat_video, display=cat_display ) )
# Turn our dog images into clips of random length between 2 and 5
# seconds and have them cascade across the screen from top to
# bottom.
dog_display = vedit.Display( display_style=vedit.OVERLAY,
overlay_direction=vedit.DOWN,
include_audio=False,
overlay_concurrency=4,
overlay_min_gap=0.8 )
dog_clips = []
for dog_pic in glob.glob( "./examples/dog*jpg" ):
dog_video_file = vedit.gen_background_video( bgimage_file=dog_pic,
duration=random.randint( 3, 6 ) )
| digitalmacgyver/vedit | [12, 3, 12, 1, 1475905690] |
def example07():
'''Work with images, including adding a watermark and putting things on top of an image.'''
# Let's make our background an image with a song.
output_file = "./example_output/example07.mp4"
dog_background = vedit.Window( bgimage_file="./examples/dog03.jpg",
width=960, #The dimensions of this image
height=640,
duration=45,
audio_file="./examples/a3.mp4",
output_file=output_file )
| digitalmacgyver/vedit | [12, 3, 12, 1, 1475905690] |
def setMappings(mappings):
"""Set the mappings between the model and widgets.
TODO:
- Should this be extended to accept other columns?
- Check if it has a model already.
"""
column = 1
mappers = []
for widget, obj in mappings:
mapper = QDataWidgetMapper(widget)
# logger.debug(obj.model())
mapper.setModel(obj.model())
mapper.addMapping(widget, column)
delegate = Delegate(widget)
mapper.setItemDelegate(delegate)
mapper.setRootIndex(obj.parent().index())
mapper.setCurrentModelIndex(obj.index())
# QDataWidgetMapper needs a focus event to notice a change in the data.
# To make sure the model is informed about the change, I connected the
# stateChanged signal of the QCheckBox to the submit slot of the
# QDataWidgetMapper. The same idea goes for the QComboBox.
# https://bugreports.qt.io/browse/QTBUG-1818
if isinstance(widget, QCheckBox):
signal = widget.stateChanged
try:
signal.disconnect()
except TypeError:
pass
signal.connect(mapper.submit)
elif isinstance(widget, QComboBox):
signal = widget.currentTextChanged
try:
signal.disconnect()
except TypeError:
pass
signal.connect(mapper.submit)
mappers.append(mapper)
return mappers
| mretegan/crispy | [33, 11, 33, 24, 1457693320] |
def __init__(self, signifier=None, nonce=None):
super(SecretBoxEncryptor, self).__init__(
signifier=signifier or 'sbe::'
)
self.nonce = nonce or nacl.utils.random(
nacl.bindings.crypto_secretbox_NONCEBYTES
)
| matrix-org/pymacaroons | [1, 3, 1, 1, 1439914801] |
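What the encryptor above presumably wraps: PyNaCl's SecretBox with a random 24-byte nonce (crypto_secretbox_NONCEBYTES). A standalone sketch; key management is assumed to happen elsewhere.

import nacl.secret
import nacl.utils

key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)   # 24 bytes
box = nacl.secret.SecretBox(key)
ciphertext = box.encrypt(b"attack at dawn", nonce)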
def setUp(self):
super(BernoulliNBJSTest, self).setUp()
self.estimator = BernoulliNB()
| nok/sklearn-porter | [1217, 166, 1217, 41, 1466634094] |
def test_random_features__iris_data__default(self):
pass
| nok/sklearn-porter | [1217, 166, 1217, 41, 1466634094] |
def test_existing_features__binary_data__default(self):
pass
| nok/sklearn-porter | [1217, 166, 1217, 41, 1466634094] |
def test_random_features__binary_data__default(self):
pass
| nok/sklearn-porter | [1217, 166, 1217, 41, 1466634094] |
def test_random_features__digits_data__default(self):
pass
| nok/sklearn-porter | [1217, 166, 1217, 41, 1466634094] |
def AnalyzeDenseLandmarks(self, request):
"""对请求图片进行五官定位(也称人脸关键点定位),获得人脸的精准信息,返回多达888点关键信息,对五官和脸部轮廓进行精确定位。
:param request: Request instance for AnalyzeDenseLandmarks.
:type request: :class:`tencentcloud.iai.v20200303.models.AnalyzeDenseLandmarksRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.AnalyzeDenseLandmarksResponse`
"""
try:
params = request._serialize()
body = self.call("AnalyzeDenseLandmarks", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AnalyzeDenseLandmarksResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| tzpBingo/github-trending | [42, 20, 42, 1, 1504755582] |
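Every method in this block follows the same serialize/call/deserialize pattern, so one usage sketch covers them all; the credentials, region, and image URL are placeholders, and the Url request field is assumed from the iai models.

import json
from tencentcloud.common import credential
from tencentcloud.iai.v20200303 import iai_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")
client = iai_client.IaiClient(cred, "ap-guangzhou")
req = models.AnalyzeDenseLandmarksRequest()
req.from_json_string(json.dumps({"Url": "https://example.com/face.jpg"}))
resp = client.AnalyzeDenseLandmarks(req)
print(resp.to_json_string())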
def CheckSimilarPerson(self, request):
"""对指定的人员库进行人员查重,给出疑似相同人的信息。
可以使用本接口对已有的单个人员库进行人员查重,避免同一人在单个人员库中拥有多个身份;也可以使用本接口对已有的多个人员库进行人员查重,查询同一人是否同时存在多个人员库中。
不支持跨算法模型版本查重,且目前仅支持算法模型为3.0的人员库使用查重功能。
>
- 若对完全相同的指定人员库进行查重操作,需等待上次操作完成才可。即,若两次请求输入的 GroupIds 相同,第一次请求若未完成,第二次请求将返回失败。
>
- 查重的人员库状态为腾讯云开始进行查重任务的那一刻,即您可以理解为当您发起查重请求后,若您的查重任务需要排队,在排队期间您对人员库的增删操作均会会影响查重的结果。腾讯云将以开始进行查重任务的那一刻人员库的状态进行查重。查重任务开始后,您对人员库的任何操作均不影响查重任务的进行。但建议查重任务开始后,请不要对人员库中人员和人脸进行增删操作。
:param request: Request instance for CheckSimilarPerson.
:type request: :class:`tencentcloud.iai.v20200303.models.CheckSimilarPersonRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CheckSimilarPersonResponse`
"""
try:
params = request._serialize()
body = self.call("CheckSimilarPerson", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CheckSimilarPersonResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| tzpBingo/github-trending | [42, 20, 42, 1, 1504755582] |
def CompareMaskFace(self, request):
"""对两张图片中的人脸进行相似度比对,返回人脸相似度分数。
戴口罩人脸比对接口可在人脸戴口罩情况下使用,口罩遮挡程度最高可以遮挡鼻尖。
如图片人脸不存在戴口罩情况,建议使用人脸比对服务。
:param request: Request instance for CompareMaskFace.
:type request: :class:`tencentcloud.iai.v20200303.models.CompareMaskFaceRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CompareMaskFaceResponse`
"""
try:
params = request._serialize()
body = self.call("CompareMaskFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CompareMaskFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| tzpBingo/github-trending | [42, 20, 42, 1, 1504755582] |
def CreateFace(self, request):
"""将一组人脸图片添加到一个人员中。一个人员最多允许包含 5 张图片。若该人员存在多个人员库中,所有人员库中该人员图片均会增加。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for CreateFace.
:type request: :class:`tencentcloud.iai.v20200303.models.CreateFaceRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CreateFaceResponse`
"""
try:
params = request._serialize()
body = self.call("CreateFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| tzpBingo/github-trending | [42, 20, 42, 1, 1504755582] |
def CreatePerson(self, request):
"""创建人员,添加人脸、姓名、性别及其他相关信息。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for CreatePerson.
:type request: :class:`tencentcloud.iai.v20200303.models.CreatePersonRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CreatePersonResponse`
"""
try:
params = request._serialize()
body = self.call("CreatePerson", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreatePersonResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| tzpBingo/github-trending | [42, 20, 42, 1, 1504755582] |
def DeleteGroup(self, request):
"""删除该人员库及包含的所有的人员。同时,人员对应的所有人脸信息将被删除。若某人员同时存在多个人员库中,该人员不会被删除,但属于该人员库中的自定义描述字段信息会被删除,属于其他人员库的自定义描述字段信息不受影响。
:param request: Request instance for DeleteGroup.
:type request: :class:`tencentcloud.iai.v20200303.models.DeleteGroupRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.DeleteGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def DeletePersonFromGroup(self, request):
"""从某人员库中删除人员,此操作仅影响该人员库。若该人员仅存在于指定的人员库中,该人员将被删除,其所有的人脸信息也将被删除。
:param request: Request instance for DeletePersonFromGroup.
:type request: :class:`tencentcloud.iai.v20200303.models.DeletePersonFromGroupRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.DeletePersonFromGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DeletePersonFromGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeletePersonFromGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def DetectFaceAttributes(self, request):
"""检测给定图片中的人脸(Face)的位置、相应的面部属性和人脸质量信息,位置包括 (x,y,w,h),面部属性包括性别(gender)、年龄(age)、表情(expression)、魅力(beauty)、眼镜(glass)、发型(hair)、口罩(mask)和姿态 (pitch,roll,yaw),人脸质量信息包括整体质量分(score)、模糊分(sharpness)、光照分(brightness)和五官遮挡分(completeness)。
其中,人脸质量信息主要用于评价输入的人脸图片的质量。在使用人脸识别服务时,建议您对输入的人脸图片进行质量检测,提升后续业务处理的效果。该功能的应用场景包括:
1) 人员库[创建人员](https://cloud.tencent.com/document/product/867/32793)/[增加人脸](https://cloud.tencent.com/document/product/867/32795):保证人员人脸信息的质量,便于后续的业务处理。
2) [人脸搜索](https://cloud.tencent.com/document/product/867/32798):保证输入的图片质量,快速准确匹配到对应的人员。
3) [人脸验证](https://cloud.tencent.com/document/product/867/32806):保证人脸信息的质量,避免明明是本人却认证不通过的情况。
4) [人脸融合](https://cloud.tencent.com/product/facefusion):保证上传的人脸质量,人脸融合的效果更好。
>
- 本接口是[人脸检测与分析](https://cloud.tencent.com/document/product/867/44989)的升级,具体在于:
1.本接口可以指定需要计算返回的人脸属性,避免无效计算,降低耗时;
2.本接口支持更多属性细项数,也会持续增加更多功能。
请您使用本接口完成相应的人脸检测与属性分析需求。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for DetectFaceAttributes.
:type request: :class:`tencentcloud.iai.v20200303.models.DetectFaceAttributesRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.DetectFaceAttributesResponse`
"""
try:
params = request._serialize()
body = self.call("DetectFaceAttributes", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DetectFaceAttributesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
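Since the docstring above stresses selecting only the attributes you need, here is a hedged sketch of that selection, reusing the client from the first sketch; FaceAttributesType, Url, and the response fields are assumptions to verify against the models module:

# Hedged sketch: compute only the attributes needed, per the docstring above.
req = models.DetectFaceAttributesRequest()
req.Url = "https://example.com/portrait.jpg"
req.FaceAttributesType = "Age,Gender,Mask"  # assumed comma-separated selector
resp = client.DetectFaceAttributes(req)
for face in resp.FaceDetailInfos:  # assumed response structure
    print(face.FaceRect)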
def DetectLiveFaceAccurate(self, request):
"""人脸静态活体检测(高精度版)可用于对用户上传的静态图片进行防翻拍活体检测,以判断是否是翻拍图片。
相比现有静态活体检测服务,高精度版在维持高真人通过率的前提下,增强了对高清屏幕、裁剪纸片、3D面具等攻击的防御能力,攻击拦截率约为业内同类型产品形态4-5倍。同时支持多场景人脸核验,满足移动端、PC端各类型场景的图片活体检验需求,适用于各个行业不同的活体检验应用。
:param request: Request instance for DetectLiveFaceAccurate.
:type request: :class:`tencentcloud.iai.v20200303.models.DetectLiveFaceAccurateRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.DetectLiveFaceAccurateResponse`
"""
try:
params = request._serialize()
body = self.call("DetectLiveFaceAccurate", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DetectLiveFaceAccurateResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def GetCheckSimilarPersonJobIdList(self, request):
"""获取人员查重任务列表,按任务创建时间逆序(最新的在前面)。
只保留最近1年的数据。
:param request: Request instance for GetCheckSimilarPersonJobIdList.
:type request: :class:`tencentcloud.iai.v20200303.models.GetCheckSimilarPersonJobIdListRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.GetCheckSimilarPersonJobIdListResponse`
"""
try:
params = request._serialize()
body = self.call("GetCheckSimilarPersonJobIdList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetCheckSimilarPersonJobIdListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def GetGroupList(self, request):
"""获取人员库列表。
:param request: Request instance for GetGroupList.
:type request: :class:`tencentcloud.iai.v20200303.models.GetGroupListRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.GetGroupListResponse`
"""
try:
params = request._serialize()
body = self.call("GetGroupList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetGroupListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def GetPersonGroupInfo(self, request):
"""获取指定人员的信息,包括加入的人员库、描述内容等。
:param request: Request instance for GetPersonGroupInfo.
:type request: :class:`tencentcloud.iai.v20200303.models.GetPersonGroupInfoRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.GetPersonGroupInfoResponse`
"""
try:
params = request._serialize()
body = self.call("GetPersonGroupInfo", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetPersonGroupInfoResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def GetPersonListNum(self, request):
"""获取指定人员库中人员数量。
:param request: Request instance for GetPersonListNum.
:type request: :class:`tencentcloud.iai.v20200303.models.GetPersonListNumRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.GetPersonListNumResponse`
"""
try:
params = request._serialize()
body = self.call("GetPersonListNum", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetPersonListNumResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def GetUpgradeGroupFaceModelVersionJobList(self, request):
"""获取人员库升级任务列表
:param request: Request instance for GetUpgradeGroupFaceModelVersionJobList.
:type request: :class:`tencentcloud.iai.v20200303.models.GetUpgradeGroupFaceModelVersionJobListRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.GetUpgradeGroupFaceModelVersionJobListResponse`
"""
try:
params = request._serialize()
body = self.call("GetUpgradeGroupFaceModelVersionJobList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetUpgradeGroupFaceModelVersionJobListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def ModifyGroup(self, request):
"""修改人员库名称、备注、自定义描述字段名称。
:param request: Request instance for ModifyGroup.
:type request: :class:`tencentcloud.iai.v20200303.models.ModifyGroupRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.ModifyGroupResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def ModifyPersonGroupInfo(self, request):
"""修改指定人员库人员描述内容。
:param request: Request instance for ModifyPersonGroupInfo.
:type request: :class:`tencentcloud.iai.v20200303.models.ModifyPersonGroupInfoRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.ModifyPersonGroupInfoResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyPersonGroupInfo", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyPersonGroupInfoResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def SearchFaces(self, request):
"""用于对一张待识别的人脸图片,在一个或多个人员库中识别出最相似的 TopK 人员,识别结果按照相似度从大到小排序。
支持一次性识别图片中的最多 10 张人脸,支持一次性跨 100 个人员库(Group)搜索。
单次搜索的人员库人脸总数量和人员库的算法模型版本(FaceModelVersion)相关。算法模型版本为2.0的人员库,单次搜索人员库人脸总数量不得超过 100 万张;算法模型版本为3.0的人员库,单次搜索人员库人脸总数量不得超过 300 万张。
与[人员搜索](https://cloud.tencent.com/document/product/867/44992)及[人员搜索按库返回](https://cloud.tencent.com/document/product/867/44991)接口不同的是,本接口将该人员(Person)下的每个人脸(Face)都作为单独个体进行验证,而人员搜索及人员搜索按库返回接口 会将该人员(Person)下的所有人脸(Face)进行融合特征处理,即若某个Person下有4张 Face,本接口会将4张 Face 的特征进行融合处理,生成对应这个 Person 的特征,使搜索更加准确。
本接口需与[人员库管理相关接口](https://cloud.tencent.com/document/product/867/45015)结合使用。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
>
- 不可同时搜索不同算法模型版本(FaceModelVersion)的人员库。
:param request: Request instance for SearchFaces.
:type request: :class:`tencentcloud.iai.v20200303.models.SearchFacesRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.SearchFacesResponse`
"""
try:
params = request._serialize()
body = self.call("SearchFaces", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.SearchFacesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
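A hedged sketch of the cross-group search described above, reusing the client from the first sketch; GroupIds, Url, MaxPersonNum, and the response fields are assumptions:

# Hedged sketch: TopK person search across two groups.
req = models.SearchFacesRequest()
req.GroupIds = ["employee_group", "visitor_group"]  # hypothetical group IDs
req.Url = "https://example.com/query_face.jpg"
req.MaxPersonNum = 5  # return up to 5 candidates per detected face
resp = client.SearchFaces(req)
for result in resp.Results:  # assumed response structure
    for candidate in result.Candidates:
        print(candidate.PersonId, candidate.Score)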
def SearchPersons(self, request):
"""用于对一张待识别的人脸图片,在一个或多个人员库中识别出最相似的 TopK 人员,按照相似度从大到小排列。
支持一次性识别图片中的最多 10 张人脸,支持一次性跨 100 个人员库(Group)搜索。
单次搜索的人员库人脸总数量和人员库的算法模型版本(FaceModelVersion)相关。算法模型版本为2.0的人员库,单次搜索人员库人脸总数量不得超过 100 万张;算法模型版本为3.0的人员库,单次搜索人员库人脸总数量不得超过 300 万张。
本接口会将该人员(Person)下的所有人脸(Face)进行融合特征处理,即若某个 Person 下有4张 Face ,本接口会将4张 Face 的特征进行融合处理,生成对应这个 Person 的特征,使人员搜索(确定待识别的人脸图片是某人)更加准确。而[人脸搜索](https://cloud.tencent.com/document/product/867/44994)及[人脸搜索按库返回接口](https://cloud.tencent.com/document/product/867/44993)将该人员(Person)下的每个人脸(Face)都作为单独个体进行搜索。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
- 仅支持算法模型版本(FaceModelVersion)为3.0的人员库。
:param request: Request instance for SearchPersons.
:type request: :class:`tencentcloud.iai.v20200303.models.SearchPersonsRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.SearchPersonsResponse`
"""
try:
params = request._serialize()
body = self.call("SearchPersons", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.SearchPersonsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
def UpgradeGroupFaceModelVersion(self, request):
"""升级人员库。升级过程中,人员库仍然为原算法版本,人员库相关操作仍然支持。升级完成后,人员库为新算法版本。
单个人员库有且仅支持一次回滚操作。
升级是一个耗时的操作,执行时间与人员库的人脸数相关,升级的人员库中的人脸数越多,升级的耗时越长。升级接口是个异步任务,调用成功后返回JobId,通过GetUpgradeGroupFaceModelVersionResult查询升级进度和结果。如果升级成功,人员库版本将切换到新版本。如果想回滚到旧版本,可以调用RevertGroupFaceModelVersion进行回滚。
注:某些接口无法进行跨人员库版本操作,例如SearchFaces,SearchPersons和CopyPerson等。当业务有多个Group操作的场景时,如同时搜索Group1和Group2,如果升级了Group1,此时Group1和Group2版本不同,造成了跨版本操作,将导致Search接口无法正常执行,返回不允许执行跨版本操作错误,升级前需考虑业务是否有多库操作的场景,否则会影响线上接口表现。
:param request: Request instance for UpgradeGroupFaceModelVersion.
:type request: :class:`tencentcloud.iai.v20200303.models.UpgradeGroupFaceModelVersionRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.UpgradeGroupFaceModelVersionResponse`
"""
try:
params = request._serialize()
body = self.call("UpgradeGroupFaceModelVersion", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UpgradeGroupFaceModelVersionResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
42,
20,
42,
1,
1504755582
] |
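The docstring above describes an asynchronous JobId-plus-polling flow; a hedged sketch of that flow follows (field names and status values are assumptions to verify against the API docs):

# Hedged sketch of the async upgrade flow: start the job, then poll.
import time

req = models.UpgradeGroupFaceModelVersionRequest()
req.GroupId = "employee_group"  # hypothetical group
req.FaceModelVersion = "3.0"    # target model version
job_id = client.UpgradeGroupFaceModelVersion(req).JobId

poll = models.GetUpgradeGroupFaceModelVersionResultRequest()
poll.JobId = job_id
while True:
    result = client.GetUpgradeGroupFaceModelVersionResult(poll)
    if result.Status != 1:  # assumed convention: 1 means still running
        break
    time.sleep(10)  # back off between polls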
def __init__(
self,
calculation_rate=None,
trigger=0,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
trigger=trigger,
) | josiah-wolf-oberholtzer/supriya | [
208,
25,
208,
13,
1394072845
] |
def ar(
cls,
trigger=0,
):
"""
Constructs an audio-rate Timer.
::
>>> timer = supriya.ugens.Timer.ar(
... trigger=0,
... )
>>> timer
Timer.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
trigger=trigger,
)
return ugen | josiah-wolf-oberholtzer/supriya | [
208,
25,
208,
13,
1394072845
] |
def kr(
cls,
trigger=0,
):
"""
Constructs a control-rate Timer.
::
>>> timer = supriya.ugens.Timer.kr(
... trigger=0,
... )
>>> timer
Timer.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
trigger=trigger,
)
return ugen | josiah-wolf-oberholtzer/supriya | [
208,
25,
208,
13,
1394072845
] |
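A hedged sketch of how Timer might be wired into a SynthDef to measure the time between control-rate impulses; SynthDefBuilder, Impulse, and Out usage are assumptions about this vintage of the supriya API:

# Hedged sketch: report elapsed time between successive control-rate triggers.
import supriya.synthdefs
import supriya.ugens

with supriya.synthdefs.SynthDefBuilder() as builder:
    trigger = supriya.ugens.Impulse.kr(frequency=1)
    elapsed = supriya.ugens.Timer.kr(trigger=trigger)
    supriya.ugens.Out.kr(bus=0, source=elapsed)

synthdef = builder.build()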
def check_cfg_path(prj_number, cfg_str_or_path, cfg_path):
config = configparser.ConfigParser()
ini_file = cfg_path / "config.ini"
if cfg_str_or_path == "cfg":
if not cfg_str_or_path.exists():
if ini_file.exists():
config.read(ini_file)
if prj_number in config:
config_path = config[prj_number]["path"]
return config_path | hdm-dt-fb/rvt_model_services | [
43,
11,
43,
2,
1488711415
] |
def get_model_hash(rvt_model_path):
"""
Creates a hash of provided rvt model file
:param rvt_model_path:
:return: hash string
"""
BLOCKSIZE = 65536
    hasher = hashlib.sha256()
    # stream the file in blocks so large models do not load fully into memory
    with open(rvt_model_path, "rb") as rvt_file:
        buf = rvt_file.read(BLOCKSIZE)
        while buf:
            hasher.update(buf)
            buf = rvt_file.read(BLOCKSIZE)
    return hasher.hexdigest()
43,
11,
43,
2,
1488711415
] |
def check_hash_unchanged(hash_db, rvt_model_path, model_hash, date):
model_info = {"<full_model_path>": str(rvt_model_path),
">last_hash": model_hash,
">last_hash_date": date,
}
unchanged = hash_db.search((Query()["<full_model_path>"] == str(rvt_model_path)) &
(Query()[">last_hash"] == model_hash)
)
    if unchanged:
        return True
    else:
        hash_db.upsert(model_info,
                       Query()["<full_model_path>"] == str(rvt_model_path))
        return False
43,
11,
43,
2,
1488711415
] |
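A hedged sketch tying get_model_hash and check_hash_unchanged together to skip unchanged models; the TinyDB file name and model path are illustrative:

# Hedged usage sketch: skip processing when the model file is unchanged.
import datetime
import pathlib
from tinydb import TinyDB

hash_db = TinyDB("model_hash_db.json")               # illustrative db file
model_path = pathlib.Path("models/project_123.rvt")  # hypothetical model

model_hash = get_model_hash(model_path)
today = datetime.date.today().isoformat()
if check_hash_unchanged(hash_db, model_path, model_hash, today):
    print("model unchanged since last check - nothing to do")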
def exit_with_log(message, severity=logging.warning, exit_return_code=1):
"""
    Logs the message at the given severity and ends the whole script.
    :param message: message to be logged
    :param severity: logging function to use (e.g. logging.warning)
    :param exit_return_code: return code to record in the log entry
    :return:
"""
severity(f"{project_code};{current_proc_hash};{exit_return_code};;{message}")
exit() | hdm-dt-fb/rvt_model_services | [
43,
11,
43,
2,
1488711415
] |
def get_jrn_and_post_process(search_command, commands_dir):
"""
Searches command paths for register dict in __init__.py in command roots to
prepare appropriate command strings to be inserted into the journal file
:param search_command: command name to look up
:param commands_dir: commands directory
:return: command module, post process dict
"""
found_dir = False
module_rjm = None
post_proc_dict = defaultdict() | hdm-dt-fb/rvt_model_services | [
43,
11,
43,
2,
1488711415
] |
def get_rvt_proc_journal(process, jrn_file_path):
open_files = process.open_files()
for proc_file in open_files:
file_name = pathlib.Path(proc_file.path).name
if file_name.startswith("journal"):
return proc_file.path | hdm-dt-fb/rvt_model_services | [
43,
11,
43,
2,
1488711415
] |
def clear_all_tables():
db_session.query(FrontPage).delete()
db_session.query(SubredditPage).delete()
db_session.query(Subreddit).delete()
db_session.query(Post).delete()
db_session.query(User).delete()
db_session.query(Comment).delete()
db_session.query(Experiment).delete()
db_session.query(ExperimentThing).delete()
db_session.query(ExperimentAction).delete()
db_session.query(ExperimentThingSnapshot).delete()
db_session.query(EventHook).delete()
db_session.commit() | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def teardown_function(function):
clear_all_tables() | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_initialize_experiment(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
assert len(db_session.query(Experiment).all()) == 0
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
assert len(db_session.query(Experiment).all()) == 1
experiment = controller.experiment
assert experiment.name == experiment_name
assert(experiment.controller == experiment_config['controller'])
settings = json.loads(experiment.settings_json)
for k in ['username', 'subreddit', 'subreddit_id', 'start_time', 'end_time', 'controller']:
assert settings[k] == experiment_config[k]
for condition_name in experiment_config['conditions']:
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_config['conditions'][condition_name]['randomizations']), "r") as f:
conditions = []
for row in csv.DictReader(f):
conditions.append(row)
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_config['conditions'][condition_name]['randomizations']), "r") as f:
nonconditions = []
for row in csv.DictReader(f):
nonconditions.append(row)
assert len(settings['conditions'][condition_name]['randomizations']) == len(conditions)
assert settings['conditions'][condition_name]['next_randomization'] == 0 | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_determine_intervention_eligible(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
assert len(db_session.query(Experiment).all()) == 0
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
## in the case with no interventions, confirm eligibility
assert controller.determine_intervention_eligible() == True
## now create an action and confirm ineligibility outside the interval
experiment_action = ExperimentAction(
experiment_id = controller.experiment.id,
praw_key_id = "TEST",
action = "Intervention:{0}.{1}".format("TEST","TEST"),
action_object_type = ThingType.STYLESHEET.value,
action_object_id = None,
metadata_json = json.dumps({"arm":"TEST", "condition":"TEST"})
)
db_session.add(experiment_action)
db_session.commit()
assert controller.determine_intervention_eligible() == False | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_select_condition(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
controller = StylesheetExperimentController(experiment_name, db_session, r, log) | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_set_stylesheet(mock_reddit):
r = mock_reddit.return_value
with open(os.path.join(BASE_DIR,"tests", "fixture_data", "stylesheet_0" + ".json"), "r") as f:
stylesheet = json.loads(f.read())
r.get_stylesheet.return_value = stylesheet
r.set_stylesheet.return_value = {"errors":[]}
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
for condition in ['special', 'normal']:
for arm in ["arm_0", "arm_1"]:
assert (controller.experiment_settings['conditions'][condition]['arms'][arm] in stylesheet['stylesheet'].split("\n"))!=True
for condition in ['special', 'normal']:
for arm in ["arm_0", "arm_1"]:
line_length = len(stylesheet['stylesheet'].split("\n"))
result_lines = controller.set_stylesheet(condition, arm).split("\n")
assert controller.experiment_settings['conditions'][condition]['arms'][arm] in result_lines
assert len(result_lines) == line_length + 3 | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_post_snapshotting(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
yesterday_posts = 10
today_posts = 20
controller, today_post_list, comment_counter = setup_comment_monitoring(r, yesterday_posts, today_posts)
posts = controller.identify_posts_that_need_snapshotting()
assert len(posts) == today_posts
assert db_session.query(Post).outerjoin(
ExperimentThing, Post.id == ExperimentThing.id).filter(
ExperimentThing.id==None,
Post.id.in_([x.id for x in posts])).count() == 0
assert db_session.query(Post).outerjoin(
ExperimentThing, Post.id == ExperimentThing.id).filter(
Post.id.in_([x.id for x in posts])).count() == len(posts)
# now confirm that it doesn't add more ExperimentThings if we run it a second time
posts = controller.identify_posts_that_need_snapshotting()
assert len(posts) == today_posts
assert db_session.query(Post).outerjoin(
ExperimentThing, Post.id == ExperimentThing.id).filter(
ExperimentThing.id==None,
Post.id.in_([x.id for x in posts])).count() == 0
assert db_session.query(Post).outerjoin(
ExperimentThing, Post.id == ExperimentThing.id).filter(
Post.id.in_([x.id for x in posts])).count() == len(posts) | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_observe_comment_snapshots(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
yesterday_posts = 10
today_posts = 20
# SET UP TEST BY PROPAGATING POSTS AND COMMENTS
controller, today_post_list, comment_counter = setup_comment_monitoring(r, yesterday_posts, today_posts)
posts = controller.identify_posts_that_need_snapshotting()
assert len(posts) == today_posts
comments = controller.sample_comments(posts)
## EXPIRE SOME OF THE COMMENTS
current_time = datetime.datetime.utcnow()
expired_time = (current_time - datetime.timedelta(seconds=controller.experiment_settings['intervention_window_seconds'] + 10))
for i in range(0, math.floor(len(comments)/2)):
comments[i].created_utc = expired_time
db_session.commit()
## LOAD COMMENT FIXTURES
comment_fixtures = []
filename = "{script_dir}/fixture_data/comments_0.json".format(script_dir=TEST_DIR)
f = open(filename, "r")
comment_fixtures = json.loads(f.read())
f.close() | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def test_sample_comments(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
yesterday_posts = 10
today_posts = 20
controller, today_post_list, comment_counter = setup_comment_monitoring(r, yesterday_posts, today_posts)
posts = controller.identify_posts_that_need_snapshotting()
assert len(posts) == today_posts | c4fcm/CivilServant | [
21,
6,
21,
7,
1465248322
] |
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def _configure(
self,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def rms(signal):
"""Calculate the Root-Mean-Square (RMS) value of a signal"""
return np.sqrt(np.mean(np.square(signal))) | riggsd/zcant | [
3,
1,
3,
8,
1483577342
] |
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian=0, align=self.align)
if self._file.getname() != 'RIFF':
raise wave.Error('file does not start with RIFF id')
if self._file.read(4) != 'WAVE':
raise wave.Error('not a WAVE file')
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian=0, align=self.align)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == 'data':
if not self._fmt_chunk_read:
raise wave.Error('data chunk before fmt chunk')
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise wave.Error('fmt chunk and/or data chunk missing') | riggsd/zcant | [
3,
1,
3,
8,
1483577342
] |
def __init__(self, f, align=True):
self.align = align
wave.Wave_read.__init__(self, f) | riggsd/zcant | [
3,
1,
3,
8,
1483577342
] |
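A hedged usage sketch for the tolerant reader defined by initfp and __init__ above; the subclass name is not visible in this excerpt, so TolerantWavRead stands in for it:

# Hedged sketch: read a .WAV whose chunks may not be 2-byte aligned.
with open("bat_call.wav", "rb") as f:         # hypothetical recording
    reader = TolerantWavRead(f, align=False)  # stand-in class name
    samplerate = reader.getframerate()
    frames = reader.readframes(reader.getnframes())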
def load_wav(fname):
"""Produce (samplerate, signal) from a .WAV file""" | riggsd/zcant | [
3,
1,
3,
8,
1483577342
] |
def load_windowed_wav(fname, start, duration):
"""Produce (samplerate, signal) for a subset of a .WAV file. `start` and `duration` in seconds."""
# we currently load the entire .WAV every time; consider being more efficient
samplerate, signal = load_wav(fname)
start_i = int(start * samplerate)
end_i = int(start_i + duration * samplerate)
return samplerate, signal[start_i:end_i] | riggsd/zcant | [
3,
1,
3,
8,
1483577342
] |
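A hedged usage example combining load_windowed_wav with the rms helper above; the file name is illustrative:

# Hedged usage: a half-second window starting 1.5 s into a recording.
samplerate, signal = load_windowed_wav("bat_call.wav", start=1.5, duration=0.5)
print(samplerate, len(signal), rms(signal))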