content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def extract_rawfile_unique_values(
file: str
) -> list:
"""Extract the unique raw file names from "R.FileName" (Spectronaut output), "Raw file" (MaxQuant output),
"shortname" (AlphaPept output) or "Run" (DIA-NN output) column or from the "Spectral Count" column from the
combined_peptide.tsv file without modifications for the FragPipe.
Args:
file (str): The name of a file.
Raises:
ValueError: if a column with the unique raw file names is not in the file.
Returns:
list: A sorted list of unique raw file names from the file.
"""
file_ext = os.path.splitext(file)[-1]
    if file_ext == '.csv':
        sep = ','
    elif file_ext in ['.tsv', '.txt']:
        sep = '\t'
    else:
        # guard against an otherwise-undefined `sep` for unexpected extensions
        raise ValueError(f'Unsupported file extension: {file_ext}')
with open(file) as filelines:
i = 0
filename_col_index = None
filename_data = []
for l in filelines:
l = l.split(sep)
# just do it for the first line
if i == 0:
for col in ['R.FileName', 'Raw file', 'Run', 'shortname']:
if col in l:
filename_col_index = l.index(col)
break
if not isinstance(filename_col_index, int):
# to check the case with the FragPipe peptide.tsv file when we don't have the info about the experiment name
if ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)) and ("Peptide" in "".join(l)):
return []
# to check the case with the FragPipe combined_peptide.tsv file when the experiment name is included in the "Spectral Count" column
elif ("Sequence" in "".join(l)) and ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)):
return sorted(list(set([col.replace('_', '').replace(' Spectral Count', '') for col in l if 'Spectral Count' in col])))
else:
raise ValueError('A column with the raw file names is not in the file.')
else:
filename_data.append(l[filename_col_index])
i += 1
unique_filenames = set(filename_data)
sorted_unique_filenames = sorted(list(unique_filenames))
return sorted_unique_filenames | 5,355,000 |
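A usage sketch (the file name and column values are hypothetical): a minimal Spectronaut-style CSV exercises the "R.FileName" branch.

```python
import os  # already required by extract_rawfile_unique_values

# write a tiny hypothetical report with an "R.FileName" column
with open("report.csv", "w") as f:
    f.write("R.FileName,PEP.Quantity\n")
    f.write("run_02,1.0\nrun_01,2.0\nrun_01,3.0\n")

print(extract_rawfile_unique_values("report.csv"))  # ['run_01', 'run_02']
```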
def _fetch_from_s3(bucket_name, path):
"""Fetch the contents of an S3 object
Args:
bucket_name (str): The S3 bucket name
path (str): The path to the S3 object
Returns:
str: The content of the S3 object in string format
"""
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
obj = bucket.Object(path)
data = obj.get()['Body'].read().decode('utf-8')
return data | 5,355,001 |
def deterministic(seed):
"""
Make the experiment reproducible
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
cudnn.deterministic = True | 5,355,002 |
def vrv_getatttype(schema, module, gp, aname, includes_dir = ""):
""" returns the attribut type for element name, or string if not detectable."""
# Look up if there is an override for this type in the current module, and return it
# Note that we do not honor pseudo-hungarian notation
attype, hungarian = vrv_get_att_config_type(module, gp, aname)
if attype:
return (attype, hungarian)
# No override, get it from the schema
# First numbers
el = schema.xpath("//tei:attDef[@ident=$name]/tei:datatype/rng:data/@type", name=aname, namespaces=TEI_NS)
if el:
if el[0] == "nonNegativeInteger" or el[0] == "positiveInteger":
return ("int", "")
elif el[0] == "decimal":
return ("double", "")
# The data types
ref = schema.xpath("//tei:classSpec[@ident=$gp]//tei:attDef[@ident=$name]/tei:datatype/rng:ref/@name", gp=gp, name=aname, namespaces=TEI_NS)
if ref:
return (vrv_getformattedtype("{0}".format(ref[0])), "")
# Finally from val lists
vl = schema.xpath("//tei:classSpec[@ident=$gp]//tei:attDef[@ident=$name]/tei:valList[@type=\"closed\"]", gp=gp, name=aname, namespaces=TEI_NS)
if vl:
element = vl[0].xpath("./ancestor::tei:classSpec", namespaces=TEI_NS)
attName = vl[0].xpath("./parent::tei:attDef/@ident", namespaces=TEI_NS)
if element:
return(vrv_getformattedvallist(element[0].get("ident"),attName[0]), "")
#data_list = "{0}.{1}".format(element[0].get("ident"),attName[0])
#elif attName:
# elName = vl[0].xpath("./ancestor::tei:elementSpec/@ident", namespaces=TEI_NS)
# lg.debug("VALLIST {0} --- {1}".format(elName[0],attName[0]))
# Otherwise as string
return ("std::string", "") | 5,355,003 |
def test_create_splits_mid_year():
"""
Make sure that year splits are properly generated when not using first day of year
as start of time series
"""
swe = np.zeros((3000, 50, 50))
a = analysis.Analysis(datetime.date(1993, 5, 1), swe)
years = a.create_year_splits()
assert years[0] == 121 | 5,355,004 |
def _tf1_setpar_ ( func , par , value ) :
"""Set parameter of TF1
>>> fun = ... ## function
>>> fun.setPar(1,1) ## set parameter #1 to be 1
>>> fun.setPar('m',2) ## set parameter 'm' to be 2
"""
if not par in func : raise IndexError("Invalid parameter index %s" % par )
#
if isinstance ( par , str ) : par = func.GetParNumber( par )
#
func.SetParameter ( par , float ( value ) ) | 5,355,005 |
def inc_date(date_obj, num, date_fmt):
"""Increment the date by a certain number and return date object.
as the specific string format.
"""
return (date_obj + timedelta(days=num)).strftime(date_fmt) | 5,355,006 |
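A quick usage sketch (assuming the `timedelta` import the function relies on):

```python
from datetime import date, timedelta

print(inc_date(date(2020, 1, 30), 3, "%Y-%m-%d"))  # '2020-02-02'
```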
def recombine_edges(output_edges):
"""
Recombine a list of edges based on their rules.
    Recombines identical Xe isotopes and removes the isotope atoms.
:param output_edges:
:return:
"""
mol = Chem.MolFromSmiles(".".join(output_edges))
    # Dictionary of atoms to bond together and delete if they come in pairs
iso_dict = {}
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 54:
# Get the isotope
iso = atom.GetIsotope()
if iso in iso_dict:
iso_dict[iso].append(get_info(atom))
else:
iso_dict[iso] = [get_info(atom)]
mw = Chem.RWMol(mol)
# Add bonds first
del_indices = []
for isotope in iso_dict:
if len(iso_dict[isotope]) > 1:
mw.AddBond(
iso_dict[isotope][0][1], iso_dict[isotope][1][1], Chem.BondType.SINGLE
)
del_indices.append(iso_dict[isotope][0][0])
del_indices.append(iso_dict[isotope][1][0])
# Now delete atoms
del_count = 0
for atom_index in sorted(del_indices):
mw.RemoveAtom(atom_index - del_count)
del_count += 1
Chem.SanitizeMol(mw)
return Chem.MolToSmiles(mw, isomericSmiles=True) | 5,355,007 |
def parse_comments(content: str) -> List[str]:
"""Parses comments in LDF files
:param content: LDF file content as string
:type content: str
:returns: a list of all comments in the LDF file
:rtype: List[str]
"""
comment = os.path.join(os.path.dirname(__file__), 'lark', 'comment.lark')
parser = Lark(grammar=open(comment), parser='lalr')
tree = parser.parse(content)
return CommentCollector().transform(tree) | 5,355,008 |
def excludevars(vdict, filters):
"""
Remove dictionary items by filter
"""
vdict_remove = dict()
for filtr in filters:
a = filtervars_sub(vdict, filtr)
vdict_remove.update(a)
vdict_filtered = vdict.copy()
for key in vdict_remove.keys():
del vdict_filtered[key]
return vdict_filtered | 5,355,009 |
def __updateEntityAttributes(fc, fldList, dom, logFile):
"""For each attribute (field) in fldList,
adds attribute definition and definition source,
classifies as range domain, unrepresentable-value domain or enumerated-value domain, and
for range domains, adds rangemin, rangemax, and units;
for unrepresentable value domains, adds unrepresentable value statement;
for enumerated value domains:
1) Finds all controlled-vocabulary fields in the table sent to it
2) Builds a set of unique terms in each field, ie, the domain
3) Matches each domain value to an entry in the glossary
4) Builds a dictionary of term:(definition, source) items
    5) Takes the dictionary items and puts them into the metadata
document as Attribute_Domain_Values
Field MapUnit in table DescriptionOfMapUnits is treated as a special case.
"""
cantfindTerm = []
cantfindValue = []
for fld in fldList:
addMsgAndPrint( ' Field: '+ fld)
# if is _ID field or if field definition is available, update definition
if fld.find('_ID') > -1 or fld in attribDict:
dom = __updateAttrDef(fld,dom)
else:
cantfindTerm.append(fld)
#if this is an _ID field
if fld.find('_ID') > -1:
dom = __updateUdom(fld,dom,unrepresentableDomainDict['_ID'])
#if this is another unrepresentable-domain field
if fld in unrepresentableDomainDict:
dom = __updateUdom(fld,dom,unrepresentableDomainDict[fld])
#if this is a defined range-domain field
elif fld in rangeDomainDict:
dom = __updateRdom(fld,dom)
#if this is MapUnit in DMU
elif fld == 'MapUnit' and fc == 'DescriptionOfMapUnits':
dom = __updateUdom(fld,dom,unrepresentableDomainDict['default'])
#if this is a defined Enumerated Value Domain field
elif fld in enumeratedValueDomainFieldList:
valList = []
#create a search cursor on the field
rows = arcpy.SearchCursor(fc,'','', fld)
row = next(rows)
#collect all values/terms in that field
while row:
if not row.getValue(fld) is None:
valList.append(row.getValue(fld))
row = next(rows)
#uniquify the list by converting it to a set object
valList = set(valList)
#create an empty dictionary object to hold the matches between the unique terms
#and their definitions (grabbed from the glossary)
defs = {}
#for each unique term, try to create a search cursor of just one record where the term
        #matches a Term field value from the glossary
if fld == 'MapUnit' and fc != 'DescriptionOfMapUnits':
for t in valList:
query = '"MapUnit" = \'' + t + '\''
rows = arcpy.SearchCursor(DMU, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
if row.FullName != None:
defs[t].append(row.FullName.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
addMsgAndPrint('MapUnit = '+t+', FullName not defined')
defs[t].append(row.Name.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
if not t in ('',' '): cantfindValue.append([fld,t])
elif fld == 'GeoMaterialConfidence' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterialsConfidence')
defs = GeoMatConfDict
elif fld == 'GeoMaterial' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterials!')
for t in valList:
query = '"GeoMaterial" = \'' + t + '\''
if debug:
addMsgAndPrint('query='+query)
rows = arcpy.SearchCursor(gmDict, query)
row = next(rows)
#if the searchcursor contains a row
if row:
if debug:
addMsgAndPrint(row.GeoMaterial+' : '+row.Definition.encode('utf_8'))
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Definition.encode('utf_8'))
defs[t].append(' GeMS documentation')
else:
addMsgAndPrint('GeoMaterial = '+t+': not defined in GeoMaterialDict')
cantfindValue.append([fld,t])
elif fld.find('SourceID') > -1: # is a source field
for t in valList:
query = '"DataSources_ID" = \'' + t + '\''
rows = arcpy.SearchCursor(dataSources, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Source.encode('utf_8'))
defs[t].append('this report, table DataSources')
else:
cantfindValue.append([fld,t])
else:
for t in valList:
query = '"Term" = '+"'"+ t + "'"
if debug:
addMsgAndPrint('query='+query)
rows = arcpy.SearchCursor(gloss, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Definition.encode('utf_8'))
defs[t].append(__findInlineRef(row.DefinitionSourceID).encode('utf_8'))
else:
if fld != 'GeoMaterial' and fc != 'GeoMaterialDict':
cantfindValue.append([fld,t])
dom = __updateEdom(fld, defs, dom)
else: #presumed to be an unrepresentable domain
dom = __updateUdom(fld,dom,unrepresentableDomainDict['default'])
if len(cantfindValue) > 0:
logFile.write('Missing enumerated-domain values\n')
logFile.write(' ENTITY TERM VALUE\n')
for term in cantfindValue:
logFile.write(' '+fc+' '+term[0]+' **'+term[1]+'**\n')
if len(cantfindTerm) > 0:
logFile.write('Missing terms\n')
logFile.write(' ENTITY TERM\n')
for term in cantfindTerm:
logFile.write(' '+fc + ' '+term+'\n')
return dom | 5,355,010 |
def make_laplace_pyramid(x, levels):
"""
Make Laplacian Pyramid
"""
pyramid = []
current = x
for i in range(levels):
pyramid.append(laplacian(current))
current = tensor_resample(
current,
(max(current.shape[2] // 2, 1), max(current.shape[3] // 2, 1)))
pyramid.append(current)
return pyramid | 5,355,011 |
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ToolsManager(ctx) | 5,355,012 |
def _load_csv_key(symbol_key):
"""
    For the csv storage mode, find the concrete csv file name that corresponds to the symbol_key string,
    e.g. from usTSLA -> find the concrete csv file path usTSLA_2014-7-26_2016_7_26
    :param symbol_key: str object, e.g. usTSLA
"""
# noinspection PyProtectedMember
csv_dir = ABuEnv.g_project_kl_df_data_example if ABuEnv._g_enable_example_env_ipython \
else ABuEnv.g_project_kl_df_data_csv
if file_exist(csv_dir):
for name in os.listdir(csv_dir):
            # Fuzzy-query the csv cache folder and match the concrete csv file path via fnmatch, e.g. usTSLA -> usTSLA_2014-7-26_2016_7_26
# if fnmatch(name, '{}*'.format(symbol_key)):
"""
            Fuzzy matching must not be used here, otherwise TSL would match TSLA and the
            original symbol would be deleted; '_' must be appended as the end-of-symbol marker.
"""
if name.startswith(symbol_key + '_'):
                # The list wrapper is only so callers can uniformly use key[0] across different store types
return [name]
return None | 5,355,013 |
def test_create_simple_gantt(tmpdir):
"""test_create_simple_gantt."""
c1 = Component("c1")
c1.state_record_list = [
BaseComponentState.WORKING,
BaseComponentState.FINISHED,
BaseComponentState.FINISHED,
BaseComponentState.FINISHED,
BaseComponentState.FINISHED,
]
init_datetime = datetime.datetime(2020, 4, 1, 8, 0, 0)
timedelta = datetime.timedelta(days=1)
c1.create_data_for_gantt_plotly(init_datetime, timedelta)
product = Product([c1])
product.create_simple_gantt(save_fig_path=os.path.join(str(tmpdir), "test.png")) | 5,355,014 |
def generate_metadata(year, files, datatype = 'inventory'):
"""
Gets metadata and writes to .json
"""
if datatype == 'source':
source_path = [rcra_external_dir + p for p in files]
source_path = [os.path.realpath(p) for p in source_path]
source_meta = compile_source_metadata(source_path, _config, year)
source_meta['SourceType'] = 'Zip file'
source_meta['SourceURL'] = _config['url']
write_metadata('RCRAInfo_'+ str(year), source_meta,
category=ext_folder, datatype='source')
else:
source_meta = read_source_metadata(paths, set_stewi_meta('RCRAInfo_'+ year,
ext_folder),
force_JSON=True)['tool_meta']
write_metadata('RCRAInfo_'+year, source_meta, datatype=datatype) | 5,355,015 |
def get_routing_table() -> RouteCommandResult:
"""
Execute route command via subprocess. Blocks while waiting for output.
Returns the routing table in the form of a list of routes.
"""
return list(subprocess_workflow.exec_and_parse_subprocesses(
[RouteCommandParams()],
_get_route_command_args_list,
parse_route_output,
))[0] | 5,355,016 |
def xor_arrays(arr1, arr2):
""" Does a XOR on 2 arrays, very slow"""
retarr = array('B')
for i in range(len(arr1)):
retarr.append(arr1[i] ^ arr2[i])
return retarr | 5,355,017 |
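A usage sketch with two equal-length byte arrays:

```python
from array import array  # xor_arrays builds an array('B')

a = array('B', [0x0F, 0xAA, 0xFF])
b = array('B', [0xF0, 0xAA, 0x0F])
print(xor_arrays(a, b))  # array('B', [255, 0, 240])
```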
def delete_category():
"""Delete category specified by id from database"""
category = Category.query.get(request.form['id'])
db.session.delete(category)
db.session.commit()
return '' | 5,355,018 |
def gen_dd(acc, amt):
"""Generate a DD (low-level)"""
read()
dd_num = dd_no()
while dd_num in dds.keys():
dd_num = dd_no()
dd = {
'ac_no': acc,
'amount': amt
}
return dd_num, dd | 5,355,019 |
def coranking_matrix(high_data, low_data):
"""Generate a co-ranking matrix from two data frames of high and low
dimensional data.
:param high_data: DataFrame containing the higher dimensional data.
:param low_data: DataFrame containing the lower dimensional data.
:returns: the co-ranking matrix of the two data sets.
"""
n, m = high_data.shape
high_distance = distance.squareform(distance.pdist(high_data))
low_distance = distance.squareform(distance.pdist(low_data))
high_ranking = high_distance.argsort(axis=1).argsort(axis=1)
low_ranking = low_distance.argsort(axis=1).argsort(axis=1)
Q, xedges, yedges = np.histogram2d(high_ranking.flatten(),
low_ranking.flatten(),
bins=n)
Q = Q[1:, 1:] # remove rankings which correspond to themselves
return Q | 5,355,020 |
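A usage sketch; plain NumPy arrays also work since only `.shape` and `pdist` are used, and the random data here is purely illustrative:

```python
import numpy as np
from scipy.spatial import distance

rng = np.random.default_rng(0)
high = rng.normal(size=(50, 10))   # 50 samples in 10-D
low = high[:, :2]                  # stand-in for any 2-D embedding
Q = coranking_matrix(high, low)
print(Q.shape)                     # (49, 49): self-rankings are removed
```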
def gaussian_dist_xmu1xmu2_product_x(mu1,Sigma1,mu2,Sigma2):
"""Compute distribution of N(x|mu1,Sigma1)N(x|mu2,Sigma2)"""
InvSigmaHat = np.linalg.inv(Sigma1) + np.linalg.inv(Sigma2)
SigmaHat = np.linalg.inv(InvSigmaHat)
muHat = np.dot(SigmaHat,np.linalg.solve(Sigma1, mu1) + np.linalg.solve(Sigma2,mu2))
logC = gaussian_logprob(mu1,mu2,Sigma1 + Sigma2)
return (logC,muHat,SigmaHat) | 5,355,021 |
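For reference, these are the standard Gaussian product identities the function evaluates, written in the code's notation (with `gaussian_logprob` assumed to return the log of a normal density):

```latex
\mathcal{N}(x \mid \mu_1, \Sigma_1)\,\mathcal{N}(x \mid \mu_2, \Sigma_2)
  = C\,\mathcal{N}(x \mid \hat{\mu}, \hat{\Sigma}), \qquad
\hat{\Sigma} = \bigl(\Sigma_1^{-1} + \Sigma_2^{-1}\bigr)^{-1}, \quad
\hat{\mu} = \hat{\Sigma}\bigl(\Sigma_1^{-1}\mu_1 + \Sigma_2^{-1}\mu_2\bigr), \quad
\log C = \log \mathcal{N}(\mu_1 \mid \mu_2, \Sigma_1 + \Sigma_2).
```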
def default_marker_size(fmt):
""" Find a default matplotlib marker size such that different marker types
look roughly the same size.
"""
temp = fmt.replace('.-', '')
if '.' in temp:
ms = 10
elif 'D' in temp:
ms = 7
elif set(temp).intersection('<>^vd'):
ms = 9
else:
ms = 8
return ms | 5,355,022 |
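A usage sketch showing the sizes picked for a few matplotlib-style format strings:

```python
for fmt in ['o-', '.-', 'D', 'v--', 's']:
    print(fmt, default_marker_size(fmt))
# o- 8, .- 8, D 7, v-- 9, s 8
```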
def vote_smart_candidate_rating_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
'rating': rating.rating,
'timeSpan': rating.timespan, # Seems to be typo with lower case "s"
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
'sigId': rating.sigId,
}
return rating_filtered | 5,355,023 |
def write_network_file(path, conf):
"""
Write the key=val string to a file.
It's not possible to use native file output because these files must
be written with euid=0. Of course, there is no way to elevate this
process. Instead, shell out so that sudo can be applied.
Parameters
----------
path : str
The full path of the file.
conf : dict
The configuration key, value's.
Returns
-------
No return value.
"""
sudo_utils.call(
['sh', '-c', 'echo \'{}\' > {}'.format(build_env_file(conf), path)]) | 5,355,024 |
def get_syntax(view):
""" get_syntax(view : sublime.View) -> str
>>> get_syntax(view)
'newLISP'
>>> get_syntax(view)
'Lisp'
    Returns the current file's syntax/language
"""
syntax = view.settings().get('syntax')
syntax = syntax.split('/')[-1].replace('.tmLanguage', '')
return syntax | 5,355,025 |
def test_field_nested_resource_provided(db_session):
"""Test providing a resource to a Relationship works."""
resource = AlbumResource(session=db_session)
field = Relationship(nested=resource)
assert field.resource == resource | 5,355,026 |
def undoInfo(*args, **kwargs):
"""
This command controls the undo/redo parameters.
Flags:
- chunkName : cn (unicode) [create]
Sets the name used to identify a chunk for undo/redo purposes when opening a chunk.
- closeChunk : cck (bool) [create]
Closes the chunk that was opened earlier by openChunk. Once close chunk is called, all undoable operations in the chunk
will undo as a single undo operation. Use with CAUTION!! Improper use of this command can leave the undo queue in a bad
state.
- infinity : infinity (bool) [create,query]
Set the queue length to infinity.
- length : l (int) [create,query]
Specifies the number of items in the undo queue. The infinity flag overrides this one.
- openChunk : ock (bool) [create]
Opens a chunk so that all undoable operations after this call will fall into the newly opened chunk, until close chunk
is called. Once close chunk is called, all undoable operations in the chunk will undo as a single undo operation. Use
with CAUTION!! Improper use of this command can leave the undo queue in a bad state.
- printQueue : pq (bool) [query]
Prints to the Script Editor the contents of the undo queue.
- redoName : rn (unicode) [query]
Returns what will be redone (if anything)
- redoQueueEmpty : rqe (bool) [query]
Return true if the redo queue is empty. Return false if there is at least one command in the queue to be redone.
- state : st (bool) [create,query]
Turns undo/redo on or off.
- stateWithoutFlush : swf (bool) [create,query]
Turns undo/redo on or off without flushing the queue. Use with CAUTION!! Note that if you perform destructive
operations while stateWithoutFlush is disabled, and you then enable it again, subsequent undo operations that try to go
past the destructive operations may be unstable since undo will not be able to properly reconstruct the former state of
the scene. Flag can have multiple arguments, passed either as a tuple or a list.
- undoName : un (unicode) [query]
Returns what will be undone (if anything)
- undoQueueEmpty : uqe (bool) [query]
Return true if the undo queue is empty. Return false if there is at least one command in the queue to be undone.
Derived from mel command `maya.cmds.undoInfo`
"""
pass | 5,355,027 |
def run(cmd_str,cwd='.'):
""" an OS agnostic function to execute command
Parameters
----------
cmd_str : str
the str to execute with os.system()
cwd : str
the directory to execute the command in
Note
----
uses platform to detect OS and adds .exe or ./ as appropriate
for Windows, if os.system returns non-zero, raises exception
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.run("pestpp pest.pst")``
"""
bwd = os.getcwd()
os.chdir(cwd)
try:
exe_name = cmd_str.split()[0]
if "window" in platform.platform().lower():
if not exe_name.lower().endswith("exe"):
raw = cmd_str.split()
raw[0] = exe_name + ".exe"
cmd_str = ' '.join(raw)
else:
if exe_name.lower().endswith('exe'):
raw = cmd_str.split()
exe_name = exe_name.replace('.exe','')
raw[0] = exe_name
cmd_str = '{0} {1} '.format(*raw)
if os.path.exists(exe_name) and not exe_name.startswith('./'):
cmd_str = "./" + cmd_str
except Exception as e:
os.chdir(bwd)
raise Exception("run() raised :{0}".format(str(e)))
print("run():{0}".format(cmd_str))
try:
ret_val = os.system(cmd_str)
except Exception as e:
os.chdir(bwd)
raise Exception("run() raised :{0}".format(str(e)))
os.chdir(bwd)
if "window" in platform.platform().lower():
if ret_val != 0:
raise Exception("run() returned non-zero") | 5,355,028 |
def random_otp():
"""
:return: OTP for Event
:return type: string
"""
try:
        all_events = Events.query.all() # raises an error here if there is no Event
all_holded_events = HoldedEvents.query.all()
used_otps = set()
for otp_ in all_events:
used_otps.add(str(otp_.otp))
for otp_ in all_holded_events:
used_otps.add(str(otp_.otp))
total_otps = set()
available_otps = set()
for otp_ in range(0, 999999+1):
otp = str(otp_)
if len(otp)!=6:
diff = 6-len(otp)
otp = '0'*diff + otp
total_otps.add(otp)
available_otps = total_otps - used_otps
if len(available_otps) == 1:
return available_otps.pop()
else:
return 'Fail'
except:
return 'Fail' | 5,355,029 |
def resume_training(out: ModelDir, notes: str = None, dry_run=False, start_eval=False):
""" Resume training an existing model """
train_params = out.get_last_train_params()
model = out.get_model()
train_data = train_params["data"]
evaluators = train_params["evaluators"]
params = train_params["train_params"]
params.num_epochs = 24*3
if isinstance(train_data, PreprocessedData):
# TODO don't hard code # of processes
train_data.preprocess(6, 1000)
latest = tf.train.latest_checkpoint(out.save_dir)
if latest is None:
raise ValueError("No checkpoint to resume from found in " + out.save_dir)
_train(model, train_data, latest, None, False, params, evaluators, out, notes, dry_run, start_eval) | 5,355,030 |
def read_config(path):
"""Read the complete INI file and check its version number
if OK, pass values to config-database
"""
return _read_config(path) | 5,355,031 |
def getProbaForAllMeasures():
"""
Algorithm for calculating conditional probabilities for all categories in all measures
"""
logging.info( "Calculate all conditionnal probabilities" )
istats = 0
measures = getAllMeasures()
measures = measures[:,1:]
measures = np.array([list(m) for m in measures],dtype=object)
measures = np.reshape(measures, (measures.shape[0],measures.shape[2]))
if(len(measures) == 0):
logging.info( "No values are available for learning" )
else:
logging.info( "Delete all previous calculated stats" )
#first, remove all
session.query(Stats).delete()
session.commit()
#loop through criteria
for c in criteria:
logging.info( "\nCriterion id : %s",format(c.id) )
if len(c.values) == 0:
continue
stats = list()
#if criterion has one or more parents
if(len(c.parents) > 0):
sortParents = sorted(c.parents, key=lambda p: p.id)
ids = [str(c.id),] + [str(sp.id) for sp in sortParents]
catChild = list(set([v.category for v in c.values]))
catParent = [flatten(set([str(upc.category) for upc in p.values])) for p in sortParents]
catChildAndParent = [catChild,] + catParent
productNumerator = list(itertools.product(*catChildAndParent))
#reshape combinationCatParents in a matrix [values number,parent number]
#catNumerator = np.reshape(productNumerator, [len(productNumerator),len(c.parents) + 1])
catNumerator = np.array(productNumerator)
if len(catNumerator) > 0:
catDenominator = catNumerator[:,1:]
#index for truncation measures matrix
index = 0
#init truncation matrix with 0
truncation = ()
truncation = np.zeros((measures.shape[0],len(c.parents) + 1),dtype=object)
#truncate measures with only current criterion and parent columns
truncation[:,index] = measures[:,c.id - 1]
for p in c.parents:
index += 1
truncation[:,index] = measures[:,p.id - 1]
#for each combination of category, calculation is done for denominator
den = [np.count_nonzero([(cd == t).all() for t in truncation[:,1:]]) for cd in catDenominator]
#for each combination of category, calculation is done for numerator
num = [np.count_nonzero([(cn == t).all() for t in truncation]) for cn in catNumerator]
                #avoid dividing by 0
num = np.take(num,np.nonzero(den))
#get categories of parents
productNumerator = [productNumerator[i] for i in list(np.nonzero(den)[0])]
den = np.take(den,np.nonzero(den))
results = np.divide(num,den,dtype=float)
#persist stats to db
for i in range(0,len(productNumerator)):
istats += 1
listProduct = list(productNumerator[i])
stats.append(Stats(id=istats,association=';'.join(ids),parent=';'.join(listProduct[:1]),children=';'.join(listProduct[1:]),value=';'.join(listProduct),proba=results[0][i]))
session.add_all(stats)
session.commit()
logging.info( "Criteria : %s",format(ids) )
logging.info( "Categories : %s",format(productNumerator) )
logging.info( "Proba : %s",format(results[0]) )
else:
logging.warning( 'No measure available for this criterion and/or parents' )
#if there is no parent for this criterion
else:
logging.warning( 'No relationship find for criterion id : %s',format(c.id) )
#TODO useless?
#catChild = np.array(list(set([v.category for v in c.values])))
#print [np.count_nonzero([(m == cc).all() for cc in catChild]) for m in measures]
if istats > 0:
        print('SUCCESS: {} stats have been calculated and inserted in database'.format(istats)) | 5,355,032 |
def LinkAndroidDeviceID(request, callback, customData = None, extraHeaders = None):
"""
Links the Android device identifier to the user's PlayFab account
https://docs.microsoft.com/rest/api/playfab/client/account-management/linkandroiddeviceid
"""
if not PlayFabSettings._internalSettings.ClientSessionTicket:
raise PlayFabErrors.PlayFabException("Must be logged in to call this method")
def wrappedCallback(playFabResult, error):
if callback:
callback(playFabResult, error)
PlayFabHTTP.DoPost("/Client/LinkAndroidDeviceID", request, "X-Authorization", PlayFabSettings._internalSettings.ClientSessionTicket, wrappedCallback, customData, extraHeaders) | 5,355,033 |
def getPath(file):
"""Get the path of a source file.
Use this to extract the path of a file/directory when the file
could be specified either as a FileTarget, DirectoryTarget or string.
@param file: The object representing the file.
@type file: L{FileTarget}, L{DirectoryTarget} or C{basestring}
"""
assert not isinstance(file, AsyncResult)
if isinstance(file, (FileTarget, DirectoryTarget)):
return file.path
elif isinstance(file, basestring):
return file
else:
return None | 5,355,034 |
def optical_flow_to_rgb(flows):
"""
Args:
A tensor with a batch of flow fields of shape [b*num_src, 2, h, w]
"""
flows = flows.cpu().numpy()
_, h, w = flows[0].shape
rgbs = []
for i in range(len(flows)):
mag, ang = cv2.cartToPolar(flows[i, 0, ...], flows[i, 1, ...])
hsv = np.zeros(shape=(h, w, 3), dtype="float32")
# true_angle / 2, hue range [0, 180]
hsv[..., 0] = (ang * 180 / np.pi) / 2
hsv[..., 1] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv[..., 2] = 255
rgb = cv2.cvtColor(hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
rgbs.append(rgb)
rgbs = np.array(rgbs).transpose([0, 3, 1, 2])
return torch.tensor(rgbs) | 5,355,035 |
def get_motes_from_simulation(simfile, as_dictionary=True):
"""
This function retrieves motes data from a simulation file (.csc).
:param simfile: path to the simulation file
:param as_dictionary: flag to indicate that the output has to be formatted as a dictionary
    :return: the list of motes formatted as dictionaries with 'id', 'x', 'y' and 'motetype_identifier' keys if
             as_dictionary is False, or a dictionary with each mote id as the key and its tuple (x, y) as the value
"""
motes = []
with open(simfile) as f:
content = f.read()
iterables, fields = [], ['mote_id']
for it in ['id', 'x', 'y', 'motetype_identifier']:
iterables.append(finditer(r'^\s*<{0}>(?P<{0}>.*)</{0}>\s*$'.format(it), content, MULTILINE))
for matches in zip(*iterables):
mote = {}
for m in matches:
mote.update(m.groupdict())
motes.append(mote)
if as_dictionary:
motes = {int(m['id']): (float(m['x']), float(m['y'])) for m in motes}
return motes | 5,355,036 |
def menu_bar():
"""each mini-game has a menu bar that allows direct access to
the main menu. This allows story mode to be bypassed after
starting war, but the game state will not be saved"""
pygame.draw.rect(SCREEN, TEAL, (0, 460, 640, 40))
menu_font = pygame.font.Font('freesansbold.ttf', 15)
menu_txt = menu_font.render("Menu", True, BLACK, TEAL)
menu_rect = menu_txt.get_rect()
menu_rect.center = (60, 480)
SCREEN.blit(menu_txt, menu_rect)
instr_txt = menu_font.render("Instructions", True, BLACK, TEAL)
instr_rect = instr_txt.get_rect()
instr_rect.center = (150, 480)
SCREEN.blit(instr_txt, instr_rect)
return menu_rect, instr_rect | 5,355,037 |
def merge_sort(a, p, r):
""" merge sort
    :param a: an array to sort; a[p:r+1] is the slice to be sorted
    :param p: start index of the slice; if p >= r, the slice has length <= 1 and the call returns
    :param r: end index of the slice; if p >= r, the slice has length <= 1 and the call returns
"""
if p < r:
q = int((p + r) / 2)
# divider
a = merge_sort(a, p, q)
a = merge_sort(a, q + 1, r)
# conquer
merge(a, p, q, r)
return a | 5,355,038 |
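A usage sketch, assuming the companion `merge(a, p, q, r)` helper referenced above is defined alongside:

```python
data = [5, 2, 4, 7, 1, 3, 2, 6]
merge_sort(data, 0, len(data) - 1)  # sorts in place (and returns the list)
print(data)  # [1, 2, 2, 3, 4, 5, 6, 7]
```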
def uint8_to_binary(folder, out_folder):
"""
    Convert a folder of masks in 0/255 format to binary (0/1) format
    :param folder: folder to examine
    :param out_folder: output folder
"""
try:
os.mkdir(out_folder)
except:
pass
if is_mask(folder, 255):
list_mask_path = os.listdir(folder)
for mask_path in list_mask_path:
mask = cv2.imread(folder + mask_path, cv2.IMREAD_UNCHANGED)
ret, thresh = cv2.threshold(mask, 127, 1, cv2.THRESH_BINARY)
cv2.imwrite(out_folder + mask_path, thresh)
print("Conversion is done") | 5,355,039 |
def green_foreground(greentext):
"""Green foreground for notice messages.
Green foreground for error messages.
Arguments:
greentext {str} -- text, which will be colored in green.
"""
LOG.notice(pyfancy().green().bold(greentext)) | 5,355,040 |
def get_flow_graph(limit, period):
"""
:type limit int
:type period int
:rtype: list[dict]
"""
rows = ElasticsearchQuery(
es_host=ELASTICSEARCH_HOST,
period=period,
index_prefix='logstash-other'
).query_by_string(
query='kubernetes.labels.job-name:* AND '
'kubernetes.container_name: "portability-metric" AND ("SELECT" OR "UPDATE")',
fields=[
'log',
'kubernetes.labels.job-name'
],
limit=limit
)
entries = []
for row in rows:
for entry in get_portability_metrics_query(
row['log'], row['kubernetes']['labels']['job-name']):
entries.append(entry)
# print(entries)
# process the logs
def _map(item):
return '{}'.join(item)
def _reduce(items):
# ('MetricArticleProvider.py', 'UPDATE', 'articledata')
first = items[0]
script = 'cron:{}'.format(first[0])
query_type = first[1]
table_name = 'db:{}'.format(first[2])
return {
'source': table_name if query_type == 'SELECT' else script,
'edge': query_type,
'target': table_name if query_type != 'SELECT' else script,
}
return logs_map_and_reduce(entries, _map, _reduce) | 5,355,041 |
def upload(server_ip, share, username, password, domain, remote_path, local_path, verbose=True):
""" Get file and folder on the remote file server.
server_ip (str): This value is the ip smb server's ip.
share (str): This value is the share file name.
username (str): This value is the login username required to connect to smb service.
password (str): This value is the login password required to connect to smb service.
domain (str): This value is the server domain name.
        remote_path (str): This value is the remote path where the file will be uploaded.
        local_path (str): This value is the local path of the file to upload.
verbose (boolean): Print information about function progress.
Returns:
        boolean: True if the upload succeeds, False otherwise.
"""
try:
smb = connect_samba_server(server_ip, share, username, password, domain, verbose=True)
smb.upload(local_path, remote_path)
smb.close()
regex = re.compile("((?:[^/]*/)*)(.*)")
for file in get_remote_dir(server_ip, share, username, password, domain, "/", verbose=True):
if regex.match(remote_path).group(2) in file:
print(Fore.GREEN+" ===> [upload] {"+regex.match(local_path).group(2)+"} -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return True
print(Fore.RED+" ===> [upload] {"+regex.match(local_path).group(2)+"} failed! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False
except Exception as e:
print(Fore.RED+" ===> [upload] failed during execution! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False | 5,355,042 |
def list_directory_command(api_client: CBCloudAPI, device_id: str, directory_path: str, limit: Union[int, str]):
"""
Get list of directory entries in the remote device
:param api_client: The API client
:param device_id: The device id
:param directory_path: Directory to list. This parameter should end with the path separator
:param limit: Limit the result entries count to be the given limit
:return: CommandResult represent the API command result
:rtype: ``CommandResults``
"""
session = api_client.select(platform.Device, device_id).lr_session()
items = [item for item in session.list_directory(directory_path) if item['filename'] not in IGNORED_FILES_IN_DIR]
items, partial_res_msg = get_limited_results(original_results=items, limit=limit)
directories_readable = []
context_entry_items = []
headers = ['name', 'type', 'date_modified', 'size']
for item in items:
context_entry_items.append(item)
directories_readable.append({
'name': item['filename'],
'type': 'Directory' if item['attributes'] and 'DIRECTORY' in item['attributes'] else 'File',
'date_modified': item['last_write_time'],
'size': item['size'],
})
context_entry = dict(content=context_entry_items, device_id=device_id, directory_path=directory_path)
readable_output = tableToMarkdown(f'Directory of {directory_path}{partial_res_msg}',
t=directories_readable,
headers=headers,
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
outputs_prefix='CarbonBlackDefenseLR.Directory',
outputs_key_field=['device_id', 'directory_path'],
outputs=context_entry,
readable_output=readable_output,
raw_response=items,
) | 5,355,043 |
def _hparams(network, random_seed):
"""
Global registry of hyperparams. Each entry is a (default, random) tuple.
New algorithms / networks / etc. should add entries here.
"""
hparams = {}
def _hparam(name, default_val, random_val_fn):
"""Define a hyperparameter. random_val_fn takes a RandomState and
returns a random hyperparameter value."""
random_state = np.random.RandomState(
misc.seed_hash(random_seed, name)
)
hparams[name] = (default_val, random_val_fn(random_state))
# Unconditional hparam definitions.
_hparam('lr', 0.001, lambda r: 10**r.uniform(-5, -2)) #
_hparam('weight_decay', 0, lambda r: 10**r.uniform(-6, -2))
_hparam('batch_size', 16, lambda r: int(r.choice([8,12,16])))
_hparam('epoch', 100, lambda r: int(r.choice([60,90,120,150])))
_hparam('transform_aug', False, lambda r: bool(r.choice([True,False])))
_hparam('lr_schedule', 1, lambda r: int(r.choice([0,1,2,3])))
if network == 'PoseResNet':
_hparam('num_layers', 50, lambda r: int(r.choice([50]))) #[18,34,50,101,152]
_hparam('pretrained', False, lambda r: bool(r.choice([False]))) #True,
return hparams | 5,355,044 |
def bidding_search(request):
"""
"""
query = ''
form = BiddingSearchForm(shop=request.shop, data=request.GET)
if form.is_valid():
query = form.get_query()
results = form.search()
else:
results = form.all_results()
pager = Paginator(results, PAGE_SEARCH)
try:
page = int(request.GET.get('page','1'))
except:
page = 1
try:
products = pager.page(page)
except (EmptyPage, InvalidPage):
products = pager.page(pager.num_pages)
paged = (pager.num_pages > 1)
t = loader.get_template('bidding/blocks/search.html')
c = RequestContext(request, {'form': form,
'products' : products,
'pages': pager.page_range,
'paged': paged })
block_search = (t.render(c))
getvars = "&q=%s" % form.cleaned_data.get("q")
t = loader.get_template('paginator.html')
filter_params = {'q': form.cleaned_data.get("q", '')}
c = RequestContext(request, {'objects': products,
'getvars': getvars,
'filter_params': filter_params,
'pages': pager.page_range,
'paged': paged})
paginator = (t.render(c))
try:
page = DynamicPageContent.objects.filter(shop=request.shop, page="search").get()
description = striptags(page.meta_content)
except DynamicPageContent.DoesNotExist:
description = "No meta description found"
return HttpResponse(my_render(request, {'results': block_search,
'paginator': paginator,
'page_title': 'Search',
'page_description': description
}, 'search')) | 5,355,045 |
def clean_code(code, code_type):
""" Returns the provided code string as a List of lines """
if code_type.startswith(BOOTSTRAP):
if code_type.endswith(CLEAN):
return code.split("\n")
code = code.replace("\\", "\\\\")
if code_type.startswith(PERMUTATION):
if code_type.endswith(CLEAN):
return code.split("\n")
if code_type.startswith(FRAGMENT):
if code_type.endswith(CLEAN):
return bytes(code, encoding="ascii").decode('unicode_escape')
code = code.replace("{", "{\\n").replace("}", "\\n}\\n").replace(";", ";\\n")
code = retab(bytes(code, encoding="ascii").decode('unicode_escape'))
return code.split("\n") | 5,355,046 |
def main():
"""Parse the arguments."""
tic = datetime.datetime.now()
parser = argparse.ArgumentParser(
description=('Examine a balance_data.py output file and '
'look for taxids with data sizes that are too large.'))
parser.add_argument("file",
type=str,
help=("The output of balance_data.py"))
parser.add_argument("--threshold",
"-t",
type=int,
help=("The maximum size of a data set."),
default=10000000)
args = parser.parse_args()
print(args, file=sys.stderr)
output = process_file(args.file, args.threshold)
for taxid in output:
print(taxid[0], file=sys.stdout)
print(taxid, file=sys.stderr)
toc = datetime.datetime.now()
print("The process took time {}.".format(toc - tic), file=sys.stderr) | 5,355,047 |
def level(arr, l, ax=2, t=None, rounding=False):
"""
As level 1D but accepts general arrays and level is taken is some
specified axis.
"""
return np.apply_along_axis(level1D, ax, arr, l, t, rounding) | 5,355,048 |
def updateStore(request, storeId):
""" view for updating store """
if canViewThisStore(storeId, request.user.id):
# get the corresponding store
store = Store.objects.get(id=storeId)
metadata = getFBEOnboardingDetails(store.id)
if request.method == "POST":
# Create a form instance and populate it with data from the request (binding):
form = UpdateStoreForm(request.POST)
# Check if the form is valid:
if form.is_valid():
store.name = form.cleaned_data["business_name"]
store.save()
return redirect("viewStore", storeId)
form = UpdateStoreForm(initial={"business_name": store.name})
breadcrumbs = [(store.name, "viewStore", store.id)]
context = {
"form": form,
"store": store,
"fb_metadata": metadata,
"page_title": "Update Shop",
"breadcrumbs": breadcrumbs,
"button": "Update",
}
return render(request, "core/update.html", context)
else:
return render(request, "403.html") | 5,355,049 |
def i(t, T, r, a, b, c):
"""Chicago design storm equation - intensity. Uses ia and ib functions.
Args:
        t: time in minutes from storm beginning
T: total storm duration in minutes
r: time to peak ratio (peak time divided by total duration)
a: IDF A parameter - can be calculated from getABC
b: IDF B parameter - can be calculated from getABC
c: IDF C parameter - can be calculated from getABC
Returns:
Returns intensity in mm/hr.
"""
if t < T*r:
return ib(T*r - t, r, a, b, c)
elif t > T*r:
return ia(t - T*r, r, a, b, c)
else:
# Should be infinity, but this does the job
return 1000 | 5,355,050 |
def get_primary_id_from_equivalent_ids(equivalent_ids, _type):
"""find primary id from equivalent id dict
params
------
equivalent_ids: a dictionary containing all equivalent ids of a bio-entity
_type: the type of the bio-entity
"""
if not equivalent_ids:
return None
id_rank = [('bts:' + _item) for _item in id_ranks.get(_type)]
# loop through id_rank, if the id is found in equivalent ids, return it
for _item in id_rank:
if equivalent_ids.get(_item):
return (_item[4:] + ':' + equivalent_ids[_item][0])
# if no id found, return a random one from equivalent ids
for k, v in equivalent_ids.items():
if v:
return (k[4:] + ':' + v[0]) | 5,355,051 |
def _set_coverage_build():
"""Set the right environment variables for a coverage build."""
os.environ['SANITIZER'] = 'coverage'
os.environ['ENGINE'] = 'libfuzzer'
os.environ['ARCHITECTURE'] = 'x86_64' | 5,355,052 |
def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i):
"""1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
Args:
        L_CS_x_t_i(ndarray): Sensible cooling load of heating/cooling zone i at time t on date d (MJ/h)
        L_CL_x_t_i(ndarray): Latent cooling load of heating/cooling zone i at time t on date d (MJ/h)
Returns:
        float: Annual maximum of the daily total cooling load (MJ/d)
"""
    # Sum over the zone axis (remove the heating/cooling-zone dimension)
L_CS_x_t = np.sum(L_CS_x_t_i, axis=0)
L_CL_x_t = np.sum(L_CL_x_t_i, axis=0)
    # Add the elements of L_CS_x_t and L_CL_x_t
L_C_x_t = L_CS_x_t + L_CL_x_t
    # Reshape the 1-D array into a 2-D (365 days x 24 hours) array
L_C_x_t = np.reshape(L_C_x_t, (365, 24))
    # Sum over the time (hour) axis
L_C_x = np.sum(L_C_x_t, axis=1)
    # Annual maximum of the daily total cooling load (MJ/d) (Eq. 20c)
L_max_C = np.max(L_C_x)
return L_max_C | 5,355,053 |
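A usage sketch: two zones with a constant 1 MJ/h sensible and 1 MJ/h latent load for every hour of the year.

```python
import numpy as np  # the function itself already relies on numpy as np

L_CS = np.ones((2, 365 * 24))  # (zones, hours)
L_CL = np.ones((2, 365 * 24))
print(get_L_max_C(L_CS, L_CL))  # 96.0 = 2 zones * (1 + 1) MJ/h * 24 h
```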
def complete_session(session: namedtuple, speeches: list) -> dict:
"""
This will result in loss of data bc content will be reduced to speeches.
HTML_classes, speaker_flow, speaker_role etc. will not be given any longer
since it's assumed that speakers are either members of parliament or
ministers.
Another important reduction is that speeches have been stripped of
annotations like applause or calls.
Updated keys in speeches:
date
protocol_no
agenda_item - topic
speaker
party - if mop, will be ministry if minister
speech - complete speech; no hall action, no interruptions
Updated keys in session:
date
period
index
content - all speeches of a single session
Speeches are given as a list of complete sentences.
"""
reduced_data = {}
period = int(session.protocol_no.split('/')[0])
index = int(session.protocol_no.split('/')[-1])
reduced_data["date"] = session.date
reduced_data["period"] = period
reduced_data["index"] = index
reduced_data["content"] = speeches
return reduced_data | 5,355,054 |
def replace_text_in_file(file_path, replace_this, for_that, case_insensitive=False, is_regex=False, keep_copy=False,
number_of_subs=0):
""" replace a string or regex (if is_regex is set) from a file given in file_path, with another string.
This is a replacement for sed if needed.
@param str file_path: path to the file to be changed
@param str replace_this: string or regex to match and replace
@param str for_that: string that will replace the match
@param bool case_insensitive: flag to indicate if case is important
@param bool is_regex: flag to indicate if replace_this is a regular expression or a plain string
@param bool keep_copy: flag to keep copy of original file or not. The original file will be timestamped
@param int number_of_subs: number of times to do the substitution. A zero means replace all
@rtype: tuple
"""
if not is_regex:
replace_this = re.escape(replace_this)
new_file_path = duplicate_file_with_stamp(file_path) if keep_copy else file_path
import fileinput
    for current_line in fileinput.input(file_path, inplace=True):
        current_line, num_subs_made = re.subn(replace_this, for_that, current_line,
                                              flags=(re.IGNORECASE if case_insensitive else 0), count=number_of_subs)
        number_of_subs = 0 if not number_of_subs else (number_of_subs - num_subs_made)
        # with inplace=True, stdout is redirected into the file, so the (possibly modified) line must be written back
        print(current_line, end='')
return file_path, new_file_path | 5,355,055 |
def load_annotations(ann_file):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos | 5,355,056 |
def commandline(args):
"""
Settings for the commandline arguments.
Returns the parsed arguments.
"""
parser = argparse.ArgumentParser(description='Checks the timestamps for files in a directory.')
parser.add_argument("-p", "--path", required=True,
help="Path to offline backup list file or directory")
parser.add_argument("-w", "--warning",
help="Threshold for warnings in days. Default: 2 Days")
parser.add_argument("-c", "--critical",
help="Threshold for criticals in days. Default: 5 Days")
parser.add_argument("-f", "--format",
help="Format of the date in the file. Default: Y-m-d")
parser.add_argument("-r", "--regex",
help="Regular Expression to extract date from file. Default: [0-9]{4}-[0-9]{2}-[0-9]{2}")
parser.add_argument("-v", "--verbose",
help="Increase output verbosity",
action="store_true")
parser.set_defaults(verbose=False,
critical=5,
warning=2)
return parser.parse_args(args) | 5,355,057 |
def seq_hist(seq_lens: List[int]) -> Dict[int, int]:
"""Returns a dict of sequence_length/count key/val pairs.
For each entry in the list of sequence lengths, tabulates
the frequency of appearance in the list and returns the
data as a dict. Useful for histogram operations on sequence
length.
"""
seq_count = {}
for slen in seq_lens:
if slen in seq_count:
seq_count[slen] += 1
else:
seq_count[slen] = 1
return seq_count | 5,355,058 |
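A usage sketch:

```python
print(seq_hist([7, 7, 3, 12, 3, 3]))  # {7: 2, 3: 3, 12: 1}
```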
def turn_south():
"""
Karel will turn to South side.
"""
while not facing_south():
turn_left() | 5,355,059 |
def test_ostfullness_serializer():
"""lfshealth.LfsOstFullness: can serialize and deserialize circularly
"""
# Read from a cache file
ostfullness = tokio.connectors.lfshealth.LfsOstFullness(cache_file=tokiotest.SAMPLE_LFS_DF_FILE)
print(ostfullness)
# Serialize the object, then re-read it and verify it
print("Caching to %s" % tokiotest.TEMP_FILE.name)
ostfullness.save_cache(tokiotest.TEMP_FILE.name)
print("Cache file has size %s" % os.path.getsize(tokiotest.TEMP_FILE.name))
# Open a second file handle to this cached file to load it
ostfullness = tokio.connectors.lfshealth.LfsOstFullness(cache_file=tokiotest.TEMP_FILE.name)
print(ostfullness)
tokiotest.TEMP_FILE.close()
verify_ost(ostfullness, input_type='ostfullness') | 5,355,060 |
def load_staging_tables(cur, conn):
"""
Load data from files stored in S3 to the staging tables.
"""
print("Loading data from JSON files stored in S3 buckets into staging tables")
for query in copy_table_queries:
cur.execute(query)
conn.commit()
print("Complete.\n") | 5,355,061 |
def make_ms_url( syndicate_host, syndicate_port, no_tls, urlpath="" ):
"""
Make a URL to the MS.
Return the URL.
"""
scheme = "https://"
default_port = 80
if no_tls:
default_port = 443
scheme = "http://"
if syndicate_port != default_port:
return scheme + os.path.join( syndicate_host.strip("/") + ":%s" % syndicate_port, urlpath )
else:
return scheme + os.path.join( syndicate_host.strip("/"), urlpath ) | 5,355,062 |
def display(ip=None, port=5555, device_id=None, debug=False):
"""
    Get the device's screen display information
    :param ip: address
    :param port: port (default 5555)
    :param device_id: device ID
    :param debug: debug switch (off by default)
    :return: nothing
"""
adb_core.shell('dumpsys display | grep DisplayDeviceInfo', ip=ip, port=port, device_id=device_id, debug=debug)
pass | 5,355,063 |
def clifford_canonical_F(
pauli_layer: List[int], gamma: np.ndarray, delta: np.ndarray
) -> Circuit:
"""
Returns a Hadamard free Clifford circuit using the canonical form of elements of the Borel group
introduced in https://arxiv.org/abs/2003.09412. The canonical form has the structure O P CZ CX where
O is a pauli operator, P is a layer of sqrt(Z) gates, CZ is a layer of CZ gates, and CX is a layer of
CX gates. The inputs describe on which qubits the gates in these layers act.
:param pauli_layer: Description of which Pauli gate should act on each qubits. This is an element of {0,1,2,3}^n
with 0 -> I, 1->X, 2->Y, 3->Z.
:type pauli_layer: List[int]
:param gamma: Describes on which qubits CX acts. In particular the circuit contains CX_{i,j} if
gamma[i][j]=1. The gates are ordered such the control qubit index increases with time.
:type gamma: List[List[int]]
:param delta: Describes on which qubits CZ acts. In particular the circuit contains CX_{i,j} if
delta[i][j]=1. The gates are ordered such the control qubit index increases with time. The circuit include S_i
if delta[i][i]=1.
:type delta: List[List[int]]
:return: A Hadamard free Clifford circuit.
:rtype: Circuit
"""
circ = Circuit(len(pauli_layer))
# Add layer of CX gates
for j in range(len(delta)):
for i in range(j):
if delta[i][j]:
circ.CX(i, j, opgroup="Clifford 2")
# Add layer of CZ gates
for j in range(len(gamma)):
for i in range(j):
if gamma[i][j]:
circ.CZ(i, j, opgroup="Clifford 2")
# Add layer of S gates
for i in range(len(gamma)):
if gamma[i][i]:
circ.S(i, opgroup="Clifford 1")
# Add Pauli gate
for i, gate in enumerate(pauli_layer):
if gate == 0:
circ.X(i, opgroup="Clifford 1")
elif gate == 1:
circ.Y(i, opgroup="Clifford 1")
elif gate == 2:
circ.Z(i, opgroup="Clifford 1")
return circ | 5,355,064 |
def calculate_second_moment_nondegenerate(
mu1: float, mu2: float, sigma1: float, sigma2: float, a: float, alpha: float
) -> float:
"""The second (raw) moment of a random variable :math:`\\min(Y_1, Y_2)`.
Args:
mu1: mean of the first Gaussian random variable :math:`Y_1`
mu2: mean of the second Gaussian random variable :math:`Y_2`
sigma1: standard deviation of the first Gaussian random variable :math:`Y_1`
sigma2: standard deviation of the second Gaussian random variable :math:`Y_2`
a: value of a(X1, X2)
alpha: value of alpha(X1, X2)
Note:
For a Gaussian variable, the relationship between the raw second moment, mean, and the standard deviation
(which is calculated using the *central* moment) is
.. math::
\\nu_2 = \\nu_1^2 + \\sigma^2
"""
# The first, second and third term
first = (mu1 ** 2 + sigma1 ** 2) * numeric.normal_cdf(alpha)
secnd = (mu2 ** 2 + sigma2 ** 2) * numeric.normal_cdf(-alpha)
third = (mu1 + mu2) * a * numeric.normal_pdf(alpha)
return first + secnd - third | 5,355,065 |
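In the code's notation, with Φ and φ the standard normal CDF and PDF (as computed by `numeric.normal_cdf` / `numeric.normal_pdf`), the returned value is:

```latex
\mathbb{E}\!\left[\min(Y_1, Y_2)^2\right]
  = \bigl(\mu_1^2 + \sigma_1^2\bigr)\,\Phi(\alpha)
  + \bigl(\mu_2^2 + \sigma_2^2\bigr)\,\Phi(-\alpha)
  - (\mu_1 + \mu_2)\,a\,\varphi(\alpha).
```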
def game_loop():
"""
The core game loop, handling input, rendering and logic.
"""
while not state.get_current() == GameState.EXIT_GAME:
# progress frame
delta_time = state.update_clock()
# get info to support UI updates and handling events
current_state = state.get_current()
turn_holder = chronicle.get_turn_holder()
# process any deletions from last frame
snecs.process_pending_deletions(default_world)
# have enemy take turn
if current_state == GameState.GAMEMAP and turn_holder != world.get_player():
# just in case the turn holder has died but not been replaced as expected
try:
world.take_turn(turn_holder)
except AttributeError:
chronicle.rebuild_turn_queue()
# update based on input events
for event in pygame.event.get():
input_processors.process_event(event, current_state)
ui.process_ui_events(event)
# allow everything to update in response to new state
display_processors.process_display_updates(delta_time)
debug.update()
ui.update(delta_time)
# show the new state
ui.draw() | 5,355,066 |
def q_make( x, y, z, angle):
"""q_make: make a quaternion given an axis and an angle (in radians)
notes:
- rotation is counter-clockwise when rotation axis vector is
pointing at you
- if angle or vector are 0, the identity quaternion is returned.
double x, y, z : axis of rotation
double angle : angle of rotation about axis in radians
"""
length=0
cosA=0
sinA=0
destQuat = [0.0,0.0,0.0,0.0]
#/* normalize vector */
length = sqrt( x*x + y*y + z*z )
#/* if zero vector passed in, just return identity quaternion */
if ( length < Q_EPSILON ) :
destQuat[X] = 0
destQuat[Y] = 0
destQuat[Z] = 0
destQuat[W] = 1
        return destQuat
x /= length
y /= length
z /= length
cosA = cos(angle / 2.0)
sinA = sin(angle / 2.0)
destQuat[W] = cosA
destQuat[X] = sinA * x
destQuat[Y] = sinA * y
destQuat[Z] = sinA * z
return destQuat | 5,355,067 |
def follow(file):
"""generator function that yields new lines in a file
"""
# seek the end of the file
file.seek(0, os.SEEK_END)
# start infinite loop
while True:
# read last line of file
line = file.readline()
# sleep if file hasn't been updated
if not line:
time.sleep(0.1)
continue
yield line | 5,355,068 |
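A usage sketch (the log path is hypothetical): tail a file and echo new lines as they appear.

```python
if __name__ == "__main__":
    with open("app.log") as logfile:
        for new_line in follow(logfile):
            print(new_line, end="")
```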
def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pymssql.
"""
return create_engine(
_create_mssql_pymssql(username, password, host, port, database),
**kwargs
) | 5,355,069 |
def get_right_list_elements(result):
"""Some of the results are empty - therefore, the try-except.
Others are lists with more than one element and only specific
elements are relevant.
Args:
result (dict of lists): result of the xpath elements.
Returns:
dict of strs
"""
for key in ["title", "ort", "merkmale", "weitere_eigenschaften", "beschreibung"]:
try:
result[key] = result[key][0]
except:
pass
for key in ["preis", "anzahl_raeume", "wohnflaeche", "grundstuecksflaeche"]:
try:
result[key] = result[key][1]
except:
pass
return result | 5,355,070 |
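A usage sketch with a made-up scrape result (keys and values are illustrative only):

```python
raw = {
    "title": ["Nice flat"],
    "ort": [],                         # empty -> left unchanged by the try/except
    "preis": ["Kaufpreis", "250000"],  # second element is the relevant one
}
print(get_right_list_elements(raw))
# {'title': 'Nice flat', 'ort': [], 'preis': '250000'}
```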
async def test_get_display(aresponses: ResponsesMockServer) -> None:
"""Test getting display information."""
aresponses.add(
"127.0.0.2:4343",
"/api/v2/device/display",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("display.json"),
),
)
async with aiohttp.ClientSession() as session:
demetriek = LaMetricDevice(host="127.0.0.2", api_key="abc", session=session)
display = await demetriek.display()
assert display
assert display.brightness == 100
assert display.brightness_mode is BrightnessMode.AUTO
assert display.width == 37
assert display.height == 8
assert display.display_type is DisplayType.MIXED | 5,355,071 |
def test_short_file_name_with_ALWAYS():
"""Tests how Boost Jam handles the case when a Windows short file name is
passed to the builtin ALWAYS rule.
"""
if ( not BoostBuild.windows ):
return
t = BoostBuild.Tester(pass_toolset=0)
long_file_name1 = "1__target that should be rebuilt.txt"
long_file_name2 = "2__target that should be rebuilt.txt"
short_file_name2 = "2__tar~1.txt"
prepare_file(t, long_file_name1)
prepare_file(t, long_file_name2)
t.write("testScript.jam", """
actions create-file
{
echo Modified file content ($(1:E="")).> "$(1:E="")"
}
ALWAYS "%(long_file_name1)s" ;
DEPENDS all : "%(long_file_name1)s" ;
create-file "%(long_file_name1)s" ;
ALWAYS "%(short_file_name2)s" ;
DEPENDS all : "%(long_file_name2)s" ;
create-file "%(long_file_name2)s" ;
""" % {'long_file_name1': long_file_name1,
'long_file_name2' : long_file_name2,
'short_file_name2' : short_file_name2})
t.run_build_system("-ftestScript.jam")
t.expect_modification(long_file_name1)
t.expect_modification(long_file_name2)
t.expect_nothing_more()
t.cleanup() | 5,355,072 |
def gen_pixloc(frame_shape, xgap=0, ygap=0, ysize=1., gen=True):
"""
Generate an array of physical pixel coordinates
Parameters
----------
frame : ndarray
uniformly illuminated and normalized flat field frame
xgap : int (optional)
ygap : int (optional)
ysize : float (optional)
gen : bool, optional
Only allows True right now
Returns
-------
locations : ndarray
A 3D array containing the x center, y center, x width and y width of each pixel.
The returned array has a shape: frame.shape + (4,)
"""
#dnum = settings.get_dnum(det)
msgs.info("Deriving physical pixel locations on the detector")
locations = np.zeros((frame_shape[0],frame_shape[1],4))
if gen:
msgs.info("Pixel gap in the dispersion direction = {0:4.3f}".format(xgap))
msgs.info("Pixel size in the dispersion direction = {0:4.3f}".format(1.0))
xs = np.arange(frame_shape[0]*1.0)*xgap
xt = 0.5 + np.arange(frame_shape[0]*1.0) + xs
msgs.info("Pixel gap in the spatial direction = {0:4.3f}".format(ygap))
msgs.info("Pixel size in the spatial direction = {0:4.3f}".format(ysize))
ys = np.arange(frame_shape[1])*ygap*ysize
yt = ysize*(0.5 + np.arange(frame_shape[1]*1.0)) + ys
xloc, yloc = np.meshgrid(xt, yt)
# xwid, ywid = np.meshgrid(xs,ys)
msgs.info("Saving pixel locations")
locations[:,:,0] = xloc.T
locations[:,:,1] = yloc.T
locations[:,:,2] = 1.0
locations[:,:,3] = ysize
else:
msgs.error("Have not yet included an algorithm to automatically generate pixel locations")
return locations | 5,355,073 |
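# Hypothetical usage sketch: shows the shape contract of gen_pixloc. It assumes
# the module-level `msgs` logger used inside the function is importable, so it
# is not fully standalone.
locations = gen_pixloc((2048, 1024))
assert locations.shape == (2048, 1024, 4)  # x center, y center, x width, y width per pixel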
def select(df: pd.DataFrame, time_key,
from_time='00-00-00 00', to_time='99-01-01 00'):
"""
    :param df: input DataFrame
    :param time_key: name of the column holding sortable time strings
    :param from_time: inclusive lower bound on the time column
    :param to_time: exclusive upper bound on the time column
:return:
:rtype: pandas.DataFrame
"""
select_index = (df[time_key] >= from_time) & (df[time_key] < to_time)
return df.loc[select_index, :].reset_index(drop=True) | 5,355,074 |
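# Hypothetical usage sketch: filters rows whose time string falls in
# [from_time, to_time). Values are compared lexicographically, so they must
# share a sortable format such as 'YY-MM-DD HH' (as the defaults suggest);
# the column and values below are illustrative.
import pandas as pd

_df = pd.DataFrame({
    "timestamp": ["21-01-01 00", "21-01-02 12", "21-02-01 00"],
    "value": [1, 2, 3],
})
_subset = select(_df, "timestamp", from_time="21-01-01 00", to_time="21-02-01 00")
# _subset keeps the first two rows and gets a fresh 0..n-1 index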
def load_apogee_distances(dr=None, unit='distance', cuts=True, extinction=True, keepdims=False):
"""
Load apogee distances (absolute magnitude from stellar model)
:param dr: Apogee DR
:type dr: int
:param unit: which unit you want to get back
- "absmag" for absolute magnitude
- "fakemag" for fake magnitude
- "distance" for distance in parsec
:type unit: string
:param cuts: Whether to cut bad data (negative parallax and percentage error more than 20%), or a float to set the threshold
:type cuts: Union[boolean, float]
:param extinction: Whether to take extinction into account, only affect when unit is NOT 'distance'
:type extinction: bool
:param keepdims: Whether to preserve indices the same as APOGEE allstar DR14, no effect when cuts=False, set to -9999 for bad indices when cuts=True keepdims=True
:type keepdims: boolean
:return: numpy array of ra, dec, array, err_array
:rtype: ndarrays
:History:
| 2018-Jan-25 - Written - Henry Leung (University of Toronto)
| 2021-Jan-29 - Updated - Henry Leung (University of Toronto)
"""
fullfilename = apogee_distances(dr=dr)
with fits.open(fullfilename) as F:
hdulist = F[1].data
# Convert kpc to pc
distance = hdulist['BPG_dist50'] * 1000
dist_err = (hdulist['BPG_dist84'] - hdulist['BPG_dist16']) * 1000
allstarfullpath = allstar(dr=dr)
with fits.open(allstarfullpath) as F:
k_mag = F[1].data['K']
if extinction:
k_mag = extinction_correction(k_mag, F[1].data['AK_TARG'])
ra = F[1].data['RA']
dec = F[1].data['DEC']
# Bad index refers to nan index
bad_index = np.argwhere(np.isnan(distance))
if unit == 'distance':
# removed astropy units because of -9999. is dimensionless, will have issues
output = distance
output_err = dist_err
elif unit == 'absmag':
absmag, absmag_err = mag_to_absmag(k_mag, 1 / distance * u.arcsec, (1 / distance) * (dist_err / distance))
output = absmag
output_err = absmag_err
elif unit == 'fakemag':
# fakemag requires parallax (mas)
fakemag, fakemag_err = mag_to_fakemag(k_mag, 1000 / distance * u.mas, (1000 / distance) * (dist_err / distance))
output = fakemag
output_err = fakemag_err
else:
raise ValueError('Unknown unit')
# Set the nan index to -9999. as they are bad and unknown. Not magic_number as this is an APOGEE dataset
output[bad_index], output_err[bad_index] = -9999., -9999.
if cuts is False:
pass
else:
distance[bad_index], dist_err[bad_index] = -9999., -9999.
good_idx = ((dist_err / distance < (0.2 if cuts is True else cuts)) & (distance != -9999.))
if not keepdims:
ra = ra[good_idx]
dec = dec[good_idx]
output = output[good_idx]
output_err = output_err[good_idx]
else:
output[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
output_err[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
return ra, dec, output, output_err | 5,355,075 |
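# Hypothetical usage sketch: assumes the astroNN APOGEE data files referenced
# above (apogee_distances and allstar) have already been downloaded; dr=14 and
# the 0.1 error cut are illustrative choices, not prescribed values.
ra, dec, fakemag, fakemag_err = load_apogee_distances(dr=14, unit='fakemag',
                                                      cuts=0.1, keepdims=True)
# With keepdims=True the arrays keep allstar's row order and bad rows are -9999.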
def userstudy(config, data_train):
"""
Update the model based on feedback from user study.
- [config]: hyperparameters for model fine-tuning
- [data_train]: data pool to sample from
"""
def preprocess_data(doc, queries):
"""
Create a new field in [doc] called [antecedent_map] which processes
the user-labeled [antecedents]. Add all labeled spans to [queries].
"""
ante_map = {}
for entry in doc['antecedents']:
span = tuple(entry[0])
if entry[1] == -1:
label = None
elif entry[1] == 0:
label = '0'
else:
label = [tuple(entry[1])]
ante_map[span] = label
doc['antecedent_map'] = ante_map
del doc['antecedents']
# update queries to know what has been queried
queries[doc['doc_key']] = list(ante_map.keys())
# return # spans labeled
return len(ante_map)
# preprocess antecedents and get queries
data_fp = config['userstudy'] / 'train_data.jsonl'
data = []
queries = defaultdict(list)
num_queries = 0
with open(data_fp, 'r') as f:
for line in f:
doc = json.loads(line)
# update doc and queries
n = preprocess_data(doc, queries)
num_queries += n
data.append(doc)
# finetune model on data
src_path = config['src_path']
logging.info(
f'Finetuning src model on {num_queries} queries from {len(data)} docs'
)
scores_dev, model = finetune_on_queries(config, data, config['userstudy'], src_path)
# test model
results_fp = config['userstudy'] / 'results_test.json'
scores_test = eval_scores(model, config, "test")
output_results(results_fp, config, 1, scores_test) | 5,355,076 |
def clique_create(request):
"""
Creates a new grouping in the database (this integration must be stored in the db to be useful)
Arguments: /group-create "groupname" "@user1 @user2"
"""
requesting_user_id = request.POST.get('user_id')
args = re.findall(DOUBLE_QUOTE_ARG_REGEX, request.POST.get("text"))
# Check to see if everything looks right
if len(args) != 2:
return make_clique_group_error("Error in arguments (Double quotes are required!). Usage:\n"
"`/group-create \"groupName\" \"@user1 @user2\"")
if CliqueGroup.objects.filter(name=args[0]).count() > 0:
return make_clique_group_error("This group <{}> already exists!".format(args[0]))
# Move on to creating the group
raw_group_members = re.findall(SLACK_ID_REGEX, args[1])
group_users = []
for slack_id in raw_group_members:
try:
group_users.append(CliqueUser.objects.get(slack_id=slack_id))
except CliqueUser.DoesNotExist:
# This is the first time that we've seen this user
# we need to add them to the db
new_user = CliqueUser(slack_id=slack_id)
new_user.save()
group_users.append(new_user)
# Case where the owner is 1) new and 2) not in the group
try:
CliqueUser.objects.get(slack_id=requesting_user_id)
except CliqueUser.DoesNotExist:
# This is the first time that we've seen this user
# we need to add them to the db
CliqueUser(slack_id=requesting_user_id).save()
new_group = CliqueGroup(
creator=CliqueUser.objects.get(slack_id=requesting_user_id),
name=args[0]
)
new_group.save()
for clique_user in group_users:
new_group.members.add(clique_user)
new_group.save()
# Testing response string
resp_string = 'Group <{0}> has been created with users:'.format(args[0])
resp_string += ' '.join(format_user(user.slack_id) for user in new_group.members.all())
return JsonResponse({"replace_original": True, "text": resp_string}) | 5,355,077 |
def cg_atoms(atoms, units, sites, scale, scaleValue, siteMap, keepSingleAtoms,
package):
"""
Get positions for atoms in the coarse-grained structure and the final
bond description. Returns a dictionary of the lattice, fractional
coordinates, and bonds. Also provides the option to scale the lattice.
Args
----
atoms: pymatgen.core.Structure
Pymatgen Structure object.
units: list
List of tuple(atomIndex, Image) for all atoms found in the building unit
so far in the algorithm.
sites: list
Specifying atoms in each site-type. One list per site-type. I.e. for
ZIF-8 (Zn(mIm)2) Zn is an A site, and the C, N, H (imidazolate ring)
are B sites, so you would pass:
scale: str
Scaling method to be used. Currently supported:
"min_xx": minimum bond length between any atoms.
"min_ab": minimum bond length between building units.
"avg_ab": average bond length between building units.
scaleValue: float
Length (Å) to scale the characteristic bond length (defined by
"scale") to.
siteMap: list
A list of atoms to map each building unit to. Should be of the same
length as the number of site-types. E.g. to map Zn(mIm)2 to a
coarse-grained structure,
siteMap = ["Si", "O"]
would map all A sites (Zn) to Si, and all B sites (mIm) to O. If
not set, will default to "Dummy Species" with labels DA, DB, DC, ...
Note if creating an ASE Atoms object, real atoms must be used, and
so siteMap *must* be set.
keepSingleAtoms: bool
If True, the chemical identity of the single atom building units
will be preserved. E.g. for BIF-1-Li ( [LiB(im)]4 ) where Li and B
are A sites, the final coarse-grained structure would keep the Li
and B atoms, but add dummy species for the imidazolate units.
package: str
"pymatgen" or "ase". If set, will return the Structure/Atoms object
of the specified package, respectively. As noted in siteMap, ASE
requires that real elements are set for the Atoms object.
"""
# Extract unit cell.
lattice = atoms.lattice.copy()
# Extract labels, positions, and images for each building unit.
l, p, _ = zip(*[(l,*u.frac_img) for l,u in units.items()])
# Extract bonds in format consistent with TopoCIF specification; i.e.
# node1_label, node2_label, distance, sym_op1, x1, y1, z1, sym_op2,
# x2, y2, z2, link_type, multiplicity. There will be a list of tuples,
# one tuple per unit, and the length of each tuple will be the number of
# bonds stored.
b = [u.unit_bonds for u in units.values()]
# Determine scaling type now, because can avoid calling next section
# twice to calculate the bond distances if it is "min_xx" scaling.
if scale is not None:
scale = scale.lower()
if scale == "min_xx":
# Get all distances (ignoring self-distances along diagonal).
d = lattice.get_all_distances(p,p)
np.fill_diagonal(d, 1000)
# Get scale factor and scale the lattice to the new volume.
sf = ( scaleValue / np.amin(d) )**3
lattice = lattice.scale(lattice.volume * sf)
elif scale in ["min_ab", "avg_ab"]:
# Get the bond distances from the formatted bonds.
_, d = format_bonds(lattice,l,p,b,return_lengths=True)
# Get scale factor and scale the lattice to new volume.
if scale == "min_ab":
sf = ( scaleValue / np.amin(d) )**3
elif scale == "avg_ab":
sf = ( scaleValue / np.mean(d) )**3
lattice = lattice.scale(lattice.volume * sf)
else:
warnings.warn(f"Scale method {scale} is not supported.")
# Get the final TopoCIF-formatted bonds.
b = format_bonds(lattice, l, p, b)
# The atomMap must provide a one-to-one mapping for every site-type
# in the structure.
    assert len(siteMap) == len(sites), "Provide a one-to-one " + \
f"mapping of dummy-sites to atomic symbols " + \
f"({len(sites)} != {len(siteMap)})"
# Relabel each atom with a new symbol.
l, symbols, b = relabel(units, siteMap, keepSingleAtoms, b)
# Sort structure information into a dictionary.
s_info = { "lattice": lattice,
"symbols": symbols,
"labels": l,
"frac_coords": p,
"bonds": b }
# If package specified return either a Pymatgen Structure object, or an ASE
# atoms object.
s = py_structure(s_info["lattice"],s_info["symbols"],s_info["frac_coords"])
if package is not None and package.lower() == "ase":
s = AseAtomsAdaptor.get_atoms(s)
return s_info, s | 5,355,078 |
def read_tickers(video, ocr = None, debug = False, **kwargs):
"""
Reads news stories from sliding tickers on video.
Returns lists of dictionaries which contain:
text: news story text
start time: time when news story shows up
end time: time when news story disappears
Each list corresponds to one ticker.
"""
if debug:
print('Language: ', video.language)
if ocr is None:
ocr = TesseractOCR(**kwargs)
tickers, height, width = get_tickers_hw(video)
kwargs['height'] = height
kwargs['width'] = width
ocr._preprocesses['height'] = height
if debug:
print('tickers')
print(tickers)
stories = []
for ticker in tickers:
stories.append(read_ticker(video, ticker, ocr, **kwargs))
return stories | 5,355,079 |
def compute_msa_weights(msa, threshold=.8):
"""
msa (Bio.Align.MultipleSeqAlignment): alignment for which sequence frequency based weights are to be computed
threshold (float): sequence identity threshold for reweighting
NOTE that columns where both sequences have a gap will not be taken into account when computing identity
"""
weights = np.zeros(len(msa))
seq_identities = np.zeros((len(msa), len(msa)))
for i in range(len(msa)):
for j in range(i+1, len(msa)):
seq_identities[i, j] = _compute_sequence_identity(msa[i], msa[j])
seq_identities = seq_identities + np.diag(np.ones(len(msa)))
ms = np.sum(seq_identities>threshold, 1)
weights = 1./ms
return weights | 5,355,080 |
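# Hypothetical usage sketch: builds a tiny Biopython alignment and reweights
# its sequences. It assumes the module's _compute_sequence_identity helper is
# available alongside compute_msa_weights and returns the fraction of
# identical (non double-gap) columns.
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

_msa = MultipleSeqAlignment([
    SeqRecord(Seq("ACDEFG"), id="seq1"),
    SeqRecord(Seq("ACDEFG"), id="seq2"),
    SeqRecord(Seq("MKLWPQ"), id="seq3"),
])
_w = compute_msa_weights(_msa, threshold=.8)
# seq1 and seq2 are identical, so each should get weight 0.5; seq3 gets 1.0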
def do_CreateRedis(client, args):
""" Create redis """
val = client.CreateRedis(args.mem,
duration=args.duration,
name=args.name,
zone=args.zone)
utils.print_dict(val) | 5,355,081 |
def search_storefront(client, phrase):
"""Execute storefront search on client matching phrase."""
resp = client.get(reverse("search:search"), {"q": phrase})
return [prod for prod, _ in resp.context["results"].object_list] | 5,355,082 |
def is_repo_in_config(config, repo, rev, hook_id):
"""Get if a repository is defined in a pre-commit configuration.
Parameters
----------
config : dict
Pre-commit configuration dictionary.
repo : str
Repository to search.
rev : str
Repository tag revision.
    hook_id : str
        Hook identifier.
Returns
-------
dict : Information about if the repository and the hook have been found.
"""
response = {"repo_found": False, "hook_found": False, "same_rev": False}
for repo_ in config["repos"]:
if repo_["repo"] == repo:
response["repo_found"] = True
response["hook_found"] = hook_id in [hook["id"] for hook in repo_["hooks"]]
response["same_rev"] = repo_["rev"] == rev
break
return response | 5,355,083 |
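# Hypothetical usage sketch: checks a minimal pre-commit config dict (the same
# shape produced by parsing .pre-commit-config.yaml); repo URL and rev are
# illustrative values.
_config = {
    "repos": [
        {
            "repo": "https://github.com/psf/black",
            "rev": "23.1.0",
            "hooks": [{"id": "black"}],
        }
    ]
}
info = is_repo_in_config(_config, "https://github.com/psf/black", "23.1.0", "black")
# info == {"repo_found": True, "hook_found": True, "same_rev": True}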
def mat33_to_quat(mat):
"""
Convert matrix to quaternion.
:param mat: 3x3 matrix
:return: list, quaternion [x, y, z, w]
"""
wxyz = transforms3d.quaternions.mat2quat(mat)
return [wxyz[1], wxyz[2], wxyz[3], wxyz[0]] | 5,355,084 |
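# Hypothetical usage sketch: the identity rotation maps to the identity
# quaternion, returned here in [x, y, z, w] order.
import numpy as np

quat = mat33_to_quat(np.eye(3))
# quat == [0.0, 0.0, 0.0, 1.0]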
def reshape(x, new_shape):
"""
Reshapes a tensor without changing its data.
Args:
x (Tensor): A tensor to be reshaped.
new_shape (Union[int, list(int), tuple(int)]): The new shape should be
compatible with the original shape. If the tuple has only one element,
the result will be a 1-D tensor of that length. One shape dimension
can be :math:`-1`. In this case, the value is inferred from the length of
the tensor and remaining dimensions.
Returns:
Reshaped Tensor. Has the same data type as the original tensor `x`.
Raises:
TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
ValueError: If new_shape is not compatible with the original shape.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
>>> output = np.reshape(x, (3, 2))
>>> print(output)
[[-0.1 0.3]
[ 3.6 0.4]
[ 0.5 -3.2]]
>>> output = np.reshape(x, (3, -1))
>>> print(output)
[[-0.1 0.3]
[ 3.6 0.4]
[ 0.5 -3.2]]
>>> output = np.reshape(x, (6, ))
>>> print(output)
[-0.1 0.3 3.6 0.4 0.5 -3.2]
"""
_check_input_tensor(x)
return x.reshape(new_shape) | 5,355,085 |
def update_depth(depth_grid, elapsed_ts, depth_factor):
"""Just in time Update Depth for lake to pond
Parameters
----------
depth_grid: np.array like (float)
grid of current lake depths
elapsed_ts: float
number timesteps since start year
    depth_factor: float
        divisor applied to np.sqrt(elapsed_ts); larger values slow the deepening
Returns
-------
np.array
updated depth grid
"""
new = np.zeros(depth_grid.shape)
for row in range(depth_grid.shape[0]):
        for col in range(depth_grid.shape[1]):
new[row,col] = \
depth_grid[row,col] + (np.sqrt(elapsed_ts) / depth_factor)
return new | 5,355,086 |
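# Equivalent vectorized sketch: because the same increment is added to every
# cell, the double loop above collapses to a single numpy expression. This is
# an alternative formulation, not part of the original routine.
import numpy as np

def update_depth_vectorized(depth_grid, elapsed_ts, depth_factor):
    """Same result as update_depth, without explicit loops."""
    return depth_grid + np.sqrt(elapsed_ts) / depth_factor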
def get_submissions(config, event_name, state='new'):
"""
Retrieve a list of submissions and their associated files
depending on their current status
Parameters
----------
config : dict
configuration
event_name : str
name of the RAMP event
state : str, optional
state of the requested submissions (default is 'new')
Returns
-------
List of tuples (int, List[str]) :
(submission_id, [path to submission files on the db])
Raises
------
ValueError :
        when mandatory connection parameters are missing from config
UnknownStateError :
when the requested state does not exist in the database
"""
if state not in STATES:
raise UnknownStateError("Unrecognized state : '{}'".format(state))
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submissions = select_submissions_by_state(session, event_name, state)
if not submissions:
return []
subids = [submission.id for submission in submissions]
subfiles = [submission.files for submission in submissions]
filenames = [[f.path for f in files] for files in subfiles]
return list(zip(subids, filenames)) | 5,355,087 |
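# Hypothetical usage sketch: the config keys follow what
# sqlalchemy.engine.url.URL(**config) accepts; all values below are
# placeholders, and "iris_test" is an illustrative event name.
_db_config = {
    "drivername": "postgresql",
    "username": "ramp_user",
    "password": "ramp_password",
    "host": "localhost",
    "port": 5432,
    "database": "ramp_db",
}
pending = get_submissions(_db_config, "iris_test", state="new")
for submission_id, files in pending:
    print(submission_id, files)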
def split_dataframe(df, size=10*1024*1024):
"""Splits huge dataframes(CSVs) into smaller segments of given size in bytes"""
# size of each row
row_size = df.memory_usage().sum() / len(df)
# maximum number of rows in each segment
row_limit = int(size // row_size)
# number of segments
seg_num = (len(df)+row_limit-1)//row_limit
# split df into segments
segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]
return segments | 5,355,088 |
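# Hypothetical usage sketch: split a frame into roughly 1 KB chunks and check
# that the pieces cover the original row count.
import numpy as np
import pandas as pd

_big = pd.DataFrame({"x": np.arange(10_000), "y": np.arange(10_000) * 2.0})
_parts = split_dataframe(_big, size=1024)
assert sum(len(p) for p in _parts) == len(_big)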
def convert_table_codes(input_filename: Path, output_filename: Path = None, column: str = 'countryCode',
namespace: Optional[str] = None, fuzzy:int = 0) -> Path:
"""
Adds a 'regionCode' column to the given table containing iso-3 country codes.
Parameters
----------
input_filename: Path
output_filename: Path
	column: str, default 'countryCode'
namespace: {'iso2', 'iso3', 'm49'}; default None
fuzzy: int; default 0
The score to use when fuzzy matching when above 0. If 0, the regular code search is used instead.
Returns
-------
path: Path
Location of the output table.
"""
table = load_table(input_filename)
if column not in table.columns:
message = "'{}' is not a valid column. Expected one of {}".format(column, list(table.columns))
raise ValueError(message)
old_values = table[column].values
if fuzzy:
new_values = [fuzzy_search(i,fuzzy) for i in old_values]
else:
new_values = [get_codes(i, namespace) for i in old_values]
new_values = [(v['iso3'] if v else v) for v in new_values]
table['regionCode'] = new_values
if output_filename is None:
output_filename = input_filename.with_suffix('.edited.tsv')
elif output_filename.is_dir():
output_filename = output_filename / input_filename.name
opath = save_table(table, output_filename)
return opath | 5,355,089 |
def add_message(exception, message):
"""
Embeds an error message into an exception that can be retrieved by
try_get_error_message().
Parameters
----------
exception : Exception
message : str
"""
exception.args += (_Message(message),) | 5,355,090 |
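# Hypothetical usage sketch: attach extra context to a caught exception; the
# file name and row number are placeholder text. The embedded _Message rides
# along in exc.args.
try:
    raise ValueError("bad input")
except ValueError as exc:
    add_message(exc, "while parsing row 42 of input.csv")
    print(exc.args)  # original args plus the embedded message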
def allow_view(user):
"""Is the current user allowed to view the user account?
Yes, if current user is admin, staff or self.
"""
if not flask.g.current_user: return False
if flask.g.am_admin: return True
if flask.g.am_staff: return True
if flask.g.current_user['username'] == user['username']: return True
return False | 5,355,091 |
def httptimestamp(inhttpdate):
"""
Return timestamp from RFC1123 (HTTP/1.1).
"""
dat = datetime.datetime(*eut.parsedate(inhttpdate)[:5])
return int(time.mktime(dat.timetuple())) | 5,355,092 |
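# Hypothetical usage sketch: note that time.mktime interprets the parsed
# fields in the local timezone and the seconds field is dropped by the [:5]
# slice, so the result is a local-time, minute-resolution Unix timestamp.
ts = httptimestamp("Sun, 06 Nov 1994 08:49:37 GMT")
print(ts)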
def main():
"""Read a theme list file, and moves file and create a fixed theme list."""
tml = read(
r"c:\tmp\xxx\AKR Theme List.tml"
) # r"X:\GIS\ThemeMgr\AKR Theme List.tml")
path_maps = build_file_mapping(
"data/tmpaths.txt", "data/moves_extra.csv"
) # data/PDS Moves - inpakrovmdist%5Cgisdata.csv')
for old, new in path_maps:
print(old, new)
tml = tml.replace(old, new)
save(
tml, r"c:\tmp\xxx\AKR Theme List1.tml"
) | 5,355,093 |
def calculate_duration(start_date, end_date=None):
""" Calculate how many years and months have passed between start and end dates """
# If end date not defined, use current date
if not end_date:
end_date = datetime.date.today()
years = end_date.year - start_date.year
months = end_date.month - start_date.month
if months < 0:
years = years - 1
months = months + 12
return years, months | 5,355,094 |
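# Hypothetical usage sketch: from May 2020 to February 2023 is 2 years and
# 9 months under this month-borrowing rule.
import datetime

years, months = calculate_duration(datetime.date(2020, 5, 1),
                                   datetime.date(2023, 2, 1))
assert (years, months) == (2, 9)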
def write(path, *content):
"""
    Write content out to a file, creating any missing parent directories.
    :param path: destination path
    :param content: content to append to the file
:return:
"""
    # Normalize separators: some callers use '/', others use '\\'
_sep_path = []
s = path.split('/')
[_sep_path.extend(item.split('\\')) for item in s]
_path = ''
for i in _sep_path:
_end = _sep_path[len(_sep_path) - 1]
if i != _end:
_path += str(i) + os.sep
else:
_path += str(i)
if not os.path.exists(_path):
if '.' not in i:
os.makedirs(_path)
_write_content = logTupleToText(True, *content)
with open(os.path.join(_path), mode="a", encoding="UTF-8") as f:
f.write(_write_content)
f.close() | 5,355,095 |
def GetSourceRoot(filename):
"""Try to determine the root of the package which contains |filename|.
The current heuristic attempts to determine the root of the Chromium source
tree by searching up the directory hierarchy until we find a directory
containing src/.gn.
"""
# If filename is not absolute, then we are going to assume that it is
# relative to the current directory.
if not os.path.isabs(filename):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
raise NoSourceRootError('File not found: {}'.format(filename))
source_root = os.path.dirname(filename)
while True:
gnfile = os.path.join(source_root, 'src', '.gn')
if os.path.exists(gnfile):
return source_root
new_package_root = os.path.dirname(source_root)
if new_package_root == source_root:
raise NoSourceRootError("Can't determine package root")
source_root = new_package_root | 5,355,096 |
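# Hypothetical usage sketch: the path below is illustrative, and the sketch
# assumes the module's NoSourceRootError is in scope. The walk stops at the
# first ancestor directory containing src/.gn.
try:
    root = GetSourceRoot('/home/dev/chromium/src/base/files/file_util.cc')
    print(root)  # e.g. /home/dev/chromium
except NoSourceRootError as error:
    print(error)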
def actives(apikey: str) -> typing.List[typing.Dict]:
"""
Query FMP /actives/ API
:param apikey: Your API key.
:return: A list of dictionaries.
"""
path = f"actives"
query_vars = {"apikey": apikey}
return __return_json_v3(path=path, query_vars=query_vars) | 5,355,097 |
def write_json_file(data: dict, filename: str) -> None:
"""
Write a JSON file.
Example:
{'a': 1, 'b': {'c': 3, 'd': 4}}
{
"a": 1,
"b": {
"c": 3,
"d": 4
}
}
:type data: dict
:param data: data to write
:type filename: str
:param filename: name of file
:rtype: None
:return: None
"""
with open(filename, 'w') as f:
json.dump(data, f, indent=2) | 5,355,098 |
def find_sue_de_coq(board: Board):
"""we look at each intersection (3 cells) of a block and a row/col:
- we need either two cells containing (together) 4 distinct candidates or
three cells containing (together) 5 distinct candidates
- now we need to find two bi-value cells:
a. one in the row/col outside the intersection/block
b. one in the block outside the intersection/row/col
- the two bi-value cells' candidates must be drawn entirely from the
4/5 candidates above
- the two bi-value cells must have disjunct candidates
We may then invalidate the two row cell candidates from the rest of the cells in the row (except intersection cells)
and invalidate the two block cell candidates from the rest of the cells in the block (except intersection cells).
We may also invalidate any intersection candidate (the 4/5 candidates) that is left (i.e. in neither bi-value
cells) from both the row and the block (but not from the intersection).
"""
blocks = board.get_all_houses(house_type=HouseType.BLOCK)
for block in blocks:
block: Block
# get crossing columns and rows and try out each combination
crossing_houses = block.get_crossing_houses()
for crossing_house in crossing_houses:
preview = _find_sue_de_coq_in_intersection(board=board, block=block, crossing_house=crossing_house)
if preview:
board.notify_preview(preview=preview)
return | 5,355,099 |