def domain(self, domain=None, last_domain=None):
"""
Manage the case that we want to test only a domain.
:param domain: The domain or IP to test.
:type domain: str
:param last_domain:
The last domain to test if we are testing a file.
:type last_domain: str
"""
# We print the header.
self._print_header()
if domain:
# A domain is given.
# We format and set the domain we are testing and treating.
PyFunceble.INTERN["to_test"] = self._format_domain(domain)
else:
# A domain is not given.
# We set the domain we are testing and treating to None.
PyFunceble.INTERN["to_test"] = None
if PyFunceble.INTERN["to_test"]:
# The domain is given (Not None).
if PyFunceble.CONFIGURATION["syntax"]:
# The syntax mode is activated.
# We get the status from Syntax.
status = self.syntax_status.get()
else:
# We test and get the status of the domain.
status, _ = self.status.get()
# We run the file decision logic.
self._file_decision(PyFunceble.INTERN["to_test"], last_domain, status)
if PyFunceble.CONFIGURATION["simple"]:
# The simple mode is activated.
# We print the domain and the status.
print(PyFunceble.INTERN["to_test"], status)
# We return the tested domain and its status.
return PyFunceble.INTERN["to_test"], status
# We return None, there is nothing to test.
    return None
def tvBrowserExposure_selection_changed(self):
"""Update layer description label."""
(is_compatible, desc) = self.get_layer_description_from_browser(
'exposure')
self.lblDescribeBrowserExpLayer.setText(desc)
    self.parent.pbnNext.setEnabled(is_compatible)
def to_struct(self, value):
"""Cast `time` object to string."""
if self.str_format:
return value.strftime(self.str_format)
    return value.isoformat()
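# A minimal usage sketch for to_struct: the value is formatted with the
# field's strftime pattern when one is set, otherwise via isoformat(). The
# TimeField stand-in below is hypothetical; only the `str_format` attribute
# is taken from the method above.
import datetime

class TimeField:
    def __init__(self, str_format=None):
        self.str_format = str_format
    def to_struct(self, value):
        if self.str_format:
            return value.strftime(self.str_format)
        return value.isoformat()

t = datetime.time(13, 45, 30)
print(TimeField().to_struct(t))         # -> '13:45:30'
print(TimeField('%H:%M').to_struct(t))  # -> '13:45'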
def run(self):
"""
Run import.
"""
latest_track = Track.objects.all().order_by('-last_played')
latest_track = latest_track[0] if latest_track else None
importer = self.get_importer()
tracks = importer.run()
# Create/update Django Track objects for importer tracks.
for track in tracks:
# Only create/update if tracks with start times greater than what already exists are imported.
if not latest_track or not latest_track.last_played \
or track.start_time > latest_track.last_played:
obj = self.lookup_track(track)
# Don't update importing track that is regarded as the latest. This prevents start times from constantly incrementing.
if latest_track and obj == latest_track:
print "[%s-%s]: Start time not updated as it is the latest track." % (track.title, track.artist)
continue
# If no existing track object could be resolved, create it.
if not obj:
print "[%s-%s]: Created." % (track.title, track.artist)
obj = Track.objects.create(title=track.title)
obj.length = track.length
roles = MusicCreditOption.objects.all().order_by('role_priority')
role = roles[0].role_priority if roles else 1
obj.create_credit(track.artist, role)
else:
print "[%s-%s]: Not created as it already exists." % (track.title, track.artist)
# Update last played time to start time.
obj.last_played = track.start_time
obj.save()
print "[%s-%s]: Start time updated to %s." % (track.title, track.artist, track.start_time)
else:
print "[%s-%s]: Not created as it has a past start time of %s (latest %s). " % (track.title, track.artist, track.start_time, latest_track.last_played) | Run import. |
def rgba(self, val):
"""Set the color using an Nx4 array of RGBA floats"""
# Note: all other attribute sets get routed here!
# This method is meant to do the heavy lifting of setting data
rgba = _user_to_rgba(val, expand=False)
if self._rgba is None:
self._rgba = rgba # only on init
else:
self._rgba[:, :rgba.shape[1]] = rgba | Set the color using an Nx4 array of RGBA floats |
def apt_autoremove(purge=True, fatal=False):
"""Purge one or more packages."""
cmd = ['apt-get', '--assume-yes', 'autoremove']
if purge:
cmd.append('--purge')
    _run_apt_command(cmd, fatal)
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
"""Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
"""
# Do not extract metadata from the root file entry when it is virtual.
if file_entry.IsRoot() and file_entry.type_indicator not in (
self._TYPES_WITH_ROOT_METADATA):
return
# We always want to extract the file entry metadata but we only want
# to parse it once per file entry, so we only use it if we are
# processing the default data stream of regular files.
if data_stream and not data_stream.IsDefault():
return
display_name = mediator.GetDisplayName()
logger.debug(
'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(
display_name))
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
def str_blockIndent(astr_buf, a_tabs=1, a_tabLength=4, **kwargs):
"""
For the input string <astr_buf>, replace each '\n'
with '\n<tab>' where the number of tabs is indicated
by <a_tabs> and the length of the tab by <a_tabLength>
Trailing '\n' are *not* replaced.
"""
str_tabBoundary = " "
    for key, value in kwargs.items():
if key == 'tabBoundary': str_tabBoundary = value
b_trailN = False
length = len(astr_buf)
ch_trailN = astr_buf[length - 1]
if ch_trailN == '\n':
b_trailN = True
astr_buf = astr_buf[0:length - 1]
str_ret = astr_buf
str_tab = ''
str_Indent = ''
for i in range(a_tabLength):
str_tab = '%s ' % str_tab
str_tab = "%s%s" % (str_tab, str_tabBoundary)
for i in range(a_tabs):
str_Indent = '%s%s' % (str_Indent, str_tab)
str_ret = re.sub('\n', '\n%s' % str_Indent, astr_buf)
str_ret = '%s%s' % (str_Indent, str_ret)
if b_trailN: str_ret = str_ret + '\n'
    return str_ret
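# Usage sketch for str_blockIndent: each line gains a_tabs copies of a tab
# made of a_tabLength spaces plus the tabBoundary string (default a single
# space), so the default indent is five characters; a trailing '\n' survives.
import re  # str_blockIndent itself depends on re

block = "first line\nsecond line\n"
print(str_blockIndent(block))
#      first line
#      second line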
def gen_method_keys(self, *args, **kwargs):
'''Given a node, return the string to use in computing the
matching visitor methodname. Can also be a generator of strings.
'''
token = args[0]
for mro_type in type(token).__mro__[:-1]:
name = mro_type.__name__
        yield name
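# Dispatch sketch: the MRO walk above lets a visitor fall back to handlers
# for base classes (`object`, the last MRO entry, is skipped). Visitor, Node
# and BinOp are hypothetical examples, not classes from this module.
class Node: pass
class BinOp(Node): pass

class Visitor:
    def gen_method_keys(self, *args, **kwargs):
        token = args[0]
        for mro_type in type(token).__mro__[:-1]:  # skip `object`
            yield mro_type.__name__
    def visit(self, node):
        for key in self.gen_method_keys(node):
            method = getattr(self, 'visit_' + key, None)
            if method is not None:
                return method(node)
        raise NotImplementedError(type(node).__name__)
    def visit_Node(self, node):
        return 'generic node'

print(Visitor().visit(BinOp()))  # no visit_BinOp defined -> 'generic node'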
def findBinomialNsWithLowerBoundSampleMinimum(confidence, desiredValuesSorted,
p, numSamples, nMax):
"""
For each desired value, find an approximate n for which the sample minimum
has a probabilistic lower bound equal to this value.
For each value, find an adjacent pair of n values whose lower bound sample
minima are below and above the desired value, respectively, and return a
linearly-interpolated n between these two values.
@param confidence (float)
For the probabilistic lower bound, this specifies the probability. If this is
0.8, that means that there's an 80% chance that the sample minimum is >= the
desired value, and 20% chance that it's < the desired value.
    @param desiredValuesSorted (sequence)
    The desired lower-bound sample minima, in ascending order.
    @param p (float)
    The p of the binomial distribution.
    @param numSamples (int)
    The number of samples in the sample minimum distribution.
    @param nMax (int)
    The largest n to consider.
@return
A list of results. Each result contains
(interpolated_n, lower_value, upper_value).
where each lower_value and upper_value are the probabilistic lower bound
sample minimum for floor(interpolated_n) and ceil(interpolated_n)
respectively.
"""
def P(n, numOccurrences):
"""
Given n, return probability than the sample minimum is >= numOccurrences
"""
return 1 - SampleMinimumDistribution(numSamples, BinomialDistribution(n, p)).cdf(
numOccurrences - 1)
results = []
n = 0
for desiredValue in desiredValuesSorted:
while n + 1 <= nMax and P(n + 1, desiredValue) < confidence:
n += 1
if n + 1 > nMax:
break
left = P(n, desiredValue)
right = P(n + 1, desiredValue)
interpolated = n + ((confidence - left) /
(right - left))
result = (interpolated, left, right)
results.append(result)
    return results
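# Math sketch: P(n, v) above is the survival function of a sample minimum.
# With numSamples i.i.d. Binomial(n, p) draws, the minimum is >= v exactly
# when every draw is >= v, so P = P(X >= v) ** numSamples. An equivalent
# using only scipy.stats (an assumption, not this module's own code):
from scipy import stats

def lower_bound_prob(n, num_occurrences, p, num_samples):
    per_draw = stats.binom.sf(num_occurrences - 1, n, p)  # P(X >= num_occurrences)
    return per_draw ** num_samples

print(lower_bound_prob(n=100, num_occurrences=5, p=0.1, num_samples=20))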
def get(self, id):
"""
    Get data by id.
    :param id: The id of the data to get.
    :return: The retrieved data; an empty ``dict`` object if nothing is found.
"""
document = self._get_document(id)
if document:
session_json = document["session"]
return json_loads(session_json)
    return {}
def display_lookback_returns(self):
"""
    Returns the current lookback returns for each series, formatted as percentages.
"""
return self.lookback_returns.apply(
        lambda x: x.map('{:,.2%}'.format), axis=1)
def create_event(self, type, data, **kwargs):
"""
Create event.
The **Events** API can be used to create log entries that are associated with
specific queries. For example, you can record which documents in the results set
were \"clicked\" by a user and when that click occured.
:param str type: The event type to be created.
:param EventData data: Query event data object.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if type is None:
raise ValueError('type must be provided')
if data is None:
raise ValueError('data must be provided')
data = self._convert_model(data, EventData)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('discovery', 'V1', 'create_event')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'type': type, 'data': data}
url = '/v1/events'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
        return response
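# Hypothetical call sketch for create_event with a configured Discovery
# client. The EventData field names below are illustrative placeholders and
# are not verified against a specific SDK release.
event_data = EventData(
    environment_id='my-environment-id',
    session_token='token-from-a-previous-query-response',
    collection_id='my-collection-id',
    document_id='my-document-id')
response = discovery.create_event(type='click', data=event_data)
print(response.get_status_code())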
def aDiffCytoscape(df,aging_genes,target,species="caenorhabditis elegans",limit=None, cutoff=0.4,\
taxon=None,host=cytoscape_host,port=cytoscape_port):
"""
Plots tables from aDiff/cuffdiff into cytoscape using String protein queries.
    Uses top changed genes as well as first neighbours and diffusion to generate subnetworks.
    :param df: df as output by aDiff for differential gene expression
:param aging_genes: ENS gene ids to be labeled with a diagonal
:param species: species for string app query. eg. "caenorhabditis elegans", "drosophila melanogaster", "mus musculus", "homo sapiens"
:param limit: limit for string app query. Number of extra genes to recover. If None, limit=N(query_genes)*.25
    :param cutoff: confidence cutoff for string app query. Default=0.4
:param taxon: taxon id for string app query. For the species shown above, taxon id will be automatically identified
:param cytoscape_host: host address for cytoscape
:param cytoscape_port: cytoscape port
:param target: target destination for saving files without prefix. eg. "/beegfs/group_bit/home/JBoucas/test/N2_vs_daf2"
:returns: nothing
"""
##### TEMPORARY FIX - STRING APP NOT ACCEPTING QUERIES ABOVE 2000 GENES ####
df=df.sort_values(by=["q_value"],ascending=True)
df.reset_index(inplace=True, drop=True)
tmp=df[:1999]
df=tmp.copy()
##### END OF TEMPORARY FIX #####
query_genes=df["ensembl_gene_id"].tolist()
df["NormInt"]=df["value_1"]*df["value_2"]
df["NormInt"]=df["NormInt"].apply(lambda x: np.log10(np.sqrt(x)) )
if not limit:
limit=int(len(query_genes)*.25)
    # Annotate aging evidence
def CheckEvidence(x,aging_genes=aging_genes):
if x in aging_genes:
res="aging_gene"
else:
res="no"
return res
df["evidence"]=df["ensembl_gene_id"].apply(lambda x:CheckEvidence(x) )
    # fix infinite values
def FixInfs(x):
if str(x) in ["-inf","inf"]:
res=np.nan
else:
res=x
return res
df["NormInt"]=df["NormInt"].apply( lambda x: FixInfs(x) )
df["log2(fold_change)"]=df["log2(fold_change)"].apply( lambda x: FixInfs(x) )
taxons={"caenorhabditis elegans":"6239","drosophila melanogaster":"7227",\
"mus musculus":"10090","homo sapiens":"9606"}
if not taxon:
taxon=taxons[species]
# destroy any existing network still present in cytoscape
response=cytoscape("network", "list",\
host=host, port=port)
if "networks" in response.keys():
response=response["networks"]
#print response
if len(response) > 0:
for r in response:
rr=cytoscape("network", "destroy",{"network":"SUID:"+str(r)},\
host=host, port=port)
# String protein query
query_genes=[ str(s) for s in query_genes ]
response=cytoscape("string", "protein query",\
{"query":",".join(query_genes),\
"cutoff":str(cutoff),\
"species":species,\
"limit":str(limit),\
"taxonID":taxon},\
host=host, port=port)
print("giving some time to cytoscape..")
sys.stdout.flush()
sleep(10)
# apply new layout
response=cytoscape("layout", "force-directed",\
{"defaultSpringCoefficient":".000004",\
"defaultSpringLength":"5"},\
host=host, port=port)
# redefine defaults for node visualization
response=loadTableData(df[["ensembl_gene_id","log2(fold_change)","NormInt","evidence"]].dropna(),\
df_key="ensembl_gene_id",table_key_column="query term",\
host=host, port=port)
defaults_dic={"NODE_SHAPE":"ellipse",\
"NODE_SIZE":60,\
"NODE_FILL_COLOR":"#AAAAAA",\
"EDGE_TRANSPARENCY":120}
defaults_list=simple_defaults(defaults_dic)
# apply mappings - blue / white / red - from -4 to +4 log2FC
NODE_LABEL=mapVisualProperty("NODE_LABEL","passthrough","display name",host=host, port=port)
create_styles("dataStyle",defaults_list,[NODE_LABEL],host=host, port=port)
response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},host=host, port=port)
cmap = matplotlib.cm.get_cmap("bwr")
norm = matplotlib.colors.Normalize(vmin=-4, vmax=4)
min_color=matplotlib.colors.rgb2hex(cmap(norm(-4)))
center_color=matplotlib.colors.rgb2hex(cmap(norm(0)))
max_color=matplotlib.colors.rgb2hex(cmap(norm(4)))
NODE_FILL_COLOR=mapVisualProperty('NODE_FILL_COLOR','continuous','log2(fold_change)',\
lower=[-4,min_color],center=[0.0,center_color],upper=[4,max_color],\
host=host, port=port)
# apply diamond shape and increase node size to nodes with aging evidence
NODE_SHAPE=mapVisualProperty('NODE_SHAPE','discrete','evidence',discrete=[ ["aging_gene","no"], ["DIAMOND", "ellipse"] ],\
host=host, port=port)
NODE_SIZE=mapVisualProperty('NODE_SIZE','discrete','evidence',discrete=[ ["aging_gene","no"], ["100.0","60.0"] ],\
host=host, port=port)
update_style("dataStyle",mappings=[NODE_SIZE,NODE_SHAPE,NODE_FILL_COLOR],\
host=host, port=port)
response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\
host=host, port=port)
# apply mappings - reds - to Normalized expression (as in MA plots) to border color and border size
NormIntDf = getTableColumns('node',['NormInt'],host=host, port=port)
if 'NormInt' in NormIntDf.columns.tolist():
min_NormInt = min(NormIntDf.dropna()['NormInt'].tolist())
max_NormInt = max(NormIntDf.dropna()['NormInt'].tolist())
cent_NormInt = np.mean([min_NormInt,max_NormInt])
cmap = matplotlib.cm.get_cmap("Reds")
norm = matplotlib.colors.Normalize(vmin=min_NormInt, vmax=max_NormInt)
        min_color=matplotlib.colors.rgb2hex(cmap(norm(min_NormInt)))
center_color=matplotlib.colors.rgb2hex(cmap(norm(cent_NormInt)))
max_color=matplotlib.colors.rgb2hex(cmap(norm(max_NormInt)))
NODE_BORDER_PAINT=mapVisualProperty('NODE_BORDER_PAINT','continuous','NormInt',\
lower=[min_NormInt,min_color],center=[np.mean([min_NormInt,max_NormInt]),center_color],upper=[max_NormInt,max_color],\
host=host, port=port)
update_style("dataStyle",mappings=[NODE_BORDER_PAINT],\
host=host, port=port)
response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\
host=host, port=port)
NODE_BORDER_WIDTH=mapVisualProperty('NODE_BORDER_WIDTH','continuous','NormInt',\
lower=[min_NormInt,2],center=[np.mean([min_NormInt,max_NormInt]),4],upper=[max_NormInt,8],\
host=host, port=port)
update_style("dataStyle",mappings=[NODE_BORDER_WIDTH],\
host=host, port=port)
response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\
host=host, port=port)
response=cytoscape("network","rename",\
{"name":'main String network'},\
host=host, port=port)
# create network with edges only
response=cytoscape("network","select",\
{"edgeList":"all",\
"extendEdges":"true"},\
host=host, port=port)
response=cytoscape("network","create",\
{"source":"current",\
"nodeList":"selected"},\
host=host, port=port)
response=cytoscape("network","rename",\
{"name":'main String network (edges only)'},\
host=host, port=port)
# top 10 changed genes > first neighbours
response=cytoscape("network","set current",
{"network":"main String network (edges only)"},\
host=host, port=port)
log2fcDf = getTableColumns('node',['log2(fold_change)'],host=host, port=port)
if 'log2(fold_change)' in log2fcDf.columns.tolist():
log2fcDf['log2(fold_change)']=log2fcDf['log2(fold_change)'].apply(lambda x: abs(x))
log2fcDf=log2fcDf.sort_values(by=['log2(fold_change)'],ascending=False)
top_nodes=log2fcDf.index.tolist()[:int(len(log2fcDf)*.10)]
response=cytoscape("network","select",
{"nodeList":"name:"+",".join(top_nodes)},\
host=host, port=port)
response=cytoscape("network","select",
{"firstNeighbors":"",\
"direction":"any",\
"network":"current"},\
host=host, port=port)
response=cytoscape("network","create",
{"source":"current",\
"nodeList":"selected"},\
host=host, port=port)
response=cytoscape("network","select",
{"edgeList":"all",\
"extendEdges":"true"},\
host=host, port=port)
response=cytoscape("network","delete",
{"nodeList":"unselected"},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host,port=port)
response=cytoscape("layout", "force-directed",\
host=host, port=port)
response=cytoscape("network","rename",\
{"name":'top '+str(int(len(log2fcDf)*.10))+' changed firstNeighbors'},\
host=host, port=port)
#top 10 changed genes difusion
response=cytoscape("network","set current",
{"network":"main String network (edges only)"},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
response=cytoscape("network","select",
{"nodeList":"name:"+",".join(top_nodes)},\
host=host, port=port)
response=cytoscape("diffusion","diffuse",host=host, port=port)
response=cytoscape("network","create",
{"source":"current",\
"nodeList":"selected"},\
host=host, port=port)
response=cytoscape("network","select",
{"edgeList":"all",\
"extendEdges":"true"},\
host=host, port=port)
response=cytoscape("network","delete",
{"nodeList":"unselected"},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
response=cytoscape("layout", "force-directed",host=host, port=port)
response=cytoscape("network","rename",\
{"name":'top '+str(int(len(log2fcDf)*.10))+' changed diffusion'},\
host=host, port=port)
def MAKETMP():
(fd, f) = tempfile.mkstemp()
f="/tmp/"+f.split("/")[-1]
return f
cys=MAKETMP()
cyjs=MAKETMP()
main_png=MAKETMP()
main_pdf=MAKETMP()
edg_png=MAKETMP()
edg_pdf=MAKETMP()
neig_png=MAKETMP()
neig_pdf=MAKETMP()
dif_png=MAKETMP()
dif_pdf=MAKETMP()
response=cytoscape("session", "save as" , \
{"file":cys},\
host=host, port=port)
response=cytoscape("network", "export" , \
{"options":'CYJS',\
"OutputFile":cyjs},\
host=host, port=port)
response=cytoscape("network","set current",
{"network":"main String network"},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
sleep(5)
response=cytoscape("view", "export" , \
{"options":"PNG",\
"OutputFile":main_png},\
host=host, port=port)
response=cytoscape("view", "export" , \
{"options":"PDF",\
"OutputFile":main_pdf},\
host=host, port=port)
response=cytoscape("network","set current",
{"network":"main String network (edges only)"},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
sleep(5)
response=cytoscape("view", "export" , \
{"options":"PNG",\
"OutputFile":edg_png},\
host=host, port=port)
response=cytoscape("view", "export" , \
{"options":"PDF",\
"OutputFile":edg_pdf},\
host=host, port=port)
try:
response=cytoscape("network","set current",
{"network":'top '+str(int(len(log2fcDf)*.10))+' changed firstNeighbors'},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
sleep(5)
response=cytoscape("view", "export" , \
{"options":"PNG",\
"OutputFile":neig_png},\
host=host, port=port)
response=cytoscape("view", "export" , \
{"options":"PDF",\
"OutputFile":neig_pdf},\
host=host, port=port)
    except Exception:
print("No "+"changed firstNeighbors")
sys.stdout.flush()
try:
response=cytoscape("network","set current",
{"network":'top '+str(int(len(log2fcDf)*.10))+' changed diffusion'},\
host=host, port=port)
response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\
host=host, port=port)
sleep(5)
response=cytoscape("view", "export" , \
{"options":"PNG",\
"OutputFile":dif_png},\
host=host, port=port)
response=cytoscape("view", "export" , \
{"options":"PDF",\
"OutputFile":dif_pdf},\
host=host, port=port)
    except Exception:
print("No "+"changed diffusion")
sys.stdout.flush()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host)
ftp_client=ssh.open_sftp()
for f, extension, local in zip([cys,cyjs,main_png,main_pdf,edg_png,edg_pdf,neig_png,neig_pdf,dif_png,dif_pdf],\
[".cys",".cyjs",".png",".pdf",".png",".pdf",".png",".pdf",".png",".pdf" ],\
[target+".cys",target+".cyjs",target+".main.png",target+".main.pdf",\
target+".main.edges.png",target+".main.edges.pdf",\
target+".topFirstNeighbors.png",target+".topFirstNeighbors.pdf",\
target+".topDiffusion.png",target+".topDiffusion.pdf"]):
try:
ftp_client.get(f+extension,local)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("rm "+f+extension )
        except Exception:
print("No "+local)
            sys.stdout.flush()
def draw_zijderveld(self):
"""
Draws the zijderveld plot in the GUI on canvas1
"""
self.fig1.clf()
axis_bounds = [0, .1, 1, .85]
self.zijplot = self.fig1.add_axes(
axis_bounds, frameon=False, facecolor='None', label='zig_orig', zorder=0)
self.zijplot.clear()
self.zijplot.axis('equal')
self.zijplot.xaxis.set_visible(False)
self.zijplot.yaxis.set_visible(False)
self.MS = 6*self.GUI_RESOLUTION
self.dec_MEC = 'k'
self.dec_MFC = 'r'
self.inc_MEC = 'k'
self.inc_MFC = 'b'
self.zijdblock_steps = self.Data[self.s]['zijdblock_steps']
self.vds = self.Data[self.s]['vds']
self.zijplot.plot(self.CART_rot_good[:, 0], -1*self.CART_rot_good[:, 1], 'ro-',
markersize=self.MS, clip_on=False, picker=True, zorder=1) # x,y or N,E
self.zijplot.plot(self.CART_rot_good[:, 0], -1*self.CART_rot_good[:, 2], 'bs-',
markersize=self.MS, clip_on=False, picker=True, zorder=1) # x-z or N,D
for i in range(len(self.CART_rot_bad)):
self.zijplot.plot(self.CART_rot_bad[:, 0][i], -1 * self.CART_rot_bad[:, 1][i], 'o', mfc='None',
mec=self.dec_MEC, markersize=self.MS, clip_on=False, picker=False) # x,y or N,E
self.zijplot.plot(self.CART_rot_bad[:, 0][i], -1 * self.CART_rot_bad[:, 2][i], 's', mfc='None',
mec=self.inc_MEC, markersize=self.MS, clip_on=False, picker=False) # x-z or N,D
if self.preferences['show_Zij_treatments']:
for i in range(len(self.zijdblock_steps)):
if int(self.preferences['show_Zij_treatments_steps']) != 1:
if i != 0 and (i+1) % int(self.preferences['show_Zij_treatments_steps']) == 0:
self.zijplot.text(self.CART_rot[i][0], -1*self.CART_rot[i][2], " %s" % (
self.zijdblock_steps[i]), fontsize=8*self.GUI_RESOLUTION, color='gray', ha='left', va='center') # inc
else:
self.zijplot.text(self.CART_rot[i][0], -1*self.CART_rot[i][2], " %s" % (
self.zijdblock_steps[i]), fontsize=10*self.GUI_RESOLUTION, color='gray', ha='left', va='center') # inc
# -----
xmin, xmax = self.zijplot.get_xlim()
if xmax < 0:
xmax = 0
if xmin > 0:
xmin = 0
# else:
# xmin=xmin+xmin%0.2
props = dict(color='black', linewidth=1.0, markeredgewidth=0.5)
xlocs = array(list(arange(0.2, xmax, 0.2)) +
list(arange(-0.2, xmin, -0.2)))
if len(xlocs) > 0:
xtickline, = self.zijplot.plot(
xlocs, [0]*len(xlocs), linestyle='', marker='+', **props)
xtickline.set_clip_on(False)
axxline, = self.zijplot.plot([xmin, xmax], [0, 0], **props)
axxline.set_clip_on(False)
TEXT = ""
if self.COORDINATE_SYSTEM == 'specimen':
self.zijplot.text(xmax, 0, ' x', fontsize=10,
verticalalignment='bottom')
else:
if self.ORTHO_PLOT_TYPE == 'N-S':
TEXT = " N"
elif self.ORTHO_PLOT_TYPE == 'E-W':
TEXT = " E"
else:
TEXT = " x"
self.zijplot.text(xmax, 0, TEXT, fontsize=10,
verticalalignment='bottom')
# -----
ymin, ymax = self.zijplot.get_ylim()
if ymax < 0:
ymax = 0
if ymin > 0:
ymin = 0
ylocs = array(list(arange(0.2, ymax, 0.2)) +
list(arange(-0.2, ymin, -0.2)))
if len(ylocs) > 0:
ytickline, = self.zijplot.plot(
[0]*len(ylocs), ylocs, linestyle='', marker='+', **props)
ytickline.set_clip_on(False)
axyline, = self.zijplot.plot([0, 0], [ymin, ymax], **props)
axyline.set_clip_on(False)
TEXT1, TEXT2 = "", ""
if self.COORDINATE_SYSTEM == 'specimen':
TEXT1, TEXT2 = " y", " z"
else:
if self.ORTHO_PLOT_TYPE == 'N-S':
TEXT1, TEXT2 = " E", " D"
elif self.ORTHO_PLOT_TYPE == 'E-W':
TEXT1, TEXT2 = " S", " D"
else:
TEXT1, TEXT2 = " y", " z"
self.zijplot.text(0, ymin, TEXT1, fontsize=10,
color='r', verticalalignment='top')
self.zijplot.text(0, ymin, ' ,', fontsize=10,
color='k', verticalalignment='top')
self.zijplot.text(0, ymin, TEXT2, fontsize=10,
color='b', verticalalignment='top')
# ----
if self.ORTHO_PLOT_TYPE == 'N-S':
STRING = ""
#STRING1="N-S orthogonal plot"
self.fig1.text(0.01, 0.98, "Zijderveld plot: x = North", {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
elif self.ORTHO_PLOT_TYPE == 'E-W':
STRING = ""
#STRING1="E-W orthogonal plot"
self.fig1.text(0.01, 0.98, "Zijderveld plot:: x = East", {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
elif self.ORTHO_PLOT_TYPE == 'PCA_dec':
self.fig1.text(0.01, 0.98, "Zijderveld plot", {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
if 'specimen_dec' in list(self.current_fit.pars.keys()) and type(self.current_fit.pars['specimen_dec']) != str:
STRING = "X-axis rotated to best fit line declination (%.0f); " % (
self.current_fit.pars['specimen_dec'])
else:
STRING = "X-axis rotated to NRM (%.0f); " % (
self.zijblock[0][1])
else:
self.fig1.text(0.01, 0.98, "Zijderveld plot", {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
STRING = "X-axis rotated to NRM (%.0f); " % (self.zijblock[0][1])
#STRING1="Zijderveld plot"
STRING = STRING+"NRM=%.2e " % (self.zijblock[0][3]) + 'Am^2'
self.fig1.text(0.01, 0.95, STRING, {'family': self.font_type, 'fontsize': 8 *
self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
xmin, xmax = self.zijplot.get_xlim()
ymin, ymax = self.zijplot.get_ylim()
self.zij_xlim_initial = (xmin, xmax)
self.zij_ylim_initial = (ymin, ymax)
    self.canvas1.draw()
def server_list(self):
'''
List servers
'''
nt_ks = self.compute_conn
ret = {}
for item in nt_ks.servers.list():
try:
ret[item.name] = {
'id': item.id,
'name': item.name,
'state': item.status,
'accessIPv4': item.accessIPv4,
'accessIPv6': item.accessIPv6,
'flavor': {'id': item.flavor['id'],
'links': item.flavor['links']},
'image': {'id': item.image['id'] if item.image else 'Boot From Volume',
'links': item.image['links'] if item.image else ''},
}
except TypeError:
pass
    return ret
def reverse_dummies(self, X, mapping):
"""
    Convert dummy variables back into numerical variables
Parameters
----------
X : DataFrame
mapping: list-like
Contains mappings of column to be transformed to it's new columns and value represented
Returns
-------
numerical: DataFrame
"""
out_cols = X.columns.values.tolist()
mapped_columns = []
for switch in mapping:
col = switch.get('col')
mod = switch.get('mapping')
insert_at = out_cols.index(mod.columns[0])
X.insert(insert_at, col, 0)
positive_indexes = mod.index[mod.index > 0]
for i in range(positive_indexes.shape[0]):
existing_col = mod.columns[i]
val = positive_indexes[i]
X.loc[X[existing_col] == 1, col] = val
mapped_columns.append(existing_col)
X.drop(mod.columns, axis=1, inplace=True)
out_cols = X.columns.values.tolist()
    return X
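# Data-shape sketch for reverse_dummies: `mapping` is a list of
# {'col': original_name, 'mapping': DataFrame} entries where the frame's
# index holds the ordinal values and its columns are the dummy column names.
# The layout is inferred from the code above, so treat it as an illustration
# rather than the canonical category_encoders format.
import pandas as pd

X = pd.DataFrame({'color_1': [1, 0, 0], 'color_2': [0, 1, 0]})
mod = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=['color_1', 'color_2'])
mapping = [{'col': 'color', 'mapping': mod}]
# encoder.reverse_dummies(X, mapping) then yields color = [1, 2, 0]:
# row 0 had color_1 set, row 1 had color_2 set, row 2 had neither.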
def generate_context(context_file='cookiecutter.json', default_context=None,
extra_context=None):
"""Generate the context for a Cookiecutter project template.
Loads the JSON file as a Python object, with key being the JSON filename.
:param context_file: JSON file containing key/value pairs for populating
the cookiecutter's variables.
:param default_context: Dictionary containing config to take into account.
:param extra_context: Dictionary containing configuration overrides
"""
context = OrderedDict([])
try:
with open(context_file) as file_handle:
obj = json.load(file_handle, object_pairs_hook=OrderedDict)
except ValueError as e:
# JSON decoding error. Let's throw a new exception that is more
# friendly for the developer or user.
full_fpath = os.path.abspath(context_file)
json_exc_message = str(e)
our_exc_message = (
'JSON decoding error while loading "{0}". Decoding'
' error details: "{1}"'.format(full_fpath, json_exc_message))
raise ContextDecodingException(our_exc_message)
# Add the Python object to the context dictionary
file_name = os.path.split(context_file)[1]
file_stem = file_name.split('.')[0]
context[file_stem] = obj
# Overwrite context variable defaults with the default context from the
# user's global config, if available
if default_context:
apply_overwrites_to_context(obj, default_context)
if extra_context:
apply_overwrites_to_context(obj, extra_context)
logger.debug('Context generated is {}'.format(context))
    return context
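# Usage sketch for generate_context, standard library only. Note the
# top-level key is the file stem, and extra_context overrides win:
import json

with open('cookiecutter.json', 'w') as fh:
    json.dump({'project_name': 'demo', 'license': 'MIT'}, fh)

context = generate_context(context_file='cookiecutter.json',
                           extra_context={'project_name': 'renamed'})
print(context['cookiecutter']['project_name'])  # -> 'renamed'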
def update(self, name: str, value=None, default=None, description: str=None):
"""
Like add, but can tolerate existing values; also updates the value.
Mostly used for setting fields from imported INI files and modified CLI flags.
"""
if name in self._vars:
description = description or self._vars[name].description
default = default or self._vars[name].default
elif name == 'name':
raise ConfigError("'name' is a reserved name for a group.")
v = _Var(name, description=description, default=default, defined=False)
v.value = value
    self._vars[name] = v
def get_max_bond_lengths(structure, el_radius_updates=None):
"""
Provides max bond length estimates for a structure based on the JMol
table and algorithms.
Args:
structure: (structure)
el_radius_updates: (dict) symbol->float to update atomic radii
Returns: (dict) - (Element1, Element2) -> float. The two elements are
ordered by Z.
"""
#jmc = JMolCoordFinder(el_radius_updates)
jmnn = JmolNN(el_radius_updates=el_radius_updates)
bonds_lens = {}
els = sorted(structure.composition.elements, key=lambda x: x.Z)
for i1 in range(len(els)):
for i2 in range(len(els) - i1):
bonds_lens[els[i1], els[i1 + i2]] = jmnn.get_max_bond_distance(
els[i1].symbol, els[i1 + i2].symbol)
    return bonds_lens
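# Hedged usage sketch: the import path matches recent pymatgen releases;
# adjust for older versions. The keys are (Element1, Element2) pairs ordered
# by atomic number, as the docstring states.
from pymatgen.core import Lattice, Structure

rocksalt = Structure(Lattice.cubic(4.2), ['Na', 'Cl'],
                     [[0, 0, 0], [0.5, 0.5, 0.5]])
for (el1, el2), max_len in get_max_bond_lengths(rocksalt).items():
    print(el1.symbol, el2.symbol, round(max_len, 2))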
def unlock(self):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
try:
self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))
except Exception:
pass
try:
self.os.rmdir(self.tmp_dir)
except Exception:
        pass
def do_execute(self):
"""
Actual execution of the director.
:return: None if successful, otherwise error message
:rtype: str
"""
self._stopped = False
self._stopping = False
not_finished_actor = self.owner.first_active
pending_actors = []
finished = False
actor_result = None
while not (self.is_stopping() or self.is_stopped()) and not finished:
        # determine starting point of next iteration
if len(pending_actors) > 0:
start_index = self.owner.index_of(pending_actors[-1].name)
else:
start_index = self.owner.index_of(not_finished_actor.name)
not_finished_actor = None
# iterate over actors
token = None
last_active = -1
if self.owner.active > 0:
last_active = self.owner.last_active.index
for i in range(start_index, last_active + 1):
# do we have to stop the execution?
if self.is_stopped() or self.is_stopping():
break
curr = self.owner.actors[i]
if curr.skip:
continue
# no token? get pending one or produce new one
if token is None:
if isinstance(curr, OutputProducer) and curr.has_output():
pending_actors.pop()
else:
actor_result = curr.execute()
if actor_result is not None:
self.owner.logger.error(
curr.full_name + " generated following error output:\n" + actor_result)
break
if isinstance(curr, OutputProducer) and curr.has_output():
token = curr.output()
else:
token = None
# still more to come?
if isinstance(curr, OutputProducer) and curr.has_output():
pending_actors.append(curr)
else:
# process token
curr.input = token
actor_result = curr.execute()
if actor_result is not None:
self.owner.logger.error(
curr.full_name + " generated following error output:\n" + actor_result)
break
# was a new token produced?
if isinstance(curr, OutputProducer):
if curr.has_output():
token = curr.output()
else:
token = None
# still more to come?
if curr.has_output():
pending_actors.append(curr)
else:
token = None
# token from last actor generated? -> store
if (i == self.owner.last_active.index) and (token is not None):
if self._record_output:
self._recorded_output.append(token)
# no token produced, ignore rest of actors
if isinstance(curr, OutputProducer) and (token is None):
break
# all actors finished?
finished = (not_finished_actor is None) and (len(pending_actors) == 0)
    return actor_result
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):
"""
    With this functionality you can query previously created credit card tokens.
Args:
payer_id:
credit_card_token_id:
start_date:
end_date:
Returns:
"""
payload = {
"language": self.client.language.value,
"command": PaymentCommand.GET_TOKENS.value,
"merchant": {
"apiLogin": self.client.api_login,
"apiKey": self.client.api_key
},
"creditCardTokenInformation": {
"payerId": payer_id,
"creditCardTokenId": credit_card_token_id,
"startDate": start_date.strftime('%Y-%m-%dT%H:%M:%S'),
"endDate": end_date.strftime('%Y-%m-%dT%H:%M:%S')
},
"test": self.client.is_test
}
    return self.client._post(self.url, json=payload)
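# Hypothetical call sketch: `payments` is an already configured client
# wrapper and the ids are placeholders. Dates are plain datetimes, which the
# method formats as '%Y-%m-%dT%H:%M:%S'.
from datetime import datetime

response = payments.get_tokens(
    payer_id='payer_3',
    credit_card_token_id='credit-card-token-id-placeholder',
    start_date=datetime(2024, 1, 1),
    end_date=datetime(2024, 1, 31))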
def get_fptr(self):
"""Get the function pointer."""
cmpfunc = ctypes.CFUNCTYPE(ctypes.c_int,
WPARAM,
LPARAM,
ctypes.POINTER(KBDLLHookStruct))
    return cmpfunc(self.handle_input)
def plot_labels(ax, label_fontsize=14,
xlabel=None, xlabel_arg=None,
ylabel=None, ylabel_arg=None,
zlabel=None, zlabel_arg=None):
"""Sets the labels options of a matplotlib plot
Args:
ax: matplotlib axes
label_fontsize(int): Size of the labels' font
xlabel(str): The xlabel for the figure
        xlabel_arg(dict): Passed into matplotlib as xlabel arguments
        ylabel(str): The ylabel for the figure
        ylabel_arg(dict): Passed into matplotlib as ylabel arguments
        zlabel(str): The zlabel for the figure
        zlabel_arg(dict): Passed into matplotlib as zlabel arguments
"""
xlabel = xlabel if xlabel is not None else ax.get_xlabel() or 'X'
ylabel = ylabel if ylabel is not None else ax.get_ylabel() or 'Y'
xlabel_arg = dict_if_none(xlabel_arg)
ylabel_arg = dict_if_none(ylabel_arg)
ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)
ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)
if hasattr(ax, 'zaxis'):
zlabel = zlabel if zlabel is not None else ax.get_zlabel() or 'Z'
zlabel_arg = dict_if_none(zlabel_arg)
        ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)
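# Runnable sketch for plot_labels, assuming dict_if_none is the trivial
# None -> {} helper, which is all the function above requires.
import matplotlib.pyplot as plt

def dict_if_none(d):
    return {} if d is None else d

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
plot_labels(ax, label_fontsize=12, xlabel='time (s)',
            ylabel='amplitude', ylabel_arg={'color': 'gray'})
plt.show()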
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
"""
Merges any overlapping intervals.
Args:
also_remove_contiguous: treat contiguous (as well as overlapping)
intervals as worthy of merging?
"""
overlap = True
while overlap:
overlap = self._remove_overlap_sub(also_remove_contiguous)
    self._sort()
def emissive_part_3x(self, tb=True):
"""Get the emissive part of the 3.x band"""
try:
# Emissive part:
self._e3x = self._rad3x_t11 * (1 - self._r3x)
# Unsure how much sense it makes to apply the co2 correction term here!?
# FIXME!
# self._e3x *= self._rad3x_correction
except TypeError:
LOG.warning(
"Couldn't derive the emissive part \n" +
"Please derive the relfectance prior to requesting the emissive part")
if tb:
return self.radiance2tb(self._e3x)
else:
        return self._e3x
def execute_ccm_remotely(remote_options, ccm_args):
"""
Execute CCM operation(s) remotely
:return A tuple defining the execution of the command
* output - The output of the execution if the output was not displayed
* exit_status - The exit status of remotely executed script
:raises Exception if invalid options are passed for `--dse-credentials`, `--ssl`, or
`--node-ssl` when initiating a remote execution; also if
        an error occurred during the SSH connection
"""
if not PARAMIKO_IS_AVAILABLE:
logging.warn("Paramiko is not Availble: Skipping remote execution of CCM command")
return None, None
# Create the SSH client
ssh_client = SSHClient(remote_options.ssh_host, remote_options.ssh_port,
remote_options.ssh_username, remote_options.ssh_password,
remote_options.ssh_private_key)
# Handle CCM arguments that require SFTP
for index, argument in enumerate(ccm_args):
# Determine if DSE credentials argument is being used
if "--dse-credentials" in argument:
# Get the filename being used for the DSE credentials
tokens = argument.split("=")
credentials_path = os.path.join(os.path.expanduser("~"), ".ccm", ".dse.ini")
if len(tokens) == 2:
credentials_path = tokens[1]
# Ensure the credential file exists locally and copy to remote host
if not os.path.isfile(credentials_path):
raise Exception("DSE Credentials File Does not Exist: %s"
% credentials_path)
ssh_client.put(credentials_path, ssh_client.ccm_config_dir)
# Update the DSE credentials argument
ccm_args[index] = "--dse-credentials"
# Determine if SSL or node SSL path argument is being used
if "--ssl" in argument or "--node-ssl" in argument:
# Get the directory being used for the path
tokens = argument.split("=")
if len(tokens) != 2:
raise Exception("Path is not Specified: %s" % argument)
ssl_path = tokens[1]
# Ensure the path exists locally and copy to remote host
if not os.path.isdir(ssl_path):
raise Exception("Path Does not Exist: %s" % ssl_path)
remote_ssl_path = ssh_client.temp + os.path.basename(ssl_path)
ssh_client.put(ssl_path, remote_ssl_path)
# Update the argument
ccm_args[index] = tokens[0] + "=" + remote_ssl_path
# Execute the CCM request, return output and exit status
    return ssh_client.execute_ccm_command(ccm_args)
def check_guest_exist(check_index=0):
"""Check guest exist in database.
:param check_index: The parameter index of userid(s), default as 0
"""
def outer(f):
@six.wraps(f)
def inner(self, *args, **kw):
userids = args[check_index]
if isinstance(userids, list):
# convert all userids to upper case
userids = [uid.upper() for uid in userids]
new_args = (args[:check_index] + (userids,) +
args[check_index + 1:])
else:
# convert the userid to upper case
userids = userids.upper()
new_args = (args[:check_index] + (userids,) +
args[check_index + 1:])
userids = [userids]
self._vmops.check_guests_exist_in_db(userids)
return f(self, *new_args, **kw)
return inner
    return outer
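# Application sketch for the decorator above (the class and methods are
# hypothetical): check_index counts positional arguments after `self`, and
# the wrapped call receives the upper-cased userid(s).
class GuestAPI:
    def __init__(self, vmops):
        self._vmops = vmops

    @check_guest_exist()  # userid is positional argument 0
    def guest_start(self, userid):
        return 'started %s' % userid

    @check_guest_exist(check_index=1)  # userid comes after `destination`
    def guest_live_migrate(self, destination, userid):
        return 'migrating %s to %s' % (userid, destination)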
def to_genshi(walker):
"""Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
"""
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1) | Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes |
def occupy(self, start, stop):
"""
Mark a given interval as occupied so that the manager could skip
the values from ``start`` to ``stop`` (**inclusive**).
:param start: beginning of the interval.
:param stop: end of the interval.
:type start: int
:type stop: int
"""
self._occupied.append([start, stop])
    self._occupied.sort(key=lambda x: x[0])
def get_confirmation_url(email, request, name="email_registration_confirm", **kwargs):
"""
Returns the confirmation URL
"""
return request.build_absolute_uri(
reverse(name, kwargs={"code": get_confirmation_code(email, request, **kwargs)})
    )
def load_nddata(self, ndd, naxispath=None):
"""Load from an astropy.nddata.NDData object.
"""
self.clear_metadata()
# Make a header based on any NDData metadata
ahdr = self.get_header()
ahdr.update(ndd.meta)
self.setup_data(ndd.data, naxispath=naxispath)
if ndd.wcs is None:
# no wcs in ndd obj--let's try to make one from the header
self.wcs = wcsmod.WCS(logger=self.logger)
self.wcs.load_header(ahdr)
else:
# already have a valid wcs in the ndd object
# we assume it needs an astropy compatible wcs
wcsinfo = wcsmod.get_wcs_class('astropy')
self.wcs = wcsinfo.wrapper_class(logger=self.logger)
        self.wcs.load_nddata(ndd)
def add(self, resource, replace=False):
"""Add a resource or an iterable collection of resources.
Will throw a ValueError if the resource (ie. same uri) already
exists in the ResourceList, unless replace=True.
"""
    if isinstance(resource, collections.abc.Iterable):  # needs `import collections.abc`; collections.Iterable was removed in Python 3.10
for r in resource:
self.resources.add(r, replace)
else:
        self.resources.add(resource, replace)
def get_user_list(host_name, client_name, client_pass):
"""
Pulls the list of users in a client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - user_id_list: A python list of user ids.
"""
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="getusrs",
values="whr=*")
# Make request.
request_result = send_request(host_name, request)
# Extract a python list from xml object.
user_id_list = list()
append_user_id = user_id_list.append
if request_result is not None:
user_list_xml = request_result.text
tree = etree.parse(StringIO(user_list_xml))
root = tree.getroot()
xml_rows = root.findall("./result/row/usr")
for xml_row in xml_rows:
append_user_id(xml_row.text)
    return user_id_list
def updateGroup(self, group, vendorSpecific=None):
"""See Also: updateGroupResponse()
Args:
group:
vendorSpecific:
Returns:
"""
response = self.updateGroupResponse(group, vendorSpecific)
    return self._read_boolean_response(response)
def time_at_shadow_length(day, latitude, multiplier):
"""Compute the time at which an object's shadow is a multiple of its length.
Specifically, determine the time the length of the shadow is a multiple of
the object's length + the length of the object's shadow at noon
This is used in the calculation for Asr time. Hanafi uses a multiplier of
2, and everyone else uses a multiplier of 1
Algorithm taken almost directly from PrayTimes.org code
:param day: The day which to compute for
:param latitude: The latitude of the place of interest
    :param multiplier: The multiplier of the object's length
    :returns: The floating point time delta between Zuhr and the time at which
        the length of the shadow is as defined
"""
latitude_rad = radians(latitude)
declination = radians(sun_declination(day))
angle = arccot(
multiplier +
tan(abs(latitude_rad - declination))
)
numerator = sin(angle) - sin(latitude_rad)*sin(declination)
denominator = cos(latitude_rad) * cos(declination)
    return degrees(acos(numerator/denominator)) / 15
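# Call sketch: the return value is the hour offset from solar noon, so
# Asr = Zuhr + offset. sun_declination/arccot are assumed to be the module's
# own helpers; the date and latitude are arbitrary examples.
import datetime

day = datetime.date(2024, 6, 21)
offset_standard = time_at_shadow_length(day, latitude=40.0, multiplier=1)
offset_hanafi = time_at_shadow_length(day, latitude=40.0, multiplier=2)
print(offset_standard, offset_hanafi)  # the Hanafi offset is always later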
def chunked(data, chunksize):
"""
Returns a list of chunks containing at most ``chunksize`` elements of data.
"""
if chunksize < 1:
raise ValueError("Chunksize must be at least 1!")
if int(chunksize) != chunksize:
raise ValueError("Chunksize needs to be an integer")
res = []
cur = []
for e in data:
cur.append(e)
if len(cur) >= chunksize:
res.append(cur)
cur = []
if cur:
res.append(cur)
    return res
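# Fully runnable example for chunked; the final chunk may be shorter:
print(chunked([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]
print(chunked([], 3))               # -> []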
def calcAFunc(self,MaggNow,AaggNow):
'''
Calculate a new aggregate savings rule based on the history
of the aggregate savings and aggregate market resources from a simulation.
Parameters
----------
MaggNow : [float]
List of the history of the simulated aggregate market resources for an economy.
AaggNow : [float]
List of the history of the simulated aggregate savings for an economy.
Returns
-------
    (unnamed) : AggShocksDynamicRule
Object containing a new savings rule
'''
verbose = self.verbose
discard_periods = self.T_discard # Throw out the first T periods to allow the simulation to approach the SS
update_weight = 1. - self.DampingFac # Proportional weight to put on new function vs old function parameters
total_periods = len(MaggNow)
# Regress the log savings against log market resources
logAagg = np.log(AaggNow[discard_periods:total_periods])
logMagg = np.log(MaggNow[discard_periods-1:total_periods-1])
slope, intercept, r_value, p_value, std_err = stats.linregress(logMagg,logAagg)
# Make a new aggregate savings rule by combining the new regression parameters
# with the previous guess
intercept = update_weight*intercept + (1.0-update_weight)*self.intercept_prev
slope = update_weight*slope + (1.0-update_weight)*self.slope_prev
AFunc = AggregateSavingRule(intercept,slope) # Make a new next-period capital function
# Save the new values as "previous" values for the next iteration
self.intercept_prev = intercept
self.slope_prev = slope
# Plot aggregate resources vs aggregate savings for this run and print the new parameters
if verbose:
print('intercept=' + str(intercept) + ', slope=' + str(slope) + ', r-sq=' + str(r_value**2))
#plot_start = discard_periods
#plt.plot(logMagg[plot_start:],logAagg[plot_start:],'.k')
#plt.show()
    return AggShocksDynamicRule(AFunc)
def audio_output_enumerate_devices(self):
"""Enumerate the defined audio output devices.
@return: list of dicts {name:, description:, devices:}
"""
r = []
head = libvlc_audio_output_list_get(self)
if head:
i = head
while i:
i = i.contents
d = [{'id': libvlc_audio_output_device_id (self, i.name, d),
'longname': libvlc_audio_output_device_longname(self, i.name, d)}
for d in range(libvlc_audio_output_device_count (self, i.name))]
r.append({'name': i.name, 'description': i.description, 'devices': d})
i = i.next
libvlc_audio_output_list_release(head)
    return r
def create_connection(self, *args, **kwargs):
"""This method is trying to establish connection with one of the zookeeper nodes.
Somehow strategy "fail earlier and retry more often" works way better comparing to
the original strategy "try to connect with specified timeout".
Since we want to try connect to zookeeper more often (with the smaller connect_timeout),
        we have to override `create_connection` method in the `SequentialThreadingHandler`
class (which is used by `kazoo.Client`).
:param args: always contains `tuple(host, port)` as the first element and could contain
`connect_timeout` (negotiated session timeout) as the second element."""
args = list(args)
if len(args) == 0: # kazoo 2.6.0 slightly changed the way how it calls create_connection method
kwargs['timeout'] = max(self._connect_timeout, kwargs.get('timeout', self._connect_timeout*10)/10.0)
elif len(args) == 1:
args.append(self._connect_timeout)
else:
args[1] = max(self._connect_timeout, args[1]/10.0)
return super(PatroniSequentialThreadingHandler, self).create_connection(*args, **kwargs) | This method is trying to establish connection with one of the zookeeper nodes.
Somehow strategy "fail earlier and retry more often" works way better comparing to
the original strategy "try to connect with specified timeout".
Since we want to try connect to zookeeper more often (with the smaller connect_timeout),
he have to override `create_connection` method in the `SequentialThreadingHandler`
class (which is used by `kazoo.Client`).
:param args: always contains `tuple(host, port)` as the first element and could contain
`connect_timeout` (negotiated session timeout) as the second element. |
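A sketch of just the timeout arithmetic (names are illustrative): the handler connects with one tenth of the negotiated session timeout, but never less than the configured connect timeout.

def effective_timeout(connect_timeout, negotiated=None):
    # kazoo may pass a negotiated session timeout; fall back to 10x the
    # connect timeout so the result degrades to connect_timeout itself.
    if negotiated is None:
        negotiated = connect_timeout * 10
    return max(connect_timeout, negotiated / 10.0)

assert effective_timeout(2, 30) == 3.0   # 30s session -> 3s connect attempts
assert effective_timeout(2, 5) == 2.0    # never below connect_timeout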
def _get_coordinator_for_group(self, consumer_group):
"""Returns the coordinator (broker) for a consumer group
Returns the broker for a given consumer group or
Raises ConsumerCoordinatorNotAvailableError
"""
if self.consumer_group_to_brokers.get(consumer_group) is None:
yield self.load_consumer_metadata_for_group(consumer_group)
returnValue(self.consumer_group_to_brokers.get(consumer_group)) | Returns the coordinator (broker) for a consumer group
Returns the broker for a given consumer group or
Raises ConsumerCoordinatorNotAvailableError |
def checksum(self):
"""Grab checksum string
"""
md5sum, md5sum64, = [], []
for line in self.SLACKBUILDS_TXT.splitlines():
if line.startswith(self.line_name):
sbo_name = line[17:].strip()
if line.startswith(self.line_md5_64):
if sbo_name == self.name and line[26:].strip():
md5sum64 = line[26:].strip().split()
if line.startswith(self.line_md5):
if sbo_name == self.name and line[19:].strip():
md5sum = line[19:].strip().split()
return self._select_md5sum_arch(md5sum, md5sum64) | Grab checksum string |
def _all_same_area(self, dataset_ids):
"""Return True if all areas for the provided IDs are equal."""
all_areas = []
for ds_id in dataset_ids:
for scn in self.scenes:
ds = scn.get(ds_id)
if ds is None:
continue
all_areas.append(ds.attrs.get('area'))
all_areas = [area for area in all_areas if area is not None]
return all(all_areas[0] == area for area in all_areas[1:]) | Return True if all areas for the provided IDs are equal. |
def open_file(self, path):
"""
Creates a new GenericCodeEdit, opens the requested file and adds it
to the tab widget.
:param path: Path of the file to open
"""
if path:
editor = self.tabWidget.open_document(path)
editor.cursorPositionChanged.connect(
self.on_cursor_pos_changed)
self.recent_files_manager.open_file(path)
self.menu_recents.update_actions() | Creates a new GenericCodeEdit, opens the requested file and adds it
to the tab widget.
:param path: Path of the file to open |
def get_fragment(self, **kwargs):
"""
Return a complete fragment.
:param gp:
:return:
"""
gen, namespaces, plan = self.get_fragment_generator(**kwargs)
graph = ConjunctiveGraph()
[graph.bind(prefix, u) for (prefix, u) in namespaces]
[graph.add((s, p, o)) for (_, s, p, o) in gen]
return graph | Return a complete fragment.
:param gp:
:return: |
def sign_remote_certificate(argdic, **kwargs):
'''
Request a certificate to be remotely signed according to a signing policy.
argdic:
A dict containing all the arguments to be passed into the
create_certificate function. This will become kwargs when passed
to create_certificate.
kwargs:
kwargs delivered from publish.publish
CLI Example:
.. code-block:: bash
salt '*' x509.sign_remote_certificate argdic="{'public_key': '/etc/pki/www.key', 'signing_policy': 'www'}" __pub_id='www1'
'''
    if not isinstance(argdic, dict):
        argdic = ast.literal_eval(argdic)
    if 'signing_policy' not in argdic:
        return 'signing_policy must be specified'
signing_policy = {}
if 'signing_policy' in argdic:
signing_policy = _get_signing_policy(argdic['signing_policy'])
if not signing_policy:
return 'Signing policy {0} does not exist.'.format(argdic['signing_policy'])
if isinstance(signing_policy, list):
dict_ = {}
for item in signing_policy:
dict_.update(item)
signing_policy = dict_
if 'minions' in signing_policy:
if '__pub_id' not in kwargs:
return 'minion sending this request could not be identified'
matcher = 'match.glob'
if '@' in signing_policy['minions']:
matcher = 'match.compound'
if not __salt__[matcher](
signing_policy['minions'], kwargs['__pub_id']):
return '{0} not permitted to use signing policy {1}'.format(
kwargs['__pub_id'], argdic['signing_policy'])
try:
return create_certificate(path=None, text=True, **argdic)
except Exception as except_: # pylint: disable=broad-except
return six.text_type(except_) | Request a certificate to be remotely signed according to a signing policy.
argdic:
A dict containing all the arguments to be passed into the
create_certificate function. This will become kwargs when passed
to create_certificate.
kwargs:
kwargs delivered from publish.publish
CLI Example:
.. code-block:: bash
salt '*' x509.sign_remote_certificate argdic="{'public_key': '/etc/pki/www.key', 'signing_policy': 'www'}" __pub_id='www1' |
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response | Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied. |
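A hedged usage sketch for the method above, assuming a connected paramiko-style `transport` object; the request name is only an example of an SSH protocol extension:

# Block until the server answers the global request (or denies it).
reply = transport.global_request('keepalive@openssh.com', wait=True)
if reply is None:
    print('request denied, or transport closed while waiting')
else:
    print('server accepted the request')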
def readFromFile(self, filename):
'''
read the distortion coeffs from file
'''
s = dict(np.load(filename))
try:
self.coeffs = s['coeffs'][()]
except KeyError:
            # LEGACY - remove
self.coeffs = s
try:
self.opts = s['opts'][()]
except KeyError:
pass
return self.coeffs | read the distortion coeffs from file |
def postalCodeLookup(self, countryCode, postalCode):
"""
Looks up locations for this country and postal code.
"""
params = {"country": countryCode, "postalcode": postalCode}
d = self._call("postalCodeLookupJSON", params)
d.addCallback(operator.itemgetter("postalcodes"))
return d | Looks up locations for this country and postal code. |
def is_connectable(host: str, port: Union[int, str]) -> bool:
"""Tries to connect to the device to see if it is connectable.
Args:
host: The host to connect.
port: The port to connect.
Returns:
True or False.
"""
socket_ = None
try:
socket_ = socket.create_connection((host, port), 1)
result = True
except socket.timeout:
result = False
finally:
if socket_:
socket_.close()
return result | Tries to connect to the device to see if it is connectable.
Args:
host: The host to connect.
port: The port to connect.
Returns:
True or False. |
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
"""Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
"""
secret = _lazysecret(secret) if lazy else secret
encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
try:
        padded = ciphertext + ('=' * (-len(ciphertext) % 4))  # restore stripped base64 padding
decoded = base64.urlsafe_b64decode(str(padded))
plaintext = encobj.decrypt(decoded)
except (TypeError, binascii.Error):
raise InvalidKeyError("invalid key")
if checksum:
try:
crc, plaintext = (base64.urlsafe_b64decode(
plaintext[-8:]), plaintext[:-8])
except (TypeError, binascii.Error):
raise CheckSumError("checksum mismatch")
if not crc == _pack_crc(plaintext):
raise CheckSumError("checksum mismatch")
return plaintext | Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext |
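The padding restoration fixed above can be checked on its own: a urlsafe base64 string with stripped padding needs `-len(s) % 4` extra '=' characters to become a multiple of four again (a sketch):

import base64

def repad(s):
    # 0, 1, 2 or 3 pad chars; valid base64 with stripped padding
    # only ever needs 0-2.
    return s + '=' * (-len(s) % 4)

token = base64.urlsafe_b64encode(b'hello').decode().rstrip('=')
assert base64.urlsafe_b64decode(repad(token)) == b'hello'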
def clean_comment(self):
"""
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST.
"""
comment = self.cleaned_data["text"]
if settings.COMMENTS_ALLOW_PROFANITIES is False:
bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
if bad_words:
raise forms.ValidationError(ungettext(
"Watch your mouth! The word %s is not allowed here.",
"Watch your mouth! The words %s are not allowed here.",
len(bad_words)) % get_text_list(
['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1])
for i in bad_words], ugettext('and')))
return comment | If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST. |
def appendInputWithNSimilarValues(inputs, numNear = 10):
""" Creates a neighboring record for each record in the inputs and adds
new records at the end of the inputs list
"""
numInputs = len(inputs)
skipOne = False
for i in xrange(numInputs):
input = inputs[i]
numChanged = 0
newInput = copy.deepcopy(input)
for j in xrange(len(input)-1):
if skipOne:
skipOne = False
continue
if input[j] == 1 and input[j+1] == 0:
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
newInput = copy.deepcopy(newInput)
#print input
#print newInput
numChanged += 1
skipOne = True
if numChanged == numNear:
break | Creates a neighboring record for each record in the inputs and adds
new records at the end of the inputs list |
def new_action(project_id):
"""Add action."""
project = get_data_or_404('project', project_id)
if project['owner_id'] != get_current_user_id():
return jsonify(message='forbidden'), 403
form = NewActionForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['project_id'] = project_id
id = add_instance('action', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
action = get_data_or_404('action', id)
return jsonify(**action) | Add action. |
def parse_args(self, command_selected, flags, _free_args):
# type: (str, Dict[str, str], List[str]) -> bool
"""
Parse the args and fill the global data
Currently we disregard the free parameters
:param command_selected:
:param flags:
:param _free_args:
:return:
"""
configs = self.function_name_to_configs[command_selected]
suggested_configs = self.function_name_to_suggest_configs[command_selected]
# create the attribute_to_config map
attribute_to_config = dict() # type: Dict[str, Config]
for config in itertools.chain(configs, suggested_configs):
for attribute in config.get_attributes():
if attribute in attribute_to_config:
raise ValueError("attribute [{}] double".format(attribute))
attribute_to_config[attribute] = config
# set the flags into the "default" field
unknown_flags = []
for flag_raw, value in flags.items():
edit = value.startswith('=')
            if flag_raw not in attribute_to_config:
                unknown_flags.append(flag_raw)
                continue
            config = attribute_to_config[flag_raw]
param = config.get_param_by_name(flag_raw)
if edit:
v = param.s2t_generate_from_default(value[1:])
else:
v = param.s2t(value)
setattr(config, flag_raw, v)
# check for missing parameters and show help if there are any missing
missing_parameters = []
for config in configs:
for attribute in config.get_attributes():
value = getattr(config, attribute)
if value is NO_DEFAULT:
missing_parameters.append(attribute)
if unknown_flags or missing_parameters:
if missing_parameters:
print()
print_warn("missing parameters [{}]".format(",".join(missing_parameters)))
if unknown_flags:
print()
print_warn("unknown flags [{}]".format(",".join(unknown_flags)))
print("problems found, not running")
print()
self.show_help_for_function(command_selected, show_help_full=False, show_help_suggest=False)
return False
# move all default values to place
for config in itertools.chain(configs, self._configs):
for attribute in config.get_attributes():
param = getattr(config, attribute) # type: Param
if isinstance(param, Param):
if param.default is not NO_DEFAULT:
setattr(config, attribute, param.default)
return True | Parse the args and fill the global data
Currently we disregard the free parameters
:param command_selected:
:param flags:
:param _free_args:
:return: |
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
"""If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raising the exception.
"""
def _retry_on_exception_inner_1(f):
def _retry_on_exception_inner_2(*args, **kwargs):
retries = num_retries
multiplier = 1
while True:
try:
return f(*args, **kwargs)
except exc_type:
if not retries:
raise
delay = base_delay * multiplier
multiplier += 1
log("Retrying '%s' %d more times (delay=%s)" %
(f.__name__, retries, delay), level=INFO)
retries -= 1
if delay:
time.sleep(delay)
return _retry_on_exception_inner_2
return _retry_on_exception_inner_1 | If the decorated function raises exception exc_type, allow num_retries
retry attempts before raising the exception. |
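A usage sketch for the decorator (the flaky function, counter, and delays are illustrative; `log` is assumed to come from the surrounding charm helpers):

calls = {'n': 0}

@retry_on_exception(num_retries=3, base_delay=0.1, exc_type=IOError)
def flaky_read():
    calls['n'] += 1
    if calls['n'] < 3:
        raise IOError('transient failure')
    return 'ok'

# Raises twice, sleeps 0.1s then 0.2s (linearly growing delay), returns 'ok'.
print(flaky_read())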
def load_emacs_open_in_editor_bindings():
"""
Pressing C-X C-E will open the buffer in an external editor.
"""
registry = Registry()
registry.add_binding(Keys.ControlX, Keys.ControlE,
filter=EmacsMode() & ~HasSelection())(
get_by_name('edit-and-execute-command'))
return registry | Pressing C-X C-E will open the buffer in an external editor. |
def installSite(self):
"""
Not using the dependency system for this class because it's only
installed via the command line, and multiple instances can be
installed.
"""
for iface, priority in self.__getPowerupInterfaces__([]):
self.store.powerUp(self, iface, priority) | Not using the dependency system for this class because it's only
installed via the command line, and multiple instances can be
installed. |
def assemble_notification_request(method, params=tuple()):
"""serialize a JSON-RPC-Notification
:Parameters: see dumps_request
:Returns: | {"method": "...", "params": ..., "id": null}
| "method", "params" and "id" are always in this order.
:Raises: see dumps_request
"""
if not isinstance(method, (str, unicode)):
raise TypeError('"method" must be a string (or unicode string).')
if not isinstance(params, (tuple, list)):
raise TypeError("params must be a tuple/list.")
return {
"method": method,
"params": params,
"id": None
} | serialize a JSON-RPC-Notification
:Parameters: see dumps_request
:Returns: | {"method": "...", "params": ..., "id": null}
| "method", "params" and "id" are always in this order.
:Raises: see dumps_request |
def _drawContents(self, currentRti=None):
""" Draws the attributes of the currentRTI
"""
#logger.debug("_drawContents: {}".format(currentRti))
table = self.table
table.setUpdatesEnabled(False)
try:
table.clearContents()
verticalHeader = table.verticalHeader()
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
attributes = currentRti.attributes if currentRti is not None else {}
table.setRowCount(len(attributes))
for row, (attrName, attrValue) in enumerate(sorted(attributes.items())):
attrStr = to_string(attrValue, decode_bytes='utf-8')
try:
type_str = type_name(attrValue)
except Exception as ex:
logger.exception(ex)
type_str = "<???>"
nameItem = QtWidgets.QTableWidgetItem(attrName)
nameItem.setToolTip(attrName)
table.setItem(row, self.COL_ATTR_NAME, nameItem)
valItem = QtWidgets.QTableWidgetItem(attrStr)
valItem.setToolTip(attrStr)
table.setItem(row, self.COL_VALUE, valItem)
table.setItem(row, self.COL_ELEM_TYPE, QtWidgets.QTableWidgetItem(type_str))
table.resizeRowToContents(row)
verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
finally:
table.setUpdatesEnabled(True) | Draws the attributes of the currentRTI |
def c_typedefs(self):
"""Get the typedefs of the module."""
defs = []
attrs = self.opts.attrs + '\n' if self.opts.attrs else ''
for name, args in self.funcs:
logging.debug('name: %s args: %s', name, args)
defs.append(
'typedef\n{}\n{}{}({});\n'.format(
args[0], attrs,
self._c_type_name(name), make_c_args(args[2])
)
)
return defs | Get the typedefs of the module. |
def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'):
"""Save the assembled model as an RST file for literate modeling.
Parameters
----------
file_name : Optional[str]
The name of the file to save the RST in.
Default: pysb_model.rst
module_name : Optional[str]
The name of the python function defining the module.
Default: pysb_module
"""
if self.model is not None:
with open(file_name, 'wt') as fh:
fh.write('.. _%s:\n\n' % module_name)
fh.write('Module\n======\n\n')
fh.write('INDRA-assembled model\n---------------------\n\n')
fh.write('::\n\n')
model_str = pysb.export.export(self.model, 'pysb_flat')
model_str = '\t' + model_str.replace('\n', '\n\t')
fh.write(model_str) | Save the assembled model as an RST file for literate modeling.
Parameters
----------
file_name : Optional[str]
The name of the file to save the RST in.
Default: pysb_model.rst
module_name : Optional[str]
The name of the python function defining the module.
Default: pysb_module |
def getTerms(self, term=None, getFingerprint=None, startIndex=0, maxResults=10):
"""Get term objects
Args:
term, str: A term in the retina (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful
"""
return self._terms.getTerm(self._retina, term, getFingerprint, startIndex, maxResults) | Get term objects
Args:
term, str: A term in the retina (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful |
def QA_indicator_BBI(DataFrame, N1=3, N2=6, N3=12, N4=24):
    'Bull and Bear Index (BBI)'
C = DataFrame['close']
bbi = (MA(C, N1) + MA(C, N2) + MA(C, N3) + MA(C, N4)) / 4
DICT = {'BBI': bbi}
    return pd.DataFrame(DICT) | Bull and Bear Index (BBI)
def datetime(
self, year, month, day, hour=0, minute=0, second=0, microsecond=0
): # type: (int, int, int, int, int, int, int) -> datetime
"""
Return a normalized datetime for the current timezone.
"""
if _HAS_FOLD:
return self.convert(
datetime(year, month, day, hour, minute, second, microsecond, fold=1)
)
return self.convert(
datetime(year, month, day, hour, minute, second, microsecond),
dst_rule=POST_TRANSITION,
) | Return a normalized datetime for the current timezone. |
def assert_valid_path(self, path):
"""
Ensures that the path represents an existing file
@type path: str
@param path: path to check
"""
if not isinstance(path, str):
raise NotFoundResourceException(
"Resource passed to load() method must be a file path")
if not os.path.isfile(path):
raise NotFoundResourceException(
'File "{0}" does not exist'.format(path)) | Ensures that the path represents an existing file
@type path: str
@param path: path to check |
def set_led(self, led_number, led_value):
"""
Set front-panel controller LEDs. The DS3 controller has four, labelled, LEDs on the front panel that can
be either on or off.
:param led_number:
Integer between 1 and 4
:param led_value:
Value, set to 0 to turn the LED off, 1 to turn it on
"""
        if led_number < 1 or led_number > 4:
return
write_led_value(hw_id=self.device_unique_name, led_name='sony{}'.format(led_number), value=led_value) | Set front-panel controller LEDs. The DS3 controller has four, labelled, LEDs on the front panel that can
be either on or off.
:param led_number:
Integer between 1 and 4
:param led_value:
Value, set to 0 to turn the LED off, 1 to turn it on |
def slice_image(image, axis=None, idx=None):
"""
Slice an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = ants.slice_image(mni, axis=1, idx=100)
"""
if image.dimension < 3:
raise ValueError('image must have at least 3 dimensions')
inpixeltype = image.pixeltype
ndim = image.dimension
if image.pixeltype != 'float':
image = image.clone('float')
libfn = utils.get_lib_fn('sliceImageF%i' % ndim)
itkimage = libfn(image.pointer, axis, idx)
return iio.ANTsImage(pixeltype='float', dimension=ndim-1,
components=image.components, pointer=itkimage).clone(inpixeltype) | Slice an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = ants.slice_image(mni, axis=1, idx=100) |
def _quantize_channelwise_linear(weight, nbits, axis=0):
"""
Linearly quantize weight blob.
:param weight: numpy.array
Weight to be quantized.
:param nbits: int
Number of bits per weight element
:param axis: int
Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
Returns
-------
quantized_weight: numpy.array
quantized weight as float numpy array, with the same shape as weight
scale: numpy.array
per channel scale
bias: numpy.array
per channel bias
"""
if len(weight.shape) == 1: # vector situation, treat as 1 channel
weight = weight.reshape((1, weight.shape[0]))
rank = len(weight.shape)
if axis == 1:
transposed_axis_order = (1,0) + tuple(range(2,rank))
weight = _np.transpose(weight, transposed_axis_order)
num_channels = weight.shape[0]
shape = weight.shape
weight = weight.reshape((num_channels, -1)) # [C, L]
a = _np.amin(weight, axis=-1) # [C,]
b = _np.amax(weight, axis=-1) # [C,]
# Quantize weights to full range [0, (1 << nbits) - 1]
qa = 0
qb = (1 << nbits) - 1
# Use a mask to filter out channels with very close weight values
    mask = (b - a) > 1e-5  # [C,] (normal channels)
r_mask = ~mask # (all-same-value) channels
qw = _np.zeros_like(weight) # [C, L]
scale = _np.ones((num_channels,))
bias = _np.zeros((num_channels,))
if _np.any(mask): # normal channels
qw[mask] = (weight[mask] - a[mask][:,None]) / (b[mask] - a[mask])[:,None] * (qb - qa) + qa
scale[mask] = (b[mask] - a[mask]) / (qb - qa)
bias[mask] = - scale[mask] * qa + a[mask]
if _np.any(r_mask): # singular channels
qw[r_mask] = qa
scale[r_mask] = 0
bias[r_mask] = a[r_mask]
# Reshape
quantized_weight = qw.reshape(shape)
if axis == 1:
quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
return (quantized_weight, scale, bias) | Linearly quantize weight blob.
:param weight: numpy.array
Weight to be quantized.
:param nbits: int
Number of bits per weight element
:param axis: int
Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
Returns
-------
quantized_weight: numpy.array
quantized weight as float numpy array, with the same shape as weight
scale: numpy.array
per channel scale
bias: numpy.array
per channel bias |
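Because this implementation stores the affine-mapped values without rounding, dequantizing with the returned scale and bias reproduces the input up to float error, which makes a quick sanity check (a sketch, assuming the function is importable as named):

import numpy as np

w = np.random.randn(4, 16).astype(np.float32)             # 4 output channels
qw, scale, bias = _quantize_channelwise_linear(w, nbits=8, axis=0)
w_hat = qw * scale[:, None] + bias[:, None]               # dequantize per channel
assert np.allclose(w, w_hat, atol=1e-4)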
def limit(self, n, skip=None):
"""
        Limit the result set. If the query set already has a limit defined,
        this raises an exception.
:Parameters:
- n : The maximum number of rows returned
- skip: how many rows to skip
:Return: a new QuerySet object so we can chain operations
"""
if self.query.limit is not None:
raise MonSQLException('LIMIT already defined')
new_query_set = self.clone()
new_query_set.query.limit = n
new_query_set.query.skip = skip
        return new_query_set | Limit the result set. If the query set already has a limit defined,
this raises an exception.
:Parameters:
- n : The maximum number of rows returned
- skip: how many rows to skip
:Return: a new QuerySet object so we can chain operations |
def close_project(self):
"""
Close current project and return to a window without an active
project
"""
if self.current_active_project:
self.switch_to_plugin()
if self.main.editor is not None:
self.set_project_filenames(
self.main.editor.get_open_filenames())
path = self.current_active_project.root_path
self.current_active_project = None
self.set_option('current_project_path', None)
self.setup_menu_actions()
self.sig_project_closed.emit(path)
self.sig_pythonpath_changed.emit()
if self.dockwidget is not None:
self.set_option('visible_if_project_open',
self.dockwidget.isVisible())
self.dockwidget.close()
self.explorer.clear()
self.restart_consoles() | Close current project and return to a window without an active
project |
def __make_thumbnail(self, width, height):
"""
Create the page's thumbnail
"""
(w, h) = self.size
factor = max(
(float(w) / width),
(float(h) / height)
)
w /= factor
h /= factor
return self.get_image((round(w), round(h))) | Create the page's thumbnail |
def _get_sv_exclude_file(items):
"""Retrieve SV file of regions to exclude.
"""
sv_bed = utils.get_in(items[0], ("genome_resources", "variation", "sv_repeat"))
if sv_bed and os.path.exists(sv_bed):
return sv_bed | Retrieve SV file of regions to exclude. |
def _preprocess(project_dict):
"""Pre-process certain special keys to convert them from None values
into empty containers, and to turn strings into arrays of strings.
"""
handlers = {
('archive',): _list_if_none,
('on-run-start',): _list_if_none_or_string,
('on-run-end',): _list_if_none_or_string,
}
for k in ('models', 'seeds'):
handlers[(k,)] = _dict_if_none
handlers[(k, 'vars')] = _dict_if_none
handlers[(k, 'pre-hook')] = _list_if_none_or_string
handlers[(k, 'post-hook')] = _list_if_none_or_string
handlers[('seeds', 'column_types')] = _dict_if_none
def converter(value, keypath):
if keypath in handlers:
handler = handlers[keypath]
return handler(value)
else:
return value
return deep_map(converter, project_dict) | Pre-process certain special keys to convert them from None values
into empty containers, and to turn strings into arrays of strings. |
def build_options(payload, options, maxsize = 576, overload = OVERLOAD_FILE | OVERLOAD_SNAME, allowpartial = True):
'''
Split a list of options
This is the reverse operation of `reassemble_options`, it splits `dhcp_option` into
`dhcp_option_partial` if necessary, and set overload option if field overloading is
used.
:param options: a list of `dhcp_option`
:param maxsize: Limit the maximum DHCP message size. If options cannot fit into the DHCP
message, specified fields are overloaded for options. If options cannot
fit after overloading, extra options are DROPPED if allowpartial = True.
It is important to sort the dhcp options by priority.
:param overload: fields that are allowed to be overloaded
:param allowpartial: When options cannot fit into the DHCP message, allow the rest options
to be dropped.
:return: Number of options that are dropped i.e. `options[:-return_value]` are dropped
'''
if maxsize < 576:
maxsize = 576
max_options_size = maxsize - 240
# Ignore OPTION_PAD and OPTION_END
options = [o for o in options if o.tag not in (OPTION_PAD, OPTION_END)]
# Only preserve data
option_data = [(o.tag, o._tobytes()[2:]) for o in options]
def split_options(option_data, limits):
"""
Split options into multiple fields
:param option_data: list of (tag, data) pair
:param limits: list of int for limit of each field (excluding PAD and END)
:return: number of options that are dropped
"""
# List of (dhcp_option_partial, option_not_finished)
partial_options = []
buffers = [0]
        if not option_data:
return ([], 0)
def create_result():
# Remove any unfinished partial options
while partial_options and partial_options[-1][1]:
partial_options.pop()
buffers.append(len(partial_options))
r = [[po for po,_ in partial_options[buffers[i]:buffers[i+1]]] for i in range(0, len(buffers) - 1)]
# Remove empty fields
while r and not r[-1]:
r.pop()
return r
# Current field used size
current_size = 0
limit_iter = iter(limits)
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
            return ([], len(option_data))  # no field available: every option is dropped
for i, (tag, data) in enumerate(option_data):
# Current used data size
data_size = 0
# Do not split very small options on boundary, this may prevent some broken DHCP clients/servers
# to cause problem
nosplit = (len(data) <= 32)
while True:
# next partial option size should be:
# 1. no more than the current field limit (minus 1-byte tag and 1-byte length)
# 2. no more than the single dhcp_option_partial data limit (255 due to single byte length)
# 3. no more than the rest data size
next_size = min(next_limit - current_size - 2, 255, len(data) - data_size)
if next_size < 0 or (next_size == 0 and data_size < len(data)) \
or (next_size < len(data) - data_size and nosplit):
# Cannot put this part of data on the current field, find the next field
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return (create_result(), len(option_data) - i)
# Record field boundary
buffers.append(len(partial_options))
current_size = 0
else:
# Put this partial option on current field
partial_options.append((dhcp_option_partial(tag = tag, data = data[data_size : data_size + next_size]),
(next_size < len(data) - data_size)))
data_size += next_size
current_size += next_size + 2
if data_size >= len(data):
# finished current option
break
return (create_result(), 0)
# First try to fit all options in options field
# preserve a byte for OPTION_END
result, not_finished = split_options(option_data, [max_options_size - 1])
if not_finished:
if overload & (OVERLOAD_FILE | OVERLOAD_SNAME):
# Try overload
        # minus an overload option (1-byte tag, 1-byte length, 1-byte dhcp_overload) and 1-byte OPTION_END
limits = [max_options_size - 4]
if overload & OVERLOAD_FILE:
# preserve a byte for OPTION_END
limits.append(127)
if overload & OVERLOAD_SNAME:
# preserve a byte for OPTION_END
limits.append(63)
result2, not_finished2 = split_options(option_data, limits)
# Only overload if we have a better result
if len(result2) > 1:
result = result2
not_finished = not_finished2
if not allowpartial and not_finished:
raise ValueError("%d options cannot fit into a DHCP message" % (not_finished,))
if not result:
return not_finished
elif len(result) <= 1:
# No overload
payload.options = result[0] + [dhcp_option_partial(tag = OPTION_END)]
else:
overload_option = 0
if len(result) >= 2 and result[1]:
overload_option |= OVERLOAD_FILE
# overload file field
payload.file = dhcp_option_partial[0].tobytes(result[1] + [dhcp_option_partial(tag = OPTION_END)])
if len(result) >= 3 and result[2]:
overload_option |= OVERLOAD_SNAME
# overload sname field
payload.sname = dhcp_option_partial[0].tobytes(result[2] + [dhcp_option_partial(tag = OPTION_END)])
# Put an overload option before any other options
payload.options = [dhcp_option_partial(tag = OPTION_OVERLOAD, data = dhcp_overload.tobytes(overload_option))] \
+ result[0] + [dhcp_option_partial(tag = OPTION_END)]
return not_finished | Split a list of options
This is the reverse operation of `reassemble_options`, it splits `dhcp_option` into
`dhcp_option_partial` if necessary, and set overload option if field overloading is
used.
:param options: a list of `dhcp_option`
:param maxsize: Limit the maximum DHCP message size. If options cannot fit into the DHCP
message, specified fields are overloaded for options. If options cannot
fit after overloading, extra options are DROPPED if allowpartial = True.
It is important to sort the dhcp options by priority.
:param overload: fields that are allowed to be overloaded
:param allowpartial: When options cannot fit into the DHCP message, allow the rest options
to be dropped.
:return: Number of options that are dropped i.e. `options[:-return_value]` are dropped |
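The binding constraint inside `split_options` is that one `dhcp_option_partial` carries at most 255 bytes of data (a single length byte) plus a 2-byte header in its field. A stripped-down sketch of just that chunking rule, without field overloading (RFC 3396 long-options style):

def chunk_option(tag, data, max_chunk=255):
    # Yield (tag, chunk) pairs; a receiver concatenates consecutive
    # partial options with the same tag to reassemble the value.
    for i in range(0, len(data), max_chunk):
        yield (tag, data[i:i + max_chunk])

parts = list(chunk_option(0x79, b'x' * 600))
assert [len(d) for _, d in parts] == [255, 255, 90]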
def run_cmd(cmd, return_output=False, ignore_status=False, log_output=True, **kwargs):
"""
run provided command on host system using the same user as you invoked this code, raises
subprocess.CalledProcessError if it fails
:param cmd: list of str
:param return_output: bool, return output of the command
:param ignore_status: bool, do not fail in case nonzero return code
:param log_output: bool, if True, log output to debug log
:param kwargs: pass keyword arguments to subprocess.check_* functions; for more info,
please check `help(subprocess.Popen)`
:return: None or str
"""
logger.debug('command: "%s"' % ' '.join(cmd))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, **kwargs)
output = process.communicate()[0]
if log_output:
logger.debug(output)
if process.returncode > 0:
if ignore_status:
if return_output:
return output
else:
return process.returncode
else:
raise subprocess.CalledProcessError(cmd=cmd, returncode=process.returncode)
if return_output:
return output | run provided command on host system using the same user as you invoked this code, raises
subprocess.CalledProcessError if it fails
:param cmd: list of str
:param return_output: bool, return output of the command
:param ignore_status: bool, do not fail in case nonzero return code
:param log_output: bool, if True, log output to debug log
:param kwargs: pass keyword arguments to subprocess.check_* functions; for more info,
please check `help(subprocess.Popen)`
:return: None or str |
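A usage sketch (commands are illustrative):

# Capture output and tolerate a nonzero exit code.
listing = run_cmd(['ls', '/nonexistent'], return_output=True, ignore_status=True)

# Default behaviour: raises subprocess.CalledProcessError on failure.
run_cmd(['uname', '-a'])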
def send_media_group(chat_id, media,
reply_to_message_id=None, disable_notification=False,
**kwargs):
"""
Use this method to send a group of photos or videos as an album. On success, an array of the sent Messages is returned.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param media: A list of InputMedia objects to be sent, must include 2–10 items
:param reply_to_message_id: If the message is a reply, ID of the original message
:param disable_notification: Sends the messages silently. Users will receive a notification with no sound.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type media: `list` of :class:`InputMedia`
:type reply_to_message_id: int
:returns: On success, an array of the sent Messages is returned.
:rtype: TelegramBotRPCRequest
"""
files = []
if len(media) < 2 or len(media) > 10:
raise ValueError('media must contain between 2 and 10 InputMedia items')
    for i, entry in enumerate(media):
        if isinstance(entry.media, InputFile):
            files.append(entry.media)  # Queue for multipart/form-data POSTing
            media[i].media = "attach://{}".format(entry.media[1][0])  # Replace with the file name and add to attachments
# required args
params = dict(
chat_id=chat_id,
media=json.dumps(media)
)
# optional args
params.update(
_clean_params(
reply_to_message_id=reply_to_message_id,
disable_notification=disable_notification,
)
)
return TelegramBotRPCRequest('sendMediaGroup', params=params, files=files, on_result=lambda result: [Message.from_result(message) for message in result], **kwargs) | Use this method to send a group of photos or videos as an album. On success, an array of the sent Messages is returned.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param media: A list of InputMedia objects to be sent, must include 2–10 items
:param reply_to_message_id: If the message is a reply, ID of the original message
:param disable_notification: Sends the messages silently. Users will receive a notification with no sound.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type media: `list` of :class:`InputMedia`
:type reply_to_message_id: int
:returns: On success, an array of the sent Messages is returned.
:rtype: TelegramBotRPCRequest |
def common_req(self, execute, send_body=True):
"Common code for GET and POST requests"
self._SERVER = {'CLIENT_ADDR_HOST': self.client_address[0],
'CLIENT_ADDR_PORT': self.client_address[1]}
self._to_log = True
self._cmd = None
self._payload = None
self._path = None
self._payload_params = None
self._query_params = {}
self._fragment = None
(cmd, res, req) = (None, None, None)
try:
try:
path = self._pathify() # pylint: disable-msg=W0612
cmd = path[1:]
res = execute(cmd)
except HttpReqError, e:
e.report(self)
except Exception:
try:
self.send_exception(500) # XXX 500
except Exception: # pylint: disable-msg=W0703
pass
raise
else:
if not isinstance(res, HttpResponse):
req = self.build_response()
if send_body:
req.add_data(res)
req.set_send_body(send_body)
else:
req = res
self.end_response(req)
except socket.error, e:
if e.errno in (errno.ECONNRESET, errno.EPIPE):
return
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
except Exception: # pylint: disable-msg=W0703
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
finally:
del req, res | Common code for GET and POST requests |
def derivative(self, x):
"""Derivative of the broadcast operator.
Parameters
----------
x : `domain` element
The point to take the derivative in
Returns
-------
adjoint : linear `BroadcastOperator`
The derivative
Examples
--------
Example with an affine operator:
>>> I = odl.IdentityOperator(odl.rn(3))
>>> residual_op = I - I.domain.element([1, 1, 1])
>>> op = BroadcastOperator(residual_op, 2 * residual_op)
Calling operator offsets by ``[1, 1, 1]``:
>>> x = [1, 2, 3]
>>> op(x)
ProductSpace(rn(3), 2).element([
[ 0., 1., 2.],
[ 0., 2., 4.]
])
The derivative of this affine operator does not have an offset:
>>> op.derivative(x)(x)
ProductSpace(rn(3), 2).element([
[ 1., 2., 3.],
[ 2., 4., 6.]
])
"""
return BroadcastOperator(*[op.derivative(x) for op in
self.operators]) | Derivative of the broadcast operator.
Parameters
----------
x : `domain` element
The point to take the derivative in
Returns
-------
adjoint : linear `BroadcastOperator`
The derivative
Examples
--------
Example with an affine operator:
>>> I = odl.IdentityOperator(odl.rn(3))
>>> residual_op = I - I.domain.element([1, 1, 1])
>>> op = BroadcastOperator(residual_op, 2 * residual_op)
Calling operator offsets by ``[1, 1, 1]``:
>>> x = [1, 2, 3]
>>> op(x)
ProductSpace(rn(3), 2).element([
[ 0., 1., 2.],
[ 0., 2., 4.]
])
The derivative of this affine operator does not have an offset:
>>> op.derivative(x)(x)
ProductSpace(rn(3), 2).element([
[ 1., 2., 3.],
[ 2., 4., 6.]
]) |
def cancel_registration(self):
"""
        Cancels the current client's account with the server.
        Even if the cancellation is successful, this method will raise an
        exception because the account no longer exists on the server, so the
        client will fail.
To continue with the execution, this method should be surrounded by a
try/except statement.
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.SET,
payload=xso.Query()
)
iq.payload.remove = True
        yield from self.client.send(iq) | Cancels the current client's account with the server.
Even if the cancellation is successful, this method will raise an
exception because the account no longer exists on the server, so the
client will fail.
To continue with the execution, this method should be surrounded by a
try/except statement. |
def masked_arith_op(x, y, op):
"""
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
"""
# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
# the logic valid for both Series and DataFrame ops.
xrav = x.ravel()
assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
# PeriodIndex.ravel() returns int64 dtype, so we have
# to work around that case. See GH#19956
yrav = y if is_period_dtype(y) else y.ravel()
mask = notna(xrav) & notna(yrav)
if yrav.shape != mask.shape:
# FIXME: GH#5284, GH#5035, GH#19448
# Without specifically raising here we get mismatched
# errors in Py3 (TypeError) vs Py2 (ValueError)
            # Note: only an issue in the DataFrame case
raise ValueError('Cannot broadcast operands together.')
if mask.any():
with np.errstate(all='ignore'):
result[mask] = op(xrav[mask],
com.values_from_object(yrav[mask]))
else:
assert is_scalar(y), type(y)
assert isinstance(x, np.ndarray), type(x)
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
# 1 ** np.nan is 1. So we have to unmask those.
if op == pow:
mask = np.where(x == 1, False, mask)
elif op == rpow:
mask = np.where(y == 1, False, mask)
if mask.any():
with np.errstate(all='ignore'):
result[mask] = op(xrav[mask], y)
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape) # 2D compat
return result | If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator |
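The masking pattern is easy to see on a tiny input: operate only where both operands are non-null and leave NaN elsewhere (a sketch of the idea, not pandas internals):

import numpy as np

x = np.array([1.0, np.nan, 3.0])
y = np.array([2.0, 2.0, np.nan])
mask = ~np.isnan(x) & ~np.isnan(y)
out = np.full(x.shape, np.nan)
out[mask] = x[mask] + y[mask]
# out -> array([ 3., nan, nan])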
def _get_columns(self, X, cols):
"""
Get a subset of columns from the given table X.
X a Pandas dataframe; the table to select columns from
cols a string or list of strings representing the columns
to select
Returns a numpy array with the data from the selected columns
"""
if isinstance(X, DataSet):
X = X[cols]
return_vector = False
if isinstance(cols, basestring):
return_vector = True
cols = [cols]
if isinstance(X, list):
X = [x[cols] for x in X]
X = pd.DataFrame(X)
if return_vector:
t = X[cols[0]]
else:
t = X.as_matrix(cols)
return t | Get a subset of columns from the given table X.
X a Pandas dataframe; the table to select columns from
cols a string or list of strings representing the columns
to select
Returns a numpy array with the data from the selected columns |
def valid_level(value):
"""Validation function for parser, logging level argument."""
value = value.upper()
if getattr(logging, value, None) is None:
raise argparse.ArgumentTypeError("%s is not a valid level" % value)
return value | Validation function for parser, logging level argument. |
def get_filetypes(filelist, path=None, size=os.path.getsize):
""" Get a sorted list of file types and their weight in percent
from an iterable of file names.
@return: List of weighted file extensions (no '.'), sorted in descending order
@rtype: list of (weight, filetype)
"""
path = path or (lambda _: _)
# Get total size for each file extension
histo = defaultdict(int)
for entry in filelist:
ext = os.path.splitext(path(entry))[1].lstrip('.').lower()
if ext and ext[0] == 'r' and ext[1:].isdigit():
ext = "rar"
elif ext == "jpeg":
ext = "jpg"
elif ext == "mpeg":
ext = "mpg"
histo[ext] += size(entry)
# Normalize values to integer percent
total = sum(histo.values())
if total:
for ext, val in histo.items():
histo[ext] = int(val * 100.0 / total + .499)
return sorted(zip(histo.values(), histo.keys()), reverse=True) | Get a sorted list of file types and their weight in percent
from an iterable of file names.
@return: List of weighted file extensions (no '.'), sorted in descending order
@rtype: list of (weight, filetype) |
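A usage sketch with a fake `size` callable so nothing touches the filesystem (file names and sizes are made up):

sizes = {'a.mkv': 700, 'b.srt': 1, 'c.r00': 150, 'c.r01': 149}
weights = get_filetypes(sizes, size=sizes.__getitem__)
# -> [(70, 'mkv'), (30, 'rar'), (0, 'srt')]  ('.rNN' pieces fold into 'rar')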
def to_frame(self, *args):
"""Convert the cells in the view into a DataFrame object.
If ``args`` is not given, this method returns a DataFrame that
has an Index or a MultiIndex depending of the number of
cells parameters and columns each of which corresponds to each
cells included in the view.
``args`` can be given to calculate cells values and limit the
DataFrame indexes to the given arguments.
The cells in this view may have different number of parameters,
but parameters shared among multiple cells
must appear in the same position in all the parameter lists.
For example,
Having ``foo()``, ``bar(x)`` and ``baz(x, y=1)`` is okay
because the shared parameter ``x`` is always the first parameter,
but this method does not work if the view has ``quz(x, z=2, y=1)``
cells in addition to the first three cells, because ``y`` appears
in different positions.
Args:
args(optional): multiple arguments,
or an iterator of arguments to the cells.
"""
if sys.version_info < (3, 6, 0):
from collections import OrderedDict
impls = OrderedDict()
for name, obj in self.items():
impls[name] = obj._impl
else:
impls = get_impls(self)
return _to_frame_inner(impls, args) | Convert the cells in the view into a DataFrame object.
If ``args`` is not given, this method returns a DataFrame that
has an Index or a MultiIndex depending of the number of
cells parameters and columns each of which corresponds to each
cells included in the view.
``args`` can be given to calculate cells values and limit the
DataFrame indexes to the given arguments.
The cells in this view may have different number of parameters,
but parameters shared among multiple cells
must appear in the same position in all the parameter lists.
For example,
Having ``foo()``, ``bar(x)`` and ``baz(x, y=1)`` is okay
because the shared parameter ``x`` is always the first parameter,
but this method does not work if the view has ``quz(x, z=2, y=1)``
cells in addition to the first three cells, because ``y`` appears
in different positions.
Args:
args(optional): multiple arguments,
or an iterator of arguments to the cells. |
def _update_object_map(self, obj_map):
"""stub"""
creation_time = obj_map['creationTime']
obj_map['creationTime'] = dict()
obj_map['creationTime']['year'] = creation_time.year
obj_map['creationTime']['month'] = creation_time.month
obj_map['creationTime']['day'] = creation_time.day
obj_map['creationTime']['hour'] = creation_time.hour
obj_map['creationTime']['minute'] = creation_time.minute
obj_map['creationTime']['second'] = creation_time.second
obj_map['creationTime']['microsecond'] = creation_time.microsecond | stub |
def invert(self):
""" Multiplying a matrix by its inverse produces the identity matrix.
"""
m = self.matrix
d = m[0] * m[4] - m[1] * m[3]
self.matrix = [
m[4] / d, -m[1] / d, 0,
-m[3] / d, m[0] / d, 0,
(m[3] * m[7] - m[4] * m[6]) / d,
-(m[0] * m[7] - m[1] * m[6]) / d,
1
] | Multiplying a matrix by its inverse produces the identity matrix. |
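For the row-major 2D affine layout used above, [a, b, 0, c, d, 0, tx, ty, 1], the inverse can be checked by hand (a sketch with illustrative values):

# Uniform scale by 2, then translate by (5, 7).
m = [2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 5.0, 7.0, 1.0]
d = m[0] * m[4] - m[1] * m[3]            # determinant of the 2x2 linear part
inv = [m[4] / d, -m[1] / d, 0,
       -m[3] / d, m[0] / d, 0,
       (m[3] * m[7] - m[4] * m[6]) / d,
       -(m[0] * m[7] - m[1] * m[6]) / d, 1]
# inv == [0.5, 0.0, 0, 0.0, 0.5, 0, -2.5, -3.5, 1]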
def setHintColor(self, color):
"""
Sets the hint color for this combo box provided its line edit is
an XLineEdit instance.
:param color | <QColor>
"""
lineEdit = self.lineEdit()
if isinstance(lineEdit, XLineEdit):
lineEdit.setHintColor(color) | Sets the hint color for this combo box provided its line edit is
an XLineEdit instance.
:param color | <QColor> |
def create_project_config_path(
path, mode=0o777, parents=False, exist_ok=False
):
"""Create new project configuration folder."""
# FIXME check default directory mode
project_path = Path(path).absolute().joinpath(RENKU_HOME)
project_path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
return str(project_path) | Create new project configuration folder. |
def _determine_supported_alleles(command, supported_allele_flag):
"""
Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports.
"""
try:
# convert to str since Python3 returns a `bytes` object
supported_alleles_output = check_output([
command, supported_allele_flag
])
supported_alleles_str = supported_alleles_output.decode("ascii", "ignore")
assert len(supported_alleles_str) > 0, \
'%s returned empty allele list' % command
supported_alleles = set([])
for line in supported_alleles_str.split("\n"):
line = line.strip()
if not line.startswith('#') and len(line) > 0:
try:
# We need to normalize these alleles (the output of the predictor
# when it lists its supported alleles) so that they are comparable with
# our own alleles.
supported_alleles.add(normalize_allele_name(line))
except AlleleParseError as error:
logger.info("Skipping allele %s: %s", line, error)
continue
if len(supported_alleles) == 0:
raise ValueError("Unable to determine supported alleles")
return supported_alleles
except Exception as e:
logger.exception(e)
raise SystemError("Failed to run %s %s. Possibly an incorrect executable version?" % (
command,
supported_allele_flag)) | Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports. |
def write_document(self, gh_user, doc_id, file_content, branch, author, commit_msg=None):
"""Given a document id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master...
"""
parent_sha = None
fc = tempfile.NamedTemporaryFile()
# N.B. we currently assume file_content is text/JSON, or should be serialized from a dict
if is_str_type(file_content):
fc.write(file_content)
else:
write_as_json(file_content, fc)
fc.flush()
try:
doc_filepath = self.path_for_doc(doc_id)
doc_dir = os.path.split(doc_filepath)[0]
if parent_sha is None:
self.checkout_master()
parent_sha = self.get_master_sha()
branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha, force_branch_name=True)
# create a document directory if this is a new doc EJM- what if it isn't?
if not os.path.isdir(doc_dir):
os.makedirs(doc_dir)
shutil.copy(fc.name, doc_filepath)
git(self.gitdir, self.gitwd, "add", doc_filepath)
if commit_msg is None:
commit_msg = "Update document '%s' via OpenTree API" % doc_id
try:
git(self.gitdir,
self.gitwd,
"commit",
author=author,
message=commit_msg)
except Exception as e:
# We can ignore this if no changes are new,
# otherwise raise a 400
if "nothing to commit" in e.message: # @EJM is this dangerous?
pass
else:
_LOG.exception('"git commit" failed')
self.reset_hard()
raise
new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD")
except Exception as e:
_LOG.exception('write_document exception')
raise GitWorkflowError("Could not write to document #%s ! Details: \n%s" % (doc_id, e.message))
finally:
fc.close()
return new_sha | Given a document id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master... |
def json_dumps(inbox):
"""
Serializes the first element of the input using the JSON protocol as
implemented by the ``json`` Python 2.6 library.
"""
gc.disable()
str_ = json.dumps(inbox[0])
gc.enable()
return str_ | Serializes the first element of the input using the JSON protocol as
implemented by the ``json`` Python 2.6 library. |
def _single_orbit_find_actions(orbit, N_max, toy_potential=None,
force_harmonic_oscillator=False):
"""
Find approximate actions and angles for samples of a phase-space orbit,
`w`, at times `t`. Uses toy potentials with known, analytic action-angle
transformations to approximate the true coordinates as a Fourier sum.
This code is adapted from Jason Sanders'
`genfunc <https://github.com/jlsanders/genfunc>`_
.. todo::
Wrong shape for w -- should be (6,n) as usual...
Parameters
----------
orbit : `~gala.dynamics.Orbit`
N_max : int
Maximum integer Fourier mode vector length, |n|.
toy_potential : Potential (optional)
Fix the toy potential class.
force_harmonic_oscillator : bool (optional)
Force using the harmonic oscillator potential as the toy potential.
"""
if orbit.norbits > 1:
raise ValueError("must be a single orbit")
if toy_potential is None:
toy_potential = fit_toy_potential(
orbit, force_harmonic_oscillator=force_harmonic_oscillator)
else:
logger.debug("Using *fixed* toy potential: {}"
.format(toy_potential.parameters))
if isinstance(toy_potential, IsochronePotential):
orbit_align = orbit.align_circulation_with_z()
w = orbit_align.w()
dxyz = (1, 2, 2)
circ = np.sign(w[0, 0]*w[4, 0]-w[1, 0]*w[3, 0])
sign = np.array([1., circ, 1.])
orbit = orbit_align
elif isinstance(toy_potential, HarmonicOscillatorPotential):
dxyz = (2, 2, 2)
sign = 1.
w = orbit.w()
else:
raise ValueError("Invalid toy potential.")
t = orbit.t.value
# Now find toy actions and angles
aaf = toy_potential.action_angle(orbit)
if aaf[0].ndim > 2:
aa = np.vstack((aaf[0].value[..., 0], aaf[1].value[..., 0]))
else:
aa = np.vstack((aaf[0].value, aaf[1].value))
if np.any(np.isnan(aa)):
ix = ~np.any(np.isnan(aa), axis=0)
aa = aa[:, ix]
t = t[ix]
warnings.warn("NaN value in toy actions or angles!")
if sum(ix) > 1:
raise ValueError("Too many NaN value in toy actions or angles!")
t1 = time.time()
A, b, nvecs = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2])
actions = np.array(solve(A,b))
logger.debug("Action solution found for N_max={}, size {} symmetric"
" matrix in {} seconds"
.format(N_max, len(actions), time.time()-t1))
t1 = time.time()
A, b, nvecs = _angle_prepare(aa, t, N_max, dx=dxyz[0],
dy=dxyz[1], dz=dxyz[2], sign=sign)
angles = np.array(solve(A, b))
logger.debug("Angle solution found for N_max={}, size {} symmetric"
" matrix in {} seconds"
.format(N_max, len(angles), time.time()-t1))
# Just some checks
if len(angles) > len(aa):
warnings.warn("More unknowns than equations!")
J = actions[:3] # * sign
theta = angles[:3]
freqs = angles[3:6] # * sign
return dict(actions=J*aaf[0].unit, angles=theta*aaf[1].unit,
freqs=freqs*aaf[2].unit,
Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs) | Find approximate actions and angles for samples of a phase-space orbit,
`w`, at times `t`. Uses toy potentials with known, analytic action-angle
transformations to approximate the true coordinates as a Fourier sum.
This code is adapted from Jason Sanders'
`genfunc <https://github.com/jlsanders/genfunc>`_
.. todo::
Wrong shape for w -- should be (6,n) as usual...
Parameters
----------
orbit : `~gala.dynamics.Orbit`
N_max : int
Maximum integer Fourier mode vector length, |n|.
toy_potential : Potential (optional)
Fix the toy potential class.
force_harmonic_oscillator : bool (optional)
Force using the harmonic oscillator potential as the toy potential. |
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri) | Retuns a new link relative to this resource. |
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf | Return the value for key for the field in the metadata |
def copy(self):
"""Return a deep copy"""
result = Scalar(self.size, self.deriv)
result.v = self.v
if self.deriv > 0: result.d[:] = self.d[:]
if self.deriv > 1: result.dd[:] = self.dd[:]
return result | Return a deep copy |
def return_features_numpy_base(dbpath, set_object, points_amt, names):
"""
Generic function which returns a 2d numpy array of extracted features
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
points_amt : int, number of data points in the database
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned
Returns
-------
return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature
is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not
supported.
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(1)
if names == 'all':
columns_amt = 0
for feature in tmp_object.features:
if type(tmp_object.features[feature]) is np.ndarray:
columns_amt += tmp_object.features[feature].shape[0]
else:
columns_amt += 1
return_array = np.zeros([points_amt, columns_amt])
for i in enumerate(session.query(set_object).order_by(set_object.id)):
counter = 0
for feature in i[1].features:
feature_val = i[1].features[feature]
if type(feature_val) is np.ndarray:
columns_amt = feature_val.shape[0]
return_array[i[0], counter:counter + columns_amt] = feature_val[:]
counter += feature_val.shape[0]
else:
return_array[i[0], counter] = feature_val
counter += 1
else:
columns_amt = 0
for feature in tmp_object.features:
if feature in names:
if type(tmp_object.features[feature]) is np.ndarray:
columns_amt += tmp_object.features[feature].shape[0]
else:
columns_amt += 1
return_array = np.zeros([points_amt, columns_amt])
for i in enumerate(session.query(set_object).order_by(set_object.id)):
counter = 0
for feature in i[1].features:
if feature in names:
feature_val = i[1].features[feature]
if type(feature_val) is np.ndarray:
columns_amt = feature_val.shape[0]
return_array[i[0], counter:counter + columns_amt] = feature_val[:]
counter += feature_val.shape[0]
else:
return_array[i[0], counter] = feature_val
counter += 1
session.close()
return return_array | Generic function which returns a 2d numpy array of extracted features
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
points_amt : int, number of data points in the database
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned
Returns
-------
return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature
is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not
supported. |
def running(concurrent=False):
'''
Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running
'''
ret = []
if concurrent:
return ret
active = __salt__['saltutil.is_running']('state.*')
for data in active:
err = (
'The function "{0}" is running as PID {1} and was started at '
'{2} with jid {3}'
).format(
data['fun'],
data['pid'],
salt.utils.jid.jid_to_time(data['jid']),
data['jid'],
)
ret.append(err)
return ret | Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running |
def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
description=None):
"""Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + \
(Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. |