Dataset preview — columns: row index (int64, 0–389k), code (string, 26–79.6k chars), docstring (string, 1–46.9k chars).
1,000
def domain(self, domain=None, last_domain=None):
    self._print_header()
    if domain:
        PyFunceble.INTERN["to_test"] = self._format_domain(domain)
    else:
        PyFunceble.INTERN["to_test"] = None
    if PyFunceble.INTERN["to_test"]:
        if PyFunceble.CONFIGURATION["syntax"]:
            status = self.syntax_status.get()
        else:
            status, _ = self.status.get()
        self._file_decision(PyFunceble.INTERN["to_test"], last_domain, status)
        if PyFunceble.CONFIGURATION["simple"]:
            print(PyFunceble.INTERN["to_test"], status)
        return PyFunceble.INTERN["to_test"], status
    return None
Manage the case where we want to test only one domain.

:param domain: The domain or IP to test.
:type domain: str
:param last_domain: The last domain to test if we are testing a file.
:type last_domain: str
1,001
def tvBrowserExposure_selection_changed(self):
    (is_compatible, desc) = self.get_layer_description_from_browser('exposure')  # purpose string assumed; literal elided in source
    self.lblDescribeBrowserExpLayer.setText(desc)
    self.parent.pbnNext.setEnabled(is_compatible)
Update layer description label.
1,002
def to_struct(self, value):
    if self.str_format:
        return value.strftime(self.str_format)
    return value.isoformat()
Cast `time` object to string.
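A minimal usage sketch of the two paths, using only the standard library (the '%H:%M' format here stands in for whatever the field's str_format is configured to):

    from datetime import time

    t = time(14, 30, 5)
    print(t.strftime("%H:%M"))  # '14:30'    -- taken when str_format is set
    print(t.isoformat())        # '14:30:05' -- taken when str_format is None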
1,003
def run(self):
    latest_track = Track.objects.all().order_by()  # ordering argument elided in the source
    latest_track = latest_track[0] if latest_track else None
    importer = self.get_importer()
    tracks = importer.run()
    for track in tracks:
        if not latest_track or not latest_track.last_played \
                or track.start_time > latest_track.last_played:
            obj = self.lookup_track(track)
            # the condition guarding credit creation was elided in the source; a truthiness check is assumed
            if obj:
                role = roles[0].role_priority if roles else 1
                obj.create_credit(track.artist, role)
            else:
                print "[%s-%s]: Not created as it already exists." % (track.title, track.artist)
            obj.last_played = track.start_time
            obj.save()
            print "[%s-%s]: Start time updated to %s." % (track.title, track.artist, track.start_time)
        else:
            print "[%s-%s]: Not created as it has a past start time of %s (latest %s)." % (track.title, track.artist, track.start_time, latest_track.last_played)
Run import.
1,004
def rgba(self, val):
    rgba = _user_to_rgba(val, expand=False)
    if self._rgba is None:
        self._rgba = rgba
    else:
        self._rgba[:, :rgba.shape[1]] = rgba
Set the color using an Nx4 array of RGBA floats
1,005
def apt_autoremove(purge=True, fatal=False):
    cmd = ['apt-get', '--assume-yes', 'autoremove']
    if purge:
        cmd.append('--purge')
    _run_apt_command(cmd, fatal)
Remove packages that were automatically installed to satisfy dependencies and are no longer needed. With purge=True, their configuration files are removed as well.
1,006
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
    if file_entry.IsRoot() and file_entry.type_indicator not in (
            self._TYPES_WITH_ROOT_METADATA):
        return
    if data_stream and not data_stream.IsDefault():
        return
    display_name = mediator.GetDisplayName()
    # log message and profiler label reconstructed; the literals were elided in the source
    logger.debug(
        'Extracting metadata from file entry: {0:s}'.format(display_name))
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
    if self._processing_profiler:
        self._processing_profiler.StartTiming('extracting')
    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
    if self._processing_profiler:
        self._processing_profiler.StopTiming('extracting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
Extracts metadata from a file entry.

Args:
    mediator (ParserMediator): mediates the interactions between parsers
        and other components, such as storage and abort signals.
    file_entry (dfvfs.FileEntry): file entry to extract metadata from.
    data_stream (dfvfs.DataStream): data stream or None if the file entry
        has no data stream.
1,007
def str_blockIndent(astr_buf, a_tabs=1, a_tabLength=4, **kwargs):
    str_tabBoundary = " "
    for key, value in kwargs.iteritems():
        if key == 'tabBoundary':  # keyword name assumed; literal elided in source
            str_tabBoundary = value
    b_trailN = False
    length = len(astr_buf)
    ch_trailN = astr_buf[length - 1]
    if ch_trailN == '\n':
        b_trailN = True
        astr_buf = astr_buf[0:length - 1]
    str_ret = astr_buf
    str_tab = ''
    str_Indent = ''
    # build one tab of <a_tabLength> spaces plus the boundary string
    for i in range(a_tabLength):
        str_tab = '%s ' % str_tab
    str_tab = "%s%s" % (str_tab, str_tabBoundary)
    for i in range(a_tabs):
        str_Indent = '%s%s' % (str_Indent, str_tab)
    str_ret = re.sub('\n', '\n%s' % str_Indent, astr_buf)
    str_ret = '%s%s' % (str_Indent, str_ret)
    if b_trailN:
        str_ret = str_ret + '\n'
    return str_ret
For the input string <astr_buf>, replace each '\n' with '\n<tab>', where the number of tabs is given by <a_tabs> and the tab length by <a_tabLength>. A trailing '\n' is *not* replaced.
1,008
def gen_method_keys(self, *args, **kwargs):
    token = args[0]
    for mro_type in type(token).__mro__[:-1]:
        name = mro_type.__name__
        yield name
Given a node, return the string to use in computing the matching visitor methodname. Can also be a generator of strings.
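A self-contained sketch of the MRO walk this relies on (the node classes here are illustrative assumptions):

    class Name: pass
    class Expr(Name): pass

    def gen_method_keys(token):
        # walk the type's MRO, dropping the trailing `object`
        for mro_type in type(token).__mro__[:-1]:
            yield mro_type.__name__

    print(list(gen_method_keys(Expr())))  # ['Expr', 'Name']

A visitor can then try visit_Expr first and fall back to visit_Name.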
1,009
def findBinomialNsWithLowerBoundSampleMinimum(confidence, desiredValuesSorted,
                                              p, numSamples, nMax):
    def P(n, numOccurrences):
        return 1 - SampleMinimumDistribution(
            numSamples, BinomialDistribution(n, p)).cdf(numOccurrences - 1)

    results = []
    n = 0
    for desiredValue in desiredValuesSorted:
        while n + 1 <= nMax and P(n + 1, desiredValue) < confidence:
            n += 1
        if n + 1 > nMax:
            break
        left = P(n, desiredValue)
        right = P(n + 1, desiredValue)
        interpolated = n + ((confidence - left) / (right - left))
        result = (interpolated, left, right)
        results.append(result)
    return results
For each desired value, find an approximate n for which the sample minimum
has a probabilistic lower bound equal to this value.

For each value, find an adjacent pair of n values whose lower bound sample
minima are below and above the desired value, respectively, and return a
linearly-interpolated n between these two values.

@param confidence (float)
For the probabilistic lower bound, this specifies the probability. If this
is 0.8, there is an 80% chance that the sample minimum is >= the desired
value, and a 20% chance that it is < the desired value.

@param p (float)
The p of the binomial distribution.

@param numSamples (int)
The number of samples in the sample minimum distribution.

@return
A list of results. Each result contains (interpolated_n, lower_value,
upper_value), where lower_value and upper_value are the probabilistic lower
bound sample minima for floor(interpolated_n) and ceil(interpolated_n)
respectively.
1,010
def get(self, id):
    document = self._get_document(id)
    if document:
        session_json = document["session"]
        return json_loads(session_json)
    return {}
Fetch data by id.

:param id: the id of the data to fetch
:return: the stored data, or an empty ``dict`` if nothing was found
1,011
def display_lookback_returns(self):
    return self.lookback_returns.apply(
        lambda x: x.map('{:,.2%}'.format), axis=1)  # format string assumed; literal elided in source
Displays the current lookback returns for each series.
1,012
def create_event(self, type, data, **kwargs):
    # string literals reconstructed from the Watson Discovery SDK conventions
    if type is None:
        raise ValueError('type must be provided')
    if data is None:
        raise ValueError('data must be provided')
    data = self._convert_model(data, EventData)

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('discovery', 'V1', 'create_event')
    headers.update(sdk_headers)

    params = {'version': self.version}
    data = {'type': type, 'data': data}
    url = '/v1/events'
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True)
    return response
Create event.

The **Events** API can be used to create log entries that are associated
with specific queries. For example, you can record which documents in the
results set were "clicked" by a user and when that click occurred.

:param str type: The event type to be created.
:param EventData data: Query event data object.
:param dict headers: A `dict` containing the request headers.
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
1,013
def aDiffCytoscape(df,aging_genes,target,species="caenorhabditis elegans",limit=None, cutoff=0.4,\ taxon=None,host=cytoscape_host,port=cytoscape_port): df=df.sort_values(by=["q_value"],ascending=True) df.reset_index(inplace=True, drop=True) tmp=df[:1999] df=tmp.copy() query_genes=df["ensembl_gene_id"].tolist() df["NormInt"]=df["value_1"]*df["value_2"] df["NormInt"]=df["NormInt"].apply(lambda x: np.log10(np.sqrt(x)) ) if not limit: limit=int(len(query_genes)*.25) def CheckEvidence(x,aging_genes=aging_genes): if x in aging_genes: res="aging_gene" else: res="no" return res df["evidence"]=df["ensembl_gene_id"].apply(lambda x:CheckEvidence(x) ) def FixInfs(x): if str(x) in ["-inf","inf"]: res=np.nan else: res=x return res df["NormInt"]=df["NormInt"].apply( lambda x: FixInfs(x) ) df["log2(fold_change)"]=df["log2(fold_change)"].apply( lambda x: FixInfs(x) ) taxons={"caenorhabditis elegans":"6239","drosophila melanogaster":"7227",\ "mus musculus":"10090","homo sapiens":"9606"} if not taxon: taxon=taxons[species] response=cytoscape("network", "list",\ host=host, port=port) if "networks" in response.keys(): response=response["networks"] if len(response) > 0: for r in response: rr=cytoscape("network", "destroy",{"network":"SUID:"+str(r)},\ host=host, port=port) query_genes=[ str(s) for s in query_genes ] response=cytoscape("string", "protein query",\ {"query":",".join(query_genes),\ "cutoff":str(cutoff),\ "species":species,\ "limit":str(limit),\ "taxonID":taxon},\ host=host, port=port) print("giving some time to cytoscape..") sys.stdout.flush() sleep(10) response=cytoscape("layout", "force-directed",\ {"defaultSpringCoefficient":".000004",\ "defaultSpringLength":"5"},\ host=host, port=port) response=loadTableData(df[["ensembl_gene_id","log2(fold_change)","NormInt","evidence"]].dropna(),\ df_key="ensembl_gene_id",table_key_column="query term",\ host=host, port=port) defaults_dic={"NODE_SHAPE":"ellipse",\ "NODE_SIZE":60,\ "NODE_FILL_COLOR":" "EDGE_TRANSPARENCY":120} defaults_list=simple_defaults(defaults_dic) NODE_LABEL=mapVisualProperty("NODE_LABEL","passthrough","display name",host=host, port=port) create_styles("dataStyle",defaults_list,[NODE_LABEL],host=host, port=port) response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},host=host, port=port) cmap = matplotlib.cm.get_cmap("bwr") norm = matplotlib.colors.Normalize(vmin=-4, vmax=4) min_color=matplotlib.colors.rgb2hex(cmap(norm(-4))) center_color=matplotlib.colors.rgb2hex(cmap(norm(0))) max_color=matplotlib.colors.rgb2hex(cmap(norm(4))) NODE_FILL_COLOR=mapVisualProperty(,,,\ lower=[-4,min_color],center=[0.0,center_color],upper=[4,max_color],\ host=host, port=port) NODE_SHAPE=mapVisualProperty(,,,discrete=[ ["aging_gene","no"], ["DIAMOND", "ellipse"] ],\ host=host, port=port) NODE_SIZE=mapVisualProperty(,,,discrete=[ ["aging_gene","no"], ["100.0","60.0"] ],\ host=host, port=port) update_style("dataStyle",mappings=[NODE_SIZE,NODE_SHAPE,NODE_FILL_COLOR],\ host=host, port=port) response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\ host=host, port=port) NormIntDf = getTableColumns(,[],host=host, port=port) if in NormIntDf.columns.tolist(): min_NormInt = min(NormIntDf.dropna()[].tolist()) max_NormInt = max(NormIntDf.dropna()[].tolist()) cent_NormInt = np.mean([min_NormInt,max_NormInt]) cmap = matplotlib.cm.get_cmap("Reds") norm = matplotlib.colors.Normalize(vmin=min_NormInt, vmax=max_NormInt) min_color=matplotlib.colors.rgb2hex(cmap(norm(np.mean([min_NormInt,max_NormInt])))) center_color=matplotlib.colors.rgb2hex(cmap(norm(cent_NormInt))) 
max_color=matplotlib.colors.rgb2hex(cmap(norm(max_NormInt))) NODE_BORDER_PAINT=mapVisualProperty(,,,\ lower=[min_NormInt,min_color],center=[np.mean([min_NormInt,max_NormInt]),center_color],upper=[max_NormInt,max_color],\ host=host, port=port) update_style("dataStyle",mappings=[NODE_BORDER_PAINT],\ host=host, port=port) response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\ host=host, port=port) NODE_BORDER_WIDTH=mapVisualProperty(,,,\ lower=[min_NormInt,2],center=[np.mean([min_NormInt,max_NormInt]),4],upper=[max_NormInt,8],\ host=host, port=port) update_style("dataStyle",mappings=[NODE_BORDER_WIDTH],\ host=host, port=port) response=cytoscape("vizmap", "apply", {"styles":"dataStyle"},\ host=host, port=port) response=cytoscape("network","rename",\ {"name":},\ host=host, port=port) response=cytoscape("network","select",\ {"edgeList":"all",\ "extendEdges":"true"},\ host=host, port=port) response=cytoscape("network","create",\ {"source":"current",\ "nodeList":"selected"},\ host=host, port=port) response=cytoscape("network","rename",\ {"name":},\ host=host, port=port) response=cytoscape("network","set current", {"network":"main String network (edges only)"},\ host=host, port=port) log2fcDf = getTableColumns(,[],host=host, port=port) if in log2fcDf.columns.tolist(): log2fcDf[]=log2fcDf[].apply(lambda x: abs(x)) log2fcDf=log2fcDf.sort_values(by=[],ascending=False) top_nodes=log2fcDf.index.tolist()[:int(len(log2fcDf)*.10)] response=cytoscape("network","select", {"nodeList":"name:"+",".join(top_nodes)},\ host=host, port=port) response=cytoscape("network","select", {"firstNeighbors":"",\ "direction":"any",\ "network":"current"},\ host=host, port=port) response=cytoscape("network","create", {"source":"current",\ "nodeList":"selected"},\ host=host, port=port) response=cytoscape("network","select", {"edgeList":"all",\ "extendEdges":"true"},\ host=host, port=port) response=cytoscape("network","delete", {"nodeList":"unselected"},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host,port=port) response=cytoscape("layout", "force-directed",\ host=host, port=port) response=cytoscape("network","rename",\ {"name":+str(int(len(log2fcDf)*.10))+},\ host=host, port=port) response=cytoscape("network","set current", {"network":"main String network (edges only)"},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) response=cytoscape("network","select", {"nodeList":"name:"+",".join(top_nodes)},\ host=host, port=port) response=cytoscape("diffusion","diffuse",host=host, port=port) response=cytoscape("network","create", {"source":"current",\ "nodeList":"selected"},\ host=host, port=port) response=cytoscape("network","select", {"edgeList":"all",\ "extendEdges":"true"},\ host=host, port=port) response=cytoscape("network","delete", {"nodeList":"unselected"},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) response=cytoscape("layout", "force-directed",host=host, port=port) response=cytoscape("network","rename",\ {"name":+str(int(len(log2fcDf)*.10))+},\ host=host, port=port) def MAKETMP(): (fd, f) = tempfile.mkstemp() f="/tmp/"+f.split("/")[-1] return f cys=MAKETMP() cyjs=MAKETMP() main_png=MAKETMP() main_pdf=MAKETMP() edg_png=MAKETMP() edg_pdf=MAKETMP() neig_png=MAKETMP() neig_pdf=MAKETMP() dif_png=MAKETMP() dif_pdf=MAKETMP() response=cytoscape("session", "save as" , \ {"file":cys},\ host=host, port=port) 
response=cytoscape("network", "export" , \ {"options":,\ "OutputFile":cyjs},\ host=host, port=port) response=cytoscape("network","set current", {"network":"main String network"},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) sleep(5) response=cytoscape("view", "export" , \ {"options":"PNG",\ "OutputFile":main_png},\ host=host, port=port) response=cytoscape("view", "export" , \ {"options":"PDF",\ "OutputFile":main_pdf},\ host=host, port=port) response=cytoscape("network","set current", {"network":"main String network (edges only)"},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) sleep(5) response=cytoscape("view", "export" , \ {"options":"PNG",\ "OutputFile":edg_png},\ host=host, port=port) response=cytoscape("view", "export" , \ {"options":"PDF",\ "OutputFile":edg_pdf},\ host=host, port=port) try: response=cytoscape("network","set current", {"network":+str(int(len(log2fcDf)*.10))+},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) sleep(5) response=cytoscape("view", "export" , \ {"options":"PNG",\ "OutputFile":neig_png},\ host=host, port=port) response=cytoscape("view", "export" , \ {"options":"PDF",\ "OutputFile":neig_pdf},\ host=host, port=port) except: print("No "+"changed firstNeighbors") sys.stdout.flush() try: response=cytoscape("network","set current", {"network":+str(int(len(log2fcDf)*.10))+},\ host=host, port=port) response=cytoscape("network","deselect",{"edgeList":"all", "nodeList":"all"},\ host=host, port=port) sleep(5) response=cytoscape("view", "export" , \ {"options":"PNG",\ "OutputFile":dif_png},\ host=host, port=port) response=cytoscape("view", "export" , \ {"options":"PDF",\ "OutputFile":dif_pdf},\ host=host, port=port) except: print("No "+"changed diffusion") sys.stdout.flush() ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host) ftp_client=ssh.open_sftp() for f, extension, local in zip([cys,cyjs,main_png,main_pdf,edg_png,edg_pdf,neig_png,neig_pdf,dif_png,dif_pdf],\ [".cys",".cyjs",".png",".pdf",".png",".pdf",".png",".pdf",".png",".pdf" ],\ [target+".cys",target+".cyjs",target+".main.png",target+".main.pdf",\ target+".main.edges.png",target+".main.edges.pdf",\ target+".topFirstNeighbors.png",target+".topFirstNeighbors.pdf",\ target+".topDiffusion.png",target+".topDiffusion.pdf"]): try: ftp_client.get(f+extension,local) ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("rm "+f+extension ) except: print("No "+local) sys.stdout.flush()
Plots tables from aDiff/cuffdiff into Cytoscape using STRING protein queries.
Uses the top changed genes as well as first neighbours and diffusion to
generate subnetworks.

:param df: df as output by aDiff for differential gene expression
:param aging_genes: ENS gene ids to be labeled with a diagonal
:param species: species for string app query, e.g. "caenorhabditis elegans", "drosophila melanogaster", "mus musculus", "homo sapiens"
:param limit: limit for string app query. Number of extra genes to recover. If None, limit=N(query_genes)*.25
:param cutoff: confidence cutoff for string app query. Default=0.4
:param taxon: taxon id for string app query. For the species shown above, the taxon id will be identified automatically
:param cytoscape_host: host address for cytoscape
:param cytoscape_port: cytoscape port
:param target: target destination for saving files without prefix, e.g. "/beegfs/group_bit/home/JBoucas/test/N2_vs_daf2"

:returns: nothing
1,014
def draw_zijderveld(self): self.fig1.clf() axis_bounds = [0, .1, 1, .85] self.zijplot = self.fig1.add_axes( axis_bounds, frameon=False, facecolor=, label=, zorder=0) self.zijplot.clear() self.zijplot.axis() self.zijplot.xaxis.set_visible(False) self.zijplot.yaxis.set_visible(False) self.MS = 6*self.GUI_RESOLUTION self.dec_MEC = self.dec_MFC = self.inc_MEC = self.inc_MFC = self.MS = 6*self.GUI_RESOLUTION self.zijdblock_steps = self.Data[self.s][] self.vds = self.Data[self.s][] self.zijplot.plot(self.CART_rot_good[:, 0], -1*self.CART_rot_good[:, 1], , markersize=self.MS, clip_on=False, picker=True, zorder=1) self.zijplot.plot(self.CART_rot_good[:, 0], -1*self.CART_rot_good[:, 2], , markersize=self.MS, clip_on=False, picker=True, zorder=1) for i in range(len(self.CART_rot_bad)): self.zijplot.plot(self.CART_rot_bad[:, 0][i], -1 * self.CART_rot_bad[:, 1][i], , mfc=, mec=self.dec_MEC, markersize=self.MS, clip_on=False, picker=False) self.zijplot.plot(self.CART_rot_bad[:, 0][i], -1 * self.CART_rot_bad[:, 2][i], , mfc=, mec=self.inc_MEC, markersize=self.MS, clip_on=False, picker=False) if self.preferences[]: for i in range(len(self.zijdblock_steps)): if int(self.preferences[]) != 1: if i != 0 and (i+1) % int(self.preferences[]) == 0: self.zijplot.text(self.CART_rot[i][0], -1*self.CART_rot[i][2], " %s" % ( self.zijdblock_steps[i]), fontsize=8*self.GUI_RESOLUTION, color=, ha=, va=) else: self.zijplot.text(self.CART_rot[i][0], -1*self.CART_rot[i][2], " %s" % ( self.zijdblock_steps[i]), fontsize=10*self.GUI_RESOLUTION, color=, ha=, va=) xmin, xmax = self.zijplot.get_xlim() if xmax < 0: xmax = 0 if xmin > 0: xmin = 0 props = dict(color=, linewidth=1.0, markeredgewidth=0.5) xlocs = array(list(arange(0.2, xmax, 0.2)) + list(arange(-0.2, xmin, -0.2))) if len(xlocs) > 0: xtickline, = self.zijplot.plot( xlocs, [0]*len(xlocs), linestyle=, marker=, **props) xtickline.set_clip_on(False) axxline, = self.zijplot.plot([xmin, xmax], [0, 0], **props) axxline.set_clip_on(False) TEXT = "" if self.COORDINATE_SYSTEM == : self.zijplot.text(xmax, 0, , fontsize=10, verticalalignment=) else: if self.ORTHO_PLOT_TYPE == : TEXT = " N" elif self.ORTHO_PLOT_TYPE == : TEXT = " E" else: TEXT = " x" self.zijplot.text(xmax, 0, TEXT, fontsize=10, verticalalignment=) ymin, ymax = self.zijplot.get_ylim() if ymax < 0: ymax = 0 if ymin > 0: ymin = 0 ylocs = array(list(arange(0.2, ymax, 0.2)) + list(arange(-0.2, ymin, -0.2))) if len(ylocs) > 0: ytickline, = self.zijplot.plot( [0]*len(ylocs), ylocs, linestyle=, marker=, **props) ytickline.set_clip_on(False) axyline, = self.zijplot.plot([0, 0], [ymin, ymax], **props) axyline.set_clip_on(False) TEXT1, TEXT2 = "", "" if self.COORDINATE_SYSTEM == : TEXT1, TEXT2 = " y", " z" else: if self.ORTHO_PLOT_TYPE == : TEXT1, TEXT2 = " E", " D" elif self.ORTHO_PLOT_TYPE == : TEXT1, TEXT2 = " S", " D" else: TEXT1, TEXT2 = " y", " z" self.zijplot.text(0, ymin, TEXT1, fontsize=10, color=, verticalalignment=) self.zijplot.text(0, ymin, , fontsize=10, color=, verticalalignment=) self.zijplot.text(0, ymin, TEXT2, fontsize=10, color=, verticalalignment=) if self.ORTHO_PLOT_TYPE == : STRING = "" self.fig1.text(0.01, 0.98, "Zijderveld plot: x = North", { : self.font_type, : 10*self.GUI_RESOLUTION, : , : , : }) elif self.ORTHO_PLOT_TYPE == : STRING = "" self.fig1.text(0.01, 0.98, "Zijderveld plot:: x = East", { : self.font_type, : 10*self.GUI_RESOLUTION, : , : , : }) elif self.ORTHO_PLOT_TYPE == : self.fig1.text(0.01, 0.98, "Zijderveld plot", { : self.font_type, : 10*self.GUI_RESOLUTION, : , : , : }) if in 
list(self.current_fit.pars.keys()) and type(self.current_fit.pars[]) != str: STRING = "X-axis rotated to best fit line declination (%.0f); " % ( self.current_fit.pars[]) else: STRING = "X-axis rotated to NRM (%.0f); " % ( self.zijblock[0][1]) else: self.fig1.text(0.01, 0.98, "Zijderveld plot", { : self.font_type, : 10*self.GUI_RESOLUTION, : , : , : }) STRING = "X-axis rotated to NRM (%.0f); " % (self.zijblock[0][1]) STRING = STRING+"NRM=%.2e " % (self.zijblock[0][3]) + self.fig1.text(0.01, 0.95, STRING, {: self.font_type, : 8 * self.GUI_RESOLUTION, : , : , : }) xmin, xmax = self.zijplot.get_xlim() ymin, ymax = self.zijplot.get_ylim() self.zij_xlim_initial = (xmin, xmax) self.zij_ylim_initial = (ymin, ymax) self.canvas1.draw()
Draws the Zijderveld plot in the GUI on canvas1.
1,015
def server_list(self):
    nt_ks = self.compute_conn
    ret = {}
    for item in nt_ks.servers.list():
        try:
            ret[item.name] = {
                'id': item.id,
                'name': item.name,
                'status': item.status,
                'accessIPv4': item.accessIPv4,
                'accessIPv6': item.accessIPv6,
                'flavor': {'id': item.flavor['id'],
                           'links': item.flavor['links']},
                'image': {'id': item.image['id'] if item.image else 'Boot From Volume',
                          'links': item.image['links'] if item.image else ''},
            }
        except TypeError:
            pass
    return ret
List servers
1,016
def reverse_dummies(self, X, mapping):
    out_cols = X.columns.values.tolist()
    mapped_columns = []
    for switch in mapping:
        col = switch.get('col')
        mod = switch.get('mapping')
        insert_at = out_cols.index(mod.columns[0])
        X.insert(insert_at, col, 0)
        positive_indexes = mod.index[mod.index > 0]
        for i in range(positive_indexes.shape[0]):
            existing_col = mod.columns[i]
            val = positive_indexes[i]
            X.loc[X[existing_col] == 1, col] = val
            mapped_columns.append(existing_col)
        X.drop(mod.columns, axis=1, inplace=True)
        out_cols = X.columns.values.tolist()
    return X
Convert dummy variables back into numerical variables.

Parameters
----------
X : DataFrame
mapping : list-like
    Contains mappings of the column to be transformed to its new columns
    and the values represented.

Returns
-------
numerical : DataFrame
1,017
def generate_context(context_file='cookiecutter.json', default_context=None, extra_context=None):
    context = OrderedDict([])
    try:
        with open(context_file) as file_handle:
            obj = json.load(file_handle, object_pairs_hook=OrderedDict)
    except ValueError as e:
        # the original error-wrapping message was elided in the source;
        # re-raise with the file path for context
        full_fpath = os.path.abspath(context_file)
        raise ContextDecodingException(
            'JSON decoding error while loading "{0}": "{1}"'.format(full_fpath, str(e)))

    # key the loaded object by the JSON file's stem, e.g. 'cookiecutter'
    file_name = os.path.split(context_file)[1]
    file_stem = file_name.split('.')[0]
    context[file_stem] = obj

    if default_context:
        apply_overwrites_to_context(obj, default_context)
    if extra_context:
        apply_overwrites_to_context(obj, extra_context)

    logger.debug('Context generated is {}'.format(context))
    return context
Generate the context for a Cookiecutter project template. Loads the JSON file as a Python object, with key being the JSON filename. :param context_file: JSON file containing key/value pairs for populating the cookiecutter's variables. :param default_context: Dictionary containing config to take into account. :param extra_context: Dictionary containing configuration overrides
1,018
def update(self, name: str, value=None, default=None, description: str = None):
    if name in self._vars:
        description = description or self._vars[name].description
        default = default or self._vars[name].default
    elif name == RESERVED_GROUP_NAME:  # hypothetical constant; the reserved-name literal was elided in the source
        raise ConfigError("'{}' is a reserved name for a group.".format(name))
    v = _Var(name, description=description, default=default, defined=False)
    v.value = value
    self._vars[name] = v
Like add, but can tolerate existing values; also updates the value. Mostly used for setting fields from imported INI files and modified CLI flags.
1,019
def get_max_bond_lengths(structure, el_radius_updates=None):
    jmnn = JmolNN(el_radius_updates=el_radius_updates)
    bonds_lens = {}
    els = sorted(structure.composition.elements, key=lambda x: x.Z)
    for i1 in range(len(els)):
        for i2 in range(len(els) - i1):
            bonds_lens[els[i1], els[i1 + i2]] = jmnn.get_max_bond_distance(
                els[i1].symbol, els[i1 + i2].symbol)
    return bonds_lens
Provides max bond length estimates for a structure based on the JMol table and algorithms. Args: structure: (structure) el_radius_updates: (dict) symbol->float to update atomic radii Returns: (dict) - (Element1, Element2) -> float. The two elements are ordered by Z.
1,020
def unlock(self):
    try:
        self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))  # lock filename assumed; literal elided in source
    except Exception:
        pass
    try:
        self.os.rmdir(self.tmp_dir)
    except Exception:
        pass
Remove current lock. This function does not crash if it is unable to properly delete the lock file and directory. The reason is that it should be allowed for multiple jobs running in parallel to unlock the same directory at the same time (e.g. when reaching their timeout limit).
1,021
def do_execute(self):
    self._stopped = False
    self._stopping = False
    not_finished_actor = self.owner.first_active
    pending_actors = []
    finished = False
    actor_result = None
    while not (self.is_stopping() or self.is_stopped()) and not finished:
        # determine the starting point of this pass
        if len(pending_actors) > 0:
            start_index = self.owner.index_of(pending_actors[-1].name)
        else:
            start_index = self.owner.index_of(not_finished_actor.name)
            not_finished_actor = None
        token = None
        last_active = -1
        if self.owner.active > 0:
            last_active = self.owner.last_active.index
        for i in range(start_index, last_active + 1):
            if self.is_stopped() or self.is_stopping():
                break
            curr = self.owner.actors[i]
            if curr.skip:
                continue
            if token is None:
                # no token: consume a pending output or produce a new one
                if isinstance(curr, OutputProducer) and curr.has_output():
                    pending_actors.pop()
                else:
                    actor_result = curr.execute()
                    if actor_result is not None:
                        self.owner.logger.error(
                            curr.full_name + " generated following error output:\n" + actor_result)
                        break
                if isinstance(curr, OutputProducer) and curr.has_output():
                    token = curr.output()
                else:
                    token = None
                if isinstance(curr, OutputProducer) and curr.has_output():
                    pending_actors.append(curr)
            else:
                # pass the token on and execute the actor
                curr.input = token
                actor_result = curr.execute()
                if actor_result is not None:
                    self.owner.logger.error(
                        curr.full_name + " generated following error output:\n" + actor_result)
                    break
                if isinstance(curr, OutputProducer):
                    if curr.has_output():
                        token = curr.output()
                    else:
                        token = None
                    if curr.has_output():
                        pending_actors.append(curr)
                else:
                    token = None
            if (i == self.owner.last_active.index) and (token is not None):
                if self._record_output:
                    self._recorded_output.append(token)
            if isinstance(curr, OutputProducer) and (token is None):
                break
        finished = (not_finished_actor is None) and (len(pending_actors) == 0)
    return actor_result
Actual execution of the director. :return: None if successful, otherwise error message :rtype: str
1,022
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.GET_TOKENS.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key
        },
        "creditCardTokenInformation": {
            "payerId": payer_id,
            "creditCardTokenId": credit_card_token_id,
            # strftime formats assumed; the literals were elided in the source
            "startDate": start_date.strftime("%Y-%m-%dT%H:%M:%S"),
            "endDate": end_date.strftime("%Y-%m-%dT%H:%M:%S")
        },
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)
Query previously created credit card tokens.

Args:
    payer_id:
    credit_card_token_id:
    start_date:
    end_date:

Returns:
1,023
def get_fptr(self):
    cmpfunc = ctypes.CFUNCTYPE(ctypes.c_int,
                               WPARAM,
                               LPARAM,
                               ctypes.POINTER(KBDLLHookStruct))
    return cmpfunc(self.handle_input)
Get the function pointer.
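The same pattern with plain ctypes, minus the Windows-specific hook types — a hedged sketch showing how CFUNCTYPE turns a Python callable into a C function pointer:

    import ctypes

    # return type first, then argument types
    CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)

    def handler(x):
        return x * 2

    fptr = CMPFUNC(handler)   # C-callable wrapper around the Python function
    print(fptr(21))           # 42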
1,024
def plot_labels(ax, label_fontsize=14,
                xlabel=None, xlabel_arg=None,
                ylabel=None, ylabel_arg=None,
                zlabel=None, zlabel_arg=None):
    # the fallback label literals were elided in the source; the axes' current labels are used
    xlabel = xlabel if xlabel is not None else ax.get_xlabel()
    ylabel = ylabel if ylabel is not None else ax.get_ylabel()
    xlabel_arg = dict_if_none(xlabel_arg)
    ylabel_arg = dict_if_none(ylabel_arg)
    ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)
    ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)
    if hasattr(ax, 'zaxis'):
        zlabel = zlabel if zlabel is not None else ax.get_zlabel()
        zlabel_arg = dict_if_none(zlabel_arg)
        ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)
Sets the label options of a matplotlib plot.

Args:
    ax: matplotlib axes
    label_fontsize(int): Size of the labels' font
    xlabel(str): The xlabel for the figure
    xlabel_arg(dict): Passed into matplotlib as xlabel arguments
    ylabel(str): The ylabel for the figure
    ylabel_arg(dict): Passed into matplotlib as ylabel arguments
    zlabel(str): The zlabel for the figure
    zlabel_arg(dict): Passed into matplotlib as zlabel arguments
1,025
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
    overlap = True
    while overlap:
        overlap = self._remove_overlap_sub(also_remove_contiguous)
    self._sort()
Merges any overlapping intervals. Args: also_remove_contiguous: treat contiguous (as well as overlapping) intervals as worthy of merging?
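A standalone sketch of the merge step such a _remove_overlap_sub performs (the surrounding interval-list class is not shown in the source):

    def merge_intervals(intervals):
        # merge overlapping [start, end] pairs after sorting by start
        merged = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged

    print(merge_intervals([[5, 8], [1, 3], [2, 4]]))  # [[1, 4], [5, 8]]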
1,026
def emissive_part_3x(self, tb=True):
    try:
        self._e3x = self._rad3x_t11 * (1 - self._r3x)
    except TypeError:
        LOG.warning(
            "Couldn't derive the emissive part\n" +
            "Please derive the reflectance prior to requesting the emissive part")
    if tb:
        return self.radiance2tb(self._e3x)
    else:
        return self._e3x
Get the emissive part of the 3.x band
1,027
def execute_ccm_remotely(remote_options, ccm_args):
    if not PARAMIKO_IS_AVAILABLE:
        logging.warn("Paramiko is not available: Skipping remote execution of CCM command")
        return None, None
    ssh_client = SSHClient(remote_options.ssh_host, remote_options.ssh_port,
                           remote_options.ssh_username, remote_options.ssh_password,
                           remote_options.ssh_private_key)
    # handle CCM arguments that require file transfers to the remote host
    for index, argument in enumerate(ccm_args):
        if "--dse-credentials" in argument:
            tokens = argument.split("=")
            credentials_path = os.path.join(os.path.expanduser("~"), ".ccm", ".dse.ini")
            if len(tokens) == 2:
                credentials_path = tokens[1]
            if not os.path.isfile(credentials_path):
                raise Exception("DSE Credentials File Does not Exist: %s" % credentials_path)
            ssh_client.put(credentials_path, ssh_client.ccm_config_dir)
            ccm_args[index] = "--dse-credentials"
        if "--ssl" in argument or "--node-ssl" in argument:
            tokens = argument.split("=")
            if len(tokens) != 2:
                raise Exception("Path is not Specified: %s" % argument)
            ssl_path = tokens[1]
            if not os.path.isdir(ssl_path):
                raise Exception("Path Does not Exist: %s" % ssl_path)
            remote_ssl_path = ssh_client.temp + os.path.basename(ssl_path)
            ssh_client.put(ssl_path, remote_ssl_path)
            ccm_args[index] = tokens[0] + "=" + remote_ssl_path
    return ssh_client.execute_ccm_command(ccm_args)
Execute CCM operation(s) remotely.

:return: A tuple describing the execution of the command:
    * output - The output of the execution, if the output was not displayed
    * exit_status - The exit status of the remotely executed script
:raises Exception: if invalid options are passed for `--dse-credentials`,
    `--ssl`, or `--node-ssl` when initiating a remote execution, or if an
    error occurred during the SSH connection
1,028
def check_guest_exist(check_index=0):
    def outer(f):
        @six.wraps(f)
        def inner(self, *args, **kw):
            userids = args[check_index]
            if isinstance(userids, list):
                userids = [uid.upper() for uid in userids]
                new_args = (args[:check_index] + (userids,) +
                            args[check_index + 1:])
            else:
                userids = userids.upper()
                new_args = (args[:check_index] + (userids,) +
                            args[check_index + 1:])
                userids = [userids]
            self._vmops.check_guests_exist_in_db(userids)
            return f(self, *new_args, **kw)
        return inner
    return outer
Check guest exist in database. :param check_index: The parameter index of userid(s), default as 0
1,029
def to_genshi(walker):
    text = []
    for token in walker:
        type = token["type"]
        if type in ("Characters", "SpaceCharacters"):
            text.append(token["data"])
        elif text:
            yield TEXT, "".join(text), (None, -1, -1)
            text = []
        if type in ("StartTag", "EmptyTag"):
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                           for attr, value in token["data"].items()])
            yield (START, (QName(name), attrs), (None, -1, -1))
            if type == "EmptyTag":
                type = "EndTag"
        if type == "EndTag":
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            yield END, QName(name), (None, -1, -1)
        elif type == "Comment":
            yield COMMENT, token["data"], (None, -1, -1)
        elif type == "Doctype":
            yield DOCTYPE, (token["name"], token["publicId"],
                            token["systemId"]), (None, -1, -1)
        else:
            pass
    if text:
        yield TEXT, "".join(text), (None, -1, -1)
Convert a tree to a genshi tree :arg walker: the treewalker to use to walk the tree to convert it :returns: generator of genshi nodes
1,030
def occupy(self, start, stop):
    self._occupied.append([start, stop])
    self._occupied.sort(key=lambda x: x[0])
Mark a given interval as occupied so that the manager could skip the values from ``start`` to ``stop`` (**inclusive**). :param start: beginning of the interval. :param stop: end of the interval. :type start: int :type stop: int
1,031
def get_confirmation_url(email, request, name="email_registration_confirm", **kwargs):
    return request.build_absolute_uri(
        reverse(name, kwargs={"code": get_confirmation_code(email, request, **kwargs)})
    )
Returns the confirmation URL
1,032
def load_nddata(self, ndd, naxispath=None):
    self.clear_metadata()
    ahdr = self.get_header()
    ahdr.update(ndd.meta)
    self.setup_data(ndd.data, naxispath=naxispath)
    if ndd.wcs is None:
        self.wcs = wcsinfo.wrapper_class(logger=self.logger)
        self.wcs.load_nddata(ndd)
Load from an astropy.nddata.NDData object.
1,033
def add(self, resource, replace=False):
    if isinstance(resource, collections.Iterable):
        for r in resource:
            self.resources.add(r, replace)
    else:
        self.resources.add(resource, replace)
Add a resource or an iterable collection of resources. Will throw a ValueError if the resource (i.e., same URI) already exists in the ResourceList, unless replace=True.
1,034
def get_user_list(host_name, client_name, client_pass):
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="getusrs",
                                values="whr=*")
    request_result = send_request(host_name, request)
    user_id_list = list()
    append_user_id = user_id_list.append
    if request_result is not None:
        user_list_xml = request_result.text
        tree = etree.parse(StringIO(user_list_xml))
        root = tree.getroot()
        xml_rows = root.findall("./result/row/usr")
        for xml_row in xml_rows:
            append_user_id(xml_row.text)
    return user_id_list
Pulls the list of users in a client. Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted. - client_name: The PServer client name. - client_pass: The PServer client's password. Output: - user_id_list: A python list of user ids.
1,035
def updateGroup(self, group, vendorSpecific=None):
    response = self.updateGroupResponse(group, vendorSpecific)
    return self._read_boolean_response(response)
See Also: updateGroupResponse() Args: group: vendorSpecific: Returns:
1,036
def time_at_shadow_length(day, latitude, multiplier):
    latitude_rad = radians(latitude)
    declination = radians(sun_declination(day))
    angle = arccot(
        multiplier + tan(abs(latitude_rad - declination))
    )
    numerator = sin(angle) - sin(latitude_rad) * sin(declination)
    denominator = cos(latitude_rad) * cos(declination)
    return degrees(acos(numerator / denominator)) / 15
Compute the time at which an object's shadow is a multiple of its length.

Specifically, determine the time the length of the shadow is a multiple of
the object's length + the length of the object's shadow at noon. This is
used in the calculation for Asr time. Hanafi uses a multiplier of 2, and
everyone else uses a multiplier of 1.

Algorithm taken almost directly from PrayTimes.org code.

:param day: The day for which to compute
:param latitude: The latitude of the place of interest
:param multiplier: The multiplier of the object's length
:returns: The floating point time delta between Zuhr and the time at which
    the length of the shadow is as defined
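A worked numeric check of the same formula, with sun_declination(day) replaced by an assumed +10° declination and a Hanafi multiplier of 2 at latitude 30°N:

    from math import radians, degrees, sin, cos, tan, acos, atan

    def arccot(x):
        return atan(1.0 / x)

    latitude_rad = radians(30.0)
    declination = radians(10.0)   # assumed; normally sun_declination(day)
    multiplier = 2                # Hanafi

    angle = arccot(multiplier + tan(abs(latitude_rad - declination)))
    numerator = sin(angle) - sin(latitude_rad) * sin(declination)
    denominator = cos(latitude_rad) * cos(declination)
    print(degrees(acos(numerator / denominator)) / 15)  # ~4.6 hours after Zuhr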
1,037
def chunked(data, chunksize):
    if chunksize < 1:
        raise ValueError("Chunksize must be at least 1!")
    if int(chunksize) != chunksize:
        raise ValueError("Chunksize needs to be an integer")
    res = []
    cur = []
    for e in data:
        cur.append(e)
        if len(cur) >= chunksize:
            res.append(cur)
            cur = []
    if cur:
        res.append(cur)
    return res
Returns a list of chunks containing at most ``chunksize`` elements of data.
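Quick usage check:

    print(chunked([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
    print(chunked([], 3))               # []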
1,038
def calcAFunc(self, MaggNow, AaggNow):
    verbose = self.verbose
    discard_periods = self.T_discard
    update_weight = 1. - self.DampingFac
    total_periods = len(MaggNow)

    # regress the log of aggregate savings on the log of aggregate market resources
    logAagg = np.log(AaggNow[discard_periods:total_periods])
    logMagg = np.log(MaggNow[discard_periods - 1:total_periods - 1])
    slope, intercept, r_value, p_value, std_err = stats.linregress(logMagg, logAagg)

    # damp the updating of the saving rule coefficients
    intercept = update_weight * intercept + (1.0 - update_weight) * self.intercept_prev
    slope = update_weight * slope + (1.0 - update_weight) * self.slope_prev
    AFunc = AggregateSavingRule(intercept, slope)

    self.intercept_prev = intercept
    self.slope_prev = slope
    if verbose:
        # message wording assumed; the literals were elided in the source
        print('intercept=' + str(intercept) + ', slope=' + str(slope) +
              ', r-sq=' + str(r_value ** 2))
    return AggShocksDynamicRule(AFunc)
Calculate a new aggregate savings rule based on the history of the aggregate savings and aggregate market resources from a simulation. Parameters ---------- MaggNow : [float] List of the history of the simulated aggregate market resources for an economy. AaggNow : [float] List of the history of the simulated aggregate savings for an economy. Returns ------- (unnamed) : CapDynamicRule Object containing a new savings rule
1,039
def audio_output_enumerate_devices(self):
    r = []
    head = libvlc_audio_output_list_get(self)
    if head:
        i = head
        while i:
            i = i.contents
            d = [{'id': libvlc_audio_output_device_id(self, i.name, d),
                  'longname': libvlc_audio_output_device_longname(self, i.name, d)}
                 for d in range(libvlc_audio_output_device_count(self, i.name))]
            r.append({'name': i.name, 'description': i.description, 'devices': d})
            i = i.next
        libvlc_audio_output_list_release(head)
    return r
Enumerate the defined audio output devices. @return: list of dicts {name:, description:, devices:}
1,040
def create_connection(self, *args, **kwargs):
    args = list(args)
    if len(args) == 0:
        kwargs['timeout'] = max(self._connect_timeout,
                                kwargs.get('timeout', self._connect_timeout * 10) / 10.0)
    elif len(args) == 1:
        args.append(self._connect_timeout)
    else:
        args[1] = max(self._connect_timeout, args[1] / 10.0)
    return super(PatroniSequentialThreadingHandler, self).create_connection(*args, **kwargs)
This method is trying to establish a connection with one of the zookeeper
nodes. Somehow the strategy "fail earlier and retry more often" works way
better compared to the original strategy "try to connect with the specified
timeout". Since we want to try connecting to zookeeper more often (with a
smaller connect_timeout), we have to override the `create_connection` method
in the `SequentialThreadingHandler` class (which is used by `kazoo.Client`).

:param args: always contains `tuple(host, port)` as the first element and
    could contain `connect_timeout` (negotiated session timeout) as the
    second element.
1,041
def _get_coordinator_for_group(self, consumer_group):
    if self.consumer_group_to_brokers.get(consumer_group) is None:
        yield self.load_consumer_metadata_for_group(consumer_group)
    returnValue(self.consumer_group_to_brokers.get(consumer_group))
Returns the coordinator (broker) for a consumer group. Returns the broker for the given consumer group, or raises ConsumerCoordinatorNotAvailableError.
1,042
def checksum(self):
    md5sum, md5sum64 = [], []
    for line in self.SLACKBUILDS_TXT.splitlines():
        if line.startswith(self.line_name):
            sbo_name = line[17:].strip()
        if line.startswith(self.line_md5_64):
            if sbo_name == self.name and line[26:].strip():
                md5sum64 = line[26:].strip().split()
        if line.startswith(self.line_md5):
            if sbo_name == self.name and line[19:].strip():
                md5sum = line[19:].strip().split()
    return self._select_md5sum_arch(md5sum, md5sum64)
Grab checksum string
1,043
def _all_same_area(self, dataset_ids):
    all_areas = []
    for ds_id in dataset_ids:
        for scn in self.scenes:
            ds = scn.get(ds_id)
            if ds is None:
                continue
            all_areas.append(ds.attrs.get('area'))
    all_areas = [area for area in all_areas if area is not None]
    return all(all_areas[0] == area for area in all_areas[1:])
Return True if all areas for the provided IDs are equal.
1,044
def open_file(self, path):
    if path:
        editor = self.tabWidget.open_document(path)
        editor.cursorPositionChanged.connect(
            self.on_cursor_pos_changed)
        self.recent_files_manager.open_file(path)
        self.menu_recents.update_actions()
Creates a new GenericCodeEdit, opens the requested file and adds it to the tab widget. :param path: Path of the file to open
1,045
def get_fragment(self, **kwargs):
    gen, namespaces, plan = self.get_fragment_generator(**kwargs)
    graph = ConjunctiveGraph()
    [graph.bind(prefix, u) for (prefix, u) in namespaces]
    [graph.add((s, p, o)) for (_, s, p, o) in gen]
    return graph
Return a complete fragment. :param gp: :return:
1,046
def sign_remote_certificate(argdic, **kwargs):
    # message strings reconstructed; the literals were elided in the source
    if 'signing_policy' not in argdic:
        return 'signing_policy must be specified'
    if not isinstance(argdic, dict):
        argdic = ast.literal_eval(argdic)
    signing_policy = {}
    if 'signing_policy' in argdic:
        signing_policy = _get_signing_policy(argdic['signing_policy'])
        if not signing_policy:
            return 'Signing policy {0} does not exist.'.format(argdic['signing_policy'])
        if isinstance(signing_policy, list):
            dict_ = {}
            for item in signing_policy:
                dict_.update(item)
            signing_policy = dict_
        if 'minions' in signing_policy:
            if '__pub_id' not in kwargs:
                return 'minion sending this request could not be identified'
            matcher = 'match.glob'
            if '@' in signing_policy['minions']:
                matcher = 'match.compound'
            if not __salt__[matcher](
                    signing_policy['minions'], kwargs['__pub_id']):
                return '{0} not permitted to use signing policy {1}'.format(
                    kwargs['__pub_id'], argdic['signing_policy'])
    try:
        return create_certificate(path=None, text=True, **argdic)
    except Exception as except_:
        return six.text_type(except_)
Request a certificate to be remotely signed according to a signing policy.

argdic:
    A dict containing all the arguments to be passed into the
    create_certificate function. This will become kwargs when passed to
    create_certificate.

kwargs:
    kwargs delivered from publish.publish

CLI Example:

.. code-block:: bash

    salt '*' x509.sign_remote_certificate argdic="{'public_key': '/etc/pki/www.key', 'signing_policy': 'www'}" __pub_id='www1'
1,047
def global_request(self, kind, data=None, wait=True):
    if wait:
        self.completion_event = threading.Event()
    m = Message()
    m.add_byte(cMSG_GLOBAL_REQUEST)
    m.add_string(kind)
    m.add_boolean(wait)
    if data is not None:
        m.add(*data)
    self._log(DEBUG, 'Sending global request "%s"' % kind)  # log wording assumed; literal elided in source
    self._send_user_message(m)
    if not wait:
        return None
    while True:
        self.completion_event.wait(0.1)
        if not self.active:
            return None
        if self.completion_event.isSet():
            break
    return self.global_response
Make a global request to the remote host. These are normally extensions to the SSH2 protocol. :param str kind: name of the request. :param tuple data: an optional tuple containing additional data to attach to the request. :param bool wait: ``True`` if this method should not return until a response is received; ``False`` otherwise. :return: a `.Message` containing possible additional data if the request was successful (or an empty `.Message` if ``wait`` was ``False``); ``None`` if the request was denied.
1,048
def readFromFile(self, filename):
    s = dict(np.load(filename))
    try:
        self.coeffs = s['coeffs'][()]
    except KeyError:
        self.coeffs = s
    try:
        self.opts = s['opts'][()]
    except KeyError:
        pass
    return self.coeffs
read the distortion coeffs from file
1,049
def postalCodeLookup(self, countryCode, postalCode):
    params = {"country": countryCode, "postalcode": postalCode}
    d = self._call("postalCodeLookupJSON", params)
    d.addCallback(operator.itemgetter("postalcodes"))
    return d
Looks up locations for this country and postal code.
1,050
def is_connectable(host: str, port: Union[int, str]) -> bool:
    socket_ = None
    try:
        socket_ = socket.create_connection((host, port), 1)
        result = True
    except socket.timeout:
        result = False
    finally:
        if socket_:
            socket_.close()
    return result
Tries to connect to the device to see if it is connectable. Args: host: The host to connect. port: The port to connect. Returns: True or False.
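Example usage (the host and port are illustrative):

    if is_connectable("127.0.0.1", 22):
        print("port 22 reachable")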
1,051
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
    secret = _lazysecret(secret) if lazy else secret
    encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
    try:
        # re-pad the urlsafe base64 payload before decoding
        padded = ciphertext + ('=' * (len(ciphertext) % 4))
        decoded = base64.urlsafe_b64decode(str(padded))
        plaintext = encobj.decrypt(decoded)
    except (TypeError, binascii.Error):
        raise InvalidKeyError("invalid key")
    if checksum:
        try:
            crc, plaintext = (base64.urlsafe_b64decode(
                plaintext[-8:]), plaintext[:-8])
        except (TypeError, binascii.Error):
            raise CheckSumError("checksum mismatch")
        if not crc == _pack_crc(plaintext):
            raise CheckSumError("checksum mismatch")
    return plaintext
Decrypts ciphertext with secret.

ciphertext     - encrypted content to decrypt
secret         - secret to decrypt ciphertext
inital_vector  - initial vector
lazy           - pad secret if less than legal blocksize (default: True)
checksum       - verify crc32 byte encoded checksum (default: True)

returns plaintext
1,052
def clean_comment(self):
    comment = self.cleaned_data["text"]
    if settings.COMMENTS_ALLOW_PROFANITIES is False:
        bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
        if bad_words:
            raise forms.ValidationError(ungettext(
                "Watch your mouth! The word %s is not allowed here.",
                "Watch your mouth! The words %s are not allowed here.",
                len(bad_words)) % get_text_list(
                    ['"%s%s%s"' % (i[0], '*' * (len(i) - 2), i[-1])
                     for i in bad_words], ugettext('and')))
    return comment
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't contain anything in PROFANITIES_LIST.
1,053
def appendInputWithNSimilarValues(inputs, numNear=10):
    numInputs = len(inputs)
    skipOne = False
    for i in xrange(numInputs):
        input = inputs[i]
        numChanged = 0
        newInput = copy.deepcopy(input)
        for j in xrange(len(input) - 1):
            if skipOne:
                skipOne = False
                continue
            if input[j] == 1 and input[j + 1] == 0:
                newInput[j] = 0
                newInput[j + 1] = 1
                inputs.append(newInput)
                newInput = copy.deepcopy(newInput)
                numChanged += 1
                skipOne = True
                if numChanged == numNear:
                    break
Creates a neighboring record for each record in the inputs and adds new records at the end of the inputs list
1,054
def new_action(project_id):
    # table/field names and messages below are assumed; the string literals were elided in the source
    project = get_data_or_404('project', project_id)
    if project['user_id'] != get_current_user_id():
        return jsonify(message='forbidden'), 403
    form = NewActionForm()
    if not form.validate_on_submit():
        return jsonify(errors=form.errors), 400
    data = form.data
    data['project_id'] = project_id
    id = add_instance('action', **data)
    if not id:
        return jsonify(errors={'name': []}), 400
    action = get_data_or_404('action', id)
    return jsonify(**action)
Add action.
1,055
def parse_args(self, command_selected, flags, _free_args):
    configs = self.function_name_to_configs[command_selected]
    suggested_configs = self.function_name_to_suggest_configs[command_selected]
    attribute_to_config = dict()
    for config in itertools.chain(configs, suggested_configs):
        for attribute in config.get_attributes():
            if attribute in attribute_to_config:
                raise ValueError("attribute [{}] double".format(attribute))
            attribute_to_config[attribute] = config
    unknown_flags = []
    for flag_raw, value in flags.items():
        edit = value.startswith('=')  # edit-prefix assumed; the literal was elided in the source
        if flag_raw not in attribute_to_config:
            unknown_flags.append(flag_raw)
            continue
        config = attribute_to_config[flag_raw]
        param = config.get_param_by_name(flag_raw)
        if edit:
            v = param.s2t_generate_from_default(value[1:])
        else:
            v = param.s2t(value)
        setattr(config, flag_raw, v)
    missing_parameters = []
    for config in configs:
        for attribute in config.get_attributes():
            value = getattr(config, attribute)
            if value is NO_DEFAULT:
                missing_parameters.append(attribute)
    if unknown_flags or missing_parameters:
        if missing_parameters:
            print()
            print_warn("missing parameters [{}]".format(",".join(missing_parameters)))
        if unknown_flags:
            print()
            print_warn("unknown flags [{}]".format(",".join(unknown_flags)))
        print("problems found, not running")
        print()
        self.show_help_for_function(command_selected, show_help_full=False, show_help_suggest=False)
        return False
    # replace any remaining Param placeholders with their defaults
    for config in itertools.chain(configs, self._configs):
        for attribute in config.get_attributes():
            param = getattr(config, attribute)
            if isinstance(param, Param):
                if param.default is not NO_DEFAULT:
                    setattr(config, attribute, param.default)
    return True
Parse the args and fill the global data. Currently we disregard the free
parameters.

:param command_selected:
:param flags:
:param _free_args:
:return:
1,056
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    def _retry_on_exception_inner_1(f):
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise
                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)
        return _retry_on_exception_inner_2
    return _retry_on_exception_inner_1
If the decorated function raises exception exc_type, allow num_retries retry attempts before raise the exception.
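A hedged usage sketch — the decorated function and its path are illustrative; log, INFO and time come from the surrounding module:

    @retry_on_exception(3, base_delay=2, exc_type=IOError)
    def read_state():
        return open("/var/lib/app/state").read()  # retried up to 3 times on IOError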
1,057
def load_emacs_open_in_editor_bindings():
    registry = Registry()
    registry.add_binding(Keys.ControlX, Keys.ControlE,
                         filter=EmacsMode() & ~HasSelection())(
        get_by_name('edit-and-execute-command'))
    return registry
Pressing C-X C-E will open the buffer in an external editor.
1,058
def installSite(self):
    for iface, priority in self.__getPowerupInterfaces__([]):
        self.store.powerUp(self, iface, priority)
Not using the dependency system for this class because it's only installed via the command line, and multiple instances can be installed.
1,059
def assemble_notification_request(method, params=tuple()):
    if not isinstance(method, (str, unicode)):
        raise TypeError("method must be a string.")  # message wording assumed; literal elided in source
    if not isinstance(params, (tuple, list)):
        raise TypeError("params must be a tuple/list.")
    return {
        "method": method,
        "params": params,
        "id": None
    }
serialize a JSON-RPC-Notification :Parameters: see dumps_request :Returns: | {"method": "...", "params": ..., "id": null} | "method", "params" and "id" are always in this order. :Raises: see dumps_request
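For example, notifying 'update' with one positional parameter yields the wire object:

    print(assemble_notification_request("update", (42,)))
    # {'method': 'update', 'params': (42,), 'id': None}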
1,060
def _drawContents(self, currentRti=None):
    table = self.table
    table.setUpdatesEnabled(False)
    try:
        table.clearContents()
        verticalHeader = table.verticalHeader()
        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
        attributes = currentRti.attributes if currentRti is not None else {}
        table.setRowCount(len(attributes))
        for row, (attrName, attrValue) in enumerate(sorted(attributes.items())):
            attrStr = to_string(attrValue, decode_bytes='utf-8')  # encoding assumed; literal elided in source
            try:
                type_str = type_name(attrValue)
            except Exception as ex:
                logger.exception(ex)
                type_str = "<???>"
            nameItem = QtWidgets.QTableWidgetItem(attrName)
            nameItem.setToolTip(attrName)
            table.setItem(row, self.COL_ATTR_NAME, nameItem)
            valItem = QtWidgets.QTableWidgetItem(attrStr)
            valItem.setToolTip(attrStr)
            table.setItem(row, self.COL_VALUE, valItem)
            table.setItem(row, self.COL_ELEM_TYPE, QtWidgets.QTableWidgetItem(type_str))
            table.resizeRowToContents(row)
        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
    finally:
        table.setUpdatesEnabled(True)
Draws the attributes of the currentRTI
1,061
def c_typedefs(self):
    defs = []
    attrs = self.opts.attrs + ' ' if self.opts.attrs else ''
    for name, args in self.funcs:
        logging.debug('name=%s args=%s', name, args)  # log format assumed; literal elided in source
        # the typedef template literal was elided; a function-pointer typedef is assumed
        defs.append(
            'typedef {} ({}*{})({});'.format(
                args[0], attrs, self._c_type_name(name), make_c_args(args[2])
            )
        )
    return defs
Get the typedefs of the module.
1,062
def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'):
    if self.model is not None:
        with open(file_name, 'wt') as fh:
            fh.write('.. _%s:\n\n' % module_name)  # label format assumed; literal elided in source
            # three further header writes were elided in the source here
            model_str = pysb.export.export(self.model, 'pysb_flat')
            model_str = '::\n\n' + model_str.replace('\n', '\n    ')  # RST literal block assumed
            fh.write(model_str)
Save the assembled model as an RST file for literate modeling. Parameters ---------- file_name : Optional[str] The name of the file to save the RST in. Default: pysb_model.rst module_name : Optional[str] The name of the python function defining the module. Default: pysb_module
1,063
def getTerms(self, term=None, getFingerprint=None, startIndex=0, maxResults=10):
    return self._terms.getTerm(self._retina, term, getFingerprint,
                               startIndex, maxResults)
Get term objects Args: term, str: A term in the retina (optional) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) Returns: list of Term Raises: CorticalioException: if the request was not successful
1,064
def QA_indicator_BBI(DataFrame, N1=3, N2=6, N3=12, N4=24):
    C = DataFrame['close']
    bbi = (MA(C, N1) + MA(C, N2) + MA(C, N3) + MA(C, N4)) / 4
    DICT = {'BBI': bbi}
    return pd.DataFrame(DICT)
Bull and Bear Index (BBI): the average of four moving averages.
1,065
def datetime(
    self, year, month, day, hour=0, minute=0, second=0, microsecond=0
):
    if _HAS_FOLD:
        return self.convert(
            datetime(year, month, day, hour, minute, second, microsecond, fold=1)
        )
    return self.convert(
        datetime(year, month, day, hour, minute, second, microsecond),
        dst_rule=POST_TRANSITION,
    )
Return a normalized datetime for the current timezone.
1,066
def assert_valid_path(self, path):
    if not isinstance(path, str):
        raise NotFoundResourceException(
            "Resource passed to load() method must be a file path")
    if not os.path.isfile(path):
        raise NotFoundResourceException(
            'Resource "{}" does not exist'.format(path))  # message wording assumed; literal elided in source
Ensures that the path represents an existing file @type path: str @param path: path to check
1,067
def set_led(self, led_number, led_value):
    # the original chained comparison (1 > led_number > 4) was never true; a range check is intended
    if not 1 <= led_number <= 4:
        return
    write_led_value(hw_id=self.device_unique_name,
                    led_name='led{}'.format(led_number),  # name pattern assumed; literal elided in source
                    value=led_value)
Set front-panel controller LEDs. The DS3 controller has four, labelled, LEDs on the front panel that can be either on or off. :param led_number: Integer between 1 and 4 :param led_value: Value, set to 0 to turn the LED off, 1 to turn it on
1,068
def slice_image(image, axis=None, idx=None):
    if image.dimension < 3:
        raise ValueError('image must have at least 3 dimensions')
    inpixeltype = image.pixeltype
    ndim = image.dimension
    if image.pixeltype != 'float':
        image = image.clone('float')
    libfn = utils.get_lib_fn('sliceImageF%i' % ndim)
    itkimage = libfn(image.pointer, axis, idx)
    return iio.ANTsImage(pixeltype='float', dimension=ndim - 1,
                         components=image.components,
                         pointer=itkimage).clone(inpixeltype)
Slice an image. Example ------- >>> import ants >>> mni = ants.image_read(ants.get_data('mni')) >>> mni2 = ants.slice_image(mni, axis=1, idx=100)
1,069
def _quantize_channelwise_linear(weight, nbits, axis=0):
    if len(weight.shape) == 1:
        weight = weight.reshape((1, weight.shape[0]))
    rank = len(weight.shape)
    if axis == 1:
        transposed_axis_order = (1, 0) + tuple(range(2, rank))
        weight = _np.transpose(weight, transposed_axis_order)

    num_channels = weight.shape[0]
    shape = weight.shape
    weight = weight.reshape((num_channels, -1))

    a = _np.amin(weight, axis=-1)
    b = _np.amax(weight, axis=-1)
    qa = 0
    qb = (1 << nbits) - 1
    mask = (b - a) > 1e-5
    r_mask = ~mask

    qw = _np.zeros_like(weight)
    scale = _np.ones((num_channels,))
    bias = _np.zeros((num_channels,))
    if _np.any(mask):
        qw[mask] = (weight[mask] - a[mask][:, None]) / (b[mask] - a[mask])[:, None] * (qb - qa) + qa
        scale[mask] = (b[mask] - a[mask]) / (qb - qa)
        bias[mask] = - scale[mask] * qa + a[mask]
    if _np.any(r_mask):
        qw[r_mask] = qa
        scale[r_mask] = 0
        bias[r_mask] = a[r_mask]

    quantized_weight = qw.reshape(shape)
    if axis == 1:
        quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
    return (quantized_weight, scale, bias)
Linearly quantize weight blob. :param weight: numpy.array Weight to be quantized. :param nbits: int Number of bits per weight element :param axis: int Axis of the weight blob to compute channel-wise quantization, can be 0 or 1 Returns ------- quantized_weight: numpy.array quantized weight as float numpy array, with the same shape as weight scale: numpy.array per channel scale bias: numpy.array per channel bias
1,070
def limit(self, n, skip=None):
    if self.query.limit is not None:
        raise MonSQLException('LIMIT is already set')  # message wording assumed; literal elided in source
    new_query_set = self.clone()
    new_query_set.query.limit = n
    new_query_set.query.skip = skip
    return new_query_set
Limit the result set. If the query set already has a limit set, this raises an exception. :Parameters: - n : the maximum number of rows returned - skip: how many rows to skip :Return: a new QuerySet object so operations can be chained
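Chained usage, assuming a MonSQL-style table object (the table variable and its find method are assumptions):

# page 3 of a listing, 10 rows per page
rows = table.find({'status': 'active'}).limit(10, skip=20)
for row in rows:
    print(row)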
1,071
def close_project(self):
    if self.current_active_project:
        self.switch_to_plugin()
        if self.main.editor is not None:
            self.set_project_filenames(
                self.main.editor.get_open_filenames())
        path = self.current_active_project.root_path
        self.current_active_project = None
        self.set_option('current_project_path', None)  # config option names assumed
        self.setup_menu_actions()
        self.sig_project_closed.emit(path)
        self.sig_pythonpath_changed.emit()
        if self.dockwidget is not None:
            self.set_option('visible_if_project_open',
                            self.dockwidget.isVisible())
            self.dockwidget.close()
        self.explorer.clear()
        self.restart_consoles()
Close current project and return to a window without an active project
1,072
def __make_thumbnail(self, width, height):
    (w, h) = self.size
    factor = max(
        (float(w) / width),
        (float(h) / height)
    )
    w /= factor
    h /= factor
    return self.get_image((round(w), round(h)))
Create the page's thumbnail
1,073
def _get_sv_exclude_file(items):
    sv_bed = utils.get_in(items[0], ("genome_resources", "variation", "sv_repeat"))
    if sv_bed and os.path.exists(sv_bed):
        return sv_bed
Retrieve SV file of regions to exclude.
1,074
def _preprocess(project_dict):
    # keypath names ('archive', 'on-run-start', 'on-run-end', 'models', 'seeds',
    # 'vars', hooks, 'column_types') assumed from dbt's project preprocessing
    handlers = {
        ('archive',): _list_if_none,
        ('on-run-start',): _list_if_none_or_string,
        ('on-run-end',): _list_if_none_or_string,
    }
    for k in ('models', 'seeds'):
        handlers[(k,)] = _dict_if_none
        handlers[(k, 'vars')] = _dict_if_none
        handlers[(k, 'pre-hook')] = _list_if_none_or_string
        handlers[(k, 'post-hook')] = _list_if_none_or_string
    handlers[('seeds', 'column_types')] = _dict_if_none

    def converter(value, keypath):
        if keypath in handlers:
            handler = handlers[keypath]
            return handler(value)
        else:
            return value

    return deep_map(converter, project_dict)
Pre-process certain special keys to convert them from None values into empty containers, and to turn strings into arrays of strings.
1,075
def build_options(payload, options, maxsize=576, overload=OVERLOAD_FILE | OVERLOAD_SNAME, allowpartial=True):
    if maxsize < 576:
        maxsize = 576
    max_options_size = maxsize - 240
    options = [o for o in options if o.tag not in (OPTION_PAD, OPTION_END)]
    option_data = [(o.tag, o._tobytes()[2:]) for o in options]

    def split_options(option_data, limits):
        partial_options = []
        buffers = [0]
        if not options:
            return ([], 0)

        def create_result():
            while partial_options and partial_options[-1][1]:
                partial_options.pop()
            buffers.append(len(partial_options))
            r = [[po for po, _ in partial_options[buffers[i]:buffers[i + 1]]]
                 for i in range(0, len(buffers) - 1)]
            while r and not r[-1]:
                r.pop()
            return r

        current_size = 0
        limit_iter = iter(limits)
        try:
            next_limit = next(limit_iter)
        except (StopIteration, GeneratorExit):
            return ([], False)
        for i, (tag, data) in enumerate(option_data):
            data_size = 0
            nosplit = (len(data) <= 32)
            while True:
                next_size = min(next_limit - current_size - 2, 255, len(data) - data_size)
                if next_size < 0 or (next_size == 0 and data_size < len(data)) \
                        or (next_size < len(data) - data_size and nosplit):
                    try:
                        next_limit = next(limit_iter)
                    except (StopIteration, GeneratorExit):
                        return (create_result(), len(option_data) - i)
                    buffers.append(len(partial_options))
                    current_size = 0
                else:
                    partial_options.append((dhcp_option_partial(tag=tag, data=data[data_size: data_size + next_size]),
                                            (next_size < len(data) - data_size)))
                    data_size += next_size
                    current_size += next_size + 2
                    if data_size >= len(data):
                        break
        return (create_result(), 0)

    result, not_finished = split_options(option_data, [max_options_size - 1])
    if not_finished:
        if overload & (OVERLOAD_FILE | OVERLOAD_SNAME):
            limits = [max_options_size - 4]
            if overload & OVERLOAD_FILE:
                limits.append(127)
            if overload & OVERLOAD_SNAME:
                limits.append(63)
            result2, not_finished2 = split_options(option_data, limits)
            if len(result2) > 1:
                result = result2
                not_finished = not_finished2
    if not allowpartial and not_finished:
        raise ValueError("%d options cannot fit into a DHCP message" % (not_finished,))
    if not result:
        return not_finished
    elif len(result) <= 1:
        payload.options = result[0] + [dhcp_option_partial(tag=OPTION_END)]
    else:
        overload_option = 0
        if len(result) >= 2 and result[1]:
            overload_option |= OVERLOAD_FILE
            payload.file = dhcp_option_partial[0].tobytes(result[1] + [dhcp_option_partial(tag=OPTION_END)])
        if len(result) >= 3 and result[2]:
            overload_option |= OVERLOAD_SNAME
            payload.sname = dhcp_option_partial[0].tobytes(result[2] + [dhcp_option_partial(tag=OPTION_END)])
        payload.options = [dhcp_option_partial(tag=OPTION_OVERLOAD, data=dhcp_overload.tobytes(overload_option))] \
            + result[0] + [dhcp_option_partial(tag=OPTION_END)]
    return not_finished
Split a list of options. This is the reverse operation of `reassemble_options`: it splits `dhcp_option` objects into `dhcp_option_partial` objects where necessary, and sets the overload option if field overloading is used. :param payload: the DHCP payload whose options (and possibly file/sname) fields are filled in :param options: a list of `dhcp_option` :param maxsize: limit the maximum DHCP message size. If options cannot fit into the DHCP message, the specified fields are overloaded for options. If options still cannot fit after overloading, extra options are DROPPED when allowpartial = True, so it is important to sort the DHCP options by priority. :param overload: fields that are allowed to be overloaded :param allowpartial: when options cannot fit into the DHCP message, allow the rest of the options to be dropped :return: number of options that are dropped, i.e. `options[-return_value:]` are dropped
1,076
def run_cmd(cmd, return_output=False, ignore_status=False, log_output=True, **kwargs):
    logger.debug('command: "%s"' % ' '.join(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                               universal_newlines=True, **kwargs)
    output = process.communicate()[0]
    if log_output:
        logger.debug(output)
    if process.returncode > 0:
        if ignore_status:
            if return_output:
                return output
            else:
                return process.returncode
        else:
            raise subprocess.CalledProcessError(cmd=cmd, returncode=process.returncode)
    if return_output:
        return output
run provided command on host system using the same user as you invoked this code, raises subprocess.CalledProcessError if it fails :param cmd: list of str :param return_output: bool, return output of the command :param ignore_status: bool, do not fail in case nonzero return code :param log_output: bool, if True, log output to debug log :param kwargs: pass keyword arguments to subprocess.check_* functions; for more info, please check `help(subprocess.Popen)` :return: None or str
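Typical calls (the paths and commands here are illustrative):

# capture output, raising on failure
listing = run_cmd(["ls", "-la", "/tmp"], return_output=True)

# never raise: returns the nonzero return code on failure, None on success
rc = run_cmd(["grep", "-q", "pattern", "/tmp/file.txt"],
             ignore_status=True, return_output=False)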
1,077
def send_media_group(chat_id, media, reply_to_message_id=None, disable_notification=False, **kwargs):
    files = []
    if len(media) < 2 or len(media) > 10:
        raise ValueError('media must contain between 2 and 10 items')
    # the original iterated `for i, entry in media`, which cannot unpack; enumerate is intended
    for i, entry in enumerate(media):
        if isinstance(entry.media, InputFile):
            files.append(entry.media)
            media[i].media = "attach://{}".format(entry[1][0])
    params = dict(
        chat_id=chat_id,
        media=json.dumps(media)
    )
    params.update(
        _clean_params(
            reply_to_message_id=reply_to_message_id,
            disable_notification=disable_notification,
        )
    )
    return TelegramBotRPCRequest('sendMediaGroup', params=params, files=files,
                                 on_result=lambda result: [Message.from_result(message) for message in result],
                                 **kwargs)
Use this method to send a group of photos or videos as an album. On success, an array of the sent Messages is returned. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param media: A list of InputMedia objects to be sent, must include 2–10 items :param reply_to_message_id: If the message is a reply, ID of the original message :param disable_notification: Sends the messages silently. Users will receive a notification with no sound. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :type media: `list` of :class:`InputMedia` :type reply_to_message_id: int :returns: On success, an array of the sent Messages is returned. :rtype: TelegramBotRPCRequest
1,078
def common_req(self, execute, send_body=True):
    "Common code for GET and POST requests"
    # key names assumed; they mirror CGI-style remote-peer variables
    self._SERVER = {'REMOTE_ADDR': self.client_address[0],
                    'REMOTE_PORT': self.client_address[1]}
    self._to_log = True
    self._cmd = None
    self._payload = None
    self._path = None
    self._payload_params = None
    self._query_params = {}
    self._fragment = None
    (cmd, res, req) = (None, None, None)
    try:
        try:
            path = self._pathify()
            cmd = path[1:]
            res = execute(cmd)
        except HttpReqError, e:
            e.report(self)
        except Exception:
            try:
                self.send_exception(500)
            except Exception:
                pass
            raise
        else:
            if not isinstance(res, HttpResponse):
                req = self.build_response()
                if send_body:
                    req.add_data(res)
                req.set_send_body(send_body)
            else:
                req = res
            self.end_response(req)
    except socket.error, e:
        if e.errno in (errno.ECONNRESET, errno.EPIPE):
            return
        LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
    except Exception:
        LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
    finally:
        del req, res
Common code for GET and POST requests
1,079
def derivative(self, x):
    return BroadcastOperator(*[op.derivative(x) for op in self.operators])
Derivative of the broadcast operator. Parameters ---------- x : `domain` element The point to take the derivative in Returns ------- adjoint : linear `BroadcastOperator` The derivative Examples -------- Example with an affine operator: >>> I = odl.IdentityOperator(odl.rn(3)) >>> residual_op = I - I.domain.element([1, 1, 1]) >>> op = BroadcastOperator(residual_op, 2 * residual_op) Calling operator offsets by ``[1, 1, 1]``: >>> x = [1, 2, 3] >>> op(x) ProductSpace(rn(3), 2).element([ [ 0., 1., 2.], [ 0., 2., 4.] ]) The derivative of this affine operator does not have an offset: >>> op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 1., 2., 3.], [ 2., 4., 6.] ])
1,080
def cancel_registration(self):
    iq = aioxmpp.IQ(
        to=self.client.local_jid.bare().replace(localpart=None),
        type_=aioxmpp.IQType.SET,
        payload=xso.Query()
    )
    iq.payload.remove = True
    yield from self.client.send(iq)
Cancels the current client's account with the server. Even if the cancellation is successful, this method will raise an exception because the account no longer exists on the server, so the client will fail. To continue with the execution, calls to this method should be wrapped in a try/except statement.
1,081
def masked_arith_op(x, y, op):
    xrav = x.ravel()
    assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
    if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
        dtype = find_common_type([x.dtype, y.dtype])
        result = np.empty(x.size, dtype=dtype)
        yrav = y if is_period_dtype(y) else y.ravel()
        mask = notna(xrav) & notna(yrav)
        if yrav.shape != mask.shape:
            raise ValueError('Cannot broadcast operands together.')
        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask], com.values_from_object(yrav[mask]))
    else:
        assert is_scalar(y), type(y)
        assert isinstance(x, np.ndarray), type(x)
        result = np.empty(x.size, dtype=x.dtype)
        mask = notna(xrav)
        # 1 ** np.nan is 1, so those positions must be unmasked
        if op == pow:
            mask = np.where(x == 1, False, mask)
        elif op == rpow:
            mask = np.where(y == 1, False, mask)
        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask], y)
    result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
    result = result.reshape(x.shape)
    return result
If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s). Parameters ---------- x : np.ndarray y : np.ndarray, Series, Index op : binary operator
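The pattern at work, reduced to plain numpy (illustrative, not pandas internals):

import numpy as np

x = np.array([1.0, np.nan, 3.0, 4.0])
y = np.array([2.0, 5.0, np.nan, 0.5])
mask = ~np.isnan(x) & ~np.isnan(y)   # operate only where both sides are valid
out = np.empty(x.size, dtype=np.float64)
with np.errstate(all='ignore'):
    out[mask] = x[mask] ** y[mask]
out[~mask] = np.nan                  # null positions propagate as NaN
print(out)                           # [1.  nan nan  2.]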
1,082
def _get_columns(self, X, cols):
    if isinstance(X, DataSet):
        X = X[cols]
    return_vector = False
    if isinstance(cols, basestring):
        return_vector = True
        cols = [cols]
    if isinstance(X, list):
        X = [x[cols] for x in X]
        X = pd.DataFrame(X)
    if return_vector:
        t = X[cols[0]]
    else:
        t = X.as_matrix(cols)
    return t
Get a subset of columns from the given table X. X: a Pandas dataframe; the table to select columns from. cols: a string or list of strings representing the columns to select. Returns a numpy array with the data from the selected columns.
1,083
def valid_level(value):
    value = value.upper()
    if getattr(logging, value, None) is None:
        raise argparse.ArgumentTypeError("%s is not a valid level" % value)
    return value
Validation function for parser, logging level argument.
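Wiring it into a parser (the flag name is illustrative):

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--log-level", type=valid_level, default="INFO")
args = parser.parse_args(["--log-level", "debug"])        # normalized to "DEBUG"
logging.basicConfig(level=getattr(logging, args.log_level))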
1,084
def get_filetypes(filelist, path=None, size=os.path.getsize):
    path = path or (lambda _: _)
    histo = defaultdict(int)
    for entry in filelist:
        ext = os.path.splitext(path(entry))[1].lstrip('.').lower()
        if ext and ext[0] == 'r' and ext[1:].isdigit():
            ext = "rar"  # .r00, .r01, ... are split RAR volumes
        elif ext == "jpeg":
            ext = "jpg"
        elif ext == "mpeg":
            ext = "mpg"
        histo[ext] += size(entry)
    # convert byte counts to rounded percentages
    total = sum(histo.values())
    if total:
        for ext, val in histo.items():
            histo[ext] = int(val * 100.0 / total + .499)
    return sorted(zip(histo.values(), histo.keys()), reverse=True)
Get a sorted list of file types and their weight in percent from an iterable of file names. @return: List of weighted file extensions (no '.'), sorted in descending order @rtype: list of (weight, filetype)
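Example call with an in-memory listing; sizes are faked via the `size` callback so no filesystem access is needed:

sizes = {"movie.mkv": 700, "movie.r00": 100, "movie.r01": 100, "cover.jpeg": 100}
print(get_filetypes(sizes, size=lambda name: sizes[name]))
# -> [(70, 'mkv'), (20, 'rar'), (10, 'jpg')]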
1,085
def to_frame(self, *args):
    if sys.version_info < (3, 6, 0):
        from collections import OrderedDict
        impls = OrderedDict()
        for name, obj in self.items():
            impls[name] = obj._impl
    else:
        impls = get_impls(self)
    return _to_frame_inner(impls, args)
Convert the cells in the view into a DataFrame object. If ``args`` is not given, this method returns a DataFrame that has an Index or a MultiIndex, depending on the number of cells parameters, and columns each of which corresponds to one of the cells included in the view. ``args`` can be given to calculate cells values and limit the DataFrame indexes to the given arguments. The cells in this view may have different numbers of parameters, but parameters shared among multiple cells must appear in the same position in all the parameter lists. For example, having ``foo()``, ``bar(x)`` and ``baz(x, y=1)`` is okay because the shared parameter ``x`` is always the first parameter, but this method does not work if the view has ``quz(x, z=2, y=1)`` cells in addition to the first three cells, because ``y`` appears in different positions. Args: args(optional): multiple arguments, or an iterator of arguments to the cells.
1,086
def _update_object_map(self, obj_map):
    # the key name 'creationTime' is assumed; the timestamp is expanded in place
    creation_time = obj_map['creationTime']
    obj_map['creationTime'] = dict()
    obj_map['creationTime']['year'] = creation_time.year
    obj_map['creationTime']['month'] = creation_time.month
    obj_map['creationTime']['day'] = creation_time.day
    obj_map['creationTime']['hour'] = creation_time.hour
    obj_map['creationTime']['minute'] = creation_time.minute
    obj_map['creationTime']['second'] = creation_time.second
    obj_map['creationTime']['microsecond'] = creation_time.microsecond
stub
1,087
def invert(self):
    m = self.matrix
    d = m[0] * m[4] - m[1] * m[3]
    self.matrix = [
        m[4] / d, -m[1] / d, 0,
        -m[3] / d, m[0] / d, 0,
        (m[3] * m[7] - m[4] * m[6]) / d,
        -(m[0] * m[7] - m[1] * m[6]) / d,
        1
    ]
Multiplying a matrix by its inverse produces the identity matrix.
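A quick check of the affine inverse above, using a bare-bones stand-in class; the Transform wrapper and the row-major 3x3 layout (indices 6 and 7 holding the translation) are assumptions consistent with the code:

class Transform:
    def __init__(self, matrix):
        self.matrix = matrix

Transform.invert = invert  # attach the function defined above as a method

t = Transform([2, 0, 0, 0, 4, 0, 6, 8, 1])  # scale (2, 4), translate (6, 8)
t.invert()
print(t.matrix)  # [0.5, -0.0, 0, -0.0, 0.25, 0, -3.0, -2.0, 1]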
1,088
def setHintColor(self, color):
    lineEdit = self.lineEdit()
    if isinstance(lineEdit, XLineEdit):
        lineEdit.setHintColor(color)
Sets the hint color for this combo box provided its line edit is an XLineEdit instance. :param color | <QColor>
1,089
def create_project_config_path(path, mode=0o777, parents=False, exist_ok=False):
    project_path = Path(path).absolute().joinpath(RENKU_HOME)
    project_path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
    return str(project_path)
Create new project configuration folder.
1,090
def _determine_supported_alleles(command, supported_allele_flag):
    try:
        # decode since check_output returns bytes on Python 3
        supported_alleles_output = check_output([command, supported_allele_flag])
        supported_alleles_str = supported_alleles_output.decode("ascii", "ignore")
        assert len(supported_alleles_str) > 0, \
            '%s returned an empty allele list' % command
        supported_alleles = set([])
        for line in supported_alleles_str.split("\n"):
            line = line.strip()
            if not line.startswith('#') and len(line) > 0:
                try:
                    supported_alleles.add(normalize_allele_name(line))
                except AlleleParseError as error:
                    logger.info("Skipping allele %s: %s", line, error)
                    continue
        if len(supported_alleles) == 0:
            raise ValueError("Unable to determine supported alleles")
        return supported_alleles
    except Exception as e:
        logger.exception(e)
        raise SystemError("Failed to run %s %s. Possibly an incorrect executable version?" % (
            command, supported_allele_flag))
Try asking the commandline predictor (e.g. netMHCpan) which alleles it supports.
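For example, with the netMHCpan predictor (the -listMHC flag follows the netMHC family convention; assumes the binary is on PATH):

alleles = _determine_supported_alleles("netMHCpan", "-listMHC")
print(len(alleles), sorted(alleles)[:3])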
1,091
def write_document(self, gh_user, doc_id, file_content, branch, author, commit_msg=None):
    parent_sha = None
    fc = tempfile.NamedTemporaryFile()
    if is_str_type(file_content):
        fc.write(file_content)
    else:
        write_as_json(file_content, fc)
    fc.flush()
    try:
        doc_filepath = self.path_for_doc(doc_id)
        doc_dir = os.path.split(doc_filepath)[0]
        if parent_sha is None:
            self.checkout_master()
            parent_sha = self.get_master_sha()
        branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha,
                                                force_branch_name=True)
        # ... (copying the temp file into the work tree and committing it are
        # elided in the source; new_sha is the SHA of the resulting commit)
    except Exception:
        raise GitWorkflowError("Could not write to document %s" % doc_id)
    finally:
        fc.close()
    return new_sha
Given a document id, temporary filename of content, branch and auth_info. Deprecated, but needed until we merge api local-dep to master...
1,092
def json_dumps(inbox):
    # disabling the GC during serialization is a known CPython speed workaround
    gc.disable()
    str_ = json.dumps(inbox[0])
    gc.enable()
    return str_
Serializes the first element of the input using the JSON protocol as implemented by the ``json`` Python 2.6 library.
1,093
def _single_orbit_find_actions(orbit, N_max, toy_potential=None,
                               force_harmonic_oscillator=False):
    if orbit.norbits > 1:
        raise ValueError("must be a single orbit")
    if toy_potential is None:
        toy_potential = fit_toy_potential(
            orbit, force_harmonic_oscillator=force_harmonic_oscillator)
    else:
        logger.debug("Using *fixed* toy potential: {}"
                     .format(toy_potential.parameters))
    if isinstance(toy_potential, IsochronePotential):
        orbit_align = orbit.align_circulation_with_z()
        w = orbit_align.w()
        dxyz = (1, 2, 2)
        circ = np.sign(w[0, 0]*w[4, 0] - w[1, 0]*w[3, 0])
        sign = np.array([1., circ, 1.])
        orbit = orbit_align
    elif isinstance(toy_potential, HarmonicOscillatorPotential):
        dxyz = (2, 2, 2)
        sign = 1.
        w = orbit.w()
    else:
        raise ValueError("Invalid toy potential.")
    t = orbit.t.value

    # toy actions and angles for every timestep
    aaf = toy_potential.action_angle(orbit)
    if aaf[0].ndim > 2:
        aa = np.vstack((aaf[0].value[..., 0], aaf[1].value[..., 0]))
    else:
        aa = np.vstack((aaf[0].value, aaf[1].value))
    if np.any(np.isnan(aa)):
        ix = ~np.any(np.isnan(aa), axis=0)
        aa = aa[:, ix]
        t = t[ix]
        warnings.warn("NaN value in toy actions or angles!")
        if sum(ix) > 1:
            raise ValueError("Too many NaN value in toy actions or angles!")

    t1 = time.time()
    A, b, nvecs = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2])
    actions = np.array(solve(A, b))
    logger.debug("Action solution found for N_max={}, size {} symmetric"
                 " matrix in {} seconds"
                 .format(N_max, len(actions), time.time()-t1))

    t1 = time.time()
    A, b, nvecs = _angle_prepare(aa, t, N_max, dx=dxyz[0], dy=dxyz[1],
                                 dz=dxyz[2], sign=sign)
    angles = np.array(solve(A, b))
    logger.debug("Angle solution found for N_max={}, size {} symmetric"
                 " matrix in {} seconds"
                 .format(N_max, len(angles), time.time()-t1))

    if len(angles) > len(aa):
        warnings.warn("More unknowns than equations!")

    J = actions[:3]
    theta = angles[:3]
    freqs = angles[3:6]
    return dict(actions=J*aaf[0].unit, angles=theta*aaf[1].unit,
                freqs=freqs*aaf[2].unit,
                Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs)
Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential.
1,094
def link(self, href, **kwargs):
    return link.Link(dict(href=href, **kwargs), self.base_uri)
Returns a new link relative to this resource.
1,095
def __meta_metadata(self, field, key):
    mf = ''
    try:
        mf = str([f[key] for f in self.metadata
                  if f['field_name'] == field][0])
    except IndexError:
        print("%s not in metadata field:%s" % (key, field))
        return mf
    else:
        return mf
Return the value for key for the field in the metadata
1,096
def copy(self):
    result = Scalar(self.size, self.deriv)
    result.v = self.v
    if self.deriv > 0:
        result.d[:] = self.d[:]
    if self.deriv > 1:
        result.dd[:] = self.dd[:]
    return result
Return a deep copy
1,097
def return_features_numpy_base(dbpath, set_object, points_amt, names):
    engine = create_engine('sqlite:///' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    tmp_object = session.query(set_object).get(1)
    if names == 'all':
        columns_amt = 0
        for feature in tmp_object.features:
            if type(tmp_object.features[feature]) is np.ndarray:
                columns_amt += tmp_object.features[feature].shape[0]
            else:
                columns_amt += 1
        return_array = np.zeros([points_amt, columns_amt])
        for i in enumerate(session.query(set_object).order_by(set_object.id)):
            counter = 0
            for feature in i[1].features:
                feature_val = i[1].features[feature]
                if type(feature_val) is np.ndarray:
                    columns_amt = feature_val.shape[0]
                    return_array[i[0], counter:counter + columns_amt] = feature_val[:]
                    counter += feature_val.shape[0]
                else:
                    return_array[i[0], counter] = feature_val
                    counter += 1
    else:
        columns_amt = 0
        for feature in tmp_object.features:
            if feature in names:
                if type(tmp_object.features[feature]) is np.ndarray:
                    columns_amt += tmp_object.features[feature].shape[0]
                else:
                    columns_amt += 1
        return_array = np.zeros([points_amt, columns_amt])
        for i in enumerate(session.query(set_object).order_by(set_object.id)):
            counter = 0
            for feature in i[1].features:
                if feature in names:
                    feature_val = i[1].features[feature]
                    if type(feature_val) is np.ndarray:
                        columns_amt = feature_val.shape[0]
                        return_array[i[0], counter:counter + columns_amt] = feature_val[:]
                        counter += feature_val.shape[0]
                    else:
                        return_array[i[0], counter] = feature_val
                        counter += 1
    session.close()
    return return_array
Generic function which returns a 2d numpy array of extracted features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported.
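Hypothetical call, assuming a mapped TrainSet class and an existing SQLite file (both names are assumptions):

features = return_features_numpy_base("data/sets.db", TrainSet,
                                      points_amt=1000, names=["age", "embedding"])
print(features.shape)  # (1000, 1 + embedding_dim)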
1,098
def running(concurrent=False):
    ret = []
    if concurrent:
        return ret
    active = __salt__['saltutil.is_running']('state.*')
    for data in active:
        err = (
            'The function "{0}" is running as PID {1} and was started at '
            '{2} with jid {3}'
        ).format(
            data['fun'],
            data['pid'],
            salt.utils.jid.jid_to_time(data['jid']),
            data['jid'],
        )
        ret.append(err)
    return ret
Return a list of strings that contain state return data if a state function is already running. This function is used to prevent multiple state calls from being run at the same time. CLI Example: .. code-block:: bash salt '*' state.running
1,099
def tables_insert(self, table_name, schema=None, query=None,
                  friendly_name=None, description=None):
    url = Api._ENDPOINT + \
        (Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
    # field names follow the BigQuery v2 REST API
    data = {
        'kind': 'bigquery#table',
        'tableReference': {
            'projectId': table_name.project_id,
            'datasetId': table_name.dataset_id,
            'tableId': table_name.table_id
        }
    }
    if schema:
        data['schema'] = {'fields': schema}
    if query:
        data['view'] = {'query': query}
    if friendly_name:
        data['friendlyName'] = friendly_name
    if description:
        data['description'] = description
    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)
Issues a request to create a table or view in the specified dataset with the specified id. A schema must be provided to create a Table, or a query must be provided to create a View. Args: table_name: the name of the table as a tuple of components. schema: the schema, if this is a Table creation. query: the query, if this is a View creation. friendly_name: an optional friendly name. description: an optional description. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
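Sketch of a call creating a view; the api object and the name tuple mirror datalab's internals and should be treated as assumptions:

from collections import namedtuple

TableName = namedtuple('TableName', ['project_id', 'dataset_id', 'table_id'])
name = TableName('my-project', 'analytics', 'daily_view')
result = api.tables_insert(name, query='SELECT user, COUNT(*) c FROM logs GROUP BY user')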