column        dtype    min    max
text          string   89     104k  (length)
code_tokens   list
avg_line_len  float64  7.91   980
score         float64  0      630
def select_down(self):
    """move cursor down"""
    r, c = self._index
    self._select_index(r + 1, c)
[ "def", "select_down", "(", "self", ")", ":", "r", ",", "c", "=", "self", ".", "_index", "self", ".", "_select_index", "(", "r", "+", "1", ",", "c", ")" ]
28
9.5
def create(self, language, query, tasks=values.unset, model_build=values.unset, field=values.unset):
    """
    Create a new QueryInstance

    :param unicode language: An ISO language-country string of the sample.
    :param unicode query: A user-provided string that uniquely identifies this resource
                          as an alternative to the sid. It can be up to 2048 characters long.
    :param unicode tasks: Constrains the query to a set of tasks. Useful when you need to
                          constrain the paths the user can take. Tasks should be comma
                          separated: task-unique-name-1, task-unique-name-2
    :param unicode model_build: The Model Build Sid or unique name of the Model Build to be queried.
    :param unicode field: Constrains the query to a given Field within a task. Useful when you
                          know the Field you are expecting. It accepts one field in the format
                          task-unique-name-1:field-unique-name

    :returns: Newly created QueryInstance
    :rtype: twilio.rest.preview.understand.assistant.query.QueryInstance
    """
    data = values.of({
        'Language': language,
        'Query': query,
        'Tasks': tasks,
        'ModelBuild': model_build,
        'Field': field,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return QueryInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'])
[ "def", "create", "(", "self", ",", "language", ",", "query", ",", "tasks", "=", "values", ".", "unset", ",", "model_build", "=", "values", ".", "unset", ",", "field", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Language'", ":", "language", ",", "'Query'", ":", "query", ",", "'Tasks'", ":", "tasks", ",", "'ModelBuild'", ":", "model_build", ",", "'Field'", ":", "field", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "QueryInstance", "(", "self", ".", "_version", ",", "payload", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'assistant_sid'", "]", ",", ")" ]
49.655172
35.931034
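A hedged usage sketch for the row above, assuming a configured twilio-python Client with the Preview Understand API; the account credentials and Assistant sid are placeholders.

# Sketch only: replace the credentials and 'UAXXXX...' with real values.
from twilio.rest import Client

client = Client('ACCOUNT_SID', 'AUTH_TOKEN')
query = client.preview.understand \
    .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .queries \
    .create(language='en-US', query='book me a flight to Paris')
print(query.sid)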
def get_frames(self):
    "Define an iterator that will return frames at the given blocksize"
    nb_frames = self.input_totalframes // self.output_blocksize

    if self.input_totalframes % self.output_blocksize == 0:
        nb_frames -= 1  # Last frame must send eod=True

    for index in range(0, nb_frames * self.output_blocksize, self.output_blocksize):
        yield (self.samples[index:index + self.output_blocksize], False)

    yield (self.samples[nb_frames * self.output_blocksize:], True)
[ "def", "get_frames", "(", "self", ")", ":", "nb_frames", "=", "self", ".", "input_totalframes", "//", "self", ".", "output_blocksize", "if", "self", ".", "input_totalframes", "%", "self", ".", "output_blocksize", "==", "0", ":", "nb_frames", "-=", "1", "# Last frame must send eod=True", "for", "index", "in", "xrange", "(", "0", ",", "nb_frames", "*", "self", ".", "output_blocksize", ",", "self", ".", "output_blocksize", ")", ":", "yield", "(", "self", ".", "samples", "[", "index", ":", "index", "+", "self", ".", "output_blocksize", "]", ",", "False", ")", "yield", "(", "self", ".", "samples", "[", "nb_frames", "*", "self", ".", "output_blocksize", ":", "]", ",", "True", ")" ]
44.153846
27.076923
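A minimal sketch exercising the iterator contract above: every frame carries eod=False except the last. The stub class is hypothetical; it only supplies the three attributes the method reads.

import numpy as np

class _Stub:
    get_frames = get_frames  # reuse the method shown above
    def __init__(self, samples, blocksize):
        self.samples = samples
        self.input_totalframes = len(samples)
        self.output_blocksize = blocksize

stub = _Stub(np.arange(10), blocksize=4)
for frame, eod in stub.get_frames():
    print(frame, eod)   # frames of 4 samples; the final yield has eod=True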
def stop(self):
    """ Stop the reader """
    NonBlockingStreamReader._stream_mtx.acquire()
    NonBlockingStreamReader._streams.remove(self._descriptor)
    if not NonBlockingStreamReader._streams:
        NonBlockingStreamReader._run_flag = False
    NonBlockingStreamReader._stream_mtx.release()
    if NonBlockingStreamReader._run_flag is False:
        NonBlockingStreamReader._rt.join()
        del NonBlockingStreamReader._rt
        NonBlockingStreamReader._rt = None
[ "def", "stop", "(", "self", ")", ":", "# print('stopping NonBlockingStreamReader..')", "# print('acquire..')", "NonBlockingStreamReader", ".", "_stream_mtx", ".", "acquire", "(", ")", "# print('acquire..ok')", "NonBlockingStreamReader", ".", "_streams", ".", "remove", "(", "self", ".", "_descriptor", ")", "if", "not", "NonBlockingStreamReader", ".", "_streams", ":", "NonBlockingStreamReader", ".", "_run_flag", "=", "False", "# print('release..')", "NonBlockingStreamReader", ".", "_stream_mtx", ".", "release", "(", ")", "# print('release..ok')", "if", "NonBlockingStreamReader", ".", "_run_flag", "is", "False", ":", "# print('join..')", "NonBlockingStreamReader", ".", "_rt", ".", "join", "(", ")", "# print('join..ok')", "del", "NonBlockingStreamReader", ".", "_rt", "NonBlockingStreamReader", ".", "_rt", "=", "None" ]
37.5
11
def do_speak(self, args):
    """Repeats what you tell me to."""
    words = []
    for word in args.words:
        if args.piglatin:
            word = '%s%say' % (word[1:], word[0])
        if args.shout:
            word = word.upper()
        words.append(word)
    repetitions = args.repeat or 1

    color_on = ''
    if args.fg:
        color_on += FG_COLORS[args.fg]
    if args.bg:
        color_on += BG_COLORS[args.bg]
    color_off = Fore.RESET + Back.RESET

    for i in range(min(repetitions, self.maxrepeats)):
        # .poutput handles newlines, and accommodates output redirection too
        self.poutput(color_on + ' '.join(words) + color_off)
[ "def", "do_speak", "(", "self", ",", "args", ")", ":", "words", "=", "[", "]", "for", "word", "in", "args", ".", "words", ":", "if", "args", ".", "piglatin", ":", "word", "=", "'%s%say'", "%", "(", "word", "[", "1", ":", "]", ",", "word", "[", "0", "]", ")", "if", "args", ".", "shout", ":", "word", "=", "word", ".", "upper", "(", ")", "words", ".", "append", "(", "word", ")", "repetitions", "=", "args", ".", "repeat", "or", "1", "color_on", "=", "''", "if", "args", ".", "fg", ":", "color_on", "+=", "FG_COLORS", "[", "args", ".", "fg", "]", "if", "args", ".", "bg", ":", "color_on", "+=", "BG_COLORS", "[", "args", ".", "bg", "]", "color_off", "=", "Fore", ".", "RESET", "+", "Back", ".", "RESET", "for", "i", "in", "range", "(", "min", "(", "repetitions", ",", "self", ".", "maxrepeats", ")", ")", ":", "# .poutput handles newlines, and accommodates output redirection too", "self", ".", "poutput", "(", "color_on", "+", "' '", ".", "join", "(", "words", ")", "+", "color_off", ")" ]
32.5
16.863636
def network_expansion(network, method='rel', ext_min=0.1, ext_width=False,
                      filename=None, boundaries=[]):
    """Plot relative or absolute network extension of AC- and DC-lines.

    Parameters
    ----------
    network: PyPSA network container
        Holds topology of grid including results from powerflow analysis
    method: str
        Choose 'rel' for extension relative to s_nom and 'abs' for
        absolute extensions.
    ext_min: float
        Choose minimum relative line extension shown in plot in p.u.
    ext_width: float or bool
        Choose if line_width respects line extension. Turn off with
        'False' or set a linear factor to scale the extension line_width.
    filename: str or None
        Save figure to this path.
    boundaries: array
        Set boundaries of heatmap axis.
    """
    cmap = plt.cm.jet

    overlay_network = network.copy()
    overlay_network.lines = overlay_network.lines[
        overlay_network.lines.s_nom_extendable &
        ((overlay_network.lines.s_nom_opt - overlay_network.lines.s_nom_min) /
         overlay_network.lines.s_nom >= ext_min)]
    overlay_network.links = overlay_network.links[
        overlay_network.links.p_nom_extendable &
        ((overlay_network.links.p_nom_opt - overlay_network.links.p_nom_min) /
         overlay_network.links.p_nom >= ext_min)]

    for i, row in overlay_network.links.iterrows():
        linked = overlay_network.links[
            (row['bus1'] == overlay_network.links.bus0) &
            (row['bus0'] == overlay_network.links.bus1)]
        if not linked.empty:
            if row['p_nom_opt'] < linked.p_nom_opt.values[0]:
                overlay_network.links.p_nom_opt[i] = linked.p_nom_opt.values[0]

    array_line = [['Line'] * len(overlay_network.lines),
                  overlay_network.lines.index]
    array_link = [['Link'] * len(overlay_network.links),
                  overlay_network.links.index]

    if method == 'rel':
        extension_lines = pd.Series((100 * (overlay_network.lines.s_nom_opt -
                                            overlay_network.lines.s_nom_min) /
                                     overlay_network.lines.s_nom).data,
                                    index=array_line)
        extension_links = pd.Series((100 * (overlay_network.links.p_nom_opt -
                                            overlay_network.links.p_nom_min) /
                                     overlay_network.links.p_nom).data,
                                    index=array_link)

    if method == 'abs':
        extension_lines = pd.Series((overlay_network.lines.s_nom_opt -
                                     overlay_network.lines.s_nom_min).data,
                                    index=array_line)
        extension_links = pd.Series((overlay_network.links.p_nom_opt -
                                     overlay_network.links.p_nom_min).data,
                                    index=array_link)

    extension = extension_lines.append(extension_links)

    # Plot whole network in background of plot
    network.plot(
        line_colors=pd.Series("grey", index=[['Line'] * len(network.lines),
                                             network.lines.index]).append(
            pd.Series("grey", index=[['Link'] * len(network.links),
                                     network.links.index])),
        bus_sizes=0,
        line_widths=pd.Series(0.5, index=[['Line'] * len(network.lines),
                                          network.lines.index]).append(
            pd.Series(0.55, index=[['Link'] * len(network.links),
                                   network.links.index])))

    if not ext_width:
        line_widths = pd.Series(0.8, index=array_line).append(
            pd.Series(0.8, index=array_link))
    else:
        line_widths = 0.5 + (extension / ext_width)

    ll = overlay_network.plot(
        line_colors=extension,
        line_cmap=cmap,
        bus_sizes=0,
        title="Optimized AC- and DC-line expansion",
        line_widths=line_widths)

    if not boundaries:
        v = np.linspace(min(extension), max(extension), 101)
        boundaries = [min(extension), max(extension)]
    else:
        v = np.linspace(boundaries[0], boundaries[1], 101)

    if not extension_links.empty:
        cb_Link = plt.colorbar(ll[2], boundaries=v, ticks=v[0:101:10])
        cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1])
        cb_Link.remove()

    cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10],
                      fraction=0.046, pad=0.04)
    cb.set_clim(vmin=boundaries[0], vmax=boundaries[1])

    if method == 'rel':
        cb.set_label('line expansion relative to s_nom in %')
    if method == 'abs':
        cb.set_label('line expansion in MW')

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
[ "def", "network_expansion", "(", "network", ",", "method", "=", "'rel'", ",", "ext_min", "=", "0.1", ",", "ext_width", "=", "False", ",", "filename", "=", "None", ",", "boundaries", "=", "[", "]", ")", ":", "cmap", "=", "plt", ".", "cm", ".", "jet", "overlay_network", "=", "network", ".", "copy", "(", ")", "overlay_network", ".", "lines", "=", "overlay_network", ".", "lines", "[", "overlay_network", ".", "lines", ".", "s_nom_extendable", "&", "(", "(", "overlay_network", ".", "lines", ".", "s_nom_opt", "-", "overlay_network", ".", "lines", ".", "s_nom_min", ")", "/", "overlay_network", ".", "lines", ".", "s_nom", ">=", "ext_min", ")", "]", "overlay_network", ".", "links", "=", "overlay_network", ".", "links", "[", "overlay_network", ".", "links", ".", "p_nom_extendable", "&", "(", "(", "overlay_network", ".", "links", ".", "p_nom_opt", "-", "overlay_network", ".", "links", ".", "p_nom_min", ")", "/", "overlay_network", ".", "links", ".", "p_nom", ">=", "ext_min", ")", "]", "for", "i", ",", "row", "in", "overlay_network", ".", "links", ".", "iterrows", "(", ")", ":", "linked", "=", "overlay_network", ".", "links", "[", "(", "row", "[", "'bus1'", "]", "==", "overlay_network", ".", "links", ".", "bus0", ")", "&", "(", "row", "[", "'bus0'", "]", "==", "overlay_network", ".", "links", ".", "bus1", ")", "]", "if", "not", "linked", ".", "empty", ":", "if", "row", "[", "'p_nom_opt'", "]", "<", "linked", ".", "p_nom_opt", ".", "values", "[", "0", "]", ":", "overlay_network", ".", "links", ".", "p_nom_opt", "[", "i", "]", "=", "linked", ".", "p_nom_opt", ".", "values", "[", "0", "]", "array_line", "=", "[", "[", "'Line'", "]", "*", "len", "(", "overlay_network", ".", "lines", ")", ",", "overlay_network", ".", "lines", ".", "index", "]", "array_link", "=", "[", "[", "'Link'", "]", "*", "len", "(", "overlay_network", ".", "links", ")", ",", "overlay_network", ".", "links", ".", "index", "]", "if", "method", "==", "'rel'", ":", "extension_lines", "=", "pd", ".", "Series", "(", "(", "100", "*", "(", "overlay_network", ".", "lines", ".", "s_nom_opt", "-", "overlay_network", ".", "lines", ".", "s_nom_min", ")", "/", "overlay_network", ".", "lines", ".", "s_nom", ")", ".", "data", ",", "index", "=", "array_line", ")", "extension_links", "=", "pd", ".", "Series", "(", "(", "100", "*", "(", "overlay_network", ".", "links", ".", "p_nom_opt", "-", "overlay_network", ".", "links", ".", "p_nom_min", ")", "/", "(", "overlay_network", ".", "links", ".", "p_nom", ")", ")", ".", "data", ",", "index", "=", "array_link", ")", "if", "method", "==", "'abs'", ":", "extension_lines", "=", "pd", ".", "Series", "(", "(", "overlay_network", ".", "lines", ".", "s_nom_opt", "-", "overlay_network", ".", "lines", ".", "s_nom_min", ")", ".", "data", ",", "index", "=", "array_line", ")", "extension_links", "=", "pd", ".", "Series", "(", "(", "overlay_network", ".", "links", ".", "p_nom_opt", "-", "overlay_network", ".", "links", ".", "p_nom_min", ")", ".", "data", ",", "index", "=", "array_link", ")", "extension", "=", "extension_lines", ".", "append", "(", "extension_links", ")", "# Plot whole network in backgroud of plot", "network", ".", "plot", "(", "line_colors", "=", "pd", ".", "Series", "(", "\"grey\"", ",", "index", "=", "[", "[", "'Line'", "]", "*", "len", "(", "network", ".", "lines", ")", ",", "network", ".", "lines", ".", "index", "]", ")", ".", "append", "(", "pd", ".", "Series", "(", "\"grey\"", ",", "index", "=", "[", "[", "'Link'", "]", "*", "len", "(", "network", ".", "links", ")", ",", "network", ".", "links", 
".", "index", "]", ")", ")", ",", "bus_sizes", "=", "0", ",", "line_widths", "=", "pd", ".", "Series", "(", "0.5", ",", "index", "=", "[", "[", "'Line'", "]", "*", "len", "(", "network", ".", "lines", ")", ",", "network", ".", "lines", ".", "index", "]", ")", ".", "append", "(", "pd", ".", "Series", "(", "0.55", ",", "index", "=", "[", "[", "'Link'", "]", "*", "len", "(", "network", ".", "links", ")", ",", "network", ".", "links", ".", "index", "]", ")", ")", ")", "if", "not", "ext_width", ":", "line_widths", "=", "pd", ".", "Series", "(", "0.8", ",", "index", "=", "array_line", ")", ".", "append", "(", "pd", ".", "Series", "(", "0.8", ",", "index", "=", "array_link", ")", ")", "else", ":", "line_widths", "=", "0.5", "+", "(", "extension", "/", "ext_width", ")", "ll", "=", "overlay_network", ".", "plot", "(", "line_colors", "=", "extension", ",", "line_cmap", "=", "cmap", ",", "bus_sizes", "=", "0", ",", "title", "=", "\"Optimized AC- and DC-line expansion\"", ",", "line_widths", "=", "line_widths", ")", "if", "not", "boundaries", ":", "v", "=", "np", ".", "linspace", "(", "min", "(", "extension", ")", ",", "max", "(", "extension", ")", ",", "101", ")", "boundaries", "=", "[", "min", "(", "extension", ")", ",", "max", "(", "extension", ")", "]", "else", ":", "v", "=", "np", ".", "linspace", "(", "boundaries", "[", "0", "]", ",", "boundaries", "[", "1", "]", ",", "101", ")", "if", "not", "extension_links", ".", "empty", ":", "cb_Link", "=", "plt", ".", "colorbar", "(", "ll", "[", "2", "]", ",", "boundaries", "=", "v", ",", "ticks", "=", "v", "[", "0", ":", "101", ":", "10", "]", ")", "cb_Link", ".", "set_clim", "(", "vmin", "=", "boundaries", "[", "0", "]", ",", "vmax", "=", "boundaries", "[", "1", "]", ")", "cb_Link", ".", "remove", "(", ")", "cb", "=", "plt", ".", "colorbar", "(", "ll", "[", "1", "]", ",", "boundaries", "=", "v", ",", "ticks", "=", "v", "[", "0", ":", "101", ":", "10", "]", ",", "fraction", "=", "0.046", ",", "pad", "=", "0.04", ")", "cb", ".", "set_clim", "(", "vmin", "=", "boundaries", "[", "0", "]", ",", "vmax", "=", "boundaries", "[", "1", "]", ")", "if", "method", "==", "'rel'", ":", "cb", ".", "set_label", "(", "'line expansion relative to s_nom in %'", ")", "if", "method", "==", "'abs'", ":", "cb", ".", "set_label", "(", "'line expansion in MW'", ")", "if", "filename", "is", "None", ":", "plt", ".", "show", "(", ")", "else", ":", "plt", ".", "savefig", "(", "filename", ")", "plt", ".", "close", "(", ")" ]
38.780303
21.25
def get_subarray_sbi_ids(sub_array_id):
    """Return list of Scheduling Block IDs associated with the given sub_array_id."""
    ids = []
    for key in sorted(DB.keys(pattern='scheduling_block/*')):
        config = json.loads(DB.get(key))
        if config['sub_array_id'] == sub_array_id:
            ids.append(config['id'])
    return ids
[ "def", "get_subarray_sbi_ids", "(", "sub_array_id", ")", ":", "ids", "=", "[", "]", "for", "key", "in", "sorted", "(", "DB", ".", "keys", "(", "pattern", "=", "'scheduling_block/*'", ")", ")", ":", "config", "=", "json", ".", "loads", "(", "DB", ".", "get", "(", "key", ")", ")", "if", "config", "[", "'sub_array_id'", "]", "==", "sub_array_id", ":", "ids", ".", "append", "(", "config", "[", "'id'", "]", ")", "return", "ids" ]
34.4
11.4
def get_time(filename):
    """
    Get the modified time for a file as a datetime instance
    """
    ts = os.stat(filename).st_mtime
    return datetime.datetime.utcfromtimestamp(ts)
[ "def", "get_time", "(", "filename", ")", ":", "ts", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_mtime", "return", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "ts", ")" ]
28.333333
7.666667
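A quick usage sketch for the helper above; the datetime instances it returns compare directly, so stale-output checks are one line (the file names here are placeholders).

if get_time('build/output.html') < get_time('src/page.md'):
    print('output is stale; rebuild needed')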
def get_vmware_inventory_path():
    """
    Returns VMware inventory file path.

    :returns: path to the inventory file
    """
    if sys.platform.startswith("win"):
        return os.path.expandvars(r"%APPDATA%\Vmware\Inventory.vmls")
    elif sys.platform.startswith("darwin"):
        return os.path.expanduser("~/Library/Application Support/VMware Fusion/vmInventory")
    else:
        return os.path.expanduser("~/.vmware/inventory.vmls")
[ "def", "get_vmware_inventory_path", "(", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "return", "os", ".", "path", ".", "expandvars", "(", "r\"%APPDATA%\\Vmware\\Inventory.vmls\"", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "\"darwin\"", ")", ":", "return", "os", ".", "path", ".", "expanduser", "(", "\"~/Library/Application Support/VMware Fusion/vmInventory\"", ")", "else", ":", "return", "os", ".", "path", ".", "expanduser", "(", "\"~/.vmware/inventory.vmls\"", ")" ]
36.692308
18.846154
def fit(self, data, debug=False):
    """
    Fit each segment. Segments that have not already been explicitly
    added will be automatically added with default model and ytransform.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a column with the same name as `segmentation_col`.
    debug : bool
        If set to true will pass debug to the fit method of each model.

    Returns
    -------
    fits : dict of statsmodels.regression.linear_model.OLSResults
        Keys are the segment names.
    """
    data = util.apply_filter_query(data, self.fit_filters)

    unique = data[self.segmentation_col].unique()
    value_counts = data[self.segmentation_col].value_counts()

    # Remove any existing segments that may no longer have counterparts
    # in the data. This can happen when loading a saved model and then
    # calling this method with data that no longer has segments that
    # were there the last time this was called.
    gone = set(self._group.models) - set(unique)
    for g in gone:
        del self._group.models[g]

    for x in unique:
        if x not in self._group.models and \
                value_counts[x] > self.min_segment_size:
            self.add_segment(x)

    with log_start_finish(
            'fitting models in segmented model {}'.format(self.name),
            logger):
        return self._group.fit(data, debug=debug)
[ "def", "fit", "(", "self", ",", "data", ",", "debug", "=", "False", ")", ":", "data", "=", "util", ".", "apply_filter_query", "(", "data", ",", "self", ".", "fit_filters", ")", "unique", "=", "data", "[", "self", ".", "segmentation_col", "]", ".", "unique", "(", ")", "value_counts", "=", "data", "[", "self", ".", "segmentation_col", "]", ".", "value_counts", "(", ")", "# Remove any existing segments that may no longer have counterparts", "# in the data. This can happen when loading a saved model and then", "# calling this method with data that no longer has segments that", "# were there the last time this was called.", "gone", "=", "set", "(", "self", ".", "_group", ".", "models", ")", "-", "set", "(", "unique", ")", "for", "g", "in", "gone", ":", "del", "self", ".", "_group", ".", "models", "[", "g", "]", "for", "x", "in", "unique", ":", "if", "x", "not", "in", "self", ".", "_group", ".", "models", "and", "value_counts", "[", "x", "]", ">", "self", ".", "min_segment_size", ":", "self", ".", "add_segment", "(", "x", ")", "with", "log_start_finish", "(", "'fitting models in segmented model {}'", ".", "format", "(", "self", ".", "name", ")", ",", "logger", ")", ":", "return", "self", ".", "_group", ".", "fit", "(", "data", ",", "debug", "=", "debug", ")" ]
37.125
22.525
def _get_dbal_column_type(self, type_):
    """
    Get the dbal column type.

    :param type_: The fluent type
    :type type_: str

    :rtype: str
    """
    type_ = type_.lower()

    if type_ == "enum":
        return "string"

    return super(PostgresSchemaGrammar, self)._get_dbal_column_type(type_)
[ "def", "_get_dbal_column_type", "(", "self", ",", "type_", ")", ":", "type_", "=", "type_", ".", "lower", "(", ")", "if", "type_", "==", "\"enum\"", ":", "return", "\"string\"", "return", "super", "(", "PostgresSchemaGrammar", ",", "self", ")", ".", "_get_dbal_column_type", "(", "type_", ")" ]
22.333333
18.866667
def plot_brillouin_zone(bz_lattice, lines=None, labels=None, kpoints=None,
                        fold=False, coords_are_cartesian=False, ax=None, **kwargs):
    """
    Plots a 3D representation of the Brillouin zone of the structure.
    Can add to the plot paths, labels and kpoints

    Args:
        bz_lattice: Lattice object of the Brillouin zone
        lines: list of lists of coordinates. Each list represent a different path
        labels: dict containing the label as a key and the coordinates as value.
        kpoints: list of coordinates
        fold: whether the points should be folded inside the first Brillouin Zone.
            Defaults to False. Requires lattice if True.
        coords_are_cartesian: Set to True if you are providing
            coordinates in cartesian coordinates. Defaults to False.
        ax: matplotlib :class:`Axes` or None if a new figure should be created.
        kwargs: provided by add_fig_kwargs decorator

    Returns:
        matplotlib figure
    """
    fig, ax = plot_lattice_vectors(bz_lattice, ax=ax)
    plot_wigner_seitz(bz_lattice, ax=ax)
    if lines is not None:
        for line in lines:
            plot_path(line, bz_lattice,
                      coords_are_cartesian=coords_are_cartesian, ax=ax)

    if labels is not None:
        plot_labels(labels, bz_lattice,
                    coords_are_cartesian=coords_are_cartesian, ax=ax)
        plot_points(labels.values(), bz_lattice,
                    coords_are_cartesian=coords_are_cartesian,
                    fold=False, ax=ax)

    if kpoints is not None:
        plot_points(kpoints, bz_lattice,
                    coords_are_cartesian=coords_are_cartesian,
                    ax=ax, fold=fold)

    ax.set_xlim3d(-1, 1)
    ax.set_ylim3d(-1, 1)
    ax.set_zlim3d(-1, 1)

    ax.set_aspect('equal')
    ax.axis("off")

    return fig
[ "def", "plot_brillouin_zone", "(", "bz_lattice", ",", "lines", "=", "None", ",", "labels", "=", "None", ",", "kpoints", "=", "None", ",", "fold", "=", "False", ",", "coords_are_cartesian", "=", "False", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fig", ",", "ax", "=", "plot_lattice_vectors", "(", "bz_lattice", ",", "ax", "=", "ax", ")", "plot_wigner_seitz", "(", "bz_lattice", ",", "ax", "=", "ax", ")", "if", "lines", "is", "not", "None", ":", "for", "line", "in", "lines", ":", "plot_path", "(", "line", ",", "bz_lattice", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ",", "ax", "=", "ax", ")", "if", "labels", "is", "not", "None", ":", "plot_labels", "(", "labels", ",", "bz_lattice", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ",", "ax", "=", "ax", ")", "plot_points", "(", "labels", ".", "values", "(", ")", ",", "bz_lattice", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ",", "fold", "=", "False", ",", "ax", "=", "ax", ")", "if", "kpoints", "is", "not", "None", ":", "plot_points", "(", "kpoints", ",", "bz_lattice", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ",", "ax", "=", "ax", ",", "fold", "=", "fold", ")", "ax", ".", "set_xlim3d", "(", "-", "1", ",", "1", ")", "ax", ".", "set_ylim3d", "(", "-", "1", ",", "1", ")", "ax", ".", "set_zlim3d", "(", "-", "1", ",", "1", ")", "ax", ".", "set_aspect", "(", "'equal'", ")", "ax", ".", "axis", "(", "\"off\"", ")", "return", "fig" ]
36.84
21
def file_path(self, request, response=None, info=None):
    """
    What key should a fetched resource be stored under in Qiniu?
    The returned path is a JSON string containing the bucket and key information.
    """
    return json.dumps(self._extract_key_info(request))
[ "def", "file_path", "(", "self", ",", "request", ",", "response", "=", "None", ",", "info", "=", "None", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "_extract_key_info", "(", "request", ")", ")" ]
41.4
13
def qvalues(pv, m=None, verbose=False, lowmem=False, pi0=None):
    """
    Copyright (c) 2012, Nicolo Fusi, University of Sheffield
    All rights reserved.

    Estimates q-values from p-values

    Args
    =====
    pv: array of p-values
    m: number of tests. If not specified m = pv.size
    verbose: print verbose messages? (default False)
    lowmem: use memory-efficient in-place algorithm
    pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003.
         For most GWAS this is not necessary, since pi0 is extremely likely to be 1

    :return: array of q-values
    """
    assert pv.min() >= 0 and pv.max() <= 1, "p-values should be between 0 and 1"

    original_shape = pv.shape
    pv = pv.ravel()  # flattens the array in place, more efficient than flatten()

    if m is None:
        m = float(len(pv))
    else:
        # the user has supplied an m
        m *= 1.0

    # if the number of hypotheses is small, just set pi0 to 1
    if len(pv) < 100 and pi0 is None:
        pi0 = 1.0
    elif pi0 is not None:
        pi0 = pi0
    else:
        # evaluate pi0 for different lambdas
        pi0 = []
        lam = sp.arange(0, 0.90, 0.01)
        counts = sp.array([(pv > i).sum() for i in sp.arange(0, 0.9, 0.01)])

        for l in range(len(lam)):
            pi0.append(counts[l] / (m * (1 - lam[l])))

        pi0 = sp.array(pi0)

        # fit natural cubic spline
        tck = sp.interpolate.splrep(lam, pi0, k=3)
        pi0 = sp.interpolate.splev(lam[-1], tck)
        if pi0 > 1:
            if verbose:
                print("got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1" % pi0)
            pi0 = 1.0

    assert 0 <= pi0 <= 1, "pi0 is not between 0 and 1: %f" % pi0

    if lowmem:
        # low memory version, only uses 1 pv and 1 qv matrices
        qv = sp.zeros((len(pv),))
        last_pv = pv.argmax()
        qv[last_pv] = (pi0 * pv[last_pv] * m) / float(m)
        pv[last_pv] = -sp.inf
        prev_qv = last_pv
        for i in range(int(len(pv)) - 2, -1, -1):
            cur_max = pv.argmax()
            qv_i = (pi0 * m * pv[cur_max] / float(i + 1))
            pv[cur_max] = -sp.inf
            qv_i1 = prev_qv
            qv[cur_max] = min(qv_i, qv_i1)
            prev_qv = qv[cur_max]
    else:
        p_ordered = sp.argsort(pv)
        pv = pv[p_ordered]
        qv = pi0 * m / len(pv) * pv
        qv[-1] = min(qv[-1], 1.0)

        for i in range(len(pv) - 2, -1, -1):
            qv[i] = min(pi0 * m * pv[i] / (i + 1.0), qv[i + 1])

        # reorder qvalues
        qv_temp = qv.copy()
        qv = sp.zeros_like(qv)
        qv[p_ordered] = qv_temp

    # reshape qvalues
    qv = qv.reshape(original_shape)

    return qv
[ "def", "qvalues", "(", "pv", ",", "m", "=", "None", ",", "verbose", "=", "False", ",", "lowmem", "=", "False", ",", "pi0", "=", "None", ")", ":", "assert", "(", "pv", ".", "min", "(", ")", ">=", "0", "and", "pv", ".", "max", "(", ")", "<=", "1", ")", ",", "\"p-values should be between 0 and 1\"", "original_shape", "=", "pv", ".", "shape", "pv", "=", "pv", ".", "ravel", "(", ")", "# flattens the array in place, more efficient than flatten() ", "if", "m", "==", "None", ":", "m", "=", "float", "(", "len", "(", "pv", ")", ")", "else", ":", "# the user has supplied an m", "m", "*=", "1.0", "# if the number of hypotheses is small, just set pi0 to 1", "if", "len", "(", "pv", ")", "<", "100", "and", "pi0", "==", "None", ":", "pi0", "=", "1.0", "elif", "pi0", "!=", "None", ":", "pi0", "=", "pi0", "else", ":", "# evaluate pi0 for different lambdas", "pi0", "=", "[", "]", "lam", "=", "sp", ".", "arange", "(", "0", ",", "0.90", ",", "0.01", ")", "counts", "=", "sp", ".", "array", "(", "[", "(", "pv", ">", "i", ")", ".", "sum", "(", ")", "for", "i", "in", "sp", ".", "arange", "(", "0", ",", "0.9", ",", "0.01", ")", "]", ")", "for", "l", "in", "range", "(", "len", "(", "lam", ")", ")", ":", "pi0", ".", "append", "(", "counts", "[", "l", "]", "/", "(", "m", "*", "(", "1", "-", "lam", "[", "l", "]", ")", ")", ")", "pi0", "=", "sp", ".", "array", "(", "pi0", ")", "# fit natural cubic spline", "tck", "=", "sp", ".", "interpolate", ".", "splrep", "(", "lam", ",", "pi0", ",", "k", "=", "3", ")", "pi0", "=", "sp", ".", "interpolate", ".", "splev", "(", "lam", "[", "-", "1", "]", ",", "tck", ")", "if", "pi0", ">", "1", ":", "if", "verbose", ":", "print", "(", "\"got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1\"", "%", "pi0", ")", "pi0", "=", "1.0", "assert", "(", "pi0", ">=", "0", "and", "pi0", "<=", "1", ")", ",", "\"pi0 is not between 0 and 1: %f\"", "%", "pi0", "if", "lowmem", ":", "# low memory version, only uses 1 pv and 1 qv matrices", "qv", "=", "sp", ".", "zeros", "(", "(", "len", "(", "pv", ")", ",", ")", ")", "last_pv", "=", "pv", ".", "argmax", "(", ")", "qv", "[", "last_pv", "]", "=", "(", "pi0", "*", "pv", "[", "last_pv", "]", "*", "m", ")", "/", "float", "(", "m", ")", "pv", "[", "last_pv", "]", "=", "-", "sp", ".", "inf", "prev_qv", "=", "last_pv", "for", "i", "in", "range", "(", "int", "(", "len", "(", "pv", ")", ")", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "cur_max", "=", "pv", ".", "argmax", "(", ")", "qv_i", "=", "(", "pi0", "*", "m", "*", "pv", "[", "cur_max", "]", "/", "float", "(", "i", "+", "1", ")", ")", "pv", "[", "cur_max", "]", "=", "-", "sp", ".", "inf", "qv_i1", "=", "prev_qv", "qv", "[", "cur_max", "]", "=", "min", "(", "qv_i", ",", "qv_i1", ")", "prev_qv", "=", "qv", "[", "cur_max", "]", "else", ":", "p_ordered", "=", "sp", ".", "argsort", "(", "pv", ")", "pv", "=", "pv", "[", "p_ordered", "]", "qv", "=", "pi0", "*", "m", "/", "len", "(", "pv", ")", "*", "pv", "qv", "[", "-", "1", "]", "=", "min", "(", "qv", "[", "-", "1", "]", ",", "1.0", ")", "for", "i", "in", "range", "(", "len", "(", "pv", ")", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "qv", "[", "i", "]", "=", "min", "(", "pi0", "*", "m", "*", "pv", "[", "i", "]", "/", "(", "i", "+", "1.0", ")", ",", "qv", "[", "i", "+", "1", "]", ")", "# reorder qvalues", "qv_temp", "=", "qv", ".", "copy", "(", ")", "qv", "=", "sp", ".", "zeros_like", "(", "qv", ")", "qv", "[", "p_ordered", "]", "=", "qv_temp", "# reshape qvalues", "qv", "=", "qv", ".", "reshape", "(", "original_shape", ")", "return", "qv" ]
27.642857
21.561224
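A hedged usage sketch for the row above: `sp` is assumed to be scipy (an older release that still exposes numpy's namespace as sp.arange, sp.array, etc., with scipy.interpolate imported). Uniform p-values carry no signal, so the q-values should stay near 1.

import numpy as np
import scipy as sp
import scipy.interpolate   # splrep/splev used in the pi0 estimate

pv = np.random.uniform(0.0, 1.0, size=1000)
qv = qvalues(pv)
print(qv.min(), qv.max())   # both close to 1 for null data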
def phi(v):
    """Neutrino direction in polar coordinates.

    ``phi``, ``theta`` is the opposite of ``zenith``, ``azimuth``.

    Angles in radians.
    """
    v = np.atleast_2d(v)
    dir_x = v[:, 0]
    dir_y = v[:, 1]
    return phi_separg(dir_x, dir_y)
[ "def", "phi", "(", "v", ")", ":", "v", "=", "np", ".", "atleast_2d", "(", "v", ")", "dir_x", "=", "v", "[", ":", ",", "0", "]", "dir_y", "=", "v", "[", ":", ",", "1", "]", "return", "phi_separg", "(", "dir_x", ",", "dir_y", ")" ]
22.727273
19.636364
def _write_str(self, data):
    """
    Converts the given data then writes it

    :param data: Data to be written
    :return: The result of ``self.output.write()``
    """
    with self.__lock:
        self.output.write(
            to_str(data, self.encoding)
            .encode()
            .decode(self.out_encoding, errors="replace")
        )
[ "def", "_write_str", "(", "self", ",", "data", ")", ":", "with", "self", ".", "__lock", ":", "self", ".", "output", ".", "write", "(", "to_str", "(", "data", ",", "self", ".", "encoding", ")", ".", "encode", "(", ")", ".", "decode", "(", "self", ".", "out_encoding", ",", "errors", "=", "\"replace\"", ")", ")" ]
29.538462
12.615385
def assign_site_properties(self, slab, height=0.9):
    """
    Assigns site properties.
    """
    if 'surface_properties' in slab.site_properties.keys():
        return slab
    else:
        surf_sites = self.find_surface_sites_by_height(slab, height)
        surf_props = ['surface' if site in surf_sites
                      else 'subsurface' for site in slab.sites]
        return slab.copy(
            site_properties={'surface_properties': surf_props})
[ "def", "assign_site_properties", "(", "self", ",", "slab", ",", "height", "=", "0.9", ")", ":", "if", "'surface_properties'", "in", "slab", ".", "site_properties", ".", "keys", "(", ")", ":", "return", "slab", "else", ":", "surf_sites", "=", "self", ".", "find_surface_sites_by_height", "(", "slab", ",", "height", ")", "surf_props", "=", "[", "'surface'", "if", "site", "in", "surf_sites", "else", "'subsurface'", "for", "site", "in", "slab", ".", "sites", "]", "return", "slab", ".", "copy", "(", "site_properties", "=", "{", "'surface_properties'", ":", "surf_props", "}", ")" ]
40
16
def findElementsWithId(node, elems=None):
    """
    Returns all elements with id attributes
    """
    if elems is None:
        elems = {}
    id = node.getAttribute('id')
    if id != '':
        elems[id] = node
    if node.hasChildNodes():
        for child in node.childNodes:
            # from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html
            # we are only really interested in nodes of type Element (1)
            if child.nodeType == Node.ELEMENT_NODE:
                findElementsWithId(child, elems)
    return elems
[ "def", "findElementsWithId", "(", "node", ",", "elems", "=", "None", ")", ":", "if", "elems", "is", "None", ":", "elems", "=", "{", "}", "id", "=", "node", ".", "getAttribute", "(", "'id'", ")", "if", "id", "!=", "''", ":", "elems", "[", "id", "]", "=", "node", "if", "node", ".", "hasChildNodes", "(", ")", ":", "for", "child", "in", "node", ".", "childNodes", ":", "# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html", "# we are only really interested in nodes of type Element (1)", "if", "child", ".", "nodeType", "==", "Node", ".", "ELEMENT_NODE", ":", "findElementsWithId", "(", "child", ",", "elems", ")", "return", "elems" ]
33.625
13.75
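A quick check of the recursive walk above using the standard library; Node must come from xml.dom for the nodeType comparison to match.

from xml.dom import Node, minidom

doc = minidom.parseString(
    '<svg><g id="layer1"><rect id="box"/></g><circle/></svg>')
elems = findElementsWithId(doc.documentElement)
print(sorted(elems))   # ['box', 'layer1']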
def quic_graph_lasso_cv(X, metric):
    """Run QuicGraphicalLassoCV on data with metric of choice.

    Compare results with GridSearchCV + quic_graph_lasso. The number of
    lambdas tested should be much lower with similar final lam_ selected.
    """
    print("QuicGraphicalLassoCV with:")
    print("   metric: {}".format(metric))
    model = QuicGraphicalLassoCV(
        cv=2,  # cant deal w more folds at small size
        n_refinements=6,
        n_jobs=1,
        init_method="cov",
        score_metric=metric,
    )
    model.fit(X)
    print("   len(cv_lams): {}".format(len(model.cv_lams_)))
    print("   lam_scale_: {}".format(model.lam_scale_))
    print("   lam_: {}".format(model.lam_))
    return model.covariance_, model.precision_, model.lam_
[ "def", "quic_graph_lasso_cv", "(", "X", ",", "metric", ")", ":", "print", "(", "\"QuicGraphicalLassoCV with:\"", ")", "print", "(", "\" metric: {}\"", ".", "format", "(", "metric", ")", ")", "model", "=", "QuicGraphicalLassoCV", "(", "cv", "=", "2", ",", "# cant deal w more folds at small size", "n_refinements", "=", "6", ",", "n_jobs", "=", "1", ",", "init_method", "=", "\"cov\"", ",", "score_metric", "=", "metric", ",", ")", "model", ".", "fit", "(", "X", ")", "print", "(", "\" len(cv_lams): {}\"", ".", "format", "(", "len", "(", "model", ".", "cv_lams_", ")", ")", ")", "print", "(", "\" lam_scale_: {}\"", ".", "format", "(", "model", ".", "lam_scale_", ")", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "model", ".", "lam_", ")", ")", "return", "model", ".", "covariance_", ",", "model", ".", "precision_", ",", "model", ".", "lam_" ]
37.35
15.6
def route(route_str):  # decorator param
    """
    Provides play2-like routes, with python formatter.
    All string fields should be named parameters.

    :param route_str: a route "GET /parent/{parentID}/child/{childId}{ctype}"
    :return: the response of requests.request
    """
    def ilog(elapsed):  # statistic
        last_stat = _routes_stat.get(route_str,
                                     {"count": 0, "min": sys.maxsize, "max": 0, "avg": 0})
        last_count = last_stat["count"]
        _routes_stat[route_str] = {
            "count": last_count + 1,
            "min": min(elapsed, last_stat["min"]),
            "max": max(elapsed, last_stat["max"]),
            "avg": (last_count * last_stat["avg"] + elapsed) / (last_count + 1)
        }
        # log.debug('Route Time: {0} took {1} ms'.format(route_str, elapsed))

    def wrapper(f):  # decorated function
        @wraps(f)
        def wrapped_func(*args, **kwargs):  # params of function
            self = args[0]
            method, url = route_str.split(" ")

            def defaults_dict():
                spec = inspect.getfullargspec(f)  # getargspec was removed in Python 3.11
                defaults = spec.defaults or []
                return dict(zip(spec.args[-len(defaults):], defaults))

            defs = defaults_dict()
            route_args = dict(defs, **kwargs)  # defaults overridden by caller kwargs

            def get_destination_url():
                try:
                    return url.format(**route_args)
                except KeyError as e:
                    # KeyError in format has a message with the key
                    raise AttributeError("Define {0} as named argument for route.".format(e))

            destination_url = self.base_url + get_destination_url()
            f(*args, **kwargs)  # generally this is "pass"

            bypass_args = dict([
                (param, route_args[param])
                for param in ["data", "json", "cookies", "auth", "files", "content_type", "params"]
                if param in route_args
            ])

            # add json content type for:
            # - unless files are sent
            # - private that ends with .json
            # - all public api with POST/PUT method, meaning have basic auth
            # - json parameter is present
            if "files" not in bypass_args and (
                    destination_url.endswith('.json') or "json" in route_args or
                    ("auth" in bypass_args and method in ["POST", "PUT"])):
                bypass_args['headers'] = {'Content-Type': 'application/json'}

            if "content_type" in bypass_args and bypass_args['content_type'] == "yaml":
                del bypass_args["content_type"]
                bypass_args['headers'] = {'Content-Type': 'application/x-yaml'}

            start = time.time()
            try:
                response = self._session.request(method, destination_url,
                                                 verify=self.verify_ssl, **bypass_args)
            except requests.ConnectionError:
[ "def", "route", "(", "route_str", ")", ":", "# decorator param", "def", "ilog", "(", "elapsed", ")", ":", "# statistic", "last_stat", "=", "_routes_stat", ".", "get", "(", "route_str", ",", "{", "\"count\"", ":", "0", ",", "\"min\"", ":", "sys", ".", "maxint", ",", "\"max\"", ":", "0", ",", "\"avg\"", ":", "0", "}", ")", "last_count", "=", "last_stat", "[", "\"count\"", "]", "_routes_stat", "[", "route_str", "]", "=", "{", "\"count\"", ":", "last_count", "+", "1", ",", "\"min\"", ":", "min", "(", "elapsed", ",", "last_stat", "[", "\"min\"", "]", ")", ",", "\"max\"", ":", "max", "(", "elapsed", ",", "last_stat", "[", "\"max\"", "]", ")", ",", "\"avg\"", ":", "(", "last_count", "*", "last_stat", "[", "\"avg\"", "]", "+", "elapsed", ")", "/", "(", "last_count", "+", "1", ")", "}", "# log.debug('Route Time: {0} took {1} ms'.format(route_str, elapsed))", "def", "wrapper", "(", "f", ")", ":", "# decorated function", "@", "wraps", "(", "f", ")", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# params of function", "self", "=", "args", "[", "0", "]", "method", ",", "url", "=", "route_str", ".", "split", "(", "\" \"", ")", "def", "defaults_dict", "(", ")", ":", "f_args", ",", "varargs", ",", "keywords", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "f", ")", "defaults", "=", "defaults", "or", "[", "]", "return", "dict", "(", "zip", "(", "f_args", "[", "-", "len", "(", "defaults", ")", ":", "]", ",", "defaults", ")", ")", "defs", "=", "defaults_dict", "(", ")", "route_args", "=", "dict", "(", "defs", ".", "items", "(", ")", "+", "kwargs", ".", "items", "(", ")", ")", "def", "get_destination_url", "(", ")", ":", "try", ":", "return", "url", ".", "format", "(", "*", "*", "route_args", ")", "except", "KeyError", "as", "e", ":", "# KeyError in format have a message with key", "raise", "AttributeError", "(", "\"Define {0} as named argument for route.\"", ".", "format", "(", "e", ")", ")", "destination_url", "=", "self", ".", "base_url", "+", "get_destination_url", "(", ")", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# generally this is \"pass\"", "bypass_args", "=", "dict", "(", "[", "(", "param", ",", "route_args", "[", "param", "]", ")", "for", "param", "in", "[", "\"data\"", ",", "\"json\"", ",", "\"cookies\"", ",", "\"auth\"", ",", "\"files\"", ",", "\"content_type\"", ",", "\"params\"", "]", "if", "param", "in", "route_args", "]", ")", "# add json content type for:", "# - unless files are sent", "# - private that ends with .json", "# - all public api with POST/PUT method, meaning have basic auth", "# - json parameter is present", "if", "\"files\"", "not", "in", "bypass_args", "and", "(", "destination_url", ".", "endswith", "(", "'.json'", ")", "or", "\"json\"", "in", "route_args", "or", "(", "\"auth\"", "in", "bypass_args", "and", "method", "in", "[", "\"POST\"", ",", "\"PUT\"", "]", ")", ")", ":", "bypass_args", "[", "'headers'", "]", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "if", "\"content_type\"", "in", "bypass_args", "and", "bypass_args", "[", "'content_type'", "]", "==", "\"yaml\"", ":", "del", "bypass_args", "[", "\"content_type\"", "]", "bypass_args", "[", "'headers'", "]", "=", "{", "'Content-Type'", ":", "'application/x-yaml'", "}", "start", "=", "time", ".", "time", "(", ")", "try", ":", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "destination_url", ",", "verify", "=", "self", ".", "verify_ssl", ",", "*", "*", "bypass_args", ")", "except", "requests", ".", "ConnectionError", ":", 
"log", ".", "info", "(", "'ConnectionError caught. Trying again: \\n %s:%s '", "%", "(", "method", ",", "destination_url", ")", ")", "import", "traceback", "def", "log_exception", "(", "exc_class", ",", "exc", ",", "tb", ")", ":", "log", ".", "info", "(", "'Got exception: %s'", "%", "exc", ")", "log", ".", "info", "(", "'Class: %s'", "%", "exc_class", ")", "log", ".", "info", "(", "'Trace: %s'", "%", "traceback", ".", "format_tb", "(", "tb", ")", ")", "log", ".", "error", "(", "'Got exception while executing: %s'", "%", "exc", ")", "log_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "time", ".", "sleep", "(", "2", ")", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "destination_url", ",", "verify", "=", "self", ".", "verify_ssl", ",", "*", "*", "bypass_args", ")", "end", "=", "time", ".", "time", "(", ")", "elapsed", "=", "int", "(", "(", "end", "-", "start", ")", "*", "1000.0", ")", "ilog", "(", "elapsed", ")", "if", "self", ".", "verify_codes", ":", "if", "response", ".", "status_code", "is", "not", "200", ":", "msg", "=", "\"Route {0} {1} returned code={2} and error: {3}\"", ".", "format", "(", "method", ",", "get_destination_url", "(", ")", ",", "response", ".", "status_code", ",", "response", ".", "text", ")", "if", "response", ".", "status_code", "in", "api_http_code_errors", ".", "keys", "(", ")", ":", "raise", "api_http_code_errors", "[", "response", ".", "status_code", "]", "(", "msg", ")", "else", ":", "log", ".", "debug", "(", "response", ".", "text", ")", "log", ".", "debug", "(", "response", ".", "request", ".", "body", ")", "raise", "ApiError", "(", "msg", ")", "return", "response", "return", "wrapped_func", "return", "wrapper" ]
45.049505
24.673267
def parse(self, line, **options):
    """\
    Parse a line and return (cmd, args, kwargs) - cmd may be False if
    it wasn't parseable.

    Relatively simple in the Regex parser - anything can be "parsed",
    but is not guaranteed to match.
    """
    line = line.strip()
    if not line:
        return (False, (), {})
    split = line.split()
    cmd, args = split[0], split[1:]
    return (cmd, args, {})
[ "def", "parse", "(", "self", ",", "line", ",", "*", "*", "options", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "return", "(", "False", ",", "(", ")", ",", "{", "}", ")", "split", "=", "line", ".", "split", "(", ")", "cmd", ",", "args", "=", "split", "[", "0", "]", ",", "split", "[", "1", ":", "]", "return", "(", "cmd", ",", "args", ",", "{", "}", ")" ]
31.928571
13
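A minimal sketch of the contract above; the throwaway class exists only to bind the method so it can be called with a self argument.

class _Parser:
    parse = parse  # method shown above

p = _Parser()
print(p.parse('greet alice bob'))   # ('greet', ['alice', 'bob'], {})
print(p.parse('   '))               # (False, (), {})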
def histogram(a, bins=10, range=None, **kwargs):
    """Compute the histogram of the input data.

    Parameters
    ----------
    a : NDArray
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars
        If bins is an int, it defines the number of equal-width bins in the
        given range (10, by default). If bins is a sequence, it defines the
        bin edges, including the rightmost edge, allowing for non-uniform
        bin widths.
    range : (float, float), required if bins is an integer
        The lower and upper range of the bins. If not provided, range is
        simply (a.min(), a.max()). Values outside the range are ignored.
        The first element of the range must be less than or equal to the
        second. range affects the automatic bin computation as well, the
        range will be equally divided by the number of bins.

    Returns
    -------
    out : Symbol
        The created Symbol
    """
    if isinstance(bins, Symbol):
        return _internal._histogram(data=a, bins=bins, **kwargs)
    elif isinstance(bins, integer_types):
        if range is None:
            raise ValueError("null range is not supported in symbol mode")
        return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)
    raise ValueError("bins argument should be either an integer or an NDArray")
[ "def", "histogram", "(", "a", ",", "bins", "=", "10", ",", "range", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "bins", ",", "Symbol", ")", ":", "return", "_internal", ".", "_histogram", "(", "data", "=", "a", ",", "bins", "=", "bins", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "bins", ",", "integer_types", ")", ":", "if", "range", "is", "None", ":", "raise", "ValueError", "(", "\"null range is not supported in symbol mode\"", ")", "return", "_internal", ".", "_histogram", "(", "data", "=", "a", ",", "bin_cnt", "=", "bins", ",", "range", "=", "range", ",", "*", "*", "kwargs", ")", "raise", "ValueError", "(", "\"bins argument should be either an integer or an NDArray\"", ")" ]
46.413793
26.551724
def publish(idx=None):
    """Publish packaged distributions to pypi index"""
    if idx is None:
        idx = ''
    else:
        idx = '-r ' + idx
    run('python setup.py register {}'.format(idx))
    run('twine upload {} dist/*.whl dist/*.egg dist/*.tar.gz'.format(idx))
[ "def", "publish", "(", "idx", "=", "None", ")", ":", "if", "idx", "is", "None", ":", "idx", "=", "''", "else", ":", "idx", "=", "'-r '", "+", "idx", "run", "(", "'python setup.py register {}'", ".", "format", "(", "idx", ")", ")", "run", "(", "'twine upload {} dist/*.whl dist/*.egg dist/*.tar.gz'", ".", "format", "(", "idx", ")", ")" ]
33.625
19.125
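For reference, a sketch of what the task above expands to, assuming `run` is a shell runner from a tool such as Fabric or Invoke:

# publish() uploads to the default index; publish('testpypi') issues:
#   python setup.py register -r testpypi
#   twine upload -r testpypi dist/*.whl dist/*.egg dist/*.tar.gz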
def cli():
    """Parse options from the command line"""
    parser = argparse.ArgumentParser(
        prog="sphinx-serve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler="resolve",
        description=__doc__
    )
    parser.add_argument("-v", "--version", action="version",
                        version="%(prog)s {0}".format(__version__))
    parser.add_argument("-h", "--host", action="store",
                        default="0.0.0.0",
                        help="Listen to the given hostname")
    parser.add_argument("-p", "--port", action="store",
                        type=int, default=8081,
                        help="Listen to given port")
    parser.add_argument("-b", "--build", action="store",
                        default="_build",
                        help="Build folder name")
    parser.add_argument("-s", "--single", action="store_true",
                        help="Serve the single-html documentation version")
    return parser.parse_args()
[ "def", "cli", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"sphinx-serve\"", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ",", "conflict_handler", "=", "\"resolve\"", ",", "description", "=", "__doc__", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"%(prog)s {0}\"", ".", "format", "(", "__version__", ")", ")", "parser", ".", "add_argument", "(", "\"-h\"", ",", "\"--host\"", ",", "action", "=", "\"store\"", ",", "default", "=", "\"0.0.0.0\"", ",", "help", "=", "\"Listen to the given hostname\"", ")", "parser", ".", "add_argument", "(", "\"-p\"", ",", "\"--port\"", ",", "action", "=", "\"store\"", ",", "type", "=", "int", ",", "default", "=", "8081", ",", "help", "=", "\"Listen to given port\"", ")", "parser", ".", "add_argument", "(", "\"-b\"", ",", "\"--build\"", ",", "action", "=", "\"store\"", ",", "default", "=", "\"_build\"", ",", "help", "=", "\"Build folder name\"", ")", "parser", ".", "add_argument", "(", "\"-s\"", ",", "\"--single\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Serve the single-html documentation version\"", ")", "return", "parser", ".", "parse_args", "(", ")" ]
38.53125
20.6875
def encode_pin(self, pin, matrix=None):
    """Transform correct PIN according to the displayed matrix."""
    if matrix is None:
        _, matrix = self.read_pin()
    return "".join([str(matrix.index(p) + 1) for p in pin])
[ "def", "encode_pin", "(", "self", ",", "pin", ",", "matrix", "=", "None", ")", ":", "if", "matrix", "is", "None", ":", "_", ",", "matrix", "=", "self", ".", "read_pin", "(", ")", "return", "\"\"", ".", "join", "(", "[", "str", "(", "matrix", ".", "index", "(", "p", ")", "+", "1", ")", "for", "p", "in", "pin", "]", ")" ]
47.4
7.8
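A worked example of the encoding above: the device displays digits in a scrambled matrix, and the client sends the positions (1-9) of the PIN digits rather than the digits themselves. The matrix value here is hypothetical.

matrix = "371854926"   # hypothetical scrambled keypad layout
pin = "1234"
# '1' is at index 2 -> '3', '2' at 7 -> '8', '3' at 0 -> '1', '4' at 5 -> '6'
encoded = "".join(str(matrix.index(p) + 1) for p in pin)
print(encoded)   # '3816'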
@contextmanager  # generator used as a context manager (from contextlib)
def dynamicmap_memoization(callable_obj, streams):
    """
    Determine whether the Callable should have memoization enabled
    based on the supplied streams (typically by a DynamicMap). Memoization
    is disabled if any of the streams require it and are currently in
    a triggered state.
    """
    memoization_state = bool(callable_obj._stream_memoization)
    callable_obj._stream_memoization &= not any(s.transient and s._triggering
                                                for s in streams)
    try:
        yield
    except:
        raise
    finally:
        callable_obj._stream_memoization = memoization_state
[ "def", "dynamicmap_memoization", "(", "callable_obj", ",", "streams", ")", ":", "memoization_state", "=", "bool", "(", "callable_obj", ".", "_stream_memoization", ")", "callable_obj", ".", "_stream_memoization", "&=", "not", "any", "(", "s", ".", "transient", "and", "s", ".", "_triggering", "for", "s", "in", "streams", ")", "try", ":", "yield", "except", ":", "raise", "finally", ":", "callable_obj", ".", "_stream_memoization", "=", "memoization_state" ]
38.133333
21.6
def _parse_module(self, uri):
    ''' Parse module defined in *uri* '''
    filename = self._uri2path(uri)
    if filename is None:
        # nothing that we could handle here.
        return ([], [])
    f = open(filename, 'rt')
    functions, classes = self._parse_lines(f)
    f.close()
    return functions, classes
[ "def", "_parse_module", "(", "self", ",", "uri", ")", ":", "filename", "=", "self", ".", "_uri2path", "(", "uri", ")", "if", "filename", "is", "None", ":", "# nothing that we could handle here.", "return", "(", "[", "]", ",", "[", "]", ")", "f", "=", "open", "(", "filename", ",", "'rt'", ")", "functions", ",", "classes", "=", "self", ".", "_parse_lines", "(", "f", ")", "f", ".", "close", "(", ")", "return", "functions", ",", "classes" ]
34.5
9.9
def pdf(self, x, e=0., w=1., a=0.):
    """
    Probability density function of the skew-normal distribution.
    See: https://en.wikipedia.org/wiki/Skew_normal_distribution

    :param x: input value
    :param e: location
    :param w: scale
    :param a: shape (skewness)
    :return: density at x
    """
    t = (x - e) / w
    return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a * t)
[ "def", "pdf", "(", "self", ",", "x", ",", "e", "=", "0.", ",", "w", "=", "1.", ",", "a", "=", "0.", ")", ":", "t", "=", "(", "x", "-", "e", ")", "/", "w", "return", "2.", "/", "w", "*", "stats", ".", "norm", ".", "pdf", "(", "t", ")", "*", "stats", ".", "norm", ".", "cdf", "(", "a", "*", "t", ")" ]
28.333333
15.166667
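A sanity check for the density above, as a standalone function without the self parameter: a proper pdf should integrate to 1 for any location e, scale w and shape a.

from scipy import stats
from scipy.integrate import quad

def skew_pdf(x, e=0., w=1., a=0.):
    t = (x - e) / w
    return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a * t)

# integrate over a range wide enough to capture essentially all mass
total, _ = quad(skew_pdf, -20, 20, args=(1.0, 2.0, 4.0))
print(round(total, 6))   # ~1.0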
def minimize_algorithm_1dim_golden(function, a, b, c, tolerance=DOUBLE_TOL):
    '''
    Given a function f, and given a bracketing triplet of abscissas ax, bx, cx
    (such that bx is between ax and cx, and f(bx) is less than both f(ax) and
    f(cx)), this routine performs a golden section search for the minimum,
    isolating it to a fractional precision of about tol. The abscissa of the
    minimum is returned as xmin, and the minimum function value is returned
    as the second element of the tuple.
    See Press, et al. (1992) "Numerical recipes in C", 2nd ed., p.401.
    '''
    x0 = a
    x3 = c
    # initial golden-section step into the larger sub-interval (NR eq. 10.1);
    # C is the golden-ratio complement constant, not the bracket endpoint c
    if abs(c - b) > abs(b - a):
        x1 = b
        x2 = b + C * (c - b)
    else:
        x2 = b
        x1 = b - C * (b - a)
    f1 = function(x1)
    f2 = function(x2)
    counter = 0
    while abs(x3 - x0) - tolerance * (abs(x1) + abs(x2)) > DOUBLE_TOL:
        if f2 < f1:
            x0 = x1
            x1 = x2
            x2 = R * x1 + C * x3
            f1 = f2
            f2 = function(x2)
        else:
            x3 = x2
            x2 = x1
            x1 = R * x2 + C * x0
            f2 = f1
            f1 = function(x1)
        counter = counter + 1
        if counter > 10000:
            raise Exception("More than 10000 iterations.")
    if f1 < f2:
        return (x1, f1)
    else:
        return (x2, f2)
[ "def", "minimize_algorithm_1dim_golden", "(", "function", ",", "a", ",", "b", ",", "c", ",", "tolerance", "=", "DOUBLE_TOL", ")", ":", "x0", "=", "a", "x3", "=", "c", "if", "abs", "(", "c", "-", "b", ")", ">", "abs", "(", "b", "-", "a", ")", ":", "x1", "=", "b", "x2", "=", "b", "+", "c", "*", "(", "c", "-", "b", ")", "else", ":", "x2", "=", "b", "x1", "=", "b", "-", "c", "*", "(", "b", "-", "a", ")", "f1", "=", "function", "(", "x1", ")", "f2", "=", "function", "(", "x2", ")", "counter", "=", "0", "while", "abs", "(", "x3", "-", "x0", ")", "-", "tolerance", "*", "(", "abs", "(", "x1", ")", "+", "abs", "(", "x2", ")", ")", ">", "DOUBLE_TOL", ":", "\"\"\"print(\"------\")\n print(\"x0 = \" + str(x0))\n print(\"x1 = \" + str(x1))\n print(\"x2 = \" + str(x2))\n print(\"x3 = \" + str(x3))\n print(\"f1 = \" + str(f1))\n print(\"f2 = \" + str(f2))\n print(\"tolerance * (abs(x1) + abs(x2) = \" + str(tolerance * (abs(x1) + abs(x2))))\n print(\"abs(x3 - x0) = \" + str(abs(x3 - x0)))\"\"\"", "if", "f2", "<", "f1", ":", "x0", "=", "x1", "x1", "=", "x2", "x2", "=", "R", "*", "x1", "+", "C", "*", "x3", "f1", "=", "f2", "f2", "=", "function", "(", "x2", ")", "else", ":", "x3", "=", "x2", "x2", "=", "x1", "x1", "=", "R", "*", "x2", "+", "C", "*", "x0", "f2", "=", "f1", "f1", "=", "function", "(", "x1", ")", "counter", "=", "counter", "+", "1", "if", "counter", ">", "10000", ":", "raise", "Exception", "(", "\"More than 10000 iterations.\"", ")", "if", "f1", "<", "f2", ":", "return", "(", "x1", ",", "f1", ")", "else", ":", "return", "(", "x2", ",", "f2", ")" ]
32.647059
22.392157
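A usage sketch for the routine above. R, C and DOUBLE_TOL are assumed module-level constants in the original source; the values below are the standard golden-section choices.

R = 0.61803399          # golden ratio fraction
C = 1.0 - R             # its complement
DOUBLE_TOL = 1e-12

# bracketing triplet for f(x) = (x - 2)^2: f(1) < f(0) and f(1) < f(5)
xmin, fmin = minimize_algorithm_1dim_golden(lambda x: (x - 2.0) ** 2,
                                            a=0.0, b=1.0, c=5.0)
print(xmin, fmin)   # ~2.0, ~0.0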
def _make_all_matchers(cls, parameters):
    '''
    For every parameter, create a matcher if the parameter has an
    annotation.
    '''
    for name, param in parameters:
        annotation = param.annotation
        if annotation is not Parameter.empty:
            yield name, cls._make_param_matcher(annotation, param.kind)
[ "def", "_make_all_matchers", "(", "cls", ",", "parameters", ")", ":", "for", "name", ",", "param", "in", "parameters", ":", "annotation", "=", "param", ".", "annotation", "if", "annotation", "is", "not", "Parameter", ".", "empty", ":", "yield", "name", ",", "cls", ".", "_make_param_matcher", "(", "annotation", ",", "param", ".", "kind", ")" ]
39.222222
17.222222
def lines(self) -> str:
    """Return the source code lines for this error."""
    if self.definition is None:
        return ''
    source = ''
    lines = self.definition.source
    offset = self.definition.start  # type: ignore
    lines_stripped = list(reversed(list(dropwhile(is_blank,
                                                  reversed(lines)))))
    numbers_width = len(str(offset + len(lines_stripped)))
    line_format = '{{:{}}}:{{}}'.format(numbers_width)
    for n, line in enumerate(lines_stripped):
        if line:
            line = ' ' + line
        source += line_format.format(n + offset, line)
        if n > 5:
            source += '        ...\n'
            break
    return source
[ "def", "lines", "(", "self", ")", "->", "str", ":", "if", "self", ".", "definition", "is", "None", ":", "return", "''", "source", "=", "''", "lines", "=", "self", ".", "definition", ".", "source", "offset", "=", "self", ".", "definition", ".", "start", "# type: ignore", "lines_stripped", "=", "list", "(", "reversed", "(", "list", "(", "dropwhile", "(", "is_blank", ",", "reversed", "(", "lines", ")", ")", ")", ")", ")", "numbers_width", "=", "len", "(", "str", "(", "offset", "+", "len", "(", "lines_stripped", ")", ")", ")", "line_format", "=", "'{{:{}}}:{{}}'", ".", "format", "(", "numbers_width", ")", "for", "n", ",", "line", "in", "enumerate", "(", "lines_stripped", ")", ":", "if", "line", ":", "line", "=", "' '", "+", "line", "source", "+=", "line_format", ".", "format", "(", "n", "+", "offset", ",", "line", ")", "if", "n", ">", "5", ":", "source", "+=", "' ...\\n'", "break", "return", "source" ]
40.421053
15.052632
def set_if_unset(self, key, value):
        """Set a particular spark property by the string key name
        if it hasn't already been set. This method allows chaining
        so that it can provide a similar feel to the standard Scala
        way of setting multiple configurations.

        Parameters
        ----------
        key : string
        value : string

        Returns
        -------
        self
        """
        if key not in self._conf_dict:
            self.set(key, value)
        return self
[ "def", "set_if_unset", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "not", "in", "self", ".", "_conf_dict", ":", "self", ".", "set", "(", "key", ",", "value", ")", "return", "self" ]
27.277778
21.888889
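A quick illustration of the chaining style the docstring describes, using a minimal stand-in class (the _conf_dict/set plumbing here is hypothetical, just enough to make the pattern runnable):

class _Conf:
    def __init__(self):
        self._conf_dict = {}

    def set(self, key, value):
        self._conf_dict[key] = value
        return self

    def set_if_unset(self, key, value):
        if key not in self._conf_dict:
            self.set(key, value)
        return self

conf = _Conf().set('spark.app.name', 'demo').set_if_unset('spark.app.name', 'ignored')
print(conf._conf_dict)  # {'spark.app.name': 'demo'} -- the second call was a no-op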
def robust_single_linkage(X, cut, k=5, alpha=1.4142135623730951,
                          gamma=5, metric='euclidean', algorithm='best',
                          memory=Memory(cachedir=None, verbose=0),
                          leaf_size=40, core_dist_n_jobs=4, **kwargs):
    """Perform robust single linkage clustering from a vector array
    or distance matrix.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
        array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    cut : float
        The reachability distance value at which to cut the cluster
        hierarchy to derive a flat cluster labelling.

    k : int, optional (default=5)
        Reachability distances will be computed with regard to the `k`
        nearest neighbors.

    alpha : float, optional (default=np.sqrt(2))
        Distance scaling for reachability distance computation. Reachability
        distance is computed as
        $max \{ core_k(a), core_k(b), 1/\alpha d(a,b) \}$.

    gamma : int, optional (default=5)
        Ignore any clusters in the flat clustering with size less than gamma,
        and declare points in such clusters as noise points.

    metric : string, or callable, optional (default='euclidean')
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square.

    algorithm : string, optional (default='best')
        Exactly which algorithm to use; hdbscan has variants specialised
        for different characteristics of the data. By default this is set
        to ``best`` which chooses the "best" algorithm given the nature of
        the data. You can force other options if you believe you know
        better. Options are:
            * ``generic``
            * ``best``
            * ``prims_kdtree``
            * ``prims_balltree``
            * ``boruvka_kdtree``
            * ``boruvka_balltree``

    memory : Instance of joblib.Memory or string (optional)
        Used to cache the output of the computation of the tree.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    leaf_size : int, optional (default=40)
        Leaf size for trees responsible for fast nearest
        neighbour queries.

    core_dist_n_jobs : int, optional
        Number of parallel jobs to run in core distance computations (if
        supported by the specific algorithm). For ``core_dist_n_jobs``
        below -1, (n_cpus + 1 + core_dist_n_jobs) are used.
        (default 4)

    Returns
    -------
    labels : ndarray, shape (n_samples, )
        Cluster labels for each point.  Noisy samples are given the label -1.

    single_linkage_tree : ndarray, shape (n_samples - 1, 4)
        The single linkage tree produced during clustering in scipy
        hierarchical clustering format
        (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html).

    References
    ----------
    .. [1] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the
       cluster tree. In Advances in Neural Information Processing Systems
       (pp. 343-351).
""" if not isinstance(k, int) or k < 1: raise ValueError('k must be an integer greater than zero!') if not isinstance(alpha, float) or alpha < 1.0: raise ValueError('alpha must be a float greater than or equal to 1.0!') if not isinstance(gamma, int) or gamma < 1: raise ValueError('gamma must be an integer greater than zero!') if not isinstance(leaf_size, int) or leaf_size < 1: raise ValueError('Leaf size must be at least one!') if metric == 'minkowski': if 'p' not in kwargs or kwargs['p'] is None: raise TypeError('Minkowski metric given but no p value supplied!') if kwargs['p'] < 0: raise ValueError('Minkowski metric with negative p value is not' ' defined!') X = check_array(X, accept_sparse='csr') if isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) if algorithm != 'best': if algorithm == 'generic': single_linkage_tree = memory.cache(_rsl_generic)( X, k, alpha, metric, **kwargs) elif algorithm == 'prims_kdtree': single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) elif algorithm == 'prims_balltree': single_linkage_tree = memory.cache(_rsl_prims_balltree)( X, k, alpha, metric, **kwargs) elif algorithm == 'boruvka_kdtree': single_linkage_tree = \ memory.cache(_rsl_boruvka_kdtree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) elif algorithm == 'boruvka_balltree': single_linkage_tree = \ memory.cache(_rsl_boruvka_balltree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) else: raise TypeError('Unknown algorithm type %s specified' % algorithm) else: if issparse(X) or metric not in FAST_METRICS: # We can't do much with sparse matrices ... single_linkage_tree = memory.cache(_rsl_generic)( X, k, alpha, metric, **kwargs) elif metric in KDTree.valid_metrics: # Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 128: single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) else: single_linkage_tree = \ memory.cache(_rsl_boruvka_kdtree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) else: # Metric is a valid BallTree metric # Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 128: single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) else: single_linkage_tree = \ memory.cache(_rsl_boruvka_balltree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) labels = single_linkage_tree.get_clusters(cut, gamma) return labels, single_linkage_tree.to_numpy()
[ "def", "robust_single_linkage", "(", "X", ",", "cut", ",", "k", "=", "5", ",", "alpha", "=", "1.4142135623730951", ",", "gamma", "=", "5", ",", "metric", "=", "'euclidean'", ",", "algorithm", "=", "'best'", ",", "memory", "=", "Memory", "(", "cachedir", "=", "None", ",", "verbose", "=", "0", ")", ",", "leaf_size", "=", "40", ",", "core_dist_n_jobs", "=", "4", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", "or", "k", "<", "1", ":", "raise", "ValueError", "(", "'k must be an integer greater than zero!'", ")", "if", "not", "isinstance", "(", "alpha", ",", "float", ")", "or", "alpha", "<", "1.0", ":", "raise", "ValueError", "(", "'alpha must be a float greater than or equal to 1.0!'", ")", "if", "not", "isinstance", "(", "gamma", ",", "int", ")", "or", "gamma", "<", "1", ":", "raise", "ValueError", "(", "'gamma must be an integer greater than zero!'", ")", "if", "not", "isinstance", "(", "leaf_size", ",", "int", ")", "or", "leaf_size", "<", "1", ":", "raise", "ValueError", "(", "'Leaf size must be at least one!'", ")", "if", "metric", "==", "'minkowski'", ":", "if", "'p'", "not", "in", "kwargs", "or", "kwargs", "[", "'p'", "]", "is", "None", ":", "raise", "TypeError", "(", "'Minkowski metric given but no p value supplied!'", ")", "if", "kwargs", "[", "'p'", "]", "<", "0", ":", "raise", "ValueError", "(", "'Minkowski metric with negative p value is not'", "' defined!'", ")", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csr'", ")", "if", "isinstance", "(", "memory", ",", "six", ".", "string_types", ")", ":", "memory", "=", "Memory", "(", "cachedir", "=", "memory", ",", "verbose", "=", "0", ")", "if", "algorithm", "!=", "'best'", ":", "if", "algorithm", "==", "'generic'", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_generic", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'prims_kdtree'", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_prims_kdtree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'prims_balltree'", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_prims_balltree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'boruvka_kdtree'", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_boruvka_kdtree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "leaf_size", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'boruvka_balltree'", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_boruvka_balltree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "leaf_size", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "TypeError", "(", "'Unknown algorithm type %s specified'", "%", "algorithm", ")", "else", ":", "if", "issparse", "(", "X", ")", "or", "metric", "not", "in", "FAST_METRICS", ":", "# We can't do much with sparse matrices ...", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_generic", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "elif", "metric", "in", "KDTree", ".", "valid_metrics", ":", "# Need heuristic to decide when to go to boruvka;", "# still debugging for now", "if", "X", ".", "shape", "[", "1", "]", ">", "128", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_prims_kdtree", 
")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "else", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_boruvka_kdtree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "leaf_size", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "else", ":", "# Metric is a valid BallTree metric", "# Need heuristic to decide when to go to boruvka;", "# still debugging for now", "if", "X", ".", "shape", "[", "1", "]", ">", "128", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_prims_kdtree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "*", "*", "kwargs", ")", "else", ":", "single_linkage_tree", "=", "memory", ".", "cache", "(", "_rsl_boruvka_balltree", ")", "(", "X", ",", "k", ",", "alpha", ",", "metric", ",", "leaf_size", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "labels", "=", "single_linkage_tree", ".", "get_clusters", "(", "cut", ",", "gamma", ")", "return", "labels", ",", "single_linkage_tree", ".", "to_numpy", "(", ")" ]
43.223602
22.10559
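Assuming this is the module-level robust_single_linkage exported by the hdbscan package, a typical call looks like the sketch below (the import path and the cut value are assumptions; cut is data-dependent and chosen here only for illustration):

import numpy as np
from hdbscan import robust_single_linkage  # assumed export location
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
labels, slt = robust_single_linkage(X, cut=0.6, k=5, gamma=5)
print(set(labels))  # cluster ids, with -1 for points declared noise
print(slt.shape)    # (n_samples - 1, 4), scipy linkage format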
def parse(self, data):
        """Parse a 17-byte packet in the Wind format and return a
        dictionary containing the data extracted. An example of a return
        value would be:

        .. code-block:: python

            {
                'id': "0x2EB2",
                'packet_length': 16,
                'packet_type': 86,
                'packet_type_name': 'Wind sensors',
                'sequence_number': 0,
                'packet_subtype': 4,
                'packet_subtype_name': "TFA",
                'temperature': 17.3,
                'direction': 120,
                'wind_gust': 11,
                'av_speed': 12,
                'wind_chill': 10,
                'signal_level': 9,
                'battery_level': 6,
            }

        :param data: bytearray to be parsed
        :type data: bytearray
        :return: Data dictionary containing the parsed values
        :rtype: dict
        """
        self.validate_packet(data)

        results = self.parse_header_part(data)
        sub_type = results['packet_subtype']

        id_ = self.dump_hex(data[4:6])
        direction = data[6] * 256 + data[7]

        if sub_type != 0x05:
            av_speed = (data[8] * 256 + data[9]) * 0.1
        else:
            av_speed = '--??--'
        gust = (data[10] * 256 + data[11]) * 0.1

        if sub_type == 0x04:
            temperature = ((data[12] & 0x7f) * 256 + data[13]) / 10
            signbit = data[12] & 0x80
            if signbit != 0:
                temperature = -temperature
        else:
            temperature = '--??--'

        if sub_type == 0x04:
            wind_chill = ((data[14] & 0x7f) * 256 + data[15]) / 10
            signbit = data[14] & 0x80
            if signbit != 0:
                wind_chill = -wind_chill
        else:
            wind_chill = '--??--'

        sensor_specific = {
            'id': id_,
            'direction': direction,
            'wind_gust': gust
        }
        if av_speed != '--??--':
            sensor_specific['av_speed'] = av_speed
        if temperature != '--??--':
            sensor_specific['temperature'] = temperature
        if wind_chill != '--??--':
            sensor_specific['wind_chill'] = wind_chill

        results.update(RfxPacketUtils.parse_signal_and_battery(data[16]))
        results.update(sensor_specific)

        return results
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "validate_packet", "(", "data", ")", "results", "=", "self", ".", "parse_header_part", "(", "data", ")", "sub_type", "=", "results", "[", "'packet_subtype'", "]", "id_", "=", "self", ".", "dump_hex", "(", "data", "[", "4", ":", "6", "]", ")", "direction", "=", "data", "[", "6", "]", "*", "256", "+", "data", "[", "7", "]", "if", "sub_type", "!=", "0x05", ":", "av_speed", "=", "(", "data", "[", "8", "]", "*", "256", "+", "data", "[", "9", "]", ")", "*", "0.1", "else", ":", "av_speed", "=", "'--??--'", "gust", "=", "(", "data", "[", "10", "]", "*", "256", "+", "data", "[", "11", "]", ")", "*", "0.1", "if", "sub_type", "==", "0x04", ":", "temperature", "=", "(", "(", "data", "[", "12", "]", "&", "0x7f", ")", "*", "256", "+", "data", "[", "13", "]", ")", "/", "10", "signbit", "=", "data", "[", "12", "]", "&", "0x80", "if", "signbit", "!=", "0", ":", "temperature", "=", "-", "temperature", "else", ":", "temperature", "=", "'--??--'", "if", "sub_type", "==", "0x04", ":", "wind_chill", "=", "(", "(", "data", "[", "14", "]", "&", "0x7f", ")", "*", "256", "+", "data", "[", "15", "]", ")", "/", "10", "signbit", "=", "data", "[", "14", "]", "&", "0x80", "if", "signbit", "!=", "0", ":", "wind_chill", "=", "-", "wind_chill", "else", ":", "wind_chill", "=", "'--??--'", "sensor_specific", "=", "{", "'id'", ":", "id_", ",", "'direction'", ":", "direction", ",", "'wind_gust'", ":", "gust", "}", "if", "av_speed", "!=", "'--??--'", ":", "sensor_specific", "[", "'av_speed'", "]", "=", "av_speed", "if", "temperature", "!=", "'--??--'", ":", "sensor_specific", "[", "'temperature'", "]", "=", "temperature", "if", "wind_chill", "!=", "'--??--'", ":", "sensor_specific", "[", "'wind_chill'", "]", "=", "wind_chill", "results", ".", "update", "(", "RfxPacketUtils", ".", "parse_signal_and_battery", "(", "data", "[", "16", "]", ")", ")", "results", ".", "update", "(", "sensor_specific", ")", "return", "results" ]
30.626667
15.76
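The temperature and wind-chill fields above use a sign-magnitude encoding: bit 7 of the high byte carries the sign and the remaining 15 bits the magnitude in tenths of a degree. A tiny standalone check of that decoding (synthetic bytes, not a real packet):

def decode_signed_tenths(hi, lo):
    # Sign-magnitude: 15-bit magnitude in tenths, bit 0x80 of hi is the sign.
    value = ((hi & 0x7F) * 256 + lo) / 10
    return -value if hi & 0x80 else value

print(decode_signed_tenths(0x00, 0xAD))  # 17.3
print(decode_signed_tenths(0x80, 0xAD))  # -17.3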
def template_chooser_clicked(self):
        """Slot activated when report file tool button is clicked.

        .. versionadded:: 4.3.0
        """
        path = self.template_path.text()
        if not path:
            path = setting('lastCustomTemplate', '', str)
        if path:
            directory = dirname(path)
        else:
            directory = ''
        # noinspection PyCallByClass,PyTypeChecker
        file_name = QFileDialog.getOpenFileName(
            self,
            tr('Select report'),
            directory,
            tr('QGIS composer templates (*.qpt *.QPT)'))
        self.template_path.setText(file_name)
[ "def", "template_chooser_clicked", "(", "self", ")", ":", "path", "=", "self", ".", "template_path", ".", "text", "(", ")", "if", "not", "path", ":", "path", "=", "setting", "(", "'lastCustomTemplate'", ",", "''", ",", "str", ")", "if", "path", ":", "directory", "=", "dirname", "(", "path", ")", "else", ":", "directory", "=", "''", "# noinspection PyCallByClass,PyTypeChecker", "file_name", "=", "QFileDialog", ".", "getOpenFileName", "(", "self", ",", "tr", "(", "'Select report'", ")", ",", "directory", ",", "tr", "(", "'QGIS composer templates (*.qpt *.QPT)'", ")", ")", "self", ".", "template_path", ".", "setText", "(", "file_name", ")" ]
32.684211
13.052632
def open_submission(self, url=None): """ Select the current submission to view posts. """ if url is None: data = self.get_selected_item() url = data['permalink'] if data.get('url_type') == 'selfpost': self.config.history.add(data['url_full']) self.selected_page = self.open_submission_page(url)
[ "def", "open_submission", "(", "self", ",", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "data", "=", "self", ".", "get_selected_item", "(", ")", "url", "=", "data", "[", "'permalink'", "]", "if", "data", ".", "get", "(", "'url_type'", ")", "==", "'selfpost'", ":", "self", ".", "config", ".", "history", ".", "add", "(", "data", "[", "'url_full'", "]", ")", "self", ".", "selected_page", "=", "self", ".", "open_submission_page", "(", "url", ")" ]
34.272727
11.545455
def get_google_links(limit, params, headers):
    """
    Fetch up to ``limit`` links.

    Every Google search result page has a start index; each page contains
    10 search results.
    """
    links = []
    for start_index in range(0, limit, 10):
        params['start'] = start_index
        resp = s.get("https://www.google.com/search", params = params, headers = headers)
        page_links = scrape_links(resp.content, engine = 'g')
        links.extend(page_links)
    return links[:limit]
[ "def", "get_google_links", "(", "limit", ",", "params", ",", "headers", ")", ":", "links", "=", "[", "]", "for", "start_index", "in", "range", "(", "0", ",", "limit", ",", "10", ")", ":", "params", "[", "'start'", "]", "=", "start_index", "resp", "=", "s", ".", "get", "(", "\"https://www.google.com/search\"", ",", "params", "=", "params", ",", "headers", "=", "headers", ")", "page_links", "=", "scrape_links", "(", "resp", ".", "content", ",", "engine", "=", "'g'", ")", "links", ".", "extend", "(", "page_links", ")", "return", "links", "[", ":", "limit", "]" ]
32.071429
13.357143
def get_resource_agent_session(self): """Gets the session for retrieving resource agent mappings. return: (osid.resource.ResourceAgentSession) - a ``ResourceAgentSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_agent()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_agent()`` is ``true``.* """ if not self.supports_resource_agent(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ResourceAgentSession(runtime=self._runtime)
[ "def", "get_resource_agent_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_resource_agent", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ResourceAgentSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
41.625
14.625
def starts_variation(self) -> bool: """ Checks if this node starts a variation (and can thus have a starting comment). The root node does not start a variation and can have no starting comment. For example, in ``1. e4 e5 (1... c5 2. Nf3) 2. Nf3``, the node holding 1... c5 starts a variation. """ if not self.parent or not self.parent.variations: return False return self.parent.variations[0] != self
[ "def", "starts_variation", "(", "self", ")", "->", "bool", ":", "if", "not", "self", ".", "parent", "or", "not", "self", ".", "parent", ".", "variations", ":", "return", "False", "return", "self", ".", "parent", ".", "variations", "[", "0", "]", "!=", "self" ]
36.461538
19.538462
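Reproducing the docstring's example with python-chess (assuming a reasonably recent version, where chess.pgn.read_game and GameNode.starts_variation behave as shown):

import io
import chess.pgn

game = chess.pgn.read_game(io.StringIO("1. e4 e5 (1... c5 2. Nf3) 2. Nf3 *"))
e4 = game.variations[0]
e5, c5 = e4.variations[0], e4.variations[1]
print(e5.starts_variation())  # False -- mainline continuation
print(c5.starts_variation())  # True  -- this node opens the (1... c5) sideline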
def on_canvas_slave__route_electrode_added(self, slave, electrode_id): ''' .. versionchanged:: 0.11 Draw temporary route currently being formed. .. versionchanged:: 0.11.3 Update routes table by setting ``df_routes`` property of :attr:`canvas_slave`. ''' logger.debug('Route electrode added: %s', electrode_id) if slave._route.electrode_ids is None: return df_route = pd.DataFrame([[-1, e, i] for i, e in enumerate(slave._route.electrode_ids)], columns=['route_i', 'electrode_i', 'transition_i']) # XXX Negative `route_i` corresponds to temporary route being # drawn. Append row entries for temporary route to existing routes # table. df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy() self.canvas_slave.df_routes = pd.concat([df_routes, df_route])
[ "def", "on_canvas_slave__route_electrode_added", "(", "self", ",", "slave", ",", "electrode_id", ")", ":", "logger", ".", "debug", "(", "'Route electrode added: %s'", ",", "electrode_id", ")", "if", "slave", ".", "_route", ".", "electrode_ids", "is", "None", ":", "return", "df_route", "=", "pd", ".", "DataFrame", "(", "[", "[", "-", "1", ",", "e", ",", "i", "]", "for", "i", ",", "e", "in", "enumerate", "(", "slave", ".", "_route", ".", "electrode_ids", ")", "]", ",", "columns", "=", "[", "'route_i'", ",", "'electrode_i'", ",", "'transition_i'", "]", ")", "# XXX Negative `route_i` corresponds to temporary route being", "# drawn. Append row entries for temporary route to existing routes", "# table.", "df_routes", "=", "slave", ".", "df_routes", ".", "loc", "[", "slave", ".", "df_routes", ".", "route_i", ">=", "0", "]", ".", "copy", "(", ")", "self", ".", "canvas_slave", ".", "df_routes", "=", "pd", ".", "concat", "(", "[", "df_routes", ",", "df_route", "]", ")" ]
47.52381
23.238095
def buffer_typechecks_and_display(self, call_id, payload): """Adds typecheck events to the buffer, and displays them right away. This is a workaround for this issue: https://github.com/ensime/ensime-server/issues/1616 """ self.buffer_typechecks(call_id, payload) self.editor.display_notes(self.buffered_notes)
[ "def", "buffer_typechecks_and_display", "(", "self", ",", "call_id", ",", "payload", ")", ":", "self", ".", "buffer_typechecks", "(", "call_id", ",", "payload", ")", "self", ".", "editor", ".", "display_notes", "(", "self", ".", "buffered_notes", ")" ]
43.875
12.875
def fseq(self, client, message): """ fseq messages associate a unique frame id with a set of set and alive messages """ client.last_frame = client.current_frame client.current_frame = message[3]
[ "def", "fseq", "(", "self", ",", "client", ",", "message", ")", ":", "client", ".", "last_frame", "=", "client", ".", "current_frame", "client", ".", "current_frame", "=", "message", "[", "3", "]" ]
33.714286
8.285714
def money_receipts(pronac, dt):
    """
    Checks how many items appear on the same receipt when the payment
    type is withdrawal/money.
        - is_outlier: True if any receipt holds more than one item
        - itens_que_compartilham_comprovantes: List of items that share a receipt
    """
    df = verified_repeated_receipts_for_pronac(pronac)
    comprovantes_saque = df[df['tpFormaDePagamento'] == 3.0]
    return metric_return(comprovantes_saque)
[ "def", "money_receipts", "(", "pronac", ",", "dt", ")", ":", "df", "=", "verified_repeated_receipts_for_pronac", "(", "pronac", ")", "comprovantes_saque", "=", "df", "[", "df", "[", "'tpFormaDePagamento'", "]", "==", "3.0", "]", "return", "metric_return", "(", "comprovantes_saque", ")" ]
40.363636
19.272727
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
    """Computes a solution to the ROF problem with the fast gradient
    projection algorithm.

    Parameters
    ----------
    p : np.array
        dual initial variable
    data : np.array
        noisy data / proximal point
    alpha : float
        regularization parameter
    niter : int
        number of iterations
    grad : instance of gradient class
        class that supports grad(x), grad.adjoint(x), grad.norm
    proj_C : function
        projection onto the constraint set of the primal variable,
        e.g. non-negativity
    proj_P : function
        projection onto the constraint set of the dual variable,
        e.g. norm <= 1
    tol : float (optional)
        nonnegative parameter that gives the tolerance for convergence. If
        set to None, then the algorithm will run for a fixed number of
        iterations

    Other Parameters
    ----------------
    callback : callable, optional
        Function called with the current iterate after each iteration.
    """

    # Callback object
    callback = kwargs.pop('callback', None)
    if callback is not None and not callable(callback):
        raise TypeError('`callback` {} is not callable'.format(callback))

    factr = 1 / (grad.norm**2 * alpha)

    q = p.copy()
    x = data.space.zero()

    t = 1.

    if tol is None:
        def convergence_eval(p1, p2):
            return False
    else:
        def convergence_eval(p1, p2):
            return (p1 - p2).norm() / p1.norm() < tol

    pnew = p.copy()

    if callback is not None:
        callback(p)

    for k in range(niter):
        t0 = t
        grad.adjoint(q, out=x)
        proj_C(data - alpha * x, out=x)
        grad(x, out=pnew)
        pnew *= factr
        pnew += q

        proj_P(pnew, out=pnew)

        converged = convergence_eval(p, pnew)

        if not converged:
            # update step size
            t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.

            # calculate next iterate
            q[:] = pnew + (t0 - 1) / t * (pnew - p)

        p[:] = pnew

        if converged:
            t = None
            break

        if callback is not None:
            callback(p)

    # get current image estimate
    x = proj_C(data - alpha * grad.adjoint(p))

    return x
[ "def", "fgp_dual", "(", "p", ",", "data", ",", "alpha", ",", "niter", ",", "grad", ",", "proj_C", ",", "proj_P", ",", "tol", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Callback object", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", "if", "callback", "is", "not", "None", "and", "not", "callable", "(", "callback", ")", ":", "raise", "TypeError", "(", "'`callback` {} is not callable'", ".", "format", "(", "callback", ")", ")", "factr", "=", "1", "/", "(", "grad", ".", "norm", "**", "2", "*", "alpha", ")", "q", "=", "p", ".", "copy", "(", ")", "x", "=", "data", ".", "space", ".", "zero", "(", ")", "t", "=", "1.", "if", "tol", "is", "None", ":", "def", "convergence_eval", "(", "p1", ",", "p2", ")", ":", "return", "False", "else", ":", "def", "convergence_eval", "(", "p1", ",", "p2", ")", ":", "return", "(", "p1", "-", "p2", ")", ".", "norm", "(", ")", "/", "p1", ".", "norm", "(", ")", "<", "tol", "pnew", "=", "p", ".", "copy", "(", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", "p", ")", "for", "k", "in", "range", "(", "niter", ")", ":", "t0", "=", "t", "grad", ".", "adjoint", "(", "q", ",", "out", "=", "x", ")", "proj_C", "(", "data", "-", "alpha", "*", "x", ",", "out", "=", "x", ")", "grad", "(", "x", ",", "out", "=", "pnew", ")", "pnew", "*=", "factr", "pnew", "+=", "q", "proj_P", "(", "pnew", ",", "out", "=", "pnew", ")", "converged", "=", "convergence_eval", "(", "p", ",", "pnew", ")", "if", "not", "converged", ":", "# update step size", "t", "=", "(", "1", "+", "np", ".", "sqrt", "(", "1", "+", "4", "*", "t0", "**", "2", ")", ")", "/", "2.", "# calculate next iterate", "q", "[", ":", "]", "=", "pnew", "+", "(", "t0", "-", "1", ")", "/", "t", "*", "(", "pnew", "-", "p", ")", "p", "[", ":", "]", "=", "pnew", "if", "converged", ":", "t", "=", "None", "break", "if", "callback", "is", "not", "None", ":", "callback", "(", "p", ")", "# get current image estimate", "x", "=", "proj_C", "(", "data", "-", "alpha", "*", "grad", ".", "adjoint", "(", "p", ")", ")", "return", "x" ]
25.306818
21.681818
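The step-size recursion inside the loop is the standard FISTA momentum sequence t_{k+1} = (1 + sqrt(1 + 4*t_k^2)) / 2, whose extrapolation weight (t_k - 1) / t_{k+1} climbs from 0 toward 1. A few iterations, just to see the weights (standalone; the values are exact consequences of the recursion):

import math

t = 1.0
for k in range(6):
    t_next = (1 + math.sqrt(1 + 4 * t ** 2)) / 2
    print(k, round(t_next, 4), round((t - 1) / t_next, 4))  # momentum grows each step
    t = t_next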
def id_to_object(self, line):
        """
        Resolves an IP address to a Range object, creating it if it doesn't exist.
        """
        result = Range.get(line, ignore=404)
        if not result:
            result = Range(range=line)
            result.save()
        return result
[ "def", "id_to_object", "(", "self", ",", "line", ")", ":", "result", "=", "Range", ".", "get", "(", "line", ",", "ignore", "=", "404", ")", "if", "not", "result", ":", "result", "=", "Range", "(", "range", "=", "line", ")", "result", ".", "save", "(", ")", "return", "result" ]
31.777778
12.666667
def _mpda(self, re_grammar, splitstring=0): """ Args: re_grammar (list): A list of grammar rules splitstring (bool): A boolean for enabling or disabling the splitting of symbols using a space Returns: PDA: The generated PDA """ cnfgrammar = CNFGenerator(re_grammar) if not self.alphabet: self._extract_alphabet(cnfgrammar) cnftopda = CnfPda(self.alphabet) productions = {} nonterminals = [] nonterminals.append(cnfgrammar.init_symbol) for key in list(cnfgrammar.grammar_nonterminals): if key != cnfgrammar.init_symbol: nonterminals.append(key) for key in list(cnfgrammar.grammar_nonterminals): j = 0 productions[key] = {} # print 'testing '+key for pair in cnfgrammar.grammar_rules: cnf_form = list(pair) if cnf_form[0] == key: productions[key][j] = {} if isinstance(cnf_form[1], type(())): # print list(p[1]) productions[key][j]['b0'] = list(cnf_form[1])[0] productions[key][j]['b1'] = list(cnf_form[1])[1] else: # print p[1] productions[key][j]['a'] = cnf_form[1] j = j + 1 return cnftopda.initialize( nonterminals, productions, list( cnfgrammar.grammar_terminals), splitstring)
[ "def", "_mpda", "(", "self", ",", "re_grammar", ",", "splitstring", "=", "0", ")", ":", "cnfgrammar", "=", "CNFGenerator", "(", "re_grammar", ")", "if", "not", "self", ".", "alphabet", ":", "self", ".", "_extract_alphabet", "(", "cnfgrammar", ")", "cnftopda", "=", "CnfPda", "(", "self", ".", "alphabet", ")", "productions", "=", "{", "}", "nonterminals", "=", "[", "]", "nonterminals", ".", "append", "(", "cnfgrammar", ".", "init_symbol", ")", "for", "key", "in", "list", "(", "cnfgrammar", ".", "grammar_nonterminals", ")", ":", "if", "key", "!=", "cnfgrammar", ".", "init_symbol", ":", "nonterminals", ".", "append", "(", "key", ")", "for", "key", "in", "list", "(", "cnfgrammar", ".", "grammar_nonterminals", ")", ":", "j", "=", "0", "productions", "[", "key", "]", "=", "{", "}", "# print 'testing '+key", "for", "pair", "in", "cnfgrammar", ".", "grammar_rules", ":", "cnf_form", "=", "list", "(", "pair", ")", "if", "cnf_form", "[", "0", "]", "==", "key", ":", "productions", "[", "key", "]", "[", "j", "]", "=", "{", "}", "if", "isinstance", "(", "cnf_form", "[", "1", "]", ",", "type", "(", "(", ")", ")", ")", ":", "# print list(p[1])", "productions", "[", "key", "]", "[", "j", "]", "[", "'b0'", "]", "=", "list", "(", "cnf_form", "[", "1", "]", ")", "[", "0", "]", "productions", "[", "key", "]", "[", "j", "]", "[", "'b1'", "]", "=", "list", "(", "cnf_form", "[", "1", "]", ")", "[", "1", "]", "else", ":", "# print p[1]", "productions", "[", "key", "]", "[", "j", "]", "[", "'a'", "]", "=", "cnf_form", "[", "1", "]", "j", "=", "j", "+", "1", "return", "cnftopda", ".", "initialize", "(", "nonterminals", ",", "productions", ",", "list", "(", "cnfgrammar", ".", "grammar_terminals", ")", ",", "splitstring", ")" ]
39.5
13.5
def useNonce(self, server_url, timestamp, salt): """Return whether this nonce is valid. str -> bool """ if abs(timestamp - time.time()) > nonce.SKEW: return False if server_url: proto, rest = server_url.split('://', 1) else: # Create empty proto / rest values for empty server_url, # which is part of a consumer-generated nonce. proto, rest = '', '' domain = _filenameEscape(rest.split('/', 1)[0]) url_hash = _safe64(server_url) salt_hash = _safe64(salt) filename = '%08x-%s-%s-%s-%s' % (timestamp, proto, domain, url_hash, salt_hash) filename = os.path.join(self.nonce_dir, filename) try: fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0200) except OSError, why: if why.errno == EEXIST: return False else: raise else: os.close(fd) return True
[ "def", "useNonce", "(", "self", ",", "server_url", ",", "timestamp", ",", "salt", ")", ":", "if", "abs", "(", "timestamp", "-", "time", ".", "time", "(", ")", ")", ">", "nonce", ".", "SKEW", ":", "return", "False", "if", "server_url", ":", "proto", ",", "rest", "=", "server_url", ".", "split", "(", "'://'", ",", "1", ")", "else", ":", "# Create empty proto / rest values for empty server_url,", "# which is part of a consumer-generated nonce.", "proto", ",", "rest", "=", "''", ",", "''", "domain", "=", "_filenameEscape", "(", "rest", ".", "split", "(", "'/'", ",", "1", ")", "[", "0", "]", ")", "url_hash", "=", "_safe64", "(", "server_url", ")", "salt_hash", "=", "_safe64", "(", "salt", ")", "filename", "=", "'%08x-%s-%s-%s-%s'", "%", "(", "timestamp", ",", "proto", ",", "domain", ",", "url_hash", ",", "salt_hash", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "nonce_dir", ",", "filename", ")", "try", ":", "fd", "=", "os", ".", "open", "(", "filename", ",", "os", ".", "O_CREAT", "|", "os", ".", "O_EXCL", "|", "os", ".", "O_WRONLY", ",", "0200", ")", "except", "OSError", ",", "why", ":", "if", "why", ".", "errno", "==", "EEXIST", ":", "return", "False", "else", ":", "raise", "else", ":", "os", ".", "close", "(", "fd", ")", "return", "True" ]
31.363636
19.818182
def hkdf(self, chaining_key, input_key_material, dhlen=64): """Hash-based key derivation function Takes a ``chaining_key'' byte sequence of len HASHLEN, and an ``input_key_material'' byte sequence with length either zero bytes, 32 bytes or dhlen bytes. Returns two byte sequences of length HASHLEN""" if len(chaining_key) != self.HASHLEN: raise HashError("Incorrect chaining key length") if len(input_key_material) not in (0, 32, dhlen): raise HashError("Incorrect input key material length") temp_key = self.hmac_hash(chaining_key, input_key_material) output1 = self.hmac_hash(temp_key, b'\x01') output2 = self.hmac_hash(temp_key, output1 + b'\x02') return output1, output2
[ "def", "hkdf", "(", "self", ",", "chaining_key", ",", "input_key_material", ",", "dhlen", "=", "64", ")", ":", "if", "len", "(", "chaining_key", ")", "!=", "self", ".", "HASHLEN", ":", "raise", "HashError", "(", "\"Incorrect chaining key length\"", ")", "if", "len", "(", "input_key_material", ")", "not", "in", "(", "0", ",", "32", ",", "dhlen", ")", ":", "raise", "HashError", "(", "\"Incorrect input key material length\"", ")", "temp_key", "=", "self", ".", "hmac_hash", "(", "chaining_key", ",", "input_key_material", ")", "output1", "=", "self", ".", "hmac_hash", "(", "temp_key", ",", "b'\\x01'", ")", "output2", "=", "self", ".", "hmac_hash", "(", "temp_key", ",", "output1", "+", "b'\\x02'", ")", "return", "output1", ",", "output2" ]
45.470588
19.588235
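A self-contained sketch of the same two-output derivation (Noise-protocol-style HKDF), with hmac_hash instantiated as HMAC-SHA256 so HASHLEN is 32; the concrete hash choice is an assumption, not taken from the class this method belongs to:

import hashlib
import hmac

def hmac_hash(key, data):
    return hmac.new(key, data, hashlib.sha256).digest()

def hkdf(chaining_key, input_key_material):
    temp_key = hmac_hash(chaining_key, input_key_material)  # extract
    output1 = hmac_hash(temp_key, b'\x01')                  # expand, block 1
    output2 = hmac_hash(temp_key, output1 + b'\x02')        # expand, block 2
    return output1, output2

o1, o2 = hkdf(b'\x00' * 32, b'\x11' * 32)
print(len(o1), len(o2))  # 32 32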
def nl_skipped_handler_debug(msg, arg): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L134.""" ofd = arg or _LOGGER.debug ofd('-- Debug: Skipped message: ' + print_header_content(nlmsg_hdr(msg))) return NL_SKIP
[ "def", "nl_skipped_handler_debug", "(", "msg", ",", "arg", ")", ":", "ofd", "=", "arg", "or", "_LOGGER", ".", "debug", "ofd", "(", "'-- Debug: Skipped message: '", "+", "print_header_content", "(", "nlmsg_hdr", "(", "msg", ")", ")", ")", "return", "NL_SKIP" ]
48.8
14
def _calculate_scores(self): """Calculate the 'value' of each node in the graph based on how many blocking descendants it has. We use this score for the internal priority queue's ordering, so the quality of this metric is important. The score is stored as a negative number because the internal PriorityQueue picks lowest values first. We could do this in one pass over the graph instead of len(self.graph) passes but this is easy. For large graphs this may hurt performance. This operates on the graph, so it would require a lock if called from outside __init__. :return Dict[str, int]: The score dict, mapping unique IDs to integer scores. Lower scores are higher priority. """ scores = {} for node in self.graph.nodes(): score = -1 * len([ d for d in nx.descendants(self.graph, node) if self._include_in_cost(d) ]) scores[node] = score return scores
[ "def", "_calculate_scores", "(", "self", ")", ":", "scores", "=", "{", "}", "for", "node", "in", "self", ".", "graph", ".", "nodes", "(", ")", ":", "score", "=", "-", "1", "*", "len", "(", "[", "d", "for", "d", "in", "nx", ".", "descendants", "(", "self", ".", "graph", ",", "node", ")", "if", "self", ".", "_include_in_cost", "(", "d", ")", "]", ")", "scores", "[", "node", "]", "=", "score", "return", "scores" ]
40.96
22.44
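The scoring idea -- more blocking descendants means a more negative, higher-priority score -- can be seen on a toy graph (here every node counts toward cost, standing in for _include_in_cost):

import networkx as nx

g = nx.DiGraph([('a', 'b'), ('a', 'c'), ('b', 'd')])
scores = {n: -len(nx.descendants(g, n)) for n in g.nodes()}
print(scores)  # {'a': -3, 'b': -1, 'c': 0, 'd': 0} -- 'a' unblocks the most work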
def instructions(self): """ Return an iterator over this block's instructions. The iterator will yield a ValueRef for each instruction. """ if not self.is_block: raise ValueError('expected block value, got %s' % (self._kind,)) it = ffi.lib.LLVMPY_BlockInstructionsIter(self) parents = self._parents.copy() parents.update(block=self) return _InstructionsIterator(it, parents)
[ "def", "instructions", "(", "self", ")", ":", "if", "not", "self", ".", "is_block", ":", "raise", "ValueError", "(", "'expected block value, got %s'", "%", "(", "self", ".", "_kind", ",", ")", ")", "it", "=", "ffi", ".", "lib", ".", "LLVMPY_BlockInstructionsIter", "(", "self", ")", "parents", "=", "self", ".", "_parents", ".", "copy", "(", ")", "parents", ".", "update", "(", "block", "=", "self", ")", "return", "_InstructionsIterator", "(", "it", ",", "parents", ")" ]
40.727273
12.545455
def to_dict(self, into=dict): """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) return into_c(self.items())
[ "def", "to_dict", "(", "self", ",", "into", "=", "dict", ")", ":", "# GH16122", "into_c", "=", "com", ".", "standardize_mapping", "(", "into", ")", "return", "into_c", "(", "self", ".", "items", "(", ")", ")" ]
31.735294
17.735294
def setrange(self, key, offset, value):
        """Overwrites part of the string stored at key, starting at the
        specified offset, for the entire length of value. If the offset is
        larger than the current length of the string at key, the string is
        padded with zero-bytes to make offset fit. Non-existing keys are
        considered as empty strings, so this command will make sure it holds a
        string large enough to be able to set value at offset.

        .. note::

           The maximum offset that you can set is 2 :sup:`29` -1 (536870911),
           as Redis Strings are limited to 512 megabytes. If you need to grow
           beyond this size, you can use multiple keys.

        .. warning::

           When setting the last possible byte and the string value stored at
           key does not yet hold a string value, or holds a small string
           value, Redis needs to allocate all intermediate memory which can
           block the server for some time. On a 2010 MacBook Pro, setting byte
           number 536870911 (512MB allocation) takes ~300ms, setting byte
           number 134217728 (128MB allocation) takes ~80ms, setting bit number
           33554432 (32MB allocation) takes ~30ms and setting bit number
           8388608 (8MB allocation) takes ~8ms. Note that once this first
           allocation is done, subsequent calls to
           :meth:`~tredis.RedisClient.setrange` for the same key will not have
           the allocation overhead.

        .. versionadded:: 0.2.0

        .. note::

           **Time complexity**: ``O(1)``, not counting the time taken to copy
           the new string in place. Usually, this string is very small so the
           amortized complexity is ``O(1)``. Otherwise, complexity is ``O(M)``
           with ``M`` being the length of the value argument.

        :param key: The key of the string to modify
        :type key: :class:`str`, :class:`bytes`
        :param offset: The zero-based byte offset at which to start writing
        :type offset: int
        :param value: The value to set
        :type value: :class:`str`, :class:`bytes`, :class:`int`
        :returns: The length of the string after it was modified by the
            command
        :rtype: int
        :raises: :exc:`~tredis.exceptions.RedisError`

        """
        return self._execute([b'SETRANGE', key, ascii(offset), value])
[ "def", "setrange", "(", "self", ",", "key", ",", "offset", ",", "value", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'SETRANGE'", ",", "key", ",", "ascii", "(", "offset", ")", ",", "value", "]", ")" ]
54.02439
27.536585
def _grow(list_of_lists, num_new): """ Given a list of lists, and a number of new lists to add, copy the content of the first list into the new ones, and add them to the list of lists. """ first = list_of_lists[0] for i in range(num_new): list_of_lists.append(copy.deepcopy(first)) return list_of_lists
[ "def", "_grow", "(", "list_of_lists", ",", "num_new", ")", ":", "first", "=", "list_of_lists", "[", "0", "]", "for", "i", "in", "range", "(", "num_new", ")", ":", "list_of_lists", ".", "append", "(", "copy", ".", "deepcopy", "(", "first", ")", ")", "return", "list_of_lists" ]
36.666667
12.888889
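The deepcopy matters: appending the same inner list by reference would alias every copy. A quick demonstration of the difference (standalone, with _grow re-stated inline):

import copy

def _grow(list_of_lists, num_new):
    first = list_of_lists[0]
    for i in range(num_new):
        list_of_lists.append(copy.deepcopy(first))
    return list_of_lists

grown = _grow([[1, 2]], 2)
grown[1].append(99)
print(grown)  # [[1, 2], [1, 2, 99], [1, 2]] -- the copies stay independent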
def calculate_vss(self, method=None):
        """ Calculate the vertical swimming speed of this behavior.
            Takes into account the vertical swimming speed and the
            variance.

            Parameters:
                method: "gaussian" (default) or "random"
                    "random" (vss - variance) < X < (vss + variance)
        """
        if self.variance == float(0):
            return self.vss
        else:
            # Calculate Gaussian distribution and return
            if method == "gaussian" or method is None:
                return gauss(self.vss, self.variance)
            elif method == "random":
                return uniform(self.vss - self.variance, self.vss + self.variance)
            else:
                raise ValueError("Method of vss calculation not recognized, please use 'gaussian' or 'random'")
[ "def", "calculate_vss", "(", "self", ",", "method", "=", "None", ")", ":", "if", "self", ".", "variance", "==", "float", "(", "0", ")", ":", "return", "self", ".", "vss", "else", ":", "# Calculate Gaussian distribution and return", "if", "method", "==", "\"gaussian\"", "or", "method", "is", "None", ":", "return", "gauss", "(", "self", ".", "vss", ",", "self", ".", "variance", ")", "elif", "method", "==", "\"random\"", ":", "return", "uniform", "(", "self", ".", "vss", "-", "self", ".", "variance", ",", "self", ".", "vss", "+", "self", ".", "variance", ")", "else", ":", "raise", "ValueError", "(", "\"Method of vss calculation not recognized, please use 'gaussian' or 'random'\"", ")" ]
41.35
19.95
def gcm_send_message(registration_id, data, encoding='utf-8', **kwargs): """ Standalone method to send a single gcm notification """ messenger = GCMMessenger(registration_id, data, encoding=encoding, **kwargs) return messenger.send_plain()
[ "def", "gcm_send_message", "(", "registration_id", ",", "data", ",", "encoding", "=", "'utf-8'", ",", "*", "*", "kwargs", ")", ":", "messenger", "=", "GCMMessenger", "(", "registration_id", ",", "data", ",", "encoding", "=", "encoding", ",", "*", "*", "kwargs", ")", "return", "messenger", ".", "send_plain", "(", ")" ]
36.285714
19.142857
def get_platform_node_selector(self, platform): """ search the configuration for entries of the form node_selector.platform :param platform: str, platform to search for, can be null :return dict """ nodeselector = {} if platform: nodeselector_str = self._get_value("node_selector." + platform, self.conf_section, "node_selector." + platform) nodeselector = self.generate_nodeselector_dict(nodeselector_str) return nodeselector
[ "def", "get_platform_node_selector", "(", "self", ",", "platform", ")", ":", "nodeselector", "=", "{", "}", "if", "platform", ":", "nodeselector_str", "=", "self", ".", "_get_value", "(", "\"node_selector.\"", "+", "platform", ",", "self", ".", "conf_section", ",", "\"node_selector.\"", "+", "platform", ")", "nodeselector", "=", "self", ".", "generate_nodeselector_dict", "(", "nodeselector_str", ")", "return", "nodeselector" ]
42.307692
23.384615
def allow_address_pairs(session, network, subnet):
    """Allow several interfaces to be added and accessed from the other
    machines.

    This is particularly useful when working with virtual ips.
    """
    nclient = neutron.Client('2', session=session,
                             region_name=os.environ['OS_REGION_NAME'])
    ports = nclient.list_ports()
    ports_to_update = filter(
        lambda p: p['network_id'] == network['id'],
        ports['ports'])
    logger.info('[nova]: Allowing address pairs for ports %s' %
                list(map(lambda p: p['fixed_ips'], ports_to_update)))
    for port in ports_to_update:
        try:
            nclient.update_port(port['id'], {
                'port': {
                    'allowed_address_pairs': [{
                        'ip_address': subnet
                    }]
                }
            })
        except Exception:
            # NOTE(msimonin): dhcp and router interface port
            # seems to have enabled_sec_groups = False which
            # prevents them from being updated, just throw a warning
            # and skip them
            logger.warn("Can't update port %s" % port)
[ "def", "allow_address_pairs", "(", "session", ",", "network", ",", "subnet", ")", ":", "nclient", "=", "neutron", ".", "Client", "(", "'2'", ",", "session", "=", "session", ",", "region_name", "=", "os", ".", "environ", "[", "'OS_REGION_NAME'", "]", ")", "ports", "=", "nclient", ".", "list_ports", "(", ")", "ports_to_update", "=", "filter", "(", "lambda", "p", ":", "p", "[", "'network_id'", "]", "==", "network", "[", "'id'", "]", ",", "ports", "[", "'ports'", "]", ")", "logger", ".", "info", "(", "'[nova]: Allowing address pairs for ports %s'", "%", "list", "(", "map", "(", "lambda", "p", ":", "p", "[", "'fixed_ips'", "]", ",", "ports_to_update", ")", ")", ")", "for", "port", "in", "ports_to_update", ":", "try", ":", "nclient", ".", "update_port", "(", "port", "[", "'id'", "]", ",", "{", "'port'", ":", "{", "'allowed_address_pairs'", ":", "[", "{", "'ip_address'", ":", "subnet", "}", "]", "}", "}", ")", "except", "Exception", ":", "# NOTE(msimonin): dhcp and router interface port", "# seems to have enabled_sec_groups = False which", "# prevents them from being updated, just throw a warning", "# and skip them", "logger", ".", "warn", "(", "\"Can't update port %s\"", "%", "port", ")" ]
40.821429
15.678571
def pairwise(X):
    """Compute the M x M matrix of pairwise Euclidean distances
    between the rows of X."""
    M = X.shape[0]
    N = X.shape[1]
    D = numpy.zeros((M,M))
    "omp parallel for private(i,j,d,k,tmp)"
    for i in xrange(M):
        for j in xrange(M):
            d = 0.0
            for k in xrange(N):
                tmp = X[i,k] - X[j,k]
                d += tmp * tmp
            D[i,j] = math.sqrt(d)
    return D
[ "def", "pairwise", "(", "X", ")", ":", "M", "=", "X", ".", "shape", "[", "0", "]", "N", "=", "X", ".", "shape", "[", "1", "]", "D", "=", "numpy", ".", "zeros", "(", "(", "M", ",", "M", ")", ")", "for", "i", "in", "xrange", "(", "M", ")", ":", "for", "j", "in", "xrange", "(", "M", ")", ":", "d", "=", "0.0", "for", "k", "in", "xrange", "(", "N", ")", ":", "tmp", "=", "X", "[", "i", ",", "k", "]", "-", "X", "[", "j", ",", "k", "]", "d", "+=", "tmp", "*", "tmp", "D", "[", "i", ",", "j", "]", "=", "math", ".", "sqrt", "(", "d", ")", "return", "D" ]
25.615385
14.846154
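The triple loop above can be cross-checked against a vectorized NumPy equivalent; a sketch (broadcasting builds the full M x M x N difference tensor, trading memory for speed):

import numpy as np

def pairwise_vec(X):
    diff = X[:, None, :] - X[None, :, :]      # shape (M, M, N)
    return np.sqrt((diff ** 2).sum(axis=-1))  # shape (M, M)

X = np.random.rand(5, 3)
D = pairwise_vec(X)
print(D.shape, np.allclose(D, D.T))  # (5, 5) True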
def download_from(self, buff, remote_path):
        """Downloads a file from the WebDAV server and writes it into a buffer.

        :param buff: buffer object for writing of downloaded file content.
        :param remote_path: path to file on WebDAV server.
        """
        urn = Urn(remote_path)
        if self.is_dir(urn.path()):
            raise OptionNotValid(name='remote_path', value=remote_path)

        if not self.check(urn.path()):
            raise RemoteResourceNotFound(urn.path())

        response = self.execute_request(action='download', path=urn.quote())
        buff.write(response.content)
[ "def", "download_from", "(", "self", ",", "buff", ",", "remote_path", ")", ":", "urn", "=", "Urn", "(", "remote_path", ")", "if", "self", ".", "is_dir", "(", "urn", ".", "path", "(", ")", ")", ":", "raise", "OptionNotValid", "(", "name", "=", "'remote_path'", ",", "value", "=", "remote_path", ")", "if", "not", "self", ".", "check", "(", "urn", ".", "path", "(", ")", ")", ":", "raise", "RemoteResourceNotFound", "(", "urn", ".", "path", "(", ")", ")", "response", "=", "self", ".", "execute_request", "(", "action", "=", "'download'", ",", "path", "=", "urn", ".", "quote", "(", ")", ")", "buff", ".", "write", "(", "response", ".", "content", ")" ]
39.066667
18.333333
def compose(*funcs):
    '''
    Compose an ordered list of functions. Args of a, b, c, d evaluate as
    a(b(c(d(ctx)))).
    '''
    def _compose(ctx):
        # last func gets context, rest get result of previous func
        _result = funcs[-1](ctx)
        for f in reversed(funcs[:-1]):
            _result = f(_result)
        return _result
    return _compose
[ "def", "compose", "(", "*", "funcs", ")", ":", "def", "_compose", "(", "ctx", ")", ":", "# last func gets context, rest get result of previous func", "_result", "=", "funcs", "[", "-", "1", "]", "(", "ctx", ")", "for", "f", "in", "reversed", "(", "funcs", "[", ":", "-", "1", "]", ")", ":", "_result", "=", "f", "(", "_result", ")", "return", "_result", "return", "_compose" ]
29.25
22.75
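Usage of the right-to-left composition above (the function is re-stated so the snippet runs on its own):

def compose(*funcs):
    def _compose(ctx):
        _result = funcs[-1](ctx)
        for f in reversed(funcs[:-1]):
            _result = f(_result)
        return _result
    return _compose

f = compose(lambda x: x + 1, lambda x: x * 2)  # evaluates as (x * 2) + 1
print(f(3))  # 7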
def data_iter(batch_size, num_embed, pre_trained_word2vec=False):
    """Construct data iter

    Parameters
    ----------
    batch_size: int
    num_embed: int
    pre_trained_word2vec: boolean
        identify the pre-trained layers or not
    Returns
    ----------
    train_set: DataIter
        Train DataIter
    valid: DataIter
        Valid DataIter
    sentences_size: int
        maximum sentence length, in words
    embedded_size: int
        dimension of each word embedding
    vocabulary_size: int
        number of words in the vocabulary
    """
    print('Loading data...')
    if pre_trained_word2vec:
        word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec')
        x, y = data_helpers.load_data_with_word2vec(word2vec)
        # reshape for convolution input
        x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2]))
        embedded_size = x.shape[-1]
        sentences_size = x.shape[2]
        vocabulary_size = -1
    else:
        x, y, vocab, vocab_inv = data_helpers.load_data()
        embedded_size = num_embed
        sentences_size = x.shape[1]
        vocabulary_size = len(vocab)

    # randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # split train/valid set
    x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
    y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
    print('Train/Valid split: %d/%d' % (len(y_train), len(y_dev)))
    print('train shape:', x_train.shape)
    print('valid shape:', x_dev.shape)
    print('sentence max words', sentences_size)
    print('embedding size', embedded_size)
    print('vocab size', vocabulary_size)

    train_set = mx.io.NDArrayIter(
        x_train, y_train, batch_size, shuffle=True)
    valid = mx.io.NDArrayIter(
        x_dev, y_dev, batch_size)
    return train_set, valid, sentences_size, embedded_size, vocabulary_size
[ "def", "data_iter", "(", "batch_size", ",", "num_embed", ",", "pre_trained_word2vec", "=", "False", ")", ":", "print", "(", "'Loading data...'", ")", "if", "pre_trained_word2vec", ":", "word2vec", "=", "data_helpers", ".", "load_pretrained_word2vec", "(", "'data/rt.vec'", ")", "x", ",", "y", "=", "data_helpers", ".", "load_data_with_word2vec", "(", "word2vec", ")", "# reshape for convolution input", "x", "=", "np", ".", "reshape", "(", "x", ",", "(", "x", ".", "shape", "[", "0", "]", ",", "1", ",", "x", ".", "shape", "[", "1", "]", ",", "x", ".", "shape", "[", "2", "]", ")", ")", "embedded_size", "=", "x", ".", "shape", "[", "-", "1", "]", "sentences_size", "=", "x", ".", "shape", "[", "2", "]", "vocabulary_size", "=", "-", "1", "else", ":", "x", ",", "y", ",", "vocab", ",", "vocab_inv", "=", "data_helpers", ".", "load_data", "(", ")", "embedded_size", "=", "num_embed", "sentences_size", "=", "x", ".", "shape", "[", "1", "]", "vocabulary_size", "=", "len", "(", "vocab", ")", "# randomly shuffle data", "np", ".", "random", ".", "seed", "(", "10", ")", "shuffle_indices", "=", "np", ".", "random", ".", "permutation", "(", "np", ".", "arange", "(", "len", "(", "y", ")", ")", ")", "x_shuffled", "=", "x", "[", "shuffle_indices", "]", "y_shuffled", "=", "y", "[", "shuffle_indices", "]", "# split train/valid set", "x_train", ",", "x_dev", "=", "x_shuffled", "[", ":", "-", "1000", "]", ",", "x_shuffled", "[", "-", "1000", ":", "]", "y_train", ",", "y_dev", "=", "y_shuffled", "[", ":", "-", "1000", "]", ",", "y_shuffled", "[", "-", "1000", ":", "]", "print", "(", "'Train/Valid split: %d/%d'", "%", "(", "len", "(", "y_train", ")", ",", "len", "(", "y_dev", ")", ")", ")", "print", "(", "'train shape:'", ",", "x_train", ".", "shape", ")", "print", "(", "'valid shape:'", ",", "x_dev", ".", "shape", ")", "print", "(", "'sentence max words'", ",", "sentences_size", ")", "print", "(", "'embedding size'", ",", "embedded_size", ")", "print", "(", "'vocab size'", ",", "vocabulary_size", ")", "train_set", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "x_train", ",", "y_train", ",", "batch_size", ",", "shuffle", "=", "True", ")", "valid", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "x_dev", ",", "y_dev", ",", "batch_size", ")", "return", "train_set", ",", "valid", ",", "sentences_size", ",", "embedded_size", ",", "vocabulary_size" ]
32.677966
16.118644
def CRRAutility_invP(u, gam): ''' Evaluates the derivative of the inverse of the CRRA utility function (with risk aversion parameter gam) at a given utility level u. Parameters ---------- u : float Utility value gam : float Risk aversion Returns ------- (unnamed) : float Marginal consumption corresponding to given utility value ''' if gam == 1: return np.exp(u) else: return( ((1.0-gam)*u)**(gam/(1.0-gam)) )
[ "def", "CRRAutility_invP", "(", "u", ",", "gam", ")", ":", "if", "gam", "==", "1", ":", "return", "np", ".", "exp", "(", "u", ")", "else", ":", "return", "(", "(", "(", "1.0", "-", "gam", ")", "*", "u", ")", "**", "(", "gam", "/", "(", "1.0", "-", "gam", ")", ")", ")" ]
23.047619
25.619048
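The returned expression follows directly from the CRRA utility function; a short derivation (LaTeX, writing the docstring's gam as \gamma):

u(c) = \frac{c^{1-\gamma}}{1-\gamma}, \qquad
u^{-1}(u) = \bigl((1-\gamma)\,u\bigr)^{\frac{1}{1-\gamma}}, \qquad
\frac{d}{du}\,u^{-1}(u) = \bigl((1-\gamma)\,u\bigr)^{\frac{\gamma}{1-\gamma}},

since 1/(1-\gamma) - 1 = \gamma/(1-\gamma). For \gamma = 1, u(c) = \ln c, so the inverse is e^{u} and its derivative is again e^{u}, matching both branches of the code.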
def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """ def _expect_element(collection): if isinstance(collection, (set, frozenset)): # Special case the error message for set and frozen set to make it # less verbose. collection_for_error_message = tuple(sorted(collection)) else: collection_for_error_message = collection template = ( "%(funcname)s() expected a value in {collection} " "for argument '%(argname)s', but got %(actual)s instead." ).format(collection=collection_for_error_message) return make_check( ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname, ) return preprocess(**valmap(_expect_element, named))
[ "def", "expect_element", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "named", ")", ":", "def", "_expect_element", "(", "collection", ")", ":", "if", "isinstance", "(", "collection", ",", "(", "set", ",", "frozenset", ")", ")", ":", "# Special case the error message for set and frozen set to make it", "# less verbose.", "collection_for_error_message", "=", "tuple", "(", "sorted", "(", "collection", ")", ")", "else", ":", "collection_for_error_message", "=", "collection", "template", "=", "(", "\"%(funcname)s() expected a value in {collection} \"", "\"for argument '%(argname)s', but got %(actual)s instead.\"", ")", ".", "format", "(", "collection", "=", "collection_for_error_message", ")", "return", "make_check", "(", "ValueError", ",", "template", ",", "complement", "(", "op", ".", "contains", "(", "collection", ")", ")", ",", "repr", ",", "funcname", "=", "__funcname", ",", ")", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_element", ",", "named", ")", ")" ]
33.557692
23.173077
def variance_inflation_factors(df): ''' Computes the variance inflation factor (VIF) for each column in the df. Returns a pandas Series of VIFs Args: df: pandas DataFrame with columns to run diagnostics on ''' corr = np.corrcoef(df, rowvar=0) corr_inv = np.linalg.inv(corr) vifs = np.diagonal(corr_inv) return pd.Series(vifs, df.columns, name='VIF')
[ "def", "variance_inflation_factors", "(", "df", ")", ":", "corr", "=", "np", ".", "corrcoef", "(", "df", ",", "rowvar", "=", "0", ")", "corr_inv", "=", "np", ".", "linalg", ".", "inv", "(", "corr", ")", "vifs", "=", "np", ".", "diagonal", "(", "corr_inv", ")", "return", "pd", ".", "Series", "(", "vifs", ",", "df", ".", "columns", ",", "name", "=", "'VIF'", ")" ]
31.916667
19.416667
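The VIFs here come straight from the diagonal of the inverse correlation matrix; a sketch on deliberately collinear data (the column names and noise scale are made up for illustration):

import numpy as np
import pandas as pd

def variance_inflation_factors(df):
    corr = np.corrcoef(df, rowvar=0)
    return pd.Series(np.diagonal(np.linalg.inv(corr)), df.columns, name='VIF')

rng = np.random.default_rng(0)
a = rng.normal(size=200)
df = pd.DataFrame({'a': a,
                   'b': a + 0.05 * rng.normal(size=200),  # nearly a copy of 'a'
                   'c': rng.normal(size=200)})
print(variance_inflation_factors(df))  # 'a' and 'b' get large VIFs, 'c' stays near 1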
def Update(self, env, args=None): """ Update an environment with the option variables. env - the environment to update. """ values = {} # first set the defaults: for option in self.options: if not option.default is None: values[option.key] = option.default # next set the value specified in the options file for filename in self.files: if os.path.exists(filename): dir = os.path.split(os.path.abspath(filename))[0] if dir: sys.path.insert(0, dir) try: values['__name__'] = filename with open(filename, 'r') as f: contents = f.read() exec(contents, {}, values) finally: if dir: del sys.path[0] del values['__name__'] # set the values specified on the command line if args is None: args = self.args for arg, value in args.items(): added = False for option in self.options: if arg in list(option.aliases) + [ option.key ]: values[option.key] = value added = True if not added: self.unknown[arg] = value # put the variables in the environment: # (don't copy over variables that are not declared as options) for option in self.options: try: env[option.key] = values[option.key] except KeyError: pass # Call the convert functions: for option in self.options: if option.converter and option.key in values: value = env.subst('${%s}'%option.key) try: try: env[option.key] = option.converter(value) except TypeError: env[option.key] = option.converter(value, env) except ValueError as x: raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x)) # Finally validate the values: for option in self.options: if option.validator and option.key in values: option.validator(option.key, env.subst('${%s}'%option.key), env)
[ "def", "Update", "(", "self", ",", "env", ",", "args", "=", "None", ")", ":", "values", "=", "{", "}", "# first set the defaults:", "for", "option", "in", "self", ".", "options", ":", "if", "not", "option", ".", "default", "is", "None", ":", "values", "[", "option", ".", "key", "]", "=", "option", ".", "default", "# next set the value specified in the options file", "for", "filename", "in", "self", ".", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "dir", "=", "os", ".", "path", ".", "split", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "[", "0", "]", "if", "dir", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "dir", ")", "try", ":", "values", "[", "'__name__'", "]", "=", "filename", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "exec", "(", "contents", ",", "{", "}", ",", "values", ")", "finally", ":", "if", "dir", ":", "del", "sys", ".", "path", "[", "0", "]", "del", "values", "[", "'__name__'", "]", "# set the values specified on the command line", "if", "args", "is", "None", ":", "args", "=", "self", ".", "args", "for", "arg", ",", "value", "in", "args", ".", "items", "(", ")", ":", "added", "=", "False", "for", "option", "in", "self", ".", "options", ":", "if", "arg", "in", "list", "(", "option", ".", "aliases", ")", "+", "[", "option", ".", "key", "]", ":", "values", "[", "option", ".", "key", "]", "=", "value", "added", "=", "True", "if", "not", "added", ":", "self", ".", "unknown", "[", "arg", "]", "=", "value", "# put the variables in the environment:", "# (don't copy over variables that are not declared as options)", "for", "option", "in", "self", ".", "options", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "values", "[", "option", ".", "key", "]", "except", "KeyError", ":", "pass", "# Call the convert functions:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "converter", "and", "option", ".", "key", "in", "values", ":", "value", "=", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", "try", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ")", "except", "TypeError", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ",", "env", ")", "except", "ValueError", "as", "x", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "'Error converting option: %s\\n%s'", "%", "(", "option", ".", "key", ",", "x", ")", ")", "# Finally validate the values:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "validator", "and", "option", ".", "key", "in", "values", ":", "option", ".", "validator", "(", "option", ".", "key", ",", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", ",", "env", ")" ]
34.794118
16.117647
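Usage sketch for the Update() method above — a hedged example, not the canonical SCons recipe. It assumes the standard SCons Variables/Environment API; the options file name and the RELEASE flag are invented for illustration. Environment(variables=vars) is what triggers vars.Update(env) internally.

# Hypothetical SConstruct fragment exercising Variables.Update().
from SCons.Script import Variables, Environment

vars = Variables('custom.py')  # options file; Update() exec()s it if present
vars.Add('RELEASE', help='Set to 1 to build for release', default=0)
env = Environment(variables=vars)  # builds the env, then calls vars.Update(env)
print(env['RELEASE'])  # default, options-file value, or command-line override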
def unregister(name, delete=False): ''' Unregister a VM CLI Example: .. code-block:: bash salt '*' vboxmanage.unregister my_vm_filename ''' nodes = list_nodes_min() if name not in nodes: raise CommandExecutionError( 'The specified VM ({0}) is not registered.'.format(name) ) cmd = '{0} unregistervm {1}'.format(vboxcmd(), name) if delete is True: cmd += ' --delete' ret = salt.modules.cmdmod.run_all(cmd) if ret['retcode'] == 0: return True return ret['stderr']
[ "def", "unregister", "(", "name", ",", "delete", "=", "False", ")", ":", "nodes", "=", "list_nodes_min", "(", ")", "if", "name", "not", "in", "nodes", ":", "raise", "CommandExecutionError", "(", "'The specified VM ({0}) is not registered.'", ".", "format", "(", "name", ")", ")", "cmd", "=", "'{0} unregistervm {1}'", ".", "format", "(", "vboxcmd", "(", ")", ",", "name", ")", "if", "delete", "is", "True", ":", "cmd", "+=", "' --delete'", "ret", "=", "salt", ".", "modules", ".", "cmdmod", ".", "run_all", "(", "cmd", ")", "if", "ret", "[", "'retcode'", "]", "==", "0", ":", "return", "True", "return", "ret", "[", "'stderr'", "]" ]
23.608696
21.521739
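Since this is a Salt execution module, the docstring's CLI example is the primary interface; the sketch below only approximates, with plain subprocess, the VBoxManage command the function builds and how the return value is derived. The VBoxManage binary name stands in for whatever vboxcmd() resolves to.

import subprocess

# Approximation of unregister('my_vm', delete=True): build the command,
# run it, and return True on success or stderr on failure.
result = subprocess.run(['VBoxManage', 'unregistervm', 'my_vm', '--delete'],
                        capture_output=True, text=True)
print(True if result.returncode == 0 else result.stderr)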
def utrecht(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", location="unknown", lat="", lon="", dmy_flag=False, noave=False, meas_n_orient=8, meth_code="LP-NO", specnum=1, samp_con='2', labfield=0, phi=0, theta=0): """ Converts Utrecht magnetometer data files to MagIC files Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" append : bool append output files to existing files instead of overwrite, default False location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" dmy_flag : bool default False noave : bool do not average duplicate measurements, default False (so by default, DO average) meas_n_orient : int Number of different orientations in measurement (default : 8) meth_code : str sample method codes, default "LP-NO" e.g. [SO-MAG, SO-SUN, SO-SIGHT, ...] specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '2', see info below labfield : float DC_FIELD in microTesla (default : 0) phi : float DC_PHI in degrees (default : 0) theta : float DC_THETA in degrees (default : 0) Returns ---------- type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info --------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY """ # initialize some stuff version_num = pmag.get_version() MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) specnum = -int(specnum) if "4" in samp_con: if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: site_num = samp_con.split("-")[1] samp_con = "4" elif "7" in samp_con: if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "naming convention option [7] must be in form 7-Z where Z is an integer" else: site_num = samp_con.split("-")[1] samp_con = "7" else: site_num = 1 try: DC_FIELD = float(labfield)*1e-6 DC_PHI = float(phi) DC_THETA = float(theta) except ValueError: raise ValueError( 'problem with your dc parameters. 
please provide a labfield in microTesla and a phi and theta in degrees.') # format variables if not mag_file: return False, 'You must provide a Utrecht format file' mag_file = pmag.resolve_file_name(mag_file, input_dir_path) # need to add these meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) # parse data # Open up the Utrecht file and read the header information AF_or_T = mag_file.split('.')[-1] data = open(mag_file, 'r') line = data.readline() line_items = line.split(',') operator = line_items[0] operator = operator.replace("\"", "") machine = line_items[1] machine = machine.replace("\"", "") machine = machine.rstrip('\n') # print("operator=", operator) # print("machine=", machine) # read in measurement data line = data.readline() while line != "END" and line != '"END"': SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {} line_items = line.split(',') spec_name = line_items[0] spec_name = spec_name.replace("\"", "") # print("spec_name=", spec_name) free_string = line_items[1] free_string = free_string.replace("\"", "") # print("free_string=", free_string) dec = line_items[2] # print("dec=", dec) inc = line_items[3] # print("inc=", inc) volume = float(line_items[4]) volume = volume * 1e-6 # enter volume in cm^3, convert to m^3 # print("volume=", volume) bed_plane = line_items[5] # print("bed_plane=", bed_plane) bed_dip = line_items[6] # print("bed_dip=", bed_dip) # Configure et er_ tables if specnum == 0: sample_name = spec_name else: sample_name = spec_name[:specnum] site = pmag.parse_site(sample_name, samp_con, site_num) SpecRec['specimen'] = spec_name SpecRec['sample'] = sample_name if volume != 0: SpecRec['volume'] = volume SpecRecs.append(SpecRec) if sample_name != "" and sample_name not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample_name SampRec['azimuth'] = dec SampRec['dip'] = str(float(inc)-90) SampRec['bed_dip_direction'] = bed_plane SampRec['bed_dip'] = bed_dip SampRec['method_codes'] = meth_code SampRec['site'] = site SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) # measurement data line = data.readline() line = line.rstrip("\n") items = line.split(",") while line != '9999': step = items[0] step = step.split('.') step_value = step[0] step_type = "" if len(step) == 2: step_type = step[1] if step_type == '5': step_value = items[0] A = float(items[1]) B = float(items[2]) C = float(items[3]) # convert to MagIC coordinates Z = -A X = -B Y = C cart = np.array([X, Y, Z]).transpose() direction = pmag.cart2dir(cart).transpose() measurement_dec = direction[0] measurement_inc = direction[1] # the data are in pico-Am^2 - this converts to Am^2 magn_moment = direction[2] * 1.0e-12 if volume != 0: # data volume normalized - converted to A/m magn_volume = direction[2] * 1.0e-12 / volume # print("magn_moment=", magn_moment) # 
print("magn_volume=", magn_volume) error = items[4] date = items[5] date = date.strip('"').replace(' ', '') if date.count("-") > 0: date = date.split("-") elif date.count("/") > 0: date = date.split("/") else: print("date format seperator cannot be identified") # print(date) time = items[6] time = time.strip('"').replace(' ', '') time = time.split(":") # print(time) dt = date[0] + ":" + date[1] + ":" + date[2] + \ ":" + time[0] + ":" + time[1] + ":" + "0" local = pytz.timezone("Europe/Amsterdam") try: if dmy_flag: naive = datetime.datetime.strptime(dt, "%d:%m:%Y:%H:%M:%S") else: naive = datetime.datetime.strptime(dt, "%m:%d:%Y:%H:%M:%S") except ValueError: try: naive = datetime.datetime.strptime(dt, "%Y:%m:%d:%H:%M:%S") except ValueError: print('-W- Could not parse date format') return False, 'Could not parse date format' local_dt = local.localize(naive, is_dst=None) utc_dt = local_dt.astimezone(pytz.utc) timestamp = utc_dt.strftime("%Y-%m-%dT%H:%M:%S")+"Z" # print(timestamp) MeasRec = {} MeasRec["timestamp"] = timestamp MeasRec["analysts"] = operator MeasRec["instrument_codes"] = "Utrecht_" + machine MeasRec["description"] = "free string = " + free_string MeasRec["citations"] = "This study" MeasRec['software_packages'] = version_num MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["quality"] = 'g' MeasRec["standard"] = 'u' # will be overwritten by measurements_methods3 MeasRec["experiment"] = location + site + spec_name MeasRec["treat_step_num"] = location + site + spec_name + items[0] MeasRec["specimen"] = spec_name # MeasRec["treat_ac_field"] = '0' if AF_or_T.lower() == "th": MeasRec["treat_temp"] = '%8.3e' % ( float(step_value)+273.) # temp in kelvin MeasRec['treat_ac_field'] = '0' lab_treat_type = "T" else: MeasRec['treat_temp'] = '273' MeasRec['treat_ac_field'] = '%10.3e' % (float(step_value)*1e-3) lab_treat_type = "AF" MeasRec['treat_dc_field'] = '0' if step_value == '0': meas_type = "LT-NO" # print("step_type=", step_type) if step_type == '0' or step_type == '00': meas_type = "LT-%s-Z" % lab_treat_type elif step_type == '1' or step_type == '11': meas_type = "LT-%s-I" % lab_treat_type MeasRec['treat_dc_field'] = '%1.2e' % DC_FIELD elif step_type == '2' or step_type == '12': meas_type = "LT-PTRM-I" MeasRec['treat_dc_field'] = '%1.2e' % DC_FIELD elif step_type == '3' or step_type == '13': meas_type = "LT-PTRM-Z" # print("meas_type=", meas_type) MeasRec['treat_dc_field_phi'] = '%1.2f' % DC_PHI MeasRec['treat_dc_field_theta'] = '%1.2f' % DC_THETA MeasRec['method_codes'] = meas_type MeasRec["magn_moment"] = magn_moment if volume != 0: MeasRec["magn_volume"] = magn_volume MeasRec["dir_dec"] = measurement_dec MeasRec["dir_inc"] = measurement_inc MeasRec['dir_csd'] = error MeasRec['meas_n_orient'] = meas_n_orient # print(MeasRec) MeasRecs.append(MeasRec) line = data.readline() line = line.rstrip("\n") items = line.split(",") line = data.readline() line = line.rstrip("\n") items = line.split(",") data.close() con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) # figures out method codes for measuremet data MeasOuts = pmag.measurements_methods3(MeasRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file) 
con.tables['samples'].write_magic_file(custom_name=samp_file) con.tables['sites'].write_magic_file(custom_name=site_file) con.tables['locations'].write_magic_file(custom_name=loc_file) con.tables['measurements'].write_magic_file(custom_name=meas_file) return True, meas_file
[ "def", "utrecht", "(", "mag_file", ",", "dir_path", "=", "\".\"", ",", "input_dir_path", "=", "\"\"", ",", "meas_file", "=", "\"measurements.txt\"", ",", "spec_file", "=", "\"specimens.txt\"", ",", "samp_file", "=", "\"samples.txt\"", ",", "site_file", "=", "\"sites.txt\"", ",", "loc_file", "=", "\"locations.txt\"", ",", "location", "=", "\"unknown\"", ",", "lat", "=", "\"\"", ",", "lon", "=", "\"\"", ",", "dmy_flag", "=", "False", ",", "noave", "=", "False", ",", "meas_n_orient", "=", "8", ",", "meth_code", "=", "\"LP-NO\"", ",", "specnum", "=", "1", ",", "samp_con", "=", "'2'", ",", "labfield", "=", "0", ",", "phi", "=", "0", ",", "theta", "=", "0", ")", ":", "# initialize some stuff", "version_num", "=", "pmag", ".", "get_version", "(", ")", "MeasRecs", ",", "SpecRecs", ",", "SampRecs", ",", "SiteRecs", ",", "LocRecs", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "input_dir_path", ",", "output_dir_path", "=", "pmag", ".", "fix_directories", "(", "input_dir_path", ",", "dir_path", ")", "specnum", "=", "-", "int", "(", "specnum", ")", "if", "\"4\"", "in", "samp_con", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [4] must be in form 4-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [4] must be in form 4-Z where Z is an integer\"", "else", ":", "site_num", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"4\"", "elif", "\"7\"", "in", "samp_con", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [7] must be in form 7-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [7] must be in form 7-Z where Z is an integer\"", "else", ":", "site_num", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"7\"", "else", ":", "site_num", "=", "1", "try", ":", "DC_FIELD", "=", "float", "(", "labfield", ")", "*", "1e-6", "DC_PHI", "=", "float", "(", "phi", ")", "DC_THETA", "=", "float", "(", "theta", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'problem with your dc parameters. 
please provide a labfield in microTesla and a phi and theta in degrees.'", ")", "# format variables", "if", "not", "mag_file", ":", "return", "False", ",", "'You must provide a Utrecht format file'", "mag_file", "=", "pmag", ".", "resolve_file_name", "(", "mag_file", ",", "input_dir_path", ")", "# need to add these", "meas_file", "=", "pmag", ".", "resolve_file_name", "(", "meas_file", ",", "output_dir_path", ")", "spec_file", "=", "pmag", ".", "resolve_file_name", "(", "spec_file", ",", "output_dir_path", ")", "samp_file", "=", "pmag", ".", "resolve_file_name", "(", "samp_file", ",", "output_dir_path", ")", "site_file", "=", "pmag", ".", "resolve_file_name", "(", "site_file", ",", "output_dir_path", ")", "loc_file", "=", "pmag", ".", "resolve_file_name", "(", "loc_file", ",", "output_dir_path", ")", "# parse data", "# Open up the Utrecht file and read the header information", "AF_or_T", "=", "mag_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "data", "=", "open", "(", "mag_file", ",", "'r'", ")", "line", "=", "data", ".", "readline", "(", ")", "line_items", "=", "line", ".", "split", "(", "','", ")", "operator", "=", "line_items", "[", "0", "]", "operator", "=", "operator", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "machine", "=", "line_items", "[", "1", "]", "machine", "=", "machine", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "machine", "=", "machine", ".", "rstrip", "(", "'\\n'", ")", "# print(\"operator=\", operator)", "# print(\"machine=\", machine)", "# read in measurement data", "line", "=", "data", ".", "readline", "(", ")", "while", "line", "!=", "\"END\"", "and", "line", "!=", "'\"END\"'", ":", "SpecRec", ",", "SampRec", ",", "SiteRec", ",", "LocRec", "=", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", "line_items", "=", "line", ".", "split", "(", "','", ")", "spec_name", "=", "line_items", "[", "0", "]", "spec_name", "=", "spec_name", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "# print(\"spec_name=\", spec_name)", "free_string", "=", "line_items", "[", "1", "]", "free_string", "=", "free_string", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "# print(\"free_string=\", free_string)", "dec", "=", "line_items", "[", "2", "]", "# print(\"dec=\", dec)", "inc", "=", "line_items", "[", "3", "]", "# print(\"inc=\", inc)", "volume", "=", "float", "(", "line_items", "[", "4", "]", ")", "volume", "=", "volume", "*", "1e-6", "# enter volume in cm^3, convert to m^3", "# print(\"volume=\", volume)", "bed_plane", "=", "line_items", "[", "5", "]", "# print(\"bed_plane=\", bed_plane)", "bed_dip", "=", "line_items", "[", "6", "]", "# print(\"bed_dip=\", bed_dip)", "# Configure et er_ tables", "if", "specnum", "==", "0", ":", "sample_name", "=", "spec_name", "else", ":", "sample_name", "=", "spec_name", "[", ":", "specnum", "]", "site", "=", "pmag", ".", "parse_site", "(", "sample_name", ",", "samp_con", ",", "site_num", ")", "SpecRec", "[", "'specimen'", "]", "=", "spec_name", "SpecRec", "[", "'sample'", "]", "=", "sample_name", "if", "volume", "!=", "0", ":", "SpecRec", "[", "'volume'", "]", "=", "volume", "SpecRecs", ".", "append", "(", "SpecRec", ")", "if", "sample_name", "!=", "\"\"", "and", "sample_name", "not", "in", "[", "x", "[", "'sample'", "]", "if", "'sample'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SampRecs", "]", ":", "SampRec", "[", "'sample'", "]", "=", "sample_name", "SampRec", "[", "'azimuth'", "]", "=", "dec", "SampRec", "[", "'dip'", "]", "=", "str", "(", "float", "(", "inc", ")", 
"-", "90", ")", "SampRec", "[", "'bed_dip_direction'", "]", "=", "bed_plane", "SampRec", "[", "'bed_dip'", "]", "=", "bed_dip", "SampRec", "[", "'method_codes'", "]", "=", "meth_code", "SampRec", "[", "'site'", "]", "=", "site", "SampRecs", ".", "append", "(", "SampRec", ")", "if", "site", "!=", "\"\"", "and", "site", "not", "in", "[", "x", "[", "'site'", "]", "if", "'site'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SiteRecs", "]", ":", "SiteRec", "[", "'site'", "]", "=", "site", "SiteRec", "[", "'location'", "]", "=", "location", "SiteRec", "[", "'lat'", "]", "=", "lat", "SiteRec", "[", "'lon'", "]", "=", "lon", "SiteRecs", ".", "append", "(", "SiteRec", ")", "if", "location", "!=", "\"\"", "and", "location", "not", "in", "[", "x", "[", "'location'", "]", "if", "'location'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "LocRecs", "]", ":", "LocRec", "[", "'location'", "]", "=", "location", "LocRec", "[", "'lat_n'", "]", "=", "lat", "LocRec", "[", "'lon_e'", "]", "=", "lon", "LocRec", "[", "'lat_s'", "]", "=", "lat", "LocRec", "[", "'lon_w'", "]", "=", "lon", "LocRecs", ".", "append", "(", "LocRec", ")", "# measurement data", "line", "=", "data", ".", "readline", "(", ")", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "items", "=", "line", ".", "split", "(", "\",\"", ")", "while", "line", "!=", "'9999'", ":", "step", "=", "items", "[", "0", "]", "step", "=", "step", ".", "split", "(", "'.'", ")", "step_value", "=", "step", "[", "0", "]", "step_type", "=", "\"\"", "if", "len", "(", "step", ")", "==", "2", ":", "step_type", "=", "step", "[", "1", "]", "if", "step_type", "==", "'5'", ":", "step_value", "=", "items", "[", "0", "]", "A", "=", "float", "(", "items", "[", "1", "]", ")", "B", "=", "float", "(", "items", "[", "2", "]", ")", "C", "=", "float", "(", "items", "[", "3", "]", ")", "# convert to MagIC coordinates", "Z", "=", "-", "A", "X", "=", "-", "B", "Y", "=", "C", "cart", "=", "np", ".", "array", "(", "[", "X", ",", "Y", ",", "Z", "]", ")", ".", "transpose", "(", ")", "direction", "=", "pmag", ".", "cart2dir", "(", "cart", ")", ".", "transpose", "(", ")", "measurement_dec", "=", "direction", "[", "0", "]", "measurement_inc", "=", "direction", "[", "1", "]", "# the data are in pico-Am^2 - this converts to Am^2", "magn_moment", "=", "direction", "[", "2", "]", "*", "1.0e-12", "if", "volume", "!=", "0", ":", "# data volume normalized - converted to A/m", "magn_volume", "=", "direction", "[", "2", "]", "*", "1.0e-12", "/", "volume", "# print(\"magn_moment=\", magn_moment)", "# print(\"magn_volume=\", magn_volume)", "error", "=", "items", "[", "4", "]", "date", "=", "items", "[", "5", "]", "date", "=", "date", ".", "strip", "(", "'\"'", ")", ".", "replace", "(", "' '", ",", "''", ")", "if", "date", ".", "count", "(", "\"-\"", ")", ">", "0", ":", "date", "=", "date", ".", "split", "(", "\"-\"", ")", "elif", "date", ".", "count", "(", "\"/\"", ")", ">", "0", ":", "date", "=", "date", ".", "split", "(", "\"/\"", ")", "else", ":", "print", "(", "\"date format seperator cannot be identified\"", ")", "# print(date)", "time", "=", "items", "[", "6", "]", "time", "=", "time", ".", "strip", "(", "'\"'", ")", ".", "replace", "(", "' '", ",", "''", ")", "time", "=", "time", ".", "split", "(", "\":\"", ")", "# print(time)", "dt", "=", "date", "[", "0", "]", "+", "\":\"", "+", "date", "[", "1", "]", "+", "\":\"", "+", "date", "[", "2", "]", "+", "\":\"", "+", "time", "[", "0", "]", "+", "\":\"", "+", 
"time", "[", "1", "]", "+", "\":\"", "+", "\"0\"", "local", "=", "pytz", ".", "timezone", "(", "\"Europe/Amsterdam\"", ")", "try", ":", "if", "dmy_flag", ":", "naive", "=", "datetime", ".", "datetime", ".", "strptime", "(", "dt", ",", "\"%d:%m:%Y:%H:%M:%S\"", ")", "else", ":", "naive", "=", "datetime", ".", "datetime", ".", "strptime", "(", "dt", ",", "\"%m:%d:%Y:%H:%M:%S\"", ")", "except", "ValueError", ":", "try", ":", "naive", "=", "datetime", ".", "datetime", ".", "strptime", "(", "dt", ",", "\"%Y:%m:%d:%H:%M:%S\"", ")", "except", "ValueError", ":", "print", "(", "'-W- Could not parse date format'", ")", "return", "False", ",", "'Could not parse date format'", "local_dt", "=", "local", ".", "localize", "(", "naive", ",", "is_dst", "=", "None", ")", "utc_dt", "=", "local_dt", ".", "astimezone", "(", "pytz", ".", "utc", ")", "timestamp", "=", "utc_dt", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "+", "\"Z\"", "# print(timestamp)", "MeasRec", "=", "{", "}", "MeasRec", "[", "\"timestamp\"", "]", "=", "timestamp", "MeasRec", "[", "\"analysts\"", "]", "=", "operator", "MeasRec", "[", "\"instrument_codes\"", "]", "=", "\"Utrecht_\"", "+", "machine", "MeasRec", "[", "\"description\"", "]", "=", "\"free string = \"", "+", "free_string", "MeasRec", "[", "\"citations\"", "]", "=", "\"This study\"", "MeasRec", "[", "'software_packages'", "]", "=", "version_num", "MeasRec", "[", "\"meas_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MeasRec", "[", "\"quality\"", "]", "=", "'g'", "MeasRec", "[", "\"standard\"", "]", "=", "'u'", "# will be overwritten by measurements_methods3", "MeasRec", "[", "\"experiment\"", "]", "=", "location", "+", "site", "+", "spec_name", "MeasRec", "[", "\"treat_step_num\"", "]", "=", "location", "+", "site", "+", "spec_name", "+", "items", "[", "0", "]", "MeasRec", "[", "\"specimen\"", "]", "=", "spec_name", "# MeasRec[\"treat_ac_field\"] = '0'", "if", "AF_or_T", ".", "lower", "(", ")", "==", "\"th\"", ":", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "float", "(", "step_value", ")", "+", "273.", ")", "# temp in kelvin", "MeasRec", "[", "'treat_ac_field'", "]", "=", "'0'", "lab_treat_type", "=", "\"T\"", "else", ":", "MeasRec", "[", "'treat_temp'", "]", "=", "'273'", "MeasRec", "[", "'treat_ac_field'", "]", "=", "'%10.3e'", "%", "(", "float", "(", "step_value", ")", "*", "1e-3", ")", "lab_treat_type", "=", "\"AF\"", "MeasRec", "[", "'treat_dc_field'", "]", "=", "'0'", "if", "step_value", "==", "'0'", ":", "meas_type", "=", "\"LT-NO\"", "# print(\"step_type=\", step_type)", "if", "step_type", "==", "'0'", "or", "step_type", "==", "'00'", ":", "meas_type", "=", "\"LT-%s-Z\"", "%", "lab_treat_type", "elif", "step_type", "==", "'1'", "or", "step_type", "==", "'11'", ":", "meas_type", "=", "\"LT-%s-I\"", "%", "lab_treat_type", "MeasRec", "[", "'treat_dc_field'", "]", "=", "'%1.2e'", "%", "DC_FIELD", "elif", "step_type", "==", "'2'", "or", "step_type", "==", "'12'", ":", "meas_type", "=", "\"LT-PTRM-I\"", "MeasRec", "[", "'treat_dc_field'", "]", "=", "'%1.2e'", "%", "DC_FIELD", "elif", "step_type", "==", "'3'", "or", "step_type", "==", "'13'", ":", "meas_type", "=", "\"LT-PTRM-Z\"", "# print(\"meas_type=\", meas_type)", "MeasRec", "[", "'treat_dc_field_phi'", "]", "=", "'%1.2f'", "%", "DC_PHI", "MeasRec", "[", "'treat_dc_field_theta'", "]", "=", "'%1.2f'", "%", "DC_THETA", "MeasRec", "[", "'method_codes'", "]", "=", "meas_type", "MeasRec", "[", "\"magn_moment\"", "]", "=", "magn_moment", "if", "volume", "!=", "0", ":", 
"MeasRec", "[", "\"magn_volume\"", "]", "=", "magn_volume", "MeasRec", "[", "\"dir_dec\"", "]", "=", "measurement_dec", "MeasRec", "[", "\"dir_inc\"", "]", "=", "measurement_inc", "MeasRec", "[", "'dir_csd'", "]", "=", "error", "MeasRec", "[", "'meas_n_orient'", "]", "=", "meas_n_orient", "# print(MeasRec)", "MeasRecs", ".", "append", "(", "MeasRec", ")", "line", "=", "data", ".", "readline", "(", ")", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "items", "=", "line", ".", "split", "(", "\",\"", ")", "line", "=", "data", ".", "readline", "(", ")", "line", "=", "line", ".", "rstrip", "(", "\"\\n\"", ")", "items", "=", "line", ".", "split", "(", "\",\"", ")", "data", ".", "close", "(", ")", "con", "=", "cb", ".", "Contribution", "(", "output_dir_path", ",", "read_tables", "=", "[", "]", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'specimens'", ",", "data", "=", "SpecRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'samples'", ",", "data", "=", "SampRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'sites'", ",", "data", "=", "SiteRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'locations'", ",", "data", "=", "LocRecs", ")", "# figures out method codes for measuremet data", "MeasOuts", "=", "pmag", ".", "measurements_methods3", "(", "MeasRecs", ",", "noave", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'measurements'", ",", "data", "=", "MeasOuts", ")", "con", ".", "tables", "[", "'specimens'", "]", ".", "write_magic_file", "(", "custom_name", "=", "spec_file", ")", "con", ".", "tables", "[", "'samples'", "]", ".", "write_magic_file", "(", "custom_name", "=", "samp_file", ")", "con", ".", "tables", "[", "'sites'", "]", ".", "write_magic_file", "(", "custom_name", "=", "site_file", ")", "con", ".", "tables", "[", "'locations'", "]", ".", "write_magic_file", "(", "custom_name", "=", "loc_file", ")", "con", ".", "tables", "[", "'measurements'", "]", ".", "write_magic_file", "(", "custom_name", "=", "meas_file", ")", "return", "True", ",", "meas_file" ]
40.343558
17.815951
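A hedged invocation sketch for the converter above. The input file name, coordinates, and location are invented; the '.TH' extension matters because the function infers thermal vs. AF treatment from the file suffix.

# Assumes the module defining utrecht() (and its pmag/cb dependencies) is importable.
ok, meas_file = utrecht('mysite.TH', dir_path='.', location='Utrecht test site',
                        lat='52.09', lon='5.12', specnum=1, samp_con='2',
                        labfield=40, phi=0, theta=90)
print(ok, meas_file)  # (True, '<dir_path>/measurements.txt') on success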
def beacon(config): ''' The journald beacon allows for the systemd journal to be parsed and linked objects to be turned into events. This beacon's config will return all sshd journal entries .. code-block:: yaml beacons: journald: - services: sshd: SYSLOG_IDENTIFIER: sshd PRIORITY: 6 ''' ret = [] journal = _get_journal() _config = {} list(map(_config.update, config)) while True: cur = journal.get_next() if not cur: break for name in _config.get('services', {}): n_flag = 0 for key in _config['services'][name]: if isinstance(key, salt.ext.six.string_types): key = salt.utils.data.decode(key) if key in cur: if _config['services'][name][key] == cur[key]: n_flag += 1 if n_flag == len(_config['services'][name]): # Match! sub = salt.utils.data.simple_types_filter(cur) sub.update({'tag': name}) ret.append(sub) return ret
[ "def", "beacon", "(", "config", ")", ":", "ret", "=", "[", "]", "journal", "=", "_get_journal", "(", ")", "_config", "=", "{", "}", "list", "(", "map", "(", "_config", ".", "update", ",", "config", ")", ")", "while", "True", ":", "cur", "=", "journal", ".", "get_next", "(", ")", "if", "not", "cur", ":", "break", "for", "name", "in", "_config", ".", "get", "(", "'services'", ",", "{", "}", ")", ":", "n_flag", "=", "0", "for", "key", "in", "_config", "[", "'services'", "]", "[", "name", "]", ":", "if", "isinstance", "(", "key", ",", "salt", ".", "ext", ".", "six", ".", "string_types", ")", ":", "key", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "key", ")", "if", "key", "in", "cur", ":", "if", "_config", "[", "'services'", "]", "[", "name", "]", "[", "key", "]", "==", "cur", "[", "key", "]", ":", "n_flag", "+=", "1", "if", "n_flag", "==", "len", "(", "_config", "[", "'services'", "]", "[", "name", "]", ")", ":", "# Match!", "sub", "=", "salt", ".", "utils", ".", "data", ".", "simple_types_filter", "(", "cur", ")", "sub", ".", "update", "(", "{", "'tag'", ":", "name", "}", ")", "ret", ".", "append", "(", "sub", ")", "return", "ret" ]
28.02439
20.512195
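The matching rule in the loop above: an event fires for a service only when every configured key/value pair is present and equal in the journal entry. A self-contained sketch of just that rule, with a fake entry so no systemd is required:

config = [{'services': {'sshd': {'SYSLOG_IDENTIFIER': 'sshd', 'PRIORITY': 6}}}]
entry = {'SYSLOG_IDENTIFIER': 'sshd', 'PRIORITY': 6, 'MESSAGE': 'Accepted publickey'}

_config = {}
list(map(_config.update, config))  # same config flattening the beacon performs
wanted = _config['services']['sshd']
fires = all(entry.get(k) == v for k, v in wanted.items())
print(fires)  # True -> the beacon would emit an event tagged 'sshd'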
def l2_distance_sq(t1, t2, name=None): """Square of l2 distance between t1 and t2. Args: t1: A tensor. t2: A tensor that is the same size as t1. name: Optional name for this op. Returns: The squared l2 distance between t1 and t2. """ with tf.name_scope(name, 'l2_distance_sq', [t1, t2]) as scope: t1 = tf.convert_to_tensor(t1, name='t1') t2 = tf.convert_to_tensor(t2, name='t2') return length_squared(tf.subtract(t1, t2), name=scope)
[ "def", "l2_distance_sq", "(", "t1", ",", "t2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "'l2_distance_sq'", ",", "[", "t1", ",", "t2", "]", ")", "as", "scope", ":", "t1", "=", "tf", ".", "convert_to_tensor", "(", "t1", ",", "name", "=", "'t1'", ")", "t2", "=", "tf", ".", "convert_to_tensor", "(", "t2", ",", "name", "=", "'t2'", ")", "return", "length_squared", "(", "tf", ".", "subtract", "(", "t1", ",", "t2", ")", ",", "name", "=", "scope", ")" ]
32.214286
13.5
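A small worked check of the op — hedged: it assumes the TF 1.x graph-mode API used in the snippet, and the module's length_squared helper (assumed to sum squared components).

import tensorflow as tf  # TF 1.x assumed, matching tf.name_scope(name, ..., [t1, t2])

t1 = tf.constant([1.0, 2.0, 3.0])
t2 = tf.constant([4.0, 6.0, 3.0])
dist_sq = l2_distance_sq(t1, t2)  # (1-4)^2 + (2-6)^2 + (3-3)^2 = 25
with tf.Session() as sess:
    print(sess.run(dist_sq))  # 25.0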
def trimUTR(args): """ %prog trimUTR gffile Remove UTRs in the annotation set. If reference GFF3 is provided, reinstate UTRs from reference transcripts after trimming. Note: After running trimUTR, it is advised to also run `python -m jcvi.formats.gff fixboundaries` on the resultant GFF3 to adjust the boundaries of all parent 'gene' features """ import gffutils from jcvi.formats.base import SetFile p = OptionParser(trimUTR.__doc__) p.add_option("--trim5", default=None, type="str", \ help="File containing gene list for 5' UTR trimming") p.add_option("--trim3", default=None, type="str", \ help="File containing gene list for 3' UTR trimming") p.add_option("--trimrange", default=None, type="str", \ help="File containing gene list for UTR trim back" + \ "based on suggested (start, stop) coordinate range") p.add_option("--refgff", default=None, type="str", \ help="Reference GFF3 used as fallback to replace UTRs") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = make_index(gffile) trim_both = False if (opts.trim5 or opts.trim3) else True trim5 = SetFile(opts.trim5) if opts.trim5 else set() trim3 = SetFile(opts.trim3) if opts.trim3 else set() trimrange = dict() if opts.trimrange: trf = must_open(opts.trimrange) for tr in trf: assert len(tr.split("\t")) == 3, \ "Must specify (start, stop) coordinate range" id, start, stop = tr.split("\t") trimrange[id] = (int(start), int(stop)) trf.close() refgff = make_index(opts.refgff) if opts.refgff else None fw = must_open(opts.outfile, "w") for feat in gff.iter_by_parent_childs(featuretype="gene", order_by=("seqid", "start"), level=1): for c in feat: cid, ctype, cparent = c.id, c.featuretype, \ c.attributes.get('Parent', [None])[0] t5, t3 = False, False if ctype == "gene": t5 = True if cid in trim5 else False t3 = True if cid in trim3 else False start, end = get_cds_minmax(gff, cid) trim(c, start, end, trim5=t5, trim3=t3, both=trim_both) fprint(c, fw) elif ctype == "mRNA": utr_types, extras = [], set() if any(id in trim5 for id in (cid, cparent)): t5 = True trim5.add(cid) if any(id in trim3 for id in (cid, cparent)): t3 = True trim3.add(cid) refc = None if refgff: try: refc = refgff[cid] refctype = refc.featuretype refptype = refgff[refc.attributes['Parent'][0]].featuretype if refctype == "mRNA" and refptype == "gene": if cmp_children(cid, gff, refgff, cftype="CDS"): reinstate(c, refc, trim5=t5, trim3=t3, both=trim_both) if t5: utr_types.append('five_prime_UTR') if t3: utr_types.append('three_prime_UTR') for utr_type in utr_types: for utr in refgff.children(refc, featuretype=utr_type): extras.add(utr) for exon in refgff.region(region=utr, featuretype="exon"): if exon.attributes['Parent'][0] == cid: extras.add(exon) else: refc = None except gffutils.exceptions.FeatureNotFoundError: pass start, end = get_cds_minmax(gff, cid, level=1) if cid in trimrange: start, end = range_minmax([trimrange[cid], (start, end)]) if not refc: trim(c, start, end, trim5=t5, trim3=t3, both=trim_both) fprint(c, fw) for cc in gff.children(cid, order_by=("start")): _ctype = cc.featuretype if _ctype not in utr_types: if _ctype != "CDS": if _ctype == "exon": eskip = [range_overlap(to_range(cc), to_range(x)) \ for x in extras if x.featuretype == 'exon'] if any(skip for skip in eskip): continue trim(cc, start, end, trim5=t5, trim3=t3, both=trim_both) fprint(cc, fw) else: fprint(cc, fw) for x in extras: fprint(x, fw) fw.close()
[ "def", "trimUTR", "(", "args", ")", ":", "import", "gffutils", "from", "jcvi", ".", "formats", ".", "base", "import", "SetFile", "p", "=", "OptionParser", "(", "trimUTR", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--trim5\"", ",", "default", "=", "None", ",", "type", "=", "\"str\"", ",", "help", "=", "\"File containing gene list for 5' UTR trimming\"", ")", "p", ".", "add_option", "(", "\"--trim3\"", ",", "default", "=", "None", ",", "type", "=", "\"str\"", ",", "help", "=", "\"File containing gene list for 3' UTR trimming\"", ")", "p", ".", "add_option", "(", "\"--trimrange\"", ",", "default", "=", "None", ",", "type", "=", "\"str\"", ",", "help", "=", "\"File containing gene list for UTR trim back\"", "+", "\"based on suggested (start, stop) coordinate range\"", ")", "p", ".", "add_option", "(", "\"--refgff\"", ",", "default", "=", "None", ",", "type", "=", "\"str\"", ",", "help", "=", "\"Reference GFF3 used as fallback to replace UTRs\"", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "gffile", ",", "=", "args", "gff", "=", "make_index", "(", "gffile", ")", "trim_both", "=", "False", "if", "(", "opts", ".", "trim5", "or", "opts", ".", "trim3", ")", "else", "True", "trim5", "=", "SetFile", "(", "opts", ".", "trim5", ")", "if", "opts", ".", "trim5", "else", "set", "(", ")", "trim3", "=", "SetFile", "(", "opts", ".", "trim3", ")", "if", "opts", ".", "trim3", "else", "set", "(", ")", "trimrange", "=", "dict", "(", ")", "if", "opts", ".", "trimrange", ":", "trf", "=", "must_open", "(", "opts", ".", "trimrange", ")", "for", "tr", "in", "trf", ":", "assert", "len", "(", "tr", ".", "split", "(", "\"\\t\"", ")", ")", "==", "3", ",", "\"Must specify (start, stop) coordinate range\"", "id", ",", "start", ",", "stop", "=", "tr", ".", "split", "(", "\"\\t\"", ")", "trimrange", "[", "id", "]", "=", "(", "int", "(", "start", ")", ",", "int", "(", "stop", ")", ")", "trf", ".", "close", "(", ")", "refgff", "=", "make_index", "(", "opts", ".", "refgff", ")", "if", "opts", ".", "refgff", "else", "None", "fw", "=", "must_open", "(", "opts", ".", "outfile", ",", "\"w\"", ")", "for", "feat", "in", "gff", ".", "iter_by_parent_childs", "(", "featuretype", "=", "\"gene\"", ",", "order_by", "=", "(", "\"seqid\"", ",", "\"start\"", ")", ",", "level", "=", "1", ")", ":", "for", "c", "in", "feat", ":", "cid", ",", "ctype", ",", "cparent", "=", "c", ".", "id", ",", "c", ".", "featuretype", ",", "c", ".", "attributes", ".", "get", "(", "'Parent'", ",", "[", "None", "]", ")", "[", "0", "]", "t5", ",", "t3", "=", "False", ",", "False", "if", "ctype", "==", "\"gene\"", ":", "t5", "=", "True", "if", "cid", "in", "trim5", "else", "False", "t3", "=", "True", "if", "cid", "in", "trim3", "else", "False", "start", ",", "end", "=", "get_cds_minmax", "(", "gff", ",", "cid", ")", "trim", "(", "c", ",", "start", ",", "end", ",", "trim5", "=", "t5", ",", "trim3", "=", "t3", ",", "both", "=", "trim_both", ")", "fprint", "(", "c", ",", "fw", ")", "elif", "ctype", "==", "\"mRNA\"", ":", "utr_types", ",", "extras", "=", "[", "]", ",", "set", "(", ")", "if", "any", "(", "id", "in", "trim5", "for", "id", "in", "(", "cid", ",", "cparent", ")", ")", ":", "t5", "=", "True", "trim5", ".", "add", "(", "cid", ")", "if", "any", "(", "id", "in", "trim3", "for", "id", "in", "(", "cid", ",", "cparent", ")", ")", ":", "t3", "=", "True", "trim3", ".", "add", 
"(", "cid", ")", "refc", "=", "None", "if", "refgff", ":", "try", ":", "refc", "=", "refgff", "[", "cid", "]", "refctype", "=", "refc", ".", "featuretype", "refptype", "=", "refgff", "[", "refc", ".", "attributes", "[", "'Parent'", "]", "[", "0", "]", "]", ".", "featuretype", "if", "refctype", "==", "\"mRNA\"", "and", "refptype", "==", "\"gene\"", ":", "if", "cmp_children", "(", "cid", ",", "gff", ",", "refgff", ",", "cftype", "=", "\"CDS\"", ")", ":", "reinstate", "(", "c", ",", "refc", ",", "trim5", "=", "t5", ",", "trim3", "=", "t3", ",", "both", "=", "trim_both", ")", "if", "t5", ":", "utr_types", ".", "append", "(", "'five_prime_UTR'", ")", "if", "t3", ":", "utr_types", ".", "append", "(", "'three_prime_UTR'", ")", "for", "utr_type", "in", "utr_types", ":", "for", "utr", "in", "refgff", ".", "children", "(", "refc", ",", "featuretype", "=", "utr_type", ")", ":", "extras", ".", "add", "(", "utr", ")", "for", "exon", "in", "refgff", ".", "region", "(", "region", "=", "utr", ",", "featuretype", "=", "\"exon\"", ")", ":", "if", "exon", ".", "attributes", "[", "'Parent'", "]", "[", "0", "]", "==", "cid", ":", "extras", ".", "add", "(", "exon", ")", "else", ":", "refc", "=", "None", "except", "gffutils", ".", "exceptions", ".", "FeatureNotFoundError", ":", "pass", "start", ",", "end", "=", "get_cds_minmax", "(", "gff", ",", "cid", ",", "level", "=", "1", ")", "if", "cid", "in", "trimrange", ":", "start", ",", "end", "=", "range_minmax", "(", "[", "trimrange", "[", "cid", "]", ",", "(", "start", ",", "end", ")", "]", ")", "if", "not", "refc", ":", "trim", "(", "c", ",", "start", ",", "end", ",", "trim5", "=", "t5", ",", "trim3", "=", "t3", ",", "both", "=", "trim_both", ")", "fprint", "(", "c", ",", "fw", ")", "for", "cc", "in", "gff", ".", "children", "(", "cid", ",", "order_by", "=", "(", "\"start\"", ")", ")", ":", "_ctype", "=", "cc", ".", "featuretype", "if", "_ctype", "not", "in", "utr_types", ":", "if", "_ctype", "!=", "\"CDS\"", ":", "if", "_ctype", "==", "\"exon\"", ":", "eskip", "=", "[", "range_overlap", "(", "to_range", "(", "cc", ")", ",", "to_range", "(", "x", ")", ")", "for", "x", "in", "extras", "if", "x", ".", "featuretype", "==", "'exon'", "]", "if", "any", "(", "skip", "for", "skip", "in", "eskip", ")", ":", "continue", "trim", "(", "cc", ",", "start", ",", "end", ",", "trim5", "=", "t5", ",", "trim3", "=", "t3", ",", "both", "=", "trim_both", ")", "fprint", "(", "cc", ",", "fw", ")", "else", ":", "fprint", "(", "cc", ",", "fw", ")", "for", "x", "in", "extras", ":", "fprint", "(", "x", ",", "fw", ")", "fw", ".", "close", "(", ")" ]
44.160714
19.5
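A hedged call sketch mirroring the documented CLI. File names are invented, and the '--outfile' flag is assumed to be what p.set_outfile() registers (following the usual jcvi convention).

# Equivalent of: python -m jcvi.formats.gff trimUTR annotation.gff3 \
#     --trim5 genes_trim5.txt --refgff reference.gff3 --outfile trimmed.gff3
trimUTR(['annotation.gff3',
         '--trim5', 'genes_trim5.txt',
         '--refgff', 'reference.gff3',
         '--outfile', 'trimmed.gff3'])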
def do_terminateInstance(self,args): """Terminate an EC2 instance, addressed by index within the scaling group or by instance id""" parser = CommandArgumentParser("terminateInstance") parser.add_argument(dest='instance',help='instance index or name') args = vars(parser.parse_args(args)) instanceId = args['instance'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index]['InstanceId'] # resolve an index to the instance id except ValueError: pass # a non-integer argument is already an instance id client = AwsConnectionFactory.getEc2Client() client.terminate_instances(InstanceIds=[instanceId]) self.do_printInstances("-r")
[ "def", "do_terminateInstance", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"terminateInstance\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "args", "=", "vars", "(", "parser", ".", "parse_args", "(", "args", ")", ")", "instanceId", "=", "args", "[", "'instance'", "]", "try", ":", "index", "=", "int", "(", "instanceId", ")", "instances", "=", "self", ".", "scalingGroupDescription", "[", "'AutoScalingGroups'", "]", "[", "0", "]", "[", "'Instances'", "]", "instanceId", "=", "instances", "[", "index", "]", "except", "ValueError", ":", "pass", "client", "=", "AwsConnectionFactory", ".", "getEc2Client", "(", ")", "client", ".", "terminate_instances", "(", "InstanceIds", "=", "[", "instanceId", "[", "'InstanceId'", "]", "]", ")", "self", ".", "do_printInstances", "(", "\"-r\"", ")" ]
39.470588
18.588235
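With the index/id handling fixed as above, both addressing styles work. The sketch assumes the surrounding cmd-style shell object (here named shell) and that CommandArgumentParser splits the raw argument string; the instance id is fabricated.

shell.do_terminateInstance('0')                    # by index into the scaling group
shell.do_terminateInstance('i-0abc123def4567890')  # by raw EC2 instance id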
def loadTableData(df, df_key='index',table="node", \ table_key_column = "name", network="current",\ namespace="default",\ host=cytoscape_host,port=cytoscape_port,verbose=False): """ Loads tables into cytoscape :param df: a pandas dataframe to load :param df_key: key column in df, default="index" :param table: target table, default="node" :param table_key_column: table key column, default="name" :param network: a network name or id, default="current" :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=cytoscape_port :param verbose: print more information :returns: output of put request """ if type(network) != int: networkID=cytoscape("network", "get attribute",\ {"network":network,\ "namespace":namespace,\ "columnList":"SUID"},host=host,port=port) networkname=cytoscape("network", "get attribute",\ {"network":network,\ "namespace":namespace,\ "columnList":"name"},host=host,port=port) network=networkID[0]["SUID"] networkname=networkname[0]["name"] tmp=df.copy() if df_key!="index": tmp.index=tmp[df_key].tolist() tmp=tmp.drop([df_key],axis=1) tablen=networkname+" default node" data=[] for c in tmp.columns.tolist(): tmpcol=tmp[[c]].dropna() for r in tmpcol.index.tolist(): check=tmpcol[tmpcol.index.isin([r])] cell={} cell[str(table_key_column)]=str(r) # {"name":"p53"} if len(check) == 1: val=tmpcol.loc[r,c] if type(val) != str: val=float(val) else: print(check) val="" cell[str(c)]=val data.append(cell) upload={"key":table_key_column,"dataKey":table_key_column,\ "data":data} URL="http://"+str(host)+":"+str(port)+"/v1/networks/"+str(network)+"/tables/"+namespace+table if verbose: print("'"+URL+"'") sys.stdout.flush() r = requests.put(url = URL, json = upload) if verbose: print(r) CheckResponse(r) res=r.content return res
[ "def", "loadTableData", "(", "df", ",", "df_key", "=", "'index'", ",", "table", "=", "\"node\"", ",", "table_key_column", "=", "\"name\"", ",", "network", "=", "\"current\"", ",", "namespace", "=", "\"default\"", ",", "host", "=", "cytoscape_host", ",", "port", "=", "cytoscape_port", ",", "verbose", "=", "False", ")", ":", "if", "type", "(", "network", ")", "!=", "int", ":", "networkID", "=", "cytoscape", "(", "\"network\"", ",", "\"get attribute\"", ",", "{", "\"network\"", ":", "network", ",", "\"namespace\"", ":", "namespace", ",", "\"columnList\"", ":", "\"SUID\"", "}", ",", "host", "=", "host", ",", "port", "=", "port", ")", "networkname", "=", "cytoscape", "(", "\"network\"", ",", "\"get attribute\"", ",", "{", "\"network\"", ":", "network", ",", "\"namespace\"", ":", "namespace", ",", "\"columnList\"", ":", "\"name\"", "}", ",", "host", "=", "host", ",", "port", "=", "port", ")", "network", "=", "networkID", "[", "0", "]", "[", "\"SUID\"", "]", "networkname", "=", "networkname", "[", "0", "]", "[", "\"name\"", "]", "tmp", "=", "df", ".", "copy", "(", ")", "if", "df_key", "!=", "\"index\"", ":", "tmp", ".", "index", "=", "tmp", "[", "df_key", "]", ".", "tolist", "(", ")", "tmp", "=", "tmp", ".", "drop", "(", "[", "df_key", "]", ",", "axis", "=", "1", ")", "tablen", "=", "networkname", "+", "\" default node\"", "data", "=", "[", "]", "for", "c", "in", "tmp", ".", "columns", ".", "tolist", "(", ")", ":", "tmpcol", "=", "tmp", "[", "[", "c", "]", "]", ".", "dropna", "(", ")", "for", "r", "in", "tmpcol", ".", "index", ".", "tolist", "(", ")", ":", "check", "=", "tmpcol", "[", "tmpcol", ".", "index", ".", "isin", "(", "[", "r", "]", ")", "]", "cell", "=", "{", "}", "cell", "[", "str", "(", "table_key_column", ")", "]", "=", "str", "(", "r", ")", "# {\"name\":\"p53\"}", "if", "len", "(", "check", ")", "==", "1", ":", "val", "=", "tmpcol", ".", "loc", "[", "r", ",", "c", "]", "if", "type", "(", "val", ")", "!=", "str", ":", "val", "=", "float", "(", "val", ")", "else", ":", "print", "(", "check", ")", "val", "=", "\"\"", "cell", "[", "str", "(", "c", ")", "]", "=", "val", "data", ".", "append", "(", "cell", ")", "upload", "=", "{", "\"key\"", ":", "table_key_column", ",", "\"dataKey\"", ":", "table_key_column", ",", "\"data\"", ":", "data", "}", "URL", "=", "\"http://\"", "+", "str", "(", "host", ")", "+", "\":\"", "+", "str", "(", "port", ")", "+", "\"/v1/networks/\"", "+", "str", "(", "network", ")", "+", "\"/tables/\"", "+", "namespace", "+", "table", "if", "verbose", ":", "print", "(", "\"'\"", "+", "URL", "+", "\"'\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "r", "=", "requests", ".", "put", "(", "url", "=", "URL", ",", "json", "=", "upload", ")", "if", "verbose", ":", "print", "(", "r", ")", "CheckResponse", "(", "r", ")", "res", "=", "r", ".", "content", "return", "res" ]
30.265823
17.987342
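A hedged usage sketch: pushing one pandas column onto the node table of a running Cytoscape instance via cyREST. The network name and data are invented; df_key='gene' means that column (not the index) is matched against the table's name column.

import pandas as pd

df = pd.DataFrame({'gene': ['TP53', 'MDM2'], 'logFC': [1.8, -0.7]})
res = loadTableData(df, df_key='gene', table='node',
                    table_key_column='name', network='my network')
print(res)  # raw body of the PUT response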
def get_samples_live_last(self, sensor_id): """Get the last sample recorded by the sensor. Args: sensor_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD`` Returns: dict: sample data for the most recent sample """ url = "https://api.neur.io/v1/samples/live/last" headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "sensorId": sensor_id } url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
[ "def", "get_samples_live_last", "(", "self", ",", "sensor_id", ")", ":", "url", "=", "\"https://api.neur.io/v1/samples/live/last\"", "headers", "=", "self", ".", "__gen_headers", "(", ")", "headers", "[", "\"Content-Type\"", "]", "=", "\"application/json\"", "params", "=", "{", "\"sensorId\"", ":", "sensor_id", "}", "url", "=", "self", ".", "__append_url_params", "(", "url", ",", "params", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "return", "r", ".", "json", "(", ")" ]
27.65
18.6
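Hedged sketch only: the client construction below is an assumption (NeurioClient is a hypothetical name for whatever class hosts this method), the sensor id follows the docstring's example format, and the call hits the live API.

client = NeurioClient(token='...')  # hypothetical construction of the API client
sample = client.get_samples_live_last('0x0013A20040B65FAD')
print(sample)  # parsed JSON body of the last recorded sample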
def check_value_error_for_parity(value_error: ValueError, call_type: ParityCallType) -> bool: """ With parity, failing calls and functions do not return None when the transaction will fail; they raise a ValueError instead. This function inspects the raised exception and returns True if it is the expected parity error for the given call type, and False otherwise. """ try: error_data = json.loads(str(value_error).replace("'", '"')) except json.JSONDecodeError: return False if call_type == ParityCallType.ESTIMATE_GAS: code_checks_out = error_data['code'] == -32016 message_checks_out = 'The execution failed due to an exception' in error_data['message'] elif call_type == ParityCallType.CALL: code_checks_out = error_data['code'] == -32015 message_checks_out = 'VM execution error' in error_data['message'] else: raise ValueError('Called check_value_error_for_parity() with illegal call type') if code_checks_out and message_checks_out: return True return False
[ "def", "check_value_error_for_parity", "(", "value_error", ":", "ValueError", ",", "call_type", ":", "ParityCallType", ")", "->", "bool", ":", "try", ":", "error_data", "=", "json", ".", "loads", "(", "str", "(", "value_error", ")", ".", "replace", "(", "\"'\"", ",", "'\"'", ")", ")", "except", "json", ".", "JSONDecodeError", ":", "return", "False", "if", "call_type", "==", "ParityCallType", ".", "ESTIMATE_GAS", ":", "code_checks_out", "=", "error_data", "[", "'code'", "]", "==", "-", "32016", "message_checks_out", "=", "'The execution failed due to an exception'", "in", "error_data", "[", "'message'", "]", "elif", "call_type", "==", "ParityCallType", ".", "CALL", ":", "code_checks_out", "=", "error_data", "[", "'code'", "]", "==", "-", "32015", "message_checks_out", "=", "'VM execution error'", "in", "error_data", "[", "'message'", "]", "else", ":", "raise", "ValueError", "(", "'Called check_value_error_for_parity() with illegal call type'", ")", "if", "code_checks_out", "and", "message_checks_out", ":", "return", "True", "return", "False" ]
40
25.307692
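To see the parsing in action, one can reconstruct the kind of ValueError the helper expects. The payload below mirrors the code/message pairs the function itself checks (taken from the function, not verified against a live parity node); ParityCallType is assumed importable from the same module.

err = ValueError(str({'code': -32015, 'message': 'VM execution error.'}))
print(check_value_error_for_parity(err, ParityCallType.CALL))          # True
print(check_value_error_for_parity(err, ParityCallType.ESTIMATE_GAS))  # False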
def GetFlagSuggestions(attempt, longopt_list): """Get helpful similar matches for an invalid flag.""" # Don't suggest on very short strings, or if no longopts are specified. if len(attempt) <= 2 or not longopt_list: return [] option_names = [v.split('=')[0] for v in longopt_list] # Find close approximations in flag prefixes. # This also handles the case where the flag is spelled right but ambiguous. distances = [(_DamerauLevenshtein(attempt, option[0:len(attempt)]), option) for option in option_names] distances.sort(key=lambda t: t[0]) least_errors, _ = distances[0] # Don't suggest excessively bad matches. if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt): return [] suggestions = [] for errors, name in distances: if errors == least_errors: suggestions.append(name) else: break return suggestions
[ "def", "GetFlagSuggestions", "(", "attempt", ",", "longopt_list", ")", ":", "# Don't suggest on very short strings, or if no longopts are specified.", "if", "len", "(", "attempt", ")", "<=", "2", "or", "not", "longopt_list", ":", "return", "[", "]", "option_names", "=", "[", "v", ".", "split", "(", "'='", ")", "[", "0", "]", "for", "v", "in", "longopt_list", "]", "# Find close approximations in flag prefixes.", "# This also handles the case where the flag is spelled right but ambiguous.", "distances", "=", "[", "(", "_DamerauLevenshtein", "(", "attempt", ",", "option", "[", "0", ":", "len", "(", "attempt", ")", "]", ")", ",", "option", ")", "for", "option", "in", "option_names", "]", "distances", ".", "sort", "(", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", "least_errors", ",", "_", "=", "distances", "[", "0", "]", "# Don't suggest excessively bad matches.", "if", "least_errors", ">=", "_SUGGESTION_ERROR_RATE_THRESHOLD", "*", "len", "(", "attempt", ")", ":", "return", "[", "]", "suggestions", "=", "[", "]", "for", "errors", ",", "name", "in", "distances", ":", "if", "errors", "==", "least_errors", ":", "suggestions", ".", "append", "(", "name", ")", "else", ":", "break", "return", "suggestions" ]
33.538462
20.307692
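A worked example of the prefix matching above (the Damerau-Levenshtein helper and the error-rate threshold are module-level definitions assumed to be in scope):

longopts = ['verbose', 'vmodule=', 'verbosity=']
# 'verbos' is a 0-error prefix of both 'verbose' and 'verbosity',
# so both are suggested; 'vmodule' is too far away.
print(GetFlagSuggestions('verbos', longopts))  # ['verbose', 'verbosity']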
def build_api(packages, input, output, sanitizer, excluded_modules=None): """ Builds the Sphinx documentation API. :param packages: Packages to include in the API. :type packages: list :param input: Input modules directory. :type input: unicode :param output: Output reStructuredText files directory. :type output: unicode :param sanitizer: Sanitizer python module. :type sanitizer: unicode :param excluded_modules: Excluded modules. :type excluded_modules: list :return: Definition success. :rtype: bool """ LOGGER.info("{0} | Building Sphinx documentation API!".format(build_api.__name__)) sanitizer = import_sanitizer(sanitizer) if os.path.exists(input): shutil.rmtree(input) os.makedirs(input) excluded_modules = [] if excluded_modules is None else excluded_modules packages_modules = {"apiModules": [], "testsModules": []} for package in packages: package = __import__(package) path = foundations.common.get_first_item(package.__path__) package_directory = os.path.dirname(path) for file in sorted( list(foundations.walkers.files_walker(package_directory, filters_in=("{0}.*\.ui$".format(path),)))): LOGGER.info("{0} | Ui file: '{1}'".format(build_api.__name__, file)) target_directory = os.path.dirname(file).replace(package_directory, "") directory = "{0}{1}".format(input, target_directory) if not foundations.common.path_exists(directory): os.makedirs(directory) source = os.path.join(directory, os.path.basename(file)) shutil.copyfile(file, source) modules = [] for file in sorted( list(foundations.walkers.files_walker(package_directory, filters_in=("{0}.*\.py$".format(path),), filters_out=excluded_modules))): LOGGER.info("{0} | Python file: '{1}'".format(build_api.__name__, file)) module = "{0}.{1}".format((".".join(os.path.dirname(file).replace(package_directory, "").split("/"))), foundations.strings.get_splitext_basename(file)).strip(".") LOGGER.info("{0} | Module name: '{1}'".format(build_api.__name__, module)) directory = os.path.dirname(os.path.join(input, module.replace(".", "/"))) if not foundations.common.path_exists(directory): os.makedirs(directory) source = os.path.join(directory, os.path.basename(file)) shutil.copyfile(file, source) sanitizer.bleach(source) if "__init__.py" in file: continue rst_file_path = "{0}{1}".format(module, FILES_EXTENSION) LOGGER.info("{0} | Building API file: '{1}'".format(build_api.__name__, rst_file_path)) rst_file = File(os.path.join(output, rst_file_path)) header = ["_`{0}`\n".format(module), "==={0}\n".format("=" * len(module)), "\n", ".. automodule:: {0}\n".format(module), "\n"] rst_file.content.extend(header) functions = OrderedDict() classes = OrderedDict() module_attributes = OrderedDict() for member, object in module_browser._readmodule(module, [source, ]).iteritems(): if object.__class__ == module_browser.Function: if not member.startswith("_"): functions[member] = [".. autofunction:: {0}\n".format(member)] elif object.__class__ == module_browser.Class: classes[member] = [".. autoclass:: {0}\n".format(member), " :show-inheritance:\n", " :members:\n"] elif object.__class__ == module_browser.Global: if not member.startswith("_"): module_attributes[member] = [".. 
attribute:: {0}.{1}\n".format(module, member)] module_attributes and rst_file.content.append("Module Attributes\n-----------------\n\n") for module_attribute in module_attributes.itervalues(): rst_file.content.extend(module_attribute) rst_file.content.append("\n") functions and rst_file.content.append("Functions\n---------\n\n") for function in functions.itervalues(): rst_file.content.extend(function) rst_file.content.append("\n") classes and rst_file.content.append("Classes\n-------\n\n") for class_ in classes.itervalues(): rst_file.content.extend(class_) rst_file.content.append("\n") rst_file.write() modules.append(module) packages_modules["apiModules"].extend([module for module in modules if not "tests" in module]) packages_modules["testsModules"].extend([module for module in modules if "tests" in module]) api_file = File("{0}{1}".format(output, FILES_EXTENSION)) api_file.content.extend(TOCTREE_TEMPLATE_BEGIN) for module in packages_modules["apiModules"]: api_file.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module))) for module in packages_modules["testsModules"]: api_file.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module))) api_file.content.extend(TOCTREE_TEMPLATE_END) api_file.write() return True
[ "def", "build_api", "(", "packages", ",", "input", ",", "output", ",", "sanitizer", ",", "excluded_modules", "=", "None", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Building Sphinx documentation API!\"", ".", "format", "(", "build_api", ".", "__name__", ")", ")", "sanitizer", "=", "import_sanitizer", "(", "sanitizer", ")", "if", "os", ".", "path", ".", "exists", "(", "input", ")", ":", "shutil", ".", "rmtree", "(", "input", ")", "os", ".", "makedirs", "(", "input", ")", "excluded_modules", "=", "[", "]", "if", "excluded_modules", "is", "None", "else", "excluded_modules", "packages_modules", "=", "{", "\"apiModules\"", ":", "[", "]", ",", "\"testsModules\"", ":", "[", "]", "}", "for", "package", "in", "packages", ":", "package", "=", "__import__", "(", "package", ")", "path", "=", "foundations", ".", "common", ".", "get_first_item", "(", "package", ".", "__path__", ")", "package_directory", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "for", "file", "in", "sorted", "(", "list", "(", "foundations", ".", "walkers", ".", "files_walker", "(", "package_directory", ",", "filters_in", "=", "(", "\"{0}.*\\.ui$\"", ".", "format", "(", "path", ")", ",", ")", ")", ")", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Ui file: '{1}'\"", ".", "format", "(", "build_api", ".", "__name__", ",", "file", ")", ")", "target_directory", "=", "os", ".", "path", ".", "dirname", "(", "file", ")", ".", "replace", "(", "package_directory", ",", "\"\"", ")", "directory", "=", "\"{0}{1}\"", ".", "format", "(", "input", ",", "target_directory", ")", "if", "not", "foundations", ".", "common", ".", "path_exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "source", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "os", ".", "path", ".", "basename", "(", "file", ")", ")", "shutil", ".", "copyfile", "(", "file", ",", "source", ")", "modules", "=", "[", "]", "for", "file", "in", "sorted", "(", "list", "(", "foundations", ".", "walkers", ".", "files_walker", "(", "package_directory", ",", "filters_in", "=", "(", "\"{0}.*\\.py$\"", ".", "format", "(", "path", ")", ",", ")", ",", "filters_out", "=", "excluded_modules", ")", ")", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Python file: '{1}'\"", ".", "format", "(", "build_api", ".", "__name__", ",", "file", ")", ")", "module", "=", "\"{0}.{1}\"", ".", "format", "(", "(", "\".\"", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "file", ")", ".", "replace", "(", "package_directory", ",", "\"\"", ")", ".", "split", "(", "\"/\"", ")", ")", ")", ",", "foundations", ".", "strings", ".", "get_splitext_basename", "(", "file", ")", ")", ".", "strip", "(", "\".\"", ")", "LOGGER", ".", "info", "(", "\"{0} | Module name: '{1}'\"", ".", "format", "(", "build_api", ".", "__name__", ",", "module", ")", ")", "directory", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "input", ",", "module", ".", "replace", "(", "\".\"", ",", "\"/\"", ")", ")", ")", "if", "not", "foundations", ".", "common", ".", "path_exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "source", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "os", ".", "path", ".", "basename", "(", "file", ")", ")", "shutil", ".", "copyfile", "(", "file", ",", "source", ")", "sanitizer", ".", "bleach", "(", "source", ")", "if", "\"__init__.py\"", "in", "file", ":", "continue", "rst_file_path", "=", "\"{0}{1}\"", ".", "format", "(", "module", ",", "FILES_EXTENSION", ")", "LOGGER", ".", "info", 
"(", "\"{0} | Building API file: '{1}'\"", ".", "format", "(", "build_api", ".", "__name__", ",", "rst_file_path", ")", ")", "rst_file", "=", "File", "(", "os", ".", "path", ".", "join", "(", "output", ",", "rst_file_path", ")", ")", "header", "=", "[", "\"_`{0}`\\n\"", ".", "format", "(", "module", ")", ",", "\"==={0}\\n\"", ".", "format", "(", "\"=\"", "*", "len", "(", "module", ")", ")", ",", "\"\\n\"", ",", "\".. automodule:: {0}\\n\"", ".", "format", "(", "module", ")", ",", "\"\\n\"", "]", "rst_file", ".", "content", ".", "extend", "(", "header", ")", "functions", "=", "OrderedDict", "(", ")", "classes", "=", "OrderedDict", "(", ")", "module_attributes", "=", "OrderedDict", "(", ")", "for", "member", ",", "object", "in", "module_browser", ".", "_readmodule", "(", "module", ",", "[", "source", ",", "]", ")", ".", "iteritems", "(", ")", ":", "if", "object", ".", "__class__", "==", "module_browser", ".", "Function", ":", "if", "not", "member", ".", "startswith", "(", "\"_\"", ")", ":", "functions", "[", "member", "]", "=", "[", "\".. autofunction:: {0}\\n\"", ".", "format", "(", "member", ")", "]", "elif", "object", ".", "__class__", "==", "module_browser", ".", "Class", ":", "classes", "[", "member", "]", "=", "[", "\".. autoclass:: {0}\\n\"", ".", "format", "(", "member", ")", ",", "\"\t:show-inheritance:\\n\"", ",", "\"\t:members:\\n\"", "]", "elif", "object", ".", "__class__", "==", "module_browser", ".", "Global", ":", "if", "not", "member", ".", "startswith", "(", "\"_\"", ")", ":", "module_attributes", "[", "member", "]", "=", "[", "\".. attribute:: {0}.{1}\\n\"", ".", "format", "(", "module", ",", "member", ")", "]", "module_attributes", "and", "rst_file", ".", "content", ".", "append", "(", "\"Module Attributes\\n-----------------\\n\\n\"", ")", "for", "module_attribute", "in", "module_attributes", ".", "itervalues", "(", ")", ":", "rst_file", ".", "content", ".", "extend", "(", "module_attribute", ")", "rst_file", ".", "content", ".", "append", "(", "\"\\n\"", ")", "functions", "and", "rst_file", ".", "content", ".", "append", "(", "\"Functions\\n---------\\n\\n\"", ")", "for", "function", "in", "functions", ".", "itervalues", "(", ")", ":", "rst_file", ".", "content", ".", "extend", "(", "function", ")", "rst_file", ".", "content", ".", "append", "(", "\"\\n\"", ")", "classes", "and", "rst_file", ".", "content", ".", "append", "(", "\"Classes\\n-------\\n\\n\"", ")", "for", "class_", "in", "classes", ".", "itervalues", "(", ")", ":", "rst_file", ".", "content", ".", "extend", "(", "class_", ")", "rst_file", ".", "content", ".", "append", "(", "\"\\n\"", ")", "rst_file", ".", "write", "(", ")", "modules", ".", "append", "(", "module", ")", "packages_modules", "[", "\"apiModules\"", "]", ".", "extend", "(", "[", "module", "for", "module", "in", "modules", "if", "not", "\"tests\"", "in", "module", "]", ")", "packages_modules", "[", "\"testsModules\"", "]", ".", "extend", "(", "[", "module", "for", "module", "in", "modules", "if", "\"tests\"", "in", "module", "]", ")", "api_file", "=", "File", "(", "\"{0}{1}\"", ".", "format", "(", "output", ",", "FILES_EXTENSION", ")", ")", "api_file", ".", "content", ".", "extend", "(", "TOCTREE_TEMPLATE_BEGIN", ")", "for", "module", "in", "packages_modules", "[", "\"apiModules\"", "]", ":", "api_file", ".", "content", ".", "append", "(", "\" {0} <{1}>\\n\"", ".", "format", "(", "module", ",", "\"api/{0}\"", ".", "format", "(", "module", ")", ")", ")", "for", "module", "in", "packages_modules", "[", "\"testsModules\"", "]", ":", "api_file", 
".", "content", ".", "append", "(", "\" {0} <{1}>\\n\"", ".", "format", "(", "module", ",", "\"api/{0}\"", ".", "format", "(", "module", ")", ")", ")", "api_file", ".", "content", ".", "extend", "(", "TOCTREE_TEMPLATE_END", ")", "api_file", ".", "write", "(", ")", "return", "True" ]
45.991667
24.558333
def decorate(func, caller): """ decorate(func, caller) decorates a function using a caller. """ evaldict = func.__globals__.copy() evaldict['_call_'] = caller evaldict['_func_'] = func fun = FunctionMaker.create( func, "return _call_(_func_, %(shortsignature)s)", evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun
[ "def", "decorate", "(", "func", ",", "caller", ")", ":", "evaldict", "=", "func", ".", "__globals__", ".", "copy", "(", ")", "evaldict", "[", "'_call_'", "]", "=", "caller", "evaldict", "[", "'_func_'", "]", "=", "func", "fun", "=", "FunctionMaker", ".", "create", "(", "func", ",", "\"return _call_(_func_, %(shortsignature)s)\"", ",", "evaldict", ",", "__wrapped__", "=", "func", ")", "if", "hasattr", "(", "func", ",", "'__qualname__'", ")", ":", "fun", ".", "__qualname__", "=", "func", ".", "__qualname__", "return", "fun" ]
32.384615
9.461538
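A minimal usage sketch for `decorate`, assuming `FunctionMaker` from the decorator package is in scope as the snippet requires; the caller runs around every invocation and the wrapper keeps the original signature and docstring:

def _log_call(func, *args, **kwargs):
    # Illustrative caller: executed on each call to the decorated function.
    print("calling %s" % func.__name__)
    return func(*args, **kwargs)

def add(a, b):
    """Return a + b."""
    return a + b

logged_add = decorate(add, _log_call)
assert logged_add(2, 3) == 5               # prints "calling add" first
assert logged_add.__doc__ == add.__doc__   # metadata survives wrapping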
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) x_vf = _get_min_distance_to_volcanic_front(sites.lon, sites.lat) mean = _apply_volcanic_front_correction(mean, x_vf, rup.hypo_depth, imt) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "mean", ",", "stddevs", "=", "super", "(", ")", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "x_vf", "=", "_get_min_distance_to_volcanic_front", "(", "sites", ".", "lon", ",", "sites", ".", "lat", ")", "mean", "=", "_apply_volcanic_front_correction", "(", "mean", ",", "x_vf", ",", "rup", ".", "hypo_depth", ",", "imt", ")", "return", "mean", ",", "stddevs" ]
46
18.142857
def add_mavlink_packet(self, msg): '''add data to the graph''' mtype = msg.get_type() if mtype not in self.msg_types: return for i in range(len(self.fields)): if mtype not in self.field_types[i]: continue f = self.fields[i] self.values[i] = mavutil.evaluate_expression(f, self.state.master.messages) if self.livegraph is not None: self.livegraph.add_values(self.values)
[ "def", "add_mavlink_packet", "(", "self", ",", "msg", ")", ":", "mtype", "=", "msg", ".", "get_type", "(", ")", "if", "mtype", "not", "in", "self", ".", "msg_types", ":", "return", "for", "i", "in", "range", "(", "len", "(", "self", ".", "fields", ")", ")", ":", "if", "mtype", "not", "in", "self", ".", "field_types", "[", "i", "]", ":", "continue", "f", "=", "self", ".", "fields", "[", "i", "]", "self", ".", "values", "[", "i", "]", "=", "mavutil", ".", "evaluate_expression", "(", "f", ",", "self", ".", "state", ".", "master", ".", "messages", ")", "if", "self", ".", "livegraph", "is", "not", "None", ":", "self", ".", "livegraph", ".", "add_values", "(", "self", ".", "values", ")" ]
39.5
11.5
def list_comments(self, topic_id, start=0):
        """
        List the replies under a topic.

        :param topic_id: topic ID
        :param start: pagination offset
        :return: result list with next-page information
        """
        xml = self.api.xml(API_GROUP_GET_TOPIC % topic_id, params={'start': start})
        xml_results = xml.xpath('//ul[@id="comments"]/li')
        results = []
        for item in xml_results:
            try:
                author_avatar = item.xpath('.//img/@src')[0]
                author_url = item.xpath('.//div[@class="user-face"]/a/@href')[0]
                author_alias = slash_right(author_url)
                author_signature = item.xpath('.//h4/text()')[1].strip()
                author_nickname = item.xpath('.//h4/a/text()')[0].strip()
                created_at = item.xpath('.//h4/span/text()')[0].strip()
                content = etree.tostring(item.xpath('.//div[@class="reply-doc content"]/p')[0]).decode('utf8').strip()
                cid = item.get('id')
                results.append({
                    'id': cid,
                    'author_avatar': author_avatar,
                    'author_url': author_url,
                    'author_alias': author_alias,
                    'author_signature': author_signature,
                    'author_nickname': author_nickname,
                    'created_at': created_at,
                    'content': unescape(content),
                })
            except Exception as e:
                self.api.logger.exception('parse comment exception: %s' % e)
        return build_list_result(results, xml)
[ "def", "list_comments", "(", "self", ",", "topic_id", ",", "start", "=", "0", ")", ":", "xml", "=", "self", ".", "api", ".", "xml", "(", "API_GROUP_GET_TOPIC", "%", "topic_id", ",", "params", "=", "{", "'start'", ":", "start", "}", ")", "xml_results", "=", "xml", ".", "xpath", "(", "'//ul[@id=\"comments\"]/li'", ")", "results", "=", "[", "]", "for", "item", "in", "xml_results", ":", "try", ":", "author_avatar", "=", "item", ".", "xpath", "(", "'.//img/@src'", ")", "[", "0", "]", "author_url", "=", "item", ".", "xpath", "(", "'.//div[@class=\"user-face\"]/a/@href'", ")", "[", "0", "]", "author_alias", "=", "slash_right", "(", "author_url", ")", "author_signature", "=", "item", ".", "xpath", "(", "'.//h4/text()'", ")", "[", "1", "]", ".", "strip", "(", ")", "author_nickname", "=", "item", ".", "xpath", "(", "'.//h4/a/text()'", ")", "[", "0", "]", ".", "strip", "(", ")", "created_at", "=", "item", ".", "xpath", "(", "'.//h4/span/text()'", ")", "[", "0", "]", ".", "strip", "(", ")", "content", "=", "etree", ".", "tostring", "(", "item", ".", "xpath", "(", "'.//div[@class=\"reply-doc content\"]/p'", ")", "[", "0", "]", ")", ".", "decode", "(", "'utf8'", ")", ".", "strip", "(", ")", "cid", "=", "item", ".", "get", "(", "'id'", ")", "results", ".", "append", "(", "{", "'id'", ":", "cid", ",", "'author_avatar'", ":", "author_avatar", ",", "'author_url'", ":", "author_url", ",", "'author_alias'", ":", "author_alias", ",", "'author_signature'", ":", "author_signature", ",", "'author_nickname'", ":", "author_nickname", ",", "'created_at'", ":", "created_at", ",", "'content'", ":", "unescape", "(", "content", ")", ",", "}", ")", "except", "Exception", "as", "e", ":", "self", ".", "api", ".", "logger", ".", "exception", "(", "'parse comment exception: %s'", "%", "e", ")", "return", "build_list_result", "(", "results", ",", "xml", ")" ]
44.764706
18.529412
def _handle_delete(self, transaction): """ Handle DELETE requests :type transaction: Transaction :param transaction: the transaction that owns the request :rtype : Transaction :return: the edited transaction with the response to the request """ path = str("/" + transaction.request.uri_path) transaction.response = Response() transaction.response.destination = transaction.request.source transaction.response.token = transaction.request.token try: resource = self._server.root[path] except KeyError: resource = None if resource is None: transaction.response.code = defines.Codes.NOT_FOUND.number else: # Delete transaction.resource = resource transaction = self._server.resourceLayer.delete_resource(transaction, path) return transaction
[ "def", "_handle_delete", "(", "self", ",", "transaction", ")", ":", "path", "=", "str", "(", "\"/\"", "+", "transaction", ".", "request", ".", "uri_path", ")", "transaction", ".", "response", "=", "Response", "(", ")", "transaction", ".", "response", ".", "destination", "=", "transaction", ".", "request", ".", "source", "transaction", ".", "response", ".", "token", "=", "transaction", ".", "request", ".", "token", "try", ":", "resource", "=", "self", ".", "_server", ".", "root", "[", "path", "]", "except", "KeyError", ":", "resource", "=", "None", "if", "resource", "is", "None", ":", "transaction", ".", "response", ".", "code", "=", "defines", ".", "Codes", ".", "NOT_FOUND", ".", "number", "else", ":", "# Delete", "transaction", ".", "resource", "=", "resource", "transaction", "=", "self", ".", "_server", ".", "resourceLayer", ".", "delete_resource", "(", "transaction", ",", "path", ")", "return", "transaction" ]
36.6
17.8
def stdout_avail(self): """Data is available in stdout, let's empty the queue and write it!""" data = self.interpreter.stdout_write.empty_queue() if data: self.write(data)
[ "def", "stdout_avail", "(", "self", ")", ":", "data", "=", "self", ".", "interpreter", ".", "stdout_write", ".", "empty_queue", "(", ")", "if", "data", ":", "self", ".", "write", "(", "data", ")" ]
41.4
14
def features_check_singular(X, tol=1e-8):
    """Checks if a set of features/variables X results in an
        ill-conditioned matrix dot(X.T, X)

    Parameters:
    -----------
    X : ndarray
        An NxM array with N observations (rows)
        and M features/variables (columns).

        Note: Make sure that X variables are all normalized or
        scaled, e.g.
            X = sklearn.preprocessing.normalize(rawdata, norm='l2')

    tol : float
        Threshold below which a singular value s_k (from the SVD
        U*S*V^T) is considered too small, i.e. s_k < tol.
        The default is tol=1e-8.

    Returns:
    --------
    flag : bool
        True if X leads to a singular matrix dot(X.T, X), or
        False if it does not.

    num : int
        Number of singular values that failed the s_k < tol test

    s : ndarray
        The singular values computed by numpy.linalg.svd

    Usage:
    ------
    * flag. During Forward-Selection check if a newly added variable
        causes an ill-conditioned matrix.
    * num. Get an indication how many variables still need to be
        eliminated during Backward-Selection
    """
    import numpy as np
    _, s, _ = np.linalg.svd(np.dot(X.T, X))
    failed = s < tol
    flag = True if np.any(failed) else False
    return flag, failed.sum(), s
[ "def", "features_check_singular", "(", "X", ",", "tol", "=", "1e-8", ")", ":", "import", "numpy", "as", "np", "_", ",", "s", ",", "_", "=", "np", ".", "linalg", ".", "svd", "(", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", ")", "failed", "=", "s", "<", "tol", "flag", "=", "True", "if", "np", ".", "any", "(", "failed", ")", "else", "False", "return", "flag", ",", "failed", ".", "sum", "(", ")", ",", "s" ]
30.452381
21.238095
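An illustrative check, assuming `features_check_singular` is in scope; duplicating a column makes dot(X.T, X) rank-deficient, so at least one singular value drops below tol:

import numpy as np

rng = np.random.RandomState(0)
a = rng.rand(100, 1)
X = np.hstack([a, a, rng.rand(100, 1)])   # first two columns are identical

flag, num_failed, s = features_check_singular(X)
assert flag is True and num_failed >= 1   # the duplicate column fails the s_k < tol test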
def load_hdf(cls, filename, path='', name=None): """ A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object. """ if not os.path.exists(filename): raise IOError('{} does not exist.'.format(filename)) store = pd.HDFStore(filename) try: samples = store[path+'/samples'] attrs = store.get_storer(path+'/samples').attrs except: store.close() raise try: ic = attrs.ic_type(attrs.ic_bands) except AttributeError: ic = attrs.ic_type use_emcee = attrs.use_emcee mnest = True try: basename = attrs._mnest_basename except AttributeError: mnest = False bounds = attrs._bounds priors = attrs._priors if name is None: try: name = attrs.name except: name = '' store.close() obs = ObservationTree.load_hdf(filename, path+'/obs', ic=ic) mod = cls(ic, obs=obs, use_emcee=use_emcee, name=name) mod._samples = samples if mnest: mod._mnest_basename = basename mod._directory = os.path.dirname(filename) return mod
[ "def", "load_hdf", "(", "cls", ",", "filename", ",", "path", "=", "''", ",", "name", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "IOError", "(", "'{} does not exist.'", ".", "format", "(", "filename", ")", ")", "store", "=", "pd", ".", "HDFStore", "(", "filename", ")", "try", ":", "samples", "=", "store", "[", "path", "+", "'/samples'", "]", "attrs", "=", "store", ".", "get_storer", "(", "path", "+", "'/samples'", ")", ".", "attrs", "except", ":", "store", ".", "close", "(", ")", "raise", "try", ":", "ic", "=", "attrs", ".", "ic_type", "(", "attrs", ".", "ic_bands", ")", "except", "AttributeError", ":", "ic", "=", "attrs", ".", "ic_type", "use_emcee", "=", "attrs", ".", "use_emcee", "mnest", "=", "True", "try", ":", "basename", "=", "attrs", ".", "_mnest_basename", "except", "AttributeError", ":", "mnest", "=", "False", "bounds", "=", "attrs", ".", "_bounds", "priors", "=", "attrs", ".", "_priors", "if", "name", "is", "None", ":", "try", ":", "name", "=", "attrs", ".", "name", "except", ":", "name", "=", "''", "store", ".", "close", "(", ")", "obs", "=", "ObservationTree", ".", "load_hdf", "(", "filename", ",", "path", "+", "'/obs'", ",", "ic", "=", "ic", ")", "mod", "=", "cls", "(", "ic", ",", "obs", "=", "obs", ",", "use_emcee", "=", "use_emcee", ",", "name", "=", "name", ")", "mod", ".", "_samples", "=", "samples", "if", "mnest", ":", "mod", ".", "_mnest_basename", "=", "basename", "mod", ".", "_directory", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "return", "mod" ]
26.464286
18.821429
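A hedged round-trip sketch: 'star.h5' is an illustrative file name and must have been written by a matching `save_hdf` call for this to work:

mod = StarModel.load_hdf('star.h5')    # default path='' reads the root group
print(mod.name, len(mod._samples))     # name and posterior samples restored from the store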
def add_network(self, network, netmask, area=0):
        """Adds a network to be advertised by OSPF

        Args:
            network (str): The network to be advertised in dotted decimal
                notation
            netmask (str): The netmask to configure
            area (str): The area the network belongs to.
                By default this value is 0

        Returns:
            bool: True if the command completes successfully

        Exception:
            ValueError: Raised if network or netmask is an empty string

        """
        if network == '' or netmask == '':
            raise ValueError('network and mask values '
                             'may not be empty')
        cmd = 'network {}/{} area {}'.format(network, netmask, area)
        return self.configure_ospf(cmd)
[ "def", "add_network", "(", "self", ",", "network", ",", "netmask", ",", "area", "=", "0", ")", ":", "if", "network", "==", "''", "or", "netmask", "==", "''", ":", "raise", "ValueError", "(", "'network and mask values '", "'may not be empty'", ")", "cmd", "=", "'network {}/{} area {}'", ".", "format", "(", "network", ",", "netmask", ",", "area", ")", "return", "self", ".", "configure_ospf", "(", "cmd", ")" ]
44.45
16
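The method only formats and forwards one CLI line; a sketch with illustrative values (note the netmask slot is rendered as a prefix length in the generated command):

network, netmask, area = '172.16.10.0', '24', 0
cmd = 'network {}/{} area {}'.format(network, netmask, area)
assert cmd == 'network 172.16.10.0/24 area 0'   # the line handed to configure_ospf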
def get_token_from_env():
    """Get the token from env var, VAULT_TOKEN. If not set, attempt to get the token from ~/.vault-token

    :return: The vault token if set, else None
    :rtype: str | None
    """
    token = os.getenv('VAULT_TOKEN')
    if not token:
        token_file_path = os.path.expanduser('~/.vault-token')
        if os.path.exists(token_file_path):
            with open(token_file_path, 'r') as f_in:
                token = f_in.read().strip()

    if not token:
        return None

    return token
[ "def", "get_token_from_env", "(", ")", ":", "token", "=", "os", ".", "getenv", "(", "'VAULT_TOKEN'", ")", "if", "not", "token", ":", "token_file_path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.vault-token'", ")", "if", "os", ".", "path", ".", "exists", "(", "token_file_path", ")", ":", "with", "open", "(", "token_file_path", ",", "'r'", ")", "as", "f_in", ":", "token", "=", "f_in", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "not", "token", ":", "return", "None", "return", "token" ]
30
17.294118
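A sketch of the lookup order, with an illustrative token value; the environment variable wins and ~/.vault-token is only consulted as a fallback:

import os

os.environ['VAULT_TOKEN'] = 's.example-token'   # illustrative value
assert get_token_from_env() == 's.example-token'

del os.environ['VAULT_TOKEN']   # now falls back to ~/.vault-token,
token = get_token_from_env()    # or returns None if that file is absent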
def convert_to_layout_rules(x): """Converts input to a LayoutRules. Args: x: LayoutRules, str, or set-like of string pairs. Returns: LayoutRules. """ if isinstance(x, LayoutRules): return x if isinstance(x, str): x = _parse_string_to_list_of_pairs(x) return LayoutRules(x)
[ "def", "convert_to_layout_rules", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "LayoutRules", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "_parse_string_to_list_of_pairs", "(", "x", ")", "return", "LayoutRules", "(", "x", ")" ]
20.785714
18.5
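A hedged sketch, assuming the usual Mesh TensorFlow string form of semicolon-separated `tensor_dim:mesh_dim` pairs; a LayoutRules input passes through untouched:

rules = convert_to_layout_rules("batch:rows;hidden:cols")   # parsed into pairs
same = convert_to_layout_rules(rules)
assert same is rules   # already a LayoutRules, returned as-is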
def _call_apt(args, scope=True, **kwargs): ''' Call apt* utilities. ''' cmd = [] if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) cmd.extend(args) params = {'output_loglevel': 'trace', 'python_shell': False, 'env': salt.utils.environment.get_module_environment(globals())} params.update(kwargs) return __salt__['cmd.run_all'](cmd, **params)
[ "def", "_call_apt", "(", "args", ",", "scope", "=", "True", ",", "*", "*", "kwargs", ")", ":", "cmd", "=", "[", "]", "if", "scope", "and", "salt", ".", "utils", ".", "systemd", ".", "has_scope", "(", "__context__", ")", "and", "__salt__", "[", "'config.get'", "]", "(", "'systemd.scope'", ",", "True", ")", ":", "cmd", ".", "extend", "(", "[", "'systemd-run'", ",", "'--scope'", "]", ")", "cmd", ".", "extend", "(", "args", ")", "params", "=", "{", "'output_loglevel'", ":", "'trace'", ",", "'python_shell'", ":", "False", ",", "'env'", ":", "salt", ".", "utils", ".", "environment", ".", "get_module_environment", "(", "globals", "(", ")", ")", "}", "params", ".", "update", "(", "kwargs", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "*", "*", "params", ")" ]
33.066667
23.6
def _get_redis_cache_opts(): ''' Return the Redis server connection details from the __opts__. ''' return { 'host': __opts__.get('cache.redis.host', 'localhost'), 'port': __opts__.get('cache.redis.port', 6379), 'unix_socket_path': __opts__.get('cache.redis.unix_socket_path', None), 'db': __opts__.get('cache.redis.db', '0'), 'password': __opts__.get('cache.redis.password', ''), 'cluster_mode': __opts__.get('cache.redis.cluster_mode', False), 'startup_nodes': __opts__.get('cache.redis.cluster.startup_nodes', {}), 'skip_full_coverage_check': __opts__.get('cache.redis.cluster.skip_full_coverage_check', False), }
[ "def", "_get_redis_cache_opts", "(", ")", ":", "return", "{", "'host'", ":", "__opts__", ".", "get", "(", "'cache.redis.host'", ",", "'localhost'", ")", ",", "'port'", ":", "__opts__", ".", "get", "(", "'cache.redis.port'", ",", "6379", ")", ",", "'unix_socket_path'", ":", "__opts__", ".", "get", "(", "'cache.redis.unix_socket_path'", ",", "None", ")", ",", "'db'", ":", "__opts__", ".", "get", "(", "'cache.redis.db'", ",", "'0'", ")", ",", "'password'", ":", "__opts__", ".", "get", "(", "'cache.redis.password'", ",", "''", ")", ",", "'cluster_mode'", ":", "__opts__", ".", "get", "(", "'cache.redis.cluster_mode'", ",", "False", ")", ",", "'startup_nodes'", ":", "__opts__", ".", "get", "(", "'cache.redis.cluster.startup_nodes'", ",", "{", "}", ")", ",", "'skip_full_coverage_check'", ":", "__opts__", ".", "get", "(", "'cache.redis.cluster.skip_full_coverage_check'", ",", "False", ")", ",", "}" ]
49
29.142857
def get_config(self): """ Currently only contains the "config" member, which is a string containing the config file as loaded by i3 most recently. :rtype: ConfigReply """ data = self.message(MessageType.GET_CONFIG, '') return json.loads(data, object_hook=ConfigReply)
[ "def", "get_config", "(", "self", ")", ":", "data", "=", "self", ".", "message", "(", "MessageType", ".", "GET_CONFIG", ",", "''", ")", "return", "json", ".", "loads", "(", "data", ",", "object_hook", "=", "ConfigReply", ")" ]
35.111111
17.555556
def get_eids(self, num_rlzs): """ :param num_rlzs: the number of realizations for the given group :returns: an array of event IDs """ num_events = self.n_occ if self.samples > 1 else self.n_occ * num_rlzs return TWO32 * U64(self.serial) + numpy.arange(num_events, dtype=U64)
[ "def", "get_eids", "(", "self", ",", "num_rlzs", ")", ":", "num_events", "=", "self", ".", "n_occ", "if", "self", ".", "samples", ">", "1", "else", "self", ".", "n_occ", "*", "num_rlzs", "return", "TWO32", "*", "U64", "(", "self", ".", "serial", ")", "+", "numpy", ".", "arange", "(", "num_events", ",", "dtype", "=", "U64", ")" ]
45.142857
16.857143
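The ID packing is plain arithmetic: the rupture serial sits in the high 32 bits and the event index in the low 32, so events from different ruptures can never collide. A worked sketch with illustrative aliases for U64 and TWO32:

import numpy

U64 = numpy.uint64            # aliases as the method assumes
TWO32 = U64(2 ** 32)

serial, num_events = 7, 3     # illustrative values
eids = TWO32 * U64(serial) + numpy.arange(num_events, dtype=U64)
assert list(eids) == [7 * 2**32, 7 * 2**32 + 1, 7 * 2**32 + 2]
assert all(int(e) >> 32 == serial for e in eids)   # serial recoverable from high bits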
def _author_line(self): """ Helper method to concatenate author and institution values, if necessary :return: string """ if self.author and self.institution: return self.author + ";" + self.institution elif self.author: return self.author else: return self.institution
[ "def", "_author_line", "(", "self", ")", ":", "if", "self", ".", "author", "and", "self", ".", "institution", ":", "return", "self", ".", "author", "+", "\";\"", "+", "self", ".", "institution", "elif", "self", ".", "author", ":", "return", "self", ".", "author", "else", ":", "return", "self", ".", "institution" ]
31.818182
13.636364
def assertDutTraceDoesNotContain(dut, message, bench):
    """
    Raise TestStepFail if bench.verify_trace finds the message in dut traces.

    :param dut: Dut object.
    :param message: Message to look for.
    :param bench: Bench object, must contain a verify_trace method.
    :raises: AttributeError if bench does not contain verify_trace method.
    TestStepFail if verify_trace returns True.
    """
    if not hasattr(bench, "verify_trace"):
        raise AttributeError("Bench object does not contain verify_trace method!")
    if bench.verify_trace(dut, message, False):
        raise TestStepFail('Assert: Message(s) "%s" in response' % message)
[ "def", "assertDutTraceDoesNotContain", "(", "dut", ",", "message", ",", "bench", ")", ":", "if", "not", "hasattr", "(", "bench", ",", "\"verify_trace\"", ")", ":", "raise", "AttributeError", "(", "\"Bench object does not contain verify_trace method!\"", ")", "if", "bench", ".", "verify_trace", "(", "dut", ",", "message", ",", "False", ")", ":", "raise", "TestStepFail", "(", "'Assert: Message(s) \"%s\" in response'", "%", "message", ")" ]
45.428571
17.714286
def qr(X, ip_B=None, reorthos=1):
    """QR factorization with customizable inner product.

    :param X: array with ``shape==(N,k)``
    :param ip_B: (optional) inner product, see :py:meth:`inner`.
    :param reorthos: (optional) number of reorthogonalizations. Defaults to
      1 (i.e. 2 runs of modified Gram-Schmidt) which should be enough in most
      cases (TODO: add reference).

    :return: Q, R where :math:`X=QR` with :math:`\\langle Q,Q \\rangle=I_k` and
      R upper triangular.
    """
    if ip_B is None and X.shape[1] > 0:
        return scipy.linalg.qr(X, mode='economic')
    else:
        (N, k) = X.shape
        Q = X.copy()
        R = numpy.zeros((k, k), dtype=X.dtype)
        for i in range(k):
            for reortho in range(reorthos+1):
                for j in range(i):
                    alpha = inner(Q[:, [j]], Q[:, [i]], ip_B=ip_B)[0, 0]
                    R[j, i] += alpha
                    Q[:, [i]] -= alpha * Q[:, [j]]
            R[i, i] = norm(Q[:, [i]], ip_B=ip_B)
            if R[i, i] >= 1e-15:
                Q[:, [i]] /= R[i, i]
        return Q, R
[ "def", "qr", "(", "X", ",", "ip_B", "=", "None", ",", "reorthos", "=", "1", ")", ":", "if", "ip_B", "is", "None", "and", "X", ".", "shape", "[", "1", "]", ">", "0", ":", "return", "scipy", ".", "linalg", ".", "qr", "(", "X", ",", "mode", "=", "'economic'", ")", "else", ":", "(", "N", ",", "k", ")", "=", "X", ".", "shape", "Q", "=", "X", ".", "copy", "(", ")", "R", "=", "numpy", ".", "zeros", "(", "(", "k", ",", "k", ")", ",", "dtype", "=", "X", ".", "dtype", ")", "for", "i", "in", "range", "(", "k", ")", ":", "for", "reortho", "in", "range", "(", "reorthos", "+", "1", ")", ":", "for", "j", "in", "range", "(", "i", ")", ":", "alpha", "=", "inner", "(", "Q", "[", ":", ",", "[", "j", "]", "]", ",", "Q", "[", ":", ",", "[", "i", "]", "]", ",", "ip_B", "=", "ip_B", ")", "[", "0", ",", "0", "]", "R", "[", "j", ",", "i", "]", "+=", "alpha", "Q", "[", ":", ",", "[", "i", "]", "]", "-=", "alpha", "*", "Q", "[", ":", ",", "[", "j", "]", "]", "R", "[", "i", ",", "i", "]", "=", "norm", "(", "Q", "[", ":", ",", "[", "i", "]", "]", ",", "ip_B", "=", "ip_B", ")", "if", "R", "[", "i", ",", "i", "]", ">=", "1e-15", ":", "Q", "[", ":", ",", "[", "i", "]", "]", "/=", "R", "[", "i", ",", "i", "]", "return", "Q", ",", "R" ]
38.428571
15.678571
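A quick property check on the default branch (ip_B=None delegates to SciPy); the custom branch additionally needs the module's `inner` and `norm` helpers:

import numpy

X = numpy.random.rand(50, 4)
Q, R = qr(X)
assert numpy.allclose(Q.dot(R), X)                 # X = QR
assert numpy.allclose(Q.T.dot(Q), numpy.eye(4))    # orthonormal columns
assert numpy.allclose(numpy.tril(R, -1), 0)        # R upper triangular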
def deal(self, num=1, end=TOP): """ Returns a list of cards, which are removed from the Stack. :arg int num: The number of cards to deal. :arg str end: Which end to deal from. Can be ``0`` (top) or ``1`` (bottom). :returns: The given number of cards from the stack. """ ends = {TOP: self.cards.pop, BOTTOM: self.cards.popleft} self_size = self.size if num <= self_size: dealt_cards = [None] * num else: num = self_size dealt_cards = [None] * self_size if self_size: for n in xrange(num): try: card = ends[end]() dealt_cards[n] = card except: break return Stack(cards=dealt_cards) else: return Stack()
[ "def", "deal", "(", "self", ",", "num", "=", "1", ",", "end", "=", "TOP", ")", ":", "ends", "=", "{", "TOP", ":", "self", ".", "cards", ".", "pop", ",", "BOTTOM", ":", "self", ".", "cards", ".", "popleft", "}", "self_size", "=", "self", ".", "size", "if", "num", "<=", "self_size", ":", "dealt_cards", "=", "[", "None", "]", "*", "num", "else", ":", "num", "=", "self_size", "dealt_cards", "=", "[", "None", "]", "*", "self_size", "if", "self_size", ":", "for", "n", "in", "xrange", "(", "num", ")", ":", "try", ":", "card", "=", "ends", "[", "end", "]", "(", ")", "dealt_cards", "[", "n", "]", "=", "card", "except", ":", "break", "return", "Stack", "(", "cards", "=", "dealt_cards", ")", "else", ":", "return", "Stack", "(", ")" ]
25.588235
18.823529
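Illustrative pydealer-style usage with a hypothetical `make_cards` helper supplying 52 card objects; over-asking does not raise, it simply drains the stack:

stack = Stack(cards=make_cards(52))   # make_cards is hypothetical
hand = stack.deal(5)                  # five cards off the TOP by default
assert hand.size == 5 and stack.size == 47

rest = stack.deal(100)                # more than remain: deal what is left
assert rest.size == 47 and stack.size == 0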
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False, released=False, ): """Create a version in a project and return a Resource for it. :param name: name of the version to create :type name: str :param project: key of the project to create the version in :type project: str :param description: a description of the version :type description: str :param releaseDate: the release date assigned to the version :type releaseDate: Optional[Any] :param startDate: The start date for the version :type startDate: Optional[Any] :param archived: Denotes whether a version should be archived. (Default: False) :type archived: bool :param released: Denotes whether a version is released. (Default: False) :type released: bool :rtype: Version """ data = { 'name': name, 'project': project, 'archived': archived, 'released': released} if description is not None: data['description'] = description if releaseDate is not None: data['releaseDate'] = releaseDate if startDate is not None: data['startDate'] = startDate url = self._get_url('version') r = self._session.post( url, data=json.dumps(data)) time.sleep(1) version = Version(self._options, self._session, raw=json_loads(r)) return version
[ "def", "create_version", "(", "self", ",", "name", ",", "project", ",", "description", "=", "None", ",", "releaseDate", "=", "None", ",", "startDate", "=", "None", ",", "archived", "=", "False", ",", "released", "=", "False", ",", ")", ":", "data", "=", "{", "'name'", ":", "name", ",", "'project'", ":", "project", ",", "'archived'", ":", "archived", ",", "'released'", ":", "released", "}", "if", "description", "is", "not", "None", ":", "data", "[", "'description'", "]", "=", "description", "if", "releaseDate", "is", "not", "None", ":", "data", "[", "'releaseDate'", "]", "=", "releaseDate", "if", "startDate", "is", "not", "None", ":", "data", "[", "'startDate'", "]", "=", "startDate", "url", "=", "self", ".", "_get_url", "(", "'version'", ")", "r", "=", "self", ".", "_session", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "time", ".", "sleep", "(", "1", ")", "version", "=", "Version", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw", "=", "json_loads", "(", "r", ")", ")", "return", "version" ]
35.744681
14.021277
def _ModifyInterface( self, interface_config, config_key, config_value, replace=False): """Write a value to a config file if not already present. Args: interface_config: string, the path to a config file. config_key: string, the configuration key to set. config_value: string, the value to set for the configuration key. replace: bool, replace the configuration option if already present. """ config_entry = '%s=%s' % (config_key, config_value) if not open(interface_config).read().count(config_key): with open(interface_config, 'a') as config: config.write('%s\n' % config_entry) elif replace: for line in fileinput.input(interface_config, inplace=True): print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip()))
[ "def", "_ModifyInterface", "(", "self", ",", "interface_config", ",", "config_key", ",", "config_value", ",", "replace", "=", "False", ")", ":", "config_entry", "=", "'%s=%s'", "%", "(", "config_key", ",", "config_value", ")", "if", "not", "open", "(", "interface_config", ")", ".", "read", "(", ")", ".", "count", "(", "config_key", ")", ":", "with", "open", "(", "interface_config", ",", "'a'", ")", "as", "config", ":", "config", ".", "write", "(", "'%s\\n'", "%", "config_entry", ")", "elif", "replace", ":", "for", "line", "in", "fileinput", ".", "input", "(", "interface_config", ",", "inplace", "=", "True", ")", ":", "print", "(", "re", ".", "sub", "(", "r'%s=.*'", "%", "config_key", ",", "config_entry", ",", "line", ".", "rstrip", "(", ")", ")", ")" ]
46.352941
20.352941
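The replace branch rewritten as a standalone sketch on a throwaway file; note the unanchored regex would also rewrite keys that merely contain the config key as a substring, so real configs may need anchoring:

import fileinput
import re
import tempfile

path = tempfile.mkstemp()[1]   # throwaway config file
with open(path, 'w') as f:
    f.write('MTU=1460\n')

key, value = 'MTU', '1500'
config_entry = '%s=%s' % (key, value)
for line in fileinput.input(path, inplace=True):   # key present: replace in place
    print(re.sub(r'%s=.*' % key, config_entry, line.rstrip()))

with open(path) as f:
    assert f.read() == 'MTU=1500\n'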
def get_ip_mac_arp_list(auth, url, devid=None, devip=None):
    """
    Takes the devid or devip of a specific device and issues a RESTful call to get the
    IP/MAC/ARP list from the target device.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param devid: int or str value of the target device.
    :param devip: str of ipv4 address of the target device
    :return: list of dictionaries containing the IP/MAC/ARP list of the target device.
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> ip_mac_list = get_ip_mac_arp_list( auth.creds, auth.url, devid='10')
    >>> ip_mac_list = get_ip_mac_arp_list( auth.creds, auth.url, devip='10.101.0.221')
    >>> assert type(ip_mac_list) is list
    >>> assert 'deviceId' in ip_mac_list[0]
    """
    if devip is not None:
        dev_details = get_dev_details(devip, auth, url)
        if isinstance(dev_details, str):
            print("Device not found")
            return 403
        else:
            devid = get_dev_details(devip, auth, url)['id']
    f_url = url + "/imcrs/res/access/ipMacArp/" + str(devid)
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            ipmacarplist = (json.loads(response.text))
            if 'ipMacArp' in ipmacarplist:
                return ipmacarplist['ipMacArp']
            else:
                return ['this function is unsupported']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ip_mac_arp_list: An Error has occured"
[ "def", "get_ip_mac_arp_list", "(", "auth", ",", "url", ",", "devid", "=", "None", ",", "devip", "=", "None", ")", ":", "if", "devip", "is", "not", "None", ":", "dev_details", "=", "get_dev_details", "(", "devip", ",", "auth", ",", "url", ")", "if", "isinstance", "(", "dev_details", ",", "str", ")", ":", "print", "(", "\"Device not found\"", ")", "return", "403", "else", ":", "devid", "=", "get_dev_details", "(", "devip", ",", "auth", ",", "url", ")", "[", "'id'", "]", "f_url", "=", "url", "+", "\"/imcrs/res/access/ipMacArp/\"", "+", "str", "(", "devid", ")", "response", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "if", "response", ".", "status_code", "==", "200", ":", "ipmacarplist", "=", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")", "if", "'ipMacArp'", "in", "ipmacarplist", ":", "return", "ipmacarplist", "[", "'ipMacArp'", "]", "else", ":", "return", "[", "'this function is unsupported'", "]", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "\" get_ip_mac_arp_list: An Error has occured\"" ]
35.36
25.52
def add_debug(parser): """Add a `debug` flag to the _parser_.""" parser.add_argument( '-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output')
[ "def", "add_debug", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-d'", ",", "'--debug'", ",", "action", "=", "'store_const'", ",", "const", "=", "logging", ".", "DEBUG", ",", "default", "=", "logging", ".", "INFO", ",", "help", "=", "'Set DEBUG output'", ")" ]
51.25
27
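Wired into a parser, with illustrative argv values; the stored constant plugs straight into logging:

import argparse
import logging

parser = argparse.ArgumentParser()
add_debug(parser)

assert parser.parse_args([]).debug == logging.INFO   # default level
args = parser.parse_args(['-d'])
assert args.debug == logging.DEBUG                   # flag present
logging.basicConfig(level=args.debug)                # typical hand-off to logging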