Columns:
Unnamed: 0 — int64, range 0 to 389k
code — string, lengths 26 to 79.6k
docstring — string, lengths 1 to 46.9k
3,200
def unicode_to_string(self):
    for tag in self.tags:
        self.ununicode.append(str(tag))
Convert unicode tags to strings.
3,201
def syscall_direct(*events):
    def _syscall(scheduler, processor):
        for e in events:
            processor(e)
    return _syscall
Directly process these events. This should never be used for normal events.
3,202
def schedule_downtime(scope, api_key=None, app_key=None, monitor_id=None,
                      start=None, end=None, message=None, recurrence=None,
                      timezone=None, test=False):
    ret = {'result': False, 'response': None, 'comment': ''}

    if api_key is None:
        raise SaltInvocationError('api_key must be specified')
    if app_key is None:
        raise SaltInvocationError('app_key must be specified')
    if test is True:
        ret['result'] = True
        ret['comment'] = 'A downtime schedule would be created.'
        return ret
    _initialize_connection(api_key, app_key)
    try:
        response = datadog.api.Downtime.create(scope=scope,
                                               monitor_id=monitor_id,
                                               start=start,
                                               end=end,
                                               message=message,
                                               recurrence=recurrence,
                                               timezone=timezone)
    except ValueError:
        comment = ('Unexpected exception in Datadog Schedule Downtime API '
                   'call. Are your keys correct?')
        ret['comment'] = comment
        return ret

    ret['response'] = response
    if 'active' in response.keys():
        ret['result'] = True
        ret['comment'] = 'Successfully scheduled downtime'
    return ret
Schedule downtime for a scope of monitors. CLI Example: .. code-block:: bash salt-call datadog.schedule_downtime 'host:app2' \\ stop=$(date --date='30 minutes' +%s) \\ app_key='0123456789' \\ api_key='9876543210' Optional arguments :param monitor_id: The ID of the monitor :param start: Start time in seconds since the epoch :param end: End time in seconds since the epoch :param message: A message to send in a notification for this downtime :param recurrence: Repeat this downtime periodically :param timezone: Specify the timezone
3,203
def is_complete(self):
    return all(
        [node.route_allocation() is not None
         for node in list(self._nodes.values())
         if node != self._problem.depot()]
    )
Returns True if this is a complete solution, i.e., all nodes are allocated. Returns ------- bool True if all nodes are allocated.
3,204
def getVisibility(self):
    try:
        if self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
            return VISIBLE
        elif self.map[GET_VISIBILITY_PROPERTY] == 'INVISIBLE':
            return INVISIBLE
        elif self.map[GET_VISIBILITY_PROPERTY] == 'GONE':
            return GONE
        else:
            return -2
    except:
        return -1
Gets the View visibility
3,205
def download(self, url, dest_path=None):
    if os.path.exists(dest_path):
        os.remove(dest_path)

    resp = get(url, stream=True)
    size = int(resp.headers.get("content-length"))

    label = "Downloading {filename} ({size:.2f}MB)".format(
        filename=os.path.basename(dest_path),
        size=size / float(self.chunk_size) / self.chunk_size
    )

    with open_file(dest_path, 'wb') as file:
        content_iter = resp.iter_content(chunk_size=self.chunk_size)
        with progressbar(content_iter, length=size / self.chunk_size,
                         label=label) as bar:
            for chunk in bar:
                if chunk:
                    file.write(chunk)
                    file.flush()
:param url: :type url: str :param dest_path: :type dest_path: str
3,206
def pow2_quantized_affine(inp, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True, quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True): if not hasattr(n_outmaps, ): n_outmaps = [n_outmaps] n_outmaps = list(n_outmaps) n_outmap = int(np.prod(n_outmaps)) if w_init is None: inmaps = np.prod(inp.shape[base_axis:]) w_init = UniformInitializer( calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() w = get_parameter_or_create( "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, True, not fix_parameters) if quantize_w: w_q = get_parameter_or_create( "W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps, w_init, False) real_w_q = F.pow2_quantize(w, quantize=quantize_w, sign=sign_w, with_zero=with_zero_w, n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", n_outmaps, b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", n_outmaps, b_init, False) real_b_q = F.pow2_quantize(b, quantize=quantize_b, sign=sign_b, with_zero=with_zero_b, n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.affine(inp, real_w_q, real_b_q, base_axis)
Pow2 Quantized Affine. Pow2 Quantized Affine is the affine function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_j = \sum_{i} Q(w_{ji}) x_i, where :math:`Q(w_{ji})` is the power-of-2 quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) Quantized values are stored as floating point number for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix. n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. sign_w (bool): Use signed quantization if `True`. with_zero_w (bool): Indicate using zero as a quantized value. Default is false. n_w (int): Bit width used for weight. m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. with_zero_b (bool): Indicate using zero as a quantized value. Default is false. n_b (int): Bit width used for bias. m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
3,207
def stage(self):
    if 'PYPI_USER' not in os.environ or 'PYPI_PASS' not in os.environ:
        raise BuildError("You must set the PYPI_USER and PYPI_PASS environment variables")

    try:
        import twine
    except ImportError:
        raise BuildError("You must install twine in order to release python packages",
                         suggestion="pip install twine")

    if not self.component.has_wheel:
        raise BuildError("You can't release a component that does not build a python wheel")

    wheel = self.component.support_wheel
    sdist = "%s-%s.tar.gz" % (self.component.support_distribution,
                              self.component.parsed_version.pep440_string())

    wheel_path = os.path.realpath(os.path.abspath(
        os.path.join(self.component.output_folder, 'python', wheel)))
    sdist_path = os.path.realpath(os.path.abspath(
        os.path.join(self.component.output_folder, 'python', sdist)))

    if not os.path.isfile(wheel_path) or not os.path.isfile(sdist_path):
        raise BuildError("Could not find built wheel or sdist matching current built version",
                         sdist_path=sdist_path, wheel_path=wheel_path)

    self.dists = [sdist_path, wheel_path]
Stage python packages for release, verifying everything we can about them.
3,208
def get_build_configuration(id=None, name=None):
    data = get_build_configuration_raw(id, name)
    if data:
        return utils.format_json(data)
Retrieve a specific BuildConfiguration
3,209
def _set_serial_console(self):
    yield from self._modify_vm("--uart1 0x3F8 4")
    pipe_name = self._get_pipe_name()
    args = [self._vmname, "--uartmode1", "server", pipe_name]
    yield from self.manager.execute("modifyvm", args)
Configures the first serial port to allow a serial console connection.
3,210
def process_file(source_file):
    if source_file.endswith(('.pdf', '.PDF')):
        txt = extract_pdf(source_file)
    elif source_file.endswith(('.txt', '.eml', '.csv', '.json')):
        with open(source_file, 'r') as f:
            txt = f.read()
    else:
        logger.info("Unsupported file extension for file {}".format(source_file))
        return ""
    return txt
Extract text from a file (pdf, txt, eml, csv, json) :param source_file path to file to read :return text from file
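A minimal usage sketch for the function above; the file names are hypothetical and assume the module-level logger and extract_pdf helper from this row:

text = process_file("report.pdf")   # routed through extract_pdf()
notes = process_file("notes.txt")   # read directly from disk
other = process_file("image.jpg")   # unsupported extension: logs and returns ""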
3,211
def rollsd(self, scale=1, **kwargs):
    ts = self.rollapply('sd', **kwargs)
    if scale != 1:
        ts *= scale
    return ts
A :ref:`rolling function <rolling-function>` for standard-deviation values. Same as:: self.rollapply('sd', **kwargs)
3,212
def _fix_lsm_bitspersample(self, parent):
    if self.code != 258 or self.count != 2:
        return
    log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code)
    value = struct.pack('<HH', *self.value)
    self.valueoffset = struct.unpack('<I', value)[0]
    parent.filehandle.seek(self.valueoffset)
    self.value = struct.unpack('<HH', parent.filehandle.read(4))
Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag.
3,213
def run(self, for_time=None):
    self.for_time = for_time
    try:
        self.is_initialised()
    except AttributeError:
        raise
    if self.number_of_equilibration_jumps > 0:
        for step in range(self.number_of_equilibration_jumps):
            self.lattice.jump()
        self.reset()
    if self.for_time:
        self.number_of_jumps = 0
        while self.lattice.time < self.for_time:
            self.lattice.jump()
            self.number_of_jumps += 1
    else:
        for step in range(self.number_of_jumps):
            self.lattice.jump()
    self.has_run = True
Run the simulation. Args: for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None. Returns: None
3,214
def filter(self, dict_name, priority_min=, priority_max=, start=0, limit=None): conn = redis.Redis(connection_pool=self.pool) script = conn.register_script() if limit is None: limit = -1 res = script(keys=[self._lock_name, self._namespace(dict_name), self._namespace(dict_name) + ], args=[self._session_lock_identifier or , priority_min, priority_max, start, limit]) if res == -1: raise LockError() split_res = dict([(self._decode(res[i]), self._decode(res[i+1])) for i in xrange(0, len(res)-1, 2)]) return split_res
Get a subset of a dictionary. This retrieves only keys with priority scores greater than or equal to `priority_min` and less than or equal to `priority_max`. Of those keys, it skips the first `start` ones, and then returns at most `limit` keys. With default parameters, this retrieves the entire dictionary, making it a more expensive version of :meth:`pull`. This can be used to limit the dictionary by priority score, for instance using the score as a time stamp and only retrieving values before or after a specific time; or it can be used to get slices of the dictionary if there are too many items to use :meth:`pull`. This is a read-only operation and does not require a session lock, but if this is run in a session context, the lock will be honored. :param str dict_name: name of the dictionary to retrieve :param float priority_min: lowest score to retrieve :param float priority_max: highest score to retrieve :param int start: number of items to skip :param int limit: number of items to retrieve :return: corresponding (partial) Python dictionary :raise rejester.LockError: if the session lock timed out
3,215
def denoise_grid(self, val, expand=1):
    updated_grid = [[self.grd.get_tile(y, x)
                     for x in range(self.grd.grid_width)]
                    for y in range(self.grd.grid_height)]
    for row in range(self.grd.get_grid_height() - expand):
        for col in range(self.grd.get_grid_width() - expand):
            updated_grid[row][col] = self.grd.get_tile(row, col)
            if self.grd.get_tile(row, col) == val:
                for y in range(-expand, expand):
                    for x in range(-expand, expand):
                        new_x = col + x
                        new_y = row + y
                        if new_x < 0:
                            new_x = 0
                        if new_y < 0:
                            new_y = 0
                        if new_x > self.grd.get_grid_width() - 1:
                            new_x = self.grd.get_grid_width() - 1
                        if new_y > self.grd.get_grid_height() - 1:
                            new_y = self.grd.get_grid_height() - 1
                        if expand > 0:
                            if randint(1, expand * 2) > (expand + 1):
                                updated_grid[new_y][new_x] = val
                        else:
                            updated_grid[new_y][new_x] = val
    self.grd.replace_grid(updated_grid)
For every cell in the grid equal to 'val', fill the cells around it to denoise the grid.
3,216
def _make_shred(self, c, name, feature_extractors, sheet_name): height, width, channels = self.orig_img.shape r_x, r_y, r_w, r_h = cv2.boundingRect(c) epsilon = 0.01 * cv2.arcLength(c, True) simplified_contour = cv2.approxPolyDP(c, epsilon, True) if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3: print("Skipping piece name, r_w, r_h)) return None if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100: print("Skipping piece name, r_w, r_h)) return None box_center, bbox, angle = cv2.minAreaRect(c) if bbox[0] > bbox[1]: angle += 90 bbox = (bbox[1], bbox[0]) if bbox[1] / float(bbox[0]) > 70: print("Skipping piece return None y1 = math.floor(box_center[1] - bbox[1] / 2) x1 = math.floor(box_center[0] - bbox[0] / 2) bbox = tuple(map(int, map(math.ceil, bbox))) piece_mask = np.zeros([height, width, 1], dtype=np.uint8) cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED) img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w] piece_in_context = self.save_image( "pieces/%s_ctx" % name, self.orig_img[max(r_y - 10, 0):r_y + r_h + 10, max(r_x - 10, 0):r_x + r_w + 10]) mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w] img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask) img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA) img_roi[:, :, 3] = mask[:, :, 0] M = cv2.getRotationMatrix2D((box_center[0] - r_x, box_center[1] - r_y), angle, 1) M[0][2] += r_x - x1 M[1][2] += r_y - y1 img_roi = cv2.warpAffine(img_roi, M, bbox) piece_fname = self.save_image("pieces/%s" % name, img_roi, "png") _, _, _, mask = cv2.split(img_roi) _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(contours) != 1: print("Piece cnt = contours[0] features_fname = self.save_image("pieces/%s_mask" % name, mask, "png") base_features = { "on_sheet_x": r_x, "on_sheet_y": r_y, "on_sheet_width": r_w, "on_sheet_height": r_h, "on_sheet_angle": angle, "width": img_roi.shape[1], "height": img_roi.shape[0], } tags_suggestions = [] for feat in feature_extractors: fts, tags = feat.get_info(img_roi, cnt, name) base_features.update(fts) tags_suggestions += tags if tags_suggestions: print(name, tags_suggestions) return Shred( contour=c, features=base_features, features_fname=features_fname, img_roi=img_roi, name=name, piece_fname=piece_fname, piece_in_context_fname=piece_in_context, sheet=sheet_name, simplified_contour=simplified_contour, tags_suggestions=tags_suggestions, )
Creates a Shred instances from a given contour. Args: c: cv2 contour object. name: string shred name within a sheet. feature_extractors: iterable of AbstractShredFeature instances. Returns: A new Shred instance or None on failure.
3,217
def not_has_branch(branch):
    if _has_branch(branch):
        msg = 'The branch "{}" already exists.'.format(branch)
        raise temple.exceptions.ExistingBranchError(msg)
Raises `ExistingBranchError` if the specified branch exists.
3,218
def before_app_websocket(self, func: Callable) -> Callable:
    self.record_once(lambda state: state.app.before_websocket(func))
    return func
Add a before request websocket to the App. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_websocket def before(): ...
3,219
def channels_open(self, room_id, **kwargs):
    return self.__call_api_post('channels.open', roomId=room_id, kwargs=kwargs)
Adds the channel back to the user’s list of channels.
3,220
def run(self): self.fenum.write() self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), ), ) for idx, line in enumerate(self.fcpp): l = self.process_line(idx, line) self.f_data_type.write(l) self.fcpp.close() self.f_data_type.close() self.fenum.close() print()
Main function.
3,221
def init_model(engine, create=True, drop=False):
    meta.engine = engine
    meta.metadata = MetaData(bind=meta.engine)
    meta.Session = scoped_session(sessionmaker(bind=meta.engine))
    model.setup_tables(create=create, drop=drop)
Initializes the shared SQLAlchemy state in the L{coilmq.store.sa.model} module. @param engine: The SQLAlchemy engine instance. @type engine: C{sqlalchemy.Engine} @param create: Whether to create the tables (if they do not exist). @type create: C{bool} @param drop: Whether to drop the tables (if they exist). @type drop: C{bool}
3,222
def __event_exist(self, event_type):
    for i in range(self.len()):
        if self.events_list[i][1] < 0 and self.events_list[i][3] == event_type:
            return i
    return -1
Return the event position, if it exists. An event exists if: * end is < 0 * event_type is matching Return -1 if the item is not found.
3,223
def _maybe_throw(self):
    if self._err:
        ex_cls, ex_obj, ex_bt = self._err
        self._err = None
        PyCBC.raise_helper(ex_cls, ex_obj, ex_bt)
Throw any deferred exceptions set via :meth:`_add_err`
3,224
def GET(self, func, data):
    if func not in []:
        s, message = self.checkAccount()
        if s is False:
            return False, message
    url = nurls[func]
    r = self.session.get(url, params=data)
    r.encoding = 'utf-8'
    if self.debug:
        print r.text
    try:
        try:
            metadata = json.loads(r.text)
        except:
            metadata = json.loads(r.text[r.text.find('{'):-1])
        message = metadata['message']
        if message == 'success':
            return True, metadata['resultvalue']
        else:
            return False, message
    except:
        for e in sys.exc_info():
            print e
        sys.exit(1)
    return False, "Error %s: Failed to send GET request" % func
Send GET request to execute Ndrive API :param func: The function name you want to execute in Ndrive API. :param params: Parameter data for HTTP request. :returns: metadata when success or False when failed
3,225
def add_plot_parser(subparsers):
    argparser_replot = subparsers.add_parser("replot", help="Reproduce GSEA desktop output figures.")
    group_replot = argparser_replot.add_argument_group("Input arguments")
    group_replot.add_argument("-i", "--indir", action="store", dest="indir",
                              required=True, metavar='GSEA_dir',
                              help="The GSEA desktop results directory that you want to reproduce the figure ")
    add_output_option(group_replot)
    group_replot.add_argument("-w", "--weight", action='store', dest='weight',
                              default=1.0, type=float, metavar='float',
                              help="Weighted score of rank metrics. Please use the same value as in GSEA. Default: 1.0")
    return
Add function 'plot' argument parsers.
3,226
def get_opcodes_from_bp_table(bp):
    x = len(bp) - 1
    y = len(bp[0]) - 1
    opcodes = []
    while x != 0 or y != 0:
        this_bp = bp[x][y]
        opcodes.append(this_bp)
        if this_bp[0] == EQUAL or this_bp[0] == REPLACE:
            x = x - 1
            y = y - 1
        elif this_bp[0] == INSERT:
            y = y - 1
        elif this_bp[0] == DELETE:
            x = x - 1
    opcodes.reverse()
    return opcodes
Given a 2d list structure, collect the opcodes from the best path.
3,227
def add_constraint(self, name, coefficients={}, ub=0):
    if name in self._constraints:
        raise ValueError("A constraint named " + name + " already exists.")
    self._constraints[name] = len(self._constraints)
    self.upper_bounds = np.append(self.upper_bounds, ub)
    new_row = np.array([[coefficients.get(name, 0) for name in self._variables]])
    self._add_row_to_A(new_row)
    self._reset_solution()
Add a constraint to the problem. The constraint is formulated as a dictionary of variable names to linear coefficients. The constraint can only have an upper bound. To make a constraint with a lower bound, multiply all coefficients by -1.
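A short sketch of the upper-bound-only convention described above; the problem object and variable names are hypothetical:

# Express x + 2y >= 3 by negating coefficients and bound: -x - 2y <= -3
problem.add_constraint("lb_demo", coefficients={"x": -1, "y": -2}, ub=-3)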
3,228
def register(self, config_file, contexts, config_template=None):
    self.templates[config_file] = OSConfigTemplate(
        config_file=config_file,
        contexts=contexts,
        config_template=config_template
    )
    log('Registered config file: {}'.format(config_file), level=INFO)
Register a config file with a list of context generators to be called during rendering. config_template can be used to load a template from a string instead of using template loaders and template files. :param config_file (str): a path where a config file will be rendered :param contexts (list): a list of context dictionaries with kv pairs :param config_template (str): an optional template string to use
3,229
def pmdec(self, *args, **kwargs):
    out = self._orb.pmdec(*args, **kwargs)
    if len(out) == 1:
        return out[0]
    else:
        return out
NAME: pmdec PURPOSE: return proper motion in declination (in mas/yr) INPUT: t - (optional) time at which to get pmdec (can be Quantity) obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_dec(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU)
3,230
def register(self, name):
    def register_func(func):
        self.store[name] = func
        return func
    return register_func
Decorator for registering a function with PyPhi. Args: name (string): The name of the function
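A hedged usage sketch of the decorator; the Registry class below is a minimal stand-in mirroring the method in this row:

class Registry:
    def __init__(self):
        self.store = {}

    def register(self, name):
        def register_func(func):
            self.store[name] = func
            return func
        return register_func

registry = Registry()

@registry.register('euclidean')
def euclidean(a, b):
    return abs(a - b)

assert registry.store['euclidean'] is euclidean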
3,231
def get_auth_basic(self):
    username = ''
    password = ''
    auth_header = self.get_header('Authorization')
    if auth_header:
        m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I)
        if m:
            auth_str = Base64.decode(m.group(1))
            username, password = auth_str.split(':', 1)
    return username, password
Return the username and password of a Basic auth header if it exists.
3,232
def set(self, tclass, tnum, tlvt=0, tdata=b''):
    if isinstance(tdata, bytearray):
        tdata = bytes(tdata)
    elif not isinstance(tdata, bytes):
        raise TypeError("tag data must be bytes or bytearray")

    self.tagClass = tclass
    self.tagNumber = tnum
    self.tagLVT = tlvt
    self.tagData = tdata
set the values of the tag.
3,233
def show_linkinfo_output_show_link_info_linkinfo_domain_reachable(self, **kwargs):
    config = ET.Element("config")
    show_linkinfo = ET.Element("show_linkinfo")
    config = show_linkinfo
    output = ET.SubElement(show_linkinfo, "output")
    show_link_info = ET.SubElement(output, "show-link-info")
    linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
    linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
    linkinfo_domain_reachable = ET.SubElement(show_link_info, "linkinfo-domain-reachable")
    linkinfo_domain_reachable.text = kwargs.pop('linkinfo_domain_reachable')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
3,234
def add_exit(self, guard, dst, jk, ip):
    self.irsb.statements.append(Exit(guard, dst.con, jk, ip))
Add an exit out of the middle of an IRSB. (e.g., a conditional jump) :param guard: An expression, the exit is taken if true :param dst: the destination of the exit (a Const) :param jk: the JumpKind of this exit (probably Ijk_Boring) :param ip: The address of this exit's source
3,235
def delay(self, func, args=None, kwargs=None, queue=None,
          hard_timeout=None, unique=None, lock=None, lock_key=None,
          when=None, retry=None, retry_on=None, retry_method=None,
          max_queue_size=None):
    task = Task(self, func, args=args, kwargs=kwargs, queue=queue,
                hard_timeout=hard_timeout, unique=unique,
                lock=lock, lock_key=lock_key, retry=retry,
                retry_on=retry_on, retry_method=retry_method)
    task.delay(when=when, max_queue_size=max_queue_size)
    return task
Queues a task. See README.rst for an explanation of the options.
3,236
def has_code(state, text, pattern=True, not_typed_msg=None):
    if not not_typed_msg:
        if pattern:
            not_typed_msg = "Could not find the correct pattern in your code."
        else:
            not_typed_msg = "Could not find the following text in your code: %r" % text

    student_code = state.student_code

    _msg = state.build_message(not_typed_msg)
    state.do_test(
        StringContainsTest(student_code, text, pattern, Feedback(_msg, state))
    )

    return state
Test the student code. Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``, as it is more robust to small syntactical differences that don't change the code's behavior. Args: text (str): the text that is searched for pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text. not_typed_msg (str): feedback message to be displayed if the student did not type the text. :Example: Student code and solution code:: y = 1 + 2 + 3 SCT:: # Verify that student code contains pattern (not robust!!): Ex().has_code(r"1\\s*\\+2\\s*\\+3")
3,237
def __embed_frond(node_u, node_w, dfs_data, as_branch_marker=False): d_u = D(node_u, dfs_data) d_w = D(node_w, dfs_data) comp_d_w = abs(d_w) if as_branch_marker: d_w *= -1 if dfs_data[] == : __insert_frond_RF(d_w, d_u, dfs_data) else: __insert_frond_LF(d_w, d_u, dfs_data) return True LF = dfs_data[] m = dfs_data[][] l_w = lw(dfs_data) r_w = rw(dfs_data) u_m = u(m, dfs_data) x_m = fn_x(m, dfs_data) case_1 = False case_2 = False case_3 = False if d_u > u_m and d_u > x_m: case_1 = True elif d_u <= u_m and d_u > x_m: case_2 = True elif d_u > u_m and d_u <= x_m: case_3 = True else: return False return False
Embeds a frond uw into either LF or RF. Returns whether the embedding was successful.
3,238
def _sparse_blockify(tuples, dtype=None):
    new_blocks = []
    for i, names, array in tuples:
        array = _maybe_to_sparse(array)
        block = make_block(array, placement=[i])
        new_blocks.append(block)
    return new_blocks
return an array of blocks that potentially have different dtypes (and are sparse)
3,239
def _leftMouseDragged(self, stopCoord, strCoord, speed):
    appPid = self._getPid()
    if strCoord == (0, 0):
        loc = AppKit.NSEvent.mouseLocation()
        strCoord = (loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y)
    appPid = self._getPid()
    pressLeftButton = Quartz.CGEventCreateMouseEvent(
        None, Quartz.kCGEventLeftMouseDown,
        strCoord, Quartz.kCGMouseButtonLeft
    )
    Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, pressLeftButton)
    time.sleep(5)

    speed = round(1 / float(speed), 2)
    xmoved = stopCoord[0] - strCoord[0]
    ymoved = stopCoord[1] - strCoord[1]
    if ymoved == 0:
        raise ValueError()
    else:
        k = abs(ymoved / xmoved)

    if xmoved != 0:
        for xpos in range(int(abs(xmoved))):
            if xmoved > 0 and ymoved > 0:
                currcoord = (strCoord[0] + xpos, strCoord[1] + xpos * k)
            elif xmoved > 0 and ymoved < 0:
                currcoord = (strCoord[0] + xpos, strCoord[1] - xpos * k)
            elif xmoved < 0 and ymoved < 0:
                currcoord = (strCoord[0] - xpos, strCoord[1] - xpos * k)
            elif xmoved < 0 and ymoved > 0:
                currcoord = (strCoord[0] - xpos, strCoord[1] + xpos * k)
            dragLeftButton = Quartz.CGEventCreateMouseEvent(
                None, Quartz.kCGEventLeftMouseDragged,
                currcoord, Quartz.kCGMouseButtonLeft
            )
            Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, dragLeftButton)
            time.sleep(speed)
    else:
        raise ValueError()

    upLeftButton = Quartz.CGEventCreateMouseEvent(
        None, Quartz.kCGEventLeftMouseUp,
        stopCoord, Quartz.kCGMouseButtonLeft
    )
    time.sleep(5)
    Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, upLeftButton)
Private method to handle generic mouse left-button dragging and dropping. Parameters: stopCoord (x, y): drop point. Optional: strCoord (x, y): drag start point; default (0, 0) uses the current mouse position. speed (int): 1 for unthrottled movement; larger values slow the simulated mouse motion. Returns: None
3,240
def tagfunc(nargs=None, ndefs=None, nouts=None):
    def wrapper(f):
        return wraps(f)(FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs))
    return wrapper
Decorator for tagged functions.
3,241
def _parser(): launcher = % sys.version_info.major parser = argparse.ArgumentParser( description= % __description__, epilog= % launcher, prog=launcher) parser.add_argument( , action=, version= + __version__) subparsers = parser.add_subparsers() parser_dependants = subparsers.add_parser( , add_help=False, help=) parser_dependants.add_argument( , metavar=, type=_distribution) parser_dependants.add_argument( , , action=, help=argparse.SUPPRESS) parser_dependants.set_defaults( func=command_dependants) parser_dependents = subparsers.add_parser( , add_help=False, help=) parser_dependents.add_argument( , metavar=, type=_distribution) parser_dependents.add_argument( , , action=, help=) parser_dependents.add_argument( , , action=, help=) parser_dependents.add_argument( , , action=, help=argparse.SUPPRESS) parser_dependents.set_defaults( func=command_dependents) parser_locate = subparsers.add_parser( , add_help=False, help=) parser_locate.add_argument( , metavar=, type=argparse.FileType()) parser_locate.add_argument( , , action=, help=argparse.SUPPRESS) parser_locate.set_defaults( func=command_locate) parser_outdated = subparsers.add_parser( , add_help=False, help=) parser_outdated.add_argument( , , action=, help=) group = parser_outdated.add_mutually_exclusive_group() group.add_argument( , , action=, help=) group.add_argument( , , action=, help=) group.add_argument( , , action=, dest=, help= ) parser_outdated.add_argument( , , action=, help=argparse.SUPPRESS) parser_outdated.set_defaults( func=command_outdated) parser_parents = subparsers.add_parser( , add_help=False, help=) parser_parents.add_argument( , , action=, help=argparse.SUPPRESS) parser_parents.set_defaults( func=command_parents) return parser
Parse command-line options.
3,242
def execute(self): logging.info( % self.project_name) project_csv_meta = self.rws_connection.send_request(ProjectMetaDataRequest(self.project_name)) self.db_adapter.processMetaData(project_csv_meta) for dataset_name in self.db_adapter.datasets.keys(): logging.info( % dataset_name) form_name, _type = self.name_type_from_viewname(dataset_name) form_data = self.rws_connection.send_request( FormDataRequest(self.project_name, self.environment, _type, form_name)) logging.info( % dataset_name) self.db_adapter.processFormData(form_data, dataset_name) logging.info()
Generate local DB, pulling metadata and data from RWSConnection
3,243
def edit_account_info(self, short_name=None, author_name=None, author_url=None):
    return self._telegraph.method('editAccountInfo', values={
        'short_name': short_name,
        'author_name': author_name,
        'author_url': author_url
    })
Update information about a Telegraph account. Pass only the parameters that you want to edit :param short_name: Account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name :param author_name: Default author name used when creating new articles :param author_url: Default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channels
3,244
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
    LOG.info("Fetching OMIM files from https://omim.org/")
    mim2genes_url = 'https://omim.org/static/omim/data/mim2gene.txt'
    mimtitles_url = 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
    morbidmap_url = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
    genemap2_url = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)

    mim_files = {}
    mim_urls = {}

    if mim2genes is True:
        mim_urls['mim2genes'] = mim2genes_url
    if mimtitles is True:
        mim_urls['mimtitles'] = mimtitles_url
    if morbidmap is True:
        mim_urls['morbidmap'] = morbidmap_url
    if genemap2 is True:
        mim_urls['genemap2'] = genemap2_url

    for file_name in mim_urls:
        url = mim_urls[file_name]
        mim_files[file_name] = fetch_resource(url)

    return mim_files
Fetch the necessary mim files using an api key Args: api_key(str): An api key necessary to fetch mim data Returns: mim_files(dict): A dictionary with the necessary files
3,245
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
                      key_spec=None, grant_tokens=None, region=None, key=None,
                      keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    r = {}
    try:
        data_key = conn.generate_data_key(
            key_id,
            encryption_context=encryption_context,
            number_of_bytes=number_of_bytes,
            key_spec=key_spec,
            grant_tokens=grant_tokens
        )
        r['data_key'] = data_key
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
Generate a secure data key. CLI example:: salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
3,246
def _get_axis_bounds(self, dim, bunch): if dim in self.attributes: vmin, vmax = bunch[] assert vmin is not None assert vmax is not None return vmin, vmax return (-1. / self.scaling, +1. / self.scaling)
Return the min/max of an axis.
3,247
def load_remote_db(self): signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4") s3 = boto3.resource( , config=botocore.client.Config(signature_version=signature_version), ) if not in self.settings_dict[]: try: etag = if os.path.isfile( + self.settings_dict[]): m = hashlib.md5() with open( + self.settings_dict[], ) as f: m.update(f.read()) etag = m.hexdigest() obj = s3.Object(self.settings_dict[], self.settings_dict[]) obj_bytes = obj.get(IfNoneMatch=etag)["Body"] with open( + self.settings_dict[], ) as f: f.write(obj_bytes.read()) m = hashlib.md5() with open( + self.settings_dict[], ) as f: m.update(f.read()) self.db_hash = m.hexdigest() except botocore.exceptions.ClientError as e: if e.response[][] == "304": logging.debug("ETag matches md5 of local copy, using local copy of DB!") self.db_hash = etag else: logging.debug("Couldn/tmp/NAMEREMOTE_NAMENAMENAME/tmp/NAMEt yet if not os.path.isfile(self.settings_dict[]): open(self.settings_dict[], ).close() logging.debug("Loaded remote DB!")
Load remote S3 DB
3,248
def _query_ned_and_add_results_to_database( self, batchCount): self.log.debug( ) tableName = self.dbTableName converter = unit_conversion( log=self.log ) totalCount = len(self.theseIds) print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals() search = namesearch( log=self.log, names=self.theseIds.keys(), quiet=True ) results = search.get() print "results returned from ned -- starting to add to database" % locals() totalCount = len(results) count = 0 sqlQuery = "" dictList = [] colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter", "ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"] if not len(results): for k, v in self.theseIds.iteritems(): dictList.append({ "in_ned": 0, "primaryID": v }) for thisDict in results: thisDict["tableName"] = tableName count += 1 for k, v in thisDict.iteritems(): if not v or len(v) == 0: thisDict[k] = "null" if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v): thisDict[k] = v.replace(":", "").replace( "?", "").replace("<", "") if isinstance(v, str) and in v: thisDict[k] = v.replace(, ) if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]: if thisDict["ra"] != "null" and thisDict["dec"] != "null": thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal( ra=thisDict["ra"] ) thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal( dec=thisDict["dec"] ) else: thisDict["raDeg"] = None thisDict["decDeg"] = None thisDict["in_ned"] = 1 thisDict["eb_v"] = thisDict["eb-v"] row = {} row["primary_ned_id"] = thisDict["input_name"] try: row["primaryID"] = self.theseIds[thisDict["input_name"]] for c in colList: if thisDict[c] == "null": row[c] = None else: row[c] = thisDict[c] dictList.append(row) except: g = thisDict["input_name"] self.log.error( "Cannot find database table %(tableName)s primaryID for \n\n" % locals()) dictList.append({ "in_ned": 0, "primary_ned_id": thisDict["input_name"] }) else: dictList.append({ "primary_ned_id": thisDict["input_name"], "in_ned": 0, "primaryID": self.theseIds[thisDict["input_name"]] }) self.log.debug( ) return dictList
query ned and add results to database **Key Arguments:** - ``batchCount`` - the index number of the batch sent to NED .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
3,249
def list_fonts():
    vals = _list_fonts()
    for font in _vispy_fonts:
        vals += [font] if font not in vals else []
    vals = sorted(vals, key=lambda s: s.lower())
    return vals
List system fonts Returns ------- fonts : list of str List of system fonts.
3,250
def circ_corrcl(x, y, tail='two-sided'):
    from scipy.stats import pearsonr, chi2
    x = np.asarray(x)
    y = np.asarray(y)
    if x.size != y.size:
        raise ValueError('x and y must have the same length.')

    x, y = remove_na(x, y, paired=True)
    n = x.size

    rxs = pearsonr(y, np.sin(x))[0]
    rxc = pearsonr(y, np.cos(x))[0]
    rcs = pearsonr(np.sin(x), np.cos(x))[0]

    r = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))

    pval = chi2.sf(n * r**2, 2)
    pval = pval / 2 if tail == 'one-sided' else pval
    return np.round(r, 3), pval
Correlation coefficient between one circular and one linear random variable. Parameters ---------- x : np.array First circular variable (expressed in radians) y : np.array Second variable (linear) tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- r : float Correlation coefficient pval : float Uncorrected p-value Notes ----- Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats) Please note that NaN are automatically removed from datasets. Examples -------- Compute the r and p-value between one circular and one linear variables. >>> from pingouin import circ_corrcl >>> x = [0.785, 1.570, 3.141, 0.839, 5.934] >>> y = [1.593, 1.291, -0.248, -2.892, 0.102] >>> r, pval = circ_corrcl(x, y) >>> print(r, pval) 0.109 0.9708899750629236
3,251
def process(self):
    if WINDOWS:
        select_inputs = []
        for i in self.inputs:
            if not isinstance(i, SelectableObject):
                warning("Unknown ignored object type: %s", type(i))
            elif i.__selectable_force_select__:
                select_inputs.append(i)
            elif not self.remain and i.check_recv():
                self.results.append(i)
            else:
                i.wait_return(self._exit_door)
        if select_inputs:
            self.results.extend(select(select_inputs, [], [], self.remain)[0])
        if not self.remain:
            return self.results
        threading.Thread(target=self._timeout_thread, args=(self.remain,)).start()
        if not self._ended:
            self.available_lock.acquire()
        return self.results
    else:
        r, _, _ = select(self.inputs, [], [], self.remain)
        return r
Entry point of SelectableSelector
3,252
def create(cls, name, template=None):
    command = ['lxc-create', '-n', name]
    if template:
        command.extend(['-t', template])
    subwrap.run(command)
Creates an LXC container.
3,253
def filter(self, scored_list):
    top_n_key = -1 * self.top_n
    top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
    result_list = sorted(top_n_list, key=lambda x: x[0])
    return result_list
Filtering with top-n ranking. Args: scored_list: The list of scoring. Returns: The list of filtered result.
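A worked example of the two-pass sort above, shown standalone with hypothetical (position, score) pairs and top_n=2:

top_n = 2
scored = [(0, 0.9), (1, 0.1), (2, 0.7), (3, 0.4)]
top = sorted(scored, key=lambda x: x[1])[-top_n:]   # keep the two highest scores
result = sorted(top, key=lambda x: x[0])            # restore positional order
assert result == [(0, 0.9), (2, 0.7)]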
3,254
def content(self, content):
    allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
    if not set(content).issubset(set(allowed_values)):
        raise ValueError(
            "Invalid values for `content` [{0}], must be a subset of [{1}]"
            .format(", ".join(map(str, set(content) - set(allowed_values))),
                    ", ".join(map(str, allowed_values)))
        )
    self._content = content
Sets the content of this SupportLevelPage. :param content: The content of this SupportLevelPage. :type: list[str]
3,255
def get_tabs(self, request, **kwargs):
    if self._tab_group is None:
        self._tab_group = self.tab_group_class(request, **kwargs)
    return self._tab_group
Returns the initialized tab group for this view.
3,256
def remove_dashboard_tag(self, id, tag_value, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs)
    else:
        (data) = self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs)
        return data
Remove a tag from a specific dashboard # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str tag_value: (required) :return: ResponseContainer If the method is called asynchronously, returns the request thread.
3,257
async def write(self, item):
    await self._queue.put(item)
    self._can_read.set()
    if self._queue.full():
        self._can_write.clear()
Write an item in the queue. :param item: The item.
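A self-contained sketch of the flow-control pattern implied above; the event names come from this row, while the class wrapper and maxsize are assumptions:

import asyncio

class FlowControlledQueue:
    def __init__(self, maxsize=2):
        self._queue = asyncio.Queue(maxsize=maxsize)
        self._can_read = asyncio.Event()
        self._can_write = asyncio.Event()
        self._can_write.set()

    async def write(self, item):
        await self._queue.put(item)
        self._can_read.set()          # readers may now proceed
        if self._queue.full():
            self._can_write.clear()   # block writers until a read drains the queue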
3,258
def byname(nlist):
    warnings.warn("The internal byname() function has been deprecated, with "
                  "no replacement.",
                  DeprecationWarning,
                  stacklevel=_stacklevel_above_module(__name__))
    return OrderedDict([(x.name, x) for x in nlist])
**Deprecated:** Convert a list of named objects into an ordered dictionary indexed by name. This function is internal and has been deprecated in pywbem 0.12.
3,259
def iter_chunks_class(self):
    for m in self.get_metadata():
        try:
            yield self.chunkclass(self.get_chunk(m.x, m.z))
        except RegionFileFormatError:
            pass
Yield each readable chunk present in the region. Chunks that can not be read for whatever reason are silently skipped. This function returns a :class:`nbt.chunk.Chunk` instance.
3,260
def _win32_read_junction(path): if not jwfs.is_reparse_point(path): raise ValueError() handle = jwfs.api.CreateFile( path, 0, 0, None, jwfs.api.OPEN_EXISTING, jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT | jwfs.api.FILE_FLAG_BACKUP_SEMANTICS, None) if handle == jwfs.api.INVALID_HANDLE_VALUE: raise WindowsError() res = jwfs.reparse.DeviceIoControl( handle, jwfs.api.FSCTL_GET_REPARSE_POINT, None, 10240) bytes = jwfs.create_string_buffer(res) p_rdb = jwfs.cast(bytes, jwfs.POINTER(jwfs.api.REPARSE_DATA_BUFFER)) rdb = p_rdb.contents if rdb.tag not in [2684354563, jwfs.api.IO_REPARSE_TAG_SYMLINK]: raise RuntimeError( "Expected <2684354563 or 2684354572>, but got %d" % rdb.tag) jwfs.handle_nonzero_success(jwfs.api.CloseHandle(handle)) subname = rdb.get_substitute_name() if subname.startswith(): subname = subname[2:] return subname
Returns the location that the junction points, raises ValueError if path is not a junction. CommandLine: python -m ubelt._win32_links _win32_read_junction Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction') >>> ub.delete(root) >>> ub.ensuredir(root) >>> dpath = join(root, 'dpath') >>> djunc = join(root, 'djunc') >>> ub.ensuredir(dpath) >>> _win32_junction(dpath, djunc) >>> path = djunc >>> pointed = _win32_read_junction(path) >>> print('pointed = {!r}'.format(pointed))
3,261
def from_json_file(file: TextIO, check_version=True) -> BELGraph:
    graph_json_dict = json.load(file)
    return from_json(graph_json_dict, check_version=check_version)
Build a graph from the Node-Link JSON contained in the given file.
3,262
def _generic_signal_handler(self, signal_type):
    print("</pre>")
    message = "Got " + signal_type + ". Failing gracefully..."
    self.timestamp(message)
    self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
    sys.exit(1)
Function for handling both SIGTERM and SIGINT
3,263
def stop_playback(self):
    self._sink.flush()
    self._sink.stop()
    self._playing = False
Stop playback from the audio sink.
3,264
def __setAsOrphaned(self):
    cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
    cmplMessage = "Killed by Scheduler"
    self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)
Sets the current model as orphaned. This is called when the scheduler is about to kill the process to reallocate the worker to a different process.
3,265
def abort(self, jobs=None, targets=None, block=None):
    block = self.block if block is None else block
    jobs = jobs if jobs is not None else list(self.outstanding)
    targets = self._build_targets(targets)[0]

    msg_ids = []
    if isinstance(jobs, (basestring, AsyncResult)):
        jobs = [jobs]
    bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
    if bad_ids:
        raise TypeError("Invalid msg_id type %r, expected str or AsyncResult" % bad_ids[0])
    for j in jobs:
        if isinstance(j, AsyncResult):
            msg_ids.extend(j.msg_ids)
        else:
            msg_ids.append(j)

    content = dict(msg_ids=msg_ids)
    for t in targets:
        self.session.send(self._control_socket, 'abort_request',
                          content=content, ident=t)
    error = False
    if block:
        self._flush_ignored_control()
        for i in range(len(targets)):
            idents, msg = self.session.recv(self._control_socket, 0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    else:
        self._ignored_control_replies += len(targets)
    if error:
        raise error
Abort specific jobs from the execution queues of target(s). This is a mechanism to prevent jobs that have already been submitted from executing. Parameters ---------- jobs : msg_id, list of msg_ids, or AsyncResult The jobs to be aborted If unspecified/None: abort all outstanding jobs.
3,266
def clean_egginfo(self):
    dir_name = os.path.join(self.root, self.get_egginfo_dir())
    self._clean_directory(dir_name)
Clean .egginfo directory
3,267
def Run(self):
    "Execute the action"
    inputs = self.GetInput()
    return SendInput(len(inputs), ctypes.byref(inputs), ctypes.sizeof(INPUT))
Execute the action
3,268
def FindEnumTypeByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._enum_descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._enum_descriptors[full_name]
Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type.
3,269
def _scipy_distribution_positional_args_from_dict(distribution, params):
    params['loc'] = params.get('loc', 0)
    if 'scale' not in params:
        params['scale'] = 1

    if distribution == 'norm':
        return params['mean'], params['std_dev']
    elif distribution == 'beta':
        return params['alpha'], params['beta'], params['loc'], params['scale']
    elif distribution == 'gamma':
        return params['alpha'], params['loc'], params['scale']
    elif distribution == 'uniform':
        return params['min'], params['max']
    elif distribution == 'chi2':
        return params['df'], params['loc'], params['scale']
    elif distribution == 'expon':
        return params['loc'], params['scale']
Helper function that returns positional arguments for a scipy distribution using a dict of parameters. See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\ to see an example of scipy's positional arguments. This function returns the arguments specified by the \ scipy.stat.distribution.cdf() for tha distribution. Args: distribution (string): \ The scipy distribution name. params (dict): \ A dict of named parameters. Raises: AttributeError: \ If an unsupported distribution is provided.
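A hedged usage sketch: scipy.stats.norm.cdf accepts (x, loc, scale) positionally, so the tuple returned for the 'norm' branch above can be splatted in after x; the parameter values here are hypothetical:

from scipy import stats

params = {'mean': 0.0, 'std_dev': 1.0}
args = _scipy_distribution_positional_args_from_dict('norm', params)
p = stats.norm.cdf(1.96, *args)   # args == (0.0, 1.0); p is about 0.975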
3,270
def source_start(base='.', book_id=''):
    repo_htm_path = "{book_id}-h/{book_id}-h.htm".format(book_id=book_id)

    possible_paths = ["book.asciidoc",
                      repo_htm_path,
                      "{}-0.txt".format(book_id),
                      "{}-8.txt".format(book_id),
                      "{}.txt".format(book_id),
                      "{}-pdf.pdf".format(book_id),
                      ]

    for path in possible_paths:
        fullpath = os.path.join(base, path)
        if os.path.exists(fullpath):
            return path
    return None
chooses a starting source file in the 'base' directory for id = book_id
3,271
def set_data(self, capacity, voltage=None,
             capacity_label="q", voltage_label="v"):
    logging.debug("setting data (capacity and voltage)")
    if isinstance(capacity, pd.DataFrame):
        logging.debug("received a pandas.DataFrame")
        self.capacity = capacity[capacity_label]
        self.voltage = capacity[voltage_label]
    else:
        assert len(capacity) == len(voltage)
        self.capacity = capacity
        self.voltage = voltage
Set the data
3,272
def filter_kwargs(_function, *args, **kwargs):
    if has_kwargs(_function):
        return _function(*args, **kwargs)
    func_code = six.get_function_code(_function)
    function_args = func_code.co_varnames[:func_code.co_argcount]
    filtered_kwargs = {}
    for kwarg, value in list(kwargs.items()):
        if kwarg in function_args:
            filtered_kwargs[kwarg] = value
    return _function(*args, **filtered_kwargs)
Given a function and args and keyword args to pass to it, call the function but using only the keyword arguments which it accepts. This is equivalent to redefining the function with an additional \*\*kwargs to accept slop keyword args. If the target function already accepts \*\*kwargs parameters, no filtering is performed. Parameters ---------- _function : callable Function to call. Can take in any number of args or kwargs
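A small usage sketch, assuming filter_kwargs and has_kwargs from this row are importable; plot() is a hypothetical target function:

def plot(x, color='k'):
    return (x, color)

# 'linewidth' is not a parameter of plot(), so it is silently dropped:
assert filter_kwargs(plot, 1, color='r', linewidth=2) == (1, 'r')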
3,273
def stoch(df, window=14, d=3, k=3, fast=False):
    my_df = pd.DataFrame(index=df.index)

    my_df['rolling_max'] = df['high'].rolling(window).max()
    my_df['rolling_min'] = df['low'].rolling(window).min()

    my_df['fast_k'] = 100 * (df['close'] - my_df['rolling_min']) / \
        (my_df['rolling_max'] - my_df['rolling_min'])
    my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()
    if fast:
        return my_df.loc[:, ['fast_k', 'fast_d']]

    my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()
    my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()
    return my_df.loc[:, ['slow_k', 'slow_d']]
compute the n-period stochastic oscillator http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html
3,274
def NetFxSDKIncludes(self):
    if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
        return []

    return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
Microsoft .Net Framework SDK Includes
3,275
def parallel_part(data, parallel):
    if parallel is None or "SGE_TASK_ID" not in os.environ:
        return data

    data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
    task_id = int(os.environ['SGE_TASK_ID'])
    first = (task_id - 1) * data_per_job
    last = min(len(data), task_id * data_per_job)
    return data[first:last]
parallel_part(data, parallel) -> part Splits off samples from the the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable. **Parameters:** ``data`` : [object] A list of data that should be split up into ``parallel`` parts ``parallel`` : int or ``None`` The total number of parts, in which the data should be split into **Returns:** ``part`` : [object] The desired partition of the ``data``
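A worked example of the SGE_TASK_ID splitting described above; the environment value is simulated and parallel_part is the function from this row:

import os

data = list(range(10))
os.environ['SGE_TASK_ID'] = '2'          # pretend to be job 2 of 3
part = parallel_part(data, parallel=3)   # ceil(10/3) == 4 items per job
assert part == [4, 5, 6, 7]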
3,276
def hist(sample, options={}, **kwargs):
    kwargs['sample'] = sample
    scales = kwargs.pop('scales', {})
    if 'count' not in scales:
        dimension = _get_attribute_dimension('count', Hist)
        if dimension in _context['scales']:
            scales['count'] = _context['scales'][dimension]
        else:
            scales['count'] = LinearScale(**options.get('count', {}))
            _context['scales'][dimension] = scales['count']
    kwargs['scales'] = scales
    return _draw_mark(Hist, options=options, **kwargs)
Draw a histogram in the current context figure. Parameters ---------- sample: numpy.ndarray, 1d The sample for which the histogram must be generated. options: dict (default: {}) Options for the scales to be created. If a scale labeled 'counts' is required for that mark, options['counts'] contains optional keyword arguments for the constructor of the corresponding scale type. axes_options: dict (default: {}) Options for the axes to be created. If an axis labeled 'counts' is required for that mark, axes_options['counts'] contains optional keyword arguments for the constructor of the corresponding axis type.
3,277
def has_started(self):
    timeout = False
    auth_in_progress = False
    if self._handler._connection.cbs:
        timeout, auth_in_progress = self._handler._auth.handle_token()
    if timeout:
        raise EventHubError("Authorization timeout.")
    if auth_in_progress:
        return False
    if not self._handler._client_ready():
        return False
    return True
Whether the handler has completed all start up processes such as establishing the connection, session, link and authentication, and is now ready to process messages. **This function is now deprecated and will be removed in v2.0+.** :rtype: bool
3,278
def keyword(
    name: str,
    ns: Optional[str] = None,
    kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN,
) -> Keyword:
    h = hash((name, ns))
    return kw_cache.swap(__get_or_create, h, name, ns)[h]
Create a new keyword.
3,279
async def edit_2fa(
        self, current_password=None, new_password=None,
        *, hint='', email=None, email_code_callback=None):
    if new_password is None and current_password is None:
        return False

    if email and not callable(email_code_callback):
        raise ValueError('email present without email_code_callback')

    pwd = await self(functions.account.GetPasswordRequest())
    pwd.new_algo.salt1 += os.urandom(32)
    assert isinstance(pwd, types.account.Password)
    if not pwd.has_password and current_password:
        current_password = None

    if current_password:
        password = pwd_mod.compute_check(pwd, current_password)
    else:
        password = types.InputCheckPasswordEmpty()

    if new_password:
        new_password_hash = pwd_mod.compute_digest(
            pwd.new_algo, new_password)
    else:
        new_password_hash = b''

    try:
        await self(functions.account.UpdatePasswordSettingsRequest(
            password=password,
            new_settings=types.account.PasswordInputSettings(
                new_algo=pwd.new_algo,
                new_password_hash=new_password_hash,
                hint=hint,
                email=email,
                new_secure_settings=None
            )
        ))
    except errors.EmailUnconfirmedError as e:
        code = email_code_callback(e.code_length)
        if inspect.isawaitable(code):
            code = await code

        code = str(code)
        await self(functions.account.ConfirmPasswordEmailRequest(code))

    return True
Changes the 2FA settings of the logged in user, according to the passed parameters. Take note of the parameter explanations. Note that this method may be *incredibly* slow depending on the prime numbers that must be used during the process to make sure that everything is safe. Has no effect if both current and new password are omitted. current_password (`str`, optional): The current password, to authorize changing to ``new_password``. Must be set if changing existing 2FA settings. Must **not** be set if 2FA is currently disabled. Passing this by itself will remove 2FA (if correct). new_password (`str`, optional): The password to set as 2FA. If 2FA was already enabled, ``current_password`` **must** be set. Leaving this blank or ``None`` will remove the password. hint (`str`, optional): Hint to be displayed by Telegram when it asks for 2FA. Leaving unspecified is highly discouraged. Has no effect if ``new_password`` is not set. email (`str`, optional): Recovery and verification email. If present, you must also set `email_code_callback`, else it raises ``ValueError``. email_code_callback (`callable`, optional): If an email is provided, a callback that returns the code sent to it must also be set. This callback may be asynchronous. It should return a string with the code. The length of the code will be passed to the callback as an input parameter. If the callback returns an invalid code, it will raise ``CodeInvalidError``. Returns: ``True`` if successful, ``False`` otherwise.
3,280
def _init(self): group_stack = [self] clip_stack = [] last_layer = None for record, channels in self._record._iter_layers(): current_group = group_stack[-1] blocks = record.tagged_blocks end_of_group = False divider = blocks.get_data(, None) divider = blocks.get_data(, divider) if divider is not None: if divider.kind == SectionDivider.BOUNDING_SECTION_DIVIDER: layer = Group(self, None, None, current_group) group_stack.append(layer) elif divider.kind in (SectionDivider.OPEN_FOLDER, SectionDivider.CLOSED_FOLDER): layer = group_stack.pop() assert layer is not self layer._record = record layer._channels = channels for key in ( , , ): if key in blocks: layer = Artboard._move(layer) end_of_group = True elif ( in blocks or in blocks ): layer = TypeLayer(self, record, channels, current_group) elif ( record.flags.pixel_data_irrelevant and ( in blocks or in blocks or in blocks or in blocks or in blocks ) ): layer = ShapeLayer(self, record, channels, current_group) elif ( in blocks or in blocks or in blocks or in blocks ): layer = SmartObjectLayer(self, record, channels, current_group) else: layer = None for key in adjustments.TYPES.keys(): if key in blocks: layer = adjustments.TYPES[key]( self, record, channels, current_group ) break if layer is None: layer = PixelLayer( self, record, channels, current_group ) if record.clipping == Clipping.NON_BASE: clip_stack.append(layer) else: if clip_stack: last_layer._clip_layers = clip_stack clip_stack = [] if not end_of_group: current_group._layers.append(layer) last_layer = layer if clip_stack and last_layer: last_layer._clip_layers = clip_stack
Initialize layer structure.
3,281
def run_one(self, set_title=False): s title with the work unit name :return: :const:`True` if there was a job (even if it failed) No work to do; stopping.re already quitting everything, but this is weirdly bad. logger.error(, xunit.work_spec_name, xunit.key, exc_info=True) else: ok = self._run_unit(xunit, set_title) return ok return self._run_unit(unit)
Get exactly one job, run it, and return. Does nothing (but returns :const:`False`) if there is no work to do. Ignores the global mode; this will do work even if :func:`rejester.TaskMaster.get_mode` returns :attr:`~rejester.TaskMaster.TERMINATE`. :param set_title: if true, set the process's title with the work unit name :return: :const:`True` if there was a job (even if it failed)
3,282
def encode(self, word, max_length=-1, keep_vowels=False, vowel_char=): r word = unicode_normalize(, text_type(word.upper())) word = word.replace(, ) word = .join(c for c in word if c in self._uc_set) if word[:3] in {, , }: word = + word[3:] elif word[:2] == : word = + word[2:] pos = len(word) - 2 while pos > -1: if word[pos : pos + 2] in { , , , , , , , , , , }: word = word[: pos + 1] + word[pos + 2 :] pos += 1 pos -= 1 word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) pos = word.find(, 1) while pos != -1: if word[pos - 1 : pos] not in self._uc_vy_set: word = word[:pos] + + word[pos + 1 :] pos = word.find(, pos + 1) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) pos = word.find(, 0) while pos != -1: if pos > 1 and word[pos - 1 : pos] not in self._uc_vy_set | { , , , }: word = word[: pos - 1] + word[pos:] pos -= 1 pos = word.find(, pos + 1) if max_length > 0 and word[-1:] == : word = word[:-1] word = self._delete_consecutive_repeats(word) if word[:2] == : word = word[1:] if word[-2:] == : word = word[:-1] elif word[-2:] == : if word[-3:-2] in self._uc_vy_set: word = word[:-2] + else: word = word[:-2] + word = word.replace(, ) if max_length > 0: word = word.replace(, ) first = 1 + (1 if max_length > 0 else 0) code = for pos, char in enumerate(word): if char in self._uc_vy_set: if first or keep_vowels: code += vowel_char first -= 1 elif pos > 0 and char in {, }: continue else: code += char if max_length > 0: if len(code) > max_length and code[-1:] == : code = code[:-1] if keep_vowels: code = code[:max_length] else: code = code[: max_length + 2] while len(code) > max_length: vowels = len(code) - max_length excess = vowels - 1 word = code code = for char in word: if char == vowel_char: if vowels: code += char vowels -= 1 else: code += char code = code[: max_length + excess] code += * (max_length - len(code)) return code
r"""Return the Dolby Code of a name. Parameters ---------- word : str The word to transform max_length : int Maximum length of the returned Dolby code -- this also activates the fixed-length code mode if it is greater than 0 keep_vowels : bool If True, retains all vowel markers vowel_char : str The vowel marker character (default to \*) Returns ------- str The Dolby Code Examples -------- >>> pe = Dolby() >>> pe.encode('Hansen') 'H*NSN' >>> pe.encode('Larsen') 'L*RSN' >>> pe.encode('Aagaard') '*GR' >>> pe.encode('Braaten') 'BR*DN' >>> pe.encode('Sandvik') 'S*NVK' >>> pe.encode('Hansen', max_length=6) 'H*NS*N' >>> pe.encode('Larsen', max_length=6) 'L*RS*N' >>> pe.encode('Aagaard', max_length=6) '*G*R ' >>> pe.encode('Braaten', max_length=6) 'BR*D*N' >>> pe.encode('Sandvik', max_length=6) 'S*NF*K' >>> pe.encode('Smith') 'SM*D' >>> pe.encode('Waters') 'W*DRS' >>> pe.encode('James') 'J*MS' >>> pe.encode('Schmidt') 'SM*D' >>> pe.encode('Ashcroft') '*SKRFD' >>> pe.encode('Smith', max_length=6) 'SM*D ' >>> pe.encode('Waters', max_length=6) 'W*D*RS' >>> pe.encode('James', max_length=6) 'J*M*S ' >>> pe.encode('Schmidt', max_length=6) 'SM*D ' >>> pe.encode('Ashcroft', max_length=6) '*SKRFD'
3,283
def insert_before(self, value: Union[RawValue, Value], raw: bool = False) -> "ArrayEntry": return ArrayEntry(self.index, self.before, self.after.cons(self.value), self._cook_value(value, raw), self.parinst, self.schema_node, datetime.now())
Insert a new entry before the receiver. Args: value: The value of the new entry. raw: Flag to be set if `value` is raw. Returns: An instance node of the new inserted entry.
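A minimal call sketch, assuming `entry` is an existing yangson ArrayEntry inside a list of integers; the value 42 and raw=True are illustrative only.

new_entry = entry.insert_before(42, raw=True)
# The new entry takes over the receiver's index; the receiver is pushed
# into the new entry's "after" chain.
print(new_entry.index == entry.index)   # True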
3,284
def fetch_credential(self, credential=None, profile=None): q = self.db.get(self.query.profile == profile) if q is not None: return q.get(credential)
Fetch credential from credentials file. Args: credential (str): Credential to fetch. profile (str): Credentials profile. Defaults to ``'default'``. Returns: str, None: Fetched credential or ``None``.
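A hedged sketch of fetching a credential from a TinyDB-backed store. The class name CredentialStore and its constructor are assumptions; only fetch_credential's signature comes from the source.

store = CredentialStore()                    # hypothetical constructor
token = store.fetch_credential(credential='api_key', profile='default')
if token is None:
    raise KeyError('api_key not found in profile "default"')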
3,285
def flow_ramp(self): return np.linspace(1 / self.n_rows, 1, self.n_rows)*self.q
An equally spaced array representing flow at each row.
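A worked example of the formula, with hypothetical values n_rows=4 and q=8.0 standing in for the instance attributes:

import numpy as np

n_rows, q = 4, 8.0
# np.linspace(1/4, 1, 4) == [0.25, 0.5, 0.75, 1.0], then scaled by q.
print(np.linspace(1 / n_rows, 1, n_rows) * q)   # [2. 4. 6. 8.]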
3,286
def read_certificate_signing_request(self, name, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_certificate_signing_request_with_http_info(name, **kwargs)
    else:
        (data) = self.read_certificate_signing_request_with_http_info(name, **kwargs)
        return data
read_certificate_signing_request # noqa: E501 read the specified CertificateSigningRequest # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_certificate_signing_request(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CertificateSigningRequest (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1CertificateSigningRequest If the method is called asynchronously, returns the request thread.
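A hedged usage sketch for the kubernetes Python client; working cluster access and the CSR name 'my-csr' are assumptions.

from kubernetes import client, config

config.load_kube_config()
api = client.CertificatesV1beta1Api()

csr = api.read_certificate_signing_request('my-csr', pretty='true')
print(csr.metadata.name)

# Asynchronous variant: returns a thread whose get() yields the object.
thread = api.read_certificate_signing_request('my-csr', async_req=True)
csr = thread.get()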
3,287
def remove_entity(self, name): self.entities.remove(name) self.padaos.remove_entity(name)
Unload an entity
3,288
def serialzeValueToTCL(self, val, do_eval=False) -> Tuple[str, str, bool]: if isinstance(val, int): val = hInt(val) if do_eval: val = val.staticEval() if isinstance(val, RtlSignalBase): ctx = VivadoTclExpressionSerializer.getBaseContext() tclVal = VivadoTclExpressionSerializer.asHdl(val, ctx) tclValVal = VivadoTclExpressionSerializer.asHdl( val.staticEval()) return tclVal, tclValVal, False else: tclVal = VivadoTclExpressionSerializer.asHdl(val, None) return tclVal, tclVal, True
:see: doc of method on parent class
3,289
def set_app_id(self, id, version, icon): return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon))
Sets some meta-information about the application. See also L{set_user_agent}(). @param id: Java-style application identifier, e.g. "com.acme.foobar". @param version: application version numbers, e.g. "1.2.3". @param icon: application icon name, e.g. "foobar". @version: LibVLC 2.1.0 or later.
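A short sketch with python-vlc: set_app_id is a method of vlc.Instance (LibVLC >= 2.1), and the identifiers below are illustrative only.

import vlc

instance = vlc.Instance()
instance.set_user_agent('FooBar player', 'FooBar/1.2.3')
instance.set_app_id('com.acme.foobar', '1.2.3', 'foobar')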
3,290
def expect_re(regexp, buf, pos): match = regexp.match(buf, pos) if not match: return None, len(buf) return buf[match.start(1):match.end(1)], match.end(0)
Require a regular expression at the current buffer position.
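A small usage sketch, assuming expect_re is in scope: the regexp is expected to carry one capturing group; group 1 is returned and the scan position advances past the whole match.

import re

WORD = re.compile(r'\s*([A-Za-z]+)')
token, pos = expect_re(WORD, '  hello world', 0)
print(token, pos)   # 'hello' 7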
3,291
def get_queryset(self):
    queryset = super(PageList, self).get_queryset()
    if hasattr(self.request, 'site_id'):
        queryset = queryset.filter(site_id=self.request.site_id)
    return queryset
If MultiTenantMiddleware is used, filter queryset by request.site_id
3,292
def write(self, location=None):
    if location is not None:
        self.location = location
    assert self.location

    # Find an I/O backend that handles this location.
    for io in self.editor.io_backends:
        if io.can_open_location(self.location):
            break
    else:
        # message restored (assumption)
        self.editor.show_message('Unknown location: %r' % location)

    try:
        io.write(self.location, self.buffer.text + '\n', self.encoding)
        self.is_new = False
    except Exception as e:
        # E.g. "No such file or directory."
        self.editor.show_message('%s' % e)
    else:
        self._file_content = self.buffer.text
Write file to I/O backend.
3,293
def move_forward(columns=1, file=sys.stdout): move.forward(columns).write(file=file)
Move the cursor forward a number of columns. Esc[<columns>C: Moves the cursor forward by the specified number of columns without changing lines. If the cursor is already in the rightmost column, ANSI.SYS ignores this sequence.
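A usage sketch, assuming move_forward is in scope: emit the ANSI "cursor forward" sequence between two writes.

import sys

print('X', end='', flush=True)
move_forward(4)                 # cursor jumps 4 columns to the right
print('Y', file=sys.stdout)     # Y lands 4 columns after X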
3,294
def wait_until(predicate, success_description, timeout=10): start = time.time() while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(0.1)
Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value.
3,295
def clear_all():
    sure = input("Are you sure you want to drop the complete database "
                 "content? (Type in uppercase YES)")
    if not (sure == 'YES'):
        # message restored (assumption)
        db_log('Not dropping anything.')
        sys.exit(5)

    client = pymongo.MongoClient(host=dbhost, port=dbport)
    db = client[dbname]

    for col in db.collection_names(include_system_collections=False):
        db_log("Dropping collection ", col, lvl=warn)
        db.drop_collection(col)
DANGER! *This command is a maintenance tool and clears the complete database.*
3,296
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
                                            shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss
Trains the model.
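A hedged sketch of calling train() with toy data; square_loss and get_rmse_log are helpers from the same tutorial and must be in scope, and net can be any gluon model, e.g. a one-layer Dense network.

from mxnet import nd
from mxnet.gluon import nn

net = nn.Sequential()
net.add(nn.Dense(1))

X_train = nd.random.normal(shape=(100, 5))
y_train = nd.random.normal(shape=(100, 1))

final_rmse = train(net, X_train, y_train, epochs=10, verbose_epoch=5,
                   learning_rate=0.01, weight_decay=0.0, batch_size=16)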
3,297
def main_make_views(gtfs_fname): print("creating views") conn = GTFS(fname_or_conn=gtfs_fname).conn for L in Loaders: L(None).make_views(conn) conn.commit()
Re-create all views.
3,298
async def get_blueprint_params(request, left: int, right: int) -> str: res = left * right return "{left}*{right}={res}".format(left=left, right=right, res=res)
API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/).
3,299
def lookup_defs(self, variable, size_threshold=32):
    live_def_locs = set()
    if isinstance(variable, SimRegisterVariable):
        if variable.reg is None:
            # log message restored (assumption)
            l.warning('Got a SimRegisterVariable whose register offset is '
                      'None. Skipping.')
            return live_def_locs
        size = min(variable.size, size_threshold)
        offset = variable.reg
        while offset < variable.reg + size:
            if offset in self._register_map:
                live_def_locs |= self._register_map[offset]
            offset += 1
    elif isinstance(variable, SimMemoryVariable):
        size = min(variable.size, size_threshold)
        offset = variable.addr
        while offset < variable.addr + size:
            if offset in self._memory_map:
                live_def_locs |= self._memory_map[offset]
            offset += 1
    else:
        # log message restored (assumption)
        l.error('Unsupported variable type %s.', type(variable))
    return live_def_locs
Find all definitions of the variable. :param SimVariable variable: The variable to look up. :param int size_threshold: The maximum bytes to consider for the variable. For example, if the variable is 100 bytes long, only the first `size_threshold` bytes are considered. :return: A set of code locations where the variable is defined. :rtype: set