Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Download file process: open the URL, check whether the remote file has changed since it was last downloaded, and download it to the destination folder if so. Args: :url: URL to take the file from. :destination: place to store the downloaded file.
def download_file(url, destination, **kwargs): web_file = open_remote_url(url, **kwargs) file_size = 0 if not web_file: logger.error( "Remote file not found. Attempted URLs: {}".format(url)) return modified = is_remote_file_modified(web_file, destination) if modified: logger.info("Downloading: " + web_file.url) file_size = copy_remote_file(web_file, destination) else: logger.info("File up-to-date: " + destination) web_file.close() return file_size
1,020,871
Open the URL(s) and check that the response is a file rather than an HTML page. Args: :urls: endpoint (or list of endpoints) to take the file from
def open_remote_url(urls, **kwargs): if isinstance(urls, str): urls = [urls] for url in urls: try: web_file = requests.get(url, stream=True, **kwargs) if 'html' in web_file.headers['content-type']: raise ValueError("HTML source file retrieved.") return web_file except Exception as ex: logger.error('Fail to open remote url - {}'.format(ex)) continue
1,020,872
Check if online file has been modified. Args: :web_file: online file to check. :destination: path of the offline file to compare.
def is_remote_file_modified(web_file, destination): try: # check datetime of last modified in file. last_mod = web_file.headers.get('last-modified') if last_mod: web_file_time = time.strptime( web_file.headers.get( 'last-modified'), '%a, %d %b %Y %H:%M:%S %Z') else: web_file_time = time.gmtime() web_file_size = int(web_file.headers.get('content-length', -1)) if os.path.exists(destination): file_time = time.gmtime(os.path.getmtime(destination)) file_size = os.path.getsize(destination) if file_time >= web_file_time and file_size == web_file_size: return False except Exception as ex: msg = ('Fail checking if remote file is modified default returns TRUE' ' - {}'.format(ex)) logger.debug(msg) return True
1,020,873
Create the destination path if it does not exist, and copy the online resource file to local storage. Args: :web_file: reference to the online file resource to take. :destination: path to store the file.
def copy_remote_file(web_file, destination): size = 0 dir_name = os.path.dirname(destination) if not os.path.exists(dir_name): os.makedirs(dir_name) with open(destination, 'wb') as file_: chunk_size = 8 * 1024 for chunk in web_file.iter_content(chunk_size=chunk_size): if chunk: file_.write(chunk) size += len(chunk) return size
1,020,874
Fetch a number of samples from self.wave_cache Args: sample_count (int): Number of samples to fetch Returns: ndarray
def get_samples(self, sample_count): if self.amplitude.value <= 0: return None # Build samples by rolling the period cache through the buffer rolled_array = numpy.roll(self.wave_cache, -1 * self.last_played_sample) # Append remaining partial period full_count, remainder = divmod(sample_count, self.cache_length) final_subarray = rolled_array[:int(remainder)] return_array = numpy.concatenate((numpy.tile(rolled_array, full_count), final_subarray)) # Keep track of where we left off to prevent popping between chunks self.last_played_sample = int(((self.last_played_sample + remainder) % self.cache_length)) # Multiply output by amplitude return return_array * (self.amplitude.value * self.amplitude_multiplier)
1,020,906
Optionally set all attributes. Args: linter_name (str): Name of the linter that produced the output. path (str): Relative file path. line_nr (int): Line number. msg (str): Explanation of what is wrong. col (int): Column where the problem begins.
def __init__(self, linter_name, path, msg, line_nr=None, col=None): # Set all attributes in the constructor for convenience. # pylint: disable=too-many-arguments if line_nr: line_nr = int(line_nr) if col: col = int(col) self._linter_name = linter_name self.path = path self.line_nr = line_nr self.msg = msg self.col = col
1,020,921
Match pattern line by line and return Results. Use ``_create_output_from_match`` to convert pattern match groups to Result instances. Args: lines (iterable): Output lines to be parsed. pattern: Compiled pattern to match against lines. Return: generator: Result instances.
def _parse_by_pattern(self, lines, pattern): for line in lines: match = pattern.match(line) if match: params = match.groupdict() if not params: params = match.groups() yield self._create_output_from_match(params)
1,020,927
Create Result instance from pattern match results. Args: match: Pattern match.
def _create_output_from_match(self, match_result): if isinstance(match_result, dict): return LinterOutput(self.name, **match_result) return LinterOutput(self.name, *match_result)
1,020,928
Merges two refs. Args: ref_name: ref to merge into the current one
def merge(self, ref_name: str): if self.is_dirty(): LOGGER.error('repository is dirty; cannot merge: %s', ref_name) sys.exit(-1) LOGGER.info('merging ref: "%s" into branch: %s', ref_name, self.get_current_branch()) self.repo.git.merge(ref_name)
1,021,108
Creates a new branch Args: branch_name: name of the branch
def create_branch(self, branch_name: str): LOGGER.info('creating branch: %s', branch_name) self._validate_branch_name(branch_name) if branch_name in self.list_branches(): LOGGER.error('branch already exists') sys.exit(-1) new_branch = self.repo.create_head(branch_name) new_branch.commit = self.repo.head.commit
1,021,115
Creates a new branch if it doesn't exist Args: branch_name: branch name
def create_branch_and_checkout(self, branch_name: str): self.create_branch(branch_name) self.checkout(branch_name)
1,021,116
Writes the requirement files Args: amend: amend last commit with changes stage: stage changes
def _write_reqs(amend: bool = False, stage: bool = False): LOGGER.info('writing requirements') base_cmd = 'pipenv lock -r' _write_reqs_file(f'{base_cmd}', 'requirements.txt') _write_reqs_file(f'{base_cmd} -d', 'requirements-dev.txt') files_to_add = ['Pipfile', 'requirements.txt', 'requirements-dev.txt'] if amend: CTX.repo.amend_commit(append_to_msg='update requirements [auto]', files_to_add=files_to_add) elif stage: CTX.repo.stage_subset(*files_to_add)
1,021,227
Write requirements files Args: amend: amend last commit with changes stage: stage changes
def reqs(amend: bool = False, stage: bool = False): changed_files = CTX.repo.changed_files() if 'requirements.txt' in changed_files or 'requirements-dev.txt' in changed_files: LOGGER.error('Requirements have changed; cannot update them') sys.exit(-1) _write_reqs(amend, stage)
1,021,228
Takes the contents and name of an expected YAML file and tries to parse it, reporting an error if there are any parsing issues. Args: file_contents (str): Contents of a yml file. file_name (str): Name of the file, used in the error message. Raises: yaml.parser.ParserError: Raises an error if the file contents cannot be parsed and interpreted as yaml
def parse(file_contents, file_name): try: yaml.load(file_contents) except Exception: _, exc_value, _ = sys.exc_info() return("Cannot Parse: {file_name}: \n {exc_value}" .format(file_name=file_name, exc_value=exc_value))
1,021,556
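A minimal usage sketch for the parse helper above (hypothetical file name and content; assumes a PyYAML version where yaml.load may be called without an explicit Loader): it returns an error string when parsing fails and None otherwise.

error = parse("key: [unclosed", "example.yml")   # malformed YAML
if error:
    print(error)   # Cannot Parse: example.yml: ...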
Execute a python code object in the given environment. Args: ast: Compiled code object to execute. globals_map: Dictionary to use as the globals context. Returns: locals_map: Dictionary of locals from the environment after execution.
def exec_function(ast, globals_map): locals_map = globals_map exec(ast, globals_map, locals_map) return locals_map
1,021,742
Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment.
def parse(self, **global_args): if self.build_file not in ParseContext._parsed: # http://en.wikipedia.org/wiki/Abstract_syntax_tree # http://martinfowler.com/books/dsl.html butcher_context = {} for str_to_exec in self._strs_to_exec: ast = compile(str_to_exec, '<string>', 'exec') exec_function(ast, butcher_context) with ParseContext.activate(self): startdir = os.path.abspath(os.curdir) try: os.chdir(self.build_file.path_on_disk) if self.build_file not in ParseContext._parsed: ParseContext._parsed.add(self.build_file) eval_globals = copy.copy(butcher_context) eval_globals.update( {'ROOT_DIR': self.build_file.path_on_disk, '__file__': 'bogus please fix this'}) eval_globals.update(global_args) exec_function(self.build_file.code, eval_globals) finally: os.chdir(startdir)
1,021,744
Turn csv into dict. Args: :csv_filepath: path to csv file to turn into dict. :limits: optional [lower, upper] bounds applied to the csv rows before parsing
def csv_to_dict(csv_filepath, **kwargs): callbacks = {'to_list': csv_tolist, 'row_csv_limiter': row_csv_limiter, 'csv_row_cleaner': csv_row_cleaner, 'row_headers_count': row_headers_count, 'get_col_header': get_csv_col_headers, 'get_row_headers': get_row_headers, 'populate_headers': populate_headers, 'csv_column_header_cleaner': csv_column_header_cleaner, 'csv_column_cleaner': csv_column_cleaner, 'retrieve_csv_data': retrieve_csv_data} callbacks.update(kwargs.get('alt_callbacks', {})) rows = kwargs.get('rows', []) if not rows: # csv_tolist of rows rows = callbacks.get('to_list')(csv_filepath, **kwargs) if not rows: msg = 'Empty rows obtained from {}'.format(csv_filepath) logger.warning(msg) raise ValueError(msg) # apply limits rows = callbacks.get('row_csv_limiter')( rows, kwargs.get('limits', [None, None])) # apply row cleaner rows = callbacks.get('csv_row_cleaner')(rows) # apply column cleaner rows = callbacks.get('csv_column_cleaner')(rows) # count raw headers num_row_headers = callbacks.get('row_headers_count')(rows) # take colum_headers c_headers_raw = callbacks.get('get_col_header')(rows, num_row_headers) # get row_headers r_headers = callbacks.get('get_row_headers')( rows, num_row_headers, len(c_headers_raw)) # format colum_headers c_headers_dirty = callbacks.get('populate_headers')( c_headers_raw) if len(c_headers_raw) > 1 else c_headers_raw[0] # Clean csv column headers of empty values. c_headers = callbacks.get('csv_column_header_cleaner')(c_headers_dirty) # take data csv_data = callbacks.get('retrieve_csv_data')( rows, column_header=len(c_headers_raw), row_header=num_row_headers, limit_column=len(c_headers) - len(c_headers_dirty) or None) # Check column headers validation if csv_data: assert len(c_headers) == len(csv_data[0]) # Check row headers validation if r_headers: assert len(r_headers) == len(csv_data) # Transform rows into dict zipping the headers. kwargs.pop('rows', None) result = csv_format(csv_data, c_headers, r_headers, rows, **kwargs) return result
1,021,877
Turn excel into dict. Args: :excel_filepath: path to excel file to turn into dict. :limits: optional [lower, upper] bounds applied to the rows of each sheet before parsing
def excel_to_dict(excel_filepath, encapsulate_filepath=False, **kwargs): result = {} try: callbacks = {'to_dictlist': excel_todictlist} # Default callback callbacks.update(kwargs.get('alt_callbacks', {})) # Retrieve excel data as dict of sheets lists excel_data = callbacks.get('to_dictlist')(excel_filepath, **kwargs) for sheet in excel_data.keys(): try: kwargs['rows'] = excel_data.get(sheet, []) result[sheet] = csv_to_dict(excel_filepath, **kwargs) except Exception as ex: logger.error('Fail to parse sheet {} - {}'.format(sheet, ex)) result[sheet] = [] continue if encapsulate_filepath: result = {excel_filepath: result} except Exception as ex: msg = 'Fail transform excel to dict - {}'.format(ex) logger.error(msg, excel_filepath=excel_filepath) return result
1,021,878
Creates a filename with an md5 cache string based on the settings list. Args: filename (str): the filename without extension extention (str): the file extension without the dot (e.g. 'pkl') settings_list (dict|list): the settings list (optional) NB! The dictionaries have to be sorted or the hash id will change arbitrarily.
def get_cached_filename(self, filename, extention, settings_list=None): cached_name = "_".join([filename, self.get_hash()]) return ".".join([cached_name, extention])
1,021,992
Fetches the table and applies all post processors. Args: rebuild (bool): Rebuild the table and ignore cache. Default: False cache (bool): Cache the finished table for faster future loading. Default: True
def fetch(self, rebuild=False, cache=True): if rebuild: return self._process_table(cache) try: return self.read_cache() except FileNotFoundError: return self._process_table(cache)
1,021,995
Required by flake8; adds the possible options. Called first. Args: parser (OptionsManager): option parser to register the options on
def add_options(cls, parser): kwargs = {'action': 'store', 'default': '', 'parse_from_config': True, 'comma_separated_list': True} for num in range(cls.min_check, cls.max_check): parser.add_option(None, "--filename_check{}".format(num), **kwargs)
1,022,004
Required by flake8; parses the options. Called after add_options. Args: options (dict): options to be parsed
def parse_options(cls, options): d = {} for filename_check, dictionary in cls.filename_checks.items(): # retrieve the marks from the passed options filename_data = getattr(options, filename_check) if len(filename_data) != 0: parsed_params = {} for single_line in filename_data: a = [s.strip() for s in single_line.split('=')] # whitelist the acceptable params if a[0] in ['filter_regex', 'filename_regex']: parsed_params[a[0]] = a[1] d[filename_check] = parsed_params cls.filename_checks.update(d) # delete any empty rules cls.filename_checks = {x: y for x, y in cls.filename_checks.items() if len(y) > 0}
1,022,005
Construct Retsly client Args: token (string): access token vendor (string): vendor ID
def __init__(self, token, vendor='test'): self.token = token self.vendor = vendor
1,022,007
Amends last commit Args: append_to_msg: string to append to previous message new_message: new commit message files_to_add: optional list of files to commit
def amend_commit( self, append_to_msg: typing.Optional[str] = None, new_message: typing.Optional[str] = None, files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None, ):
1,022,117
Sets an environment variable on AppVeyor. Args: key: variable name value: variable value
def set_env_var(key: str, value: str): elib_run.run(f'appveyor SetVariable -Name {key} -Value {value}') AV.info('Env', f'set "{key}" -> "{value}"')
1,022,144
Parse a JSON BUILD file. Args: builddata: dictionary of buildfile data reponame: name of the repo that it came from path: directory path within the repo
def _parse(self, stream): builddata = json.load(stream) log.debug('This is a JSON build file.') if 'targets' not in builddata: log.warn('Warning: No targets defined here.') return for tdata in builddata['targets']: # TODO: validate name target = address.new(target=tdata.pop('name'), repo=self.target.repo, path=self.target.path) # Duplicate target definition? Uh oh. if target in self.node and 'target_obj' in self.node[target]: raise error.ButcherError( 'Target is defined more than once: %s', target) rule_obj = targets.new(name=target, ruletype=tdata.pop('type'), **tdata) log.debug('New target: %s', target) self.add_node(target, {'target_obj': rule_obj}) # dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla" for dep in rule_obj.composed_deps() or []: d_target = address.new(dep) if not d_target.repo: # ":blabla" d_target.repo = self.target.repo if d_target.repo == self.target.repo and not d_target.path: d_target.path = self.target.path if d_target not in self.nodes(): self.add_node(d_target) log.debug('New dep: %s -> %s', target, d_target) self.add_edge(target, d_target)
1,022,159
Runs all linters Args: ctx: click context amend: whether or not to commit results stage: whether or not to stage changes
def lint(ctx: click.Context, amend: bool = False, stage: bool = False): _lint(ctx, amend, stage)
1,022,290
Build xml documents from a list of document ids. Args: doc_ids -- A document id or a list of those.
def set_doc_ids(self, doc_ids): if isinstance(doc_ids, list): self.set_documents(dict.fromkeys(doc_ids)) else: self.set_documents({doc_ids: None})
1,022,318
Set properties of attributes stored in content, using the stored common fdel and fget and the given fset. Args: set_property -- Function that sets the given property. name -- Name of the attribute this property must simulate. Used as a key in the content dict by default. starting_value -- Starting value of the given property. Keyword args: tag_name -- The tag name stored in the content dict as a key, if different from name.
def add_property(self, set_property, name, starting_value, tag_name=None): def del_property(self, tag_name): try: del self._content[tag_name] except KeyError: pass def get_property(self, tag_name): try: return self._content[tag_name] except KeyError: return None tag_name = (name if tag_name is None else tag_name) fget = lambda self: get_property(self, tag_name) fdel = lambda self: del_property(self, tag_name) fset = lambda self, value: set_property(value) setattr(self.__class__, name, property(fget, fset, fdel)) set_property(starting_value)
1,022,319
Convert a dict form of a query into a string if needed, and store the query string. Args: value -- A query string or a dict with query xpaths as keys and text or nested query dicts as values.
def set_query(self, value): if isinstance(value, basestring) or value is None: self._content['query'] = value elif hasattr(value, 'keys'): self._content['query'] = query.terms_from_dict(value) else: raise TypeError("Query must be a string or dict. Got: " + type(value) + " insted!")
1,022,321
r"""Displays help for the given route. Args: route (str): A route that resolves a member.
def help(route): help_text = getRouteHelp(route.split('/') if route else []) if help_text is None: err('Can\'t help :(') else: print('\n%s' % help_text)
1,022,396
r"""Calls a task, as if it were called from the command line. Args: command (str): A route followed by params (as if it were entered in the shell). collect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False. Returns: The return value of the called command.
def call(command, collect_missing=False, silent=True): return (_execCommand if silent else execCommand)(shlex.split(command), collect_missing)
1,022,451
r"""Adds members to an existing group. Args: TargetGroup (Group): The target group for the addition. NewMember (Group / Task): The member to be added. Config (dict): The config for the member. Args (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).
def add(TargetGroup, NewMember, Config=None, Args=None): Member = Task(NewMember, Args or {}, Config or {}) if isfunction(NewMember) else Group(NewMember, Config or {}) ParentMembers = TargetGroup.__ec_member__.Members ParentMembers[Member.Config['name']] = Member alias = Member.Config.get('alias') if alias: ParentMembers[alias] = Member
1,022,452
Run the python pep8 tool against the list of supplied files. Append any linting errors to the returned status list. Args: files (list): list of files to run pep8 against status (list): list of pre-receive check failures to eventually print to the user Returns: status list of current pre-receive check failures. Might be an empty list.
def do_check_pep8(files, status): for file_name in files: args = ['flake8', '--max-line-length=120', '{0}'.format(file_name)] output = run(*args) if output: status.append("Python PEP8/Flake8: {0}: {1}".format(file_name, output)) return status
1,022,455
Generic do_check helper method Args: func (function): Specific function to call files (list): list of files to run against status (list): list of pre-receive check failures to eventually print to the user Returns: status list of current pre-receive check failures. Might be an empty list.
def do_check(func, files, status): for file_name in files: with open(file_name, 'r') as f: output = func.parse(f.read(), file_name) if output: status.append("{0}: {1}".format(file_name, output)) return status
1,022,456
Return singleton instance. Args: cls (type): the class. args (tuple/list): initializer function arguments. kwargs (dict): initializer function keyword arguments.
def __call__(cls, *args, **kwargs): if cls.instance is None: with threading.Lock(): if cls.instance is None: cls.instance = super(Singleton, cls).__call__(*args, **kwargs) return cls.instance
1,022,486
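A hedged usage sketch for the Singleton metaclass above; it assumes the class using it defines a class-level instance attribute set to None, which the __call__ above reads and assigns (that attribute is not shown in this record).

class Config(metaclass=Singleton):
    instance = None            # assumed attribute expected by __call__ above
    def __init__(self):
        self.values = {}

assert Config() is Config()    # every call returns the same object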
Given a dict and a '/'- or '.'-separated path, digs into the dict to retrieve the specified element. Args: source (dict): set of nested objects in which the data will be searched path (string): '/' or '.' separated string of attribute names
def get_element(source, path, separator=r'[/.]'): return _get_element_by_names(source, re.split(separator, path))
1,022,518
Given a dict and a list of attribute names, digs into the dict to retrieve the specified element. Args: source (dict): set of nested objects in which the data will be searched names (list): list of attribute names
def _get_element_by_names(source, names): if source is None: return source else: if names: head, *rest = names if isinstance(source, dict) and head in source: return _get_element_by_names(source[head], rest) elif isinstance(source, list) and head.isdigit(): return _get_element_by_names(source[int(head)], rest) elif not names[0]: pass else: source = None return source
1,022,519
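A short usage sketch for get_element / _get_element_by_names above, with a hypothetical nested structure; list elements are addressed by their numeric position in the path.

data = {'server': {'hosts': [{'name': 'alpha'}, {'name': 'beta'}]}}
get_element(data, 'server/hosts/1/name')   # -> 'beta'
get_element(data, 'server.hosts.0.name')   # -> 'alpha'
get_element(data, 'server/missing')        # -> None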
Format a dict into a string, using a list of keys to define the order. Args: dic: dict with the values to format. format_list: list of keys defining the output order. separator: string used to join the values (default ','). default_value: factory used for missing keys (default str).
def format_dict(dic, format_list, separator=',', default_value=str): dic = collections.defaultdict(default_value, dic) str_format = separator.join(["{" + "{}".format(head) + "}" for head in format_list]) return str_format.format(**dic)
1,022,522
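A small usage sketch for format_dict above (hypothetical keys); format_list controls which values appear and in what order.

row = {'name': 'alpha', 'count': 3, 'status': 'ok'}
format_dict(row, ['name', 'status'])                 # -> 'alpha,ok'
format_dict(row, ['name', 'count'], separator=';')   # -> 'alpha;3'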
Remove duplicated elements in a list. Args: lista: List with elements to clean duplicates. unique: if True, also drop every element that appeared more than once.
def remove_list_duplicates(lista, unique=False): result = [] allready = [] for elem in lista: if elem not in result: result.append(elem) else: allready.append(elem) if unique: for elem in allready: result = list(filter((elem).__ne__, result)) return result
1,022,530
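A brief usage sketch for remove_list_duplicates above: by default it keeps the first occurrence of each element, and with unique=True it drops any element that was duplicated at all.

remove_list_duplicates([1, 2, 2, 3, 1])                # -> [1, 2, 3]
remove_list_duplicates([1, 2, 2, 3, 1], unique=True)   # -> [3]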
Build a neural net with the indicated input, hidden, and output dimensions Arguments: params (dict or PyBrainParams namedtuple): default: {'N_hidden': 6} (this is the only parameter that affects the NN build) Returns: FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1): N_input = N_input or 1 N_output = N_output or 1 N_hidden = N_hidden or tuple() if isinstance(N_hidden, (int, float, basestring)): N_hidden = (int(N_hidden),) hidden_layer_type = hidden_layer_type or tuple() hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type)) if verbosity > 0: print(N_hidden, ' layers of type ', hidden_layer_type) assert(len(N_hidden) == len(hidden_layer_type)) nn = pb.structure.FeedForwardNetwork() # layers nn.addInputModule(pb.structure.BiasUnit(name='bias')) nn.addInputModule(pb.structure.LinearLayer(N_input, name='input')) for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)): Nhid = int(Nhid) nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden'))) nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output')) # connections nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output'])) nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output'])) for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])): Nhid = int(Nhid) nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['hidden-{}'.format(i + 1)])) i = len(N_hidden) - 1 nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output'])) nn.sortModules() if FAST: try: nn.convertToFastNetwork() except: if verbosity > 0: print('Unable to convert slow PyBrain NN to a fast ARAC network...') if verbosity > 0: print(nn.connections) return nn
1,022,597
Prepend the weather values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors. samples[0..N]['target'] should have an index with the date timestamp. If you use_cache for the current year, you may not get the most recent data. Arguments: samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0): if verbosity > 1: print('Prepending weather data for {} to dataset samples'.format(weather_columns)) if not weather_columns: return samples timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples]) years = range(timestamps.min().date().year, timestamps.max().date().year + 1) weather_df = weather.daily(location=location, years=years, use_cache=use_cache) # FIXME: weather_df.resample('D') fails weather_df.index = [d.date() for d in weather_df.index] if verbosity > 1: print('Retrieved weather for years {}:'.format(years)) print(weather_df) weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)] for label in (weather_columns or [])] for sampnum, sample in enumerate(samples): timestamp = timestamps[sampnum] try: weather_day = weather_df.loc[timestamp.date()] except: from traceback import print_exc print_exc() weather_day = {} if verbosity >= 0: warnings.warn('Unable to find weather for the date {}'.format(timestamp.date())) NaN = float('NaN') sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input']) if verbosity > 0 and NaN in sample['input']: warnings.warn('Unable to find weather features {} in the weather for date {}'.format( [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp)) return samples
1,022,599
Python 2.4 compatible memoize decorator. It creates a cache that has a maximum size. If the cache exceeds the max, it is thrown out and a new one made. With such behavior, it is wise to set the cache just a little larger than the maximum expected need. Parameters: max_cache_size - the size to which a cache can grow Limitations: The cache works only on args, not kwargs
def _memoizeArgsOnly (max_cache_size=1000): def wrapper (f): def fn (*args): try: return fn.cache[args] except KeyError: if fn.count >= max_cache_size: fn.cache = {} fn.count = 0 fn.cache[args] = result = f(*args) fn.count += 1 return result fn.cache = {} fn.count = 0 return fn return wrapper
1,022,674
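A usage sketch for the memoize decorator above (hypothetical function); once the cache holds max_cache_size entries, the next miss clears it and starts over.

@_memoizeArgsOnly(max_cache_size=2)
def slow_square(x):
    print('computing', x)
    return x * x

slow_square(3)   # prints 'computing 3' and returns 9
slow_square(3)   # returned from the cache, nothing printed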
Turn a string representing a version into a normalized version list. Version lists are directly comparable using standard operators such as >, <, ==, etc. Parameters: version_string - such as '3.5' or '3.6.3plugin3' max_version_parts - version strings are composed of a series of 4-tuples. This should be set to the maximum number of 4-tuples in a version string.
def normalize(version_string, max_version_parts=4): version_list = [] for part_count, version_part in enumerate(version_string.split('.')): try: groups = _version_part_re.match(version_part).groups() except Exception as x: raise NotAVersionException(version_string) version_list.extend(t(x) for x, t in zip(groups, _normalize_fn_list)) version_list.extend(_padding_list * (max_version_parts - part_count - 1)) return version_list
1,022,675
Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment.
def parse_env(config_schema, env): try: return { key: item_schema.parse(key, env.get(key)) for key, item_schema in config_schema.items() } except KeyError as error: raise MissingConfigError( "Required config not set: {}".format(error.args[0]) )
1,022,848
Parse the environment value for a given key against the schema. Args: key: The name of the environment variable. value: The value to be parsed.
def parse(self, key, value): if value is not None: try: return self._parser(value) except Exception: raise ParsingError("Error parsing {}".format(key)) elif self._default is not SENTINAL: return self._default else: raise KeyError(key)
1,022,850
Reads one or more text files and returns them joined together. A title is automatically created based on the file name. Args: *file_paths: list of files to aggregate Returns: content of files
def read_local_files(*file_paths: str) -> str: def _read_single_file(file_path): with open(file_path) as f: filename = os.path.splitext(file_path)[0] title = f'{filename}\n{"=" * len(filename)}' return '\n\n'.join((title, f.read())) return '\n' + '\n\n'.join(map(_read_single_file, file_paths))
1,023,498
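A tiny usage sketch for read_local_files above, with hypothetical file names; each file body is preceded by a title derived from its name and underlined with '=' characters.

from pathlib import Path

Path('notes.txt').write_text('first file body')
Path('todo.txt').write_text('second file body')
print(read_local_files('notes.txt', 'todo.txt'))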
Makes some plots: creates binned histograms of the results of each module (i.e. count of results in ranges [(0,40), (40, 50), (50,60), (60, 70), (70, 80), (80, 90), (90, 100)]) Arguments: path {str} -- path to save plots to show {boolean} -- whether to show plots using python goodFormat {dict} -- module : [results for module] Output: saves plots to files/shows plots depending on inputs
def plotter(path, show, goodFormat): for module in goodFormat.items(): # for each module bins = [0, 40, 50, 60, 70, 80, 90, 100] # cut the data into bins out = pd.cut(module[1], bins=bins, include_lowest=True) ax = out.value_counts().plot.bar(rot=0, color="b", figsize=(10, 6), alpha=0.5, title=module[0]) # plot counts of the cut data as a bar ax.set_xticklabels(['0 to 40', '40 to 50', '50 to 60', '60 to 70', '70 to 80', '80 to 90', '90 to 100']) ax.set_ylabel("# of candidates") ax.set_xlabel( "grade bins \n total candidates: {}".format(len(module[1]))) if path is not None and show is not False: # if export path directory doesn't exist: create it if not pathlib.Path.is_dir(path.as_posix()): pathlib.Path.mkdir(path.as_posix()) plt.savefig(path / ''.join([module[0], '.png'])) plt.show() elif path is not None: # if export path directory doesn't exist: create it if not pathlib.Path.is_dir(path): pathlib.Path.mkdir(path) plt.savefig(path / ''.join([module[0], '.png'])) plt.close() elif show is not False: plt.show()
1,023,627
Returns the final result of candidateNumber in year Arguments: year {int} -- the year candidateNumber is in candidateNumber {str} -- the candidate number to compute the result for badFormat {dict} -- candNumber : [results for candidate] length {int} -- length of each row in badFormat divided by 2 Returns: int -- a weighted average for a specific candidate number and year
def myGrades(year, candidateNumber, badFormat, length): weights1 = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5] weights2 = [1, 1, 1, 1, 1, 1, 0.5, 0.5] if year == 1: myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)]) * weights1[i] for i in range(length-1)]) / 6 elif year == 2 or year == 3: myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)]) * weights2[i] for i in range(length-1)]) / 7 elif year == 4: myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)]) for i in range(length-1)]) / 8 return myFinalResult
1,023,628
rank of candidateNumber in year Arguments: grade {int} -- a weighted average for a specific candidate number and year badFormat {dict} -- candNumber : [results for candidate] year {int} -- year you are in length {int} -- length of each row in badFormat divided by 2 Returns: int -- rank of candidateNumber in year
def myRank(grade, badFormat, year, length): return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)
1,023,629
creates list of weighted average results for everyone in year Arguments: year {int} badFormat {dict} -- candNumber : [results for candidate] length {int} -- length of each row in badFormat divided by 2 returns: list -- weighted average results of everyone in year
def everyonesAverage(year, badFormat, length): return [myGrades(year, cand, badFormat, length) for cand in list(badFormat.keys())[1:]]
1,023,630
plots using inquirer prompts Arguments: goodFormat {dict} -- module : [results for module]
def howPlotAsk(goodFormat): plotAnswer = askPlot() if "Save" in plotAnswer['plotQ']: exportPlotsPath = pathlib.Path(askSave()) if "Show" in plotAnswer['plotQ']: plotter(exportPlotsPath, True, goodFormat) else: plotter(exportPlotsPath, False, goodFormat) elif "Show" in plotAnswer['plotQ']: plotter(None, True, goodFormat)
1,023,632
Plots using the argparse arguments if it can, otherwise falls back to howPlotAsk() Arguments: goodFormat {dict} -- module : [results for module]
def howPlotArgs(goodFormat): if args.exportplots is not None: exportPlotsPath = pathlib.Path(args.exportplots) if args.showplots: plotter(exportPlotsPath, True, goodFormat) else: plotter(exportPlotsPath, False, goodFormat) elif args.showplots: plotter(None, True, goodFormat) else: howPlotAsk(goodFormat)
1,023,633
main entry point of app Arguments: args {namespace} -- arguments provided in cli
def main(args): print("\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\n") ######################### # # # # # prompt # # # # # ######################### if not len(sys.argv) > 1: initialAnswers = askInitial() inputPath = pathlib.Path(initialAnswers['inputPath']) year = int(initialAnswers['year']) # create a list from every row badFormat = badFormater(inputPath) # create a list from every row howManyCandidates = len(badFormat) - 1 length = int(len(badFormat['Cand'])/2) finalReturn = [] if "Get your rank in the year" in initialAnswers['whatToDo']: candidateNumber = askCandidateNumber() weightedAverage = myGrades(year, candidateNumber, badFormat, length) rank = myRank(weightedAverage, badFormat, year, length) if "Get your weighted average" in initialAnswers['whatToDo']: finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format( rank, howManyCandidates, (rank * 100) / howManyCandidates)) elif "Get your weighted average" in initialAnswers['whatToDo']: candidateNumber = askCandidateNumber() weightedAverage = myGrades(year, candidateNumber, badFormat, length) finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) if "Reformat results by module and output to csv" in initialAnswers['whatToDo']: formatOutputPath = pathlib.Path(askFormat()) goodFormat = goodFormater(badFormat, formatOutputPath, year, length) if "Plot the results by module" in initialAnswers['whatToDo']: howPlotAsk(goodFormat) elif "Plot the results by module" in initialAnswers['whatToDo']: goodFormat = goodFormater(badFormat, None, year, length) howPlotAsk(goodFormat) [print('\n', x) for x in finalReturn] ######################### # # # end # # prompt # # # # # ######################### ######################### # # # # # run with # # cli args # # # ######################### if len(sys.argv) > 1: if not args.input: inputPath = pathlib.Path(askInput()) else: inputPath = pathlib.Path(args.input) if not args.year: year = int(askYear()) else: year = int(args.year) # create a list from every row badFormat = badFormater(inputPath) # create a list from every row howManyCandidates = len(badFormat) - 1 length = int(len(badFormat['Cand'])/2) finalReturn = [] if args.rank: if not args.candidate: candidateNumber = askCandidateNumber() else: candidateNumber = args.candidate weightedAverage = myGrades(year, candidateNumber, badFormat, length) rank = myRank(weightedAverage, badFormat, year, length) if args.my: finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format( rank, howManyCandidates, (rank * 100) / howManyCandidates)) elif args.my: if not args.candidate: candidateNumber = askCandidateNumber() else: candidateNumber = args.candidate weightedAverage = myGrades(year, candidateNumber, badFormat, length) finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) if args.format is not None: formatOutputPath = pathlib.Path(args.format) goodFormat = goodFormater(badFormat, formatOutputPath, year, length) if args.plot: howPlotArgs(goodFormat) elif args.plot: goodFormat = goodFormater(badFormat, None, year, length) howPlotArgs(goodFormat) [print('\n', x) for x in finalReturn] ######################### # # # end # # run with # # cli args # # # ######################### print('')
1,023,635
r"""A decorator that makes the decorated function to run while ec exits. Args: callable (callable): The target callable. once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True. Note: Hooks are processedd in a LIFO order.
def exit_hook(callable, once=True): if once and callable in ExitHooks: return ExitHooks.append(callable)
1,023,653
A predicate that converts the keyed source value to boolean. parameters: raw_crash - dict dumps - placeholder in a fat interface - unused processed_crash - placeholder in a fat interface - unused processor - placeholder in a fat interface - unused
def is_not_null_predicate( raw_crash, dumps, processed_crash, processor, key='' ): try: return bool(raw_crash[key]) except KeyError: return False
1,023,664
Makes sure that an executable can be found on the system path. Will exit the program if the executable cannot be found Args: exe_name: name of the executable paths: optional path(s) to be searched; if not specified, search the whole system
def ensure_exe(exe_name: str, *paths: str): # pragma: no cover if not elib_run.find_executable(exe_name, *paths): LOGGER.error('could not find "%s.exe" on this system', exe_name) sys.exit(-1)
1,024,016
Scan an ast object for targetvar and return its value. Only handles single direct assignment of python literal types. See docs on ast.literal_eval for more info: http://docs.python.org/2/library/ast.html#ast.literal_eval Args: syntree: ast.Module object targetvar: name of global variable to return Returns: Value of targetvar if found in syntree, or None if not found.
def getvar(syntree, targetvar): for node in syntree.body: if isinstance(node, ast.Assign): for var in node.targets: if var.id == targetvar: return ast.literal_eval(node.value)
1,024,303
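A short usage sketch for getvar above; ast.literal_eval accepts the assigned AST node directly, so literal values can be pulled out of parsed source.

import ast

tree = ast.parse("NAME = 'butcher'\nVERSION = (1, 2, 3)\n")
getvar(tree, 'VERSION')   # -> (1, 2, 3)
getvar(tree, 'MISSING')   # -> None (no such assignment)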
Writes the changelog Args: amend: amend last commit with changes stage: stage changes next_version: indicates next version auto_next_version: infer next version from VCS
def _chglog(amend: bool = False, stage: bool = False, next_version: str = None, auto_next_version: bool = False): if config.CHANGELOG_DISABLE(): LOGGER.info('skipping changelog update as per config') else: epab.utils.ensure_exe('git') epab.utils.ensure_exe('gitchangelog') LOGGER.info('writing changelog') if auto_next_version: next_version = epab.utils.get_next_version() with gitchangelog_config(): with temporary_tag(next_version): changelog, _ = elib_run.run('gitchangelog', mute=True) # changelog = changelog.encode('utf8').replace(b'\r\n', b'\n').decode('utf8') changelog = re.sub(BOGUS_LINE_PATTERN, '\\1\n', changelog) Path(config.CHANGELOG_FILE_PATH()).write_text(changelog, encoding='utf8') if amend: CTX.repo.amend_commit( append_to_msg='update changelog [auto]', files_to_add=str(config.CHANGELOG_FILE_PATH()) ) elif stage: CTX.repo.stage_subset(str(config.CHANGELOG_FILE_PATH()))
1,024,504
Writes the changelog Args: amend: amend last commit with changes stage: stage changes next_version: indicates next version auto_next_version: infer next version from VCS
def chglog(amend: bool = False, stage: bool = False, next_version: str = None, auto_next_version: bool = False): changed_files = CTX.repo.changed_files() changelog_file_path: Path = config.CHANGELOG_FILE_PATH() changelog_file_name = changelog_file_path.name if changelog_file_name in changed_files: LOGGER.error('changelog has changed; cannot update it') exit(-1) _chglog(amend, stage, next_version, auto_next_version)
1,024,505
Maps a given URL path, name and namespace to a view. Arguments: - path: the URL regex, e.g.: '^teste/(?P<pk>[0-9])/$'. Optional arguments: - name: the URL name, which Django uses to identify the URL; - include: A custom URL list, previously set on the module's urls.py; - namespace: the URL's namespace; - priority: the URL's priority;
def umap(path, name=None, include=None, namespace=None, priority=None): def url_wrapper(view): # gets the module name module = _find_urls_module(view) # gets the view function (checking if it's a class-based view) fn = view.as_view() if hasattr(view, 'as_view') else view if namespace and include: raise TypeError( 'You can\'t use \'namespace\' and \'include\'' ' at the same time!' ) if namespace: # imports the urlpatterns object base = import_string('{}.urls.urlpatterns'.format(module)) # searchs for the namespace urlpatterns_list = [ x for x in base if getattr(x, 'namespace', None) == namespace ] # if the list length is different than 1, # then the namespace is either duplicated or doesn't exist if len(urlpatterns_list) != 1: raise ValueError( 'Namespace \'{}\' not in list.'.format(namespace) ) # if the namespace was found, get its object urlpatterns = urlpatterns_list.pop(0).url_patterns else: # imports the urlpatterns object urlpatterns = import_string('{}.urls.{}'.format( module, include or 'urlpatterns' )) # appends the url with its given name call = ( urlpatterns.append if priority is None else partial(urlpatterns.insert, priority) ) call(url(path, fn, name=name)) return view return url_wrapper
1,024,799
Fetch json data from n.pl Args: date (date) - default today url_pattern (string) - default URL_PATTERN Returns: dict - data from api
def fetcher(date=datetime.today(), url_pattern=URL_PATTERN): api_url = url_pattern % date.strftime('%Y-%m-%d') headers = {'Referer': 'http://n.pl/program-tv'} raw_result = requests.get(api_url, headers=headers).json() return raw_result
1,024,829
Parse raw result from fetcher into readable dictionary Args: raw_result (dict) - raw data from `fetcher` Returns: dict - readable dictionary
def result_to_dict(raw_result): result = {} for channel_index, channel in enumerate(raw_result): channel_id, channel_name = channel[0], channel[1] channel_result = { 'id': channel_id, 'name': channel_name, 'movies': [] } for movie in channel[2]: channel_result['movies'].append({ 'title': movie[1], 'start_time': datetime.fromtimestamp(movie[2]), 'end_time': datetime.fromtimestamp(movie[2] + movie[3]), 'inf': True if movie[3] else False, }) result[channel_id] = channel_result return result
1,024,830
Send the prepared XML request block to the CPS using the correct protocol. Args: xml_request -- A fully formed xml request string for the CPS. Returns: The raw xml response string. Raises: ConnectionError -- Can't establish a connection with the server.
def _send_request(self, xml_request): if self._scheme == 'http': return self._send_http_request(xml_request) else: return self._send_socket_request(xml_request)
1,024,958
Send a request via HTTP protocol. Args: xml_request -- A fully formed xml request string for the CPS. Returns: The raw xml response string.
def _send_http_request(self, xml_request): headers = {"Host": self._host, "Content-Type": "text/xml", "Recipient": self._storage} try: # Retry once if failed in case the socket has just gone bad. self._connection.request("POST", self._selector_url, xml_request, headers) response = self._connection.getresponse() except (httplib.CannotSendRequest, httplib.BadStatusLine): Debug.warn("\nRestarting socket, resending message!") self._open_connection() self._connection.request("POST", self._selector_url, xml_request, headers) response = self._connection.getresponse() data = response.read() return data
1,024,959
Send a request via protobuf. Args: xml_request -- A fully formed xml request string for the CPS. Returns: The raw xml response string.
def _send_socket_request(self, xml_request): def to_variant(number): buff = [] while number: byte = number % 128 number = number // 128 if number > 0: byte |= 0x80 buff.append(chr(byte)) return ''.join(buff) def from_variant(stream): used = 0 number = 0 q = 1 while True: byte = ord(stream[used]) used += 1 number += q * (byte & 0x7F) q *= 128 if byte&0x80==0: break return (number, used) def encode_fields(fields): chunks = [] for field_id, message in fields.items(): chunks.append(to_variant((field_id << 3) | 2)) # Hardcoded WireType=2 chunks.append(to_variant(len(message))) chunks.append(message) return ''.join(chunks) def decode_fields(stream): fields = {} offset = 0 stream_lenght = len(stream) while offset<stream_lenght: field_header, used = from_variant(stream[offset:]) offset += used wire_type = field_header & 0x07 field_id = field_header >> 3 if wire_type==2: message_lenght, used = from_variant(stream[offset:]) offset += used fields[field_id] = stream[offset:offset+message_lenght] offset += message_lenght elif wire_type==0: fields[field_id], used = from_variant(stream[offset:]) offset += used elif wire_type==1: fields[field_id] = stream[offset:offset+8] offset += 8 elif wire_type==3: raise ConnectionError() elif wire_type==4: raise ConnectionError() elif wire_type==5: fields[field_id] = stream[offse:offset+4] offset += 4 else: raise ConnectionError() return fields def make_header(lenght): result = [] result.append(chr((lenght & 0x000000FF))) result.append(chr((lenght & 0x0000FF00) >> 8)) result.append(chr((lenght & 0x00FF0000) >> 16)) result.append(chr((lenght & 0xFF000000) >> 24)) return '\t\t\x00\x00' + ''.join(result) def parse_header(header): if len(header) == 8 and header[0] == '\t' and header[1] == '\t' and\ header[2] == '\00' and header[3] == '\00': return ord(header[4]) | (ord(header[5]) << 8) |\ (ord(header[6]) << 16) | (ord(header[7]) << 24) else: raise ConnectionError() def socket_send(data): sent_bytes = 0 failures = 0 total_bytes = len(data) while sent_bytes < total_bytes: sent = self._connection.send(data[sent_bytes:]) if sent == 0: failures += 1 if failures > 5: raise ConnectionError() continue sent_bytes += sent def socket_recieve(lenght): total_recieved = 0 failures = 5 recieved_chunks = [] while total_recieved<lenght: chunk = self._connection.recv(lenght-total_recieved) if not chunk: failures += 1 if failures > 5: raise ConnectionError() continue recieved_chunks.append(chunk) total_recieved += len(chunk) return ''.join(recieved_chunks) encoded_message = encode_fields({1: xml_request, 2: self._storage if self._storage else "special:detect-storage"}) header = make_header(len(encoded_message)) try: # Retry once if failed in case the socket has just gone bad. socket_send(header+encoded_message) except (ConnectionError, socket.error): self._connection.close() self._open_connection() socket_send(header+encoded_message) # TODO: timeout header = socket_recieve(8) lenght = parse_header(header) encoded_response = socket_recieve(lenght) response = decode_fields(encoded_response) # TODO: Test for id=3 error message # TODO: check for and raise errors return response[1]
1,024,960
Increment the world state, determining which cells live, die, or appear. Args: world (list[list]): A square matrix of cells Returns: None
def update_state(world): world_size = len(world) def wrap(index): return index % world_size for x in range(world_size): for y in range(world_size): # Decide if this node cares about the rules right now if not world[x][y].allow_change.get(): continue live_neighbor_count = sum([ world[wrap(x)][wrap(y + 1)].value, world[wrap(x + 1)][wrap(y + 1)].value, world[wrap(x + 1)][wrap(y)].value, world[wrap(x + 1)][wrap(y - 1)].value, world[wrap(x)][wrap(y-1)].value, world[wrap(x - 1)][wrap(y - 1)].value, world[wrap(x - 1)][wrap(y)].value, world[wrap(x - 1)][wrap(y + 1)].value ]) if world[x][y].value: # Any live cell with fewer than two live neighbours dies # Any live cell with more than three live neighbours dies # Any live cell with two or three live neighbours lives if not (live_neighbor_count == 2 or live_neighbor_count == 3): world[x][y].value = False else: # Any dead cell with exactly three live neighbours comes alive if live_neighbor_count == 3: world[x][y].value = True
1,025,012
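A wiring-only sketch for update_state above: it uses a minimal stand-in cell exposing the .value flag and .allow_change.get() hook the function expects (the real cell class is not shown in this record), seeds a few live cells, and advances one step.

class _AlwaysAllow:
    def get(self):
        return True

class _Cell:
    def __init__(self, alive=False):
        self.value = alive
        self.allow_change = _AlwaysAllow()

world = [[_Cell() for _ in range(5)] for _ in range(5)]
for y in (1, 2, 3):
    world[2][y].value = True   # seed three live cells in a row
update_state(world)            # apply the life/death rules in place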
Prints description information about all tables registered Args: full (bool): Also prints description of post processors.
def describe_all(self, full=False): for table in self.tabs: yield self.tabs[table]().describe(full)
1,025,217
Configure how a function should be retried. Args: on_exception (BaseException): The exception to catch. Use this to set which exception and its subclasses to catch. limit ()
def __init__(self, on_exception=Exception, limit=5, interval=None, validator=None): self.attempts = 0 self._on_exception = on_exception self._setup_limit(limit) self._setup_interval(interval) self._setup_validator(validator)
1,025,725
Constructor. Args: watch_paths: A list of filesystem paths to watch for changes. on_changed: Callback to call when one or more changes to the watch path are detected. interval: The minimum interval at which to notify about changes (in seconds). recursive: Should the watch path be monitored recursively for changes?
def __init__(self, watch_paths, on_changed=None, interval=1.0, recursive=True): if isinstance(watch_paths, basestring): watch_paths = [watch_paths] watch_paths = [os.path.abspath(path) for path in watch_paths] for path in watch_paths: if not os.path.exists(path) or not os.path.isdir(path): raise MissingFolderError(path) self.watch_paths = watch_paths self.interval = interval * 1000.0 self.recursive = recursive self.periodic_callback = PeriodicCallback(self.check_fs_events, self.interval) self.on_changed = on_changed self.observer = Observer() for path in self.watch_paths: self.observer.schedule( WatcherEventHandler(self), path, self.recursive ) self.started = False self.fs_event_queue = Queue()
1,025,783
Determines whether an exception was thrown. Args: error_type: None: check without a specified exception; otherwise, the specified exception type. Return: Boolean
def threw(self, error_type=None): if not error_type: return True if len(self.exceptions) > 0 else False else: return uch.obj_in_list(self.exceptions, error_type)
1,026,183
Determines whether the specified exception is the ONLY exception thrown. Args: error_type: None: check without a specified exception; otherwise, the specified exception type. Return: Boolean
def alwaysThrew(self, error_type=None): #pylint: disable=invalid-name if self.callCount == 0: return False if not error_type: return True if len(self.exceptions) == self.callCount else False else: return uch.obj_in_list_always(self.exceptions, error_type)
1,026,184
execute by argument dictionary Args: args (dict): command line argument dictionary
def execute_by_options(args): if args['subcommand'] == 'sphinx': s = Sphinx(proj_info) if args['quickstart']: s.quickstart() elif args['gen_code_api']: s.gen_code_api() elif args['rst2html']: s.rst2html() pass elif args['subcommand'] == 'offline_dist': pod = PyOfflineDist() if args['freeze_deps']: pod.freeze_deps() elif args['download_deps']: pod.download_deps() elif args['install_deps']: pod.install_deps() elif args['clean_deps']: pod.clean_deps() elif args['mkbinary']: pod.pyinstaller_mkbinary(args['mkbinary']) elif args['clean_binary']: pod.clean_binary() pass
1,026,407
Find the first matched line, then replace it. Args: regex_tgtline (str): regular expression used to match the target line to_replace (str): line to use as the replacement
def editline_with_regex(self, regex_tgtline, to_replace): for idx, line in enumerate(self._swp_lines): mobj = re.match(regex_tgtline, line) if mobj: self._swp_lines[idx] = to_replace return
1,026,410
init project info Args: author_fakename (str): TODO author_truename (str): TODO email (str): TODO project_name (str): TODO project_version (str): TODO
def __init__(self, **kwinfo): self._author_fakename = getpass.getuser() self._author_truename = ProjectInfo.find_pakcage_info( 'author', SRC_FOLDER, PROJECT_NAME, '__init__.py') self._email = ProjectInfo.find_pakcage_info( 'email', SRC_FOLDER, PROJECT_NAME, '__init__.py') self._project_name = os.path.basename( os.path.dirname(os.path.realpath(__file__))) self._project_version = ProjectInfo.find_pakcage_info( 'version', SRC_FOLDER, PROJECT_NAME, '__init__.py') for key, info in kwinfo.items(): key = '_' + key setattr(self, key, info)
1,026,412
TODO: to be defined1. Args: proj_info (ProjectInfo): TODO
def __init__(self, proj_info): self._proj_info = proj_info self.__docfolder = DOC_FOLDER self.__htmlfolder = HTML_FOLDER self.conf_fpath = os.path.abspath( os.path.join(self.__docfolder, 'conf.py')) self.code_fdpath = os.path.abspath( os.path.join(SRC_FOLDER, self.proj_info.project_name)) self._sphinx_quickstart_cmd = [ 'sphinx-quickstart', self.__docfolder, '-p', self.proj_info.project_name, '-a', self.proj_info.author_fakename, '-v', self.proj_info.project_version, '-r', self.proj_info.project_version, '-l', 'en', '--ext-autodoc', '--makefile', '--quiet' ] self._sphinx_apidoc_cmd = [ 'sphinx-apidoc', self.code_fdpath, '-o', self.__docfolder, '-M', '--force' ] # sphinx-build -b html docs html self._sphinx_buildhtml_cmd = [ 'sphinx-build', '-b', 'html', self.__docfolder, self.__htmlfolder ] # make sure directories exist mkdir_exist(self.__docfolder) mkdir_exist(self.__htmlfolder)
1,026,414
Copies the google spreadsheet to the backup_name and folder specified. Args: backup_name (str): The name of the backup document to create. folder_key (Optional) (str): The key of a folder that the new copy will be moved to. folder_name (Optional) (str): Like folder_key, references the folder to move a backup to. If the folder can't be found, sheetsync will create it.
def backup(self, backup_name, folder_key=None, folder_name=None): folder = self._find_or_create_folder(folder_key, folder_name) drive_service = self.drive_service try: source_rsrc = drive_service.files().get(fileId=self.document_key).execute() except Exception as e: logger.exception("Google API error. %s", e) raise e backup = self._create_new_or_copy(source_doc=source_rsrc, target_name=backup_name, folder=folder, sheet_description="backup") backup_key = backup['id'] return backup_key
1,026,717
Equivalent to the inject method but will delete rows from the google spreadsheet if their key is not found in the input (raw_data) dictionary. Args: raw_data (dict): See inject method row_change_callback (Optional) (func): See inject method Returns: UpdateResults (object): See inject method
def sync(self, raw_data, row_change_callback=None): return self._update(raw_data, row_change_callback, delete_rows=True)
1,026,720
Checking and setting type to MODULE_FUNCTION Args: obj: ModuleType prop: FunctionType Return: Boolean Raise: prop_type_error: When the type of prop is not valid prop_in_obj_error: When prop is not in the obj(module/class) prop_is_func_error: When prop is not callable
def is_module_function(obj, prop): python_version = sys.version_info[0] if python_version == 3: unicode = str if prop and (isinstance(prop, str) or isinstance(prop, unicode)): #property if prop in dir(obj): if ( isinstance(getattr(obj, prop), FunctionType) or isinstance(getattr(obj, prop), BuiltinFunctionType) or inspect.ismethod(getattr(obj, prop)) ): #inspect.ismethod for python2.7 #isinstance(...) for python3.x return True else: ErrorHandler.prop_is_func_error(obj, prop) else: ErrorHandler.prop_in_obj_error(obj, prop) elif prop: ErrorHandler.prop_type_error(prop) return False
1,026,730
Checking and setting type to MODULE Args: obj: ModuleType / class Note: An instance will be treated as a Class Return: Boolean
def is_module(obj): return True if obj and isinstance(obj, ModuleType) or inspect.isclass(obj) else False
1,026,731
Validate the given config against the `Scheme`. Args: config (dict): The configuration to validate. Raises: errors.SchemeValidationError: The configuration fails validation against the `Schema`.
def validate(self, config): if not isinstance(config, dict): raise errors.SchemeValidationError( 'Scheme can only validate a dictionary config, but was given ' '{} (type: {})'.format(config, type(config)) ) for arg in self.args: # the option exists in the config if arg.name in config: arg.validate(config[arg.name]) # the option does not exist in the config else: # if the option is not required, then it is fine to omit. # otherwise, its omission constitutes a validation error. if arg.required: raise errors.SchemeValidationError( 'Option "{}" is required, but not found.'.format(arg.name) )
1,027,516
Cast a value to the type required by the option, if one is set. This is used to cast the string values gathered from environment variable into their required type. Args: value: The value to cast. Returns: The value casted to the expected type for the option.
def cast(self, value): # if there is no type set for the option, return the given # value unchanged. if self.type is None: return value # cast directly if self.type in (str, int, float): try: return self.type(value) except Exception as e: raise errors.BisonError( 'Failed to cast {} to {}'.format(value, self.type) ) from e # for bool, can't cast a string, since a string is truthy, # so we need to check the value. elif self.type == bool: return value.lower() == 'true' # the option type is currently not supported else: raise errors.BisonError('Unsupported type for casting: {}'.format(self.type))
1,027,520
Copy all values of scope into the class SinonGlobals Args: scope (eg. locals() or globals()) Return: SinonGlobals instance
def init(scope):
    class SinonGlobals(object):  # pylint: disable=too-few-public-methods
        pass

    global CPSCOPE  # pylint: disable=global-statement
    CPSCOPE = SinonGlobals()
    funcs = [obj for obj in scope.values() if isinstance(obj, FunctionType)]
    for func in funcs:
        setattr(CPSCOPE, func.__name__, func)
    return CPSCOPE
1,028,138
Constructor of SinonBase.

It creates the real base object, stores it in _queue (to keep it alive),
and returns a weakref proxy to it.

Args:
    obj: None / function / instance method / module / class
        Inspected target
    prop: None / string
        Inspected target when obj contains callable things
    func: function / instance method
        ONLY used by stub; it will replace the original target

Return:
    weakref proxy
def __new__(cls, obj=None, prop=None, func=None):
    new = super(SinonBase, cls).__new__(cls)
    if func:
        new.__init__(obj, prop, func)
    else:
        new.__init__(obj, prop)
    cls._queue.append(new)
    return weakref.proxy(new)
1,028,139
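The __new__ above returns a weakref.proxy rather than the object itself, while the class-level _queue keeps the real object alive; because the proxy is not an instance of the class, Python will not call __init__ automatically, which is why the original calls it explicitly inside __new__. A small self-contained illustration of that pattern (class and attribute names here are invented for the example):

import weakref

class Tracked(object):
    _queue = []  # keeps strong references so the proxies stay valid

    def __new__(cls, name):
        new = super(Tracked, cls).__new__(cls)
        new.name = name
        cls._queue.append(new)
        return weakref.proxy(new)

t = Tracked("spy-1")
print(t.name)               # "spy-1", accessed through the proxy
print(len(Tracked._queue))  # 1 - the real instance lives in _queue
Tracked._queue.clear()
# once the strong reference is gone, accessing `t.name` raises ReferenceError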
Create the real base.

Flow: __new__ => __init__ => set type based on arguments
    => check that the arguments are valid for that type => wrap the target

Args:
    obj: None / function / instance method / module / class
        Inspected target. If the target is None, a Pure() class is created.
    prop: None / string
        Inspected target when obj contains callable things
def __init__(self, obj=None, prop=None):
    if not hasattr(self, "args_type"):
        self.__set_type(obj, prop)
        self.obj, self.prop = obj, prop
        self.__check_lock()
        self.wrap2spy()
        self.is_in_queue = False
1,028,141
Triage the base type from the arguments.

There are four types of base: PURE, MODULE, MODULE_FUNCTION, FUNCTION.

Args:
    obj: None, FunctionType, ModuleType, Class, Instance
    prop: None, string
def __set_type(self, obj, prop):
    if TypeHandler.is_pure(obj, prop):
        self.args_type = "PURE"
        self.pure = SinonBase.Pure()
        setattr(self.pure, "func", Wrapper.empty_function)
        self.orig_func = None
    elif TypeHandler.is_module_function(obj, prop):
        self.args_type = "MODULE_FUNCTION"
        self.orig_func = None
    elif TypeHandler.is_function(obj):
        self.args_type = "FUNCTION"
        self.orig_func = None
    elif TypeHandler.is_module(obj):
        self.args_type = "MODULE"
    elif TypeHandler.is_instance(obj):
        obj = obj.__class__
        self.args_type = "MODULE"
1,028,142
Wrapping the inspector as a stub based on the type Args: customfunc: function that replaces the original Returns: function, the spy wrapper around the customfunc
def wrap2stub(self, customfunc):
    if self.args_type == "MODULE_FUNCTION":
        wrapper = Wrapper.wrap_spy(customfunc, self.obj)
        setattr(self.obj, self.prop, wrapper)
    elif self.args_type == "MODULE":
        wrapper = Wrapper.EmptyClass
        setattr(CPSCOPE, self.obj.__name__, wrapper)
    elif self.args_type == "FUNCTION":
        wrapper = Wrapper.wrap_spy(customfunc)
        setattr(CPSCOPE, self.obj.__name__, wrapper)
    elif self.args_type == "PURE":
        wrapper = Wrapper.wrap_spy(customfunc)
        setattr(self.pure, "func", wrapper)
    return wrapper
1,028,146
Set the data type of the cluster. Parameters: ----------- cluster_dtype : numpy.dtype or equivalent Defines the dtype of the cluster array.
def set_cluster_dtype(self, cluster_dtype):
    if not cluster_dtype:
        cluster_dtype = np.dtype([])
    else:
        cluster_dtype = np.dtype(cluster_dtype)
    cluster_descr = cluster_dtype.descr
    for dtype_name, dtype in self._default_cluster_descr:
        if self._cluster_fields_mapping[dtype_name] not in cluster_dtype.fields:
            cluster_descr.append((dtype_name, dtype))
    self._cluster_descr = cluster_descr
    self._init_arrays(size=0)
1,028,323
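To illustrate the dtype-merging step above in isolation — appending any default fields that the user-supplied dtype does not already define — a short numpy-only sketch (the field names are placeholders, not the clusterizer's real defaults):

import numpy as np

default_descr = [('n_hits', '<i4'), ('charge', '<f4')]
user_dtype = np.dtype([('x', '<f8'), ('n_hits', '<i4')])

merged_descr = user_dtype.descr
for name, dt in default_descr:
    if name not in user_dtype.fields:
        merged_descr.append((name, dt))

print(np.dtype(merged_descr))
# dtype([('x', '<f8'), ('n_hits', '<i4'), ('charge', '<f4')])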
Merge two dictionaries (or DotDicts) together. Args: d: The dictionary/DotDict to merge into. u: The source of the data to merge.
def _merge(d, u):
    for k, v in u.items():
        # if we have a mapping, recursively merge the values
        if isinstance(v, collections.Mapping):
            d[k] = _merge(d.get(k, {}), v)

        # if d (the dict to merge into) is a dict, just add the
        # value to the dict.
        elif isinstance(d, collections.MutableMapping):
            d[k] = v

        # otherwise if d (the dict to merge into) is not a dict (e.g. when
        # recursing into it, `d.get(k, {})` may not be a dict), then do what
        # `update` does and prefer the new value.
        #
        # this means that something like `{'foo': 1}` when updated with
        # `{'foo': {'bar': 1}}` would have the original value (`1`) overwritten
        # and would become: `{'foo': {'bar': 1}}`
        else:
            d = {k: v}
    return d
1,029,303
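A self-contained version of the recursive merge described above, using collections.abc (on Python 3.10+ the bare collections.Mapping alias used in the original no longer exists). The sample data is made up for the example:

from collections.abc import Mapping, MutableMapping

def merge(d, u):
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = merge(d.get(k, {}), v)
        elif isinstance(d, MutableMapping):
            d[k] = v
        else:
            # d was a scalar at this level; prefer the new nested value
            d = {k: v}
    return d

base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'port': 5433}, 'debug': True}
print(merge(base, override))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}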
Get a value from the `DotDict`.

The `key` parameter can either be a regular string key, e.g. "foo", or it
can be a string key with dot notation, e.g. "foo.bar.baz", to signify a
nested lookup.

The default value is returned if any level of the key's components is not
found.

Args:
    key (str): The key to get the value for.
    default: The return value should the given key not exist in the `DotDict`.
def get(self, key, default=None):
    # if there are no dots in the key, it's a normal get
    if key.count('.') == 0:
        return super(DotDict, self).get(key, default)

    # set the return value to the default
    value = default

    # split the key into the first component and the rest of
    # the components. the first component corresponds to this
    # DotDict. the remainder components correspond to any nested
    # DotDicts.
    first, remainder = key.split('.', 1)
    if first in self:
        value = super(DotDict, self).get(first, default)

        # if the value for the key at this level is a dictionary,
        # then pass the remainder to that DotDict.
        if isinstance(value, (dict, DotDict)):
            return DotDict(value).get(remainder, default)

    # TODO: support lists
    return value
1,029,307
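The dotted-key lookup above can be illustrated with a stripped-down, dict-only version (a sketch of the idea, not the real DotDict class):

def dotted_get(d, key, default=None):
    # "foo.bar.baz" walks nested dicts one component at a time
    if '.' not in key:
        return d.get(key, default)
    first, remainder = key.split('.', 1)
    value = d.get(first, default)
    if isinstance(value, dict):
        return dotted_get(value, remainder, default)
    return value

cfg = {'db': {'primary': {'host': '10.0.0.1'}}}
print(dotted_get(cfg, 'db.primary.host'))         # 10.0.0.1
print(dotted_get(cfg, 'db.replica.host', 'n/a'))  # n/a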
Remove a value from the `DotDict`. The `key` parameter can either be a regular string key, e.g. "foo", or it can be a string key with dot notation, e.g. "foo.bar.baz", to signify a nested element. If the key does not exist in the `DotDict`, it will continue silently. Args: key (str): The key to remove.
def delete(self, key):
    dct = self
    keys = key.split('.')
    last_key = keys[-1]
    for k in keys:
        # if the key is the last one, e.g. 'z' in 'x.y.z', try
        # to delete it from its dict.
        if k == last_key:
            del dct[k]
            break

        # if the dct is a DotDict, get the value for the key `k` from it.
        if isinstance(dct, DotDict):
            dct = super(DotDict, dct).__getitem__(k)
        # otherwise, just get the value from the default __getitem__
        # implementation.
        else:
            dct = dct.__getitem__(k)

        if not isinstance(dct, (DotDict, dict)):
            raise KeyError(
                'Subkey "{}" in "{}" invalid for deletion'.format(k, key)
            )
1,029,308
Set a value in the `Bison` configuration. Args: key (str): The configuration key to set a new value for. value: The value to set.
def set(self, key, value):
    # the configuration changes, so we invalidate the cached config
    self._full_config = None
    self._override[key] = value
1,029,376
Parse the configuration sources into `Bison`. Args: requires_cfg (bool): Specify whether or not parsing should fail if a config file is not found. (default: True)
def parse(self, requires_cfg=True):
    self._parse_default()
    self._parse_config(requires_cfg)
    self._parse_env()
1,029,377
Parse the configuration file, if one is configured, and add it to the `Bison` state. Args: requires_cfg (bool): Specify whether or not parsing should fail if a config file is not found. (default: True)
def _parse_config(self, requires_cfg=True):
    if len(self.config_paths) > 0:
        try:
            self._find_config()
        except BisonError:
            if not requires_cfg:
                return
            raise

        try:
            with open(self.config_file, 'r') as f:
                parsed = self._fmt_to_parser[self.config_format](f)
        except Exception as e:
            raise BisonError(
                'Failed to parse config file: {}'.format(self.config_file)
            ) from e

        # the configuration changes, so we invalidate the cached config
        self._full_config = None
        self._config = parsed
1,029,379
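The _fmt_to_parser lookup above maps a file format to a parser callable. A minimal standalone equivalent, restricted to JSON so it only needs the standard library (the mapping keys and file name are illustrative, not bison's real ones):

import json
import tempfile

fmt_to_parser = {
    'json': json.load,
    # e.g. 'yaml': yaml.safe_load, if PyYAML is available
}

# write a throwaway config file and parse it through the mapping
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'log': {'level': 'info'}}, f)
    path = f.name

with open(path, 'r') as f:
    parsed = fmt_to_parser['json'](f)

print(parsed)  # {'log': {'level': 'info'}}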
Checks if a node is the "end" keyword. Args: node: AST node. Returns: True if the node is the "end" keyword, otherwise False.
def is_end_node(node):
    return (isinstance(node, ast.Expr) and
            isinstance(node.value, ast.Name) and
            node.value.id == 'end')
1,030,148
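To see what is_end_node matches, one can parse a tiny snippet and walk its top-level statements; the snippet below is arbitrary example input:

import ast

def is_end_node(node):
    return (isinstance(node, ast.Expr)
            and isinstance(node.value, ast.Name)
            and node.value.id == 'end')

snippet = (
    "if x:\n"
    "    do_something()\n"
    "end\n"
)
tree = ast.parse(snippet)
print([is_end_node(n) for n in tree.body])
# [False, True] - the bare `end` expression statement is recognised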
Returns a list of bodies of a compound statement node. Args: node: AST node. Returns: A list of bodies of the node. If the given node does not represent a compound statement, an empty list is returned.
def get_compound_bodies(node):
    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):
        return [node.body]
    elif isinstance(node, (ast.If, ast.While, ast.For)):
        return [node.body, node.orelse]
    elif PY2 and isinstance(node, ast.TryFinally):
        return [node.body, node.finalbody]
    elif PY2 and isinstance(node, ast.TryExcept):
        return [node.body, node.orelse] + [h.body for h in node.handlers]
    elif PY3 and isinstance(node, ast.Try):
        return ([node.body, node.orelse, node.finalbody]
                + [h.body for h in node.handlers])
    end
    return []
1,030,149
Performs end-block check. Args: frame: A frame object of the module to be checked. Raises: SyntaxError: If check failed.
def check_end_blocks(frame):
    try:
        try:
            module_name = frame.f_globals['__name__']
        except KeyError:
            warnings.warn(
                'Can not get the source of an unknown module. '
                'End-of-block syntax check is skipped.', EndSyntaxWarning)
            return
        end
        filename = frame.f_globals.get('__file__', '<unknown>')
        try:
            source = inspect.getsource(sys.modules[module_name])
        except Exception:
            warnings.warn(
                'Can not get the source of module "%s". '
                'End-of-block syntax check is skipped.' % (module_name,),
                EndSyntaxWarning)
            return
        end
    finally:
        del frame
    end
    root = ast.parse(source)
    for node in ast.walk(root):
        bodies = get_compound_bodies(node)
        if not bodies:
            continue
        end
        # FIXME: This is an inaccurate hack to handle if-elif-else.
        if (isinstance(node, ast.If) and len(node.orelse) == 1 and
                isinstance(node.orelse[0], ast.If)):
            continue
        end
        # FIXME: This is an inaccurate hack to handle try-except-finally
        # statement which is parsed as ast.TryExcept in ast.TryFinally in
        # Python 2.
        if (PY2 and isinstance(node, ast.TryFinally) and len(node.body) == 1 and
                isinstance(node.body[0], ast.TryExcept)):
            continue
        end
        for body in bodies:
            skip_next = False
            for i, child in enumerate(body):
                if skip_next:
                    skip_next = False
                elif is_end_node(child):
                    raise SyntaxError(
                        '"end" does not close a block.',
                        [filename, child.lineno, child.col_offset,
                         source.splitlines()[child.lineno - 1] + '\n'])
                elif get_compound_bodies(child):
                    try:
                        ok = is_end_node(body[i + 1])
                    except IndexError:
                        ok = False
                    end
                    if not ok:
                        raise SyntaxError(
                            'This block is not closed with "end".',
                            [filename, child.lineno, child.col_offset,
                             source.splitlines()[child.lineno - 1] + '\n'])
                    end
                    skip_next = True
                end
            end
        end
    end
1,030,150
Internal: Do not call. Refreshes the status of every tracked job in
self.resources by querying the scheduler; nothing is returned.

Args:
    self
def _status(self):
    job_id_list = ' '.join(self.resources.keys())
    jobs_missing = list(self.resources.keys())
    retcode, stdout, stderr = self.channel.execute_wait("qstat {0}".format(job_id_list), 3)
    for line in stdout.split('\n'):
        parts = line.split()
        if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
            continue
        job_id = parts[0]
        status = translate_table.get(parts[4], 'UNKNOWN')
        self.resources[job_id]['status'] = status
        jobs_missing.remove(job_id)

    # qstat does not report on jobs that are not running. So we are filling in the
    # blanks for missing jobs; we might lose some information about why the jobs failed.
    for missing_job in jobs_missing:
        if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
            self.resources[missing_job]['status'] = translate_table['E']
1,030,155
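The status loop above just splits each qstat line and maps the fifth column through translate_table. A standalone sketch with fabricated sample output and an illustrative translate_table (the real table and column layout depend on the scheduler):

translate_table = {'r': 'RUNNING', 'qw': 'PENDING', 'E': 'COMPLETED'}

sample_stdout = (
    "job-ID  prior  name  user   state  submit/start at\n"
    "---------------------------------------------------\n"
    "101     0.5    sim   alice  r      01/01 10:00\n"
    "102     0.5    sim   alice  qw     01/01 10:05\n"
)

statuses = {}
for line in sample_stdout.split('\n'):
    parts = line.split()
    if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
        continue
    statuses[parts[0]] = translate_table.get(parts[4], 'UNKNOWN')

print(statuses)  # {'101': 'RUNNING', '102': 'PENDING'}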
Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False.
def cancel(self, job_ids):
    job_id_list = ' '.join(job_ids)
    retcode, stdout, stderr = self.channel.execute_wait("qdel {0}".format(job_id_list), 3)
    rets = None
    if retcode == 0:
        for jid in job_ids:
            self.resources[jid]['status'] = translate_table['E']  # Setting state to exiting
        rets = [True for i in job_ids]
    else:
        rets = [False for i in job_ids]
    return rets
1,030,157
Compute the gradient. Args: diff (`array-like`): [`m`, `m`] matrix. `D` - `d` d (`array-like`): [`m`, `m`] matrix. coords (`array-like`): [`m`, `n`] matrix. Returns: `np.array`: Gradient, shape [`m`, `n`].
def _gradient(self, diff, d, coords):
    denom = np.copy(d)
    denom[denom == 0] = 1e-5
    with np.errstate(divide='ignore', invalid='ignore'):
        K = -2 * diff / denom
        K[np.isnan(K)] = 0
    g = np.empty_like(coords)
    for n in range(self.n):
        for i in range(self.m):
            # Vectorised version of (~70 times faster)
            # for j in range(self.m):
            #     delta_g = ((coords[i, n] - coords[j, n]) * K[i, j]).sum()
            #     g[i, n] += delta_g
            g[i, n] = ((coords[i, n] - coords[:, n]) * K[i, :]).sum()
    return g
1,030,278
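The inner-loop comment above claims the vectorised form matches the explicit double loop; a small self-contained check with random data (array shapes follow the docstring: diff and d are m x m, coords is m x n):

import numpy as np

rng = np.random.default_rng(0)
m, n = 6, 2
coords = rng.normal(size=(m, n))
d = np.abs(rng.normal(size=(m, m))) + 1.0
diff = rng.normal(size=(m, m))

K = -2 * diff / d

# explicit triple loop, directly from the commented-out version
g_loop = np.zeros((m, n))
for dim in range(n):
    for i in range(m):
        for j in range(m):
            g_loop[i, dim] += (coords[i, dim] - coords[j, dim]) * K[i, j]

# vectorised over j, as in the function above
g_vec = np.empty((m, n))
for dim in range(n):
    for i in range(m):
        g_vec[i, dim] = ((coords[i, dim] - coords[:, dim]) * K[i, :]).sum()

print(np.allclose(g_loop, g_vec))  # True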