def _write(self, s, s_length=None, flush=False, ignore_overflow=False,
           err_msg=None):
    if not ignore_overflow:
        s_length = len(s) if s_length is None else s_length
        if err_msg is None:
            err_msg = (
                "Terminal has {} columns; attempted to write "
                "a string {} of length {}.".format(
                    self.columns, repr(s), s_length)
            )
        ensure(s_length <= self.columns, WidthOverflowError, err_msg)
    self.cursor.write(s)
    if flush:
        self.cursor.flush()
Write ``s``

:type s: str|unicode
:param s: String to write
:param s_length: Custom length of ``s``
:param flush: Set this to flush the terminal stream after writing
:param ignore_overflow: Set this to ignore if ``s`` will exceed the
    terminal's width
:param err_msg: The error message given to WidthOverflowError if it is
    triggered
def get_text(nodelist):
    value = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            value.append(node.data)
    return ''.join(value)
Get the value from a text node.
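A quick illustration with the standard library's xml.dom.minidom (the XML snippet is made up):

    import xml.dom.minidom

    dom = xml.dom.minidom.parseString('<SID>0000000000000001</SID>')
    sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
    print(sid)  # => '0000000000000001'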
def _request(self, url, params=None, timeout=10):
    rsp = self._session.get(url, params=params, timeout=timeout)
    rsp.raise_for_status()
    return rsp.text.strip()
Send a request with parameters.
def _login_request(self, username=None, secret=None):
    url = 'http://' + self._host + '/login_sid.lua'
    params = {}
    if username:
        params['username'] = username
    if secret:
        params['response'] = secret
    plain = self._request(url, params)
    dom = xml.dom.minidom.parseString(plain)
    sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
    challenge = get_text(
        dom.getElementsByTagName('Challenge')[0].childNodes)
    return (sid, challenge)
Send a login request with parameters.
def _logout_request(self):
    _LOGGER.debug('logout')
    url = 'http://' + self._host + '/login_sid.lua'
    params = {
        'security:command/logout': '1',
        'sid': self._sid
    }
    self._request(url, params)
Send a logout request.
def _create_login_secret(challenge, password):
    to_hash = (challenge + '-' + password).encode('UTF-16LE')
    hashed = hashlib.md5(to_hash).hexdigest()
    return '{0}-{1}'.format(challenge, hashed)
Create a login secret.
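A standalone sketch of the challenge-response scheme implemented above, with made-up challenge and password values: the secret is the challenge joined to the MD5 hash of the UTF-16LE encoding of 'challenge-password'.

    import hashlib

    challenge, password = '1234567z', 'secret'  # hypothetical values
    to_hash = (challenge + '-' + password).encode('UTF-16LE')
    secret = '{0}-{1}'.format(challenge, hashlib.md5(to_hash).hexdigest())
    print(secret)  # => '1234567z-' followed by 32 hex digits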
def _aha_request(self, cmd, ain=None, param=None, rf=str):
    url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
    params = {
        'switchcmd': cmd,
        'sid': self._sid
    }
    if param:
        params['param'] = param
    if ain:
        params['ain'] = ain
    plain = self._request(url, params)
    if plain == 'inval':
        raise InvalidError
    if rf == bool:
        return bool(int(plain))
    return rf(plain)
Send an AHA request.
def login(self):
    try:
        (sid, challenge) = self._login_request()
        if sid == '0000000000000000':
            secret = self._create_login_secret(challenge, self._password)
            (sid2, challenge) = self._login_request(username=self._user,
                                                    secret=secret)
            if sid2 == '0000000000000000':
                _LOGGER.warning("login failed %s", sid2)
                raise LoginError(self._user)
            self._sid = sid2
    except xml.parsers.expat.ExpatError:
        raise LoginError(self._user)
Login and get a valid session ID.
def get_device_elements(self):
    plain = self._aha_request('getdevicelistinfos')
    dom = xml.dom.minidom.parseString(plain)
    _LOGGER.debug(dom)
    return dom.getElementsByTagName("device")
Get the DOM elements for the device list.
def get_device_element(self, ain):
    elements = self.get_device_elements()
    for element in elements:
        if element.getAttribute('identifier') == ain:
            return element
    return None
Get the DOM element for the specified device.
def get_devices(self):
    devices = []
    for element in self.get_device_elements():
        device = FritzhomeDevice(self, node=element)
        devices.append(device)
    return devices
Get the list of all known devices.
def get_device_by_ain(self, ain):
    devices = self.get_devices()
    for device in devices:
        if device.ain == ain:
            return device
Returns a device specified by the AIN.
def set_target_temperature(self, ain, temperature):
    param = 16 + ((float(temperature) - 8) * 2)
    if param < min(range(16, 56)):
        param = 253
    elif param > max(range(16, 56)):
        param = 254
    self._aha_request('sethkrtsoll', ain=ain, param=int(param))
Set the thermostat target temperature.
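The encoding above maps degrees Celsius to half-degree steps via param = 16 + (temp - 8) * 2, clamping out-of-range values to the special codes 253 ('off') and 254 ('on'); this is why the get_hkr_state method below maps a target of 126.5 (253/2) to 'off' and 127.0 (254/2) to 'on'. A quick check of the arithmetic:

    for temp in (8.0, 20.0, 27.5):
        print(temp, int(16 + (temp - 8) * 2))
    # 8.0 16
    # 20.0 40
    # 27.5 55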
def update(self):
    node = self._fritz.get_device_element(self.ain)
    self._update_from_node(node)
Update the device values.
def get_hkr_state(self):
    self.update()
    try:
        return {
            126.5: 'off',
            127.0: 'on',
            self.eco_temperature: 'eco',
            self.comfort_temperature: 'comfort'
        }[self.target_temperature]
    except KeyError:
        return 'manual'
Get the thermostat state.
def set_hkr_state(self, state):
    try:
        value = {
            'off': 0,
            'on': 100,
            'eco': self.eco_temperature,
            'comfort': self.comfort_temperature
        }[state]
    except KeyError:
        # Unknown state name: do nothing
        return
    self.set_target_temperature(value)
Set the state of the thermostat. Possible values for state are: 'on', 'off', 'comfort', 'eco'.
def write(self, s):
    should_write_s = os.getenv('PROGRESSIVE_NOWRITE') != "True"
    if should_write_s:
        self._stream.write(s)
Writes ``s`` to the terminal output stream. Writes can be disabled by setting the environment variable `PROGRESSIVE_NOWRITE` to `'True'`.
def save(self):
    self.write(self.term.save)
    self._saved = True
Saves current cursor position, so that it can be restored later
def newline(self):
    self.write(self.term.move_down)
    self.write(self.term.clear_bol)
Effects a newline by moving the cursor down and clearing to the beginning of the line.
def cmap_powerlaw_adjust(cmap, a):
    if a < 0.:
        return cmap
    cdict = copy.copy(cmap._segmentdata)
    fn = lambda x: (x[0] ** a, x[1], x[2])
    for key in ('red', 'green', 'blue'):
        # list() keeps this working on Python 3, where map() is lazy
        cdict[key] = list(map(fn, cdict[key]))
        cdict[key].sort()
        assert cdict[key][0][0] >= 0 and cdict[key][-1][0] <= 1, \
            "Resulting indices extend out of the [0, 1] segment."
    return colors.LinearSegmentedColormap('colormap', cdict, 1024)
Returns a new colormap based on the one given but adjusted via
power-law, `newcmap = oldcmap**a`.

:param cmap: colormap instance (e.g., cm.jet)
:param a: power
def cmap_center_adjust(cmap, center_ratio):
    if not (0. < center_ratio < 1.):
        return cmap
    a = math.log(center_ratio) / math.log(0.5)
    return cmap_powerlaw_adjust(cmap, a)
Returns a new colormap based on the one given but adjusted so that the
old center point is moved higher (center_ratio > 0.5) or lower
(center_ratio < 0.5).

:param cmap: colormap instance (e.g., cm.jet)
:param center_ratio: relative position (between 0 and 1) at which the
    old center (0.5) should sit
def cmap_center_point_adjust(cmap, range, center):
    if not ((range[0] < center) and (center < range[1])):
        return cmap
    return cmap_center_adjust(
        cmap, abs(center - range[0]) / abs(range[1] - range[0]))
Converts `center` to a ratio (between 0 and 1) of the given `range` and
calls cmap_center_adjust(); returns the adjusted colormap.

:param cmap: colormap instance
:param range: Tuple of (min, max)
:param center: New cmap center
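A minimal usage sketch, assuming matplotlib is available and `data` is a hypothetical array whose interesting midpoint is 0.0:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm

    data = np.random.randn(50, 50)  # hypothetical data
    cmap = cmap_center_point_adjust(cm.jet, (data.min(), data.max()), 0.0)
    plt.imshow(data, cmap=cmap)
    plt.colorbar()
    plt.show()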
def attach_db(self, db):
    if db is not None:
        if isinstance(db, basestring):
            db = gffutils.FeatureDB(db)
        if not isinstance(db, gffutils.FeatureDB):
            raise ValueError(
                "`db` must be a filename or a gffutils.FeatureDB")
        self._kwargs['db'] = db
    self.db = db
Attach a gffutils.FeatureDB for access to features.

Useful if you want to attach a db after this instance has already been
created.

Parameters
----------
db : gffutils.FeatureDB
def features(self, ignore_unknown=False):
    if not self.db:
        raise ValueError("Please attach a gffutils.FeatureDB")
    for i in self.data.index:
        try:
            yield gffutils.helpers.asinterval(self.db[i])
        except gffutils.FeatureNotFoundError:
            if ignore_unknown:
                continue
            else:
                raise gffutils.FeatureNotFoundError('%s not found' % i)
Generator of features.

If a gffutils.FeatureDB is attached, returns a pybedtools.Interval for
every feature in the dataframe's index.

Parameters
----------
ignore_unknown : bool
    If True, silently ignores features that are not found in the db.
def reindex_to(self, x, attribute="Name"):
    names = [i[attribute] for i in x]
    new = self.copy()
    new.data = new.data.reindex(names)
    return new
Returns a copy that only has rows corresponding to feature names in x.

Parameters
----------
x : str or pybedtools.BedTool
    BED, GFF, GTF, or VCF where the "Name" field (that is, the value
    returned by feature['Name']) or any arbitrary attribute
attribute : str
    Attribute containing the name of the feature to use as the index.
def five_prime(self, upstream=1, downstream=0):
    return pybedtools.BedTool(self.features())\
        .each(featurefuncs.five_prime, upstream, downstream)\
        .saveas()
Creates a BED/GFF file of the 5' end of each feature represented in the
table and returns the resulting pybedtools.BedTool object.

Needs an attached database.

Parameters
----------
upstream, downstream : int
    Number of basepairs up and downstream to include
def align_with(self, other):
    return self.__class__(self.data.reindex_like(other), **self._kwargs)
Align the dataframe's index with another.
def strip_unknown_features(self):
    if not self.db:
        return self
    ind = []
    for i, gene_id in enumerate(self.data.index):
        try:
            self.db[gene_id]
            ind.append(i)
        except gffutils.FeatureNotFoundError:
            pass
    ind = np.array(ind)
    return self.__class__(self.data.ix[ind], **self._kwargs)
Remove features not found in the `gffutils.FeatureDB`. This will typically include 'ambiguous', 'no_feature', etc, but can also be useful if the database was created from a different one than was used to create the table.
def changed(self, thresh=0.05, idx=True):
    ind = self.data[self.pval_column] <= thresh
    if idx:
        return ind
    return self[ind]
Changed features. {threshdoc}
def unchanged(self, thresh=0.05, idx=True):
    ind = (
        (self.data[self.pval_column] > thresh)
        | np.isnan(self.data[self.pval_column])
    )
    if idx:
        return ind
    return self[ind]
Unchanged features. {threshdoc}
def enriched(self, thresh=0.05, idx=True):
    return self.upregulated(thresh=thresh, idx=idx)
Enriched features. {threshdoc}
def upregulated(self, thresh=0.05, idx=True):
    ind = (
        (self.data[self.pval_column] <= thresh)
        & (self.data[self.lfc_column] > 0)
    )
    if idx:
        return ind
    return self[ind]
Upregulated features. {threshdoc}
def disenriched(self, thresh=0.05, idx=True):
    return self.downregulated(thresh=thresh, idx=idx)
Disenriched features. {threshdoc}
def ma_plot(self, thresh, up_kwargs=None, dn_kwargs=None,
            zero_line=None, **kwargs):
    genes_to_highlight = kwargs.pop('genes_to_highlight', [])
    genes_to_highlight.append(
        (self.upregulated(thresh), up_kwargs or dict(color='r')))
    genes_to_highlight.append(
        (self.downregulated(thresh), dn_kwargs or dict(color='b')))
    if zero_line is None:
        zero_line = {}
    x = self.mean_column
    y = self.lfc_column
    if 'xfunc' not in kwargs:
        kwargs['xfunc'] = np.log
    ax = self.scatter(
        x=x,
        y=y,
        genes_to_highlight=genes_to_highlight,
        **kwargs)
    if zero_line:
        ax.axhline(0, **zero_line)
    return ax
MA plot.

Plots the average read count across treatments (x-axis) vs the log2
fold change (y-axis).

Additional kwargs are passed to self.scatter (useful ones might include
`genes_to_highlight`).

Parameters
----------
thresh : float
    Features with values <= `thresh` will be highlighted in the plot.
up_kwargs, dn_kwargs : None or dict
    Kwargs passed to matplotlib's scatter(), used for styling up/down
    regulated features (defined by `thresh` and `col`)
zero_line : None or dict
    Kwargs passed to matplotlib.axhline(0).
def autosql_file(self):
    fn = pybedtools.BedTool._tmp()
    AUTOSQL = dedent(
        """
        table example
        "output from DESeq"
        (
        string chrom; "chromosome"
        uint chromStart; "start coord"
        uint chromEnd; "stop coord"
        string name; "name of feature"
        uint score; "always zero"
        char[1] strand; "+ or - for strand"
        uint thickStart; "Coding region start"
        uint thickEnd; "Coding region end"
        uint reserved; "color according to score"
        string padj; "DESeq adjusted p value"
        string pval; "DESeq raw p value"
        string logfoldchange; "DESeq log2 fold change"
        string basemeana; "DESeq baseMeanA"
        string basemeanb; "DESeq baseMeanB"
        )
        """)
    fout = open(fn, 'w')
    fout.write(AUTOSQL)
    fout.close()
    return fn
Generate the autosql for DESeq results (to create bigBed).

Returns a temp filename containing the autosql defining the extra
fields.

This is for creating bigBed files from BED files created by
colormapped_bed. When a user clicks on a feature, the DESeq results
will be reported.
def _local_count(reader, feature, stranded=False):
    if isinstance(feature, basestring):
        feature = helpers.tointerval(feature)
    if stranded:
        strand = feature.strand
    else:
        strand = '.'
    count = 0
    for al in reader[feature]:
        if stranded and al.strand != strand:
            continue
        count += 1
    return count
The count of genomic signal (typically BED features) found within an
interval.

Usually this only makes sense for BED or BAM (not bigWig) files.

:param feature: pybedtools.Interval object
:param stranded: If `stranded=True`, then only counts signal on the
    same strand as `feature`.
def _array_parallel(fn, cls, genelist, chunksize=250, processes=1, **kwargs):
    pool = multiprocessing.Pool(processes)
    chunks = list(chunker(genelist, chunksize))
    # pool.map can only pass a single argument to the mapped function,
    # so you need this trick for passing multiple arguments; idea from
    # http://stackoverflow.com/questions/5442910/
    # python-multiprocessing-pool-map-for-multiple-arguments
    results = pool.map(
        func=_array_star,
        iterable=itertools.izip(
            itertools.repeat(fn),
            itertools.repeat(cls),
            chunks,
            itertools.repeat(kwargs)))
    pool.close()
    pool.join()
    return results
Returns an array of genes in `genelist`, using `bins` bins.

`genelist` is a list of pybedtools.Interval objects.

Splits `genelist` into pieces of size `chunksize`, creating an array
for each chunk and merging the results (a sketch of the assumed
`chunker` helper follows).

A chunksize of 25-100 seems to work well on 8 cores.
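The `chunker` helper used above is not shown in this snippet; a minimal sketch of what it presumably does (split an iterable into fixed-size pieces):

    import itertools

    def chunker(iterable, size):
        it = iter(iterable)
        while True:
            chunk = list(itertools.islice(it, size))
            if not chunk:
                break
            yield chunk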
def _array_star(args):
    fn, cls, genelist, kwargs = args
    return _array(fn, cls, genelist, **kwargs)
Unpacks the tuple `args` and calls _array. Needed to pass multiple args to a pool.map-ed function
def _array(fn, cls, genelist, **kwargs):
    reader = cls(fn)
    _local_coverage_func = cls.local_coverage
    biglist = []
    if 'bins' in kwargs:
        if isinstance(kwargs['bins'], int):
            kwargs['bins'] = [kwargs['bins']]
    for gene in genelist:
        if not isinstance(gene, (list, tuple)):
            gene = [gene]
        coverage_x, coverage_y = _local_coverage_func(
            reader, gene, **kwargs)
        biglist.append(coverage_y)
    return biglist
Returns a "meta-feature" array, with len(genelist) rows and `bins` cols. Each row contains the number of reads falling in each bin of that row's modified feature.
def find_spelling(n):
    r = 0
    d = n - 1
    # divmod used for large numbers
    quotient, remainder = divmod(d, 2)
    # while we can still divide 2's into n-1...
    while remainder != 1:
        r += 1
        d = quotient  # previous quotient before we overwrite it
        quotient, remainder = divmod(d, 2)
    return r, d
Finds r, d s.t. n-1 = 2^r * d
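For example, n = 13 gives n - 1 = 12 = 2**2 * 3:

    print(find_spelling(13))  # => (2, 3)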
def probably_prime(n, k=10):
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    r, d = find_spelling(n)
    for check in range(k):
        a = random.randint(2, n - 1)
        x = pow(a, d, n)  # a^d % n
        if x == 1 or x == n - 1:
            continue
        for i in range(r):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False
    return True
Miller-Rabin primality test.

Input:
    n > 3
    k: accuracy of test
Output:
    True if n is "probably prime", False if it is composite

From pseudocode at
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
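A quick sanity check (the result is probabilistic, but reliable at k=10):

    print(probably_prime(97))  # => True
    print(probably_prime(91))  # => False (91 = 7 * 13)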
def _find_next_prime(N):
    def is_prime(n):
        if n % 2 == 0:
            return False
        i = 3
        while i * i <= n:
            if n % i:
                i += 2
            else:
                return False
        return True
    if N < 3:
        return 2
    if N % 2 == 0:
        N += 1
    for n in range(N, 2 * N, 2):
        if is_prime(n):
            return n
    raise AssertionError(
        "Failed to find a prime number between {0} and {1}...".format(
            N, 2 * N))
Find next prime >= N
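By Bertrand's postulate there is always a prime between N and 2N, so the loop terminates. For example:

    print(_find_next_prime(14))  # => 17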
def save(c, prefix, relative_paths=True):
    dirname = os.path.dirname(prefix)
    pybedtools.BedTool(c.features).saveas(prefix + '.intervals')

    def usepath(f):
        if relative_paths:
            return os.path.relpath(f, start=dirname)
        else:
            return os.path.abspath(f)

    with open(prefix + '.info', 'w') as fout:
        info = {
            'ip_bam': usepath(c.ip.fn),
            'control_bam': usepath(c.control.fn),
            'array_kwargs': c.array_kwargs,
            'dbfn': usepath(c.dbfn),
            'browser_local_coverage_kwargs': c.browser_local_coverage_kwargs,
            'relative_paths': relative_paths,
        }
        fout.write(yaml.dump(info, default_flow_style=False))
    np.savez(
        prefix,
        diffed_array=c.diffed_array,
        ip_array=c.ip_array,
        control_array=c.control_array
    )
Save data from a Chipseq object.

Parameters
----------
c : Chipseq object
    Chipseq object, most likely after calling the `diffed_array` method
prefix : str
    Prefix, including any leading directory paths, to save the data.
relative_paths : bool
    If True (default), then the path names in the `prefix.info` file
    will be relative to `prefix`. Otherwise, they will be absolute.

The following files will be created:

:prefix.intervals:
    A BED file (or GFF, GTF, or VCF as appropriate) of the features
    used for the array

:prefix.info:
    A YAML-format file indicating the IP and control BAM files, any
    array kwargs, the database filename, and any minibrowser local
    coverage args. These are all needed to reconstruct a new Chipseq
    object. Path names will be relative to `prefix`.

:prefix.npz:
    A NumPy .npz file with keys 'diffed_array', 'ip_array', and
    'control_array'
def xcorr(x, y, maxlags):
    xlen = len(x)
    ylen = len(y)
    assert xlen == ylen
    c = np.correlate(x, y, mode=2)
    # normalize
    c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
    lags = np.arange(-maxlags, maxlags + 1)
    c = c[xlen - 1 - maxlags:xlen + maxlags]
    return c
Streamlined version of matplotlib's `xcorr`, without the plots.

:param x, y: NumPy arrays to cross-correlate
:param maxlags: Max number of lags; result will be `2*maxlags+1` in
    length
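For instance, a signal cross-correlated with itself peaks at lag 0 (assuming NumPy):

    import numpy as np

    x = np.sin(np.linspace(0, 10, 100))
    c = xcorr(x, x, maxlags=5)
    print(len(c))        # => 11, i.e. 2*maxlags + 1
    print(np.argmax(c))  # => 5, the zero-lag position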
def callback(self, event):
    artist = event.artist
    ind = artist.ind
    limit = 5
    browser = True
    if len(event.ind) > limit:
        print "more than %s genes selected; not spawning browsers" % limit
        browser = False
    for i in event.ind:
        feature = artist.features[ind[i]]
        print feature,
        if browser:
            self.minibrowser.plot(feature)
Callback function to spawn a mini-browser when a feature is clicked.
def observe(self, event, fn):
    iscoroutine = asyncio.iscoroutinefunction(fn)
    if not iscoroutine and not isfunction(fn):
        raise TypeError('paco: fn param must be a callable '
                        'object or coroutine function')

    observers = self._pool.get(event)
    if not observers:
        observers = self._pool[event] = []

    # Register the observer
    observers.append(fn if iscoroutine else coroutine_wrapper(fn))
Arguments:
    event (str): event to subscribe.
    fn (function|coroutinefunction): function to trigger.

Raises:
    TypeError: if fn argument is not valid
def remove(self, event=None):
    observers = self._pool.get(event)
    if observers:
        self._pool[event] = []
Remove all the registered observers for the given event name.

Arguments:
    event (str): event name to remove.
def trigger(self, event, *args, **kw):
    observers = self._pool.get(event)

    # If no observers registered for the event, do no-op
    if not observers or len(observers) == 0:
        return None

    # Trigger observers coroutines in FIFO sequentially
    for fn in observers:
        # Review: perhaps this should not wait
        yield from fn(*args, **kw)
Triggers event observers for the given event name, passing custom variadic arguments.
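A hypothetical end-to-end sketch of this observer pool (the `EventHub` class name is assumed; only the `observe` and `trigger` methods from above are used):

    import asyncio

    async def on_saved(record):
        print('saved:', record)

    hub = EventHub()  # hypothetical container exposing observe/trigger
    hub.observe('saved', on_saved)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(hub.trigger('saved', {'id': 1}))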
def error(self, s, pos):
    print("Lexical error:")
    print("%s" % s[:pos + 10])  # + 10 for trailing context
    print("%s^" % (" " * (pos - 1)))
    for t in self.rv:
        print(t)
    raise SystemExit
Show text and a caret under that. For example::

    x = 2y + z
    ^
def until(coro, coro_test, assert_coro=None, *args, **kw):
    @asyncio.coroutine
    def negate(value):
        return not value

    # Run while the test is falsy; honour a user-supplied assertion,
    # defaulting to negation (the inverse of `whilst`). Note the inner
    # function is renamed so it no longer shadows the parameter.
    assert_coro = assert_coro or negate

    return (yield from whilst(coro, coro_test,
                              assert_coro=assert_coro, *args, **kw))
Repeatedly call `coro` coroutine function until `coro_test` returns
`True`.

This function is the inverse of `paco.whilst()`.

This function is a coroutine.

Arguments:
    coro (coroutinefunction): coroutine function to execute.
    coro_test (coroutinefunction): coroutine function to test.
    assert_coro (coroutinefunction): optional assertion coroutine used
        to determine if the test passed or not.
    *args (mixed): optional variadic arguments to pass to `coro`
        function.

Raises:
    TypeError: if input arguments are invalid.

Returns:
    list: result values returned by `coro`.

Usage::

    calls = 0

    async def task():
        nonlocal calls
        calls += 1
        return calls

    async def calls_gt_4():
        return calls > 4

    await paco.until(task, calls_gt_4)
    # => [1, 2, 3, 4, 5]
def compose(*coros):
    # Make list to inherit built-in type methods
    coros = list(coros)

    @asyncio.coroutine
    def reducer(acc, coro):
        return (yield from coro(acc))

    @asyncio.coroutine
    def wrapper(acc):
        return (yield from reduce(reducer, coros,
                                  initializer=acc, right=True))

    return wrapper
Creates a coroutine function based on the composition of the passed
coroutine functions.

Each function consumes the yielded result of the coroutine that
follows. Composing coroutine functions f(), g(), and h() would produce
the result of f(g(h())).

Arguments:
    *coros (coroutinefunction): variadic coroutine functions to
        compose.

Raises:
    RuntimeError: if cannot execute a coroutine function.

Returns:
    coroutinefunction

Usage::

    async def sum_1(num):
        return num + 1

    async def mul_2(num):
        return num * 2

    coro = paco.compose(sum_1, mul_2, sum_1)
    await coro(2)
    # => 7
def add_config():
    genius_key = input('Enter Genius key : ')
    bing_key = input('Enter Bing key : ')
    CONFIG['keys']['bing_key'] = bing_key
    CONFIG['keys']['genius_key'] = genius_key
    with open(config_path, 'w') as configfile:
        CONFIG.write(configfile)
Prompts the user for API keys and adds them to an .ini file stored in the same location as the script.
def get_tracks_from_album(album_name):
    '''
    Gets tracks from an album using Spotify's API
    '''
    spotify = spotipy.Spotify()
    album = spotify.search(q='album:' + album_name, limit=1)
    album_id = album['tracks']['items'][0]['album']['id']
    results = spotify.album_tracks(album_id=str(album_id))
    songs = []
    for items in results['items']:
        songs.append(items['name'])
    return songs
Gets tracks from an album using Spotify's API
def prompt(youtube_list):
    '''
    Prompts for song number from list of songs
    '''
    option = int(input('\nEnter song number > '))
    try:
        song_url = list(youtube_list.values())[option - 1]
        song_title = list(youtube_list.keys())[option - 1]
    except IndexError:
        log.log_error('Invalid Input')
        exit()
    system('clear')
    print('Download Song: ')
    print(song_title)
    print('Y/n?')
    confirm = input('>')
    if confirm == '' or confirm.lower() == 'y':
        pass
    elif confirm.lower() == 'n':
        exit()
    else:
        log.log_error('Invalid Input')
        exit()
    return song_url, song_title
Prompts for song number from list of songs
def getRawReportDescriptor(self):
    descriptor = _hidraw_report_descriptor()
    size = ctypes.c_uint()
    self._ioctl(_HIDIOCGRDESCSIZE, size, True)
    descriptor.size = size
    self._ioctl(_HIDIOCGRDESC, descriptor, True)
    return ''.join(chr(x) for x in descriptor.value[:size.value])
Return a binary string containing the raw HID report descriptor.
def getInfo(self):
    devinfo = _hidraw_devinfo()
    self._ioctl(_HIDIOCGRAWINFO, devinfo, True)
    return DevInfo(devinfo.bustype, devinfo.vendor, devinfo.product)
Returns a DevInfo instance, a named tuple with the following items:

- bustype: one of BUS_USB, BUS_HIL, BUS_BLUETOOTH or BUS_VIRTUAL
- vendor: device's vendor number
- product: device's product number
def getName(self, length=512):
    name = ctypes.create_string_buffer(length)
    self._ioctl(_HIDIOCGRAWNAME(length), name, True)
    return name.value.decode('UTF-8')
Returns the device name as a unicode object.
def getPhysicalAddress(self, length=512):
    name = ctypes.create_string_buffer(length)
    self._ioctl(_HIDIOCGRAWPHYS(length), name, True)
    return name.value
Returns the device physical address as a string. See the hidraw documentation for the meaning of the value, as it depends on the device's bus type.
def sendFeatureReport(self, report, report_num=0):
    length = len(report) + 1
    buf = bytearray(length)
    buf[0] = report_num
    buf[1:] = report
    self._ioctl(
        _HIDIOCSFEATURE(length),
        (ctypes.c_char * length).from_buffer(buf),
        True,
    )
Send a feature report.
def getFeatureReport(self, report_num=0, length=63):
    length += 1
    buf = bytearray(length)
    buf[0] = report_num
    self._ioctl(
        _HIDIOCGFEATURE(length),
        (ctypes.c_char * length).from_buffer(buf),
        True,
    )
    return buf
Receive a feature report. Blocks, unless you configured the provided file (descriptor) to be non-blocking.
def every(coro, iterable, limit=1, loop=None):
    assert_corofunction(coro=coro)
    assert_iter(iterable=iterable)

    # Reduced accumulator value
    passes = True

    # Handle empty iterables
    if len(iterable) == 0:
        return passes

    # Create concurrent executor
    pool = ConcurrentExecutor(limit=limit, loop=loop)

    # Tester function to guarantee the pool is canceled on first failure
    @asyncio.coroutine
    def tester(element):
        nonlocal passes
        if not passes:
            return None
        if not (yield from coro(element)):
            # Flag as not test passed
            passes = False
            # Force ignoring pending coroutines
            pool.cancel()

    # Iterate and attach coroutine for defer scheduling
    for element in iterable:
        pool.add(partial(tester, element))

    # Wait until all coroutines finish
    yield from pool.run()

    return passes
Returns `True` if every element in a given iterable satisfies the
coroutine asynchronous test.

If any iteratee coroutine call returns `False`, the process is
immediately stopped, and `False` will be returned.

You can increase the concurrency limit for a fast race condition
scenario.

This function is a coroutine.

This function can be composed in a pipeline chain with ``|`` operator.

Arguments:
    coro (coroutine function): coroutine function to call with values
        to reduce.
    iterable (iterable): an iterable collection yielding
        coroutines functions.
    limit (int): max concurrency execution limit. Use ``0`` for no
        limit.
    loop (asyncio.BaseEventLoop): optional event loop to use.

Raises:
    TypeError: if input arguments are not valid.

Returns:
    bool: `True` if all the values pass the test, otherwise `False`.

Usage::

    async def gt_10(num):
        return num > 10

    await paco.every(gt_10, [1, 2, 3, 11])
    # => False

    await paco.every(gt_10, [11, 12, 13])
    # => True
def define_settings(ctx, model, values):
    if isinstance(model, basestring):
        model = ctx.env[model]
    model.create(values).execute()
Define settings as if set through the interface.

Example:

- model = 'sale.config.settings' or ctx.env['sale.config.settings']
- values = {'default_invoice_policy': 'delivery'}

Be careful, settings onchange are not triggered with this function.
def timeout(coro, timeout=None, loop=None):
    @asyncio.coroutine
    def _timeout(coro):
        return (yield from asyncio.wait_for(coro, timeout, loop=loop))

    @asyncio.coroutine
    def wrapper(*args, **kw):
        return (yield from _timeout(coro(*args, **kw)))

    return _timeout(coro) if asyncio.iscoroutine(coro) else wrapper
Wraps a given coroutine function, that when executed, if it takes more
than the given timeout in seconds to execute, it will be canceled and
raise an `asyncio.TimeoutError`.

This function is equivalent to the Python standard `asyncio.wait_for()`
function.

This function can be used as decorator.

Arguments:
    coro (coroutinefunction|coroutine): coroutine to wrap.
    timeout (int|float): max wait timeout in seconds.
    loop (asyncio.BaseEventLoop): optional event loop to use.

Raises:
    TypeError: if coro argument is not a coroutine function.

Returns:
    coroutinefunction: wrapper coroutine function.

Usage::

    await paco.timeout(coro, timeout=10)
def overload(fn):
    if not isfunction(fn):
        raise TypeError('paco: fn must be a callable object')

    spec = getargspec(fn)
    args = spec.args
    if not spec.varargs and (len(args) < 2 or args[1] != 'iterable'):
        raise ValueError('paco: invalid function signature or arity')

    @functools.wraps(fn)
    def decorator(*args, **kw):
        # Check function arity
        if len(args) < 2:
            return PipeOverloader(fn, args, kw)
        # Otherwise, behave like a normal wrapper
        return fn(*args, **kw)

    return decorator
Overload a given callable object to be used with ``|`` operator
overloading.

This is especially used for composing a pipeline of transformation over
a single data set.

Arguments:
    fn (function): target function to decorate.

Raises:
    TypeError: if function or coroutine function is not provided.

Returns:
    function: decorated function
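A small sketch of the resulting pipeline style, assuming `paco.map` is decorated with this overload (the `mul_2` coroutine is hypothetical):

    import paco

    async def mul_2(num):
        return num * 2

    # Partial application via the pipe operator
    await ([1, 2, 3] | paco.map(mul_2))
    # => [2, 4, 6]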
def debug_reduce(self, rule, tokens, parent, i):
    prefix = ' '
    if parent and tokens:
        p_token = tokens[parent]
        if hasattr(p_token, 'line'):
            prefix = 'L.%3d.%03d: ' % (p_token.line, p_token.column)
    print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
Customized format and print for our kind of tokens, which gets called when debugging grammar reduce rules
def consume(generator):  # pragma: no cover
    # If synchronous generator, just consume and return as list
    if hasattr(generator, '__next__'):
        return list(generator)

    if not PY_35:
        raise RuntimeError(
            'paco: asynchronous iterator protocol not supported')

    # If asynchronous generator, consume its iterator protocol manually
    buf = []
    while True:
        try:
            buf.append((yield from generator.__anext__()))
        except StopAsyncIteration:  # noqa
            break
    return buf
Helper function to consume a synchronous or asynchronous generator.

Arguments:
    generator (generator|asyncgenerator): generator to consume.

Returns:
    list
def isfunc(x):
    return any([
        inspect.isfunction(x) and not asyncio.iscoroutinefunction(x),
        inspect.ismethod(x) and not asyncio.iscoroutinefunction(x)
    ])
Returns `True` if the given value is a function or method object.

Arguments:
    x (mixed): value to check.

Returns:
    bool
def assert_corofunction(**kw):
    for name, value in kw.items():
        if not asyncio.iscoroutinefunction(value):
            raise TypeError(
                'paco: {} must be a coroutine function'.format(name))
Asserts that the given values are coroutine functions.

Arguments:
    **kw (mixed): values to check if they are coroutine functions.

Raises:
    TypeError: if the assertion fails.
def assert_iter(**kw):
    for name, value in kw.items():
        if not isiter(value):
            raise TypeError(
                'paco: {} must be an iterable object'.format(name))
Asserts that the given values implement a valid iterable interface.

Arguments:
    **kw (mixed): values to check if they are iterable.

Raises:
    TypeError: if the assertion fails.
def interval(coro, interval=1, times=None, loop=None):
    assert_corofunction(coro=coro)

    # Store maximum allowed number of calls
    times = int(times or 0) or float('inf')

    @asyncio.coroutine
    def schedule(times, *args, **kw):
        while times > 0:
            # Decrement times counter
            times -= 1
            # Schedule coroutine
            yield from coro(*args, **kw)
            yield from asyncio.sleep(interval)

    def wrapper(*args, **kw):
        return ensure_future(schedule(times, *args, **kw), loop=loop)

    return wrapper
Schedules the execution of a coroutine function every `x` amount of
seconds.

The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.

This function can be used as decorator.

Arguments:
    coro (coroutinefunction): coroutine function to defer.
    interval (int/float): number of seconds to repeat the coroutine
        execution.
    times (int): optional maximum time of executions. Infinite by
        default.
    loop (asyncio.BaseEventLoop, optional): loop to run. Defaults to
        asyncio.get_event_loop().

Raises:
    TypeError: if coro argument is not a coroutine function.

Returns:
    future (asyncio.Task): coroutine wrapped as task future. Useful
    for cancellation and state checking.

Usage::

    # Usage as function
    future = paco.interval(coro, 1)

    # Cancel it after a while...
    await asyncio.sleep(5)
    future.cancel()

    # Usage as decorator
    @paco.interval(10)
    async def metrics():
        await send_metrics()

    future = await metrics()
def remove_rules(self, doc):
    # remove blank lines and comment lines, e.g. lines starting with "#"
    doc = os.linesep.join(
        [s for s in doc.splitlines() if s and not re.match(r"^\s*#", s)])
    rules = doc.split()
    index = []
    for i in range(len(rules)):
        if rules[i] == '::=':
            index.append(i - 1)
    index.append(len(rules))
    for i in range(len(index) - 1):
        lhs = rules[index[i]]
        rhs = rules[index[i] + 2:index[i + 1]]
        rule = (lhs, tuple(rhs))
        if lhs not in self.rules:
            return
        if rule in self.rules[lhs]:
            self.rules[lhs].remove(rule)
            del self.rule2func[rule]
            del self.rule2name[rule]
            self.ruleschanged = True
            # If we are profiling, remove this rule from that as well
            if self.profile_info is not None and len(rule[1]) > 0:
                rule_str = self.reduce_string(rule)
                if rule_str and rule_str in self.profile_info:
                    del self.profile_info[rule_str]
    return
Remove grammar rules from _self.rules_, _self.rule2func_, and _self.rule2name_
def errorstack(self, tokens, i, full=False):
    print("\n-- Stacks of completed symbols:")
    states = [s for s in self.edges.values() if s]
    # States now has the set of states we are in
    state_stack = set()
    for state in states:
        # Find rules which can follow, but keep only
        # the part before the dot
        for rule, dot in self.states[state].items:
            lhs, rhs = rule
            if dot > 0:
                if full:
                    state_stack.add(
                        "%s ::= %s . %s" % (
                            lhs,
                            ' '.join(rhs[:dot]),
                            ' '.join(rhs[dot:])))
                else:
                    state_stack.add(
                        "%s ::= %s" % (lhs, ' '.join(rhs[:dot])))
    for stack in sorted(state_stack):
        print(stack)
Show the stacks of completed symbols. We get this by inspecting the current transitions possible and from that extracting the set of states we are in, and from there we look at the set of symbols before the "dot". If full is True, we show the entire rule with the dot placement. Otherwise just the rule up to the dot.
def parse(self, tokens, debug=None):
    self.tokens = tokens
    if debug:
        self.debug = debug

    sets = [[(1, 0), (2, 0)]]
    self.links = {}

    if self.ruleschanged:
        self.computeNull()
        self.newrules = {}
        self.new2old = {}
        self.makeNewRules()
        self.ruleschanged = False
        self.edges, self.cores = {}, {}
        self.states = {0: self.makeState0()}
        self.makeState(0, self._BOF)

    for i in range(len(tokens)):
        sets.append([])
        if sets[i] == []:
            break
        self.makeSet(tokens, sets, i)
    else:
        sets.append([])
        self.makeSet(None, sets, len(tokens))

    finalitem = (self.finalState(tokens), 0)
    if finalitem not in sets[-2]:
        if len(tokens) > 0:
            if self.debug.get('errorstack', False):
                self.errorstack(tokens, i - 1,
                                str(self.debug['errorstack']) == 'full')
            self.error(tokens, i - 1)
        else:
            self.error(None, None)

    if self.profile_info is not None:
        self.dump_profile_info()

    return self.buildTree(self._START, finalitem, tokens, len(sets) - 2)
This is the main entry point from outside. Passing in a debug dictionary changes the default debug setting.
def dump_grammar(self, out=sys.stdout):
    for rule in sorted(self.rule2name.items()):
        out.write("%s\n" % rule2str(rule[0]))
    return
Print grammar rules
def profile_rule(self, rule):
    rule_str = self.reduce_string(rule)
    if rule_str not in self.profile_info:
        self.profile_info[rule_str] = 1
    else:
        self.profile_info[rule_str] += 1
Bump count of the number of times _rule_ was used
def get_profile_info(self):
    return sorted(self.profile_info.items(),
                  key=lambda kv: kv[1],
                  reverse=False)
Show the accumulated results of how many times each rule was used
def partial(coro, *args, **kw):
    assert_corofunction(coro=coro)

    @asyncio.coroutine
    def wrapper(*_args, **_kw):
        call_args = args + _args
        kw.update(_kw)
        return (yield from coro(*call_args, **kw))

    return wrapper
Partial function implementation designed for coroutines, allowing
variadic input arguments.

This function can be used as decorator.

Arguments:
    coro (coroutinefunction): coroutine function to wrap.
    *args (mixed): mixed variadic arguments for partial application.

Raises:
    TypeError: if ``coro`` is not a coroutine function.

Returns:
    coroutinefunction

Usage::

    async def pow(x, y):
        return x ** y

    pow_2 = paco.partial(pow, 2)
    await pow_2(4)
    # => 16
def eval_expr(expr_str, show_tokens=False, showast=False,
              showgrammar=False, compile_mode='exec'):
    parser_debug = {
        'rules': False, 'transition': False,
        'reduce': showgrammar,
        'errorstack': True, 'context': True
    }
    parsed = parse_expr(expr_str, show_tokens=show_tokens,
                        parser_debug=parser_debug)
    if showast:
        print(parsed)
    assert parsed == 'expr', 'Should have parsed grammar start'
    evaluator = ExprEvaluator()
    # What we've been waiting for: evaluate the AST!
    return evaluator.traverse(parsed)
Evaluate a simple expression
def n_atom(self, node):
    length = len(node)
    if length == 1:
        self.preorder(node[0])
        node.value = node[0].value
        self.prune()
    elif length == 3:
        self.preorder(node[1])
        node.value = node[1].value
        self.prune()
    else:
        assert False, "Expecting atom to have length 1 or 3"
atom ::= NUMBER | '(' expr ')'
def n_term(self, node):
    if len(node) == 1:
        self.preorder(node[0])
        node.value = node[0].value
    else:
        self.preorder(node[0])
        self.preorder(node[2])
        if node[1].attr == '*':
            node.value = node[0].value * node[2].value
        elif node[1].attr == '/':
            node.value = node[0].value / node[2].value
        else:
            assert False, "Expecting operator to be '*' or '/'"
    self.prune()
term ::= term MULT_OP atom | atom
def setup():
    global CONFIG, BING_KEY, GENIUS_KEY, config_path, LOG_FILENAME, \
        LOG_LINE_SEPERATOR
    LOG_FILENAME = 'musicrepair_log.txt'
    LOG_LINE_SEPERATOR = '........................\n'
    CONFIG = configparser.ConfigParser()
    config_path = realpath(__file__).replace(basename(__file__), '')
    config_path = config_path + 'config.ini'
    CONFIG.read(config_path)
    GENIUS_KEY = CONFIG['keys']['genius_key']
Gathers all configs
def matching_details(song_name, song_title, artist):
    '''
    Provides a score between 0 and 1 that determines the relevance of
    the search result
    '''
    match_name = difflib.SequenceMatcher(None, song_name,
                                         song_title).ratio()
    match_title = difflib.SequenceMatcher(None, song_name,
                                          artist + song_title).ratio()
    if max(match_name, match_title) >= 0.55:
        return True, max(match_name, match_title)
    else:
        return False, (match_name + match_title) / 2
Provides a score between 0 and 1 that determines the relevance of the search result
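The cutoff logic can be sanity-checked directly with difflib (the titles here are made up):

    import difflib

    ratio = difflib.SequenceMatcher(None, 'Hello Adele', 'Hello').ratio()
    print(ratio)  # => 0.625, which clears the 0.55 cutoff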
def get_lyrics_letssingit(song_name):
    '''
    Scrapes the lyrics of a song, since Spotify does not provide
    lyrics; takes the song title as argument
    '''
    lyrics = ""
    url = ("http://search.letssingit.com/cgi-exe/am.cgi"
           "?a=search&artist_id=&l=archive&s=" +
           quote(song_name.encode('utf-8')))
    html = urlopen(url).read()
    soup = BeautifulSoup(html, "html.parser")
    link = soup.find('a', {'class': 'high_profile'})
    try:
        link = link.get('href')
        link = urlopen(link).read()
        soup = BeautifulSoup(link, "html.parser")
        try:
            lyrics = soup.find('div', {'id': 'lyrics'}).text
            lyrics = lyrics[3:]
        except AttributeError:
            lyrics = ""
    except:
        lyrics = ""
    return lyrics
Scrapes the lyrics of a song, since Spotify does not provide lyrics; takes the song title as argument
def add_albumart(albumart, song_title):
    '''
    Adds the album art to the song
    '''
    try:
        img = urlopen(albumart)  # Gets album art from url
    except Exception:
        log.log_error("* Could not add album art", indented=True)
        return None
    audio = EasyMP3(song_title, ID3=ID3)
    try:
        audio.add_tags()
    except _util.error:
        pass
    audio.tags.add(
        APIC(
            encoding=3,        # UTF-8
            mime='image/png',
            type=3,            # 3 is for album art
            desc='Cover',
            data=img.read()    # Reads and adds album art
        )
    )
    audio.save()
    log.log("> Added album art")
Adds the album art to the song
def add_details(file_name, title, artist, album, lyrics=""):
    '''
    Adds the details to the song
    '''
    tags = EasyMP3(file_name)
    tags["title"] = title
    tags["artist"] = artist
    tags["album"] = album
    tags.save()
    tags = ID3(file_name)
    uslt_output = USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics)
    tags["USLT::'eng'"] = uslt_output
    tags.save(file_name)
    log.log("> Adding properties")
    log.log_indented("[*] Title: %s" % title)
    log.log_indented("[*] Artist: %s" % artist)
    log.log_indented("[*] Album: %s " % album)
Adds the details to song
def filterfalse(coro, iterable, limit=0, loop=None):
    return (yield from filter(coro, iterable,
                              assert_fn=assert_false,
                              limit=limit, loop=loop))
Returns a list of all the values in the given iterable which do not
pass an asynchronous truth test coroutine.

Operations are executed concurrently by default, but results will be
in order.

You can configure the concurrency via `limit` param.

This function is the asynchronous equivalent of the Python built-in
`filterfalse()` function.

This function is a coroutine.

This function can be composed in a pipeline chain with ``|`` operator.

Arguments:
    coro (coroutine function): coroutine filter function to call
        accepting iterable values.
    iterable (iterable): an iterable collection yielding
        coroutines functions.
    assert_fn (coroutinefunction): optional assertion function.
    limit (int): max filtering concurrency limit. Use ``0`` for no
        limit.
    loop (asyncio.BaseEventLoop): optional event loop to use.

Raises:
    TypeError: if coro argument is not a coroutine function.

Returns:
    filtered values (list): ordered list containing values that did
    not pass the filter.

Usage::

    async def iseven(num):
        return num % 2 == 0

    await paco.filterfalse(iseven, [1, 2, 3, 4, 5])
    # => [1, 3, 5]
def map(coro, iterable, limit=0, loop=None, timeout=None,
        return_exceptions=False, *args, **kw):
    # Call each iterable but collecting yielded values
    return (yield from each(coro, iterable,
                            limit=limit, loop=loop,
                            timeout=timeout, collect=True,
                            return_exceptions=return_exceptions))
Concurrently maps values yielded from an iterable, passing them into an
asynchronous coroutine function.

Mapped values will be returned as a list. Items order will be
preserved based on origin iterable order.

Concurrency level can be configurable via ``limit`` param.

This function is the asynchronous equivalent of the Python built-in
`map()` function.

This function is a coroutine.

This function can be composed in a pipeline chain with ``|`` operator.

Arguments:
    coro (coroutinefunction): map coroutine function to use.
    iterable (iterable|asynchronousiterable): an iterable collection
        yielding coroutines functions.
    limit (int): max concurrency limit. Use ``0`` for no limit.
    loop (asyncio.BaseEventLoop): optional event loop to use.
    timeout (int|float): timeout can be used to control the maximum
        number of seconds to wait before returning. timeout can be an
        int or float. If timeout is not specified or None, there is no
        limit to the wait time.
    return_exceptions (bool): returns exceptions as valid results.
    *args (mixed): optional variadic arguments to be passed to the
        coroutine map function.

Returns:
    list: ordered list of values yielded by coroutines

Usage::

    async def mul_2(num):
        return num * 2

    await paco.map(mul_2, [1, 2, 3, 4, 5])
    # => [2, 4, 6, 8, 10]
def uninstall(ctx, module_list):
    if not module_list:
        raise AnthemError(u"You have to provide a list of "
                          "module's name to uninstall")
    mods = ctx.env['ir.module.module'].search(
        [('name', 'in', module_list)])
    try:
        mods.button_immediate_uninstall()
    except Exception:
        raise AnthemError(u'Cannot uninstall modules. See the logs')
Uninstall the modules in `module_list`
def update_translations(ctx, module_list):
    if not module_list:
        raise AnthemError(u"You have to provide a list of "
                          "module's name to update the translations")
    for module in module_list:
        ctx.env['ir.module.module'].with_context(overwrite=True).search(
            [('name', '=', module)]).update_translations()
Update translations from module list
def img_search_bing(album):
    '''
    Bing image search
    '''
    setup()
    album = album + " Album Art"
    api_key = "Key"
    endpoint = "https://api.cognitive.microsoft.com/bing/v5.0/images/search"
    links_dict = {}
    headers = {'Ocp-Apim-Subscription-Key': str(BING_KEY)}
    param = {'q': album, 'count': '1'}
    response = requests.get(endpoint, headers=headers, params=param)
    response = response.json()
    key = 0
    try:
        for i in response['value']:
            links_dict[str(key)] = str((i['contentUrl']))
            key = key + 1
        return links_dict["0"]
    except KeyError:
        return None
Bing image search
def img_search_google(album):
    '''
    Google image search
    '''
    album = album + " Album Art"
    url = ("https://www.google.com/search?q=" +
           quote(album.encode('utf-8')) + "&source=lnms&tbm=isch")
    header = {'User-Agent':
              '''Mozilla/5.0 (Windows NT 6.1; WOW64)
              AppleWebKit/537.36 (KHTML,like Gecko)
              Chrome/43.0.2357.134 Safari/537.36'''}
    soup = BeautifulSoup(urlopen(Request(url, headers=header)),
                         "html.parser")
    albumart_div = soup.find("div", {"class": "rg_meta"})
    albumart = json.loads(albumart_div.text)["ou"]
    return albumart
Google image search
def generator_consumer(coro):  # pragma: no cover
    if not asyncio.iscoroutinefunction(coro):
        raise TypeError('paco: coro must be a coroutine function')

    @functools.wraps(coro)
    @asyncio.coroutine
    def wrapper(*args, **kw):
        if len(args) > 1 and isgenerator(args[1]):
            args = list(args)
            args[1] = (yield from consume(args[1])
                       if hasattr(args[1], '__anext__')
                       else list(args[1]))
            args = tuple(args)
        return (yield from coro(*args, **kw))

    return wrapper
Decorator wrapper that consumes sync/async generators provided as
iterable input argument.

This function is only intended to be used internally.

Arguments:
    coro (coroutinefunction): function to decorate

Raises:
    TypeError: if function or coroutine function is not provided.

Returns:
    function: decorated function.
def decorate(fn):
    if not isfunction(fn):
        raise TypeError('paco: fn must be a callable object')

    @functools.wraps(fn)
    def decorator(*args, **kw):
        # If coroutine object is passed
        for arg in args:
            if iscoro_or_corofunc(arg):
                return fn(*args, **kw)

        # Explicit argument must be at least a coroutine
        if len(args) and args[0] is None:
            raise TypeError('paco: first argument cannot be empty')

        def wrapper(coro, *_args, **_kw):
            # coro must be a valid type
            if not iscoro_or_corofunc(coro):
                raise TypeError('paco: first argument must be a '
                                'coroutine or coroutine function')
            # Merge call arguments
            _args = ((coro,) + (args + _args))
            kw.update(_kw)
            # Trigger original decorated function
            return fn(*_args, **kw)

        return wrapper

    return decorator
Generic decorator for coroutines helper functions allowing multiple
variadic initialization arguments.

This function is intended to be used internally.

Arguments:
    fn (function): target function to decorate.

Raises:
    TypeError: if function or coroutine function is not provided.

Returns:
    function: decorated function.
def _getRole(self, matchedVars):
    role = matchedVars.get(ROLE)
    if role is not None and role.strip() == '':
        role = NULL
    else:
        valid = Authoriser.isValidRoleName(role)
        if valid:
            role = Authoriser.getRoleFromName(role)
        else:
            self.print("Invalid role. Valid roles are: {}".
                       format(", ".join(map(lambda r: r.name, Roles))),
                       Token.Error)
            return False
    return role
:param matchedVars:
:return: NULL or the role's integer value
def whilst(coro, coro_test, assert_coro=None, *args, **kw):
    assert_corofunction(coro=coro, coro_test=coro_test)

    # Store yielded values by coroutine
    results = []

    # Set assertion coroutine
    assert_coro = assert_coro or assert_true

    # Execute coroutine until the assertion over the test fails
    while (yield from assert_coro((yield from coro_test()))):
        results.append((yield from coro(*args, **kw)))

    return results
Repeatedly call `coro` coroutine function while `coro_test` returns
`True`.

This function is the inverse of `paco.until()`.

This function is a coroutine.

Arguments:
    coro (coroutinefunction): coroutine function to execute.
    coro_test (coroutinefunction): coroutine function to test.
    assert_coro (coroutinefunction): optional assertion coroutine used
        to determine if the test passed or not.
    *args (mixed): optional variadic arguments to pass to `coro`
        function.

Raises:
    TypeError: if input arguments are invalid.

Returns:
    list: result values returned by `coro`.

Usage::

    calls = 0

    async def task():
        nonlocal calls
        calls += 1
        return calls

    async def calls_lt_4():
        return calls < 4

    await paco.whilst(task, calls_lt_4)
    # => [1, 2, 3, 4]
def load_csv(ctx, model, path, header=None, header_exclude=None,
             **fmtparams):
    if not os.path.isabs(path):
        if ctx.options.odoo_data_path:
            path = os.path.join(ctx.options.odoo_data_path, path)
        else:
            raise AnthemError(
                'Got a relative path. '
                'Please, provide a value for `ODOO_DATA_PATH` '
                'in your environment or set `--odoo-data-path` option.'
            )
    with open(path, 'rb') as data:
        load_csv_stream(ctx, model, data,
                        header=header, header_exclude=header_exclude,
                        **fmtparams)
Load a CSV from a file path.

:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
    If a relative path is given you must provide a value for
    `ODOO_DATA_PATH` in your environment or set `--odoo-data-path`
    option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`

Usage example::

    from pkg_resources import Requirement, resource_string

    req = Requirement.parse('my-project')
    load_csv(ctx, ctx.env['res.users'],
             resource_string(req, 'data/users.csv'),
             delimiter=',')
def load_csv_stream(ctx, model, data,
                    header=None, header_exclude=None, **fmtparams):
    _header, _rows = read_csv(data, **fmtparams)
    header = header if header else _header
    if _rows:
        # check if passed header contains all the fields
        if header != _header and not header_exclude:
            # if not, we exclude the rest of the fields
            header_exclude = [x for x in _header if x not in header]
        if header_exclude:
            # exclude fields from header as well as respective values
            header = [x for x in header if x not in header_exclude]
            # we must loop through all the rows too to pop values
            # since odoo import works only w/ reader and not w/ dictreader
            pop_idxs = [_header.index(x) for x in header_exclude]
            rows = []
            for i, row in enumerate(_rows):
                rows.append(
                    [x for j, x in enumerate(row) if j not in pop_idxs]
                )
        else:
            rows = list(_rows)
        if rows:
            load_rows(ctx, model, header, rows)
Load a CSV from a stream.

:param ctx: current anthem context
:param model: model name as string or model klass
:param data: csv data to load
:param header: csv fieldnames whitelist
:param header_exclude: csv fieldnames blacklist

Usage example::

    from pkg_resources import Requirement, resource_stream

    req = Requirement.parse('my-project')
    load_csv_stream(ctx, ctx.env['res.users'],
                    resource_stream(req, 'data/users.csv'),
                    delimiter=',')
def update_translations(ctx, module_list):
    modules.update_translations(ctx, module_list)
    ctx.log_line(u'Deprecated: use anthem.lyrics.modules.update_translations'
                 u' instead of anthem.lyrics.loaders.update_translations')
Update translations from module list.

:param module_list: a list of modules
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
                         showgrammar=False, compile_mode='exec'):
    parser_debug = {
        'rules': False, 'transition': False,
        'reduce': showgrammar,
        'errorstack': True, 'context': True, 'dups': True
    }
    parsed = parse_python2(python_stmts, show_tokens=show_tokens,
                           parser_debug=parser_debug)
    assert parsed == 'file_input', 'Should have parsed grammar start'
    formatter = Python2Formatter()
    if showast:
        print(parsed)
    # What we've been waiting for: Generate source from AST!
    python2_formatted_str = formatter.traverse(parsed)
    return python2_formatted_str
Formats Python 2 statements
def n_atom(self, node):
    length = len(node)
    if length == 1:
        self.preorder(node[0])
    elif length == 3:
        self.preorder(node[0])
        self.preorder(node[1])
        self.preorder(node[2])
    else:
        assert False, "Expecting atom to have length 1 or 3"
    self.prune()
atom ::= ('(' [yield_expr|testlist_gexp] ')'
          | '[' [listmaker] ']'
          | '{' [dictmaker] '}'
          | '`' testlist1 '`'
          | NAME | NUMBER | STRING+)