_id: stringlengths 2-7
title: stringlengths 1-88
partition: stringclasses, 3 values
text: stringlengths 75-19.8k
language: stringclasses, 1 value
meta_information: dict
q4600
Service.asdict
train
def asdict(self):
    """Return dict presentation of this service.

    Useful for dumping the device information into JSON.
    """
    return {
        "methods": {m.name: m.asdict() for m in self.methods},
        "protocols": self.protocols,
        "notifications": {n.name: n.asdict() for n in self.notifications},
    }
python
{ "resource": "" }
q4601
horizrad
train
def horizrad(infn: Path, outfn: Path, c1: dict) -> xarray.Dataset:
    """
    read CSV, simulate, write, plot
    """
    if infn is not None:
        infn = Path(infn).expanduser()
        if infn.suffix == '.h5':
            TR = xarray.open_dataset(infn)
            return TR

    c1.update({'model': 0,   # 0: user meteorological data
               'itype': 1,   # 1: horizontal path
               'iemsct': 1,  # 1: radiance model
               'im': 1,      # 1: for horizontal path (see Lowtran manual p.42)
               'ird1': 1,    # 1: use card 2C2
               })
# %% read csv file
    if not infn:  # demo mode
        c1['p'] = [949., 959.]
        c1['t'] = [283.8, 285.]
        c1['wmol'] = [[93.96, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                      [93.96, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]
        c1['time'] = [parse('2017-04-05T12'), parse('2017-04-05T18')]
    else:  # read csv, normal case
        PTdata = read_csv(infn)
        c1['p'] = PTdata['p']
        c1['t'] = PTdata['Ta']
        c1['wmol'] = np.zeros((PTdata.shape[0], 12))
        c1['wmol'][:, 0] = PTdata['RH']
        c1['time'] = [parse(t) for t in PTdata['time']]
# %% TR is 3-D array with axes: time, wavelength, and [transmission, radiance]
    TR = loopuserdef(c1)

    return TR
python
{ "resource": "" }
q4602
nm2lt7
train
def nm2lt7(short_nm: float, long_nm: float, step_cminv: float = 20) -> Tuple[float, float, float]:
    """converts wavelength in nm to cm^-1
    minimum meaningful step is 20, but 5 is minimum before crashing lowtran

    short: shortest wavelength e.g. 200 nm
    long: longest wavelength e.g. 30000 nm
    step: step size in cm^-1 e.g. 20

    output in cm^-1
    """
    short = 1e7 / short_nm
    long = 1e7 / long_nm

    N = int(np.ceil((short - long) / step_cminv)) + 1  # yes, ceil

    return short, long, N
python
{ "resource": "" }
q4603
loopangle
train
def loopangle(c1: Dict[str, Any]) -> xarray.Dataset:
    """
    loop over "ANGLE"
    """
    angles = np.atleast_1d(c1['angle'])
    TR = xarray.Dataset(coords={'wavelength_nm': None, 'angle_deg': angles})

    for a in angles:
        c = c1.copy()
        c['angle'] = a
        TR = TR.merge(golowtran(c))

    return TR
python
{ "resource": "" }
q4604
golowtran
train
def golowtran(c1: Dict[str, Any]) -> xarray.Dataset:
    """directly run Fortran code"""
# %% default parameters
    c1.setdefault('time', None)

    defp = ('h1', 'h2', 'angle', 'im', 'iseasn', 'ird1', 'range_km', 'zmdl', 'p', 't')
    for p in defp:
        c1.setdefault(p, 0)

    c1.setdefault('wmol', [0] * 12)
# %% input check
    assert len(c1['wmol']) == 12, 'see Lowtran user manual for 12 values of WMOL'
    assert np.isfinite(c1['h1']), 'per Lowtran user manual Table 14, H1 must always be defined'
# %% setup wavelength
    c1.setdefault('wlstep', 20)
    if c1['wlstep'] < 5:
        logging.critical('minimum resolution 5 cm^-1, specified resolution %s cm^-1', c1['wlstep'])

    wlshort, wllong, nwl = nm2lt7(c1['wlshort'], c1['wllong'], c1['wlstep'])

    if not (0 < wlshort and wllong <= 50000):
        logging.critical('specified model range 0 <= wavelength [cm^-1] <= 50000')
# %% invoke lowtran
    """
    Note we invoke case "3a" from table 14, only observer altitude and apparent angle are specified
    """
    Tx, V, Alam, trace, unif, suma, irrad, sumvv = lowtran7.lwtrn7(
        True, nwl, wllong, wlshort, c1['wlstep'],
        c1['model'], c1['itype'], c1['iemsct'], c1['im'],
        c1['iseasn'], c1['ird1'],
        c1['zmdl'], c1['p'], c1['t'], c1['wmol'],
        c1['h1'], c1['h2'], c1['angle'], c1['range_km'])

    dims = ('time', 'wavelength_nm', 'angle_deg')
    TR = xarray.Dataset({'transmission': (dims, Tx[:, 9][None, :, None]),
                         'radiance': (dims, sumvv[None, :, None]),
                         'irradiance': (dims, irrad[:, 0][None, :, None]),
                         'pathscatter': (dims, irrad[:, 2][None, :, None])},
                        coords={'time': [c1['time']],
                                'wavelength_nm': Alam * 1e3,
                                'angle_deg': [c1['angle']]})

    return TR
python
{ "resource": "" }
q4605
plotradtime
train
def plotradtime(TR: xarray.Dataset, c1: Dict[str, Any], log: bool = False):
    """
    make one plot per time for now.

    TR: 3-D array: time, wavelength, [transmittance, radiance]
        radiance is currently single-scatter solar
    """
    for t in TR.time:  # for each time
        plotirrad(TR.sel(time=t), c1, log)
python
{ "resource": "" }
q4606
Method.asdict
train
def asdict(self) -> Dict[str, Union[str, Dict]]:
    """Return a dictionary describing the method.

    This can be used to dump the information into a JSON file.
    """
    return {
        "service": self.service.name,
        **self.signature.serialize(),
    }
python
{ "resource": "" }
q4607
DeviceError.error
train
def error(self):
    """Return user-friendly error message."""
    try:
        errcode = DeviceErrorCode(self.error_code)
        return "%s (%s): %s" % (errcode.name, errcode.value, self.error_message)
    except ValueError:  # the code is not a known DeviceErrorCode
        return "Error %s: %s" % (self.error_code, self.error_message)
python
{ "resource": "" }
q4608
err
train
def err(msg):
    """Pretty-print an error."""
    click.echo(click.style(msg, fg="red", bold=True))
python
{ "resource": "" }
q4609
coro
train
def coro(f):
    """Run a coroutine and handle possible errors for the click cli.

    Source https://github.com/pallets/click/issues/85#issuecomment-43378930
    """
    f = asyncio.coroutine(f)

    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        try:
            return loop.run_until_complete(f(*args, **kwargs))
        except KeyboardInterrupt:
            click.echo("Got CTRL+C, quitting..")
            dev = args[0]
            loop.run_until_complete(dev.stop_listen_notifications())
        except SongpalException as ex:
            err("Error: %s" % ex)
            if len(args) > 0 and hasattr(args[0], "debug"):
                if args[0].debug > 0:
                    raise ex

    return update_wrapper(wrapper, f)
python
{ "resource": "" }
q4610
traverse_settings
train
async def traverse_settings(dev, module, settings, depth=0):
    """Print all available settings."""
    for setting in settings:
        if setting.is_directory:
            print("%s%s (%s)" % (depth * " ", setting.title, module))
            return await traverse_settings(dev, module, setting.settings, depth + 2)
        else:
            try:
                print_settings([await setting.get_value(dev)], depth=depth)
            except SongpalException as ex:
                err("Unable to read setting %s: %s" % (setting, ex))
                continue
python
{ "resource": "" }
q4611
print_settings
train
def print_settings(settings, depth=0):
    """Print all available settings of the device."""
    # handle the case where a single setting is passed
    if isinstance(settings, Setting):
        settings = [settings]
    for setting in settings:
        cur = setting.currentValue
        print(
            "%s* %s (%s, value: %s, type: %s)"
            % (
                " " * depth,
                setting.title,
                setting.target,
                click.style(cur, bold=True),
                setting.type,
            )
        )
        for opt in setting.candidate:
            if not opt.isAvailable:
                logging.debug("Unavailable setting %s", opt)
                continue
            click.echo(
                click.style(
                    "%s - %s (%s)" % (" " * depth, opt.title, opt.value),
                    bold=opt.value == cur,
                )
            )
python
{ "resource": "" }
q4612
cli
train
async def cli(ctx, endpoint, debug, websocket, post):
    """Songpal CLI."""
    lvl = logging.INFO
    if debug:
        lvl = logging.DEBUG
        click.echo("Setting debug level to %s" % debug)
    logging.basicConfig(level=lvl)

    if ctx.invoked_subcommand == "discover":
        ctx.obj = {"debug": debug}
        return

    if endpoint is None:
        err("Endpoint is required except when using 'discover'!")
        return

    protocol = None
    if post and websocket:
        err("You can force either --post or --websocket, not both")
        return
    elif websocket:
        protocol = ProtocolType.WebSocket
    elif post:
        protocol = ProtocolType.XHRPost

    logging.debug("Using endpoint %s", endpoint)
    x = Device(endpoint, force_protocol=protocol, debug=debug)
    try:
        await x.get_supported_methods()
    except (requests.exceptions.ConnectionError, SongpalException) as ex:
        err("Unable to get supported methods: %s" % ex)
        sys.exit(-1)
    ctx.obj = x
python
{ "resource": "" }
q4613
status
train
async def status(dev: Device):
    """Display status information."""
    power = await dev.get_power()
    click.echo(click.style("%s" % power, bold=power))

    vol = await dev.get_volume_information()
    click.echo(vol.pop())

    play_info = await dev.get_play_info()
    if not play_info.is_idle:
        click.echo("Playing %s" % play_info)
    else:
        click.echo("Not playing any media")

    outs = await dev.get_inputs()
    for out in outs:
        if out.active:
            click.echo("Active output: %s" % out)

    sysinfo = await dev.get_system_info()
    click.echo("System information: %s" % sysinfo)
python
{ "resource": "" }
q4614
power
train
async def power(dev: Device, cmd, target, value):
    """Turn on and off, control power settings.

    Accepts commands 'on', 'off', and 'settings'.
    """

    async def try_turn(cmd):
        state = True if cmd == "on" else False
        try:
            return await dev.set_power(state)
        except SongpalException as ex:
            if ex.code == 3:
                err("The device is already %s." % cmd)
            else:
                raise ex

    if cmd == "on" or cmd == "off":
        click.echo(await try_turn(cmd))
    elif cmd == "settings":
        settings = await dev.get_power_settings()
        print_settings(settings)
    elif cmd == "set" and target and value:
        click.echo(await dev.set_power_settings(target, value))
    else:
        power = await dev.get_power()
        click.echo(click.style(str(power), bold=power))
python
{ "resource": "" }
q4615
googlecast
train
async def googlecast(dev: Device, target, value):
    """Get or set Googlecast settings."""
    if target and value:
        click.echo("Setting %s = %s" % (target, value))
        await dev.set_googlecast_settings(target, value)
    print_settings(await dev.get_googlecast_settings())
python
{ "resource": "" }
q4616
source
train
async def source(dev: Device, scheme):
    """List available sources.

    If no `scheme` is given, will list sources for all schemes.
    """
    if scheme is None:
        schemes = await dev.get_schemes()
        schemes = [scheme.scheme for scheme in schemes]  # noqa: T484
    else:
        schemes = [scheme]

    for schema in schemes:
        try:
            sources = await dev.get_source_list(schema)
        except SongpalException as ex:
            click.echo("Unable to get sources for %s" % schema)
            continue

        for src in sources:
            click.echo(src)
            if src.isBrowsable:
                try:
                    count = await dev.get_content_count(src.source)
                    if count.count > 0:
                        click.echo(" %s" % count)
                        for content in await dev.get_contents(src.source):
                            click.echo(" %s\n\t%s" % (content.title, content.uri))
                    else:
                        click.echo(" No content to list.")
                except SongpalException as ex:
                    click.echo(" %s" % ex)
python
{ "resource": "" }
q4617
volume
train
async def volume(dev: Device, volume, output):
    """Get and set the volume settings.

    Passing 'mute' as new volume will mute the volume,
    'unmute' removes it.
    """
    vol = None
    vol_controls = await dev.get_volume_information()
    if output is not None:
        click.echo("Using output: %s" % output)
        output_uri = (await dev.get_zone(output)).uri
        for v in vol_controls:
            if v.output == output_uri:
                vol = v
                break
    else:
        vol = vol_controls[0]

    if vol is None:
        err("Unable to find volume controller: %s" % output)
        return

    if volume and volume == "mute":
        click.echo("Muting")
        await vol.set_mute(True)
    elif volume and volume == "unmute":
        click.echo("Unmuting")
        await vol.set_mute(False)
    elif volume:
        click.echo("Setting volume to %s" % volume)
        await vol.set_volume(volume)

    if output is not None:
        click.echo(vol)
    else:
        [click.echo(x) for x in vol_controls]
python
{ "resource": "" }
q4618
schemes
train
async def schemes(dev: Device):
    """Print supported uri schemes."""
    schemes = await dev.get_schemes()
    for scheme in schemes:
        click.echo(scheme)
python
{ "resource": "" }
q4619
check_update
train
async def check_update(dev: Device, internet: bool, update: bool):
    """Print out update information."""
    if internet:
        print("Checking updates from network")
    else:
        print("Not checking updates from internet")

    update_info = await dev.get_update_info(from_network=internet)
    if not update_info.isUpdatable:
        click.echo("No updates available.")
        return
    if not update:
        click.echo("Update available: %s" % update_info)
        click.echo("Use --update to activate update!")
    else:
        click.echo("Activating update, please be seated.")
        res = await dev.activate_system_update()
        click.echo("Update result: %s" % res)
python
{ "resource": "" }
q4620
bluetooth
train
async def bluetooth(dev: Device, target, value):
    """Get or set bluetooth settings."""
    if target and value:
        await dev.set_bluetooth_settings(target, value)

    print_settings(await dev.get_bluetooth_settings())
python
{ "resource": "" }
q4621
settings
train
async def settings(dev: Device):
    """Print out all possible settings."""
    settings_tree = await dev.get_settings()

    for module in settings_tree:
        await traverse_settings(dev, module.usage, module.settings)
python
{ "resource": "" }
q4622
storage
train
async def storage(dev: Device):
    """Print storage information."""
    storages = await dev.get_storage_list()
    for storage in storages:
        click.echo(storage)
python
{ "resource": "" }
q4623
sound
train
async def sound(dev: Device, target, value):
    """Get or set sound settings."""
    if target and value:
        click.echo("Setting %s to %s" % (target, value))
        click.echo(await dev.set_sound_settings(target, value))

    print_settings(await dev.get_sound_settings())
python
{ "resource": "" }
q4624
soundfield
train
async def soundfield(dev: Device, soundfield: str):
    """Get or set sound field."""
    if soundfield is not None:
        await dev.set_sound_settings("soundField", soundfield)
    soundfields = await dev.get_sound_settings("soundField")
    print_settings(soundfields)
python
{ "resource": "" }
q4625
speaker
train
async def speaker(dev: Device, target, value):
    """Get and set external speaker settings."""
    if target and value:
        click.echo("Setting %s to %s" % (target, value))
        await dev.set_speaker_settings(target, value)

    print_settings(await dev.get_speaker_settings())
python
{ "resource": "" }
q4626
notifications
train
async def notifications(dev: Device, notification: str, listen_all: bool):
    """List available notifications and listen to them.

    Using --listen-all [notification] allows listening to all notifications
    from the given subsystem. If the subsystem is omitted, notifications
    from all subsystems are requested.
    """
    notifications = await dev.get_notifications()

    async def handle_notification(x):
        click.echo("got notification: %s" % x)

    if listen_all:
        if notification is not None:
            await dev.services[notification].listen_all_notifications(
                handle_notification
            )
        else:
            click.echo("Listening to all possible notifications")
            await dev.listen_notifications(fallback_callback=handle_notification)
    elif notification:
        click.echo("Subscribing to notification %s" % notification)
        for notif in notifications:
            if notif.name == notification:
                await notif.activate(handle_notification)
        click.echo("Unable to find notification %s" % notification)
    else:
        click.echo(click.style("Available notifications", bold=True))
        for notification in notifications:
            click.echo("* %s" % notification)
python
{ "resource": "" }
q4627
list_all
train
def list_all(dev: Device):
    """List all available API calls."""
    for name, service in dev.services.items():
        click.echo(click.style("\nService %s" % name, bold=True))
        for method in service.methods:
            click.echo(" %s" % method.name)
python
{ "resource": "" }
q4628
command
train
async def command(dev, service, method, parameters):
    """Run a raw command."""
    params = None
    if parameters is not None:
        params = ast.literal_eval(parameters)
    click.echo("Calling %s.%s with params %s" % (service, method, params))
    res = await dev.raw_command(service, method, params)
    click.echo(res)
python
{ "resource": "" }
q4629
dump_devinfo
train
async def dump_devinfo(dev: Device, file):
    """Dump developer information.

    Pass `file` to write the results directly into a file.
    """
    import attr

    methods = await dev.get_supported_methods()
    res = {
        "supported_methods": {k: v.asdict() for k, v in methods.items()},
        "settings": [attr.asdict(x) for x in await dev.get_settings()],
        "sysinfo": attr.asdict(await dev.get_system_info()),
        "interface_info": attr.asdict(await dev.get_interface_information()),
    }
    if file:
        click.echo("Saving to file: %s" % file.name)
        json.dump(res, file, sort_keys=True, indent=4)
    else:
        click.echo(json.dumps(res, sort_keys=True, indent=4))
python
{ "resource": "" }
q4630
state
train
async def state(gc: GroupControl):
    """Current group state."""
    state = await gc.state()
    click.echo(state)
    click.echo("Full state info: %s" % repr(state))
python
{ "resource": "" }
q4631
create
train
async def create(gc: GroupControl, name, slaves):
    """Create a new group."""
    click.echo("Creating group %s with slaves: %s" % (name, slaves))
    click.echo(await gc.create(name, slaves))
python
{ "resource": "" }
q4632
add
train
async def add(gc: GroupControl, slaves):
    """Add speakers to group."""
    click.echo("Adding to existing group: %s" % slaves)
    click.echo(await gc.add(slaves))
python
{ "resource": "" }
q4633
remove
train
async def remove(gc: GroupControl, slaves):
    """Remove speakers from group."""
    click.echo("Removing from existing group: %s" % slaves)
    click.echo(await gc.remove(slaves))
python
{ "resource": "" }
q4634
Kanboard.execute
train
def execute(self, method, **kwargs):
    """
    Call remote API procedure

    Args:
        method: Procedure name
        kwargs: Procedure named arguments

    Returns:
        Procedure result

    Raises:
        urllib2.HTTPError: Any HTTP error (Python 2)
        urllib.error.HTTPError: Any HTTP error (Python 3)
    """
    payload = {
        'id': 1,
        'jsonrpc': '2.0',
        'method': method,
        'params': kwargs
    }

    credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode())
    auth_header_prefix = 'Basic ' if self._auth_header == DEFAULT_AUTH_HEADER else ''
    headers = {
        self._auth_header: auth_header_prefix + credentials.decode(),
        'Content-Type': 'application/json',
    }

    return self._do_request(headers, payload)
python
{ "resource": "" }
q4635
tc
train
def tc(g):
    """
    Given a graph @g, returns the transitive closure of @g
    """
    ret = {}
    for scc in tarjan(g):
        ws = set()
        ews = set()
        for v in scc:
            ws.update(g[v])
        for w in ws:
            assert w in ret or w in scc
            ews.add(w)
            ews.update(ret.get(w, ()))
        if len(scc) > 1:
            ews.update(scc)
        ews = tuple(ews)
        for v in scc:
            ret[v] = ews
    return ret
python
{ "resource": "" }
q4636
Session.list_feeds
train
def list_feeds(self):
    """
    Output a list of all feed names
    """
    feeds = configparser.ConfigParser()
    feeds.read(self.data_filename)
    return feeds.sections()
python
{ "resource": "" }
q4637
Session.retrieve_config_file
train
def retrieve_config_file(self):
    """
    Retrieve config file
    """
    try:
        if self.args["configfile"]:
            return self.args["configfile"]
    except KeyError:
        pass
    return os.path.expanduser('~/.config/greg/greg.conf')
python
{ "resource": "" }
q4638
Session.retrieve_data_directory
train
def retrieve_data_directory(self):
    """
    Retrieve the data directory

    Look first into config_filename_global
    then into config_filename_user. The latter takes precedence.
    """
    args = self.args
    try:
        if args['datadirectory']:
            aux.ensure_dir(args['datadirectory'])
            return args['datadirectory']
    except KeyError:
        pass
    config = configparser.ConfigParser()
    config.read([config_filename_global, self.config_filename_user])
    section = config.default_section
    data_path = config.get(section, 'Data directory',
                           fallback='~/.local/share/greg')
    data_path_expanded = os.path.expanduser(data_path)
    aux.ensure_dir(data_path_expanded)
    return os.path.expanduser(data_path_expanded)
python
{ "resource": "" }
q4639
Feed.will_tag
train
def will_tag(self):
    """
    Check whether the feed should be tagged
    """
    wanttags = self.retrieve_config('Tag', 'no')
    if wanttags == 'yes':
        if aux.staggerexists:
            willtag = True
        else:
            willtag = False
            print(("You want me to tag {0}, but you have not installed "
                   "the Stagger module. I cannot honour your request.").
                  format(self.name), file=sys.stderr, flush=True)
    else:
        willtag = False
    return willtag
python
{ "resource": "" }
q4640
Feed.how_many
train
def how_many(self):
    """
    Ascertain where to start downloading, and how many entries.
    """
    if self.linkdates != []:
        # What follows is a quick sanity check: if the entry date is in the
        # future, this is probably a mistake, and we just count the entry
        # date as right now.
        if max(self.linkdates) <= list(time.localtime()):
            currentdate = max(self.linkdates)
        else:
            currentdate = list(time.localtime())
            print(("This entry has its date set in the future. "
                   "I will use your current local time as its date "
                   "instead."), file=sys.stderr, flush=True)
        stop = sys.maxsize
    else:
        currentdate = [1, 1, 1, 0, 0]
        firstsync = self.retrieve_config('firstsync', '1')
        if firstsync == 'all':
            stop = sys.maxsize
        else:
            stop = int(firstsync)
    return currentdate, stop
python
{ "resource": "" }
q4641
Feed.fix_linkdate
train
def fix_linkdate(self, entry):
    """
    Give a date for the entry, depending on feed.sync_by_date
    Save it as feed.linkdate
    """
    if self.sync_by_date:
        try:
            entry.linkdate = list(entry.published_parsed)
            self.linkdate = list(entry.published_parsed)
        except (AttributeError, TypeError):
            try:
                entry.linkdate = list(entry.updated_parsed)
                self.linkdate = list(entry.updated_parsed)
            except (AttributeError, TypeError):
                print(("This entry doesn't seem to have a parseable date. "
                       "I will use your local time instead."),
                      file=sys.stderr, flush=True)
                entry.linkdate = list(time.localtime())
                self.linkdate = list(time.localtime())
    else:
        entry.linkdate = list(time.localtime())
python
{ "resource": "" }
q4642
Feed.retrieve_mime
train
def retrieve_mime(self):
    """
    Check the mime-type to download
    """
    mime = self.retrieve_config('mime', 'audio')
    mimedict = {"number": mime}  # the input that parse_for_download expects
    return aux.parse_for_download(mimedict)
python
{ "resource": "" }
q4643
Feed.download_entry
train
def download_entry(self, entry):
    """
    Find entry link and download entry
    """
    downloadlinks = {}
    downloaded = False
    ignoreenclosures = self.retrieve_config('ignoreenclosures', 'no')
    notype = self.retrieve_config('notype', 'no')
    if ignoreenclosures == 'no':
        for enclosure in entry.enclosures:
            if notype == 'yes':
                downloadlinks[urlparse(enclosure["href"]).path.split(
                    "/")[-1]] = enclosure["href"]  # preserve original name
            else:
                try:
                    # We will download all enclosures of the desired
                    # mime-type
                    if any([mimetype in enclosure["type"] for mimetype in
                            self.mime]):
                        downloadlinks[urlparse(enclosure["href"]).path.split(
                            "/")[-1]] = enclosure["href"]
                        # preserve original name
                except KeyError:
                    print("This podcast carries no information about "
                          "enclosure types. Try using the notype "
                          "option in your greg.conf", file=sys.stderr,
                          flush=True)
    else:
        downloadlinks[urlparse(entry.link).query.split(
            "/")[-1]] = entry.link
    for podname in downloadlinks:
        if (podname, entry.linkdate) not in zip(self.entrylinks,
                                                self.linkdates):
            try:
                title = entry.title
            except AttributeError:
                title = podname
            try:
                sanitizedsummary = aux.html_to_text(entry.summary)
                if sanitizedsummary == "":
                    sanitizedsummary = "No summary available"
            except AttributeError:
                sanitizedsummary = "No summary available"
            try:
                placeholders = Placeholders(
                    self, entry, downloadlinks[podname], podname, title,
                    sanitizedsummary)
                placeholders = aux.check_directory(placeholders)
                condition = aux.filtercond(placeholders)
                if condition:
                    print("Downloading {} -- {}".format(title, podname))
                    aux.download_handler(self, placeholders)
                    if self.willtag:
                        aux.tag(placeholders)
                    downloaded = True
                else:
                    print("Skipping {} -- {}".format(title, podname))
                    downloaded = False
                if self.info:
                    with open(self.info, 'a') as current:
                        # We write to file this often to ensure that
                        # downloaded entries count as downloaded.
                        current.write(''.join([podname, ' ',
                                               str(entry.linkdate), '\n']))
            except URLError:
                sys.exit(("... something went wrong. "
                          "Are you connected to the internet?"))
    return downloaded
python
{ "resource": "" }
q4644
main
train
def main():
    """
    Parse the args and call whatever function was selected
    """
    args = parser.parse_args()
    try:
        function = args.func
    except AttributeError:
        parser.print_usage()
        parser.exit(1)
    function(vars(args))
python
{ "resource": "" }
q4645
add
train
def add(args):
    """
    Add a new feed
    """
    session = c.Session(args)
    if args["name"] in session.feeds.sections():
        sys.exit("You already have a feed with that name.")
    if args["name"] in ["all", "DEFAULT"]:
        sys.exit(
            ('greg uses "{}" for a special purpose. '
             "Please choose another name for your feed.").format(args["name"]))
    entry = {}
    for key, value in args.items():
        if value is not None and key != "func" and key != "name":
            entry[key] = value
    session.feeds[args["name"]] = entry
    with open(session.data_filename, 'w') as configfile:
        session.feeds.write(configfile)
python
{ "resource": "" }
q4646
info
train
def info(args):
    """
    Provide information about a number of feeds
    """
    session = c.Session(args)
    if "all" in args["names"]:
        feeds = session.list_feeds()
    else:
        feeds = args["names"]
    for feed in feeds:
        aux.pretty_print(session, feed)
python
{ "resource": "" }
q4647
sync
train
def sync(args):
    """
    Implement the 'greg sync' command
    """
    import operator
    session = c.Session(args)
    if "all" in args["names"]:
        targetfeeds = session.list_feeds()
    else:
        targetfeeds = []
        for name in args["names"]:
            if name not in session.feeds:
                print("You don't have a feed called {}."
                      .format(name), file=sys.stderr, flush=True)
            else:
                targetfeeds.append(name)
    for target in targetfeeds:
        feed = c.Feed(session, target, None)
        if not feed.wentwrong:
            try:
                title = feed.podcast.target.title
            except AttributeError:
                title = target
            print("Checking", title, end="...\n")
            currentdate, stop = feed.how_many()
            entrycounter = 0
            entries_to_download = feed.podcast.entries
            for entry in entries_to_download:
                feed.fix_linkdate(entry)
            # Sort entries_to_download, but only if you want to download as
            # many as there are
            if stop >= len(entries_to_download):
                entries_to_download.sort(key=operator.attrgetter("linkdate"),
                                         reverse=False)
            for entry in entries_to_download:
                if entry.linkdate > currentdate:
                    downloaded = feed.download_entry(entry)
                    entrycounter += downloaded
                if entrycounter >= stop:
                    break
            print("Done")
        else:
            # use the feed name here; joining the Feed object itself
            # would raise a TypeError
            msg = ''.join(["I cannot sync ", target,
                           " just now. Are you connected to the internet?"])
            print(msg, file=sys.stderr, flush=True)
python
{ "resource": "" }
q4648
check
train
def check(args):
    """
    Implement the 'greg check' command
    """
    session = c.Session(args)
    if str(args["url"]) != 'None':
        url = args["url"]
        name = "DEFAULT"
    else:
        try:
            url = session.feeds[args["feed"]]["url"]
            name = args["feed"]
        except KeyError:
            sys.exit("You don't appear to have a feed with that name.")
    podcast = aux.parse_podcast(url)
    for entry in enumerate(podcast.entries):
        listentry = list(entry)
        print(listentry[0], end=": ")
        try:
            print(listentry[1]["title"], end=" (")
        except KeyError:
            print(listentry[1]["link"], end=" (")
        try:
            print(listentry[1]["updated"], end=")")
        except KeyError:
            print("", end=")")
        print()
    dumpfilename = os.path.join(session.data_dir, 'feeddump')
    with open(dumpfilename, mode='wb') as dumpfile:
        dump = [name, podcast]
        pickle.dump(dump, dumpfile)
python
{ "resource": "" }
q4649
parse_podcast
train
def parse_podcast(url):
    """
    Try to parse podcast
    """
    try:
        podcast = feedparser.parse(url)
        wentwrong = "urlopen" in str(podcast["bozo_exception"])
    except KeyError:
        wentwrong = False
    if wentwrong:
        print("Error: ", url, ": ", str(podcast["bozo_exception"]))
    return podcast
python
{ "resource": "" }
q4650
check_directory
train
def check_directory(placeholders):
    """
    Find out, and create if needed, the directory in which the feed
    will be downloaded
    """
    feed = placeholders.feed
    args = feed.args
    placeholders.directory = "This very directory"  # wink, wink
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    try:
        if args["downloaddirectory"]:
            ensure_dir(args["downloaddirectory"])
            placeholders.directory = args["downloaddirectory"]
    except KeyError:
        pass
    download_path = os.path.expanduser(
        feed.retrieve_config("Download Directory", "~/Podcasts"))
    subdirectory = feed.retrieve_config(
        "Create subdirectory", "no")
    if "no" in subdirectory:
        placeholders.directory = download_path
    elif "yes" in subdirectory:
        subdnametemplate = feed.retrieve_config(
            "subdirectory_name", "{podcasttitle}")
        subdname = substitute_placeholders(
            subdnametemplate, placeholders)
        placeholders.directory = os.path.join(download_path, subdname)
    ensure_dir(placeholders.directory)
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    return placeholders
python
{ "resource": "" }
q4651
tag
train
def tag(placeholders):
    """
    Tag the file at podpath with the information in podcast and entry
    """
    # We first recover the name of the file to be tagged...
    template = placeholders.feed.retrieve_config("file_to_tag", "{filename}")
    filename = substitute_placeholders(template, placeholders)
    podpath = os.path.join(placeholders.directory, filename)
    # ... and this is it

    # now we create a dictionary of tags and values
    tagdict = placeholders.feed.defaulttagdict  # these are the defaults
    try:  # We do as if there was a section with potential tag info
        feedoptions = placeholders.feed.config.options(placeholders.name)
        # this monstrous concatenation of classes... surely a bad idea.
        tags = [[option.replace("tag_", ""), placeholders.feed.config[
            placeholders.name][option]] for option in feedoptions
            if "tag_" in option]  # these are the tags to be filled
        if tags:
            for tag in tags:
                tagdict[tag[0]] = tag[1]
    except configparser.NoSectionError:
        pass
    for tag in tagdict:
        metadata = substitute_placeholders(
            tagdict[tag], placeholders)
        if metadata:
            stagger.util.set_frames(podpath, {tag: metadata})
        else:
            stagger.util.remove_frames(podpath, tag)
python
{ "resource": "" }
q4652
download_handler
train
def download_handler(feed, placeholders):
    """
    Parse and execute the download handler
    """
    import shlex
    value = feed.retrieve_config('downloadhandler', 'greg')
    if value == 'greg':
        while os.path.isfile(placeholders.fullpath):
            placeholders.fullpath = placeholders.fullpath + '_'
            placeholders.filename = placeholders.filename + '_'
        urlretrieve(placeholders.link, placeholders.fullpath)
    else:
        value_list = shlex.split(value)
        instruction_list = [substitute_placeholders(part, placeholders)
                            for part in value_list]
        returncode = subprocess.call(instruction_list)
        if returncode:
            raise URLError
python
{ "resource": "" }
q4653
pretty_print
train
def pretty_print(session, feed):
    """
    Print the dictionary entry of a feed in a nice way.
    """
    if feed in session.feeds:
        print()
        feed_info = os.path.join(session.data_dir, feed)
        entrylinks, linkdates = parse_feed_info(feed_info)
        print(feed)
        print("-" * len(feed))
        print(''.join([" url: ", session.feeds[feed]["url"]]))
        if linkdates != []:
            print(''.join([" Next sync will download from: ", time.strftime(
                "%d %b %Y %H:%M:%S", tuple(max(linkdates))), "."]))
    else:
        print("You don't have a feed called {}.".format(feed),
              file=sys.stderr, flush=True)
python
{ "resource": "" }
q4654
substitute_placeholders
train
def substitute_placeholders(inputstring, placeholders):
    """
    Take a string with placeholders, and return the string with
    substitutions.
    """
    newst = inputstring.format(link=placeholders.link,
                               filename=placeholders.filename,
                               directory=placeholders.directory,
                               fullpath=placeholders.fullpath,
                               title=placeholders.title,
                               filename_title=placeholders.filename_title,
                               date=placeholders.date_string(),
                               podcasttitle=placeholders.podcasttitle,
                               filename_podcasttitle=
                               placeholders.filename_podcasttitle,
                               name=placeholders.name,
                               subtitle=placeholders.sanitizedsubtitle,
                               entrysummary=placeholders.entrysummary)
    return newst
python
{ "resource": "" }
q4655
symm_block_tridiag_matmul
train
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
    """
    Compute matrix-vector product with a symmetric block
    tridiagonal matrix H and vector v.

    :param H_diag: block diagonal terms of H
    :param H_upper_diag: upper block diagonal terms of H
    :param v: vector to multiply
    :return: H * v
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T-1, D, D)
    assert v.shape == (T, D)

    out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
    out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
    out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
    return out
python
{ "resource": "" }
q4656
scipy_solve_symm_block_tridiag
train
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
    """
    use scipy.linalg.solveh_banded to solve a symmetric block tridiagonal system

    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    from scipy.linalg import solveh_banded
    ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
        if ab is None else ab
    x = solveh_banded(ab, v.ravel(), lower=True)
    return x.reshape(v.shape)
python
{ "resource": "" }
q4657
sample_block_tridiag
train
def sample_block_tridiag(H_diag, H_upper_diag):
    """
    helper function for sampling block tridiag gaussians.
    this is only for speed comparison with the solve approach.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))

    y = info_sample(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return y
python
{ "resource": "" }
q4658
compute_symm_block_tridiag_covariances
train
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
    """
    use the info smoother to solve a symmetric block tridiagonal system
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))

    _, _, sigmas, E_xt_xtp1 = \
        info_E_step(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return sigmas, E_xt_xtp1
python
{ "resource": "" }
q4659
LDSStatesZeroInflatedCountData.resample_zeroinflation_variables
train
def resample_zeroinflation_variables(self):
    """
    There's no way around the fact that we have to look at every
    data point, even the zeros here.
    """
    # TODO: move this to cython?
    T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
    indptr = [0]
    indices = []
    vals = []
    offset = 0
    X = np.hstack((self.gaussian_states, self.inputs))
    for t in range(T):
        # Evaluate probability of data
        y_t = np.zeros(N)
        ns_t = self.data.indices[self.data.indptr[t]:self.data.indptr[t+1]]
        y_t[ns_t] = self.data.data[self.data.indptr[t]:self.data.indptr[t+1]]
        ll = self.emission_distn._elementwise_log_likelihood((X[t], y_t))
        ll = ll.ravel()

        # Evaluate the probability that each emission was "exposed",
        # i.e. p(z_tn = 1 | y_tn, x_tn)
        log_p_exposed = np.log(self.rho) + ll
        log_p_exposed -= np.log(np.exp(log_p_exposed) + (1-self.rho) * (y_t == 0))

        # Sample zero inflation mask
        z_t = np.random.rand(N) < np.exp(log_p_exposed)

        # Construct the sparse matrix
        t_inds = np.where(z_t)[0]
        indices.append(t_inds)
        vals.append(y_t[t_inds])
        offset += t_inds.size
        indptr.append(offset)

    # Construct a sparse matrix
    vals = np.concatenate(vals)
    indices = np.concatenate(indices)
    indptr = np.array(indptr)
    self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
python
{ "resource": "" }
q4660
PoissonRegression.expected_log_likelihood
train
def expected_log_likelihood(self, mus, sigmas, y):
    """
    Compute the expected log likelihood for a mean and
    covariance of x and an observed value of y.
    """
    # Flatten the covariance
    T = mus.shape[0]
    D = self.D_in
    sigs_vec = sigmas.reshape((T, D ** 2))

    # Compute the log likelihood of each column
    ll = np.zeros((T, self.D_out))
    for n in range(self.D_out):
        an = self.A[n]

        E_loglmbda = np.dot(mus, an)
        ll[:, n] += y[:, n] * E_loglmbda

        # Vectorized log likelihood calculation:
        # E[log p(y | x)] = y E[log lambda] - E[lambda] + const,
        # so the exponential term is subtracted rather than assigned
        aa_vec = np.outer(an, an).reshape((D ** 2,))
        ll[:, n] -= np.exp(E_loglmbda + 0.5 * np.dot(sigs_vec, aa_vec))

    return ll
python
{ "resource": "" }
q4661
_LaplaceApproxLDSStatesBase.sparse_hessian_log_joint
train
def sparse_hessian_log_joint(self, x):
    """
    The Hessian includes the quadratic terms of the Gaussian LDS prior
    as well as the Hessian of the local log likelihood.
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Collect the Gaussian LDS prior terms
    J_diag, J_upper_diag = self.sparse_J_prior
    H_diag, H_upper_diag = -J_diag, -J_upper_diag

    # Collect the likelihood terms
    H_diag += self.hessian_local_log_likelihood(x)

    # Subtract a little bit to ensure negative definiteness
    H_diag -= 1e-8 * np.eye(D)

    return H_diag, H_upper_diag
python
{ "resource": "" }
q4662
_LaplaceApproxLDSStatesBase.gradient_log_joint
train
def gradient_log_joint(self, x):
    """
    The gradient of the log joint probability.

    For the Gaussian terms, this is

        d/dx [-1/2 x^T J x + h^T x] = -Jx + h.

    For the likelihood terms, we have for each time t

        d/dx log p(yt | xt)
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Collect the Gaussian LDS prior terms
    _, h_init, _ = self.info_init_params
    _, _, _, h1, h2, _ = self.info_dynamics_params
    H_diag, H_upper_diag = self.sparse_J_prior

    # Compute the gradient from the prior
    g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
    g[0] += h_init
    g[:-1] += h1
    g[1:] += h2

    # Compute gradient from the likelihood terms
    g += self.grad_local_log_likelihood(x)
    return g
python
{ "resource": "" }
q4663
_LaplaceApproxLDSStatesBase._laplace_approximation_newton
train
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
    """
    Solve a block tridiagonal system with message passing.
    """
    from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
    scale = self.T * self.D_emission

    def newton_step(x, stepsz):
        assert 0 <= stepsz <= 1
        g = self.gradient_log_joint(x)
        H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
        Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
                                                 -H_upper_diag / scale,
                                                 g / scale)
        return x - stepsz * Hinv_g

    if verbose:
        print("Fitting Laplace approximation")

    itr = [0]

    def cbk(x):
        print("Iteration: ", itr[0],
              "\tObjective: ", (self.log_joint(x) / scale).round(4),
              "\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
        itr[0] += 1

    # Solve for optimal x with Newton's method
    x = self.gaussian_states
    dx = np.inf
    while dx >= tol:
        xnew = newton_step(x, stepsz)
        dx = np.mean(abs(xnew - x))
        x = xnew
        if verbose:
            cbk(x)

    assert np.all(np.isfinite(x))
    if verbose:
        print("Done")
    return x
python
{ "resource": "" }
q4664
pam.authenticate
train
def authenticate(self, username, password, service='login', encoding='utf-8',
                 resetcreds=True):
    """username and password authentication for the given service.

    Returns True for success, or False for failure.

    self.code (integer) and self.reason (string) are always stored and may
    be referenced for the reason why authentication failed. 0/'Success'
    will be stored for success.

    Python3 expects bytes() for ctypes inputs. This function will make
    necessary conversions using the supplied encoding.

    Inputs:
      username: username to authenticate
      password: password in plain text
      service:  PAM service to authenticate against, defaults to 'login'

    Returns:
      success:  True
      failure:  False
    """

    @conv_func
    def my_conv(n_messages, messages, p_response, app_data):
        """Simple conversation function that responds to any prompt where
        the echo is off with the supplied password"""
        # Create an array of n_messages response objects
        addr = calloc(n_messages, sizeof(PamResponse))
        response = cast(addr, POINTER(PamResponse))
        p_response[0] = response
        for i in range(n_messages):
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                dst = calloc(len(password) + 1, sizeof(c_char))
                memmove(dst, cpassword, len(password))
                response[i].resp = dst
                response[i].resp_retcode = 0
        return 0

    # python3 ctypes prefers bytes
    if sys.version_info >= (3,):
        if isinstance(username, str):
            username = username.encode(encoding)
        if isinstance(password, str):
            password = password.encode(encoding)
        if isinstance(service, str):
            service = service.encode(encoding)
    else:
        if isinstance(username, unicode):
            username = username.encode(encoding)
        if isinstance(password, unicode):
            password = password.encode(encoding)
        if isinstance(service, unicode):
            service = service.encode(encoding)

    if b'\x00' in username or b'\x00' in password or b'\x00' in service:
        self.code = 4  # PAM_SYSTEM_ERR in Linux-PAM
        self.reason = 'strings may not contain NUL'
        return False

    # do this up front so we can safely throw an exception if there's
    # anything wrong with it
    cpassword = c_char_p(password)

    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = pam_start(service, username, byref(conv), byref(handle))

    if retval != 0:
        # This is not an authentication error, something has gone wrong
        # starting up PAM
        self.code = retval
        self.reason = "pam_start() failed"
        return False

    retval = pam_authenticate(handle, 0)
    auth_success = retval == 0

    if auth_success and resetcreds:
        retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)

    # store information to inform the caller why we failed
    self.code = retval
    self.reason = pam_strerror(handle, retval)
    if sys.version_info >= (3,):
        self.reason = self.reason.decode(encoding)

    if hasattr(libpam, 'pam_end'):
        pam_end(handle, retval)

    return auth_success
python
{ "resource": "" }
q4665
ssh_config.load
train
def load(self):
    """list the hosts defined in the ssh config file"""
    with open(self.filename) as f:
        content = f.readlines()
    # removes duplicated spaces, splits in two fields, removes leading spaces
    content = [" ".join(x.split()).strip('\n').lstrip().split(' ', 1)
               for x in content]
    hosts = {}
    host = "NA"
    for line in content:
        if line[0].startswith('#') or line[0] == '':
            pass  # ignore line
        else:
            attribute = line[0]
            value = line[1]
            if attribute.lower().strip() == 'host':
                host = value
                hosts[host] = {'host': host}
            else:
                # In case of special configuration lines, such as port
                # forwarding, there would be no 'Host india' line.
                if host in hosts:
                    hosts[host][attribute] = value
    self.hosts = hosts
python
{ "resource": "" }
q4666
LOGGER
train
def LOGGER(filename):
    """creates a logger with the given name.

    You can use it as follows::

       log = cloudmesh.common.LOGGER(__file__)
       log.error("this is an error")
       log.info("this is an info")
       log.warning("this is a warning")
    """
    pwd = os.getcwd()
    name = filename.replace(pwd, "$PWD")
    try:
        (first, name) = name.split("site-packages")
        name += "... site"
    except:
        pass

    loglevel = logging.CRITICAL
    try:
        level = grep("loglevel:", config_file(
            "/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower()
        if level.upper() == "DEBUG":
            loglevel = logging.DEBUG
        elif level.upper() == "INFO":
            loglevel = logging.INFO
        elif level.upper() == "WARNING":
            loglevel = logging.WARNING
        elif level.upper() == "ERROR":
            loglevel = logging.ERROR
        else:
            level = logging.CRITICAL
    except:
        # print "LOGLEVEL NOT FOUND"
        loglevel = logging.DEBUG

    log = logging.getLogger(name)
    log.setLevel(loglevel)

    formatter = logging.Formatter(
        'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name))
    # formatter = logging.Formatter(
    #     'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name))
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    log.addHandler(handler)
    return log
python
{ "resource": "" }
q4667
key_prefix_replace
train
def key_prefix_replace(d, prefix, new_prefix=""):
    """
    replaces the list of prefix in keys of a flattened dict

    :param d: the flattened dict
    :param prefix: a list of prefixes that are replaced with a new prefix.
                   Typically this will be ""
    :type prefix: list of str
    :param new_prefix: The new prefix. By default it is set to ""
    :return: the dict with the keys replaced as specified
    """
    items = []
    for k, v in d.items():
        new_key = k
        for p in prefix:
            new_key = new_key.replace(p, new_prefix, 1)
        items.append((new_key, v))
    return dict(items)
python
{ "resource": "" }
q4668
flatten
train
def flatten(d, parent_key='', sep='__'):
    """
    flattens the dict into a one dimensional dictionary

    :param d: multidimensional dict
    :param parent_key: replaces from the parent key
    :param sep: the separation character used when flattening. the default is __
    :return: the flattened dict
    """
    # http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
    if type(d) == list:
        flat = []
        for entry in d:
            flat.append(flatten(entry, parent_key=parent_key, sep=sep))
        return flat
    else:
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collectionsAbc.MutableMapping):
                items.extend(flatten(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
python
{ "resource": "" }
q4669
FlatDict2.object_to_dict
train
def object_to_dict(cls, obj):
    """
    This function converts Objects into Dictionary
    """
    dict_obj = dict()
    if obj is not None:
        if type(obj) == list:
            dict_list = []
            for inst in obj:
                dict_list.append(cls.object_to_dict(inst))
            dict_obj["list"] = dict_list

        elif not cls.is_primitive(obj):
            for key in obj.__dict__:
                # is an object
                if type(obj.__dict__[key]) == list:
                    dict_list = []
                    for inst in obj.__dict__[key]:
                        dict_list.append(cls.object_to_dict(inst))
                    dict_obj[key] = dict_list
                elif not cls.is_primitive(obj.__dict__[key]):
                    temp_dict = cls.object_to_dict(obj.__dict__[key])
                    dict_obj[key] = temp_dict
                else:
                    dict_obj[key] = obj.__dict__[key]
        elif cls.is_primitive(obj):
            return obj
    return dict_obj
python
{ "resource": "" }
q4670
StopWatch.start
train
def start(cls, name):
    """
    starts a timer with the given name.

    :param name: the name of the timer
    :type name: string
    """
    if cls.debug:
        print("Timer", name, "started ...")
    cls.timer_start[name] = time.time()
python
{ "resource": "" }
q4671
StopWatch.stop
train
def stop(cls, name):
    """
    stops the timer with a given name.

    :param name: the name of the timer
    :type name: string
    """
    cls.timer_end[name] = time.time()
    if cls.debug:
        print("Timer", name, "stopped ...")
python
{ "resource": "" }
q4672
StopWatch.get
train
def get(cls, name):
    """
    returns the time of the timer.

    :param name: the name of the timer
    :type name: string
    :rtype: the elapsed time
    """
    if name in cls.timer_end:
        cls.timer_elapsed[name] = cls.timer_end[name] - \
            cls.timer_start[name]
        return cls.timer_elapsed[name]
    else:
        return "undefined"
python
{ "resource": "" }
q4673
Shell.find_cygwin_executables
train
def find_cygwin_executables(cls):
    """
    find the executables in cygwin
    """
    # list all *.exe in cygwin path, use glob
    exe_paths = glob.glob(cls.cygwin_path + r'\*.exe')
    # print cls.cygwin_path
    for c in exe_paths:
        exe = c.split('\\')
        name = exe[1].split('.')[0]
        # command['windows'][name] = "{:}\{:}.exe".format(cygwin_path, c)
        cls.command['windows'][name] = c
python
{ "resource": "" }
q4674
Shell.terminal_type
train
def terminal_type(cls):
    """
    returns darwin, cygwin, windows, or linux
    """
    what = sys.platform

    kind = 'UNDEFINED_TERMINAL_TYPE'
    if 'linux' in what:
        kind = 'linux'
    elif 'darwin' in what:
        kind = 'darwin'
    elif 'cygwin' in what:
        kind = 'cygwin'
    elif 'windows' in what:
        kind = 'windows'

    return kind
python
{ "resource": "" }
q4675
Shell.execute
train
def execute(cls, cmd, arguments="", shell=False, cwd=None, traceflag=True,
            witherror=True):
    """Run Shell command

    :param witherror: if set to False the error will not be printed
    :param traceflag: if set to True the trace is printed in case of an error
    :param cwd: the current working directory in which the command is
                supposed to be executed.
    :param shell: if set to True the subprocess is called as part of a shell
    :param cmd: command to run
    :param arguments: we do not know yet
    :return:
    """
    # print "--------------"
    result = None
    terminal = cls.terminal_type()
    # print cls.command
    os_command = [cmd]
    if terminal in ['linux', 'windows']:
        os_command = [cmd]
    elif 'cygwin' in terminal:
        if not cls.command_exists(cmd):
            print("ERROR: the command could not be found", cmd)
            return
        else:
            os_command = [cls.command[cls.operating_system()][cmd]]

    if isinstance(arguments, list):
        os_command = os_command + arguments
    elif isinstance(arguments, tuple):
        os_command = os_command + list(arguments)
    elif isinstance(arguments, str):
        os_command = os_command + arguments.split()
    else:
        print("ERROR: Wrong parameter type", type(arguments))

    if cwd is None:
        cwd = os.getcwd()
    try:
        if shell:
            result = subprocess.check_output(
                os_command,
                stderr=subprocess.STDOUT,
                shell=True,
                cwd=cwd)
        else:
            result = subprocess.check_output(
                os_command,
                # shell=True,
                stderr=subprocess.STDOUT,
                cwd=cwd)
    except:
        if witherror:
            Console.error("problem executing subprocess",
                          traceflag=traceflag)
    if result is not None:
        result = result.strip().decode()
    return result
python
{ "resource": "" }
q4676
tempdir
train
def tempdir(*args, **kwargs):
    """A contextmanager to work in an auto-removed temporary directory

    Arguments are passed through to tempfile.mkdtemp

    example:

    >>> with tempdir() as path:
    ...    pass
    """
    d = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield d
    finally:
        shutil.rmtree(d)
python
{ "resource": "" }
q4677
exponential_backoff
train
def exponential_backoff(fn, sleeptime_s_max=30 * 60):
    """Calls `fn` until it returns True, with an exponentially increasing
    wait time between calls"""
    sleeptime_ms = 500
    while True:
        if fn():
            return True
        else:
            print('Sleeping {} ms'.format(sleeptime_ms))
            time.sleep(sleeptime_ms / 1000.0)
            sleeptime_ms *= 2
            if sleeptime_ms / 1000.0 > sleeptime_s_max:
                return False
python
{ "resource": "" }
q4678
grep
train
def grep(pattern, filename):
    """Very simple grep that returns the first matching line in a file.
    String matching only, does not do REs as currently implemented.
    """
    try:
        # for line in file:
        #     if line matches pattern:
        #         return line
        return next((L for L in open(filename) if L.find(pattern) >= 0))
    except StopIteration:
        return ''
python
{ "resource": "" }
q4679
path_expand
train
def path_expand(text):
    """
    returns a string with expanded variable.

    :param text: the path to be expanded, which can include ~ and
                 environment $ variables
    :type text: string
    """
    result = os.path.expandvars(os.path.expanduser(text))
    # template = Template(text)
    # result = template.substitute(os.environ)

    if result.startswith("."):
        result = result.replace(".", os.getcwd(), 1)
    return result
python
{ "resource": "" }
q4680
ordered_load
train
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """
    Loads a yaml stream into an ordered dict while preserving the order

    :param stream: the name of the stream
    :param Loader: the yaml loader (such as yaml.SafeLoader)
    :param object_pairs_hook: the ordered dict
    """
    # noinspection PyClassHasNoInit
    class OrderedLoader(Loader):
        """
        A helper class to define an Ordered Loader
        """
        pass

    def construct_mapping(loader, node):
        """
        construct a flattened node mapping

        :param loader: the loader
        :param node: the node dict
        :return:
        """
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load(stream, OrderedLoader)
python
{ "resource": "" }
q4681
read_yaml_config
train
def read_yaml_config(filename, check=True, osreplace=True, exit=True):
    """
    reads in a yaml file from the specified filename. If check is set to
    true the code will fail if the file does not exist. However if it is
    set to false and the file does not exist, None is returned.

    :param exit: if true exits with sys.exit
    :param osreplace: if true replaces environment variables from the OS
    :param filename: the file name
    :param check: if True fails if the file does not exist,
                  if False and the file does not exist return will be None
    """
    location = filename
    if location is not None:
        location = path_expand(location)

    if not os.path.exists(location) and not check:
        return None

    if check and os.path.exists(location):
        # test for tab in yaml file
        if check_file_for_tabs(location):
            log.error("The file {0} contains tabs. yaml "
                      "Files are not allowed to contain tabs".format(location))
            sys.exit()
        result = None
        try:
            if osreplace:
                result = open(location, 'r').read()
                t = Template(result)
                result = t.substitute(os.environ)

                # data = yaml.safe_load(result)
                data = ordered_load(result, yaml.SafeLoader)
            else:
                f = open(location, "r")
                # data = yaml.safe_load(f)
                # read from the file handle here; `result` is not set
                # on this branch
                data = ordered_load(f, yaml.SafeLoader)
                f.close()
            return data
        except Exception as e:
            log.error(
                "The file {0} fails with a yaml read error".format(filename))
            Error.traceback(e)
            sys.exit()
    else:
        log.error("The file {0} does not exist.".format(filename))
        if exit:
            sys.exit()
    return None
python
{ "resource": "" }
q4682
BaseConfigDict._update_meta
train
def _update_meta(self):
    """
    internal function to define the metadata regarding filename, location,
    and prefix.
    """
    for v in ["filename", "location", "prefix"]:
        if "meta" not in self:
            self["meta"] = {}
        self["meta"][v] = self[v]
        del self[v]
python
{ "resource": "" }
q4683
BaseConfigDict.load
train
def load(self, filename):
    """
    Loads the yaml file with the given filename.

    :param filename: the name of the yaml file
    """
    self._set_filename(filename)

    if os.path.isfile(self['location']):
        # d = OrderedDict(read_yaml_config(self['location'], check=True))
        d = read_yaml_config(self['location'], check=True)
        with open(self['location']) as myfile:
            document = myfile.read()
        x = yaml.load(document, Loader=yaml.FullLoader)
        try:
            self.update(d)
        except:
            print("ERROR: can not find", self["location"])
            sys.exit()
    else:
        print(
            "Error while reading and updating the configuration file {:}".format(
                filename))
python
{ "resource": "" }
q4684
BaseConfigDict.error_keys_not_found
train
def error_keys_not_found(self, keys):
    """
    Check if the requested keys are found in the dict.

    :param keys: keys to be looked for
    """
    try:
        log.error("Filename: {0}".format(self['meta']['location']))
    except:
        log.error("Filename: {0}".format(self['location']))

    log.error("Key '{0}' does not exist".format('.'.join(keys)))
    indent = ""
    last_index = len(keys) - 1
    for i, k in enumerate(keys):
        if i == last_index:
            log.error(indent + k + ": <- this value is missing")
        else:
            log.error(indent + k + ":")
        indent += " "
python
{ "resource": "" }
q4685
BaseConfigDict.yaml
train
def yaml(self):
    """
    returns the yaml output of the dict.
    """
    return ordered_dump(OrderedDict(self),
                        Dumper=yaml.SafeDumper,
                        default_flow_style=False)
python
{ "resource": "" }
q4686
Printer.csv
train
def csv(cls, d, order=None, header=None, sort_keys=True):
    """
    prints a table in csv format

    :param d: a dict with dicts of the same type.
    :type d: dict
    :param order: The order in which the columns are printed.
                  The order is specified by the key names of the dict.
    :type order: list
    :param header: The header of each of the columns
    :type header: list or tuple of field names
    :param sort_keys: TODO: not yet implemented
    :type sort_keys: bool
    :return: a string representing the table in csv format
    """
    first_element = list(d)[0]

    def _keys():
        return list(d[first_element])

    # noinspection PyBroadException
    def _get(element, key):
        try:
            tmp = str(d[element][key])
        except:
            tmp = ' '
        return tmp

    if d is None or d == {}:
        return None

    if order is None:
        order = _keys()

    if header is None and order is not None:
        header = order
    elif header is None:
        header = _keys()

    table = ""
    content = []
    for attribute in order:
        content.append(attribute)
    table = table + ",".join([str(e) for e in content]) + "\n"

    for job in d:
        content = []
        for attribute in order:
            try:
                content.append(d[job][attribute])
            except:
                content.append("None")
        table = table + ",".join([str(e) for e in content]) + "\n"
    return table
python
{ "resource": "" }
q4687
get_fingerprint_from_public_key
train
def get_fingerprint_from_public_key(pubkey):
    """Generate the fingerprint of a public key

    :param str pubkey: the value of the public key
    :returns: fingerprint
    :rtype: str
    """
    # TODO: why is there a tmpdir?
    with tempdir() as workdir:
        key = os.path.join(workdir, 'key.pub')
        with open(key, 'w') as fd:
            fd.write(pubkey)
        cmd = [
            'ssh-keygen',
            '-l',
            '-f', key,
        ]
        p = Subprocess(cmd)
        output = p.stdout.strip()
        bits, fingerprint, _ = output.split(' ', 2)
        return fingerprint
python
{ "resource": "" }
q4688
PhotoAdmin.thumb
train
def thumb(self, obj):
    """
    Generates html and thumbnails for admin site.
    """
    format, created = Format.objects.get_or_create(
        name='newman_thumb',
        defaults={
            'max_width': 100,
            'max_height': 100,
            'flexible_height': False,
            'stretch': False,
            'nocrop': True,
        })
    if created:
        format.sites = Site.objects.all()
    info = obj.get_formated_photo(format)
    return '<a href="%(href)s"><img src="%(src)s"></a>' % {
        'href': '%s/' % obj.pk,
        'src': info['url']
    }
python
{ "resource": "" }
q4689
process_pdb
train
def process_pdb(pdbfile, outpath, as_string=False, outputprefix='report'):
    """Analysis of a single PDB file. Can generate textual reports, XML,
    PyMOL session files and images as output."""
    if not as_string:
        startmessage = '\nStarting analysis of %s\n' % pdbfile.split('/')[-1]
    else:
        startmessage = "Starting analysis from stdin.\n"
    write_message(startmessage)
    write_message('=' * len(startmessage) + '\n')
    mol = PDBComplex()
    mol.output_path = outpath
    mol.load_pdb(pdbfile, as_string=as_string)
    # #@todo Offers possibility for filter function from command line (by ligand chain, position, hetid)
    for ligand in mol.ligands:
        mol.characterize_complex(ligand)

    create_folder_if_not_exists(outpath)

    # Generate the report files
    streport = StructureReport(mol, outputprefix=outputprefix)

    config.MAXTHREADS = min(config.MAXTHREADS, len(mol.interaction_sets))

    ######################################
    # PyMOL Visualization (parallelized) #
    ######################################

    if config.PYMOL or config.PICS:
        try:
            from plip.modules.visualize import visualize_in_pymol
        except ImportError:
            from modules.visualize import visualize_in_pymol
        complexes = [VisualizerData(mol, site) for site in sorted(mol.interaction_sets)
                     if not len(mol.interaction_sets[site].interacting_res) == 0]
        if config.MAXTHREADS > 1:
            write_message('\nGenerating visualizations in parallel on %i cores ...' % config.MAXTHREADS)
            parfn = parallel_fn(visualize_in_pymol)
            parfn(complexes, processes=config.MAXTHREADS)
        else:
            [visualize_in_pymol(plcomplex) for plcomplex in complexes]

    if config.XML:  # Generate report in xml format
        streport.write_xml(as_string=config.STDOUT)

    if config.TXT:  # Generate report in txt (rst) format
        streport.write_txt(as_string=config.STDOUT)
python
{ "resource": "" }
q4690
download_structure
train
def download_structure(inputpdbid):
    """Given a PDB ID, downloads the corresponding PDB structure.
    Checks for validity of the ID and handles errors while downloading.
    Returns the path of the downloaded file."""
    try:
        if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':
            sysexit(3, 'Invalid PDB ID (Wrong format)\n')
        pdbfile, pdbid = fetch_pdb(inputpdbid.lower())
        pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))
        create_folder_if_not_exists(config.BASEPATH)
        with open(pdbpath, 'w') as g:
            g.write(pdbfile)
        write_message('file downloaded as %s\n\n' % pdbpath)
        return pdbpath, pdbid
    except ValueError:  # Invalid PDB ID, cannot fetch from RCSB server
        sysexit(3, 'Invalid PDB ID (Entry does not exist)\n')
python
{ "resource": "" }
q4691
remove_duplicates
train
def remove_duplicates(slist):
    """Checks input lists for duplicates and returns a list with unique entries"""
    unique = list(set(slist))
    difference = len(slist) - len(unique)
    if difference == 1:
        write_message("Removed one duplicate entry from input list.\n")
    if difference > 1:
        write_message("Removed %i duplicate entries from input list.\n" % difference)
    return unique
python
{ "resource": "" }
q4692
main
train
def main(inputstructs, inputpdbids):
    """Main function. Calls functions for processing, report generation and visualization."""
    pdbid, pdbpath = None, None
    # @todo For multiprocessing, implement better stacktracing for errors
    # Print title and version
    title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
    write_message('\n' + '*' * len(title) + '\n')
    write_message(title)
    write_message('\n' + '*' * len(title) + '\n\n')
    outputprefix = config.OUTPUTFILENAME

    if inputstructs is not None:  # Process PDB file(s)
        num_structures = len(inputstructs)
        inputstructs = remove_duplicates(inputstructs)
        read_from_stdin = False
        for inputstruct in inputstructs:
            if inputstruct == '-':
                inputstruct = sys.stdin.read()
                read_from_stdin = True
                if config.RAWSTRING:
                    if sys.version_info < (3,):
                        inputstruct = bytes(inputstruct).decode('unicode_escape')
                    else:
                        inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
            else:
                if os.path.getsize(inputstruct) == 0:
                    sysexit(2, 'Empty PDB file\n')  # Exit if input file is empty
                if num_structures > 1:
                    basename = inputstruct.split('.')[-2].split('/')[-1]
                    config.OUTPATH = '/'.join([config.BASEPATH, basename])
                    outputprefix = 'report'
            process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
    else:  # Try to fetch the current PDB structure(s) directly from the RCSB server
        num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
        for inputpdbid in inputpdbids:
            pdbpath, pdbid = download_structure(inputpdbid)
            if num_pdbids > 1:
                config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
                outputprefix = 'report'
            process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)

    if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
        if config.BASEPATH in ['.', './']:
            write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
        else:
            write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
python
{ "resource": "" }
q4693
FormatedPhotoForm.clean
train
def clean(self):
    """
    Validation function that checks whether the crop rectangle fits
    into the source photo (the format dimensions are not checked here).
    """
    data = self.cleaned_data
    photo = data['photo']
    if (
        (data['crop_left'] > photo.width) or
        (data['crop_top'] > photo.height) or
        ((data['crop_left'] + data['crop_width']) > photo.width) or
        ((data['crop_top'] + data['crop_height']) > photo.height)
    ):
        raise ValidationError(ugettext("The specified crop coordinates do not fit into the source photo."))
    return data
python
{ "resource": "" }
q4694
FormatForm.clean
train
def clean(self):
    """
    Check format name uniqueness across sites.

    :return: cleaned_data
    """
    data = self.cleaned_data
    formats = Format.objects.filter(name=data['name'])
    if self.instance:
        formats = formats.exclude(pk=self.instance.pk)
    exists_sites = []
    for f in formats:
        for s in f.sites.all():
            if s in data['sites']:
                exists_sites.append(s.__unicode__())
    if exists_sites:
        # Interpolate outside of ugettext so the message stays translatable.
        raise ValidationError(ugettext("Format with this name exists for site(s): %s") % ", ".join(exists_sites))
    return data
python
{ "resource": "" }
q4695
PhotoOptions.format_photo_json
train
def format_photo_json(self, request, photo, format):
    "Used in admin image 'crop tool'."
    try:
        photo = get_cached_object(Photo, pk=photo)
        format = get_cached_object(Format, pk=format)
        content = {
            'error': False,
            'image': settings.MEDIA_URL + photo.image,
            'width': photo.width,
            'height': photo.height,
            'format_width': format.max_width,
            'format_height': format.max_height,
        }
    except (Photo.DoesNotExist, Format.DoesNotExist):
        content = {'error': True}
    return HttpResponse(simplejson.dumps(content))
python
{ "resource": "" }
q4696
ChimeraVisualizer.set_initial_representations
train
def set_initial_representations(self):
    """Set the initial representations"""
    self.update_model_dict()
    self.rc("background solid white")
    self.rc("setattr g display 0")  # Hide all pseudobonds
    self.rc("~display #%i & :/isHet & ~:%s" % (self.model_dict[self.plipname], self.hetid))
python
{ "resource": "" }
q4697
ChimeraVisualizer.update_model_dict
train
def update_model_dict(self):
    """Updates the model dictionary"""
    dct = {}
    models = self.chimera.openModels
    for md in models.list():
        dct[md.name] = md.id
    self.model_dict = dct
python
{ "resource": "" }
q4698
ChimeraVisualizer.atom_by_serialnumber
train
def atom_by_serialnumber(self):
    """Provides a dictionary mapping serial numbers to their atom objects."""
    atm_by_snum = {}
    for atom in self.model.atoms:
        atm_by_snum[atom.serialNumber] = atom
    return atm_by_snum
python
{ "resource": "" }
q4699
ChimeraVisualizer.show_halogen
train
def show_halogen(self):
    """Visualizes halogen bonds."""
    grp = self.getPseudoBondGroup("HalogenBonds-%i" % self.tid, associateWith=[self.model])
    grp.lineWidth = 3
    for i in self.plcomplex.halogen_bonds:
        b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]])
        b.color = self.colorbyname('turquoise')
        self.bs_res_ids.append(i.acc_id)
python
{ "resource": "" }