sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def serialize(obj):
    """JSON serializer that accepts datetime & date.

    :param obj: the object json.dumps could not serialize natively
    :return: an ISO-8601 string for date/datetime values
    :raises TypeError: for any other type, matching the json.dumps contract
        for ``default`` callbacks. (The original fell through and returned
        None, silently serializing unknown objects as null.)
    """
    from datetime import datetime, date, time
    if isinstance(obj, date) and not isinstance(obj, datetime):
        # Promote a plain date to midnight so both serialize the same way.
        obj = datetime.combine(obj, time.min)
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError(
        "Object of type {} is not JSON serializable".format(type(obj).__name__))
JSON serializer that accepts datetime & date
entailment
def check(response, expected_status=200, url=None):
    """
    Check whether the status code of the response equals expected_status
    and raise an _APIError otherwise.

    :param response: the requests response object to check
    :param expected_status: the expected HTTP status code
    :param url: The url of the response (for error messages).
                Defaults to response.url
    :return: response.json() if the Content-Type is application/json,
             otherwise response.text
    """
    if response.status_code != expected_status:
        if url is None:
            url = response.url
        try:
            err = response.json()
        except Exception:  # narrow from bare except: still force generic error
            err = {}
        if all(x in err for x in ("status", "message", "description", "details")):
            # Structured API error: propagate all fields.
            raise _APIError(err["status"], err['message'], url, err,
                            err["description"], err["details"])
        else:
            # generic error
            suffix = ".html" if "<html" in response.text else ".txt"
            msg = response.text
            if len(msg) > 200:
                # Long bodies are written to a temp file and snipped in the
                # error message (fixed the missing closing bracket).
                with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
                    f.write(response.text.encode("utf-8"))
                msg = ("{}...\n\n[snipped; full response written to {f.name}]"
                       .format(msg[:100], **locals()))
            msg = ("Request {url!r} returned code {response.status_code},"
                   " expected {expected_status}. \n{msg}".format(**locals()))
            raise _APIError(response.status_code, msg, url, response.text)
    if response.headers.get('Content-Type') == 'application/json':
        try:
            return response.json()
        except Exception:
            raise Exception("Cannot decode json; text={response.text!r}"
                            .format(**locals()))
    else:
        return response.text
Check whether the status code of the response equals expected_status and raise an APIError otherwise. @param url: The url of the response (for error messages). Defaults to response.url @param json: if True, return r.json(), otherwise return r.text
entailment
def _get_auth(self, user=None, password=None):
    """
    Get the authentication info for the current user, from
    1) a ~/.amcatauth file, which should be a csv file containing
       host, username, password entries
    2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables

    :param user: username to look up; if None, the first matching entry wins
    :param password: password override; if None, taken from file or env
    :return: a (username, password) tuple
    :raises Exception: if no credentials could be found anywhere
    """
    fn = os.path.expanduser(AUTH_FILE)
    if os.path.exists(fn):
        # Each csv line: hostname, username, password; "" or "*" match any host.
        for i, line in enumerate(csv.reader(open(fn))):
            if len(line) != 3:
                log.warning("Cannot parse line {i} in {fn}".format(**locals()))
                continue
            hostname, username, pwd = line
            if (hostname in ("", "*", self.host) and (user is None or username == user)):
                return (username, pwd)
    # Fall back to environment variables if the file yielded nothing.
    if user is None:
        user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
    if password is None:
        password = os.environ.get("AMCAT_PASSWORD")
    if user is None or password is None:
        raise Exception("No authentication info for {user}@{self.host} "
                        "from {fn} or AMCAT_USER / AMCAT_PASSWORD "
                        "variables".format(**locals()))
    return user, password
Get the authentication info for the current user, from 1) a ~/.amcatauth file, which should be a csv file containing host, username, password entries 2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
entailment
def request(self, url, method="get", format="json", data=None,
            expected_status=None, headers=None, use_xpost=True, **options):
    """
    Make an HTTP request to the given relative URL with the host, user,
    and password information. Returns the deserialized json if successful,
    and raises an exception otherwise.

    :param url: relative URL (prefixed with host/api/v4) or absolute http URL
    :param method: HTTP method, "get" or "post"
    :param format: value for the 'format' query option (None to omit)
    :param data: request body (must be None for GETs tunneled via x-post)
    :param expected_status: expected status; defaults to 200 (get) / 201 (post)
    :param headers: extra headers; an Authorization token header is always set
    :param use_xpost: tunnel GET queries through POST with
        X-HTTP-METHOD-OVERRIDE so large parameter sets fit in the body
    """
    if expected_status is None:
        if method == "get":
            expected_status = 200
        elif method == "post":
            expected_status = 201
        else:
            raise ValueError("No expected status supplied and method unknown.")
    if not url.startswith("http"):
        url = "{self.host}/api/v4/{url}".format(**locals())
    if format is not None:
        options = dict({'format': format}, **options)
    # Drop None-valued options so they are not sent as query parameters.
    options = {field: value for field, value in options.items() if value is not None}
    headers = dict(headers or {}, Authorization="Token {}".format(self.token))
    #headers['Accept-encoding'] = 'gzip'
    if method == "get" and use_xpost:
        # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
        # query via POST. This allows for a large number of parameters to be supplied
        assert(data is None)
        headers.update({"X-HTTP-METHOD-OVERRIDE": method})
        data = options
        options = None
        method = "post"
    r = requests.request(method, url, data=data, params=options, headers=headers)
    log.debug(
        "HTTP {method} {url} (options={options!r}, data={data!r},"
        "headers={headers}) -> {r.status_code}".format(**locals())
    )
    return check(r, expected_status=expected_status)
Make an HTTP request to the given relative URL with the host, user, and password information. Returns the deserialized json if successful, and raises an exception otherwise
entailment
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
    """
    Get all pages at url, yielding individual results

    :param url: the url to fetch
    :param page: start from this page
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: additional filters
    :return: a generator of objects (dicts) from the API
    """
    n = 0  # running count of results seen; only used for debugging
    # Note: the loop variable deliberately reuses (and shadows) the `page`
    # parameter, counting upward from the requested start page.
    for page in itertools.count(page):
        r = self.request(url, page=page, page_size=page_size, **filters)
        n += len(r['results'])
        log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
        if yield_pages:
            yield r
        else:
            for row in r['results']:
                yield row
        # The API sets 'next' to None on the last page.
        if r['next'] is None:
            break
Get all pages at url, yielding individual results :param url: the url to fetch :param page: start from this page :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: additional filters :return: a generator of objects (dicts) from the API
entailment
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
    """
    Scroll through the resource at url and yield the individual results

    :param url: url to scroll through
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: Additional filters
    :return: a generator of objects (dicts) from the API
    """
    n = 0  # running count of yielded results; only used for debugging
    options = dict(page_size=page_size, **filters)
    # NOTE(review): `format` is captured here but never used below — confirm
    # whether it was meant to be re-applied on continuation requests.
    format = filters.get('format')
    while True:
        # Scroll continuation URLs must be fetched as plain GETs (no x-post).
        r = self.request(url, use_xpost=False, **options)
        n += len(r['results'])
        log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
        if yield_pages:
            yield r
        else:
            for row in r['results']:
                yield row
        if r['next'] is None:
            break
        # Follow the server-provided continuation URL verbatim; it already
        # carries the query parameters, so only a format=None placeholder is
        # passed (which request() filters out as a None-valued option).
        url = r['next']
        options = {'format': None}
Scroll through the resource at url and yield the individual results :param url: url to scroll through :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: Additional filters :return: a generator of objects (dicts) from the API
entailment
def get_status(self):
    """Get the AmCAT status page.

    :return: the response from the status endpoint
    """
    url = URL.status.format(**locals())
    # NOTE(review): sibling methods use self.request / self.get_pages;
    # confirm that a `get_request` method actually exists on this client.
    return self.get_request(url)
Get the AmCAT status page
entailment
def aggregate(self, **filters):
    """Run an aggregate query against the API, yielding result rows."""
    endpoint = URL.aggregate.format(**locals())
    return self.get_pages(endpoint, **filters)
Conduct an aggregate query
entailment
def list_sets(self, project, **filters):
    """List the articlesets in a project, yielding one dict per set."""
    endpoint = URL.articlesets.format(**locals())
    return self.get_pages(endpoint, **filters)
List the articlesets in a project
entailment
def get_set(self, project, articleset, **filters):
    """Get a single articleset in a project.

    (Docstring corrected: the original was copy-pasted from list_sets;
    this method fetches one set via self.request, not a page listing.)
    """
    url = URL.articleset.format(**locals())
    return self.request(url, **filters)
List the articlesets in a project
entailment
def list_articles(self, project, articleset, page=1, **filters):
    """Yield the articles in a set, starting from the given page."""
    endpoint = URL.article.format(**locals())
    return self.get_pages(endpoint, page=page, **filters)
List the articles in a set
entailment
def create_set(self, project, json_data=None, **options):
    """
    Create a new article set.

    Provide the needed arguments either as a json_data payload or as
    key-value pairs (sent form-encoded).
    """
    url = URL.articlesets.format(**locals())
    if json_data is None:
        # No JSON payload: send the keyword options form-encoded.
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    return self.request(url, method='post', data=json_data,
                        headers={'content-type': 'application/json'})
Create a new article set. Provide the needed arguments using post_data or with key-value pairs
entailment
def create_articles(self, project, articleset, json_data=None, **options):
    """
    Create one or more articles in the set.

    Provide the needed arguments using the json_data or with key-value pairs.

    @param json_data: A dictionary or list of dictionaries. Each dict can
        contain a 'children' attribute which is another list of dictionaries.
    """
    url = URL.article.format(**locals())
    # TODO duplicated from create_set, move into requests
    # (or separate post method?)
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    return self.request(url, method='post', data=json_data,
                        headers={'content-type': 'application/json'})
Create one or more articles in the set. Provide the needed arguments using the json_data or with key-value pairs @param json_data: A dictionary or list of dictionaries. Each dict can contain a 'children' attribute which is another list of dictionaries.
entailment
def sign(self, encoded):
    """Return the authentication signature of the given encoded bytes."""
    mac = self._hmac.copy()
    mac.update(encoded)
    return mac.hexdigest().encode('utf-8')
Return authentication signature of encoded bytes
entailment
def split(self, encoded):
    """Split encoded bytes into a (message, signature) pair."""
    boundary = len(encoded) - self.sig_size
    return encoded[:boundary], encoded[-self.sig_size:]
Split into signature and message
entailment
def auth(self, encoded):
    """Validate the integrity of encoded bytes; raise if the signature is bad."""
    message, signature = self.split(encoded)
    expected = self.sign(message)
    # Constant-time comparison to avoid timing side channels.
    if not hmac.compare_digest(signature, expected):
        raise AuthenticatorInvalidSignature
Validate integrity of encoded bytes
entailment
def cached_classproperty(fun):
    """A memoization decorator for class properties.

    It implements the above `classproperty` decorator, with the
    difference that the function result is computed and attached to
    class as direct attribute. (Lazy loading and caching.)
    """
    @functools.wraps(fun)
    def get(cls):
        try:
            # The cache dict is keyed by the wrapped function so several
            # cached properties on one class can share it.
            return cls.__cache[fun]
        except AttributeError:
            # First cached-property access on this class: create the cache.
            cls.__cache = {}
        except KeyError:  # pragma: no cover
            pass
        # Compute once and store for subsequent accesses.
        ret = cls.__cache[fun] = fun(cls)
        return ret
    return classproperty(get)
A memoization decorator for class properties. It implements the above `classproperty` decorator, with the difference that the function result is computed and attached to class as direct attribute. (Lazy loading and caching.)
entailment
def plugin_method(*plugin_names):
    """Plugin Method decorator.

    Signs a web handler function with the plugins to be applied as
    attributes (each named attribute is set to True unless already present).

    Args:
        plugin_names (list): A list of plugin callable names

    Returns:
        A wrapped handler callable.

    Examples:
        >>> @plugin_method('json', 'bill')
        ... def method():
        ...     return "Hello!"
        ...
        >>> print method.json
        True
        >>> print method.bill
        True
    """
    def decorator(handler):
        for name in plugin_names:
            if not hasattr(handler, name):
                setattr(handler, name, True)
        return handler
    return decorator
Plugin Method decorator. Signs a web handler function with the plugins to be applied as attributes. Args: plugin_names (list): A list of plugin callable names Returns: A wrapped handler callable. Examples: >>> @plugin_method('json', 'bill') ... def method(): ... return "Hello!" ... >>> print method.json True >>> print method.bill True
entailment
def route_method(method_name, extra_part=False):
    """Custom handler routing decorator.

    Signs a web handler callable with the http method as attribute.

    Args:
        method_name (str): HTTP method name (i.e GET, POST)
        extra_part (bool): Indicates if wrapped callable name should be
            a part of the actual endpoint.

    Returns:
        A wrapped handler classmethod.
    """
    def decorator(handler):
        if method_name.lower() not in DEFAULT_ROUTES:
            raise HandlerHTTPMethodError(
                'Invalid http method in method: {}'.format(method_name)
            )
        handler.http_method = method_name.upper()
        handler.url_extra_part = handler.__name__ if extra_part else None
        return classmethod(handler)
    return decorator
Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: A wrapped handler callable. examples: >>> @route_method('GET') ... def method(): ... return "Hello!" ... >>> method.http_method 'GET' >>> method.url_extra_part None
entailment
def issue_add(lancet, assign, add_to_sprint, summary):
    """
    Create a new issue on the issue tracker.
    """
    # summary arrives as a sequence of words; join into a single string.
    summary = " ".join(summary)
    issue = create_issue(
        lancet,
        summary,
        # project_id=project_id,
        add_to_active_sprint=add_to_sprint,
    )
    if assign:
        # "me" is a shortcut for the currently authenticated tracker user.
        if assign == "me":
            username = lancet.tracker.whoami()
        else:
            username = assign
        assign_issue(lancet, issue, username)
    click.echo("Created issue")
Create a new issue on the issue tracker.
entailment
def _maybe_update(self, user, attribute, new_value): """ DRY helper. If the specified attribute of the user differs from the specified value, it will be updated. """ old_value = getattr(user, attribute) if new_value != old_value: self.stderr.write( _('Setting {attribute} for user "{username}" to "{new_value}"').format( attribute=attribute, username=user.username, new_value=new_value ) ) setattr(user, attribute, new_value)
DRY helper. If the specified attribute of the user differs from the specified value, it will be updated.
entailment
def _check_email_match(self, user, email): """ DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else. """ if user.email != email: # The passed email address doesn't match this username's email address. # Assume a problem and fail. raise CommandError( _( 'Skipping user "{}" because the specified and existing email ' 'addresses do not match.' ).format(user.username) )
DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else.
entailment
def credentials_checker(url, username, password):
    """Check the provided credentials using the Harvest API."""
    api = HarvestAPI(url, (username, password))
    try:
        api.whoami()
    except HarvestError:
        return False
    return True
Check the provided credentials using the Harvest API.
entailment
def harvest(lancet, config_section):
    """Construct a new Harvest client."""
    # Credentials come from the given config section and are validated
    # through credentials_checker.
    url, username, password = lancet.get_credentials(
        config_section, credentials_checker
    )
    # Pluggable strategies (from the "timer" config) for resolving the
    # Harvest project and task ids.
    project_id_getter = lancet.get_instance_from_config(
        "timer", "project_id_getter", lancet
    )
    task_id_getter = lancet.get_instance_from_config(
        "timer", "task_id_getter", lancet
    )
    client = HarvestPlatform(
        server=url,
        basic_auth=(username, password),
        project_id_getter=project_id_getter,
        task_id_getter=task_id_getter,
    )
    # Close the client's HTTP session when the CLI run finishes.
    lancet.call_on_close(client.close)
    return client
Construct a new Harvest client.
entailment
def temp_dir(folder=None, delete=True):
    # type: (Optional[str], bool) -> str
    """Get a temporary directory optionally with folder appended (and created if it doesn't exist)

    Args:
        folder (Optional[str]): Folder to create in temporary folder. Defaults to None.
        delete (bool): Whether to delete folder on exiting with statement

    Returns:
        str: A temporary directory
    """
    path = get_temp_dir()
    if folder:
        path = join(path, folder)
        if not exists(path):
            makedirs(path)
    try:
        yield path
    finally:
        # Clean up on exit from the with-statement unless asked to keep it.
        if delete:
            rmtree(path)
Get a temporary directory optionally with folder appended (and created if it doesn't exist) Args: folder (Optional[str]): Folder to create in temporary folder. Defaults to None. delete (bool): Whether to delete folder on exiting with statement Returns: str: A temporary directory
entailment
def send(self, obj):
    """Send a push notification.

    :param obj: the NotificationMessage to queue for sending
    :raises ValueError: if obj is not a NotificationMessage
    """
    if not isinstance(obj, NotificationMessage):
        # Fixed: the original used the Python 2-only statement form
        # `raise ValueError, u"..."`, which is a SyntaxError on Python 3.
        raise ValueError(u"You can only send NotificationMessage objects.")
    self._send_queue.put(obj)
Send a push notification
entailment
def get_error(self, block=True, timeout=None):
    """Get the next error message.

    Each error message is a 2-tuple of (status, identifier).
    """
    return self._error_queue.get(block=block, timeout=timeout)
Gets the next error message. Each error message is a 2-tuple of (status, identifier).
entailment
def get_feedback(self, block=True, timeout=None):
    """Get the next feedback message.

    Each feedback message is a 2-tuple of (timestamp, device_token).
    """
    # Lazily start the feedback loop on first use.
    if self._feedback_greenlet is None:
        self._feedback_greenlet = gevent.spawn(self._feedback_loop)
    return self._feedback_queue.get(block=block, timeout=timeout)
Gets the next feedback message. Each feedback message is a 2-tuple of (timestamp, device_token).
entailment
def wait_send(self, timeout=None):
    """Block until all queued messages have been sent (or timeout expires)."""
    self._send_queue_cleared.clear()
    self._send_queue_cleared.wait(timeout=timeout)
Wait until all queued messages are sent.
entailment
def start(self):
    """Start the message sending loop (no-op if already running)."""
    if self._send_greenlet is not None:
        return
    self._send_greenlet = gevent.spawn(self._send_loop)
Start the message sending loop.
entailment
def stop(self, timeout=10.0):
    """
    Send all pending messages, then close the connection.

    Returns True if no message is left to send, False if dirty.

    - timeout: seconds to wait for sending remaining messages; disconnect
      immediately if None.
    """
    # Give the send loop a chance to drain the queue before killing it.
    if (self._send_greenlet is not None) and \
        (self._send_queue.qsize() > 0):
        self.wait_send(timeout=timeout)
    # Tear down all worker greenlets (send, error, feedback) if running.
    if self._send_greenlet is not None:
        gevent.kill(self._send_greenlet)
        self._send_greenlet = None
    if self._error_greenlet is not None:
        gevent.kill(self._error_greenlet)
        self._error_greenlet = None
    if self._feedback_greenlet is not None:
        gevent.kill(self._feedback_greenlet)
        self._feedback_greenlet = None
    # "Dirty" means messages remained unsent after the wait.
    return self._send_queue.qsize() < 1
Send all pending messages, close connection. Returns True if no message left to sent. False if dirty. - timeout: seconds to wait for sending remaining messages. disconnect immedately if None.
entailment
def convert_to_ssml(text, text_format):
    """
    Convert text to SSML based on the text's current format.

    NOTE: This module is extremely limited at the moment and will be expanded.

    :param text: The text to convert.
    :param text_format: The text format of the text. Currently supports
        'plain', 'html' or None for skipping SSML conversion.
    """
    if text_format is None:
        return text
    if text_format == 'plain':
        return plain_to_ssml(text)
    if text_format == 'html':
        return html_to_ssml(text)
    raise ValueError(text_format + ': text format not found.')
Convert text to SSML based on the text's current format. NOTE: This module is extremely limited at the moment and will be expanded. :param text: The text to convert. :param text_format: The text format of the text. Currently supports 'plain', 'html' or None for skipping SSML conversion.
entailment
def html_to_ssml(text):
    """Replace specific html tags with probable SSML counterparts."""
    # Apply the replacements in the mapping's iteration order, same as the
    # original reduce() over html_to_ssml_maps.
    converted = text
    for tag in html_to_ssml_maps:
        converted = converted.replace(tag, html_to_ssml_maps[tag])
    return converted
Replaces specific html tags with probable SSML counterparts.
entailment
def multiple_replace(string, replacements):
    # type: (str, Dict[str,str]) -> str
    """Simultaneously replace multiple strings in a string

    Args:
        string (str): Input string
        replacements (Dict[str,str]): Replacements dictionary

    Returns:
        str: String with replacements
    """
    # Longest keys first so longer matches win over their prefixes.
    keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(key) for key in keys), flags=re.DOTALL)
    return pattern.sub(lambda match: replacements[match.group(0)], string)
Simultaneously replace multiple strigns in a string Args: string (str): Input string replacements (Dict[str,str]): Replacements dictionary Returns: str: String with replacements
entailment
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):
    # type: (str, str, int, str, str) -> List[str]
    """Returns a list of matching blocks of text in a and b

    Args:
        a (str): First string to match
        b (str): Second string to match
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to ''.

    Returns:
        List[str]: List of matching blocks of text
    """
    matcher = difflib.SequenceMatcher(lambda x: x in ignore)
    matcher.set_seqs(a=a, b=b)
    found = []
    for block in matcher.get_matching_blocks():
        text = a[block.a: block.a + block.size]
        if end_characters:
            original = text
            # Trim leading end characters, then cut back to the last one.
            while text and text[0] in end_characters:
                text = text[1:]
            while text and text[-1] not in end_characters:
                text = text[:-1]
            # If trimming removed everything, keep the untrimmed block.
            if not text:
                text = original
        if len(text) >= match_min_size:
            found.append(text)
    return found
Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text
entailment
def get_matching_text(string_list, match_min_size=30, ignore='', end_characters='.!\r\n'):
    # type: (List[str], int, str, str) -> str
    """Return a string of the blocks of text common to all strings in string_list.

    Args:
        string_list (List[str]): List of strings to match
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to '.!\r\n'.

    Returns:
        str: String containing the matching blocks of text
    """
    # Fold the list pairwise: the running result is matched against each
    # subsequent string, keeping only text common so far.
    accumulated = string_list[0]
    for other in string_list[1:]:
        blocks = get_matching_text_in_strs(accumulated, other,
                                           match_min_size=match_min_size,
                                           ignore=ignore,
                                           end_characters=end_characters)
        accumulated = ''.join(blocks)
    return accumulated
Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching
entailment
def get_matching_then_nonmatching_text(string_list, separator='', match_min_size=30, ignore='', end_characters='.!\r\n'):
    # type: (List[str], str, int, str, str) -> str
    """Returns a string containing matching blocks of text in a list of strings followed by non-matching.

    Args:
        string_list (List[str]): List of strings to match
        separator (str): Separator to add between blocks of text. Defaults to ''.
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to '.\r\n'.

    Returns:
        str: String containing matching blocks of text followed by non-matching
    """
    def add_separator_if_needed(text_list):
        # Append separator unless the output already ends with it.
        if separator and len(text_list) > 0 and text_list[-1][-len(separator):] != separator:
            text_list.append(separator)
    a = string_list[0]
    for i in range(1, len(string_list)):
        b = string_list[i]
        # combined_len acts as an "at the very end" sentinel position.
        combined_len = len(a) + len(b)
        result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters)
        # Strip the matched blocks out of both sides to get the leftovers.
        new_a = a
        new_b = b
        for text in result:
            new_a = new_a.replace(text, '')
            new_b = new_b.replace(text, '')
        # Find where each leftover sat in its source string; if it cannot be
        # located (or is empty), treat it as belonging at the end.
        if new_a and new_a in a:
            pos_a = a.index(new_a)
        else:
            pos_a = combined_len
        if new_b and new_b in b:
            pos_b = b.index(new_b)
        else:
            pos_b = combined_len
        # Order the two leftovers so text_1 is the later-positioned one.
        if pos_b > pos_a:
            text_1 = new_b
            pos_1 = pos_b
            text_2 = new_a
            pos_2 = pos_a
        else:
            text_1 = new_a
            pos_1 = pos_a
            text_2 = new_b
            pos_2 = pos_b
        output = list()
        pos = 0
        # Emit matched blocks, inserting each leftover once its original
        # position has been passed.
        for text in result:
            output.append(text)
            pos += len(text)
            if text_1 and pos >= pos_1:
                add_separator_if_needed(output)
                output.append(text_1)
                pos += len(text_1)
                text_1 = None
            if text_2 and pos >= pos_2:
                add_separator_if_needed(output)
                output.append(text_2)
                pos += len(text_2)
                text_2 = None
        # Leftovers flagged with the sentinel position go at the very end.
        if text_1 and pos_1 == combined_len:
            add_separator_if_needed(output)
            output.append(text_1)
        if text_2 and pos_2 == combined_len:
            add_separator_if_needed(output)
            output.append(text_2)
        # The merged text becomes the left side for the next string.
        a = ''.join(output)
    return a
Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching
entailment
def iexpand(string, keep_escapes=False):
    """Expand braces and return an iterator.

    Bytes input is decoded as latin-1, expanded, and each result is
    encoded back to bytes.
    """
    if isinstance(string, bytes):
        expanded = ExpandBrace(keep_escapes).expand(string.decode('latin-1'))
        return (entry.encode('latin-1') for entry in expanded)
    return (entry for entry in ExpandBrace(keep_escapes).expand(string))
Expand braces and return an iterator.
entailment
def set_expanding(self):
    """Mark that we are expanding a sequence; return whether the caller
    is responsible for releasing the flag afterwards."""
    needs_release = not self.expanding
    if needs_release:
        self.expanding = True
    return needs_release
Set that we are expanding a sequence, and return whether a release is required by the caller.
entailment
def get_escape(self, c, i):
    """Consume one escaped character; keep the backslash if configured."""
    try:
        escaped_char = next(i)
    except StopIteration:
        # Trailing backslash: nothing follows it.
        escaped_char = ''
    if self.keep_escapes:
        return c + escaped_char
    return escaped_char
Get an escape.
entailment
def squash(self, a, b):
    """
    Returns a generator that squashes two iterables into one.

    ```
    ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or']
    ```
    """
    # product() is invoked eagerly (as in the original genexp) so the
    # inputs are captured now; joining is deferred.
    pairs = itertools.product(a, b)
    return (''.join(item) if isinstance(item, tuple) else item for item in pairs)
Returns a generator that squashes two iterables into one. ``` ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or'] ```
entailment
def get_literals(self, c, i, depth):
    """
    Get a string literal.

    Gather all the literal chars up to opening curly or closing brace.
    Also gather chars between braces and commas within a group (is_expanding).

    :param c: the current character
    :param i: the character iterator (supports index/rewind)
    :param depth: current brace-nesting depth
    :return: a generator of expanded literals, or None if expanding and
        the input ended before the group closed
    """
    result = ['']
    is_dollar = False
    try:
        while c:
            # A `${` sequence is not a brace group (shell parameter syntax).
            ignore_brace = is_dollar
            is_dollar = False
            if c == '$':
                is_dollar = True
            elif c == '\\':
                c = [self.get_escape(c, i)]
            elif not ignore_brace and c == '{':
                # Try and get the group
                index = i.index
                try:
                    seq = self.get_sequence(next(i), i, depth + 1)
                    if seq:
                        c = seq
                except StopIteration:
                    # Searched to end of string
                    # and still didn't find it.
                    i.rewind(i.index - index)
            elif self.is_expanding() and c in (',', '}'):
                # We are Expanding within a group and found a group delimiter
                # Return what we gathered before the group delimiters.
                i.rewind(1)
                return (x for x in result)
            # Squash the current set of literals.
            result = self.squash(result, [c] if isinstance(c, str) else c)
            c = next(i)
    except StopIteration:
        # Ran off the end inside a group: signal failure to the caller.
        if self.is_expanding():
            return None
    return (x for x in result)
Get a string literal. Gather all the literal chars up to opening curly or closing brace. Also gather chars between braces and commas within a group (is_expanding).
entailment
def combine(self, a, b):
    """A generator yielding all items of a, then all items of b."""
    for item in a:
        yield item
    for item in b:
        yield item
A generator that combines two iterables.
entailment
def get_sequence(self, c, i, depth):
    """
    Get the sequence.

    Get sequence between `{}`, such as: `{a,b}`, `{1..2[..inc]}`, etc.
    It will basically crawl to the end or find a valid series.

    :param c: the current character (first char after the opening `{`)
    :param i: the character iterator (supports index/rewind/advance)
    :param depth: current brace-nesting depth
    :return: a generator over the expanded alternatives, or None if this
        is not a valid group
    """
    result = []
    release = self.set_expanding()
    has_comma = False  # Used to indicate validity of group (`{1..2}` are an exception).
    is_empty = True  # Tracks whether the current slot is empty `{slot,slot,slot}`.

    # Detect numerical and alphabetic series: `{1..2}` etc.
    i.rewind(1)
    item = self.get_range(i)
    i.advance(1)
    if item is not None:
        self.release_expanding(release)
        return (x for x in item)

    try:
        while c:
            # Bash has some special top level logic. if `}` follows `{` but hasn't matched
            # a group yet, keep going except when the first 2 bytes are `{}` which gets
            # completely ignored.
            keep_looking = depth == 1 and not has_comma  # and i.index not in self.skip_index
            if (c == '}' and (not keep_looking or i.index == 2)):
                # If there is no comma, we know the sequence is bogus.
                if is_empty:
                    result = (x for x in self.combine(result, ['']))
                if not has_comma:
                    # Bogus group: re-wrap the literals in the braces.
                    result = ('{' + literal + '}' for literal in result)
                self.release_expanding(release)
                return (x for x in result)
            elif c == ',':
                # Must be the first element in the list.
                has_comma = True
                if is_empty:
                    result = (x for x in self.combine(result, ['']))
                else:
                    is_empty = True
            else:
                if c == '}':
                    # Top level: If we didn't find a comma, we haven't
                    # completed the top level group. Request more and
                    # append to what we already have for the first slot.
                    if not result:
                        result = (x for x in self.combine(result, [c]))
                    else:
                        result = self.squash(result, [c])
                    value = self.get_literals(next(i), i, depth)
                    if value is not None:
                        result = self.squash(result, value)
                        is_empty = False
                else:
                    # Lower level: Try to find group, but give up if cannot acquire.
                    value = self.get_literals(c, i, depth)
                    if value is not None:
                        result = (x for x in self.combine(result, value))
                        is_empty = False
            c = next(i)
    except StopIteration:
        # Input ended mid-group: release the flag and let the caller rewind.
        self.release_expanding(release)
        raise
Get the sequence. Get sequence between `{}`, such as: `{a,b}`, `{1..2[..inc]}`, etc. It will basically crawl to the end or find a valid series.
entailment
def get_range(self, i):
    """
    Check and retrieve range if value is a valid range.

    Here we are looking to see if the value is series or range.
    We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
    """
    try:
        int_match = i.match(RE_INT_ITER)
        if int_match:
            return self.get_int_range(*int_match.groups())
        chr_match = i.match(RE_CHR_ITER)
        if chr_match:
            return self.get_char_range(*chr_match.groups())
    except Exception:  # pragma: no cover
        # Should never fail, but if it does, assume the sequence range
        # was invalid and fall through to "no range".
        pass
    return None
Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
entailment
def format_value(self, value, padding):
    """Format an integer, zero-padding to ``padding`` digits.

    The format spec pads after the sign, so negative values come out as
    e.g. ``-05`` for padding=3. (Removed the dead commented-out code that
    pre-adjusted the sign by hand.)

    :param value: integer to format
    :param padding: minimum field width; 0 disables padding
    :return: the formatted string
    """
    if padding:
        return "{:0{pad}d}".format(value, pad=padding)
    return str(value)
Get padding adjusting for negative values.
entailment
def get_int_range(self, start, end, increment=None):
    """Generate the inclusive integer series from `start` to `end` stepping by `increment`."""
    first = int(start)
    last = int(end)
    step = int(increment) if increment is not None else 1
    max_length = max(len(start), len(end))

    # Zero makes no sense as an increment; like Bash, just assume one.
    if step == 0:
        step = 1

    # Strip signs so leading-zero detection looks at digits only.
    if start[0] == '-':
        start = start[1:]
    if end[0] == '-':
        end = end[1:]

    # A leading zero on either endpoint requests zero padding to the
    # widest endpoint (sign included).
    zero_padded = (len(start) > 1 and start[0] == '0') or (len(end) > 1 and end[0] == '0')
    padding = max_length if zero_padded else 0

    if first < last:
        series = range(first, last + 1, abs(step))
    else:
        series = range(first, last - 1, -abs(step))

    return (self.format_value(number, padding) for number in series)
Get an integer range between start and end and increments of increment.
entailment
def get_char_range(self, start, end, increment=None):
    """Generate the inclusive series of alphabetic characters from `start` to `end`."""
    step = abs(int(increment)) if increment else 1

    # Zero makes no sense as an increment; like Bash, just assume one.
    if step == 0:
        step = 1

    # A descending request walks the reversed alphabet instead.
    reverse = start > end
    alphabet = _nalpha if reverse else _alpha

    first = alphabet.index(start)
    last = alphabet.index(end)

    if first < last:
        selected = alphabet[first:last + 1:step]
    else:
        selected = alphabet[last:first + 1:step]
    return (letter for letter in selected)
Get a range of alphabetic characters.
entailment
def expand(self, string):
    """Expand the pattern string, yielding each expansion."""
    self.expanding = False
    pending_empties = []
    found_literal = False
    if string:
        i = iter(StringIter(string))
        for value in self.get_literals(next(i), i, 0):
            # Hold empty strings back; emit them only once a non-empty
            # value follows, so trailing empties are dropped.
            if not value:
                pending_empties.append(value)
                continue
            found_literal = True
            for empty in pending_empties:
                yield empty
            pending_empties = []
            yield value

    # Nothing but empties (or no input): yield a single empty string.
    if not found_literal:
        yield ""
Expand.
entailment
def merge_two_dictionaries(a, b, merge_lists=False):
    # type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound
    """Merges b into a and returns merged result

    Note that a is modified in place for dicts and lists; for primitives
    (or None) b is returned instead.

    NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen

    Args:
        a (DictUpperBound): dictionary to merge into
        b (DictUpperBound): dictionary to merge from
        merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.

    Returns:
        DictUpperBound: Merged dictionary
    """
    key = None  # Remembered so error messages can name the offending key.
    # ## debug output
    # sys.stderr.write('DEBUG: %s to %s\n' %(b,a))
    try:
        if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)):
            # border case for first run or if a is a primitive
            a = b
        elif isinstance(a, list):
            # lists can be appended or replaced
            if isinstance(b, list):
                if merge_lists:
                    # merge lists
                    a.extend(b)
                else:
                    # replace list
                    a = b
            else:
                # append to list
                a.append(b)
        elif isinstance(a, (dict, UserDict)):
            # dicts must be merged
            if isinstance(b, (dict, UserDict)):
                for key in b:
                    if key in a:
                        # Recurse so nested structures merge too.
                        a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists)
                    else:
                        a[key] = b[key]
            else:
                raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
        else:
            raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
    except TypeError as e:
        # Surface incompatible-type merges as ValueError with context.
        raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
    return a
Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
entailment
def merge_dictionaries(dicts, merge_lists=False):
    # type: (List[DictUpperBound], bool) -> DictUpperBound
    """Merges all dictionaries in dicts into a single dictionary and returns result

    The first dictionary in the list is modified in place and returned.

    Args:
        dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list
        merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.

    Returns:
        DictUpperBound: Merged dictionary
    """
    merged = dicts[0]
    # Fold every remaining dictionary into the first, left to right.
    for dictionary in dicts[1:]:
        merge_two_dictionaries(merged, dictionary, merge_lists=merge_lists)
    return merged
Merges all dictionaries in dicts into a single dictionary and returns result Args: dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
entailment
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
    # type: (DictUpperBound, DictUpperBound, str) -> Dict
    """Compares two dictionaries

    Args:
        d1 (DictUpperBound): First dictionary to compare
        d2 (DictUpperBound): Second dictionary to compare
        no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'.

    Returns:
        Dict: Comparison dictionary mapping each differing key to a (d1 value, d2 value) tuple
    """
    keys1 = set(d1.keys())
    keys2 = set(d2.keys())
    shared = keys1 & keys2

    result = dict()
    # Keys present in both but with differing values.
    for key in shared:
        if d1[key] != d2[key]:
            result[key] = (d1[key], d2[key])
    # Keys only in d1, then keys only in d2.
    for key in keys1 - shared:
        result[key] = (d1[key], no_key)
    for key in keys2 - shared:
        result[key] = (no_key, d2[key])
    return result
Compares two dictionaries Args: d1 (DictUpperBound): First dictionary to compare d2 (DictUpperBound): Second dictionary to compare no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'. Returns: Dict: Comparison dictionary
entailment
def dict_of_lists_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a list in a dictionary by key

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to list in dictionary

    Returns:
        None
    """
    # Fetch the existing list (or start a new one), append and store back.
    current = dictionary.get(key, list())
    current.append(value)
    dictionary[key] = current
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
entailment
def dict_of_sets_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a set in a dictionary by key

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to set in dictionary

    Returns:
        None
    """
    # Fetch the existing set (or start a new one), add and store back.
    current = dictionary.get(key, set())
    current.add(value)
    dictionary[key] = current
Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None
entailment
def list_distribute_contents_simple(input_list, function=lambda x: x):
    # type: (List, Callable[[Any], Any]) -> List
    """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain
    complex types like dictionaries in which case the function can return the appropriate value eg.
    lambda x: x[KEY]

    Args:
        input_list (List): List to distribute values
        function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.

    Returns:
        List: Distributed list
    """
    # Group the items by their distribution key, preserving input order.
    groups = dict()
    for obj in input_list:
        groups.setdefault(function(obj), list()).append(obj)

    # Round-robin over the groups (in sorted key order) until exhausted.
    output_list = list()
    position = 0
    while True:
        appended = False
        for key in sorted(groups):
            group = groups[key]
            if position < len(group):
                output_list.append(group[position])
                appended = True
        if not appended:
            break
        position += 1
    return output_list
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
entailment
def list_distribute_contents(input_list, function=lambda x: x):
    # type: (List, Callable[[Any], Any]) -> List
    """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain
    complex types like dictionaries in which case the function can return the appropriate value eg.
    lambda x: x[KEY]

    Args:
        input_list (List): List to distribute values
        function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.

    Returns:
        List: Distributed list
    """
    def riffle_shuffle(piles_list):
        # Interleave the piles like a riffle shuffle of cards, taking one
        # element from each pile in turn, widest pile first.
        def grouper(n, iterable, fillvalue=None):
            # Collect data into fixed-length chunks, padding with fillvalue.
            args = [iter(iterable)] * n
            return zip_longest(fillvalue=fillvalue, *args)

        if not piles_list:
            return []
        piles_list.sort(key=len, reverse=True)
        width = len(piles_list[0])
        pile_iters_list = [iter(pile) for pile in piles_list]
        # For each pile, a run of its own index, one entry per element.
        pile_sizes_list = [[pile_position] * len(pile) for pile_position, pile in enumerate(piles_list)]
        grouped_rows = grouper(width, itertools.chain.from_iterable(pile_sizes_list))
        # Transposing the rows yields pile indices in interleaved order;
        # None entries are the padding from uneven pile sizes.
        grouped_columns = zip_longest(*grouped_rows)
        shuffled_pile = [next(pile_iters_list[position]) for position in
                         itertools.chain.from_iterable(grouped_columns) if position is not None]
        return shuffled_pile

    # Group items by key (in a dict of lists), then riffle the groups
    # together in sorted key order.
    dictionary = dict()
    for obj in input_list:
        dict_of_lists_add(dictionary, function(obj), obj)
    intermediate_list = list()
    for key in sorted(dictionary):
        intermediate_list.append(dictionary[key])
    return riffle_shuffle(intermediate_list)
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
entailment
def extract_list_from_list_of_dict(list_of_dict, key):
    # type: (List[DictUpperBound], Any) -> List
    """Extract a list by looking up key in each member of a list of dictionaries

    Args:
        list_of_dict (List[DictUpperBound]): List of dictionaries
        key (Any): Key to find in each dictionary

    Returns:
        List: List containing values returned from each dictionary
    """
    return [dictionary[key] for dictionary in list_of_dict]
Extract a list by looking up key in each member of a list of dictionaries Args: list_of_dict (List[DictUpperBound]): List of dictionaries key (Any): Key to find in each dictionary Returns: List: List containing values returned from each dictionary
entailment
def key_value_convert(dictin, keyfn=lambda x: x, valuefn=lambda x: x, dropfailedkeys=False,
                      dropfailedvalues=False, exception=ValueError):
    # type: (DictUpperBound, Callable[[Any], Any], Callable[[Any], Any], bool, bool, ExceptionUpperBound) -> Dict
    """Convert keys and/or values of dictionary using functions passed in as parameters

    Args:
        dictin (DictUpperBound): Input dictionary
        keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x
        valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x
        dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.
        exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError.

    Returns:
        Dict: Dictionary with converted keys and/or values
    """
    dictout = dict()
    for key, value in dictin.items():
        # Convert the key; on failure either skip the entry or keep the
        # original key depending on dropfailedkeys.
        try:
            converted_key = keyfn(key)
        except exception:
            if dropfailedkeys:
                continue
            converted_key = key
        # Same treatment for the value.
        try:
            converted_value = valuefn(value)
        except exception:
            if dropfailedvalues:
                continue
            converted_value = value
        dictout[converted_key] = converted_value
    return dictout
Convert keys and/or values of dictionary using functions passed in as parameters Args: dictin (DictUpperBound): Input dictionary keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError. Returns: Dict: Dictionary with converted keys and/or values
entailment
def integer_key_convert(dictin, dropfailedkeys=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert keys of dictionary to integers

    Thin wrapper around key_value_convert with keyfn=int.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with keys converted to integers
    """
    return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers
entailment
def integer_value_convert(dictin, dropfailedvalues=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert values of dictionary to integers

    Thin wrapper around key_value_convert with valuefn=int.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with values converted to integers
    """
    return key_value_convert(dictin, valuefn=int, dropfailedvalues=dropfailedvalues)
Convert values of dictionary to integers

Args:
    dictin (DictUpperBound): Input dictionary
    dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

Returns:
    Dict: Dictionary with values converted to integers
entailment
def float_value_convert(dictin, dropfailedvalues=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert values of dictionary to floats

    Thin wrapper around key_value_convert with valuefn=float.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with values converted to floats
    """
    return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)
Convert values of dictionary to floats

Args:
    dictin (DictUpperBound): Input dictionary
    dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

Returns:
    Dict: Dictionary with values converted to floats
entailment
def avg_dicts(dictin1, dictin2, dropmissing=True):
    # type: (DictUpperBound, DictUpperBound, bool) -> Dict
    """Create a new dictionary from two dictionaries by averaging values

    Args:
        dictin1 (DictUpperBound): First input dictionary
        dictin2 (DictUpperBound): Second input dictionary
        dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True.

    Returns:
        Dict: Dictionary with values being average of 2 input dictionaries
    """
    dictout = dict()
    for key, value in dictin1.items():
        if key in dictin2:
            # Key in both: store the average.
            dictout[key] = (value + dictin2[key]) / 2
        elif not dropmissing:
            dictout[key] = value
    if not dropmissing:
        # Also carry over keys that appear only in the second dictionary.
        for key, value in dictin2.items():
            if key not in dictin1:
                dictout[key] = value
    return dictout
Create a new dictionary from two dictionaries by averaging values Args: dictin1 (DictUpperBound): First input dictionary dictin2 (DictUpperBound): Second input dictionary dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True. Returns: Dict: Dictionary with values being average of 2 input dictionaries
entailment
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
    # type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]
    """Read a list of rows in dict or list form from a csv. (The headers argument is either a row
    number or list of row numbers (in case of multi-line headers) to be considered as headers
    (rows start counting at 1), or the actual headers defined a list of strings. If not set,
    all rows will be treated as containing values.)

    Args:
        filepath (str): Path to read from
        dict_form (bool): Return in dict form. Defaults to False.
        headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
        **kwargs: Other arguments to pass to Tabulator Stream

    Returns:
        List[Union[Dict, List]]: List of rows in dict or list form
    """
    stream = Stream(filepath, headers=headers, **kwargs)
    stream.open()
    try:
        # try/finally ensures the stream is closed even if reading fails.
        return stream.read(keyed=dict_form)
    finally:
        stream.close()
Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form
entailment
def write_list_to_csv(list_of_rows, filepath, headers=None):
    # type: (List[Union[DictUpperBound, List]], str, Union[int, List[int], List[str], None]) -> None
    """Write a list of rows in dict or list form to a csv. (The headers argument is either a row
    number or list of row numbers (in case of multi-line headers) to be considered as headers
    (rows start counting at 1), or the actual headers defined a list of strings. If not set,
    all rows will be treated as containing values.)

    Args:
        list_of_rows (List[Union[DictUpperBound, List]]): List of rows in dict or list form
        filepath (str): Path to write to
        headers (Union[int, List[int], List[str], None]): Headers to write. Defaults to None.

    Returns:
        None
    """
    stream = Stream(list_of_rows, headers=headers)
    stream.open()
    try:
        # try/finally ensures the stream is closed even if saving fails.
        stream.save(filepath, format='csv')
    finally:
        stream.close()
Write a list of rows in dict or list form to a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: list_of_rows (List[Union[DictUpperBound, List]]): List of rows in dict or list form filepath (str): Path to write to headers (Union[int, List[int], List[str], None]): Headers to write. Defaults to None. Returns: None
entailment
def args_to_dict(args):
    # type: (str) -> DictUpperBound[str,str]
    """Convert command line arguments in a comma separated string to a dictionary

    Only the first ``=`` in each argument separates key from value, so values
    may themselves contain ``=`` characters (eg. url=http://x?q=1).

    Args:
        args (str): Command line arguments of the form key1=value1,key2=value2,...

    Returns:
        DictUpperBound[str,str]: Dictionary of arguments
    """
    arguments = dict()
    for arg in args.split(','):
        # maxsplit=1 so values containing '=' do not raise ValueError.
        key, value = arg.split('=', 1)
        arguments[key] = value
    return arguments
Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments
entailment
def compare_files(path1, path2):
    # type: (str, str) -> List[str]
    """Returns the delta between two files using -, ?, + format excluding
    lines that are the same

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        List[str]: Delta between the two files
    """
    # Use context managers so the file handles are closed promptly rather
    # than leaking until garbage collection.
    with open(path1) as file1, open(path2) as file2:
        diff = difflib.ndiff(file1.readlines(), file2.readlines())
    return [x for x in diff if x[0] in ['-', '+', '?']]
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
entailment
def assert_files_same(path1, path2):
    # type: (str, str) -> None
    """Asserts that two files are the same and returns delta using
    -, ?, + format if not

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        None
    """
    delta = compare_files(path1, path2)
    # Show the full delta as the assertion message when the files differ.
    assert not delta, '\n' + ''.join(delta)
Asserts that two files are the same and returns delta using -, ?, + format if not Args: path1 (str): Path to first file path2 (str): Path to second file Returns: None
entailment
def main():
    """Entry point when called on the command-line.

    Configures locale and stream encodings, builds the argparse command
    tree (setup/submit/status/download/kill/delete/list), then dispatches
    to the chosen subcommand handler.  Exits with status 1 on `Error`,
    0 otherwise.
    """
    # Locale
    locale.setlocale(locale.LC_ALL, '')

    # Encoding for output streams
    if str == bytes:  # PY2
        writer = codecs.getwriter(locale.getpreferredencoding())
        o_stdout, o_stderr = sys.stdout, sys.stderr
        sys.stdout = writer(sys.stdout)
        sys.stdout.buffer = o_stdout
        sys.stderr = writer(sys.stderr)
        sys.stderr.buffer = o_stderr
    else:  # PY3
        sys.stdin = sys.stdin.buffer

    # Parses command-line

    # Runtime to setup
    def add_runtime_option(opt):
        # Shared --runtime flag for subcommands that may set up a queue.
        opt.add_argument(
            '-r', '--runtime', action='store',
            help="runtime to deploy on the server if the queue doesn't exist. "
                 "If unspecified, will auto-detect what is appropriate, and "
                 "fallback on 'default'.")

    # Destination selection
    def add_destination_option(opt):
        # Shared positional destination and --queue flag for all subcommands.
        opt.add_argument('destination', action='store',
                         help="Machine to SSH into; [user@]host[:port]")
        opt.add_argument('--queue', action='store',
                         default=DEFAULT_TEJ_DIR,
                         help="Directory for tej's files")

    # Root parser
    parser = argparse.ArgumentParser(
        description="Trivial Extensible Job-submission")
    parser.add_argument('--version', action='version',
                        version="tej version %s" % tej_version)
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        dest='verbosity',
                        help="augments verbosity level")
    subparsers = parser.add_subparsers(title="commands", metavar='')

    # Setup action
    parser_setup = subparsers.add_parser(
        'setup', help="Sets up tej on a remote machine")
    add_destination_option(parser_setup)
    add_runtime_option(parser_setup)
    parser_setup.add_argument('--make-link', action='append',
                              dest='make_link')
    parser_setup.add_argument('--make-default-link', action='append_const',
                              dest='make_link', const=DEFAULT_TEJ_DIR)
    parser_setup.add_argument('--force', action='store_true')
    parser_setup.add_argument('--only-links', action='store_true')
    parser_setup.set_defaults(func=_setup)

    # Submit action
    parser_submit = subparsers.add_parser(
        'submit', help="Submits a job to a remote machine")
    add_destination_option(parser_submit)
    add_runtime_option(parser_submit)
    parser_submit.add_argument('--id', action='store',
                               help="Identifier for the new job")
    parser_submit.add_argument('--script', action='store',
                               help="Relative name of the script in the "
                                    "directory")
    parser_submit.add_argument('directory', action='store',
                               help="Job directory to upload")
    parser_submit.set_defaults(func=_submit)

    # Status action
    parser_status = subparsers.add_parser(
        'status', help="Gets the status of a job")
    add_destination_option(parser_status)
    parser_status.add_argument('--id', action='store',
                               help="Identifier of the running job")
    parser_status.set_defaults(func=_status)

    # Download action
    parser_download = subparsers.add_parser(
        'download', help="Downloads files from finished job")
    add_destination_option(parser_download)
    parser_download.add_argument('--id', action='store',
                                 help="Identifier of the job")
    parser_download.add_argument('files', action='store',
                                 nargs=argparse.ONE_OR_MORE,
                                 help="Files to download")
    parser_download.set_defaults(func=_download)

    # Kill action
    parser_kill = subparsers.add_parser(
        'kill', help="Kills a running job")
    add_destination_option(parser_kill)
    parser_kill.add_argument('--id', action='store',
                             help="Identifier of the running job")
    parser_kill.set_defaults(func=_kill)

    # Delete action
    parser_delete = subparsers.add_parser(
        'delete', help="Deletes a finished job")
    add_destination_option(parser_delete)
    parser_delete.add_argument('--id', action='store',
                               help="Identifier of the finished job")
    parser_delete.set_defaults(func=_delete)

    # List action
    parser_list = subparsers.add_parser(
        'list', help="Lists remote jobs")
    add_destination_option(parser_list)
    parser_list.set_defaults(func=_list)

    args = parser.parse_args()
    setup_logging(args.verbosity)

    try:
        args.func(args)
    except Error as e:
        # No need to show a traceback here, this is not an internal error
        logger.critical(e)
        sys.exit(1)
    sys.exit(0)
Entry point when called on the command-line.
entailment
def apply(self, callback, context):  # pragma: no cover
    """Apply the HTTPError wrapper to the callback.

    Bottle plugin hook: returns a wrapper around `callback` that converts
    any `bottle.HTTPError` raised by the route into this plugin's error
    representation via `self.error_wrapper.from_status`.  `context` is the
    route context supplied by Bottle and is not used here.
    """
    def wrapper(*args, **kwargs):
        try:
            return callback(*args, **kwargs)
        except bottle.HTTPError as error:
            # Translate the framework error into the API error format.
            return self.error_wrapper.from_status(
                status_line=error.status_line,
                msg=error.body
            )

    return wrapper
Apply the HTTPError wrapper to the callback.
entailment
def add_episode(self, text, text_format, title, author, summary=None, publish_date=None,
                synthesizer='watson', synth_args=None, sentence_break='. '):
    """
    Add a new episode to the podcast.

    :param text: See :meth:`Episode`.
    :param text_format: See :meth:`Episode`.
    :param title: See :meth:`Episode`. Must be unique within this podcast.
    :param author: See :meth:`Episode`.
    :param summary: See :meth:`Episode`.
    :param publish_date: See :meth:`Episode`.
    :param synthesizer: See :meth:`typecaster.utils.text_to_speech`.
    :param synth_args: See :meth:`typecaster.utils.text_to_speech`.
    :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
    :raises ValueError: If an episode with ``title`` already exists.
    """
    if title in self.episodes:
        raise ValueError('"' + title + '" already exists as an episode title.')
    # Derive the output mp3 path from the title (spaces -> underscores).
    link = self.output_path + '/' + title.replace(' ', '_').lower() + '.mp3'
    episode_text = convert_to_ssml(text, text_format)
    new_episode = Episode(episode_text, text_format, title, author, link, summary, publish_date,
                          synthesizer, synth_args, sentence_break)
    self.episodes[title] = new_episode
Add a new episode to the podcast. :param text: See :meth:`Episode`. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
entailment
def add_scheduled_job(self, text_source, cron_args, text_format, title, author, summary=None,
                      synthesizer='watson', synth_args=None, sentence_break='. '):
    """
    Add and start a new scheduled job to dynamically generate podcasts.

    Note: scheduling will end when the process ends. This works best when run inside
    an existing application.

    :param text_source: A function that generates podcast text. Examples: a function that
        opens a file with today's date as a filename or a function that requests a specific
        url and extracts the main text. Also see :meth:`Episode`.
    :param cron_args: A dictionary of cron parameters. Keys can be: 'year', 'month', 'day',
        'week', 'day_of_week', 'hour', 'minute' and 'second'. Keys that are not specified
        will be parsed as 'any'/'*'.
    :param text_format: See :meth:`Episode`.
    :param title: See :meth:`Episode`. Since titles need to be unique, a timestamp will be
        appended to the title for each episode.
    :param author: See :meth:`Episode`.
    :param summary: See :meth:`Episode`.
    :param synthesizer: See :meth:`typecaster.utils.text_to_speech`.
    :param synth_args: See :meth:`typecaster.utils.text_to_speech`.
    :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
    :raises TypeError: If ``text_source`` is not callable.
    """
    if not callable(text_source):
        raise TypeError('Argument "text" must be a function')

    def add_episode():
        # Runs on each cron trigger: pull fresh text and add it as a new
        # episode with a timestamped (hence unique) title.
        episode_text = text_source()
        episode_title = title + '_' + datetime.utcnow().strftime('%Y%m%d%H%M%S')
        self.add_episode(episode_text, text_format, episode_title, author, summary,
                         datetime.utcnow(), synthesizer, synth_args, sentence_break)

    self.scheduled_jobs[title] = self._scheduler.add_job(add_episode, 'cron', id=title, **cron_args)
    # Start the scheduler lazily on the first job.
    if not self._scheduler.running:
        self._scheduler.start()
Add and start a new scheduled job to dynamically generate podcasts. Note: scheduling will end when the process ends. This works best when run inside an existing application. :param text_source: A function that generates podcast text. Examples: a function that opens a file with today's date as a filename or a function that requests a specific url and extracts the main text. Also see :meth:`Episode`. :param cron_args: A dictionary of cron parameters. Keys can be: 'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute' and 'second'. Keys that are not specified will be parsed as 'any'/'*'. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. Since titles need to be unique, a timestamp will be appended to the title for each episode. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
entailment
def publish(self, titles):
    """
    Publish a set of episodes to the Podcast's RSS feed.

    :param titles: Either a single episode title or a sequence of episode titles to publish.
    """
    # A single title string is published directly; any other sequence is
    # treated as a collection of titles.
    if isinstance(titles, six.string_types):
        self.episodes[titles].publish()
    elif isinstance(titles, Sequence):
        for episode_title in titles:
            self.episodes[episode_title].publish()
    else:
        raise TypeError('titles must be a string or a sequence of strings.')

    self.update_rss_feed()
Publish a set of episodes to the Podcast's RSS feed. :param titles: Either a single episode title or a sequence of episode titles to publish.
entailment
def render_audio(self):
    """
    Synthesize audio from the episode's text.

    Sets `self.duration` to an HH:MM:SS string, writes the MP3 to
    `self.link` and records the file size in bytes in `self.length`.
    """
    segment = text_to_speech(self._text, self.synthesizer, self.synth_args, self.sentence_break)
    # len(segment) is treated as a duration in milliseconds below.
    milli = len(segment)
    seconds = '{0:.1f}'.format(float(milli) / 1000 % 60).zfill(2)
    # NOTE(review): '{0:.0f}' rounds rather than truncates, so minutes/hours
    # can round up at a boundary (e.g. 59.5 minutes -> '60') — confirm intended.
    minutes = '{0:.0f}'.format((milli / (1000 * 60)) % 60).zfill(2)
    hours = '{0:.0f}'.format((milli / (1000 * 60 * 60)) % 24).zfill(2)
    self.duration = hours + ':' + minutes + ':' + seconds

    segment.export(self.link, format='mp3')
    self.length = os.path.getsize(self.link)
Synthesize audio from the episode's text.
entailment
def publish(self):
    """
    Mark an episode as published.

    Raises a Warning if the episode is already published.
    """
    # Guard clause: anything other than an explicit False counts as
    # already published.
    if self.published is not False:
        raise Warning(self.title + ' is already published.')
    self.published = True
Mark an episode as published.
entailment
def unpublish(self):
    """
    Mark an episode as not published.

    Raises a Warning if the episode is already not published.
    """
    # Guard clause: anything other than an explicit True counts as
    # already unpublished.
    if self.published is not True:
        raise Warning(self.title + ' is already not published.')
    self.published = False
Mark an episode as not published.
entailment
def _environment_variables(**kwargs):
    # type: (Any) -> Any
    """
    Overwrite keyword arguments with environment variables

    Args:
        **kwargs: See below
        user_agent (str): User agent string.

    Returns:
        kwargs: Changed keyword arguments
    """
    # Each environment variable, when set, overrides its keyword argument.
    for env_name, kwarg_name in (('USER_AGENT', 'user_agent'), ('PREPREFIX', 'preprefix')):
        env_value = os.getenv(env_name)
        if env_value is not None:
            kwargs[kwarg_name] = env_value
    return kwargs
Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments
entailment
def _construct(configdict, prefix, ua):
    # type: (Dict, str, str) -> str
    """
    Construct user agent

    Args:
        configdict (str): Additional configuration for user agent
        prefix (str): Text to put at start of user agent
        ua (str): Custom user agent text

    Returns:
        str: Full user agent string of the form [preprefix:][prefix-]ua
    """
    if not ua:
        raise UserAgentError("User_agent parameter missing. It can be your project's name for example.")
    parts = list()
    preprefix = configdict.get('preprefix')
    if preprefix:
        parts.append('%s:' % preprefix)
    if prefix:
        parts.append('%s-' % prefix)
    parts.append(ua)
    return ''.join(parts)
Construct user agent Args: configdict (str): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string
entailment
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None): # type: (str, str, Optional[str]) -> str """ Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent """ if not user_agent_config_yaml: user_agent_config_yaml = cls.default_user_agent_config_yaml logger.info( 'No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml) if not isfile(user_agent_config_yaml): raise UserAgentError( "User_agent should be supplied in a YAML config file. It can be your project's name for example.") logger.info('Loading user agent config from: %s' % user_agent_config_yaml) user_agent_config_dict = load_yaml(user_agent_config_yaml) if user_agent_lookup: user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup) if not user_agent_config_dict: raise UserAgentError("No user agent information read from: %s" % user_agent_config_yaml) ua = user_agent_config_dict.get('user_agent') return cls._construct(user_agent_config_dict, prefix, ua)
Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent
entailment
def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ kwargs = UserAgent._environment_variables(**kwargs) if 'user_agent' in kwargs: user_agent = kwargs['user_agent'] del kwargs['user_agent'] prefix = kwargs.get('prefix') if prefix: del kwargs['prefix'] else: prefix = 'HDXPythonUtilities/%s' % get_utils_version() if not user_agent: ua = cls._load(prefix, user_agent_config_yaml, user_agent_lookup) else: ua = cls._construct(kwargs, prefix, user_agent) return ua
Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
entailment
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> None """ Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None """ cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None
entailment
def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ if user_agent or user_agent_config_yaml or 'user_agent' in UserAgent._environment_variables(**kwargs): return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs) if cls.user_agent: return cls.user_agent else: raise UserAgentError( 'You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')
Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
entailment
def save_yaml(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ if sortkeys: dictionary = dict(dictionary) with open(path, 'w') as f: if pretty: pyaml.dump(dictionary, f) else: yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
entailment
def save_json(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ with open(path, 'w') as f: if pretty: indent = 2 separators = (',', ': ') else: indent = None separators = (', ', ': ') json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)
Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
entailment
def load_yaml(path): # type: (str) -> OrderedDict """Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file """ with open(path, 'rt') as f: yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader) if not yamldict: raise (LoadError('YAML file: %s is empty!' % path)) return yamldict
Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file
entailment
def load_json(path): # type: (str) -> OrderedDict """Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file """ with open(path, 'rt') as f: jsondict = json.loads(f.read(), object_pairs_hook=OrderedDict) if not jsondict: raise (LoadError('JSON file: %s is empty!' % path)) return jsondict
Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file
entailment
def load_file_to_str(path): # type: (str) -> str """ Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file """ with open(path, 'rt') as f: string = f.read().replace(linesep, '') if not string: raise LoadError('%s file is empty!' % path) return string
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
entailment
def contributors(lancet, output): """ List all contributors visible in the git history. """ sorting = pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE commits = lancet.repo.walk(lancet.repo.head.target, sorting) contributors = ((c.author.name, c.author.email) for c in commits) contributors = OrderedDict(contributors) template_content = content_from_path( lancet.config.get('packaging', 'contributors_template')) template = Template(template_content) output.write(template.render(contributors=contributors).encode('utf-8'))
List all contributors visible in the git history.
entailment
def text_to_speech(text, synthesizer, synth_args, sentence_break): """ Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '. """ if len(text.split()) < 50: if synthesizer == 'watson': with open('.temp.wav', 'wb') as temp: temp.write(watson_request(text=text, synth_args=synth_args).content) response = AudioSegment.from_wav('.temp.wav') os.remove('.temp.wav') return response else: raise ValueError('"' + synthesizer + '" synthesizer not found.') else: segments = [] for i, sentence in enumerate(text.split(sentence_break)): if synthesizer == 'watson': with open('.temp' + str(i) + '.wav', 'wb') as temp: temp.write(watson_request(text=sentence, synth_args=synth_args).content) segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav')) os.remove('.temp' + str(i) + '.wav') else: raise ValueError('"' + synthesizer + '" synthesizer not found.') response = segments[0] for segment in segments[1:]: response = response + segment return response
Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '.
entailment
def watson_request(text, synth_args): """ Makes a single request to the IBM Watson text-to-speech API. :param text: The text that will be synthesized to audio. :param synth_args: A dictionary of arguments to add to the request. These should include username and password for authentication. """ params = { 'text': text, 'accept': 'audio/wav' } if synth_args is not None: params.update(synth_args) if 'username' in params: username = params.pop('username') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') if 'password' in params: password = params.pop('password') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') return requests.get(watson_url, auth=(username, password), params=params)
Makes a single request to the IBM Watson text-to-speech API. :param text: The text that will be synthesized to audio. :param synth_args: A dictionary of arguments to add to the request. These should include username and password for authentication.
entailment
def build_rss_feed(podcast): """ Builds a podcast RSS feed and returns an xml file. :param podcast: A Podcast model to build the RSS feed from. """ if not os.path.exists(podcast.output_path): os.makedirs(podcast.output_path) rss = ET.Element('rss', attrib={'xmlns:itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd', 'version': '2.0'}) channel = ET.SubElement(rss, 'channel') ET.SubElement(channel, 'title').text = podcast.title ET.SubElement(channel, 'link').text = podcast.link ET.SubElement(channel, 'copyright').text = podcast.copyright ET.SubElement(channel, 'itunes:subtitle').text = podcast.subtitle ET.SubElement(channel, 'itunes:author').text = podcast.author ET.SubElement(channel, 'itunes:summary').text = podcast.description ET.SubElement(channel, 'description').text = podcast.description owner = ET.SubElement(channel, 'itunes:owner') ET.SubElement(owner, 'itunes:name').text = podcast.owner_name ET.SubElement(owner, 'itunes:email').text = podcast.owner_email ET.SubElement(channel, 'itunes:image').text = podcast.image for category in podcast.categories: ET.SubElement(channel, 'itunes:category').text = category for episode in sorted(podcast.episodes.values(), key=lambda x: x.publish_date): if episode.published is True: item = ET.SubElement(channel, 'item') ET.SubElement(item, 'title').text = episode.title ET.SubElement(item, 'author').text = episode.author ET.SubElement(item, 'summary').text = episode.summary ET.SubElement(item, 'enclosure', attrib={'url': podcast.link + '/' + episode.link, 'length': str(episode.length), 'type': 'audio/x-mp3'}) ET.SubElement(item, 'guid').text = podcast.link + '/' + episode.link ET.SubElement(item, 'pubDate').text = episode.publish_date.strftime('%a, %d %b %Y %H:%M:%S UTC') ET.SubElement(item, 'itunes:duration').text = episode.duration tree = ET.ElementTree(rss) with open(podcast.output_path + '/feed.xml', 'wb') as feed: tree.write(feed)
Builds a podcast RSS feed and returns an xml file. :param podcast: A Podcast model to build the RSS feed from.
entailment
def get(self, uid=None): """Example retrieve API method. """ # Return resource collection if uid is None: return self.response_factory.ok(data=resource_db) # Return resource based on UID. try: record = [r for r in resource_db if r.get('id') == uid].pop() except IndexError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist.'.format(uid)]) return self.response_factory.ok(data=record)
Example retrieve API method.
entailment
def post(self): """Example POST method. """ resource_data = self.request.json record = {'id': str(len(resource_db) + 1), 'name': resource_data.get('name')} resource_db.append(record) return self.response_factory.ok(data=record)
Example POST method.
entailment
def put(self, uid): """Example PUT method. """ resource_data = self.request.json try: record = resource_db[uid] except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) record['name'] = resource_data.get('name') return self.response_factory.ok(data=record)
Example PUT method.
entailment
def delete(self, uid): """Example DELETE method. """ try: record = resource_db[uid].copy() except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) del resource_db[uid] return self.response_factory.ok(data=record)
Example DELETE method.
entailment
def get_pages(url): """ Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat """ while True: yield url doc = html.parse(url).find("body") links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")] if not links: break url = urljoin(url, links[0].get('href'))
Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat
entailment
def get_article_urls(url): """ Return the articles from a page Technically, look for a div with class mw-search-result-heading and get the first link from this div """ doc = html.parse(url).getroot() for div in doc.cssselect("div.mw-search-result-heading"): href = div.cssselect("a")[0].get('href') if ":" in href: continue # skip Category: links href = urljoin(url, href) yield href
Return the articles from a page Technically, look for a div with class mw-search-result-heading and get the first link from this div
entailment
def get_article(url): """ Return a single article as a 'amcat-ready' dict Uses the 'export' function of wikinews to get an xml article """ a = html.parse(url).getroot() title = a.cssselect(".firstHeading")[0].text_content() date = a.cssselect(".published")[0].text_content() date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat() paras = a.cssselect("#mw-content-text p") paras = paras[1:] # skip first paragraph, which contains date text = "\n\n".join(p.text_content().strip() for p in paras) return dict(headline=title, date=date, url=url, text=text, medium="Wikinews")
Return a single article as a 'amcat-ready' dict Uses the 'export' function of wikinews to get an xml article
entailment
def scrape_wikinews(conn, project, articleset, query): """ Scrape wikinews articles from the given query @param conn: The AmcatAPI object @param articleset: The target articleset ID @param category: The wikinews category name """ url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query) logging.info(url) for page in get_pages(url): urls = get_article_urls(page) arts = list(get_articles(urls)) logging.info("Adding {} articles to set {}:{}" .format(len(arts), project, articleset)) conn.create_articles(project=project, articleset=articleset, json_data=arts)
Scrape wikinews articles from the given query @param conn: The AmcatAPI object @param articleset: The target articleset ID @param category: The wikinews category name
entailment
def start_service(addr, n): """ Start a service """ s = Service(addr) started = time.time() for _ in range(n): msg = s.socket.recv() s.socket.send(msg) s.socket.close() duration = time.time() - started print('Raw REP service stats:') util.print_stats(n, duration) return
Start a service
entailment
def bench(client, n): """ Benchmark n requests """ items = list(range(n)) # Time client publish operations # ------------------------------ started = time.time() msg = b'x' for i in items: client.socket.send(msg) res = client.socket.recv() assert msg == res duration = time.time() - started print('Raw REQ client stats:') util.print_stats(n, duration)
Benchmark n requests
entailment