repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
39
1.84M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
Yelp/threat_intel
threat_intel/util/http.py
AvailabilityLimiter.map_with_retries
def map_with_retries(self, requests, responses_for_requests):
    """Provides session-based retry functionality

    Issues all requests concurrently, records successful responses, and
    recursively retries failures while ``self.total_retries`` remains.

    :param requests: A collection of Request objects.
    :param responses_for_requests: Dictionary mapping of requests to responses
    """
    retries = []
    # Kick off every request up front; each preq.callable() returns a future.
    response_futures = [preq.callable() for preq in requests]

    for request, response_future in zip(requests, response_futures):
        try:
            response = response_future.result()
            if response is not None and response.status_code == 403:
                logging.warning('Request to {} caused a 403 response status code.'.format(request.url))
                raise InvalidRequestError('Access forbidden')
            if response is not None:
                responses_for_requests[request] = response
        except RequestException as re:
            logging.error('An exception was raised for {}: {}'.format(request.url, re))
            # Consume one retry from the shared budget and queue the request again.
            if self.total_retries > 0:
                self.total_retries -= 1
                retries.append(request)

    # Recursively retry failed requests with the modified total retry count
    if retries:
        self.map_with_retries(retries, responses_for_requests)
python
def map_with_retries(self, requests, responses_for_requests):
    """Issue all requests, record successful responses, and recursively retry
    failed ones while ``self.total_retries`` remains.

    :param requests: A collection of Request objects.
    :param responses_for_requests: Dictionary mapping of requests to responses
    """
    retries = []
    # Start every request; each preq.callable() returns a future.
    response_futures = [preq.callable() for preq in requests]

    for request, response_future in zip(requests, response_futures):
        try:
            response = response_future.result()
            if response is not None and response.status_code == 403:
                logging.warning('Request to {} caused a 403 response status code.'.format(request.url))
                raise InvalidRequestError('Access forbidden')
            if response is not None:
                responses_for_requests[request] = response
        except RequestException as re:
            logging.error('An exception was raised for {}: {}'.format(request.url, re))
            # Consume one retry from the shared budget and queue the request again.
            if self.total_retries > 0:
                self.total_retries -= 1
                retries.append(request)

    # Recursively retry failed requests with the decremented retry budget.
    if retries:
        self.map_with_retries(retries, responses_for_requests)
[ "def", "map_with_retries", "(", "self", ",", "requests", ",", "responses_for_requests", ")", ":", "retries", "=", "[", "]", "response_futures", "=", "[", "preq", ".", "callable", "(", ")", "for", "preq", "in", "requests", "]", "for", "request", ",", "response_future", "in", "zip", "(", "requests", ",", "response_futures", ")", ":", "try", ":", "response", "=", "response_future", ".", "result", "(", ")", "if", "response", "is", "not", "None", "and", "response", ".", "status_code", "==", "403", ":", "logging", ".", "warning", "(", "'Request to {} caused a 403 response status code.'", ".", "format", "(", "request", ".", "url", ")", ")", "raise", "InvalidRequestError", "(", "'Access forbidden'", ")", "if", "response", "is", "not", "None", ":", "responses_for_requests", "[", "request", "]", "=", "response", "except", "RequestException", "as", "re", ":", "logging", ".", "error", "(", "'An exception was raised for {}: {}'", ".", "format", "(", "request", ".", "url", ",", "re", ")", ")", "if", "self", ".", "total_retries", ">", "0", ":", "self", ".", "total_retries", "-=", "1", "retries", ".", "append", "(", "request", ")", "# Recursively retry failed requests with the modified total retry count", "if", "retries", ":", "self", ".", "map_with_retries", "(", "retries", ",", "responses_for_requests", ")" ]
Provides session-based retry functionality :param requests: A collection of Request objects. :param responses_for_requests: Dictionary mapping of requests to responses
[ "Provides", "session", "-", "based", "retry", "functionality" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L122-L151
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.multi_get
def multi_get(self, urls, query_params=None, to_json=True):
    """Issue multiple GET requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        to_json - A boolean, should the responses be returned as JSON blobs
    Returns:
        a list of dicts if to_json is set of requests.response otherwise.
    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    # GET requests carry no body, so data is always None here.
    verb = MultiRequest._VERB_GET
    return self._multi_request(verb, urls, query_params, data=None, to_json=to_json)
python
def multi_get(self, urls, query_params=None, to_json=True):
    """Issue multiple GET requests via _multi_request.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts of query params
        to_json - A boolean, should the responses be returned as JSON blobs
    Returns:
        a list of dicts if to_json is set, of requests.response otherwise.
    """
    return self._multi_request(
        MultiRequest._VERB_GET,
        urls,
        query_params,
        data=None,
        to_json=to_json,
    )
[ "def", "multi_get", "(", "self", ",", "urls", ",", "query_params", "=", "None", ",", "to_json", "=", "True", ")", ":", "return", "self", ".", "_multi_request", "(", "MultiRequest", ".", "_VERB_GET", ",", "urls", ",", "query_params", ",", "data", "=", "None", ",", "to_json", "=", "to_json", ",", ")" ]
Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
[ "Issue", "multiple", "GET", "requests", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L203-L218
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.multi_post
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
    """Issue multiple POST requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        data - None, a dict or string, or a list of dicts and strings representing the data body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        a list of dicts if to_json is set of requests.response otherwise.
    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    verb = MultiRequest._VERB_POST
    return self._multi_request(verb, urls, query_params, data, to_json=to_json, send_as_file=send_as_file)
python
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
    """Issue multiple POST requests via _multi_request.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts of query params
        data - None, a dict or string, or a list of dicts and strings for the body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        a list of dicts if to_json is set, of requests.response otherwise.
    """
    return self._multi_request(
        MultiRequest._VERB_POST,
        urls,
        query_params,
        data,
        to_json=to_json,
        send_as_file=send_as_file,
    )
[ "def", "multi_post", "(", "self", ",", "urls", ",", "query_params", "=", "None", ",", "data", "=", "None", ",", "to_json", "=", "True", ",", "send_as_file", "=", "False", ")", ":", "return", "self", ".", "_multi_request", "(", "MultiRequest", ".", "_VERB_POST", ",", "urls", ",", "query_params", ",", "data", ",", "to_json", "=", "to_json", ",", "send_as_file", "=", "send_as_file", ",", ")" ]
Issue multiple POST requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs send_as_file - A boolean, should the data be sent as a file. Returns: a list of dicts if to_json is set of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
[ "Issue", "multiple", "POST", "requests", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L220-L237
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._create_request
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
    """Helper method to create a single post/get requests.

    Args:
        verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
        url - A string URL
        query_params - None or a dict
        data - None or a string or a dict
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        requests.PreparedRequest
    Raises:
        InvalidRequestError - if an invalid verb is passed in.
    """
    # Common kwargs shared by both verbs, so defaults are never forgotten.
    request_kwargs = dict(
        headers=self._default_headers,
        params=query_params,
        timeout=self._req_timeout,
    )

    if verb == MultiRequest._VERB_GET:
        return PreparedRequest(partial(self._session.get, url, **request_kwargs), url)

    if verb == MultiRequest._VERB_POST:
        # The body goes out either as a multipart file upload or as form data.
        if send_as_file:
            request_kwargs['files'] = {'file': data}
        else:
            request_kwargs['data'] = data
        return PreparedRequest(partial(self._session.post, url, **request_kwargs), url)

    raise InvalidRequestError('Invalid verb {0}'.format(verb))
python
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
    """Build a PreparedRequest wrapping a session GET or POST call.

    Args:
        verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
        url - A string URL
        query_params - None or a dict
        data - None or a string or a dict
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        a PreparedRequest
    Raises:
        InvalidRequestError - if an invalid verb is passed in.
    """
    # Shared kwargs so default headers/params/timeout are never forgotten.
    kwargs = {
        'headers': self._default_headers,
        'params': query_params,
        'timeout': self._req_timeout,
    }

    if MultiRequest._VERB_POST == verb:
        if send_as_file:
            # Multipart upload: requests expects a 'files' mapping.
            kwargs['files'] = {'file': data}
        else:
            kwargs['data'] = data
        return PreparedRequest(partial(self._session.post, url, **kwargs), url)
    elif MultiRequest._VERB_GET == verb:
        return PreparedRequest(partial(self._session.get, url, **kwargs), url)
    else:
        raise InvalidRequestError('Invalid verb {0}'.format(verb))
[ "def", "_create_request", "(", "self", ",", "verb", ",", "url", ",", "query_params", "=", "None", ",", "data", "=", "None", ",", "send_as_file", "=", "False", ")", ":", "# Prepare a set of kwargs to make it easier to avoid missing default params.", "kwargs", "=", "{", "'headers'", ":", "self", ".", "_default_headers", ",", "'params'", ":", "query_params", ",", "'timeout'", ":", "self", ".", "_req_timeout", ",", "}", "if", "MultiRequest", ".", "_VERB_POST", "==", "verb", ":", "if", "send_as_file", ":", "kwargs", "[", "'files'", "]", "=", "{", "'file'", ":", "data", "}", "else", ":", "kwargs", "[", "'data'", "]", "=", "data", "return", "PreparedRequest", "(", "partial", "(", "self", ".", "_session", ".", "post", ",", "url", ",", "*", "*", "kwargs", ")", ",", "url", ")", "elif", "MultiRequest", ".", "_VERB_GET", "==", "verb", ":", "return", "PreparedRequest", "(", "partial", "(", "self", ".", "_session", ".", "get", ",", "url", ",", "*", "*", "kwargs", ")", ",", "url", ")", "else", ":", "raise", "InvalidRequestError", "(", "'Invalid verb {0}'", ".", "format", "(", "verb", ")", ")" ]
Helper method to create a single post/get requests. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET url - A string URL query_params - None or a dict data - None or a string or a dict send_as_file - A boolean, should the data be sent as a file. Returns: requests.PreparedRequest Raises: InvalidRequestError - if an invalid verb is passed in.
[ "Helper", "method", "to", "create", "a", "single", "post", "/", "get", "requests", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L239-L270
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._zip_request_params
def _zip_request_params(self, urls, query_params, data): """Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue. It's fine for an input to have 0, 1, or a list of values. If there are two inputs each with a list of values, the cardinality of those lists much match. Args: urls - 1 string URL or a list of URLs query_params - None, 1 dict, or a list of dicts data - None, 1 dict or string, or a list of dicts or strings Returns: A list of 3-tuples (url, query_param, data) Raises: InvalidRequestError - if cardinality of lists does not match """ # Everybody gets to be a list if not isinstance(urls, list): urls = [urls] if not isinstance(query_params, list): query_params = [query_params] if not isinstance(data, list): data = [data] # Counts must not mismatch url_count = len(urls) query_param_count = len(query_params) data_count = len(data) max_count = max(url_count, query_param_count, data_count) if ( max_count > url_count > 1 or max_count > query_param_count > 1 or max_count > data_count > 1 ): raise InvalidRequestError( 'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}', url_count, query_param_count, data_count, max_count, ) # Pad out lists if url_count < max_count: urls = urls * max_count if query_param_count < max_count: query_params = query_params * max_count if data_count < max_count: data = data * max_count return list(zip(urls, query_params, data))
python
def _zip_request_params(self, urls, query_params, data):
    """Normalize urls/query_params/data into equal-length lists and zip them.

    Each input may be a single value or a list; length-1 inputs are repeated
    to match the longest list.

    Returns:
        A list of 3-tuples (url, query_param, data)
    Raises:
        InvalidRequestError - if list cardinalities do not match
    """
    # Everybody gets to be a list.
    if not isinstance(urls, list):
        urls = [urls]
    if not isinstance(query_params, list):
        query_params = [query_params]
    if not isinstance(data, list):
        data = [data]

    url_count = len(urls)
    query_param_count = len(query_params)
    data_count = len(data)

    max_count = max(url_count, query_param_count, data_count)

    # Any list longer than 1 but shorter than the longest list is ambiguous.
    if (
        max_count > url_count > 1
        or max_count > query_param_count > 1
        or max_count > data_count > 1
    ):
        # NOTE(review): the {0}..{3} placeholders are never interpolated -- the
        # counts are passed as extra exception args rather than via str.format();
        # confirm whether formatting was intended.
        raise InvalidRequestError(
            'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}',
            url_count,
            query_param_count,
            data_count,
            max_count,
        )

    # Pad out the shorter lists by repetition.
    if url_count < max_count:
        urls = urls * max_count
    if query_param_count < max_count:
        query_params = query_params * max_count
    if data_count < max_count:
        data = data * max_count

    return list(zip(urls, query_params, data))
[ "def", "_zip_request_params", "(", "self", ",", "urls", ",", "query_params", ",", "data", ")", ":", "# Everybody gets to be a list", "if", "not", "isinstance", "(", "urls", ",", "list", ")", ":", "urls", "=", "[", "urls", "]", "if", "not", "isinstance", "(", "query_params", ",", "list", ")", ":", "query_params", "=", "[", "query_params", "]", "if", "not", "isinstance", "(", "data", ",", "list", ")", ":", "data", "=", "[", "data", "]", "# Counts must not mismatch", "url_count", "=", "len", "(", "urls", ")", "query_param_count", "=", "len", "(", "query_params", ")", "data_count", "=", "len", "(", "data", ")", "max_count", "=", "max", "(", "url_count", ",", "query_param_count", ",", "data_count", ")", "if", "(", "max_count", ">", "url_count", ">", "1", "or", "max_count", ">", "query_param_count", ">", "1", "or", "max_count", ">", "data_count", ">", "1", ")", ":", "raise", "InvalidRequestError", "(", "'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}'", ",", "url_count", ",", "query_param_count", ",", "data_count", ",", "max_count", ",", ")", "# Pad out lists", "if", "url_count", "<", "max_count", ":", "urls", "=", "urls", "*", "max_count", "if", "query_param_count", "<", "max_count", ":", "query_params", "=", "query_params", "*", "max_count", "if", "data_count", "<", "max_count", ":", "data", "=", "data", "*", "max_count", "return", "list", "(", "zip", "(", "urls", ",", "query_params", ",", "data", ")", ")" ]
Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue. It's fine for an input to have 0, 1, or a list of values. If there are two inputs each with a list of values, the cardinality of those lists must match. Args: urls - 1 string URL or a list of URLs query_params - None, 1 dict, or a list of dicts data - None, 1 dict or string, or a list of dicts or strings Returns: A list of 3-tuples (url, query_param, data) Raises: InvalidRequestError - if cardinality of lists does not match
[ "Massages", "inputs", "and", "returns", "a", "list", "of", "3", "-", "tuples", "zipping", "them", "up", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L272-L322
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._wait_for_response
def _wait_for_response(self, requests): """Issues a batch of requests and waits for the responses. If some of the requests fail it will retry the failed ones up to `_max_retry` times. Args: requests - A list of requests Returns: A list of `requests.models.Response` objects Raises: InvalidRequestError - if any of the requests returns "403 Forbidden" response """ failed_requests = [] responses_for_requests = OrderedDict.fromkeys(requests) for retry in range(self._max_retry): try: logging.debug('Try #{0}'.format(retry + 1)) self._availability_limiter.map_with_retries(requests, responses_for_requests) failed_requests = [] for request, response in responses_for_requests.items(): if self._drop_404s and response is not None and response.status_code == 404: logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url)) elif not response: failed_requests.append((request, response)) if not failed_requests: break logging.warning('Try #{0}. Expected {1} successful response(s) but only got {2}.'.format( retry + 1, len(requests), len(requests) - len(failed_requests), )) # retry only for the failed requests requests = [fr[0] for fr in failed_requests] except InvalidRequestError: raise except Exception as e: # log the exception for the informative purposes and pass to the next iteration logging.exception('Try #{0}. Exception occured: {1}. Retrying.'.format(retry + 1, e)) pass if failed_requests: logging.warning('Still {0} failed request(s) after {1} retries:'.format( len(failed_requests), self._max_retry, )) for failed_request, failed_response in failed_requests: if failed_response is not None: # in case response text does contain some non-ascii characters failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace') logging.warning('Request to {0} failed with status code {1}. 
Response text: {2}'.format( failed_request.url, failed_response.status_code, failed_response_text, )) else: logging.warning('Request to {0} failed with None response.'.format(failed_request.url)) return list(responses_for_requests.values())
python
def _wait_for_response(self, requests): failed_requests = [] responses_for_requests = OrderedDict.fromkeys(requests) for retry in range(self._max_retry): try: logging.debug('Try self._availability_limiter.map_with_retries(requests, responses_for_requests) failed_requests = [] for request, response in responses_for_requests.items(): if self._drop_404s and response is not None and response.status_code == 404: logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url)) elif not response: failed_requests.append((request, response)) if not failed_requests: break logging.warning('Try retry + 1, len(requests), len(requests) - len(failed_requests), )) requests = [fr[0] for fr in failed_requests] except InvalidRequestError: raise except Exception as e: logging.exception('Try pass if failed_requests: logging.warning('Still {0} failed request(s) after {1} retries:'.format( len(failed_requests), self._max_retry, )) for failed_request, failed_response in failed_requests: if failed_response is not None: failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace') logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format( failed_request.url, failed_response.status_code, failed_response_text, )) else: logging.warning('Request to {0} failed with None response.'.format(failed_request.url)) return list(responses_for_requests.values())
[ "def", "_wait_for_response", "(", "self", ",", "requests", ")", ":", "failed_requests", "=", "[", "]", "responses_for_requests", "=", "OrderedDict", ".", "fromkeys", "(", "requests", ")", "for", "retry", "in", "range", "(", "self", ".", "_max_retry", ")", ":", "try", ":", "logging", ".", "debug", "(", "'Try #{0}'", ".", "format", "(", "retry", "+", "1", ")", ")", "self", ".", "_availability_limiter", ".", "map_with_retries", "(", "requests", ",", "responses_for_requests", ")", "failed_requests", "=", "[", "]", "for", "request", ",", "response", "in", "responses_for_requests", ".", "items", "(", ")", ":", "if", "self", ".", "_drop_404s", "and", "response", "is", "not", "None", "and", "response", ".", "status_code", "==", "404", ":", "logging", ".", "warning", "(", "'Request to {0} failed with status code 404, dropping.'", ".", "format", "(", "request", ".", "url", ")", ")", "elif", "not", "response", ":", "failed_requests", ".", "append", "(", "(", "request", ",", "response", ")", ")", "if", "not", "failed_requests", ":", "break", "logging", ".", "warning", "(", "'Try #{0}. Expected {1} successful response(s) but only got {2}.'", ".", "format", "(", "retry", "+", "1", ",", "len", "(", "requests", ")", ",", "len", "(", "requests", ")", "-", "len", "(", "failed_requests", ")", ",", ")", ")", "# retry only for the failed requests", "requests", "=", "[", "fr", "[", "0", "]", "for", "fr", "in", "failed_requests", "]", "except", "InvalidRequestError", ":", "raise", "except", "Exception", "as", "e", ":", "# log the exception for the informative purposes and pass to the next iteration", "logging", ".", "exception", "(", "'Try #{0}. Exception occured: {1}. 
Retrying.'", ".", "format", "(", "retry", "+", "1", ",", "e", ")", ")", "pass", "if", "failed_requests", ":", "logging", ".", "warning", "(", "'Still {0} failed request(s) after {1} retries:'", ".", "format", "(", "len", "(", "failed_requests", ")", ",", "self", ".", "_max_retry", ",", ")", ")", "for", "failed_request", ",", "failed_response", "in", "failed_requests", ":", "if", "failed_response", "is", "not", "None", ":", "# in case response text does contain some non-ascii characters", "failed_response_text", "=", "failed_response", ".", "text", ".", "encode", "(", "'ascii'", ",", "'xmlcharrefreplace'", ")", "logging", ".", "warning", "(", "'Request to {0} failed with status code {1}. Response text: {2}'", ".", "format", "(", "failed_request", ".", "url", ",", "failed_response", ".", "status_code", ",", "failed_response_text", ",", ")", ")", "else", ":", "logging", ".", "warning", "(", "'Request to {0} failed with None response.'", ".", "format", "(", "failed_request", ".", "url", ")", ")", "return", "list", "(", "responses_for_requests", ".", "values", "(", ")", ")" ]
Issues a batch of requests and waits for the responses. If some of the requests fail it will retry the failed ones up to `_max_retry` times. Args: requests - A list of requests Returns: A list of `requests.models.Response` objects Raises: InvalidRequestError - if any of the requests returns "403 Forbidden" response
[ "Issues", "a", "batch", "of", "requests", "and", "waits", "for", "the", "responses", ".", "If", "some", "of", "the", "requests", "fail", "it", "will", "retry", "the", "failed", "ones", "up", "to", "_max_retry", "times", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L324-L380
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._convert_to_json
def _convert_to_json(self, response): """Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise. """ try: return response.json() except ValueError: logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format( response.request.url, response.text, )) return None
python
def _convert_to_json(self, response):
    """Convert a response body to JSON, returning None when it is not valid JSON.

    Args:
        response - a response object exposing .json(), .text and .request.url
    Returns:
        The parsed JSON value, or None if parsing fails.
    """
    try:
        return response.json()
    except ValueError:
        # Log the offending body so the failure can be diagnosed upstream.
        logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(
            response.request.url,
            response.text,
        ))
        return None
[ "def", "_convert_to_json", "(", "self", ",", "response", ")", ":", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "logging", ".", "warning", "(", "'Expected response in JSON format from {0} but the actual response text is: {1}'", ".", "format", "(", "response", ".", "request", ".", "url", ",", "response", ".", "text", ",", ")", ")", "return", "None" ]
Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise.
[ "Converts", "response", "to", "JSON", ".", "If", "the", "response", "cannot", "be", "converted", "to", "JSON", "then", "None", "is", "returned", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L382-L397
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._multi_request
def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False): """Issues multiple batches of simultaneous HTTP requests and waits for responses. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs Returns: If multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise If a single request is made, the return is not a list Raises: InvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response """ if not urls: raise InvalidRequestError('No URL supplied') # Break the params into batches of request_params request_params = self._zip_request_params(urls, query_params, data) batch_of_params = [ request_params[pos:pos + self._max_requests] for pos in range(0, len(request_params), self._max_requests) ] # Iteratively issue each batch, applying the rate limiter if necessary all_responses = [] for param_batch in batch_of_params: if self._rate_limiter: self._rate_limiter.make_calls(num_calls=len(param_batch)) prepared_requests = [ self._create_request( verb, url, query_params=query_param, data=datum, send_as_file=send_as_file, ) for url, query_param, datum in param_batch ] responses = self._wait_for_response(prepared_requests) for response in responses: if response: all_responses.append(self._convert_to_json(response) if to_json else response) else: all_responses.append(None) return all_responses
python
def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False):
    """Issue batches of simultaneous HTTP requests and collect the responses.

    Args:
        verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts of query params
        data - None, a dict or string, or a list of dicts and strings for the body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        a list of dicts if to_json, a list of responses otherwise (None entries
        for requests with no response)
    Raises:
        InvalidRequestError - if no URL is supplied
    """
    if not urls:
        raise InvalidRequestError('No URL supplied')

    # Break the zipped params into batches of at most _max_requests.
    request_params = self._zip_request_params(urls, query_params, data)
    batch_of_params = [
        request_params[pos:pos + self._max_requests]
        for pos in range(0, len(request_params), self._max_requests)
    ]

    # Iteratively issue each batch, applying the rate limiter if configured.
    all_responses = []
    for param_batch in batch_of_params:
        if self._rate_limiter:
            self._rate_limiter.make_calls(num_calls=len(param_batch))
        prepared_requests = [
            self._create_request(
                verb,
                url,
                query_params=query_param,
                data=datum,
                send_as_file=send_as_file,
            ) for url, query_param, datum in param_batch
        ]
        responses = self._wait_for_response(prepared_requests)
        for response in responses:
            if response:
                all_responses.append(self._convert_to_json(response) if to_json else response)
            else:
                all_responses.append(None)

    return all_responses
[ "def", "_multi_request", "(", "self", ",", "verb", ",", "urls", ",", "query_params", ",", "data", ",", "to_json", "=", "True", ",", "send_as_file", "=", "False", ")", ":", "if", "not", "urls", ":", "raise", "InvalidRequestError", "(", "'No URL supplied'", ")", "# Break the params into batches of request_params", "request_params", "=", "self", ".", "_zip_request_params", "(", "urls", ",", "query_params", ",", "data", ")", "batch_of_params", "=", "[", "request_params", "[", "pos", ":", "pos", "+", "self", ".", "_max_requests", "]", "for", "pos", "in", "range", "(", "0", ",", "len", "(", "request_params", ")", ",", "self", ".", "_max_requests", ")", "]", "# Iteratively issue each batch, applying the rate limiter if necessary", "all_responses", "=", "[", "]", "for", "param_batch", "in", "batch_of_params", ":", "if", "self", ".", "_rate_limiter", ":", "self", ".", "_rate_limiter", ".", "make_calls", "(", "num_calls", "=", "len", "(", "param_batch", ")", ")", "prepared_requests", "=", "[", "self", ".", "_create_request", "(", "verb", ",", "url", ",", "query_params", "=", "query_param", ",", "data", "=", "datum", ",", "send_as_file", "=", "send_as_file", ",", ")", "for", "url", ",", "query_param", ",", "datum", "in", "param_batch", "]", "responses", "=", "self", ".", "_wait_for_response", "(", "prepared_requests", ")", "for", "response", "in", "responses", ":", "if", "response", ":", "all_responses", ".", "append", "(", "self", ".", "_convert_to_json", "(", "response", ")", "if", "to_json", "else", "response", ")", "else", ":", "all_responses", ".", "append", "(", "None", ")", "return", "all_responses" ]
Issues multiple batches of simultaneous HTTP requests and waits for responses. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs Returns: If multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise If a single request is made, the return is not a list Raises: InvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response
[ "Issues", "multiple", "batches", "of", "simultaneous", "HTTP", "requests", "and", "waits", "for", "responses", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L399-L443
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.error_handling
def error_handling(cls, fn):
    """Decorator to handle errors"""
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except InvalidRequestError as e:
            write_exception(e)
            # Report the attached request/response objects when present.
            for attr in ('request', 'response'):
                if hasattr(e, attr):
                    write_error_message('{0} {1}'.format(attr, repr(getattr(e, attr))))
            raise e
    return wrapper
python
def error_handling(cls, fn):
    """Decorator to handle errors.

    Logs InvalidRequestError details (and its attached request/response
    objects when present) before re-raising.
    """
    def wrapper(*args, **kwargs):
        try:
            result = fn(*args, **kwargs)
            return result
        except InvalidRequestError as e:
            write_exception(e)
            if hasattr(e, 'request'):
                write_error_message('request {0}'.format(repr(e.request)))
            if hasattr(e, 'response'):
                write_error_message('response {0}'.format(repr(e.response)))
            raise e
    return wrapper
[ "def", "error_handling", "(", "cls", ",", "fn", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "result", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "except", "InvalidRequestError", "as", "e", ":", "write_exception", "(", "e", ")", "if", "hasattr", "(", "e", ",", "'request'", ")", ":", "write_error_message", "(", "'request {0}'", ".", "format", "(", "repr", "(", "e", ".", "request", ")", ")", ")", "if", "hasattr", "(", "e", ",", "'response'", ")", ":", "write_error_message", "(", "'response {0}'", ".", "format", "(", "repr", "(", "e", ".", "response", ")", ")", ")", "raise", "e", "return", "wrapper" ]
Decorator to handle errors
[ "Decorator", "to", "handle", "errors" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L450-L465
glasslion/redlock
redlock/lock.py
RedLockFactory.create_lock
def create_lock(self, resource, **kwargs): """ Create a new RedLock object and reuse stored Redis clients. All the kwargs it received would be passed to the RedLock's __init__ function. """ lock = RedLock(resource=resource, created_by_factory=True, **kwargs) lock.redis_nodes = self.redis_nodes lock.quorum = self.quorum lock.factory = self return lock
python
def create_lock(self, resource, **kwargs): lock = RedLock(resource=resource, created_by_factory=True, **kwargs) lock.redis_nodes = self.redis_nodes lock.quorum = self.quorum lock.factory = self return lock
[ "def", "create_lock", "(", "self", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "lock", "=", "RedLock", "(", "resource", "=", "resource", ",", "created_by_factory", "=", "True", ",", "*", "*", "kwargs", ")", "lock", ".", "redis_nodes", "=", "self", ".", "redis_nodes", "lock", ".", "quorum", "=", "self", ".", "quorum", "lock", ".", "factory", "=", "self", "return", "lock" ]
Create a new RedLock object and reuse stored Redis clients. All the kwargs it received would be passed to the RedLock's __init__ function.
[ "Create", "a", "new", "RedLock", "object", "and", "reuse", "stored", "Redis", "clients", ".", "All", "the", "kwargs", "it", "received", "would", "be", "passed", "to", "the", "RedLock", "s", "__init__", "function", "." ]
train
https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L62-L72
glasslion/redlock
redlock/lock.py
RedLock.acquire_node
def acquire_node(self, node): """ acquire a single redis node """ try: return node.set(self.resource, self.lock_key, nx=True, px=self.ttl) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): return False
python
def acquire_node(self, node): try: return node.set(self.resource, self.lock_key, nx=True, px=self.ttl) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): return False
[ "def", "acquire_node", "(", "self", ",", "node", ")", ":", "try", ":", "return", "node", ".", "set", "(", "self", ".", "resource", ",", "self", ".", "lock_key", ",", "nx", "=", "True", ",", "px", "=", "self", ".", "ttl", ")", "except", "(", "redis", ".", "exceptions", ".", "ConnectionError", ",", "redis", ".", "exceptions", ".", "TimeoutError", ")", ":", "return", "False" ]
acquire a single redis node
[ "acquire", "a", "single", "redis", "node" ]
train
https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L135-L142
glasslion/redlock
redlock/lock.py
RedLock.release_node
def release_node(self, node): """ release a single redis node """ # use the lua script to release the lock in a safe way try: node._release_script(keys=[self.resource], args=[self.lock_key]) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): pass
python
def release_node(self, node): try: node._release_script(keys=[self.resource], args=[self.lock_key]) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): pass
[ "def", "release_node", "(", "self", ",", "node", ")", ":", "# use the lua script to release the lock in a safe way", "try", ":", "node", ".", "_release_script", "(", "keys", "=", "[", "self", ".", "resource", "]", ",", "args", "=", "[", "self", ".", "lock_key", "]", ")", "except", "(", "redis", ".", "exceptions", ".", "ConnectionError", ",", "redis", ".", "exceptions", ".", "TimeoutError", ")", ":", "pass" ]
release a single redis node
[ "release", "a", "single", "redis", "node" ]
train
https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L144-L152
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi.get_alexa_rankings
def get_alexa_rankings(self, domains): """Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value. """ api_name = 'alexa_rankings' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports(domains) for domain, response in zip(domains, responses): xml_response = self._extract_response_xml(domain, response) if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = xml_response return all_responses
python
def get_alexa_rankings(self, domains): api_name = 'alexa_rankings' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports(domains) for domain, response in zip(domains, responses): xml_response = self._extract_response_xml(domain, response) if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = xml_response return all_responses
[ "def", "get_alexa_rankings", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'alexa_rankings'", "(", "all_responses", ",", "domains", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "domains", ")", "responses", "=", "self", ".", "_request_reports", "(", "domains", ")", "for", "domain", ",", "response", "in", "zip", "(", "domains", ",", "responses", ")", ":", "xml_response", "=", "self", ".", "_extract_response_xml", "(", "domain", ",", "response", ")", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "domain", ",", "response", ")", "all_responses", "[", "domain", "]", "=", "xml_response", "return", "all_responses" ]
Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value.
[ "Retrieves", "the", "most", "recent", "VT", "info", "for", "a", "set", "of", "domains", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L38-L57
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi._request_reports
def _request_reports(self, domains): """Sends multiples requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of of the resources. endpoint_name: AlexaRankingApi endpoint URL suffix. Returns: A list of the responses. """ params = [{'url': domain} for domain in domains] responses = self._requests.multi_get( self.BASE_URL, query_params=params, to_json=False) return responses
python
def _request_reports(self, domains): params = [{'url': domain} for domain in domains] responses = self._requests.multi_get( self.BASE_URL, query_params=params, to_json=False) return responses
[ "def", "_request_reports", "(", "self", ",", "domains", ")", ":", "params", "=", "[", "{", "'url'", ":", "domain", "}", "for", "domain", "in", "domains", "]", "responses", "=", "self", ".", "_requests", ".", "multi_get", "(", "self", ".", "BASE_URL", ",", "query_params", "=", "params", ",", "to_json", "=", "False", ")", "return", "responses" ]
Sends multiples requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of of the resources. endpoint_name: AlexaRankingApi endpoint URL suffix. Returns: A list of the responses.
[ "Sends", "multiples", "requests", "for", "the", "resources", "to", "a", "particular", "endpoint", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L59-L72
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi._extract_response_xml
def _extract_response_xml(self, domain, response): """Extract XML content of an HTTP response into dictionary format. Args: response: HTML Response objects Returns: A dictionary: {alexa-ranking key : alexa-ranking value}. """ attributes = {} alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'} try: xml_root = ET.fromstring(response._content) for xml_child in xml_root.findall('SD//'): if xml_child.tag in alexa_keys and \ alexa_keys[xml_child.tag] in xml_child.attrib: attributes[xml_child.tag.lower( )] = xml_child.attrib[alexa_keys[xml_child.tag]] except ParseError: # Skip ill-formatted XML and return no Alexa attributes pass attributes['domain'] = domain return {'attributes': attributes}
python
def _extract_response_xml(self, domain, response): attributes = {} alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'} try: xml_root = ET.fromstring(response._content) for xml_child in xml_root.findall('SD//'): if xml_child.tag in alexa_keys and \ alexa_keys[xml_child.tag] in xml_child.attrib: attributes[xml_child.tag.lower( )] = xml_child.attrib[alexa_keys[xml_child.tag]] except ParseError: pass attributes['domain'] = domain return {'attributes': attributes}
[ "def", "_extract_response_xml", "(", "self", ",", "domain", ",", "response", ")", ":", "attributes", "=", "{", "}", "alexa_keys", "=", "{", "'POPULARITY'", ":", "'TEXT'", ",", "'REACH'", ":", "'RANK'", ",", "'RANK'", ":", "'DELTA'", "}", "try", ":", "xml_root", "=", "ET", ".", "fromstring", "(", "response", ".", "_content", ")", "for", "xml_child", "in", "xml_root", ".", "findall", "(", "'SD//'", ")", ":", "if", "xml_child", ".", "tag", "in", "alexa_keys", "and", "alexa_keys", "[", "xml_child", ".", "tag", "]", "in", "xml_child", ".", "attrib", ":", "attributes", "[", "xml_child", ".", "tag", ".", "lower", "(", ")", "]", "=", "xml_child", ".", "attrib", "[", "alexa_keys", "[", "xml_child", ".", "tag", "]", "]", "except", "ParseError", ":", "# Skip ill-formatted XML and return no Alexa attributes", "pass", "attributes", "[", "'domain'", "]", "=", "domain", "return", "{", "'attributes'", ":", "attributes", "}" ]
Extract XML content of an HTTP response into dictionary format. Args: response: HTML Response objects Returns: A dictionary: {alexa-ranking key : alexa-ranking value}.
[ "Extract", "XML", "content", "of", "an", "HTTP", "response", "into", "dictionary", "format", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L74-L95
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi._bulk_cache_lookup
def _bulk_cache_lookup(self, api_name, keys): """Performes a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache. If cached is not configured it will return an empty dictionary of found results and the initial list of keys. Args: api_name: a string name of the API. keys: an enumerable of string keys. Returns: A tuple: (responses found, missing keys). """ if self._cache: responses = self._cache.bulk_lookup(api_name, keys) missing_keys = [key for key in keys if key not in responses.keys()] return (responses, missing_keys) return ({}, keys)
python
def _bulk_cache_lookup(self, api_name, keys): if self._cache: responses = self._cache.bulk_lookup(api_name, keys) missing_keys = [key for key in keys if key not in responses.keys()] return (responses, missing_keys) return ({}, keys)
[ "def", "_bulk_cache_lookup", "(", "self", ",", "api_name", ",", "keys", ")", ":", "if", "self", ".", "_cache", ":", "responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "api_name", ",", "keys", ")", "missing_keys", "=", "[", "key", "for", "key", "in", "keys", "if", "key", "not", "in", "responses", ".", "keys", "(", ")", "]", "return", "(", "responses", ",", "missing_keys", ")", "return", "(", "{", "}", ",", "keys", ")" ]
Performes a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache. If cached is not configured it will return an empty dictionary of found results and the initial list of keys. Args: api_name: a string name of the API. keys: an enumerable of string keys. Returns: A tuple: (responses found, missing keys).
[ "Performes", "a", "bulk", "cache", "lookup", "and", "returns", "a", "tuple", "with", "the", "results", "found", "and", "the", "keys", "missing", "in", "the", "cache", ".", "If", "cached", "is", "not", "configured", "it", "will", "return", "an", "empty", "dictionary", "of", "found", "results", "and", "the", "initial", "list", "of", "keys", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L97-L114
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache.close
def close(self): """Write the contents of the cache to disk (only if `update_cache` parameter during the object initialization was not set to `False`) and clear the in memory cache.""" if self._cache: if self._update_cache: self._write_cache_to_file() self._cache = None
python
def close(self): if self._cache: if self._update_cache: self._write_cache_to_file() self._cache = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_cache", ":", "if", "self", ".", "_update_cache", ":", "self", ".", "_write_cache_to_file", "(", ")", "self", ".", "_cache", "=", "None" ]
Write the contents of the cache to disk (only if `update_cache` parameter during the object initialization was not set to `False`) and clear the in memory cache.
[ "Write", "the", "contents", "of", "the", "cache", "to", "disk", "(", "only", "if", "update_cache", "parameter", "during", "the", "object", "initialization", "was", "not", "set", "to", "False", ")", "and", "clear", "the", "in", "memory", "cache", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L33-L40
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache._write_cache_to_file
def _write_cache_to_file(self): """Write the contents of the cache to a file on disk.""" with(open(self._cache_file_name, 'w')) as fp: fp.write(simplejson.dumps(self._cache))
python
def _write_cache_to_file(self): with(open(self._cache_file_name, 'w')) as fp: fp.write(simplejson.dumps(self._cache))
[ "def", "_write_cache_to_file", "(", "self", ")", ":", "with", "(", "open", "(", "self", ".", "_cache_file_name", ",", "'w'", ")", ")", "as", "fp", ":", "fp", ".", "write", "(", "simplejson", ".", "dumps", "(", "self", ".", "_cache", ")", ")" ]
Write the contents of the cache to a file on disk.
[ "Write", "the", "contents", "of", "the", "cache", "to", "a", "file", "on", "disk", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L42-L45
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache._read_cache_from_file
def _read_cache_from_file(self): """Read the contents of the cache from a file on disk.""" cache = {} try: with(open(self._cache_file_name, 'r')) as fp: contents = fp.read() cache = simplejson.loads(contents) except (IOError, JSONDecodeError): # The file could not be read. This is not a problem if the file does not exist. pass return cache
python
def _read_cache_from_file(self): cache = {} try: with(open(self._cache_file_name, 'r')) as fp: contents = fp.read() cache = simplejson.loads(contents) except (IOError, JSONDecodeError): pass return cache
[ "def", "_read_cache_from_file", "(", "self", ")", ":", "cache", "=", "{", "}", "try", ":", "with", "(", "open", "(", "self", ".", "_cache_file_name", ",", "'r'", ")", ")", "as", "fp", ":", "contents", "=", "fp", ".", "read", "(", ")", "cache", "=", "simplejson", ".", "loads", "(", "contents", ")", "except", "(", "IOError", ",", "JSONDecodeError", ")", ":", "# The file could not be read. This is not a problem if the file does not exist.", "pass", "return", "cache" ]
Read the contents of the cache from a file on disk.
[ "Read", "the", "contents", "of", "the", "cache", "from", "a", "file", "on", "disk", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L47-L58
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache.cache_value
def cache_value(self, api_name, key, value): """Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. value: the value of the call using the specific key """ self._cache.setdefault(api_name, {}) self._cache[api_name][key] = value
python
def cache_value(self, api_name, key, value): self._cache.setdefault(api_name, {}) self._cache[api_name][key] = value
[ "def", "cache_value", "(", "self", ",", "api_name", ",", "key", ",", "value", ")", ":", "self", ".", "_cache", ".", "setdefault", "(", "api_name", ",", "{", "}", ")", "self", ".", "_cache", "[", "api_name", "]", "[", "key", "]", "=", "value" ]
Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. value: the value of the call using the specific key
[ "Add", "the", "value", "of", "an", "API", "call", "to", "the", "cache", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L60-L69
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache.lookup_value
def lookup_value(self, api_name, key): """Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. """ if api_name in self._cache: return self._cache[api_name].get(key, None) return None
python
def lookup_value(self, api_name, key): if api_name in self._cache: return self._cache[api_name].get(key, None) return None
[ "def", "lookup_value", "(", "self", ",", "api_name", ",", "key", ")", ":", "if", "api_name", "in", "self", ".", "_cache", ":", "return", "self", ".", "_cache", "[", "api_name", "]", ".", "get", "(", "key", ",", "None", ")", "return", "None" ]
Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call.
[ "Add", "the", "value", "of", "an", "API", "call", "to", "the", "cache", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L71-L80
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache.bulk_lookup
def bulk_lookup(self, api_name, keys): """Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys. """ cached_data = {} for key in keys: value = self.lookup_value(api_name, key) if value is not None: cached_data[key] = value return cached_data
python
def bulk_lookup(self, api_name, keys): cached_data = {} for key in keys: value = self.lookup_value(api_name, key) if value is not None: cached_data[key] = value return cached_data
[ "def", "bulk_lookup", "(", "self", ",", "api_name", ",", "keys", ")", ":", "cached_data", "=", "{", "}", "for", "key", "in", "keys", ":", "value", "=", "self", ".", "lookup_value", "(", "api_name", ",", "key", ")", "if", "value", "is", "not", "None", ":", "cached_data", "[", "key", "]", "=", "value", "return", "cached_data" ]
Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys.
[ "Perform", "lookup", "on", "an", "enumerable", "of", "keys", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L82-L95
Yelp/threat_intel
threat_intel/opendns.py
_cached_by_domain
def _cached_by_domain(api_name): """A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid. """ def wrapped(func): def decorated(self, domains): if not self._cache: return func(self, domains) all_responses = self._cache.bulk_lookup(api_name, domains) domains = list(set(domains) - set(all_responses)) if domains: response = func(self, domains) if not response: raise ResponseError("No response for uncached domains") for domain in response: self._cache.cache_value(api_name, domain, response[domain]) all_responses[domain] = response[domain] return all_responses return decorated return wrapped
python
def _cached_by_domain(api_name): def wrapped(func): def decorated(self, domains): if not self._cache: return func(self, domains) all_responses = self._cache.bulk_lookup(api_name, domains) domains = list(set(domains) - set(all_responses)) if domains: response = func(self, domains) if not response: raise ResponseError("No response for uncached domains") for domain in response: self._cache.cache_value(api_name, domain, response[domain]) all_responses[domain] = response[domain] return all_responses return decorated return wrapped
[ "def", "_cached_by_domain", "(", "api_name", ")", ":", "def", "wrapped", "(", "func", ")", ":", "def", "decorated", "(", "self", ",", "domains", ")", ":", "if", "not", "self", ".", "_cache", ":", "return", "func", "(", "self", ",", "domains", ")", "all_responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "api_name", ",", "domains", ")", "domains", "=", "list", "(", "set", "(", "domains", ")", "-", "set", "(", "all_responses", ")", ")", "if", "domains", ":", "response", "=", "func", "(", "self", ",", "domains", ")", "if", "not", "response", ":", "raise", "ResponseError", "(", "\"No response for uncached domains\"", ")", "for", "domain", "in", "response", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "domain", ",", "response", "[", "domain", "]", ")", "all_responses", "[", "domain", "]", "=", "response", "[", "domain", "]", "return", "all_responses", "return", "decorated", "return", "wrapped" ]
A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid.
[ "A", "caching", "wrapper", "for", "functions", "that", "take", "a", "list", "of", "domains", "as", "parameters", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L16-L45
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.domain_score
def domain_score(self, domains): """Calls domain scores endpoint. This method is deprecated since OpenDNS Investigate API endpoint is also deprecated. """ warn( 'OpenDNS Domain Scores endpoint is deprecated. Use ' 'InvestigateApi.categorization() instead', DeprecationWarning, ) url_path = 'domains/score/' return self._multi_post(url_path, domains)
python
def domain_score(self, domains): warn( 'OpenDNS Domain Scores endpoint is deprecated. Use ' 'InvestigateApi.categorization() instead', DeprecationWarning, ) url_path = 'domains/score/' return self._multi_post(url_path, domains)
[ "def", "domain_score", "(", "self", ",", "domains", ")", ":", "warn", "(", "'OpenDNS Domain Scores endpoint is deprecated. Use '", "'InvestigateApi.categorization() instead'", ",", "DeprecationWarning", ",", ")", "url_path", "=", "'domains/score/'", "return", "self", ".", "_multi_post", "(", "url_path", ",", "domains", ")" ]
Calls domain scores endpoint. This method is deprecated since OpenDNS Investigate API endpoint is also deprecated.
[ "Calls", "domain", "scores", "endpoint", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L115-L126
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi._multi_get
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None): """Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result} """ all_responses = {} if self._cache: all_responses = self._cache.bulk_lookup(cache_api_name, url_params) url_params = [key for key in url_params if key not in all_responses.keys()] if len(url_params): urls = self._to_urls(fmt_url_path, url_params) responses = self._requests.multi_get(urls, query_params) for url_param, response in zip(url_params, responses): if self._cache: self._cache.cache_value(cache_api_name, url_param, response) all_responses[url_param] = response return all_responses
python
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None): all_responses = {} if self._cache: all_responses = self._cache.bulk_lookup(cache_api_name, url_params) url_params = [key for key in url_params if key not in all_responses.keys()] if len(url_params): urls = self._to_urls(fmt_url_path, url_params) responses = self._requests.multi_get(urls, query_params) for url_param, response in zip(url_params, responses): if self._cache: self._cache.cache_value(cache_api_name, url_param, response) all_responses[url_param] = response return all_responses
[ "def", "_multi_get", "(", "self", ",", "cache_api_name", ",", "fmt_url_path", ",", "url_params", ",", "query_params", "=", "None", ")", ":", "all_responses", "=", "{", "}", "if", "self", ".", "_cache", ":", "all_responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "cache_api_name", ",", "url_params", ")", "url_params", "=", "[", "key", "for", "key", "in", "url_params", "if", "key", "not", "in", "all_responses", ".", "keys", "(", ")", "]", "if", "len", "(", "url_params", ")", ":", "urls", "=", "self", ".", "_to_urls", "(", "fmt_url_path", ",", "url_params", ")", "responses", "=", "self", ".", "_requests", ".", "multi_get", "(", "urls", ",", "query_params", ")", "for", "url_param", ",", "response", "in", "zip", "(", "url_params", ",", "responses", ")", ":", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "cache_api_name", ",", "url_param", ",", "response", ")", "all_responses", "[", "url_param", "]", "=", "response", "return", "all_responses" ]
Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result}
[ "Makes", "multiple", "GETs", "to", "an", "OpenDNS", "endpoint", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L129-L154
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.security
def security(self, domains): """Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result} """ api_name = 'opendns-security' fmt_url_path = u'security/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
python
def security(self, domains): api_name = 'opendns-security' fmt_url_path = u'security/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "security", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-security'", "fmt_url_path", "=", "u'security/name/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result}
[ "Calls", "security", "end", "point", "and", "adds", "an", "is_suspicious", "key", "to", "each", "response", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L156-L166
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_emails
def whois_emails(self, emails): """Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result} """ api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
python
def whois_emails(self, emails): api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
[ "def", "whois_emails", "(", "self", ",", "emails", ")", ":", "api_name", "=", "'opendns-whois-emails'", "fmt_url_path", "=", "u'whois/emails/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "emails", ")" ]
Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result}
[ "Calls", "WHOIS", "Email", "end", "point" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L168-L178
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_nameservers
def whois_nameservers(self, nameservers): """Calls WHOIS Nameserver end point Args: emails: An enumerable of nameservers Returns: A dict of {nameserver: domain_result} """ api_name = 'opendns-whois-nameservers' fmt_url_path = u'whois/nameservers/{0}' return self._multi_get(api_name, fmt_url_path, nameservers)
python
def whois_nameservers(self, nameservers): api_name = 'opendns-whois-nameservers' fmt_url_path = u'whois/nameservers/{0}' return self._multi_get(api_name, fmt_url_path, nameservers)
[ "def", "whois_nameservers", "(", "self", ",", "nameservers", ")", ":", "api_name", "=", "'opendns-whois-nameservers'", "fmt_url_path", "=", "u'whois/nameservers/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "nameservers", ")" ]
Calls WHOIS Nameserver end point Args: emails: An enumerable of nameservers Returns: A dict of {nameserver: domain_result}
[ "Calls", "WHOIS", "Nameserver", "end", "point" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L180-L190
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_domains
def whois_domains(self, domains): """Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result} """ api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
python
def whois_domains(self, domains): api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "whois_domains", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-whois-domain'", "fmt_url_path", "=", "u'whois/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result}
[ "Calls", "WHOIS", "domain", "end", "point" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L192-L202
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_domains_history
def whois_domains_history(self, domains): """Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result} """ api_name = 'opendns-whois-domain-history' fmt_url_path = u'whois/{0}/history' return self._multi_get(api_name, fmt_url_path, domains)
python
def whois_domains_history(self, domains): api_name = 'opendns-whois-domain-history' fmt_url_path = u'whois/{0}/history' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "whois_domains_history", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-whois-domain-history'", "fmt_url_path", "=", "u'whois/{0}/history'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result}
[ "Calls", "WHOIS", "domain", "history", "end", "point" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L204-L214
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.cooccurrences
def cooccurrences(self, domains): """Get the domains related to input domains. Args: domains: an enumerable of strings domain names Returns: An enumerable of string domain names """ api_name = 'opendns-cooccurrences' fmt_url_path = u'recommendations/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
python
def cooccurrences(self, domains): api_name = 'opendns-cooccurrences' fmt_url_path = u'recommendations/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "cooccurrences", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-cooccurrences'", "fmt_url_path", "=", "u'recommendations/name/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Get the domains related to input domains. Args: domains: an enumerable of strings domain names Returns: An enumerable of string domain names
[ "Get", "the", "domains", "related", "to", "input", "domains", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L216-L226
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.domain_tag
def domain_tag(self, domains): """Get the data range when a domain is part of OpenDNS block list. Args: domains: an enumerable of strings domain names Returns: An enumerable of string with period, category, and url """ api_name = 'opendns-domain_tag' fmt_url_path = u'domains/{0}/latest_tags' return self._multi_get(api_name, fmt_url_path, domains)
python
def domain_tag(self, domains): api_name = 'opendns-domain_tag' fmt_url_path = u'domains/{0}/latest_tags' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "domain_tag", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-domain_tag'", "fmt_url_path", "=", "u'domains/{0}/latest_tags'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Get the data range when a domain is part of OpenDNS block list. Args: domains: an enumerable of strings domain names Returns: An enumerable of string with period, category, and url
[ "Get", "the", "data", "range", "when", "a", "domain", "is", "part", "of", "OpenDNS", "block", "list", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L228-L238
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.related_domains
def related_domains(self, domains): """Get list of domain names that have been seen requested around the same time (up to 60 seconds before or after) to the given domain name. Args: domains: an enumerable of strings domain names Returns: An enumerable of [domain name, scores] """ api_name = 'opendns-related_domains' fmt_url_path = u'links/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
python
def related_domains(self, domains): api_name = 'opendns-related_domains' fmt_url_path = u'links/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "related_domains", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-related_domains'", "fmt_url_path", "=", "u'links/name/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Get list of domain names that have been seen requested around the same time (up to 60 seconds before or after) to the given domain name. Args: domains: an enumerable of strings domain names Returns: An enumerable of [domain name, scores]
[ "Get", "list", "of", "domain", "names", "that", "have", "been", "seen", "requested", "around", "the", "same", "time", "(", "up", "to", "60", "seconds", "before", "or", "after", ")", "to", "the", "given", "domain", "name", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L240-L251
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.rr_history
def rr_history(self, ips): """Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features """ api_name = 'opendns-rr_history' fmt_url_path = u'dnsdb/ip/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
python
def rr_history(self, ips): api_name = 'opendns-rr_history' fmt_url_path = u'dnsdb/ip/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
[ "def", "rr_history", "(", "self", ",", "ips", ")", ":", "api_name", "=", "'opendns-rr_history'", "fmt_url_path", "=", "u'dnsdb/ip/a/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "ips", ")" ]
Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features
[ "Get", "the", "domains", "related", "to", "input", "ips", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L253-L263
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.dns_rr
def dns_rr(self, ips): """Get the domains related to input domains. Args: domains: an enumerable of strings as domains Returns: An enumerable of resource records and features """ api_name = 'opendns-dns_rr' fmt_url_path = u'dnsdb/name/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
python
def dns_rr(self, ips): api_name = 'opendns-dns_rr' fmt_url_path = u'dnsdb/name/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
[ "def", "dns_rr", "(", "self", ",", "ips", ")", ":", "api_name", "=", "'opendns-dns_rr'", "fmt_url_path", "=", "u'dnsdb/name/a/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "ips", ")" ]
Get the domains related to input domains. Args: domains: an enumerable of strings as domains Returns: An enumerable of resource records and features
[ "Get", "the", "domains", "related", "to", "input", "domains", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L265-L275
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.sample
def sample(self, hashes): """Get the information about a sample based on its hash. Args: hashes: an enumerable of strings as hashes Returns: An enumerable of arrays which contains the information about the original samples """ api_name = 'opendns-sample' fmt_url_path = u'sample/{0}' return self._multi_get(api_name, fmt_url_path, hashes)
python
def sample(self, hashes): api_name = 'opendns-sample' fmt_url_path = u'sample/{0}' return self._multi_get(api_name, fmt_url_path, hashes)
[ "def", "sample", "(", "self", ",", "hashes", ")", ":", "api_name", "=", "'opendns-sample'", "fmt_url_path", "=", "u'sample/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "hashes", ")" ]
Get the information about a sample based on its hash. Args: hashes: an enumerable of strings as hashes Returns: An enumerable of arrays which contains the information about the original samples
[ "Get", "the", "information", "about", "a", "sample", "based", "on", "its", "hash", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L289-L300
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.search
def search(self, patterns, start=30, limit=1000, include_category=False): """Performs pattern searches against the Investigate database. Args: patterns: An enumerable of RegEx domain patterns to search for start: How far back results extend from in days (max is 30) limit: Number of results to show (max is 1000) include_category: Include OpenDNS security categories Returns: An enumerable of matching domain strings """ api_name = 'opendns-patterns' fmt_url_path = u'search/{0}' start = '-{0}days'.format(start) include_category = str(include_category).lower() query_params = { 'start': start, 'limit': limit, 'includecategory': include_category, } return self._multi_get(api_name, fmt_url_path, patterns, query_params)
python
def search(self, patterns, start=30, limit=1000, include_category=False): api_name = 'opendns-patterns' fmt_url_path = u'search/{0}' start = '-{0}days'.format(start) include_category = str(include_category).lower() query_params = { 'start': start, 'limit': limit, 'includecategory': include_category, } return self._multi_get(api_name, fmt_url_path, patterns, query_params)
[ "def", "search", "(", "self", ",", "patterns", ",", "start", "=", "30", ",", "limit", "=", "1000", ",", "include_category", "=", "False", ")", ":", "api_name", "=", "'opendns-patterns'", "fmt_url_path", "=", "u'search/{0}'", "start", "=", "'-{0}days'", ".", "format", "(", "start", ")", "include_category", "=", "str", "(", "include_category", ")", ".", "lower", "(", ")", "query_params", "=", "{", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "'includecategory'", ":", "include_category", ",", "}", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "patterns", ",", "query_params", ")" ]
Performs pattern searches against the Investigate database. Args: patterns: An enumerable of RegEx domain patterns to search for start: How far back results extend from in days (max is 30) limit: Number of results to show (max is 1000) include_category: Include OpenDNS security categories Returns: An enumerable of matching domain strings
[ "Performs", "pattern", "searches", "against", "the", "Investigate", "database", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L302-L322
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.risk_score
def risk_score(self, domains): """Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores """ api_name = 'opendns-risk_score' fmt_url_path = u'domains/risk-score/{0}' return self._multi_get(api_name, fmt_url_path, domains)
python
def risk_score(self, domains): api_name = 'opendns-risk_score' fmt_url_path = u'domains/risk-score/{0}' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "risk_score", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-risk_score'", "fmt_url_path", "=", "u'domains/risk-score/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores
[ "Performs", "Umbrella", "risk", "score", "analysis", "on", "the", "input", "domains" ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L324-L334
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._extract_all_responses
def _extract_all_responses(self, resources, api_endpoint, api_name): """ Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value. """ all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def _extract_all_responses(self, resources, api_endpoint, api_name): all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "_extract_all_responses", "(", "self", ",", "resources", ",", "api_endpoint", ",", "api_name", ")", ":", "all_responses", ",", "resources", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "resources", ")", "resource_chunks", "=", "self", ".", "_prepare_resource_chunks", "(", "resources", ")", "response_chunks", "=", "self", ".", "_request_reports", "(", "\"resource\"", ",", "resource_chunks", ",", "api_endpoint", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value.
[ "Aux", "function", "to", "extract", "all", "the", "API", "endpoint", "responses", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L52-L67
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_behaviour
def get_file_behaviour(self, resources): """Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of a file when executed in a sandboxed environment (Cuckoo sandbox). Args: resources: list of string hashes. """ api_name = 'virustotal-file-behaviour' api_endpoint = 'file/behaviour' return self._extract_all_responses(resources, api_endpoint, api_name)
python
def get_file_behaviour(self, resources): api_name = 'virustotal-file-behaviour' api_endpoint = 'file/behaviour' return self._extract_all_responses(resources, api_endpoint, api_name)
[ "def", "get_file_behaviour", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-file-behaviour'", "api_endpoint", "=", "'file/behaviour'", "return", "self", ".", "_extract_all_responses", "(", "resources", ",", "api_endpoint", ",", "api_name", ")" ]
Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of a file when executed in a sandboxed environment (Cuckoo sandbox). Args: resources: list of string hashes.
[ "Retrieves", "a", "report", "about", "the", "behaviour", "of", "a", "md5", "sha1", "and", "/", "or", "sha2", "hash", "of", "a", "file", "when", "executed", "in", "a", "sandboxed", "environment", "(", "Cuckoo", "sandbox", ")", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L70-L79
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_download
def get_file_download(self, resources): """Retrieves a file from its a md5, sha1, and/or sha2 hash. Args: resources: list of string hashes. Returns: a file download """ api_name = 'virustotal-file-download' api_endpoint = 'file/download' return self._extract_all_responses(resources, api_endpoint, api_name)
python
def get_file_download(self, resources): api_name = 'virustotal-file-download' api_endpoint = 'file/download' return self._extract_all_responses(resources, api_endpoint, api_name)
[ "def", "get_file_download", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-file-download'", "api_endpoint", "=", "'file/download'", "return", "self", ".", "_extract_all_responses", "(", "resources", ",", "api_endpoint", ",", "api_name", ")" ]
Retrieves a file from its a md5, sha1, and/or sha2 hash. Args: resources: list of string hashes. Returns: a file download
[ "Retrieves", "a", "file", "from", "its", "a", "md5", "sha1", "and", "/", "or", "sha2", "hash", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L82-L92
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_network_traffic
def get_file_network_traffic(self, resources): """Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of file, when it is executed. Args: resources: list of string hashes. """ api_name = 'virustotal-file-network-traffic' api_endpoint = 'file/network-traffic' return self._extract_all_responses(resources, api_endpoint, api_name)
python
def get_file_network_traffic(self, resources): api_name = 'virustotal-file-network-traffic' api_endpoint = 'file/network-traffic' return self._extract_all_responses(resources, api_endpoint, api_name)
[ "def", "get_file_network_traffic", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-file-network-traffic'", "api_endpoint", "=", "'file/network-traffic'", "return", "self", ".", "_extract_all_responses", "(", "resources", ",", "api_endpoint", ",", "api_name", ")" ]
Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of file, when it is executed. Args: resources: list of string hashes.
[ "Retrieves", "a", "report", "about", "the", "network", "traffic", "of", "a", "md5", "sha1", "and", "/", "or", "sha2", "hash", "of", "file", "when", "it", "is", "executed", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L95-L104
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_domain_reports
def get_domain_reports(self, domains): """Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value. """ api_name = 'virustotal-domain-reports' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports("domain", domains, 'domain/report') for domain, response in zip(domains, responses): if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = response return all_responses
python
def get_domain_reports(self, domains): api_name = 'virustotal-domain-reports' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports("domain", domains, 'domain/report') for domain, response in zip(domains, responses): if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = response return all_responses
[ "def", "get_domain_reports", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'virustotal-domain-reports'", "(", "all_responses", ",", "domains", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "domains", ")", "responses", "=", "self", ".", "_request_reports", "(", "\"domain\"", ",", "domains", ",", "'domain/report'", ")", "for", "domain", ",", "response", "in", "zip", "(", "domains", ",", "responses", ")", ":", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "domain", ",", "response", ")", "all_responses", "[", "domain", "]", "=", "response", "return", "all_responses" ]
Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value.
[ "Retrieves", "the", "most", "recent", "VT", "info", "for", "a", "set", "of", "domains", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L107-L125
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_url_distribution
def get_url_distribution(self, params=None): """Retrieves a live feed with the latest URLs submitted to VT. Args: resources: a dictionary with name and value for optional arguments Returns: A dict with the VT report. """ params = params or {} all_responses = {} api_name = 'virustotal-url-distribution' response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def get_url_distribution(self, params=None): params = params or {} all_responses = {} api_name = 'virustotal-url-distribution' response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "get_url_distribution", "(", "self", ",", "params", "=", "None", ")", ":", "params", "=", "params", "or", "{", "}", "all_responses", "=", "{", "}", "api_name", "=", "'virustotal-url-distribution'", "response_chunks", "=", "self", ".", "_request_reports", "(", "list", "(", "params", ".", "keys", "(", ")", ")", ",", "list", "(", "params", ".", "values", "(", ")", ")", ",", "'url/distribution'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Retrieves a live feed with the latest URLs submitted to VT. Args: resources: a dictionary with name and value for optional arguments Returns: A dict with the VT report.
[ "Retrieves", "a", "live", "feed", "with", "the", "latest", "URLs", "submitted", "to", "VT", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L128-L143
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_url_reports
def get_url_reports(self, resources): """Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value. """ api_name = 'virustotal-url-reports' (all_responses, resources) = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources, '\n') response_chunks = self._request_reports("resource", resource_chunks, 'url/report') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def get_url_reports(self, resources): api_name = 'virustotal-url-reports' (all_responses, resources) = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources, '\n') response_chunks = self._request_reports("resource", resource_chunks, 'url/report') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "get_url_reports", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-url-reports'", "(", "all_responses", ",", "resources", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "resources", ")", "resource_chunks", "=", "self", ".", "_prepare_resource_chunks", "(", "resources", ",", "'\\n'", ")", "response_chunks", "=", "self", ".", "_request_reports", "(", "\"resource\"", ",", "resource_chunks", ",", "'url/report'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value.
[ "Retrieves", "a", "scan", "report", "on", "a", "given", "URL", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L167-L182
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_ip_reports
def get_ip_reports(self, ips): """Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value. """ api_name = 'virustotal-ip-address-reports' (all_responses, ips) = self._bulk_cache_lookup(api_name, ips) responses = self._request_reports("ip", ips, 'ip-address/report') for ip, response in zip(ips, responses): if self._cache: self._cache.cache_value(api_name, ip, response) all_responses[ip] = response return all_responses
python
def get_ip_reports(self, ips): api_name = 'virustotal-ip-address-reports' (all_responses, ips) = self._bulk_cache_lookup(api_name, ips) responses = self._request_reports("ip", ips, 'ip-address/report') for ip, response in zip(ips, responses): if self._cache: self._cache.cache_value(api_name, ip, response) all_responses[ip] = response return all_responses
[ "def", "get_ip_reports", "(", "self", ",", "ips", ")", ":", "api_name", "=", "'virustotal-ip-address-reports'", "(", "all_responses", ",", "ips", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "ips", ")", "responses", "=", "self", ".", "_request_reports", "(", "\"ip\"", ",", "ips", ",", "'ip-address/report'", ")", "for", "ip", ",", "response", "in", "zip", "(", "ips", ",", "responses", ")", ":", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "ip", ",", "response", ")", "all_responses", "[", "ip", "]", "=", "response", "return", "all_responses" ]
Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value.
[ "Retrieves", "the", "most", "recent", "VT", "info", "for", "a", "set", "of", "ips", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L185-L203
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_search
def get_file_search(self, query): """Performs advanced search on samples, matching certain binary/ metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, bynary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report. """ api_name = 'virustotal-file-search' (all_responses, query) = self._bulk_cache_lookup(api_name, query) response_chunks = self._request_reports("query", query, 'file/search') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def get_file_search(self, query): api_name = 'virustotal-file-search' (all_responses, query) = self._bulk_cache_lookup(api_name, query) response_chunks = self._request_reports("query", query, 'file/search') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "get_file_search", "(", "self", ",", "query", ")", ":", "api_name", "=", "'virustotal-file-search'", "(", "all_responses", ",", "query", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "query", ")", "response_chunks", "=", "self", ".", "_request_reports", "(", "\"query\"", ",", "query", ",", "'file/search'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Performs advanced search on samples, matching certain binary/ metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, bynary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report.
[ "Performs", "advanced", "search", "on", "samples", "matching", "certain", "binary", "/", "metadata", "/", "detection", "criteria", ".", "Possible", "queries", ":", "file", "size", "file", "type", "first", "or", "last", "submission", "to", "VT", "number", "of", "positives", "bynary", "content", "etc", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L206-L224
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_clusters
def get_file_clusters(self, date): """Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report. """ api_name = 'virustotal-file-clusters' (all_responses, resources) = self._bulk_cache_lookup(api_name, date) response = self._request_reports("date", date, 'file/clusters') self._extract_response_chunks(all_responses, response, api_name) return all_responses
python
def get_file_clusters(self, date): api_name = 'virustotal-file-clusters' (all_responses, resources) = self._bulk_cache_lookup(api_name, date) response = self._request_reports("date", date, 'file/clusters') self._extract_response_chunks(all_responses, response, api_name) return all_responses
[ "def", "get_file_clusters", "(", "self", ",", "date", ")", ":", "api_name", "=", "'virustotal-file-clusters'", "(", "all_responses", ",", "resources", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "date", ")", "response", "=", "self", ".", "_request_reports", "(", "\"date\"", ",", "date", ",", "'file/clusters'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response", ",", "api_name", ")", "return", "all_responses" ]
Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report.
[ "Retrieves", "file", "similarity", "clusters", "for", "a", "given", "time", "frame", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L227-L242
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._prepare_resource_chunks
def _prepare_resource_chunks(self, resources, resource_delim=','): """As in some VirusTotal API methods the call can be made for multiple resources at once this method prepares a list of concatenated resources according to the maximum number of resources per requests. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources. """ return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
python
def _prepare_resource_chunks(self, resources, resource_delim=','): return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
[ "def", "_prepare_resource_chunks", "(", "self", ",", "resources", ",", "resource_delim", "=", "','", ")", ":", "return", "[", "self", ".", "_prepare_resource_chunk", "(", "resources", ",", "resource_delim", ",", "pos", ")", "for", "pos", "in", "range", "(", "0", ",", "len", "(", "resources", ")", ",", "self", ".", "_resources_per_req", ")", "]" ]
As in some VirusTotal API methods the call can be made for multiple resources at once this method prepares a list of concatenated resources according to the maximum number of resources per requests. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources.
[ "As", "in", "some", "VirusTotal", "API", "methods", "the", "call", "can", "be", "made", "for", "multiple", "resources", "at", "once", "this", "method", "prepares", "a", "list", "of", "concatenated", "resources", "according", "to", "the", "maximum", "number", "of", "resources", "per", "requests", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L263-L276
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._request_reports
def _request_reports(self, resource_param_name, resources, endpoint_name): """Sends multiples requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of of the resources. endpoint_name: VirusTotal endpoint URL suffix. Returns: A list of the responses. """ params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources] return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
python
def _request_reports(self, resource_param_name, resources, endpoint_name): params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources] return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
[ "def", "_request_reports", "(", "self", ",", "resource_param_name", ",", "resources", ",", "endpoint_name", ")", ":", "params", "=", "[", "{", "resource_param_name", ":", "resource", ",", "'apikey'", ":", "self", ".", "_api_key", "}", "for", "resource", "in", "resources", "]", "return", "self", ".", "_requests", ".", "multi_get", "(", "self", ".", "BASE_DOMAIN", "+", "endpoint_name", ",", "query_params", "=", "params", ")" ]
Sends multiples requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of of the resources. endpoint_name: VirusTotal endpoint URL suffix. Returns: A list of the responses.
[ "Sends", "multiples", "requests", "for", "the", "resources", "to", "a", "particular", "endpoint", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L282-L293
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._extract_response_chunks
def _extract_response_chunks(self, all_responses, response_chunks, api_name): """Extracts and caches the responses from the response chunks in case of the responses for the requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a list containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API. """ for response_chunk in response_chunks: if not isinstance(response_chunk, list): response_chunk = [response_chunk] for response in response_chunk: if not response: continue if self._cache: self._cache.cache_value(api_name, response['resource'], response) all_responses[response['resource']] = response
python
def _extract_response_chunks(self, all_responses, response_chunks, api_name): for response_chunk in response_chunks: if not isinstance(response_chunk, list): response_chunk = [response_chunk] for response in response_chunk: if not response: continue if self._cache: self._cache.cache_value(api_name, response['resource'], response) all_responses[response['resource']] = response
[ "def", "_extract_response_chunks", "(", "self", ",", "all_responses", ",", "response_chunks", ",", "api_name", ")", ":", "for", "response_chunk", "in", "response_chunks", ":", "if", "not", "isinstance", "(", "response_chunk", ",", "list", ")", ":", "response_chunk", "=", "[", "response_chunk", "]", "for", "response", "in", "response_chunk", ":", "if", "not", "response", ":", "continue", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "response", "[", "'resource'", "]", ",", "response", ")", "all_responses", "[", "response", "[", "'resource'", "]", "]", "=", "response" ]
Extracts and caches the responses from the response chunks in case of the responses for the requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a list containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API.
[ "Extracts", "and", "caches", "the", "responses", "from", "the", "response", "chunks", "in", "case", "of", "the", "responses", "for", "the", "requests", "containing", "multiple", "concatenated", "resources", ".", "Extracted", "responses", "are", "added", "to", "the", "already", "cached", "responses", "passed", "in", "the", "all_responses", "parameter", "." ]
train
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L295-L315
letmaik/lensfunpy
lensfunpy/util.py
remapOpenCv
def remapOpenCv(im, coords): """ Remap an image using OpenCV. See :func:`remap` for parameters. """ # required for older OpenCV versions im = np.require(im, im.dtype, 'C') return cv2.remap(im, coords, None, cv2.INTER_LANCZOS4)
python
def remapOpenCv(im, coords): im = np.require(im, im.dtype, 'C') return cv2.remap(im, coords, None, cv2.INTER_LANCZOS4)
[ "def", "remapOpenCv", "(", "im", ",", "coords", ")", ":", "# required for older OpenCV versions", "im", "=", "np", ".", "require", "(", "im", ",", "im", ".", "dtype", ",", "'C'", ")", "return", "cv2", ".", "remap", "(", "im", ",", "coords", ",", "None", ",", "cv2", ".", "INTER_LANCZOS4", ")" ]
Remap an image using OpenCV. See :func:`remap` for parameters.
[ "Remap", "an", "image", "using", "OpenCV", ".", "See", ":", "func", ":", "remap", "for", "parameters", "." ]
train
https://github.com/letmaik/lensfunpy/blob/e8800496874b1b1360cb9c245e2f137febbd41d7/lensfunpy/util.py#L12-L18
letmaik/lensfunpy
lensfunpy/util.py
remapScipy
def remapScipy(im, coords): """ Remap an image using SciPy. See :func:`remap` for parameters. """ height, width = im.shape[0], im.shape[1] # switch to y,x order coords = coords[:,:,::-1] # make it (h, w, 3, 3) coords_channels = np.empty((height, width, 3, 3)) coords_channel = np.zeros((height, width, 3)) coords_channel[:,:,:2] = coords coords_channels[:,:,0] = coords_channel coords_channels[:,:,1] = coords_channel coords_channels[:,:,1,2] = 1 coords_channels[:,:,2] = coords_channel coords_channels[:,:,2,2] = 2 coords = coords_channels # (3, h, w, 3) coords = np.rollaxis(coords, 3) return map_coordinates(im, coords, order=1)
python
def remapScipy(im, coords): height, width = im.shape[0], im.shape[1] coords = coords[:,:,::-1] coords_channels = np.empty((height, width, 3, 3)) coords_channel = np.zeros((height, width, 3)) coords_channel[:,:,:2] = coords coords_channels[:,:,0] = coords_channel coords_channels[:,:,1] = coords_channel coords_channels[:,:,1,2] = 1 coords_channels[:,:,2] = coords_channel coords_channels[:,:,2,2] = 2 coords = coords_channels coords = np.rollaxis(coords, 3) return map_coordinates(im, coords, order=1)
[ "def", "remapScipy", "(", "im", ",", "coords", ")", ":", "height", ",", "width", "=", "im", ".", "shape", "[", "0", "]", ",", "im", ".", "shape", "[", "1", "]", "# switch to y,x order", "coords", "=", "coords", "[", ":", ",", ":", ",", ":", ":", "-", "1", "]", "# make it (h, w, 3, 3)", "coords_channels", "=", "np", ".", "empty", "(", "(", "height", ",", "width", ",", "3", ",", "3", ")", ")", "coords_channel", "=", "np", ".", "zeros", "(", "(", "height", ",", "width", ",", "3", ")", ")", "coords_channel", "[", ":", ",", ":", ",", ":", "2", "]", "=", "coords", "coords_channels", "[", ":", ",", ":", ",", "0", "]", "=", "coords_channel", "coords_channels", "[", ":", ",", ":", ",", "1", "]", "=", "coords_channel", "coords_channels", "[", ":", ",", ":", ",", "1", ",", "2", "]", "=", "1", "coords_channels", "[", ":", ",", ":", ",", "2", "]", "=", "coords_channel", "coords_channels", "[", ":", ",", ":", ",", "2", ",", "2", "]", "=", "2", "coords", "=", "coords_channels", "# (3, h, w, 3)", "coords", "=", "np", ".", "rollaxis", "(", "coords", ",", "3", ")", "return", "map_coordinates", "(", "im", ",", "coords", ",", "order", "=", "1", ")" ]
Remap an image using SciPy. See :func:`remap` for parameters.
[ "Remap", "an", "image", "using", "SciPy", ".", "See", ":", "func", ":", "remap", "for", "parameters", "." ]
train
https://github.com/letmaik/lensfunpy/blob/e8800496874b1b1360cb9c245e2f137febbd41d7/lensfunpy/util.py#L20-L43
letmaik/lensfunpy
lensfunpy/util.py
remap
def remap(im, coords): """ Remap an RGB image using the given target coordinate array. If available, OpenCV is used (faster), otherwise SciPy. :type im: ndarray of shape (h,w,3) :param im: RGB image to be remapped :type coords: ndarray of shape (h,w,2) :param coords: target coordinates in x,y order for each pixel :return: remapped RGB image :rtype: ndarray of shape (h,w,3) """ if cv2: return remapOpenCv(im, coords) else: return remapScipy(im, coords)
python
def remap(im, coords): if cv2: return remapOpenCv(im, coords) else: return remapScipy(im, coords)
[ "def", "remap", "(", "im", ",", "coords", ")", ":", "if", "cv2", ":", "return", "remapOpenCv", "(", "im", ",", "coords", ")", "else", ":", "return", "remapScipy", "(", "im", ",", "coords", ")" ]
Remap an RGB image using the given target coordinate array. If available, OpenCV is used (faster), otherwise SciPy. :type im: ndarray of shape (h,w,3) :param im: RGB image to be remapped :type coords: ndarray of shape (h,w,2) :param coords: target coordinates in x,y order for each pixel :return: remapped RGB image :rtype: ndarray of shape (h,w,3)
[ "Remap", "an", "RGB", "image", "using", "the", "given", "target", "coordinate", "array", ".", "If", "available", "OpenCV", "is", "used", "(", "faster", ")", "otherwise", "SciPy", ".", ":", "type", "im", ":", "ndarray", "of", "shape", "(", "h", "w", "3", ")", ":", "param", "im", ":", "RGB", "image", "to", "be", "remapped", ":", "type", "coords", ":", "ndarray", "of", "shape", "(", "h", "w", "2", ")", ":", "param", "coords", ":", "target", "coordinates", "in", "x", "y", "order", "for", "each", "pixel", ":", "return", ":", "remapped", "RGB", "image", ":", "rtype", ":", "ndarray", "of", "shape", "(", "h", "w", "3", ")" ]
train
https://github.com/letmaik/lensfunpy/blob/e8800496874b1b1360cb9c245e2f137febbd41d7/lensfunpy/util.py#L45-L61
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/cms_plugins.py
TextPlugin.get_editor_widget
def get_editor_widget(self, request, plugins, plugin): """ Returns the Django form Widget to be used for the text area """ cancel_url_name = self.get_admin_url_name('delete_on_cancel') cancel_url = reverse('admin:%s' % cancel_url_name) render_plugin_url_name = self.get_admin_url_name('render_plugin') render_plugin_url = reverse('admin:%s' % render_plugin_url_name) action_token = self.get_action_token(request, plugin) # should we delete the text plugin when # the user cancels? delete_text_on_cancel = ( 'delete-on-cancel' in request.GET and # noqa not plugin.get_plugin_instance()[0] ) widget = TextEditorWidget( installed_plugins=plugins, pk=plugin.pk, placeholder=plugin.placeholder, plugin_language=plugin.language, configuration=self.ckeditor_configuration, render_plugin_url=render_plugin_url, cancel_url=cancel_url, action_token=action_token, delete_on_cancel=delete_text_on_cancel, ) return widget
python
def get_editor_widget(self, request, plugins, plugin): cancel_url_name = self.get_admin_url_name('delete_on_cancel') cancel_url = reverse('admin:%s' % cancel_url_name) render_plugin_url_name = self.get_admin_url_name('render_plugin') render_plugin_url = reverse('admin:%s' % render_plugin_url_name) action_token = self.get_action_token(request, plugin) delete_text_on_cancel = ( 'delete-on-cancel' in request.GET and not plugin.get_plugin_instance()[0] ) widget = TextEditorWidget( installed_plugins=plugins, pk=plugin.pk, placeholder=plugin.placeholder, plugin_language=plugin.language, configuration=self.ckeditor_configuration, render_plugin_url=render_plugin_url, cancel_url=cancel_url, action_token=action_token, delete_on_cancel=delete_text_on_cancel, ) return widget
[ "def", "get_editor_widget", "(", "self", ",", "request", ",", "plugins", ",", "plugin", ")", ":", "cancel_url_name", "=", "self", ".", "get_admin_url_name", "(", "'delete_on_cancel'", ")", "cancel_url", "=", "reverse", "(", "'admin:%s'", "%", "cancel_url_name", ")", "render_plugin_url_name", "=", "self", ".", "get_admin_url_name", "(", "'render_plugin'", ")", "render_plugin_url", "=", "reverse", "(", "'admin:%s'", "%", "render_plugin_url_name", ")", "action_token", "=", "self", ".", "get_action_token", "(", "request", ",", "plugin", ")", "# should we delete the text plugin when", "# the user cancels?", "delete_text_on_cancel", "=", "(", "'delete-on-cancel'", "in", "request", ".", "GET", "and", "# noqa", "not", "plugin", ".", "get_plugin_instance", "(", ")", "[", "0", "]", ")", "widget", "=", "TextEditorWidget", "(", "installed_plugins", "=", "plugins", ",", "pk", "=", "plugin", ".", "pk", ",", "placeholder", "=", "plugin", ".", "placeholder", ",", "plugin_language", "=", "plugin", ".", "language", ",", "configuration", "=", "self", ".", "ckeditor_configuration", ",", "render_plugin_url", "=", "render_plugin_url", ",", "cancel_url", "=", "cancel_url", ",", "action_token", "=", "action_token", ",", "delete_on_cancel", "=", "delete_text_on_cancel", ",", ")", "return", "widget" ]
Returns the Django form Widget to be used for the text area
[ "Returns", "the", "Django", "form", "Widget", "to", "be", "used", "for", "the", "text", "area" ]
train
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/cms_plugins.py#L227-L257
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/cms_plugins.py
TextPlugin.get_form_class
def get_form_class(self, request, plugins, plugin): """ Returns a subclass of Form to be used by this plugin """ widget = self.get_editor_widget( request=request, plugins=plugins, plugin=plugin, ) instance = plugin.get_plugin_instance()[0] if instance: context = RequestContext(request) context['request'] = request rendered_text = plugin_tags_to_admin_html( text=instance.body, context=context, ) else: rendered_text = None # We avoid mutating the Form declared above by subclassing class TextPluginForm(self.form): body = CharField(widget=widget, required=False) def __init__(self, *args, **kwargs): initial = kwargs.pop('initial', {}) if rendered_text: initial['body'] = rendered_text super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs) return TextPluginForm
python
def get_form_class(self, request, plugins, plugin): widget = self.get_editor_widget( request=request, plugins=plugins, plugin=plugin, ) instance = plugin.get_plugin_instance()[0] if instance: context = RequestContext(request) context['request'] = request rendered_text = plugin_tags_to_admin_html( text=instance.body, context=context, ) else: rendered_text = None class TextPluginForm(self.form): body = CharField(widget=widget, required=False) def __init__(self, *args, **kwargs): initial = kwargs.pop('initial', {}) if rendered_text: initial['body'] = rendered_text super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs) return TextPluginForm
[ "def", "get_form_class", "(", "self", ",", "request", ",", "plugins", ",", "plugin", ")", ":", "widget", "=", "self", ".", "get_editor_widget", "(", "request", "=", "request", ",", "plugins", "=", "plugins", ",", "plugin", "=", "plugin", ",", ")", "instance", "=", "plugin", ".", "get_plugin_instance", "(", ")", "[", "0", "]", "if", "instance", ":", "context", "=", "RequestContext", "(", "request", ")", "context", "[", "'request'", "]", "=", "request", "rendered_text", "=", "plugin_tags_to_admin_html", "(", "text", "=", "instance", ".", "body", ",", "context", "=", "context", ",", ")", "else", ":", "rendered_text", "=", "None", "# We avoid mutating the Form declared above by subclassing", "class", "TextPluginForm", "(", "self", ".", "form", ")", ":", "body", "=", "CharField", "(", "widget", "=", "widget", ",", "required", "=", "False", ")", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "initial", "=", "kwargs", ".", "pop", "(", "'initial'", ",", "{", "}", ")", "if", "rendered_text", ":", "initial", "[", "'body'", "]", "=", "rendered_text", "super", "(", "TextPluginForm", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "initial", "=", "initial", ",", "*", "*", "kwargs", ")", "return", "TextPluginForm" ]
Returns a subclass of Form to be used by this plugin
[ "Returns", "a", "subclass", "of", "Form", "to", "be", "used", "by", "this", "plugin" ]
train
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/cms_plugins.py#L259-L291
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/utils.py
_plugin_tags_to_html
def _plugin_tags_to_html(text, output_func): """ Convert plugin object 'tags' into the form for public site. context is the template context to use, placeholder is the placeholder name """ plugins_by_id = get_plugins_from_text(text) def _render_tag(m): try: plugin_id = int(m.groupdict()['pk']) obj = plugins_by_id[plugin_id] except KeyError: # Object must have been deleted. It cannot be rendered to # end user so just remove it from the HTML altogether return u'' else: obj._render_meta.text_enabled = True return output_func(obj, m) return OBJ_ADMIN_RE.sub(_render_tag, text)
python
def _plugin_tags_to_html(text, output_func): plugins_by_id = get_plugins_from_text(text) def _render_tag(m): try: plugin_id = int(m.groupdict()['pk']) obj = plugins_by_id[plugin_id] except KeyError: return u'' else: obj._render_meta.text_enabled = True return output_func(obj, m) return OBJ_ADMIN_RE.sub(_render_tag, text)
[ "def", "_plugin_tags_to_html", "(", "text", ",", "output_func", ")", ":", "plugins_by_id", "=", "get_plugins_from_text", "(", "text", ")", "def", "_render_tag", "(", "m", ")", ":", "try", ":", "plugin_id", "=", "int", "(", "m", ".", "groupdict", "(", ")", "[", "'pk'", "]", ")", "obj", "=", "plugins_by_id", "[", "plugin_id", "]", "except", "KeyError", ":", "# Object must have been deleted. It cannot be rendered to", "# end user so just remove it from the HTML altogether", "return", "u''", "else", ":", "obj", ".", "_render_meta", ".", "text_enabled", "=", "True", "return", "output_func", "(", "obj", ",", "m", ")", "return", "OBJ_ADMIN_RE", ".", "sub", "(", "_render_tag", ",", "text", ")" ]
Convert plugin object 'tags' into the form for public site. context is the template context to use, placeholder is the placeholder name
[ "Convert", "plugin", "object", "tags", "into", "the", "form", "for", "public", "site", "." ]
train
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/utils.py#L91-L110
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/html.py
clean_html
def clean_html(data, full=True, parser=DEFAULT_PARSER): """ Cleans HTML from XSS vulnerabilities using html5lib If full is False, only the contents inside <body> will be returned (without the <body> tags). """ if full: dom_tree = parser.parse(data) else: dom_tree = parser.parseFragment(data) walker = treewalkers.getTreeWalker('dom') kwargs = _filter_kwargs() stream = TextSanitizer(walker(dom_tree), **kwargs) s = serializer.HTMLSerializer( omit_optional_tags=False, quote_attr_values='always', ) return u''.join(s.serialize(stream))
python
def clean_html(data, full=True, parser=DEFAULT_PARSER): if full: dom_tree = parser.parse(data) else: dom_tree = parser.parseFragment(data) walker = treewalkers.getTreeWalker('dom') kwargs = _filter_kwargs() stream = TextSanitizer(walker(dom_tree), **kwargs) s = serializer.HTMLSerializer( omit_optional_tags=False, quote_attr_values='always', ) return u''.join(s.serialize(stream))
[ "def", "clean_html", "(", "data", ",", "full", "=", "True", ",", "parser", "=", "DEFAULT_PARSER", ")", ":", "if", "full", ":", "dom_tree", "=", "parser", ".", "parse", "(", "data", ")", "else", ":", "dom_tree", "=", "parser", ".", "parseFragment", "(", "data", ")", "walker", "=", "treewalkers", ".", "getTreeWalker", "(", "'dom'", ")", "kwargs", "=", "_filter_kwargs", "(", ")", "stream", "=", "TextSanitizer", "(", "walker", "(", "dom_tree", ")", ",", "*", "*", "kwargs", ")", "s", "=", "serializer", ".", "HTMLSerializer", "(", "omit_optional_tags", "=", "False", ",", "quote_attr_values", "=", "'always'", ",", ")", "return", "u''", ".", "join", "(", "s", ".", "serialize", "(", "stream", ")", ")" ]
Cleans HTML from XSS vulnerabilities using html5lib If full is False, only the contents inside <body> will be returned (without the <body> tags).
[ "Cleans", "HTML", "from", "XSS", "vulnerabilities", "using", "html5lib", "If", "full", "is", "False", "only", "the", "contents", "inside", "<body", ">", "will", "be", "returned", "(", "without", "the", "<body", ">", "tags", ")", "." ]
train
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/html.py#L56-L73
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/html.py
extract_images
def extract_images(data, plugin): """ extracts base64 encoded images from drag and drop actions in browser and saves those images as plugins """ if not settings.TEXT_SAVE_IMAGE_FUNCTION: return data tree_builder = html5lib.treebuilders.getTreeBuilder('dom') parser = html5lib.html5parser.HTMLParser(tree=tree_builder) dom = parser.parse(data) found = False for img in dom.getElementsByTagName('img'): src = img.getAttribute('src') if not src.startswith('data:'): # nothing to do continue width = img.getAttribute('width') height = img.getAttribute('height') # extract the image data data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)') m = data_re.search(src) dr = m.groupdict() mime_type = dr['mime_type'] image_data = dr['data'] if mime_type.find(';'): mime_type = mime_type.split(';')[0] try: image_data = base64.b64decode(image_data) except Exception: image_data = base64.urlsafe_b64decode(image_data) try: image_type = mime_type.split('/')[1] except IndexError: # No image type specified -- will convert to jpg below if it's valid image data image_type = '' image = BytesIO(image_data) # genarate filename and normalize image format if image_type == 'jpg' or image_type == 'jpeg': file_ending = 'jpg' elif image_type == 'png': file_ending = 'png' elif image_type == 'gif': file_ending = 'gif' else: # any not "web-safe" image format we try to convert to jpg im = Image.open(image) new_image = BytesIO() file_ending = 'jpg' im.save(new_image, 'JPEG') new_image.seek(0) image = new_image filename = u'%s.%s' % (uuid.uuid4(), file_ending) # transform image into a cms plugin image_plugin = img_data_to_plugin( filename, image, parent_plugin=plugin, width=width, height=height ) # render the new html for the plugin new_img_html = plugin_to_tag(image_plugin) # replace the original image node with the newly created cms plugin html img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img) found = True if found: return u''.join([y.toxml() 
for y in dom.getElementsByTagName('body')[0].childNodes]) else: return data
python
def extract_images(data, plugin): if not settings.TEXT_SAVE_IMAGE_FUNCTION: return data tree_builder = html5lib.treebuilders.getTreeBuilder('dom') parser = html5lib.html5parser.HTMLParser(tree=tree_builder) dom = parser.parse(data) found = False for img in dom.getElementsByTagName('img'): src = img.getAttribute('src') if not src.startswith('data:'): continue width = img.getAttribute('width') height = img.getAttribute('height') data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)') m = data_re.search(src) dr = m.groupdict() mime_type = dr['mime_type'] image_data = dr['data'] if mime_type.find(';'): mime_type = mime_type.split(';')[0] try: image_data = base64.b64decode(image_data) except Exception: image_data = base64.urlsafe_b64decode(image_data) try: image_type = mime_type.split('/')[1] except IndexError: image_type = '' image = BytesIO(image_data) if image_type == 'jpg' or image_type == 'jpeg': file_ending = 'jpg' elif image_type == 'png': file_ending = 'png' elif image_type == 'gif': file_ending = 'gif' else: im = Image.open(image) new_image = BytesIO() file_ending = 'jpg' im.save(new_image, 'JPEG') new_image.seek(0) image = new_image filename = u'%s.%s' % (uuid.uuid4(), file_ending) image_plugin = img_data_to_plugin( filename, image, parent_plugin=plugin, width=width, height=height ) new_img_html = plugin_to_tag(image_plugin) img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img) found = True if found: return u''.join([y.toxml() for y in dom.getElementsByTagName('body')[0].childNodes]) else: return data
[ "def", "extract_images", "(", "data", ",", "plugin", ")", ":", "if", "not", "settings", ".", "TEXT_SAVE_IMAGE_FUNCTION", ":", "return", "data", "tree_builder", "=", "html5lib", ".", "treebuilders", ".", "getTreeBuilder", "(", "'dom'", ")", "parser", "=", "html5lib", ".", "html5parser", ".", "HTMLParser", "(", "tree", "=", "tree_builder", ")", "dom", "=", "parser", ".", "parse", "(", "data", ")", "found", "=", "False", "for", "img", "in", "dom", ".", "getElementsByTagName", "(", "'img'", ")", ":", "src", "=", "img", ".", "getAttribute", "(", "'src'", ")", "if", "not", "src", ".", "startswith", "(", "'data:'", ")", ":", "# nothing to do", "continue", "width", "=", "img", ".", "getAttribute", "(", "'width'", ")", "height", "=", "img", ".", "getAttribute", "(", "'height'", ")", "# extract the image data", "data_re", "=", "re", ".", "compile", "(", "r'data:(?P<mime_type>[^\"]*);(?P<encoding>[^\"]*),(?P<data>[^\"]*)'", ")", "m", "=", "data_re", ".", "search", "(", "src", ")", "dr", "=", "m", ".", "groupdict", "(", ")", "mime_type", "=", "dr", "[", "'mime_type'", "]", "image_data", "=", "dr", "[", "'data'", "]", "if", "mime_type", ".", "find", "(", "';'", ")", ":", "mime_type", "=", "mime_type", ".", "split", "(", "';'", ")", "[", "0", "]", "try", ":", "image_data", "=", "base64", ".", "b64decode", "(", "image_data", ")", "except", "Exception", ":", "image_data", "=", "base64", ".", "urlsafe_b64decode", "(", "image_data", ")", "try", ":", "image_type", "=", "mime_type", ".", "split", "(", "'/'", ")", "[", "1", "]", "except", "IndexError", ":", "# No image type specified -- will convert to jpg below if it's valid image data", "image_type", "=", "''", "image", "=", "BytesIO", "(", "image_data", ")", "# genarate filename and normalize image format", "if", "image_type", "==", "'jpg'", "or", "image_type", "==", "'jpeg'", ":", "file_ending", "=", "'jpg'", "elif", "image_type", "==", "'png'", ":", "file_ending", "=", "'png'", "elif", "image_type", "==", "'gif'", ":", 
"file_ending", "=", "'gif'", "else", ":", "# any not \"web-safe\" image format we try to convert to jpg", "im", "=", "Image", ".", "open", "(", "image", ")", "new_image", "=", "BytesIO", "(", ")", "file_ending", "=", "'jpg'", "im", ".", "save", "(", "new_image", ",", "'JPEG'", ")", "new_image", ".", "seek", "(", "0", ")", "image", "=", "new_image", "filename", "=", "u'%s.%s'", "%", "(", "uuid", ".", "uuid4", "(", ")", ",", "file_ending", ")", "# transform image into a cms plugin", "image_plugin", "=", "img_data_to_plugin", "(", "filename", ",", "image", ",", "parent_plugin", "=", "plugin", ",", "width", "=", "width", ",", "height", "=", "height", ")", "# render the new html for the plugin", "new_img_html", "=", "plugin_to_tag", "(", "image_plugin", ")", "# replace the original image node with the newly created cms plugin html", "img", ".", "parentNode", ".", "replaceChild", "(", "parser", ".", "parseFragment", "(", "new_img_html", ")", ".", "childNodes", "[", "0", "]", ",", "img", ")", "found", "=", "True", "if", "found", ":", "return", "u''", ".", "join", "(", "[", "y", ".", "toxml", "(", ")", "for", "y", "in", "dom", ".", "getElementsByTagName", "(", "'body'", ")", "[", "0", "]", ".", "childNodes", "]", ")", "else", ":", "return", "data" ]
extracts base64 encoded images from drag and drop actions in browser and saves those images as plugins
[ "extracts", "base64", "encoded", "images", "from", "drag", "and", "drop", "actions", "in", "browser", "and", "saves", "those", "images", "as", "plugins" ]
train
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/html.py#L76-L140
edx/i18n-tools
i18n/config.py
Configuration.default_config_filename
def default_config_filename(root_dir=None): """ Returns the default name of the configuration file. """ root_dir = Path(root_dir) if root_dir else Path('.').abspath() locale_dir = root_dir / 'locale' if not os.path.exists(locale_dir): locale_dir = root_dir / 'conf' / 'locale' return locale_dir / BASE_CONFIG_FILENAME
python
def default_config_filename(root_dir=None): root_dir = Path(root_dir) if root_dir else Path('.').abspath() locale_dir = root_dir / 'locale' if not os.path.exists(locale_dir): locale_dir = root_dir / 'conf' / 'locale' return locale_dir / BASE_CONFIG_FILENAME
[ "def", "default_config_filename", "(", "root_dir", "=", "None", ")", ":", "root_dir", "=", "Path", "(", "root_dir", ")", "if", "root_dir", "else", "Path", "(", "'.'", ")", ".", "abspath", "(", ")", "locale_dir", "=", "root_dir", "/", "'locale'", "if", "not", "os", ".", "path", ".", "exists", "(", "locale_dir", ")", ":", "locale_dir", "=", "root_dir", "/", "'conf'", "/", "'locale'", "return", "locale_dir", "/", "BASE_CONFIG_FILENAME" ]
Returns the default name of the configuration file.
[ "Returns", "the", "default", "name", "of", "the", "configuration", "file", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/config.py#L47-L55
edx/i18n-tools
i18n/config.py
Configuration.read_config
def read_config(self, filename): """ Returns data found in config file (as dict), or raises exception if file not found """ if not os.path.exists(filename): raise Exception("Configuration file cannot be found: %s" % filename) with io.open(filename, encoding='UTF-8') as stream: return yaml.safe_load(stream)
python
def read_config(self, filename): if not os.path.exists(filename): raise Exception("Configuration file cannot be found: %s" % filename) with io.open(filename, encoding='UTF-8') as stream: return yaml.safe_load(stream)
[ "def", "read_config", "(", "self", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "Exception", "(", "\"Configuration file cannot be found: %s\"", "%", "filename", ")", "with", "io", ".", "open", "(", "filename", ",", "encoding", "=", "'UTF-8'", ")", "as", "stream", ":", "return", "yaml", ".", "safe_load", "(", "stream", ")" ]
Returns data found in config file (as dict), or raises exception if file not found
[ "Returns", "data", "found", "in", "config", "file", "(", "as", "dict", ")", "or", "raises", "exception", "if", "file", "not", "found" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/config.py#L57-L64
edx/i18n-tools
i18n/config.py
Configuration.rtl_langs
def rtl_langs(self): """ Returns the set of translated RTL language codes present in self.locales. Ignores source locale. """ def is_rtl(lang): """ Returns True if lang is a RTL language args: lang (str): The language to be checked Returns: True if lang is an RTL language. """ # Base RTL langs are Arabic, Farsi, Hebrew, and Urdu base_rtl = ['ar', 'fa', 'he', 'ur'] # do this to capture both 'fa' and 'fa_IR' return any([lang.startswith(base_code) for base_code in base_rtl]) return sorted(set([lang for lang in self.translated_locales if is_rtl(lang)]))
python
def rtl_langs(self): def is_rtl(lang): base_rtl = ['ar', 'fa', 'he', 'ur'] return any([lang.startswith(base_code) for base_code in base_rtl]) return sorted(set([lang for lang in self.translated_locales if is_rtl(lang)]))
[ "def", "rtl_langs", "(", "self", ")", ":", "def", "is_rtl", "(", "lang", ")", ":", "\"\"\"\n Returns True if lang is a RTL language\n\n args:\n lang (str): The language to be checked\n\n Returns:\n True if lang is an RTL language.\n \"\"\"", "# Base RTL langs are Arabic, Farsi, Hebrew, and Urdu", "base_rtl", "=", "[", "'ar'", ",", "'fa'", ",", "'he'", ",", "'ur'", "]", "# do this to capture both 'fa' and 'fa_IR'", "return", "any", "(", "[", "lang", ".", "startswith", "(", "base_code", ")", "for", "base_code", "in", "base_rtl", "]", ")", "return", "sorted", "(", "set", "(", "[", "lang", "for", "lang", "in", "self", ".", "translated_locales", "if", "is_rtl", "(", "lang", ")", "]", ")", ")" ]
Returns the set of translated RTL language codes present in self.locales. Ignores source locale.
[ "Returns", "the", "set", "of", "translated", "RTL", "language", "codes", "present", "in", "self", ".", "locales", ".", "Ignores", "source", "locale", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/config.py#L94-L115
edx/i18n-tools
i18n/branch_cleanup.py
BranchCleanup.clean_conf_folder
def clean_conf_folder(self, locale):
    """Remove the configuration directory for `locale`"""
    self.configuration.get_messages_dir(locale).removedirs_p()
python
def clean_conf_folder(self, locale): dirname = self.configuration.get_messages_dir(locale) dirname.removedirs_p()
[ "def", "clean_conf_folder", "(", "self", ",", "locale", ")", ":", "dirname", "=", "self", ".", "configuration", ".", "get_messages_dir", "(", "locale", ")", "dirname", ".", "removedirs_p", "(", ")" ]
Remove the configuration directory for `locale`
[ "Remove", "the", "configuration", "directory", "for", "locale" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/branch_cleanup.py#L27-L30
edx/i18n-tools
i18n/changed.py
Changed.run
def run(self, args):
    """
    Main entry point of script
    """
    detected = self.detect_changes()
    print(self.get_message(detected))
    return int(detected)
python
def run(self, args): changes_detected = self.detect_changes() message = self.get_message(changes_detected) print(message) return int(changes_detected)
[ "def", "run", "(", "self", ",", "args", ")", ":", "changes_detected", "=", "self", ".", "detect_changes", "(", ")", "message", "=", "self", ".", "get_message", "(", "changes_detected", ")", "print", "(", "message", ")", "return", "int", "(", "changes_detected", ")" ]
Main entry point of script
[ "Main", "entry", "point", "of", "script" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/changed.py#L16-L23
edx/i18n-tools
i18n/segment.py
segment_pofiles
def segment_pofiles(configuration, locale):
    """Segment all the pofiles for `locale`.

    Returns a set of filenames, all the segment files written.
    """
    written = set()
    for po_name, patterns in configuration.segment.items():
        po_path = configuration.get_messages_dir(locale) / po_name
        written |= segment_pofile(po_path, patterns)
    return written
python
def segment_pofiles(configuration, locale): files_written = set() for filename, segments in configuration.segment.items(): filename = configuration.get_messages_dir(locale) / filename files_written.update(segment_pofile(filename, segments)) return files_written
[ "def", "segment_pofiles", "(", "configuration", ",", "locale", ")", ":", "files_written", "=", "set", "(", ")", "for", "filename", ",", "segments", "in", "configuration", ".", "segment", ".", "items", "(", ")", ":", "filename", "=", "configuration", ".", "get_messages_dir", "(", "locale", ")", "/", "filename", "files_written", ".", "update", "(", "segment_pofile", "(", "filename", ",", "segments", ")", ")", "return", "files_written" ]
Segment all the pofiles for `locale`. Returns a set of filenames, all the segment files written.
[ "Segment", "all", "the", "pofiles", "for", "locale", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L20-L30
edx/i18n-tools
i18n/segment.py
segment_pofile
def segment_pofile(filename, segments):
    """Segment a .po file using patterns in `segments`.

    The .po file at `filename` is read, and the occurrence locations of its
    messages are examined.  `segments` maps segment .po filenames to lists of
    glob patterns.  A message whose occurrences all match patterns of a single
    segment is written to that segment's .po file; any message matching no
    segment, or more than one, stays in the original file.

    Arguments:
        filename (path.path): path to the original .po file.
        segments (dict): segment filename -> list of glob patterns.

    Returns:
        a set of path objects, all the segment files written.
    """
    source_po = polib.pofile(filename)
    LOG.info("Reading {num} entries from {file}".format(file=filename, num=len(source_po)))  # pylint: disable=logging-format-interpolation

    # An empty pofile cloned from the source (so headers/metadata carry over);
    # it collects every message that is not routed to a segment.
    remaining_po = copy.deepcopy(source_po)
    remaining_po[:] = []

    # output_pofiles: output filename -> pofile accumulating its messages.
    # pattern_pairs: flat list of (glob pattern, output filename), in order.
    output_pofiles = {filename: remaining_po}
    pattern_pairs = []
    for target_name, patterns in segments.items():
        output_pofiles[target_name] = copy.deepcopy(remaining_po)
        for pattern in patterns:
            pattern_pairs.append((pattern, target_name))

    # Route each message: an occurrence goes to the first segment whose
    # pattern matches it, or to the main file if none match.  A message is
    # segmented only when all its occurrences agree on one destination.
    for msg in source_po:
        destinations = set()
        for occ_file, _ in msg.occurrences:
            matched = next(
                (target for pattern, target in pattern_pairs if fnmatch.fnmatch(occ_file, pattern)),
                filename,
            )
            destinations.add(matched)
        assert destinations
        if len(destinations) == 1:
            output_pofiles[destinations.pop()].append(msg)
        else:
            # Ambiguous: keep it in the main file.
            remaining_po.append(msg)

    # Write out the results.
    files_written = set()
    for target_name, pofile in output_pofiles.items():
        out_file = filename.dirname() / target_name
        if not pofile:
            LOG.error("No messages to write to %s, did you run segment twice?", out_file)
        else:
            LOG.info("Writing {num} entries to {file}".format(file=out_file, num=len(pofile)))  # pylint: disable=logging-format-interpolation
            pofile.save(out_file)
            files_written.add(out_file)
    return files_written
python
def segment_pofile(filename, segments): reading_msg = "Reading {num} entries from {file}" writing_msg = "Writing {num} entries to {file}" source_po = polib.pofile(filename) LOG.info(reading_msg.format(file=filename, num=len(source_po))) remaining_po = copy.deepcopy(source_po) remaining_po[:] = [] segment_po_files = {filename: remaining_po} segment_patterns = [] for segmentfile, patterns in segments.items(): segment_po_files[segmentfile] = copy.deepcopy(remaining_po) segment_patterns.extend((pat, segmentfile) for pat in patterns) for msg in source_po: msg_segments = set() for occ_file, _ in msg.occurrences: for pat, segment_file in segment_patterns: if fnmatch.fnmatch(occ_file, pat): msg_segments.add(segment_file) break else: msg_segments.add(filename) assert msg_segments if len(msg_segments) == 1: segment_file = msg_segments.pop() segment_po_files[segment_file].append(msg) else: remaining_po.append(msg) files_written = set() for segment_file, pofile in segment_po_files.items(): out_file = filename.dirname() / segment_file if not pofile: LOG.error("No messages to write to %s, did you run segment twice?", out_file) else: LOG.info(writing_msg.format(file=out_file, num=len(pofile))) pofile.save(out_file) files_written.add(out_file) return files_written
[ "def", "segment_pofile", "(", "filename", ",", "segments", ")", ":", "reading_msg", "=", "\"Reading {num} entries from {file}\"", "writing_msg", "=", "\"Writing {num} entries to {file}\"", "source_po", "=", "polib", ".", "pofile", "(", "filename", ")", "LOG", ".", "info", "(", "reading_msg", ".", "format", "(", "file", "=", "filename", ",", "num", "=", "len", "(", "source_po", ")", ")", ")", "# pylint: disable=logging-format-interpolation", "# A new pofile just like the source, but with no messages. We'll put", "# anything not segmented into this file.", "remaining_po", "=", "copy", ".", "deepcopy", "(", "source_po", ")", "remaining_po", "[", ":", "]", "=", "[", "]", "# Turn the segments dictionary into two structures: segment_patterns is a", "# list of (pattern, segmentfile) pairs. segment_po_files is a dict mapping", "# segment file names to pofile objects of their contents.", "segment_po_files", "=", "{", "filename", ":", "remaining_po", "}", "segment_patterns", "=", "[", "]", "for", "segmentfile", ",", "patterns", "in", "segments", ".", "items", "(", ")", ":", "segment_po_files", "[", "segmentfile", "]", "=", "copy", ".", "deepcopy", "(", "remaining_po", ")", "segment_patterns", ".", "extend", "(", "(", "pat", ",", "segmentfile", ")", "for", "pat", "in", "patterns", ")", "# Examine each message in the source file. If all of its occurrences match", "# a pattern for the same segment, it goes in that segment. 
Otherwise, it", "# goes in remaining.", "for", "msg", "in", "source_po", ":", "msg_segments", "=", "set", "(", ")", "for", "occ_file", ",", "_", "in", "msg", ".", "occurrences", ":", "for", "pat", ",", "segment_file", "in", "segment_patterns", ":", "if", "fnmatch", ".", "fnmatch", "(", "occ_file", ",", "pat", ")", ":", "msg_segments", ".", "add", "(", "segment_file", ")", "break", "else", ":", "msg_segments", ".", "add", "(", "filename", ")", "assert", "msg_segments", "if", "len", "(", "msg_segments", ")", "==", "1", ":", "# This message belongs in this segment.", "segment_file", "=", "msg_segments", ".", "pop", "(", ")", "segment_po_files", "[", "segment_file", "]", ".", "append", "(", "msg", ")", "else", ":", "# It's in more than one segment, so put it back in the main file.", "remaining_po", ".", "append", "(", "msg", ")", "# Write out the results.", "files_written", "=", "set", "(", ")", "for", "segment_file", ",", "pofile", "in", "segment_po_files", ".", "items", "(", ")", ":", "out_file", "=", "filename", ".", "dirname", "(", ")", "/", "segment_file", "if", "not", "pofile", ":", "LOG", ".", "error", "(", "\"No messages to write to %s, did you run segment twice?\"", ",", "out_file", ")", "else", ":", "LOG", ".", "info", "(", "writing_msg", ".", "format", "(", "file", "=", "out_file", ",", "num", "=", "len", "(", "pofile", ")", ")", ")", "# pylint: disable=logging-format-interpolation", "pofile", ".", "save", "(", "out_file", ")", "files_written", ".", "add", "(", "out_file", ")", "return", "files_written" ]
Segment a .po file using patterns in `segments`. The .po file at `filename` is read, and the occurrence locations of its messages are examined. `segments` is a dictionary: the keys are segment .po filenames, the values are lists of patterns:: { 'django-studio.po': [ 'cms/*', 'some-other-studio-place/*', ], 'django-weird.po': [ '*/weird_*.*', ], } If all a message's occurrences match the patterns for a segment, then that message is written to the new segmented .po file. Any message that matches no segments, or more than one, is written back to the original file. Arguments: filename (path.path): a path object referring to the original .po file. segments (dict): specification of the segments to create. Returns: a set of path objects, all the segment files written.
[ "Segment", "a", ".", "po", "file", "using", "patterns", "in", "segments", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L33-L116
edx/i18n-tools
i18n/segment.py
get_parser
def get_parser(parser):
    """
    Configure the argument parser for the segment command.

    args:
        parser: The argparse parser to configure; gains a description and a
            required `locale` positional argument.
    """
    # Fix: dedent must run BEFORE strip.  The previous order,
    # dedent(text.strip()), stripped the first line's indentation first, so
    # dedent found no common prefix and left the continuation lines indented.
    parser.description = textwrap.dedent("""
        Segment the .po files in LOCALE(s) based on the segmenting rules in
        config.yaml.

        Note that segmenting is *not* idempotent: it modifies the input file, so
        be careful that you don't run it twice on the same file.
    """).strip()
    parser.add_argument("locale", nargs="+", help="a locale to segment")
python
def get_parser(parser): parser.description = textwrap.dedent(.strip()) parser.add_argument("locale", nargs="+", help="a locale to segment")
[ "def", "get_parser", "(", "parser", ")", ":", "parser", ".", "description", "=", "textwrap", ".", "dedent", "(", "\"\"\"\n Segment the .po files in LOCALE(s) based on the segmenting rules in\n config.yaml.\n\n Note that segmenting is *not* idempotent: it modifies the input file, so\n be careful that you don't run it twice on the same file.\n \"\"\"", ".", "strip", "(", ")", ")", "parser", ".", "add_argument", "(", "\"locale\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"a locale to segment\"", ")" ]
Grabs the parser. args: parser: The parser
[ "Grabs", "the", "parser", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L119-L133
edx/i18n-tools
i18n/segment.py
Segment.add_args
def add_args(self):
    """
    Adds the segment command's arguments to self.parser.
    """
    # Fix: dedent must run BEFORE strip.  The previous order,
    # dedent(text.strip()), stripped the first line's indentation first, so
    # dedent found no common prefix and left the continuation lines indented.
    self.parser.description = textwrap.dedent("""
        Segment the .po files in LOCALE(s) based on the segmenting rules in
        config.yaml.

        Note that segmenting is *not* idempotent: it modifies the input file, so
        be careful that you don't run it twice on the same file.
    """).strip()
    self.parser.add_argument("locale", nargs="+", help="a locale to segment")
python
def add_args(self): self.parser.description = textwrap.dedent(.strip()) self.parser.add_argument("locale", nargs="+", help="a locale to segment")
[ "def", "add_args", "(", "self", ")", ":", "self", ".", "parser", ".", "description", "=", "textwrap", ".", "dedent", "(", "\"\"\"\n Segment the .po files in LOCALE(s) based on the segmenting rules in\n config.yaml.\n\n Note that segmenting is *not* idempotent: it modifies the input file, so\n be careful that you don't run it twice on the same file.\n \"\"\"", ".", "strip", "(", ")", ")", "self", ".", "parser", ".", "add_argument", "(", "\"locale\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"a locale to segment\"", ")" ]
Adds arguments
[ "Adds", "arguments" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L140-L151
edx/i18n-tools
i18n/segment.py
Segment.run
def run(self, args):  # pylint: disable=unused-argument
    """
    Main entry point of script
    """
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Used as a standalone tool when adding a new segment; the normal
    # workflow segments files during the extract phase instead.
    for locale in (args.locale or []):
        segment_pofiles(self.configuration, locale)
python
def run(self, args): logging.basicConfig(stream=sys.stdout, level=logging.INFO) locales = args.locale or [] for locale in locales: segment_pofiles(self.configuration, locale)
[ "def", "run", "(", "self", ",", "args", ")", ":", "# pylint: disable=unused-argument", "logging", ".", "basicConfig", "(", "stream", "=", "sys", ".", "stdout", ",", "level", "=", "logging", ".", "INFO", ")", "# This is used as a tool only to segment translation files when adding a", "# new segment. In the regular workflow, the work is done by the extract", "# phase calling the functions above.", "locales", "=", "args", ".", "locale", "or", "[", "]", "for", "locale", "in", "locales", ":", "segment_pofiles", "(", "self", ".", "configuration", ",", "locale", ")" ]
Main entry point of script
[ "Main", "entry", "point", "of", "script" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L153-L164
edx/i18n-tools
i18n/extract.py
fix_header
def fix_header(pofile):
    """
    Replace default headers with edX headers
    """
    # django-admin.py makemessages emits a boilerplate header
    # (SOME DESCRIPTIVE TITLE / YEAR / PACKAGE placeholders); rewrite each
    # placeholder with its edX-specific value.
    pofile.metadata_is_fuzzy = []  # remove [u'fuzzy']
    replacements = (
        ('SOME DESCRIPTIVE TITLE', EDX_MARKER),
        ('Translations template for PROJECT.', EDX_MARKER),
        ('YEAR', str(datetime.utcnow().year)),
        ('ORGANIZATION', 'edX'),
        ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
        (
            'This file is distributed under the same license as the PROJECT project.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        (
            'This file is distributed under the same license as the PACKAGE package.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'),
    )
    header_text = pofile.header
    for old, new in replacements:
        header_text = header_text.replace(old, new)
    pofile.header = header_text
python
def fix_header(pofile): pofile.metadata_is_fuzzy = [] header = pofile.header fixes = ( ('SOME DESCRIPTIVE TITLE', EDX_MARKER), ('Translations template for PROJECT.', EDX_MARKER), ('YEAR', str(datetime.utcnow().year)), ('ORGANIZATION', 'edX'), ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"), ( 'This file is distributed under the same license as the PROJECT project.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ( 'This file is distributed under the same license as the PACKAGE package.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'), ) for src, dest in fixes: header = header.replace(src, dest) pofile.header = header
[ "def", "fix_header", "(", "pofile", ")", ":", "# By default, django-admin.py makemessages creates this header:", "#", "# SOME DESCRIPTIVE TITLE.", "# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER", "# This file is distributed under the same license as the PACKAGE package.", "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.", "pofile", ".", "metadata_is_fuzzy", "=", "[", "]", "# remove [u'fuzzy']", "header", "=", "pofile", ".", "header", "fixes", "=", "(", "(", "'SOME DESCRIPTIVE TITLE'", ",", "EDX_MARKER", ")", ",", "(", "'Translations template for PROJECT.'", ",", "EDX_MARKER", ")", ",", "(", "'YEAR'", ",", "str", "(", "datetime", ".", "utcnow", "(", ")", ".", "year", ")", ")", ",", "(", "'ORGANIZATION'", ",", "'edX'", ")", ",", "(", "\"THE PACKAGE'S COPYRIGHT HOLDER\"", ",", "\"EdX\"", ")", ",", "(", "'This file is distributed under the same license as the PROJECT project.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'This file is distributed under the same license as the PACKAGE package.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'FIRST AUTHOR <EMAIL@ADDRESS>'", ",", "'EdX Team <[email protected]>'", ")", ",", ")", "for", "src", ",", "dest", "in", "fixes", ":", "header", "=", "header", ".", "replace", "(", "src", ",", "dest", ")", "pofile", ".", "header", "=", "header" ]
Replace default headers with edX headers
[ "Replace", "default", "headers", "with", "edX", "headers" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L184-L216
edx/i18n-tools
i18n/extract.py
strip_key_strings
def strip_key_strings(pofile):
    """
    Removes all entries in PO which are key strings.  These entries should
    appear only in messages.po, not in any other po files.
    """
    # In-place slice assignment keeps the same pofile object (and its
    # metadata) while dropping the key-string entries.
    pofile[:] = [entry for entry in pofile if not is_key_string(entry.msgid)]
python
def strip_key_strings(pofile): newlist = [entry for entry in pofile if not is_key_string(entry.msgid)] del pofile[:] pofile += newlist
[ "def", "strip_key_strings", "(", "pofile", ")", ":", "newlist", "=", "[", "entry", "for", "entry", "in", "pofile", "if", "not", "is_key_string", "(", "entry", ".", "msgid", ")", "]", "del", "pofile", "[", ":", "]", "pofile", "+=", "newlist" ]
Removes all entries in PO which are key strings. These entries should appear only in messages.po, not in any other po files.
[ "Removes", "all", "entries", "in", "PO", "which", "are", "key", "strings", ".", "These", "entries", "should", "appear", "only", "in", "messages", ".", "po", "not", "in", "any", "other", "po", "files", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L249-L256
edx/i18n-tools
i18n/extract.py
Extract.base
def base(self, path1, *paths):
    """Return a relative path from config.BASE_DIR to path1 / paths[0] / ... """
    return self.configuration.root_dir.relpathto(path1.joinpath(*paths))
python
def base(self, path1, *paths): root_dir = self.configuration.root_dir return root_dir.relpathto(path1.joinpath(*paths))
[ "def", "base", "(", "self", ",", "path1", ",", "*", "paths", ")", ":", "root_dir", "=", "self", ".", "configuration", ".", "root_dir", "return", "root_dir", ".", "relpathto", "(", "path1", ".", "joinpath", "(", "*", "paths", ")", ")" ]
Return a relative path from config.BASE_DIR to path1 / paths[0] / ...
[ "Return", "a", "relative", "path", "from", "config", ".", "BASE_DIR", "to", "path1", "/", "paths", "[", "0", "]", "/", "..." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L43-L46
edx/i18n-tools
i18n/extract.py
Extract.rename_source_file
def rename_source_file(self, src, dst):
    """
    Rename a file in the source directory.
    """
    source_dir = self.source_msgs_dir
    try:
        os.rename(source_dir.joinpath(src), source_dir.joinpath(dst))
    except OSError:
        # Best-effort rename: a missing source file is silently ignored.
        pass
python
def rename_source_file(self, src, dst): try: os.rename(self.source_msgs_dir.joinpath(src), self.source_msgs_dir.joinpath(dst)) except OSError: pass
[ "def", "rename_source_file", "(", "self", ",", "src", ",", "dst", ")", ":", "try", ":", "os", ".", "rename", "(", "self", ".", "source_msgs_dir", ".", "joinpath", "(", "src", ")", ",", "self", ".", "source_msgs_dir", ".", "joinpath", "(", "dst", ")", ")", "except", "OSError", ":", "pass" ]
Rename a file in the source directory.
[ "Rename", "a", "file", "in", "the", "source", "directory", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L55-L62
edx/i18n-tools
i18n/extract.py
Extract.run
def run(self, args):
    """
    Main entry point of script

    Extracts translatable strings from mako templates, underscore templates,
    django source files, javascript sources, and third-party apps, then
    segments and cleans the generated .po files.

    args: parsed command-line arguments; this reads args.verbose
        (presumably an int verbosity level 0-2 -- TODO confirm against the
        argument parser).
    """
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    configuration = self.configuration
    configuration.locale_dir.parent.makedirs_p()
    # pylint: disable=attribute-defined-outside-init
    self.source_msgs_dir = configuration.source_messages_dir

    # The extraction process clobbers django.po and djangojs.po.
    # Save them so that it won't do that.
    self.rename_source_file('django.po', 'django-saved.po')
    self.rename_source_file('djangojs.po', 'djangojs-saved.po')

    # Extract strings from mako templates.
    verbosity_map = {
        0: "-q",
        1: "",
        2: "-v",
    }
    babel_verbosity = verbosity_map.get(args.verbose, "")

    # Suppress subprocess stderr unless the user asked for verbose output.
    if args.verbose:
        stderr = None
    else:
        stderr = DEVNULL

    # --keyword informs Babel that `interpolate()` is an expected
    # gettext function, which is necessary because the `tokenize` function
    # in the `markey` module marks it as such and passes it to Babel.
    # (These functions are called in the django-babel-underscore module.)
    babel_cmd_template = (
        'pybabel {verbosity} extract --mapping={config} '
        '--add-comments="Translators:" --keyword="interpolate" '
        '. --output={output}'
    )

    babel_mako_cfg = self.base(configuration.locale_dir, 'babel_mako.cfg')
    if babel_mako_cfg.exists():
        babel_mako_cmd = babel_cmd_template.format(
            verbosity=babel_verbosity,
            config=babel_mako_cfg,
            output=self.base(configuration.source_messages_dir, 'mako.po'),
        )
        execute(babel_mako_cmd, working_directory=configuration.root_dir, stderr=stderr)

    babel_underscore_cfg = self.base(configuration.locale_dir, 'babel_underscore.cfg')
    if babel_underscore_cfg.exists():
        babel_underscore_cmd = babel_cmd_template.format(
            verbosity=babel_verbosity,
            config=babel_underscore_cfg,
            output=self.base(configuration.source_messages_dir, 'underscore.po'),
        )
        execute(babel_underscore_cmd, working_directory=configuration.root_dir, stderr=stderr)

    makemessages = "django-admin.py makemessages -l en -v{}".format(args.verbose)
    ignores = " ".join('--ignore="{}/*"'.format(d) for d in configuration.ignore_dirs)
    if ignores:
        makemessages += " " + ignores

    # Extract strings from django source files (*.py, *.html, *.txt).
    make_django_cmd = makemessages + ' -d django'
    execute(make_django_cmd, working_directory=configuration.root_dir, stderr=stderr)

    # Extract strings from Javascript source files (*.js, *jsx).
    make_djangojs_cmd = makemessages + ' -d djangojs -e js,jsx'
    execute(make_djangojs_cmd, working_directory=configuration.root_dir, stderr=stderr)

    # makemessages creates 'django.po'. This filename is hardcoded.
    # Rename it to django-partial.po to enable merging into django.po later.
    self.rename_source_file('django.po', 'django-partial.po')

    # makemessages creates 'djangojs.po'. This filename is hardcoded.
    # Rename it to djangojs-partial.po to enable merging into djangojs.po later.
    self.rename_source_file('djangojs.po', 'djangojs-partial.po')

    files_to_clean = set()

    # Extract strings from third-party applications.
    for app_name in configuration.third_party:
        # Import the app to find out where it is.  Then use pybabel to extract
        # from that directory.
        app_module = importlib.import_module(app_name)
        app_dir = Path(app_module.__file__).dirname().dirname()  # pylint: disable=no-value-for-parameter
        output_file = self.source_msgs_dir / (app_name + ".po")
        files_to_clean.add(output_file)

        babel_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" {app} -o {output}'
        babel_cmd = babel_cmd.format(
            verbosity=babel_verbosity,
            config=configuration.locale_dir / 'babel_third_party.cfg',
            app=app_name,
            output=output_file,
        )
        execute(babel_cmd, working_directory=app_dir, stderr=stderr)

    # Segment the generated files.
    segmented_files = segment_pofiles(configuration, configuration.source_locale)
    files_to_clean.update(segmented_files)

    # Finish each file.
    for filename in files_to_clean:
        LOG.info('Cleaning %s', filename)
        pofile = polib.pofile(self.source_msgs_dir.joinpath(filename))
        # replace default headers with edX headers
        fix_header(pofile)
        # replace default metadata with edX metadata
        fix_metadata(pofile)
        # remove key strings which belong in messages.po
        strip_key_strings(pofile)
        pofile.save()

    # Restore the saved .po files.
    self.rename_source_file('django-saved.po', 'django.po')
    self.rename_source_file('djangojs-saved.po', 'djangojs.po')
python
def run(self, args): logging.basicConfig(stream=sys.stdout, level=logging.INFO) configuration = self.configuration configuration.locale_dir.parent.makedirs_p() self.source_msgs_dir = configuration.source_messages_dir self.rename_source_file('django.po', 'django-saved.po') self.rename_source_file('djangojs.po', 'djangojs-saved.po') verbosity_map = { 0: "-q", 1: "", 2: "-v", } babel_verbosity = verbosity_map.get(args.verbose, "") if args.verbose: stderr = None else: stderr = DEVNULL babel_cmd_template = ( 'pybabel {verbosity} extract --mapping={config} ' '--add-comments="Translators:" --keyword="interpolate" ' '. --output={output}' ) babel_mako_cfg = self.base(configuration.locale_dir, 'babel_mako.cfg') if babel_mako_cfg.exists(): babel_mako_cmd = babel_cmd_template.format( verbosity=babel_verbosity, config=babel_mako_cfg, output=self.base(configuration.source_messages_dir, 'mako.po'), ) execute(babel_mako_cmd, working_directory=configuration.root_dir, stderr=stderr) babel_underscore_cfg = self.base(configuration.locale_dir, 'babel_underscore.cfg') if babel_underscore_cfg.exists(): babel_underscore_cmd = babel_cmd_template.format( verbosity=babel_verbosity, config=babel_underscore_cfg, output=self.base(configuration.source_messages_dir, 'underscore.po'), ) execute(babel_underscore_cmd, working_directory=configuration.root_dir, stderr=stderr) makemessages = "django-admin.py makemessages -l en -v{}".format(args.verbose) ignores = " ".join('--ignore="{}/*"'.format(d) for d in configuration.ignore_dirs) if ignores: makemessages += " " + ignores make_django_cmd = makemessages + ' -d django' execute(make_django_cmd, working_directory=configuration.root_dir, stderr=stderr) make_djangojs_cmd = makemessages + ' -d djangojs -e js,jsx' execute(make_djangojs_cmd, working_directory=configuration.root_dir, stderr=stderr) self.rename_source_file('django.po', 'django-partial.po') self.rename_source_file('djangojs.po', 'djangojs-partial.po') files_to_clean = set() for app_name in 
configuration.third_party: app_module = importlib.import_module(app_name) app_dir = Path(app_module.__file__).dirname().dirname() output_file = self.source_msgs_dir / (app_name + ".po") files_to_clean.add(output_file) babel_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" {app} -o {output}' babel_cmd = babel_cmd.format( verbosity=babel_verbosity, config=configuration.locale_dir / 'babel_third_party.cfg', app=app_name, output=output_file, ) execute(babel_cmd, working_directory=app_dir, stderr=stderr) segmented_files = segment_pofiles(configuration, configuration.source_locale) files_to_clean.update(segmented_files) for filename in files_to_clean: LOG.info('Cleaning %s', filename) pofile = polib.pofile(self.source_msgs_dir.joinpath(filename)) fix_header(pofile) fix_metadata(pofile) strip_key_strings(pofile) pofile.save() self.rename_source_file('django-saved.po', 'django.po') self.rename_source_file('djangojs-saved.po', 'djangojs.po')
[ "def", "run", "(", "self", ",", "args", ")", ":", "logging", ".", "basicConfig", "(", "stream", "=", "sys", ".", "stdout", ",", "level", "=", "logging", ".", "INFO", ")", "configuration", "=", "self", ".", "configuration", "configuration", ".", "locale_dir", ".", "parent", ".", "makedirs_p", "(", ")", "# pylint: disable=attribute-defined-outside-init", "self", ".", "source_msgs_dir", "=", "configuration", ".", "source_messages_dir", "# The extraction process clobbers django.po and djangojs.po.", "# Save them so that it won't do that.", "self", ".", "rename_source_file", "(", "'django.po'", ",", "'django-saved.po'", ")", "self", ".", "rename_source_file", "(", "'djangojs.po'", ",", "'djangojs-saved.po'", ")", "# Extract strings from mako templates.", "verbosity_map", "=", "{", "0", ":", "\"-q\"", ",", "1", ":", "\"\"", ",", "2", ":", "\"-v\"", ",", "}", "babel_verbosity", "=", "verbosity_map", ".", "get", "(", "args", ".", "verbose", ",", "\"\"", ")", "if", "args", ".", "verbose", ":", "stderr", "=", "None", "else", ":", "stderr", "=", "DEVNULL", "# --keyword informs Babel that `interpolate()` is an expected", "# gettext function, which is necessary because the `tokenize` function", "# in the `markey` module marks it as such and passes it to Babel.", "# (These functions are called in the django-babel-underscore module.)", "babel_cmd_template", "=", "(", "'pybabel {verbosity} extract --mapping={config} '", "'--add-comments=\"Translators:\" --keyword=\"interpolate\" '", "'. 
--output={output}'", ")", "babel_mako_cfg", "=", "self", ".", "base", "(", "configuration", ".", "locale_dir", ",", "'babel_mako.cfg'", ")", "if", "babel_mako_cfg", ".", "exists", "(", ")", ":", "babel_mako_cmd", "=", "babel_cmd_template", ".", "format", "(", "verbosity", "=", "babel_verbosity", ",", "config", "=", "babel_mako_cfg", ",", "output", "=", "self", ".", "base", "(", "configuration", ".", "source_messages_dir", ",", "'mako.po'", ")", ",", ")", "execute", "(", "babel_mako_cmd", ",", "working_directory", "=", "configuration", ".", "root_dir", ",", "stderr", "=", "stderr", ")", "babel_underscore_cfg", "=", "self", ".", "base", "(", "configuration", ".", "locale_dir", ",", "'babel_underscore.cfg'", ")", "if", "babel_underscore_cfg", ".", "exists", "(", ")", ":", "babel_underscore_cmd", "=", "babel_cmd_template", ".", "format", "(", "verbosity", "=", "babel_verbosity", ",", "config", "=", "babel_underscore_cfg", ",", "output", "=", "self", ".", "base", "(", "configuration", ".", "source_messages_dir", ",", "'underscore.po'", ")", ",", ")", "execute", "(", "babel_underscore_cmd", ",", "working_directory", "=", "configuration", ".", "root_dir", ",", "stderr", "=", "stderr", ")", "makemessages", "=", "\"django-admin.py makemessages -l en -v{}\"", ".", "format", "(", "args", ".", "verbose", ")", "ignores", "=", "\" \"", ".", "join", "(", "'--ignore=\"{}/*\"'", ".", "format", "(", "d", ")", "for", "d", "in", "configuration", ".", "ignore_dirs", ")", "if", "ignores", ":", "makemessages", "+=", "\" \"", "+", "ignores", "# Extract strings from django source files (*.py, *.html, *.txt).", "make_django_cmd", "=", "makemessages", "+", "' -d django'", "execute", "(", "make_django_cmd", ",", "working_directory", "=", "configuration", ".", "root_dir", ",", "stderr", "=", "stderr", ")", "# Extract strings from Javascript source files (*.js, *jsx).", "make_djangojs_cmd", "=", "makemessages", "+", "' -d djangojs -e js,jsx'", "execute", "(", "make_djangojs_cmd", ",", 
"working_directory", "=", "configuration", ".", "root_dir", ",", "stderr", "=", "stderr", ")", "# makemessages creates 'django.po'. This filename is hardcoded.", "# Rename it to django-partial.po to enable merging into django.po later.", "self", ".", "rename_source_file", "(", "'django.po'", ",", "'django-partial.po'", ")", "# makemessages creates 'djangojs.po'. This filename is hardcoded.", "# Rename it to djangojs-partial.po to enable merging into djangojs.po later.", "self", ".", "rename_source_file", "(", "'djangojs.po'", ",", "'djangojs-partial.po'", ")", "files_to_clean", "=", "set", "(", ")", "# Extract strings from third-party applications.", "for", "app_name", "in", "configuration", ".", "third_party", ":", "# Import the app to find out where it is. Then use pybabel to extract", "# from that directory.", "app_module", "=", "importlib", ".", "import_module", "(", "app_name", ")", "app_dir", "=", "Path", "(", "app_module", ".", "__file__", ")", ".", "dirname", "(", ")", ".", "dirname", "(", ")", "# pylint: disable=no-value-for-parameter", "output_file", "=", "self", ".", "source_msgs_dir", "/", "(", "app_name", "+", "\".po\"", ")", "files_to_clean", ".", "add", "(", "output_file", ")", "babel_cmd", "=", "'pybabel {verbosity} extract -F {config} -c \"Translators:\" {app} -o {output}'", "babel_cmd", "=", "babel_cmd", ".", "format", "(", "verbosity", "=", "babel_verbosity", ",", "config", "=", "configuration", ".", "locale_dir", "/", "'babel_third_party.cfg'", ",", "app", "=", "app_name", ",", "output", "=", "output_file", ",", ")", "execute", "(", "babel_cmd", ",", "working_directory", "=", "app_dir", ",", "stderr", "=", "stderr", ")", "# Segment the generated files.", "segmented_files", "=", "segment_pofiles", "(", "configuration", ",", "configuration", ".", "source_locale", ")", "files_to_clean", ".", "update", "(", "segmented_files", ")", "# Finish each file.", "for", "filename", "in", "files_to_clean", ":", "LOG", ".", "info", "(", "'Cleaning %s'", ",", 
"filename", ")", "pofile", "=", "polib", ".", "pofile", "(", "self", ".", "source_msgs_dir", ".", "joinpath", "(", "filename", ")", ")", "# replace default headers with edX headers", "fix_header", "(", "pofile", ")", "# replace default metadata with edX metadata", "fix_metadata", "(", "pofile", ")", "# remove key strings which belong in messages.po", "strip_key_strings", "(", "pofile", ")", "pofile", ".", "save", "(", ")", "# Restore the saved .po files.", "self", ".", "rename_source_file", "(", "'django-saved.po'", ",", "'django.po'", ")", "self", ".", "rename_source_file", "(", "'djangojs-saved.po'", ",", "'djangojs.po'", ")" ]
Main entry point of script
[ "Main", "entry", "point", "of", "script" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L64-L181
edx/i18n-tools
i18n/main.py
get_valid_commands
def get_valid_commands(): """ Returns valid commands. Returns: commands (list): List of valid commands """ modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')] commands = [] for modname in modules: if modname == 'main': continue mod = importlib.import_module('i18n.%s' % modname) if hasattr(mod, 'main'): commands.append(modname) return commands
python
def get_valid_commands(): modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')] commands = [] for modname in modules: if modname == 'main': continue mod = importlib.import_module('i18n.%s' % modname) if hasattr(mod, 'main'): commands.append(modname) return commands
[ "def", "get_valid_commands", "(", ")", ":", "modules", "=", "[", "m", ".", "basename", "(", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "m", "in", "Path", "(", "__file__", ")", ".", "dirname", "(", ")", ".", "files", "(", "'*.py'", ")", "]", "commands", "=", "[", "]", "for", "modname", "in", "modules", ":", "if", "modname", "==", "'main'", ":", "continue", "mod", "=", "importlib", ".", "import_module", "(", "'i18n.%s'", "%", "modname", ")", "if", "hasattr", "(", "mod", ",", "'main'", ")", ":", "commands", ".", "append", "(", "modname", ")", "return", "commands" ]
Returns valid commands. Returns: commands (list): List of valid commands
[ "Returns", "valid", "commands", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L11-L26
edx/i18n-tools
i18n/main.py
error_message
def error_message(): """ Writes out error message specifying the valid commands. Returns: Failure code for system exit """ sys.stderr.write('valid commands:\n') for cmd in get_valid_commands(): sys.stderr.write('\t%s\n' % cmd) return -1
python
def error_message(): sys.stderr.write('valid commands:\n') for cmd in get_valid_commands(): sys.stderr.write('\t%s\n' % cmd) return -1
[ "def", "error_message", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'valid commands:\\n'", ")", "for", "cmd", "in", "get_valid_commands", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'\\t%s\\n'", "%", "cmd", ")", "return", "-", "1" ]
Writes out error message specifying the valid commands. Returns: Failure code for system exit
[ "Writes", "out", "error", "message", "specifying", "the", "valid", "commands", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L29-L39
edx/i18n-tools
i18n/main.py
main
def main(): """ Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid. """ try: command = sys.argv[1] except IndexError: return error_message() try: module = importlib.import_module('i18n.%s' % command) module.main.args = sys.argv[2:] except (ImportError, AttributeError): return error_message() return module.main()
python
def main(): try: command = sys.argv[1] except IndexError: return error_message() try: module = importlib.import_module('i18n.%s' % command) module.main.args = sys.argv[2:] except (ImportError, AttributeError): return error_message() return module.main()
[ "def", "main", "(", ")", ":", "try", ":", "command", "=", "sys", ".", "argv", "[", "1", "]", "except", "IndexError", ":", "return", "error_message", "(", ")", "try", ":", "module", "=", "importlib", ".", "import_module", "(", "'i18n.%s'", "%", "command", ")", "module", ".", "main", ".", "args", "=", "sys", ".", "argv", "[", "2", ":", "]", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "return", "error_message", "(", ")", "return", "module", ".", "main", "(", ")" ]
Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid.
[ "Executes", "the", "given", "command", ".", "Returns", "error_message", "if", "command", "is", "not", "valid", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L42-L60
edx/i18n-tools
i18n/validate.py
validate_po_files
def validate_po_files(configuration, locale_dir, root_dir=None, report_empty=False, check_all=False): """ Validate all of the po files found in the root directory that are not product of a merge. Returns a boolean indicating whether or not problems were found. """ found_problems = False # List of .po files that are the product of a merge (see generate.py). merged_files = configuration.generate_merge.keys() for dirpath, __, filenames in os.walk(root_dir if root_dir else locale_dir): for name in filenames: __, ext = os.path.splitext(name) filename = os.path.join(dirpath, name) # Validate only .po files that are not product of a merge (see generate.py) unless check_all is true. # If django-partial.po has a problem, then django.po will also, so don't report it. if ext.lower() == '.po' and (check_all or os.path.basename(filename) not in merged_files): # First validate the format of this file if msgfmt_check_po_file(locale_dir, filename): found_problems = True # Check that the translated strings are valid, and optionally # check for empty translations. But don't check English. if "/locale/en/" not in filename: problems = check_messages(filename, report_empty) if problems: report_problems(filename, problems) found_problems = True dup_filename = filename.replace('.po', '.dup') has_duplicates = os.path.exists(dup_filename) if has_duplicates: log.warning(" Duplicates found in %s, details in .dup file", dup_filename) found_problems = True if not (problems or has_duplicates): log.info(" No problems found in %s", filename) return found_problems
python
def validate_po_files(configuration, locale_dir, root_dir=None, report_empty=False, check_all=False): found_problems = False merged_files = configuration.generate_merge.keys() for dirpath, __, filenames in os.walk(root_dir if root_dir else locale_dir): for name in filenames: __, ext = os.path.splitext(name) filename = os.path.join(dirpath, name) if ext.lower() == '.po' and (check_all or os.path.basename(filename) not in merged_files): if msgfmt_check_po_file(locale_dir, filename): found_problems = True if "/locale/en/" not in filename: problems = check_messages(filename, report_empty) if problems: report_problems(filename, problems) found_problems = True dup_filename = filename.replace('.po', '.dup') has_duplicates = os.path.exists(dup_filename) if has_duplicates: log.warning(" Duplicates found in %s, details in .dup file", dup_filename) found_problems = True if not (problems or has_duplicates): log.info(" No problems found in %s", filename) return found_problems
[ "def", "validate_po_files", "(", "configuration", ",", "locale_dir", ",", "root_dir", "=", "None", ",", "report_empty", "=", "False", ",", "check_all", "=", "False", ")", ":", "found_problems", "=", "False", "# List of .po files that are the product of a merge (see generate.py).", "merged_files", "=", "configuration", ".", "generate_merge", ".", "keys", "(", ")", "for", "dirpath", ",", "__", ",", "filenames", "in", "os", ".", "walk", "(", "root_dir", "if", "root_dir", "else", "locale_dir", ")", ":", "for", "name", "in", "filenames", ":", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", "# Validate only .po files that are not product of a merge (see generate.py) unless check_all is true.", "# If django-partial.po has a problem, then django.po will also, so don't report it.", "if", "ext", ".", "lower", "(", ")", "==", "'.po'", "and", "(", "check_all", "or", "os", ".", "path", ".", "basename", "(", "filename", ")", "not", "in", "merged_files", ")", ":", "# First validate the format of this file", "if", "msgfmt_check_po_file", "(", "locale_dir", ",", "filename", ")", ":", "found_problems", "=", "True", "# Check that the translated strings are valid, and optionally", "# check for empty translations. 
But don't check English.", "if", "\"/locale/en/\"", "not", "in", "filename", ":", "problems", "=", "check_messages", "(", "filename", ",", "report_empty", ")", "if", "problems", ":", "report_problems", "(", "filename", ",", "problems", ")", "found_problems", "=", "True", "dup_filename", "=", "filename", ".", "replace", "(", "'.po'", ",", "'.dup'", ")", "has_duplicates", "=", "os", ".", "path", ".", "exists", "(", "dup_filename", ")", "if", "has_duplicates", ":", "log", ".", "warning", "(", "\" Duplicates found in %s, details in .dup file\"", ",", "dup_filename", ")", "found_problems", "=", "True", "if", "not", "(", "problems", "or", "has_duplicates", ")", ":", "log", ".", "info", "(", "\" No problems found in %s\"", ",", "filename", ")", "return", "found_problems" ]
Validate all of the po files found in the root directory that are not product of a merge. Returns a boolean indicating whether or not problems were found.
[ "Validate", "all", "of", "the", "po", "files", "found", "in", "the", "root", "directory", "that", "are", "not", "product", "of", "a", "merge", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L22-L63
edx/i18n-tools
i18n/validate.py
msgfmt_check_po_file
def msgfmt_check_po_file(locale_dir, filename): """ Call GNU msgfmt -c on each .po file to validate its format. Any errors caught by msgfmt are logged to log. Returns a boolean indicating whether or not problems were found. """ found_problems = False # Use relative paths to make output less noisy. rfile = os.path.relpath(filename, locale_dir) out, err = call('msgfmt -c -o /dev/null {}'.format(rfile), working_directory=locale_dir) if err: log.info(u'\n' + out.decode('utf8')) log.warning(u'\n' + err.decode('utf8')) found_problems = True return found_problems
python
def msgfmt_check_po_file(locale_dir, filename): found_problems = False rfile = os.path.relpath(filename, locale_dir) out, err = call('msgfmt -c -o /dev/null {}'.format(rfile), working_directory=locale_dir) if err: log.info(u'\n' + out.decode('utf8')) log.warning(u'\n' + err.decode('utf8')) found_problems = True return found_problems
[ "def", "msgfmt_check_po_file", "(", "locale_dir", ",", "filename", ")", ":", "found_problems", "=", "False", "# Use relative paths to make output less noisy.", "rfile", "=", "os", ".", "path", ".", "relpath", "(", "filename", ",", "locale_dir", ")", "out", ",", "err", "=", "call", "(", "'msgfmt -c -o /dev/null {}'", ".", "format", "(", "rfile", ")", ",", "working_directory", "=", "locale_dir", ")", "if", "err", ":", "log", ".", "info", "(", "u'\\n'", "+", "out", ".", "decode", "(", "'utf8'", ")", ")", "log", ".", "warning", "(", "u'\\n'", "+", "err", ".", "decode", "(", "'utf8'", ")", ")", "found_problems", "=", "True", "return", "found_problems" ]
Call GNU msgfmt -c on each .po file to validate its format. Any errors caught by msgfmt are logged to log. Returns a boolean indicating whether or not problems were found.
[ "Call", "GNU", "msgfmt", "-", "c", "on", "each", ".", "po", "file", "to", "validate", "its", "format", ".", "Any", "errors", "caught", "by", "msgfmt", "are", "logged", "to", "log", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L66-L83
edx/i18n-tools
i18n/validate.py
tags_in_string
def tags_in_string(msg): """ Return the set of tags in a message string. Tags includes HTML tags, data placeholders, etc. Skips tags that might change due to translations: HTML entities, <abbr>, and so on. """ def is_linguistic_tag(tag): """Is this tag one that can change with the language?""" if tag.startswith("&"): return True if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]): return True return False __, tags = Converter().detag_string(msg) return set(t for t in tags if not is_linguistic_tag(t))
python
def tags_in_string(msg): def is_linguistic_tag(tag): if tag.startswith("&"): return True if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]): return True return False __, tags = Converter().detag_string(msg) return set(t for t in tags if not is_linguistic_tag(t))
[ "def", "tags_in_string", "(", "msg", ")", ":", "def", "is_linguistic_tag", "(", "tag", ")", ":", "\"\"\"Is this tag one that can change with the language?\"\"\"", "if", "tag", ".", "startswith", "(", "\"&\"", ")", ":", "return", "True", "if", "any", "(", "x", "in", "tag", "for", "x", "in", "[", "\"<abbr>\"", ",", "\"<abbr \"", ",", "\"</abbr>\"", "]", ")", ":", "return", "True", "return", "False", "__", ",", "tags", "=", "Converter", "(", ")", ".", "detag_string", "(", "msg", ")", "return", "set", "(", "t", "for", "t", "in", "tags", "if", "not", "is_linguistic_tag", "(", "t", ")", ")" ]
Return the set of tags in a message string. Tags includes HTML tags, data placeholders, etc. Skips tags that might change due to translations: HTML entities, <abbr>, and so on.
[ "Return", "the", "set", "of", "tags", "in", "a", "message", "string", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L86-L105
edx/i18n-tools
i18n/validate.py
astral
def astral(msg): """Does `msg` have characters outside the Basic Multilingual Plane?""" # Python2 narrow builds present astral characters as surrogate pairs. # By encoding as utf32, and decoding DWORDS, we can get at the real code # points. utf32 = msg.encode("utf32")[4:] # [4:] to drop the bom code_points = struct.unpack("%dI" % (len(utf32) / 4), utf32) return any(cp > 0xFFFF for cp in code_points)
python
def astral(msg): utf32 = msg.encode("utf32")[4:] code_points = struct.unpack("%dI" % (len(utf32) / 4), utf32) return any(cp > 0xFFFF for cp in code_points)
[ "def", "astral", "(", "msg", ")", ":", "# Python2 narrow builds present astral characters as surrogate pairs.", "# By encoding as utf32, and decoding DWORDS, we can get at the real code", "# points.", "utf32", "=", "msg", ".", "encode", "(", "\"utf32\"", ")", "[", "4", ":", "]", "# [4:] to drop the bom", "code_points", "=", "struct", ".", "unpack", "(", "\"%dI\"", "%", "(", "len", "(", "utf32", ")", "/", "4", ")", ",", "utf32", ")", "return", "any", "(", "cp", ">", "0xFFFF", "for", "cp", "in", "code_points", ")" ]
Does `msg` have characters outside the Basic Multilingual Plane?
[ "Does", "msg", "have", "characters", "outside", "the", "Basic", "Multilingual", "Plane?" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L108-L115
edx/i18n-tools
i18n/validate.py
check_messages
def check_messages(filename, report_empty=False): """ Checks messages in `filename` in various ways: * Translations must have the same slots as the English. * Messages can't have astral characters in them. If `report_empty` is True, will also report empty translation strings. Returns the problems, a list of tuples. Each is a description, a msgid, and then zero or more translations. """ problems = [] pomsgs = polib.pofile(filename) for msg in pomsgs: # Check for characters Javascript can't support. # https://code.djangoproject.com/ticket/21725 if astral(msg.msgstr): problems.append(("Non-BMP char", msg.msgid, msg.msgstr)) if is_format_message(msg): # LONG_DATE_FORMAT, etc, have %s etc in them, and that's ok. continue if msg.msgid_plural: # Plurals: two strings in, N strings out. source = msg.msgid + " | " + msg.msgid_plural translation = " | ".join(v for k, v in sorted(msg.msgstr_plural.items())) empty = any(not t.strip() for t in msg.msgstr_plural.values()) else: # Singular: just one string in and one string out. source = msg.msgid translation = msg.msgstr empty = not msg.msgstr.strip() if empty: if report_empty: problems.append(("Empty translation", source)) else: id_tags = tags_in_string(source) tx_tags = tags_in_string(translation) # Check if tags don't match if id_tags != tx_tags: id_has = u", ".join(sorted(u'"{}"'.format(t) for t in id_tags - tx_tags)) tx_has = u", ".join(sorted(u'"{}"'.format(t) for t in tx_tags - id_tags)) if id_has and tx_has: diff = u"{} vs {}".format(id_has, tx_has) elif id_has: diff = u"{} missing".format(id_has) else: diff = u"{} added".format(tx_has) problems.append(( "Different tags in source and translation", source, translation, diff )) return problems
python
def check_messages(filename, report_empty=False): problems = [] pomsgs = polib.pofile(filename) for msg in pomsgs: if astral(msg.msgstr): problems.append(("Non-BMP char", msg.msgid, msg.msgstr)) if is_format_message(msg): continue if msg.msgid_plural: source = msg.msgid + " | " + msg.msgid_plural translation = " | ".join(v for k, v in sorted(msg.msgstr_plural.items())) empty = any(not t.strip() for t in msg.msgstr_plural.values()) else: source = msg.msgid translation = msg.msgstr empty = not msg.msgstr.strip() if empty: if report_empty: problems.append(("Empty translation", source)) else: id_tags = tags_in_string(source) tx_tags = tags_in_string(translation) if id_tags != tx_tags: id_has = u", ".join(sorted(u'"{}"'.format(t) for t in id_tags - tx_tags)) tx_has = u", ".join(sorted(u'"{}"'.format(t) for t in tx_tags - id_tags)) if id_has and tx_has: diff = u"{} vs {}".format(id_has, tx_has) elif id_has: diff = u"{} missing".format(id_has) else: diff = u"{} added".format(tx_has) problems.append(( "Different tags in source and translation", source, translation, diff )) return problems
[ "def", "check_messages", "(", "filename", ",", "report_empty", "=", "False", ")", ":", "problems", "=", "[", "]", "pomsgs", "=", "polib", ".", "pofile", "(", "filename", ")", "for", "msg", "in", "pomsgs", ":", "# Check for characters Javascript can't support.", "# https://code.djangoproject.com/ticket/21725", "if", "astral", "(", "msg", ".", "msgstr", ")", ":", "problems", ".", "append", "(", "(", "\"Non-BMP char\"", ",", "msg", ".", "msgid", ",", "msg", ".", "msgstr", ")", ")", "if", "is_format_message", "(", "msg", ")", ":", "# LONG_DATE_FORMAT, etc, have %s etc in them, and that's ok.", "continue", "if", "msg", ".", "msgid_plural", ":", "# Plurals: two strings in, N strings out.", "source", "=", "msg", ".", "msgid", "+", "\" | \"", "+", "msg", ".", "msgid_plural", "translation", "=", "\" | \"", ".", "join", "(", "v", "for", "k", ",", "v", "in", "sorted", "(", "msg", ".", "msgstr_plural", ".", "items", "(", ")", ")", ")", "empty", "=", "any", "(", "not", "t", ".", "strip", "(", ")", "for", "t", "in", "msg", ".", "msgstr_plural", ".", "values", "(", ")", ")", "else", ":", "# Singular: just one string in and one string out.", "source", "=", "msg", ".", "msgid", "translation", "=", "msg", ".", "msgstr", "empty", "=", "not", "msg", ".", "msgstr", ".", "strip", "(", ")", "if", "empty", ":", "if", "report_empty", ":", "problems", ".", "append", "(", "(", "\"Empty translation\"", ",", "source", ")", ")", "else", ":", "id_tags", "=", "tags_in_string", "(", "source", ")", "tx_tags", "=", "tags_in_string", "(", "translation", ")", "# Check if tags don't match", "if", "id_tags", "!=", "tx_tags", ":", "id_has", "=", "u\", \"", ".", "join", "(", "sorted", "(", "u'\"{}\"'", ".", "format", "(", "t", ")", "for", "t", "in", "id_tags", "-", "tx_tags", ")", ")", "tx_has", "=", "u\", \"", ".", "join", "(", "sorted", "(", "u'\"{}\"'", ".", "format", "(", "t", ")", "for", "t", "in", "tx_tags", "-", "id_tags", ")", ")", "if", "id_has", "and", "tx_has", ":", "diff", "=", "u\"{} vs 
{}\"", ".", "format", "(", "id_has", ",", "tx_has", ")", "elif", "id_has", ":", "diff", "=", "u\"{} missing\"", ".", "format", "(", "id_has", ")", "else", ":", "diff", "=", "u\"{} added\"", ".", "format", "(", "tx_has", ")", "problems", ".", "append", "(", "(", "\"Different tags in source and translation\"", ",", "source", ",", "translation", ",", "diff", ")", ")", "return", "problems" ]
Checks messages in `filename` in various ways: * Translations must have the same slots as the English. * Messages can't have astral characters in them. If `report_empty` is True, will also report empty translation strings. Returns the problems, a list of tuples. Each is a description, a msgid, and then zero or more translations.
[ "Checks", "messages", "in", "filename", "in", "various", "ways", ":" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L118-L179
edx/i18n-tools
i18n/validate.py
report_problems
def report_problems(filename, problems): """ Report on the problems found in `filename`. `problems` is a list of tuples as returned by `check_messages`. """ problem_file = filename.replace(".po", ".prob") id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9) tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9) with codecs.open(problem_file, "w", encoding="utf8") as prob_file: for problem in problems: desc, msgid = problem[:2] prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid))) info = u"{}\n{}\n".format(desc, id_filler.fill(msgid)) for translation in problem[2:]: prob_file.write(u"{}\n".format(tx_filler.fill(translation))) info += u"{}\n".format(tx_filler.fill(translation)) log.info(info) prob_file.write(u"\n") log.error(" %s problems in %s, details in .prob file", len(problems), filename)
python
def report_problems(filename, problems): problem_file = filename.replace(".po", ".prob") id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9) tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9) with codecs.open(problem_file, "w", encoding="utf8") as prob_file: for problem in problems: desc, msgid = problem[:2] prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid))) info = u"{}\n{}\n".format(desc, id_filler.fill(msgid)) for translation in problem[2:]: prob_file.write(u"{}\n".format(tx_filler.fill(translation))) info += u"{}\n".format(tx_filler.fill(translation)) log.info(info) prob_file.write(u"\n") log.error(" %s problems in %s, details in .prob file", len(problems), filename)
[ "def", "report_problems", "(", "filename", ",", "problems", ")", ":", "problem_file", "=", "filename", ".", "replace", "(", "\".po\"", ",", "\".prob\"", ")", "id_filler", "=", "textwrap", ".", "TextWrapper", "(", "width", "=", "79", ",", "initial_indent", "=", "\" msgid: \"", ",", "subsequent_indent", "=", "\" \"", "*", "9", ")", "tx_filler", "=", "textwrap", ".", "TextWrapper", "(", "width", "=", "79", ",", "initial_indent", "=", "\" -----> \"", ",", "subsequent_indent", "=", "\" \"", "*", "9", ")", "with", "codecs", ".", "open", "(", "problem_file", ",", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "prob_file", ":", "for", "problem", "in", "problems", ":", "desc", ",", "msgid", "=", "problem", "[", ":", "2", "]", "prob_file", ".", "write", "(", "u\"{}\\n{}\\n\"", ".", "format", "(", "desc", ",", "id_filler", ".", "fill", "(", "msgid", ")", ")", ")", "info", "=", "u\"{}\\n{}\\n\"", ".", "format", "(", "desc", ",", "id_filler", ".", "fill", "(", "msgid", ")", ")", "for", "translation", "in", "problem", "[", "2", ":", "]", ":", "prob_file", ".", "write", "(", "u\"{}\\n\"", ".", "format", "(", "tx_filler", ".", "fill", "(", "translation", ")", ")", ")", "info", "+=", "u\"{}\\n\"", ".", "format", "(", "tx_filler", ".", "fill", "(", "translation", ")", ")", "log", ".", "info", "(", "info", ")", "prob_file", ".", "write", "(", "u\"\\n\"", ")", "log", ".", "error", "(", "\" %s problems in %s, details in .prob file\"", ",", "len", "(", "problems", ")", ",", "filename", ")" ]
Report on the problems found in `filename`. `problems` is a list of tuples as returned by `check_messages`.
[ "Report", "on", "the", "problems", "found", "in", "filename", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L182-L203
edx/i18n-tools
i18n/validate.py
Validate.run
def run(self, args): """ Main entry point for script Returns an integer representing the exit code that should be returned by the script. """ exit_code = 0 if args.verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(stream=sys.stdout, level=log_level) languages = args.language or [] locale_dir = self.configuration.locale_dir if not languages: # validate all languages if validate_po_files(self.configuration, locale_dir, report_empty=args.empty, check_all=args.check_all): exit_code = 1 else: # languages will be a list of language codes; test each language. for language in languages: root_dir = self.configuration.locale_dir / language # Assert that a directory for this language code exists on the system if not root_dir.isdir(): log.error(" %s is not a valid directory.\nSkipping language '%s'", root_dir, language) continue # If we found the language code's directory, validate the files. if validate_po_files(self.configuration, locale_dir, root_dir=root_dir, report_empty=args.empty, check_all=args.check_all): exit_code = 1 return exit_code
python
def run(self, args): exit_code = 0 if args.verbose: log_level = logging.INFO else: log_level = logging.WARNING logging.basicConfig(stream=sys.stdout, level=log_level) languages = args.language or [] locale_dir = self.configuration.locale_dir if not languages: if validate_po_files(self.configuration, locale_dir, report_empty=args.empty, check_all=args.check_all): exit_code = 1 else: for language in languages: root_dir = self.configuration.locale_dir / language if not root_dir.isdir(): log.error(" %s is not a valid directory.\nSkipping language '%s'", root_dir, language) continue if validate_po_files(self.configuration, locale_dir, root_dir=root_dir, report_empty=args.empty, check_all=args.check_all): exit_code = 1 return exit_code
[ "def", "run", "(", "self", ",", "args", ")", ":", "exit_code", "=", "0", "if", "args", ".", "verbose", ":", "log_level", "=", "logging", ".", "INFO", "else", ":", "log_level", "=", "logging", ".", "WARNING", "logging", ".", "basicConfig", "(", "stream", "=", "sys", ".", "stdout", ",", "level", "=", "log_level", ")", "languages", "=", "args", ".", "language", "or", "[", "]", "locale_dir", "=", "self", ".", "configuration", ".", "locale_dir", "if", "not", "languages", ":", "# validate all languages", "if", "validate_po_files", "(", "self", ".", "configuration", ",", "locale_dir", ",", "report_empty", "=", "args", ".", "empty", ",", "check_all", "=", "args", ".", "check_all", ")", ":", "exit_code", "=", "1", "else", ":", "# languages will be a list of language codes; test each language.", "for", "language", "in", "languages", ":", "root_dir", "=", "self", ".", "configuration", ".", "locale_dir", "/", "language", "# Assert that a directory for this language code exists on the system", "if", "not", "root_dir", ".", "isdir", "(", ")", ":", "log", ".", "error", "(", "\" %s is not a valid directory.\\nSkipping language '%s'\"", ",", "root_dir", ",", "language", ")", "continue", "# If we found the language code's directory, validate the files.", "if", "validate_po_files", "(", "self", ".", "configuration", ",", "locale_dir", ",", "root_dir", "=", "root_dir", ",", "report_empty", "=", "args", ".", "empty", ",", "check_all", "=", "args", ".", "check_all", ")", ":", "exit_code", "=", "1", "return", "exit_code" ]
Main entry point for script Returns an integer representing the exit code that should be returned by the script.
[ "Main", "entry", "point", "for", "script" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L234-L268
edx/i18n-tools
i18n/generate.py
merge
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True): """ For the given locale, merge the `sources` files to become the `target` file. Note that the target file might also be one of the sources. If fail_if_missing is true, and the files to be merged are missing, throw an Exception, otherwise return silently. If fail_if_missing is false, and the files to be merged are missing, just return silently. """ LOG.info('Merging %s locale %s', target, locale) locale_directory = configuration.get_messages_dir(locale) try: validate_files(locale_directory, sources) except Exception: # pylint: disable=broad-except if not fail_if_missing: return raise # merged file is merged.po merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources) execute(merge_cmd, working_directory=locale_directory) # clean up redunancies in the metadata merged_filename = locale_directory.joinpath('merged.po') duplicate_entries = clean_pofile(merged_filename) # rename merged.po -> django.po (default) target_filename = locale_directory.joinpath(target) os.rename(merged_filename, target_filename) # Write duplicate messages to a file if duplicate_entries: dup_file = target_filename.replace(".po", ".dup") with codecs.open(dup_file, "w", encoding="utf8") as dfile: for (entry, translations) in duplicate_entries: dfile.write(u"{}\n".format(entry)) dfile.write(u"Translations found were:\n\t{}\n\n".format(translations)) LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
python
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True): LOG.info('Merging %s locale %s', target, locale) locale_directory = configuration.get_messages_dir(locale) try: validate_files(locale_directory, sources) except Exception: if not fail_if_missing: return raise merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources) execute(merge_cmd, working_directory=locale_directory) merged_filename = locale_directory.joinpath('merged.po') duplicate_entries = clean_pofile(merged_filename) target_filename = locale_directory.joinpath(target) os.rename(merged_filename, target_filename) if duplicate_entries: dup_file = target_filename.replace(".po", ".dup") with codecs.open(dup_file, "w", encoding="utf8") as dfile: for (entry, translations) in duplicate_entries: dfile.write(u"{}\n".format(entry)) dfile.write(u"Translations found were:\n\t{}\n\n".format(translations)) LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
[ "def", "merge", "(", "configuration", ",", "locale", ",", "target", "=", "'django.po'", ",", "sources", "=", "(", "'django-partial.po'", ",", ")", ",", "fail_if_missing", "=", "True", ")", ":", "LOG", ".", "info", "(", "'Merging %s locale %s'", ",", "target", ",", "locale", ")", "locale_directory", "=", "configuration", ".", "get_messages_dir", "(", "locale", ")", "try", ":", "validate_files", "(", "locale_directory", ",", "sources", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "if", "not", "fail_if_missing", ":", "return", "raise", "# merged file is merged.po", "merge_cmd", "=", "'msgcat -o merged.po '", "+", "' '", ".", "join", "(", "sources", ")", "execute", "(", "merge_cmd", ",", "working_directory", "=", "locale_directory", ")", "# clean up redunancies in the metadata", "merged_filename", "=", "locale_directory", ".", "joinpath", "(", "'merged.po'", ")", "duplicate_entries", "=", "clean_pofile", "(", "merged_filename", ")", "# rename merged.po -> django.po (default)", "target_filename", "=", "locale_directory", ".", "joinpath", "(", "target", ")", "os", ".", "rename", "(", "merged_filename", ",", "target_filename", ")", "# Write duplicate messages to a file", "if", "duplicate_entries", ":", "dup_file", "=", "target_filename", ".", "replace", "(", "\".po\"", ",", "\".dup\"", ")", "with", "codecs", ".", "open", "(", "dup_file", ",", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "dfile", ":", "for", "(", "entry", ",", "translations", ")", "in", "duplicate_entries", ":", "dfile", ".", "write", "(", "u\"{}\\n\"", ".", "format", "(", "entry", ")", ")", "dfile", ".", "write", "(", "u\"Translations found were:\\n\\t{}\\n\\n\"", ".", "format", "(", "translations", ")", ")", "LOG", ".", "warning", "(", "\" %s duplicates in %s, details in .dup file\"", ",", "len", "(", "duplicate_entries", ")", ",", "target_filename", ")" ]
For the given locale, merge the `sources` files to become the `target` file. Note that the target file might also be one of the sources. If fail_if_missing is true, and the files to be merged are missing, throw an Exception, otherwise return silently. If fail_if_missing is false, and the files to be merged are missing, just return silently.
[ "For", "the", "given", "locale", "merge", "the", "sources", "files", "to", "become", "the", "target", "file", ".", "Note", "that", "the", "target", "file", "might", "also", "be", "one", "of", "the", "sources", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L32-L72
edx/i18n-tools
i18n/generate.py
merge_files
def merge_files(configuration, locale, fail_if_missing=True): """ Merge all the files in `locale`, as specified in config.yaml. """ for target, sources in configuration.generate_merge.items(): merge(configuration, locale, target, sources, fail_if_missing)
python
def merge_files(configuration, locale, fail_if_missing=True): for target, sources in configuration.generate_merge.items(): merge(configuration, locale, target, sources, fail_if_missing)
[ "def", "merge_files", "(", "configuration", ",", "locale", ",", "fail_if_missing", "=", "True", ")", ":", "for", "target", ",", "sources", "in", "configuration", ".", "generate_merge", ".", "items", "(", ")", ":", "merge", "(", "configuration", ",", "locale", ",", "target", ",", "sources", ",", "fail_if_missing", ")" ]
Merge all the files in `locale`, as specified in config.yaml.
[ "Merge", "all", "the", "files", "in", "locale", "as", "specified", "in", "config", ".", "yaml", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L75-L80
edx/i18n-tools
i18n/generate.py
clean_pofile
def clean_pofile(pofile_path): """ Clean various aspect of a .po file. Fixes: - Removes the fuzzy flag on metadata. - Removes occurrence line numbers so that the generated files don't generate a lot of line noise when they're committed. Returns a list of any duplicate entries found. """ # Reading in the .po file and saving it again fixes redundancies. pomsgs = pofile(pofile_path) # The msgcat tool marks the metadata as fuzzy, but it's ok as it is. pomsgs.metadata_is_fuzzy = False duplicate_entries = [] for entry in pomsgs: # Remove line numbers entry.occurrences = [(filename, None) for filename, __ in entry.occurrences] # Check for merge conflicts. Pick the first, and emit a warning. if 'fuzzy' in entry.flags: # Remove fuzzy from flags entry.flags = [f for f in entry.flags if f != 'fuzzy'] # Save a warning message dup_msg = 'Multiple translations found for single string.\n\tString "{0}"\n\tPresent in files {1}'.format( entry.msgid, [f for (f, __) in entry.occurrences] ) duplicate_entries.append((dup_msg, entry.msgstr)) # Pick the first entry for msgstr in DUPLICATE_ENTRY_PATTERN.split(entry.msgstr): # Ignore any empty strings that may result from the split call if msgstr: # Set the first one we find to be the right one. Strip to remove extraneous # new lines that exist. entry.msgstr = msgstr.strip() # Raise error if there's new lines starting or ending the id string. if entry.msgid.startswith('\n') or entry.msgid.endswith('\n'): raise ValueError( u'{} starts or ends with a new line character, which is not allowed. ' 'Please fix before continuing. Source string is found in {}'.format( entry.msgid, entry.occurrences ).encode('utf-8') ) break pomsgs.save() return duplicate_entries
python
def clean_pofile(pofile_path): pomsgs = pofile(pofile_path) pomsgs.metadata_is_fuzzy = False duplicate_entries = [] for entry in pomsgs: entry.occurrences = [(filename, None) for filename, __ in entry.occurrences] if 'fuzzy' in entry.flags: entry.flags = [f for f in entry.flags if f != 'fuzzy'] dup_msg = 'Multiple translations found for single string.\n\tString "{0}"\n\tPresent in files {1}'.format( entry.msgid, [f for (f, __) in entry.occurrences] ) duplicate_entries.append((dup_msg, entry.msgstr)) for msgstr in DUPLICATE_ENTRY_PATTERN.split(entry.msgstr): if msgstr: entry.msgstr = msgstr.strip() if entry.msgid.startswith('\n') or entry.msgid.endswith('\n'): raise ValueError( u'{} starts or ends with a new line character, which is not allowed. ' 'Please fix before continuing. Source string is found in {}'.format( entry.msgid, entry.occurrences ).encode('utf-8') ) break pomsgs.save() return duplicate_entries
[ "def", "clean_pofile", "(", "pofile_path", ")", ":", "# Reading in the .po file and saving it again fixes redundancies.", "pomsgs", "=", "pofile", "(", "pofile_path", ")", "# The msgcat tool marks the metadata as fuzzy, but it's ok as it is.", "pomsgs", ".", "metadata_is_fuzzy", "=", "False", "duplicate_entries", "=", "[", "]", "for", "entry", "in", "pomsgs", ":", "# Remove line numbers", "entry", ".", "occurrences", "=", "[", "(", "filename", ",", "None", ")", "for", "filename", ",", "__", "in", "entry", ".", "occurrences", "]", "# Check for merge conflicts. Pick the first, and emit a warning.", "if", "'fuzzy'", "in", "entry", ".", "flags", ":", "# Remove fuzzy from flags", "entry", ".", "flags", "=", "[", "f", "for", "f", "in", "entry", ".", "flags", "if", "f", "!=", "'fuzzy'", "]", "# Save a warning message", "dup_msg", "=", "'Multiple translations found for single string.\\n\\tString \"{0}\"\\n\\tPresent in files {1}'", ".", "format", "(", "entry", ".", "msgid", ",", "[", "f", "for", "(", "f", ",", "__", ")", "in", "entry", ".", "occurrences", "]", ")", "duplicate_entries", ".", "append", "(", "(", "dup_msg", ",", "entry", ".", "msgstr", ")", ")", "# Pick the first entry", "for", "msgstr", "in", "DUPLICATE_ENTRY_PATTERN", ".", "split", "(", "entry", ".", "msgstr", ")", ":", "# Ignore any empty strings that may result from the split call", "if", "msgstr", ":", "# Set the first one we find to be the right one. Strip to remove extraneous", "# new lines that exist.", "entry", ".", "msgstr", "=", "msgstr", ".", "strip", "(", ")", "# Raise error if there's new lines starting or ending the id string.", "if", "entry", ".", "msgid", ".", "startswith", "(", "'\\n'", ")", "or", "entry", ".", "msgid", ".", "endswith", "(", "'\\n'", ")", ":", "raise", "ValueError", "(", "u'{} starts or ends with a new line character, which is not allowed. '", "'Please fix before continuing. 
Source string is found in {}'", ".", "format", "(", "entry", ".", "msgid", ",", "entry", ".", "occurrences", ")", ".", "encode", "(", "'utf-8'", ")", ")", "break", "pomsgs", ".", "save", "(", ")", "return", "duplicate_entries" ]
Clean various aspect of a .po file. Fixes: - Removes the fuzzy flag on metadata. - Removes occurrence line numbers so that the generated files don't generate a lot of line noise when they're committed. Returns a list of any duplicate entries found.
[ "Clean", "various", "aspect", "of", "a", ".", "po", "file", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L83-L135
edx/i18n-tools
i18n/generate.py
validate_files
def validate_files(directory, files_to_merge): """ Asserts that the given files exist. files_to_merge is a list of file names (no directories). directory is the directory (a path object from path.py) in which the files should appear. raises an Exception if any of the files are not in dir. """ for file_path in files_to_merge: pathname = directory.joinpath(file_path) if not pathname.exists(): raise Exception("I18N: Cannot generate because file not found: {0}".format(pathname)) # clean sources clean_pofile(pathname)
python
def validate_files(directory, files_to_merge): for file_path in files_to_merge: pathname = directory.joinpath(file_path) if not pathname.exists(): raise Exception("I18N: Cannot generate because file not found: {0}".format(pathname)) clean_pofile(pathname)
[ "def", "validate_files", "(", "directory", ",", "files_to_merge", ")", ":", "for", "file_path", "in", "files_to_merge", ":", "pathname", "=", "directory", ".", "joinpath", "(", "file_path", ")", "if", "not", "pathname", ".", "exists", "(", ")", ":", "raise", "Exception", "(", "\"I18N: Cannot generate because file not found: {0}\"", ".", "format", "(", "pathname", ")", ")", "# clean sources", "clean_pofile", "(", "pathname", ")" ]
Asserts that the given files exist. files_to_merge is a list of file names (no directories). directory is the directory (a path object from path.py) in which the files should appear. raises an Exception if any of the files are not in dir.
[ "Asserts", "that", "the", "given", "files", "exist", ".", "files_to_merge", "is", "a", "list", "of", "file", "names", "(", "no", "directories", ")", ".", "directory", "is", "the", "directory", "(", "a", "path", "object", "from", "path", ".", "py", ")", "in", "which", "the", "files", "should", "appear", ".", "raises", "an", "Exception", "if", "any", "of", "the", "files", "are", "not", "in", "dir", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L138-L150
edx/i18n-tools
i18n/generate.py
Generate.run
def run(self, args): """ Main entry point for script """ logging.basicConfig(stream=sys.stdout, level=logging.INFO) configuration = self.configuration if args.ltr: langs = configuration.ltr_langs elif args.rtl: langs = configuration.rtl_langs else: langs = configuration.translated_locales for locale in langs: merge_files(configuration, locale, fail_if_missing=args.strict) # Dummy text is not required. Don't raise exception if files are missing. for locale in configuration.dummy_locales: merge_files(configuration, locale, fail_if_missing=False) # Merge the source locale, so we have the canonical .po files. if configuration.source_locale not in langs: merge_files(configuration, configuration.source_locale, fail_if_missing=args.strict) compile_cmd = 'django-admin.py compilemessages -v{}'.format(args.verbose) if args.verbose: stderr = None else: stderr = DEVNULL execute(compile_cmd, working_directory=configuration.root_dir, stderr=stderr) # Check for any mapped languages and copy directories around accordingly for source_locale, dest_locale in configuration.edx_lang_map.items(): source_dirname = configuration.get_messages_dir(source_locale) dest_dirname = configuration.get_messages_dir(dest_locale) LOG.info("Copying mapped locale %s to %s", source_dirname, dest_dirname) path.rmtree_p(path(dest_dirname)) path.copytree(path(source_dirname), path(dest_dirname))
python
def run(self, args): logging.basicConfig(stream=sys.stdout, level=logging.INFO) configuration = self.configuration if args.ltr: langs = configuration.ltr_langs elif args.rtl: langs = configuration.rtl_langs else: langs = configuration.translated_locales for locale in langs: merge_files(configuration, locale, fail_if_missing=args.strict) for locale in configuration.dummy_locales: merge_files(configuration, locale, fail_if_missing=False) if configuration.source_locale not in langs: merge_files(configuration, configuration.source_locale, fail_if_missing=args.strict) compile_cmd = 'django-admin.py compilemessages -v{}'.format(args.verbose) if args.verbose: stderr = None else: stderr = DEVNULL execute(compile_cmd, working_directory=configuration.root_dir, stderr=stderr) for source_locale, dest_locale in configuration.edx_lang_map.items(): source_dirname = configuration.get_messages_dir(source_locale) dest_dirname = configuration.get_messages_dir(dest_locale) LOG.info("Copying mapped locale %s to %s", source_dirname, dest_dirname) path.rmtree_p(path(dest_dirname)) path.copytree(path(source_dirname), path(dest_dirname))
[ "def", "run", "(", "self", ",", "args", ")", ":", "logging", ".", "basicConfig", "(", "stream", "=", "sys", ".", "stdout", ",", "level", "=", "logging", ".", "INFO", ")", "configuration", "=", "self", ".", "configuration", "if", "args", ".", "ltr", ":", "langs", "=", "configuration", ".", "ltr_langs", "elif", "args", ".", "rtl", ":", "langs", "=", "configuration", ".", "rtl_langs", "else", ":", "langs", "=", "configuration", ".", "translated_locales", "for", "locale", "in", "langs", ":", "merge_files", "(", "configuration", ",", "locale", ",", "fail_if_missing", "=", "args", ".", "strict", ")", "# Dummy text is not required. Don't raise exception if files are missing.", "for", "locale", "in", "configuration", ".", "dummy_locales", ":", "merge_files", "(", "configuration", ",", "locale", ",", "fail_if_missing", "=", "False", ")", "# Merge the source locale, so we have the canonical .po files.", "if", "configuration", ".", "source_locale", "not", "in", "langs", ":", "merge_files", "(", "configuration", ",", "configuration", ".", "source_locale", ",", "fail_if_missing", "=", "args", ".", "strict", ")", "compile_cmd", "=", "'django-admin.py compilemessages -v{}'", ".", "format", "(", "args", ".", "verbose", ")", "if", "args", ".", "verbose", ":", "stderr", "=", "None", "else", ":", "stderr", "=", "DEVNULL", "execute", "(", "compile_cmd", ",", "working_directory", "=", "configuration", ".", "root_dir", ",", "stderr", "=", "stderr", ")", "# Check for any mapped languages and copy directories around accordingly", "for", "source_locale", ",", "dest_locale", "in", "configuration", ".", "edx_lang_map", ".", "items", "(", ")", ":", "source_dirname", "=", "configuration", ".", "get_messages_dir", "(", "source_locale", ")", "dest_dirname", "=", "configuration", ".", "get_messages_dir", "(", "dest_locale", ")", "LOG", ".", "info", "(", "\"Copying mapped locale %s to %s\"", ",", "source_dirname", ",", "dest_dirname", ")", "path", ".", "rmtree_p", "(", "path", "(", 
"dest_dirname", ")", ")", "path", ".", "copytree", "(", "path", "(", "source_dirname", ")", ",", "path", "(", "dest_dirname", ")", ")" ]
Main entry point for script
[ "Main", "entry", "point", "for", "script" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L161-L198
edx/i18n-tools
i18n/dummy.py
make_dummy
def make_dummy(filename, locale, converter): """ Takes a source po file, reads it, and writes out a new po file in :param locale: containing a dummy translation. """ if not Path(filename).exists(): raise IOError('File does not exist: %r' % filename) pofile = polib.pofile(filename) for msg in pofile: # Some strings are actually formatting strings, don't dummy-ify them, # or dates will look like "DÀTÉ_TÌMÉ_FÖRMÀT Ⱡ'σ# EST" if is_format_message(msg): continue converter.convert_msg(msg) pofile.metadata['Language'] = locale # Apply declaration for English pluralization rules so that ngettext will # do something reasonable. pofile.metadata['Plural-Forms'] = 'nplurals=2; plural=(n != 1);' new_file = new_filename(filename, locale) new_file.parent.makedirs_p() pofile.save(new_file) clean_pofile(new_file)
python
def make_dummy(filename, locale, converter): if not Path(filename).exists(): raise IOError('File does not exist: %r' % filename) pofile = polib.pofile(filename) for msg in pofile: if is_format_message(msg): continue converter.convert_msg(msg) pofile.metadata['Language'] = locale pofile.metadata['Plural-Forms'] = 'nplurals=2; plural=(n != 1);' new_file = new_filename(filename, locale) new_file.parent.makedirs_p() pofile.save(new_file) clean_pofile(new_file)
[ "def", "make_dummy", "(", "filename", ",", "locale", ",", "converter", ")", ":", "if", "not", "Path", "(", "filename", ")", ".", "exists", "(", ")", ":", "raise", "IOError", "(", "'File does not exist: %r'", "%", "filename", ")", "pofile", "=", "polib", ".", "pofile", "(", "filename", ")", "for", "msg", "in", "pofile", ":", "# Some strings are actually formatting strings, don't dummy-ify them,", "# or dates will look like \"DÀTÉ_TÌMÉ_FÖRMÀT Ⱡ'σ# EST\"", "if", "is_format_message", "(", "msg", ")", ":", "continue", "converter", ".", "convert_msg", "(", "msg", ")", "pofile", ".", "metadata", "[", "'Language'", "]", "=", "locale", "# Apply declaration for English pluralization rules so that ngettext will", "# do something reasonable.", "pofile", ".", "metadata", "[", "'Plural-Forms'", "]", "=", "'nplurals=2; plural=(n != 1);'", "new_file", "=", "new_filename", "(", "filename", ",", "locale", ")", "new_file", ".", "parent", ".", "makedirs_p", "(", ")", "pofile", ".", "save", "(", "new_file", ")", "clean_pofile", "(", "new_file", ")" ]
Takes a source po file, reads it, and writes out a new po file in :param locale: containing a dummy translation.
[ "Takes", "a", "source", "po", "file", "reads", "it", "and", "writes", "out", "a", "new", "po", "file", "in", ":", "param", "locale", ":", "containing", "a", "dummy", "translation", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L190-L214
edx/i18n-tools
i18n/dummy.py
new_filename
def new_filename(original_filename, new_locale): """Returns a filename derived from original_filename, using new_locale as the locale""" orig_file = Path(original_filename) new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name return new_file.abspath()
python
def new_filename(original_filename, new_locale): orig_file = Path(original_filename) new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name return new_file.abspath()
[ "def", "new_filename", "(", "original_filename", ",", "new_locale", ")", ":", "orig_file", "=", "Path", "(", "original_filename", ")", "new_file", "=", "orig_file", ".", "parent", ".", "parent", ".", "parent", "/", "new_locale", "/", "orig_file", ".", "parent", ".", "name", "/", "orig_file", ".", "name", "return", "new_file", ".", "abspath", "(", ")" ]
Returns a filename derived from original_filename, using new_locale as the locale
[ "Returns", "a", "filename", "derived", "from", "original_filename", "using", "new_locale", "as", "the", "locale" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L217-L221
edx/i18n-tools
i18n/dummy.py
BaseDummyConverter.convert_msg
def convert_msg(self, msg): """ Takes one POEntry object and converts it (adds a dummy translation to it) msg is an instance of polib.POEntry """ source = msg.msgid if not source: # don't translate empty string return plural = msg.msgid_plural if plural: # translate singular and plural foreign_single = self.convert(source) foreign_plural = self.convert(plural) plural = { '0': self.final_newline(source, foreign_single), '1': self.final_newline(plural, foreign_plural), } msg.msgstr_plural = plural else: foreign = self.convert(source) msg.msgstr = self.final_newline(source, foreign)
python
def convert_msg(self, msg): source = msg.msgid if not source: return plural = msg.msgid_plural if plural: foreign_single = self.convert(source) foreign_plural = self.convert(plural) plural = { '0': self.final_newline(source, foreign_single), '1': self.final_newline(plural, foreign_plural), } msg.msgstr_plural = plural else: foreign = self.convert(source) msg.msgstr = self.final_newline(source, foreign)
[ "def", "convert_msg", "(", "self", ",", "msg", ")", ":", "source", "=", "msg", ".", "msgid", "if", "not", "source", ":", "# don't translate empty string", "return", "plural", "=", "msg", ".", "msgid_plural", "if", "plural", ":", "# translate singular and plural", "foreign_single", "=", "self", ".", "convert", "(", "source", ")", "foreign_plural", "=", "self", ".", "convert", "(", "plural", ")", "plural", "=", "{", "'0'", ":", "self", ".", "final_newline", "(", "source", ",", "foreign_single", ")", ",", "'1'", ":", "self", ".", "final_newline", "(", "plural", ",", "foreign_plural", ")", ",", "}", "msg", ".", "msgstr_plural", "=", "plural", "else", ":", "foreign", "=", "self", ".", "convert", "(", "source", ")", "msg", ".", "msgstr", "=", "self", ".", "final_newline", "(", "source", ",", "foreign", ")" ]
Takes one POEntry object and converts it (adds a dummy translation to it) msg is an instance of polib.POEntry
[ "Takes", "one", "POEntry", "object", "and", "converts", "it", "(", "adds", "a", "dummy", "translation", "to", "it", ")", "msg", "is", "an", "instance", "of", "polib", ".", "POEntry" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L68-L90
edx/i18n-tools
i18n/dummy.py
Dummy.pad
def pad(self, string): """ Add some lorem ipsum text to the end of string to simulate more verbose languages (like German). Padding factor extrapolated by guidelines at http://www.w3.org/International/articles/article-text-size.en """ size = len(string) target = size * (4.75 - size ** 0.27) pad_len = int(target) - size return string + self.LOREM[:pad_len] + "#"
python
def pad(self, string): size = len(string) target = size * (4.75 - size ** 0.27) pad_len = int(target) - size return string + self.LOREM[:pad_len] + "
[ "def", "pad", "(", "self", ",", "string", ")", ":", "size", "=", "len", "(", "string", ")", "target", "=", "size", "*", "(", "4.75", "-", "size", "**", "0.27", ")", "pad_len", "=", "int", "(", "target", ")", "-", "size", "return", "string", "+", "self", ".", "LOREM", "[", ":", "pad_len", "]", "+", "\"#\"" ]
Add some lorem ipsum text to the end of string to simulate more verbose languages (like German). Padding factor extrapolated by guidelines at http://www.w3.org/International/articles/article-text-size.en
[ "Add", "some", "lorem", "ipsum", "text", "to", "the", "end", "of", "string", "to", "simulate", "more", "verbose", "languages", "(", "like", "German", ")", ".", "Padding", "factor", "extrapolated", "by", "guidelines", "at", "http", ":", "//", "www", ".", "w3", ".", "org", "/", "International", "/", "articles", "/", "article", "-", "text", "-", "size", ".", "en" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L156-L164
edx/i18n-tools
i18n/dummy.py
DummyCommand.run
def run(self, args): """ Generate dummy strings for all source po files. """ configuration = self.configuration source_messages_dir = configuration.source_messages_dir for locale, converter in zip(configuration.dummy_locales, [Dummy(), Dummy2(), ArabicDummy()]): print('Processing source language files into dummy strings, locale "{}"'.format(locale)) for source_file in configuration.source_messages_dir.walkfiles('*.po'): if args.verbose: print(' ', source_file.relpath()) make_dummy(source_messages_dir.joinpath(source_file), locale, converter) if args.verbose: print()
python
def run(self, args): configuration = self.configuration source_messages_dir = configuration.source_messages_dir for locale, converter in zip(configuration.dummy_locales, [Dummy(), Dummy2(), ArabicDummy()]): print('Processing source language files into dummy strings, locale "{}"'.format(locale)) for source_file in configuration.source_messages_dir.walkfiles('*.po'): if args.verbose: print(' ', source_file.relpath()) make_dummy(source_messages_dir.joinpath(source_file), locale, converter) if args.verbose: print()
[ "def", "run", "(", "self", ",", "args", ")", ":", "configuration", "=", "self", ".", "configuration", "source_messages_dir", "=", "configuration", ".", "source_messages_dir", "for", "locale", ",", "converter", "in", "zip", "(", "configuration", ".", "dummy_locales", ",", "[", "Dummy", "(", ")", ",", "Dummy2", "(", ")", ",", "ArabicDummy", "(", ")", "]", ")", ":", "print", "(", "'Processing source language files into dummy strings, locale \"{}\"'", ".", "format", "(", "locale", ")", ")", "for", "source_file", "in", "configuration", ".", "source_messages_dir", ".", "walkfiles", "(", "'*.po'", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "' '", ",", "source_file", ".", "relpath", "(", ")", ")", "make_dummy", "(", "source_messages_dir", ".", "joinpath", "(", "source_file", ")", ",", "locale", ",", "converter", ")", "if", "args", ".", "verbose", ":", "print", "(", ")" ]
Generate dummy strings for all source po files.
[ "Generate", "dummy", "strings", "for", "all", "source", "po", "files", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L235-L248
edx/i18n-tools
i18n/execute.py
execute
def execute(command, working_directory=config.BASE_DIR, stderr=sp.STDOUT): """ Executes shell command in a given working_directory. Command is a string to pass to the shell. Output is ignored. """ LOG.info("Executing in %s ...", working_directory) LOG.info(command) sp.check_call(command, cwd=working_directory, stderr=stderr, shell=True)
python
def execute(command, working_directory=config.BASE_DIR, stderr=sp.STDOUT): LOG.info("Executing in %s ...", working_directory) LOG.info(command) sp.check_call(command, cwd=working_directory, stderr=stderr, shell=True)
[ "def", "execute", "(", "command", ",", "working_directory", "=", "config", ".", "BASE_DIR", ",", "stderr", "=", "sp", ".", "STDOUT", ")", ":", "LOG", ".", "info", "(", "\"Executing in %s ...\"", ",", "working_directory", ")", "LOG", ".", "info", "(", "command", ")", "sp", ".", "check_call", "(", "command", ",", "cwd", "=", "working_directory", ",", "stderr", "=", "stderr", ",", "shell", "=", "True", ")" ]
Executes shell command in a given working_directory. Command is a string to pass to the shell. Output is ignored.
[ "Executes", "shell", "command", "in", "a", "given", "working_directory", ".", "Command", "is", "a", "string", "to", "pass", "to", "the", "shell", ".", "Output", "is", "ignored", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/execute.py#L13-L21
edx/i18n-tools
i18n/execute.py
call
def call(command, working_directory=config.BASE_DIR): """ Executes shell command in a given working_directory. Command is a list of strings to execute as a command line. Returns a tuple of two byte strings: (stdout, stderr) """ LOG.info(command) proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, cwd=working_directory, shell=True) out, err = proc.communicate() return (out, err)
python
def call(command, working_directory=config.BASE_DIR): LOG.info(command) proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, cwd=working_directory, shell=True) out, err = proc.communicate() return (out, err)
[ "def", "call", "(", "command", ",", "working_directory", "=", "config", ".", "BASE_DIR", ")", ":", "LOG", ".", "info", "(", "command", ")", "proc", "=", "sp", ".", "Popen", "(", "command", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ",", "cwd", "=", "working_directory", ",", "shell", "=", "True", ")", "out", ",", "err", "=", "proc", ".", "communicate", "(", ")", "return", "(", "out", ",", "err", ")" ]
Executes shell command in a given working_directory. Command is a list of strings to execute as a command line. Returns a tuple of two byte strings: (stdout, stderr)
[ "Executes", "shell", "command", "in", "a", "given", "working_directory", ".", "Command", "is", "a", "list", "of", "strings", "to", "execute", "as", "a", "command", "line", ".", "Returns", "a", "tuple", "of", "two", "byte", "strings", ":", "(", "stdout", "stderr", ")" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/execute.py#L24-L34
edx/i18n-tools
i18n/execute.py
remove_file
def remove_file(filename, verbose=True): """ Attempt to delete filename. log is boolean. If true, removal is logged. Log a warning if file does not exist. Logging filenames are relative to config.BASE_DIR to cut down on noise in output. """ if verbose: LOG.info('Deleting file %s', os.path.relpath(filename, config.BASE_DIR)) if not os.path.exists(filename): LOG.warning("File does not exist: %s", os.path.relpath(filename, config.BASE_DIR)) else: os.remove(filename)
python
def remove_file(filename, verbose=True): if verbose: LOG.info('Deleting file %s', os.path.relpath(filename, config.BASE_DIR)) if not os.path.exists(filename): LOG.warning("File does not exist: %s", os.path.relpath(filename, config.BASE_DIR)) else: os.remove(filename)
[ "def", "remove_file", "(", "filename", ",", "verbose", "=", "True", ")", ":", "if", "verbose", ":", "LOG", ".", "info", "(", "'Deleting file %s'", ",", "os", ".", "path", ".", "relpath", "(", "filename", ",", "config", ".", "BASE_DIR", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOG", ".", "warning", "(", "\"File does not exist: %s\"", ",", "os", ".", "path", ".", "relpath", "(", "filename", ",", "config", ".", "BASE_DIR", ")", ")", "else", ":", "os", ".", "remove", "(", "filename", ")" ]
Attempt to delete filename. log is boolean. If true, removal is logged. Log a warning if file does not exist. Logging filenames are relative to config.BASE_DIR to cut down on noise in output.
[ "Attempt", "to", "delete", "filename", ".", "log", "is", "boolean", ".", "If", "true", "removal", "is", "logged", ".", "Log", "a", "warning", "if", "file", "does", "not", "exist", ".", "Logging", "filenames", "are", "relative", "to", "config", ".", "BASE_DIR", "to", "cut", "down", "on", "noise", "in", "output", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/execute.py#L37-L49
edx/i18n-tools
i18n/transifex.py
push
def push(*resources): """ Push translation source English files to Transifex. Arguments name specific resources to push. Otherwise, push all the source files. """ cmd = 'tx push -s' if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd)
python
def push(*resources): cmd = 'tx push -s' if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd)
[ "def", "push", "(", "*", "resources", ")", ":", "cmd", "=", "'tx push -s'", "if", "resources", ":", "for", "resource", "in", "resources", ":", "execute", "(", "cmd", "+", "' -r {resource}'", ".", "format", "(", "resource", "=", "resource", ")", ")", "else", ":", "execute", "(", "cmd", ")" ]
Push translation source English files to Transifex. Arguments name specific resources to push. Otherwise, push all the source files.
[ "Push", "translation", "source", "English", "files", "to", "Transifex", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L17-L29
edx/i18n-tools
i18n/transifex.py
pull
def pull(configuration, *resources): """ Pull translations from all languages listed in conf/locale/config.yaml where there is at least 10% reviewed translations. If arguments are provided, they are specific resources to pull. Otherwise, all resources are pulled. """ print("Pulling conf/locale/config.yaml:locales from Transifex...") for lang in configuration.translated_locales: cmd = 'tx pull -f --mode=reviewed --minimum-perc=3 -l {lang}'.format(lang=lang) if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd) clean_translated_locales(configuration)
python
def pull(configuration, *resources): print("Pulling conf/locale/config.yaml:locales from Transifex...") for lang in configuration.translated_locales: cmd = 'tx pull -f --mode=reviewed --minimum-perc=3 -l {lang}'.format(lang=lang) if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd) clean_translated_locales(configuration)
[ "def", "pull", "(", "configuration", ",", "*", "resources", ")", ":", "print", "(", "\"Pulling conf/locale/config.yaml:locales from Transifex...\"", ")", "for", "lang", "in", "configuration", ".", "translated_locales", ":", "cmd", "=", "'tx pull -f --mode=reviewed --minimum-perc=3 -l {lang}'", ".", "format", "(", "lang", "=", "lang", ")", "if", "resources", ":", "for", "resource", "in", "resources", ":", "execute", "(", "cmd", "+", "' -r {resource}'", ".", "format", "(", "resource", "=", "resource", ")", ")", "else", ":", "execute", "(", "cmd", ")", "clean_translated_locales", "(", "configuration", ")" ]
Pull translations from all languages listed in conf/locale/config.yaml where there is at least 10% reviewed translations. If arguments are provided, they are specific resources to pull. Otherwise, all resources are pulled.
[ "Pull", "translations", "from", "all", "languages", "listed", "in", "conf", "/", "locale", "/", "config", ".", "yaml", "where", "there", "is", "at", "least", "10%", "reviewed", "translations", "." ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L47-L65
edx/i18n-tools
i18n/transifex.py
pull_all_ltr
def pull_all_ltr(configuration): """ Pulls all translations - reviewed or not - for LTR languages """ print("Pulling all translated LTR languages from transifex...") for lang in configuration.ltr_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.ltr_langs)
python
def pull_all_ltr(configuration): print("Pulling all translated LTR languages from transifex...") for lang in configuration.ltr_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.ltr_langs)
[ "def", "pull_all_ltr", "(", "configuration", ")", ":", "print", "(", "\"Pulling all translated LTR languages from transifex...\"", ")", "for", "lang", "in", "configuration", ".", "ltr_langs", ":", "print", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'tx pull -l '", "+", "lang", ")", "clean_translated_locales", "(", "configuration", ",", "langs", "=", "configuration", ".", "ltr_langs", ")" ]
Pulls all translations - reviewed or not - for LTR languages
[ "Pulls", "all", "translations", "-", "reviewed", "or", "not", "-", "for", "LTR", "languages" ]
train
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L79-L88