Dataset columns (string columns show min/max length; id shows its value range):

column             dtype       min    max
id                 int32       0      252k
repo               string      7      55
path               string      4      127
func_name          string      1      88
original_string    string      75     19.8k
language           string      (1 class)
code               string      75     19.8k
code_tokens        sequence    -      -
docstring          string      3      17.3k
docstring_tokens   sequence    -      -
sha                string      40     40
url                string      87     242

1,200
1,200
TracyWebTech/django-revproxy
revproxy/views.py
ProxyView.get_encoded_query_params
def get_encoded_query_params(self):
    """Return encoded query params to be used in proxied request"""
    get_data = encode_items(self.request.GET.lists())
    return urlencode(get_data)
python
def get_encoded_query_params(self):
    """Return encoded query params to be used in proxied request"""
    get_data = encode_items(self.request.GET.lists())
    return urlencode(get_data)
[ "def", "get_encoded_query_params", "(", "self", ")", ":", "get_data", "=", "encode_items", "(", "self", ".", "request", ".", "GET", ".", "lists", "(", ")", ")", "return", "urlencode", "(", "get_data", ")" ]
Return encoded query params to be used in proxied request
[ "Return", "encoded", "query", "params", "to", "be", "used", "in", "proxied", "request" ]
b8d1d9e44eadbafbd16bc03f04d15560089d4472
https://github.com/TracyWebTech/django-revproxy/blob/b8d1d9e44eadbafbd16bc03f04d15560089d4472/revproxy/views.py#L140-L143
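A quick illustration of the urlencode step above (encode_items is a revproxy helper not shown in this row): multi-value params, in the (key, [values]) shape that Django's QueryDict.lists() yields, survive re-encoding when doseq=True is set.

from urllib.parse import urlencode

# (key, [values]) pairs, the same shape QueryDict.lists() yields.
pairs = [("tag", ["a", "b"]), ("q", ["caf\u00e9"])]

# doseq=True expands each value list into repeated keys.
print(urlencode(pairs, doseq=True))  # tag=a&tag=b&q=caf%C3%A9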
1,201
jsfenfen/990-xml-reader
irs_reader/file_utils.py
stream_download
def stream_download(url, target_path, verbose=False):
    """ Download a large file without loading it into memory. """
    response = requests.get(url, stream=True)
    handle = open(target_path, "wb")
    if verbose:
        print("Beginning streaming download of %s" % url)
        start = datetime.now()
        try:
            content_length = int(response.headers['Content-Length'])
            content_MB = content_length/1048576.0
            print("Total file size: %.2f MB" % content_MB)
        except KeyError:
            pass  # allow Content-Length to be missing
    for chunk in response.iter_content(chunk_size=512):
        if chunk:  # filter out keep-alive new chunks
            handle.write(chunk)
    if verbose:
        print(
            "Download completed to %s in %s"
            % (target_path, datetime.now() - start))
python
def stream_download(url, target_path, verbose=False):
    """ Download a large file without loading it into memory. """
    response = requests.get(url, stream=True)
    handle = open(target_path, "wb")
    if verbose:
        print("Beginning streaming download of %s" % url)
        start = datetime.now()
        try:
            content_length = int(response.headers['Content-Length'])
            content_MB = content_length/1048576.0
            print("Total file size: %.2f MB" % content_MB)
        except KeyError:
            pass  # allow Content-Length to be missing
    for chunk in response.iter_content(chunk_size=512):
        if chunk:  # filter out keep-alive new chunks
            handle.write(chunk)
    if verbose:
        print(
            "Download completed to %s in %s"
            % (target_path, datetime.now() - start))
[ "def", "stream_download", "(", "url", ",", "target_path", ",", "verbose", "=", "False", ")", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "handle", "=", "open", "(", "target_path", ",", "\"wb\"", ")", "if", "verbose", ":", "print", "(", "\"Beginning streaming download of %s\"", "%", "url", ")", "start", "=", "datetime", ".", "now", "(", ")", "try", ":", "content_length", "=", "int", "(", "response", ".", "headers", "[", "'Content-Length'", "]", ")", "content_MB", "=", "content_length", "/", "1048576.0", "print", "(", "\"Total file size: %.2f MB\"", "%", "content_MB", ")", "except", "KeyError", ":", "pass", "# allow Content-Length to be missing", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "512", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "handle", ".", "write", "(", "chunk", ")", "if", "verbose", ":", "print", "(", "\"Download completed to %s in %s\"", "%", "(", "target_path", ",", "datetime", ".", "now", "(", ")", "-", "start", ")", ")" ]
Download a large file without loading it into memory.
[ "Download", "a", "large", "file", "without", "loading", "it", "into", "memory", "." ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/file_utils.py#L20-L39
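The function above never closes the response or the file handle. A minimal sketch of the same streaming pattern with context managers (an editorial variant, not the repo's code):

import requests

def stream_download_v2(url, target_path, chunk_size=8192):
    """Stream a download to disk; connection and file are always closed."""
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with open(target_path, "wb") as handle:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:  # skip keep-alive chunks
                    handle.write(chunk)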
1,202
jsfenfen/990-xml-reader
irs_reader/file_utils.py
validate_object_id
def validate_object_id(object_id):
    """ It's easy to make a mistake entering these, validate the format """
    result = re.match(OBJECT_ID_RE, str(object_id))
    if not result:
        print("'%s' appears not to be a valid 990 object_id" % object_id)
        raise RuntimeError(OBJECT_ID_MSG)
    return object_id
python
def validate_object_id(object_id):
    """ It's easy to make a mistake entering these, validate the format """
    result = re.match(OBJECT_ID_RE, str(object_id))
    if not result:
        print("'%s' appears not to be a valid 990 object_id" % object_id)
        raise RuntimeError(OBJECT_ID_MSG)
    return object_id
[ "def", "validate_object_id", "(", "object_id", ")", ":", "result", "=", "re", ".", "match", "(", "OBJECT_ID_RE", ",", "str", "(", "object_id", ")", ")", "if", "not", "result", ":", "print", "(", "\"'%s' appears not to be a valid 990 object_id\"", "%", "object_id", ")", "raise", "RuntimeError", "(", "OBJECT_ID_MSG", ")", "return", "object_id" ]
It's easy to make a mistake entering these, validate the format
[ "It", "s", "easy", "to", "make", "a", "mistake", "entering", "these", "validate", "the", "format" ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/file_utils.py#L42-L48
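OBJECT_ID_RE and OBJECT_ID_MSG are module constants not shown in this row. A plausible stand-in, assuming the 18-character ids implied by the String(18) db_type in the next row:

import re

# Assumed pattern -- the real OBJECT_ID_RE lives in the repo and may
# differ; 18 digits matches the String(18) object_id column below.
OBJECT_ID_RE = r"^\d{18}$"

print(bool(re.match(OBJECT_ID_RE, "201103169349300325")))  # True
print(bool(re.match(OBJECT_ID_RE, "2011-03-169")))         # False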
1,203
jsfenfen/990-xml-reader
irs_reader/sked_dict_reader.py
SkedDictReader._get_table_start
def _get_table_start(self):
    """ prefill the columns we need for all tables """
    if self.documentation:
        standardized_table_start = {
            'object_id': {
                'value': self.object_id,
                'ordering': -1,
                'line_number': 'NA',
                'description': 'IRS-assigned object id',
                'db_type': 'String(18)'
            },
            'ein': {
                'value': self.ein,
                'ordering': -2,
                'line_number': 'NA',
                'description': 'IRS employer id number',
                'db_type': 'String(9)'
            }
        }
        if self.documentId:
            standardized_table_start['documentId'] = {
                'value': self.documentId,
                'description': 'Document ID',
                'ordering': 0
            }
    else:
        standardized_table_start = {
            'object_id': self.object_id,
            'ein': self.ein
        }
        if self.documentId:
            standardized_table_start['documentId'] = self.documentId
    return standardized_table_start
python
def _get_table_start(self):
    """ prefill the columns we need for all tables """
    if self.documentation:
        standardized_table_start = {
            'object_id': {
                'value': self.object_id,
                'ordering': -1,
                'line_number': 'NA',
                'description': 'IRS-assigned object id',
                'db_type': 'String(18)'
            },
            'ein': {
                'value': self.ein,
                'ordering': -2,
                'line_number': 'NA',
                'description': 'IRS employer id number',
                'db_type': 'String(9)'
            }
        }
        if self.documentId:
            standardized_table_start['documentId'] = {
                'value': self.documentId,
                'description': 'Document ID',
                'ordering': 0
            }
    else:
        standardized_table_start = {
            'object_id': self.object_id,
            'ein': self.ein
        }
        if self.documentId:
            standardized_table_start['documentId'] = self.documentId
    return standardized_table_start
[ "def", "_get_table_start", "(", "self", ")", ":", "if", "self", ".", "documentation", ":", "standardized_table_start", "=", "{", "'object_id'", ":", "{", "'value'", ":", "self", ".", "object_id", ",", "'ordering'", ":", "-", "1", ",", "'line_number'", ":", "'NA'", ",", "'description'", ":", "'IRS-assigned object id'", ",", "'db_type'", ":", "'String(18)'", "}", ",", "'ein'", ":", "{", "'value'", ":", "self", ".", "ein", ",", "'ordering'", ":", "-", "2", ",", "'line_number'", ":", "'NA'", ",", "'description'", ":", "'IRS employer id number'", ",", "'db_type'", ":", "'String(9)'", "}", "}", "if", "self", ".", "documentId", ":", "standardized_table_start", "[", "'documentId'", "]", "=", "{", "'value'", ":", "self", ".", "documentId", ",", "'description'", ":", "'Document ID'", ",", "'ordering'", ":", "0", "}", "else", ":", "standardized_table_start", "=", "{", "'object_id'", ":", "self", ".", "object_id", ",", "'ein'", ":", "self", ".", "ein", "}", "if", "self", ".", "documentId", ":", "standardized_table_start", "[", "'documentId'", "]", "=", "self", ".", "documentId", "return", "standardized_table_start" ]
prefill the columns we need for all tables
[ "prefill", "the", "columns", "we", "need", "for", "all", "tables" ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/sked_dict_reader.py#L45-L78
1,204
jsfenfen/990-xml-reader
irs_reader/text_format_utils.py
debracket
def debracket(string):
    """ Eliminate the bracketed var names in doc, line strings """
    result = re.sub(BRACKET_RE, ';', str(string))
    result = result.lstrip(';')
    result = result.lstrip(' ')
    result = result.replace('; ;', ';')
    return result
python
def debracket(string):
    """ Eliminate the bracketed var names in doc, line strings """
    result = re.sub(BRACKET_RE, ';', str(string))
    result = result.lstrip(';')
    result = result.lstrip(' ')
    result = result.replace('; ;', ';')
    return result
[ "def", "debracket", "(", "string", ")", ":", "result", "=", "re", ".", "sub", "(", "BRACKET_RE", ",", "';'", ",", "str", "(", "string", ")", ")", "result", "=", "result", ".", "lstrip", "(", "';'", ")", "result", "=", "result", ".", "lstrip", "(", "' '", ")", "result", "=", "result", ".", "replace", "(", "'; ;'", ",", "';'", ")", "return", "result" ]
Eliminate the bracketed var names in doc, line strings
[ "Eliminate", "the", "bracketed", "var", "names", "in", "doc", "line", "strings" ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/text_format_utils.py#L15-L21
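BRACKET_RE is likewise defined elsewhere in the repo. Assuming it matches square-bracketed variable names, the cleanup chain behaves roughly like this:

import re

# Assumed stand-in for BRACKET_RE (the real constant is not in this row).
BRACKET_RE = re.compile(r"\[.*?\]")

text = "[PersonNm] Name of officer [BusinessName] or business"
result = BRACKET_RE.sub(";", str(text))
result = result.lstrip(";").lstrip(" ").replace("; ;", ";")
print(result)  # Name of officer ; or business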
1,205
jsfenfen/990-xml-reader
irs_reader/filing.py
Filing._set_schedules
def _set_schedules(self):
    """ Attach the known and unknown schedules """
    self.schedules = ['ReturnHeader990x', ]
    self.otherforms = []
    for sked in self.raw_irs_dict['Return']['ReturnData'].keys():
        if not sked.startswith("@"):
            if sked in KNOWN_SCHEDULES:
                self.schedules.append(sked)
            else:
                self.otherforms.append(sked)
python
def _set_schedules(self):
    """ Attach the known and unknown schedules """
    self.schedules = ['ReturnHeader990x', ]
    self.otherforms = []
    for sked in self.raw_irs_dict['Return']['ReturnData'].keys():
        if not sked.startswith("@"):
            if sked in KNOWN_SCHEDULES:
                self.schedules.append(sked)
            else:
                self.otherforms.append(sked)
[ "def", "_set_schedules", "(", "self", ")", ":", "self", ".", "schedules", "=", "[", "'ReturnHeader990x'", ",", "]", "self", ".", "otherforms", "=", "[", "]", "for", "sked", "in", "self", ".", "raw_irs_dict", "[", "'Return'", "]", "[", "'ReturnData'", "]", ".", "keys", "(", ")", ":", "if", "not", "sked", ".", "startswith", "(", "\"@\"", ")", ":", "if", "sked", "in", "KNOWN_SCHEDULES", ":", "self", ".", "schedules", ".", "append", "(", "sked", ")", "else", ":", "self", ".", "otherforms", ".", "append", "(", "sked", ")" ]
Attach the known and unknown schedules
[ "Attach", "the", "known", "and", "unknown", "schedules" ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/filing.py#L101-L110
1,206
jsfenfen/990-xml-reader
irs_reader/filing.py
Filing.get_parsed_sked
def get_parsed_sked(self, skedname):
    """ Returns an array because multiple sked K's are allowed"""
    if not self.processed:
        raise Exception("Filing must be processed to return parsed sked")
    if skedname in self.schedules:
        matching_skeds = []
        for sked in self.result:
            if sked['schedule_name'] == skedname:
                matching_skeds.append(sked)
        return matching_skeds
    else:
        return []
python
def get_parsed_sked(self, skedname):
    """ Returns an array because multiple sked K's are allowed"""
    if not self.processed:
        raise Exception("Filing must be processed to return parsed sked")
    if skedname in self.schedules:
        matching_skeds = []
        for sked in self.result:
            if sked['schedule_name'] == skedname:
                matching_skeds.append(sked)
        return matching_skeds
    else:
        return []
[ "def", "get_parsed_sked", "(", "self", ",", "skedname", ")", ":", "if", "not", "self", ".", "processed", ":", "raise", "Exception", "(", "\"Filing must be processed to return parsed sked\"", ")", "if", "skedname", "in", "self", ".", "schedules", ":", "matching_skeds", "=", "[", "]", "for", "sked", "in", "self", ".", "result", ":", "if", "sked", "[", "'schedule_name'", "]", "==", "skedname", ":", "matching_skeds", ".", "append", "(", "sked", ")", "return", "matching_skeds", "else", ":", "return", "[", "]" ]
Returns an array because multiple sked K's are allowed
[ "Returns", "an", "array", "because", "multiple", "sked", "K", "s", "are", "allowed" ]
00020529b789081329a31a2e30b5ee729ce7596a
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/filing.py#L176-L187
1,207
tus/tus-py-client
tusclient/uploader.py
Uploader.headers
def headers(self):
    """
    Return headers of the uploader instance. This would include
    the headers of the client instance.
    """
    client_headers = getattr(self.client, 'headers', {})
    return dict(self.DEFAULT_HEADERS, **client_headers)
python
def headers(self):
    """
    Return headers of the uploader instance. This would include
    the headers of the client instance.
    """
    client_headers = getattr(self.client, 'headers', {})
    return dict(self.DEFAULT_HEADERS, **client_headers)
[ "def", "headers", "(", "self", ")", ":", "client_headers", "=", "getattr", "(", "self", ".", "client", ",", "'headers'", ",", "{", "}", ")", "return", "dict", "(", "self", ".", "DEFAULT_HEADERS", ",", "*", "*", "client_headers", ")" ]
Return headers of the uploader instance. This would include the headers of the client instance.
[ "Return", "headers", "of", "the", "uploader", "instance", ".", "This", "would", "include", "the", "headers", "of", "the", "client", "instance", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L139-L145
1,208
tus/tus-py-client
tusclient/uploader.py
Uploader.headers_as_list
def headers_as_list(self):
    """
    Does the same as 'headers' except it is returned as a list.
    """
    headers = self.headers
    headers_list = ['{}: {}'.format(key, value)
                    for key, value in iteritems(headers)]
    return headers_list
python
def headers_as_list(self):
    """
    Does the same as 'headers' except it is returned as a list.
    """
    headers = self.headers
    headers_list = ['{}: {}'.format(key, value)
                    for key, value in iteritems(headers)]
    return headers_list
[ "def", "headers_as_list", "(", "self", ")", ":", "headers", "=", "self", ".", "headers", "headers_list", "=", "[", "'{}: {}'", ".", "format", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "iteritems", "(", "headers", ")", "]", "return", "headers_list" ]
Does the same as 'headers' except it is returned as a list.
[ "Does", "the", "same", "as", "headers", "except", "it", "is", "returned", "as", "a", "list", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L148-L154
1,209
tus/tus-py-client
tusclient/uploader.py
Uploader.get_offset
def get_offset(self):
    """
    Return offset from tus server.

    This is different from the instance attribute 'offset' because this
    makes an http request to the tus server to retrieve the offset.
    """
    resp = requests.head(self.url, headers=self.headers)
    offset = resp.headers.get('upload-offset')
    if offset is None:
        msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
        raise TusCommunicationError(msg, resp.status_code, resp.content)
    return int(offset)
python
def get_offset(self):
    """
    Return offset from tus server.

    This is different from the instance attribute 'offset' because this
    makes an http request to the tus server to retrieve the offset.
    """
    resp = requests.head(self.url, headers=self.headers)
    offset = resp.headers.get('upload-offset')
    if offset is None:
        msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
        raise TusCommunicationError(msg, resp.status_code, resp.content)
    return int(offset)
[ "def", "get_offset", "(", "self", ")", ":", "resp", "=", "requests", ".", "head", "(", "self", ".", "url", ",", "headers", "=", "self", ".", "headers", ")", "offset", "=", "resp", ".", "headers", ".", "get", "(", "'upload-offset'", ")", "if", "offset", "is", "None", ":", "msg", "=", "'Attempt to retrieve offset fails with status {}'", ".", "format", "(", "resp", ".", "status_code", ")", "raise", "TusCommunicationError", "(", "msg", ",", "resp", ".", "status_code", ",", "resp", ".", "content", ")", "return", "int", "(", "offset", ")" ]
Return offset from tus server. This is different from the instance attribute 'offset' because this makes an http request to the tus server to retrieve the offset.
[ "Return", "offset", "from", "tus", "server", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L170-L182
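The HEAD probe in get_offset corresponds to a plain tus request along these lines (the URL is a placeholder; Tus-Resumable is the protocol's version header):

import requests

# Placeholder URL; a real upload URL comes from the creation step.
resp = requests.head(
    "https://tus.example.com/files/abc123",
    headers={"Tus-Resumable": "1.0.0"},
)
offset = resp.headers.get("upload-offset")
print(int(offset) if offset is not None else "no upload-offset header")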
1,210
tus/tus-py-client
tusclient/uploader.py
Uploader.encode_metadata
def encode_metadata(self):
    """
    Return list of encoded metadata as defined by the Tus protocol.
    """
    encoded_list = []
    for key, value in iteritems(self.metadata):
        key_str = str(key)  # dict keys may be of any object type.

        # confirm that the key does not contain unwanted characters.
        if re.search(r'^$|[\s,]+', key_str):
            msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
            raise ValueError(msg.format(key_str))

        value_bytes = b(value)  # python 3 only encodes bytes
        encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
    return encoded_list
python
def encode_metadata(self):
    """
    Return list of encoded metadata as defined by the Tus protocol.
    """
    encoded_list = []
    for key, value in iteritems(self.metadata):
        key_str = str(key)  # dict keys may be of any object type.

        # confirm that the key does not contain unwanted characters.
        if re.search(r'^$|[\s,]+', key_str):
            msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
            raise ValueError(msg.format(key_str))

        value_bytes = b(value)  # python 3 only encodes bytes
        encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
    return encoded_list
[ "def", "encode_metadata", "(", "self", ")", ":", "encoded_list", "=", "[", "]", "for", "key", ",", "value", "in", "iteritems", "(", "self", ".", "metadata", ")", ":", "key_str", "=", "str", "(", "key", ")", "# dict keys may be of any object type.", "# confirm that the key does not contain unwanted characters.", "if", "re", ".", "search", "(", "r'^$|[\\s,]+'", ",", "key_str", ")", ":", "msg", "=", "'Upload-metadata key \"{}\" cannot be empty nor contain spaces or commas.'", "raise", "ValueError", "(", "msg", ".", "format", "(", "key_str", ")", ")", "value_bytes", "=", "b", "(", "value", ")", "# python 3 only encodes bytes", "encoded_list", ".", "append", "(", "'{} {}'", ".", "format", "(", "key_str", ",", "b64encode", "(", "value_bytes", ")", ".", "decode", "(", "'ascii'", ")", ")", ")", "return", "encoded_list" ]
Return list of encoded metadata as defined by the Tus protocol.
[ "Return", "list", "of", "encoded", "metadata", "as", "defined", "by", "the", "Tus", "protocol", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L184-L199
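The list built above feeds the tus Upload-Metadata header, which is a comma-joined set of "key base64(value)" pairs. A self-contained sketch of that encoding:

from base64 import b64encode

metadata = {"filename": "report.pdf"}
encoded = ",".join(
    "{} {}".format(key, b64encode(value.encode("utf-8")).decode("ascii"))
    for key, value in metadata.items()
)
print(encoded)  # filename cmVwb3J0LnBkZg==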
1,211
tus/tus-py-client
tusclient/uploader.py
Uploader.get_url
def get_url(self):
    """
    Return the tus upload url.

    If resumability is enabled, this would try to get the url from storage
    if available, otherwise it would request a new upload url from the tus server.
    """
    if self.store_url and self.url_storage:
        key = self.fingerprinter.get_fingerprint(self.get_file_stream())
        url = self.url_storage.get_item(key)
        if not url:
            url = self.create_url()
            self.url_storage.set_item(key, url)
        return url
    else:
        return self.create_url()
python
def get_url(self):
    """
    Return the tus upload url.

    If resumability is enabled, this would try to get the url from storage
    if available, otherwise it would request a new upload url from the tus server.
    """
    if self.store_url and self.url_storage:
        key = self.fingerprinter.get_fingerprint(self.get_file_stream())
        url = self.url_storage.get_item(key)
        if not url:
            url = self.create_url()
            self.url_storage.set_item(key, url)
        return url
    else:
        return self.create_url()
[ "def", "get_url", "(", "self", ")", ":", "if", "self", ".", "store_url", "and", "self", ".", "url_storage", ":", "key", "=", "self", ".", "fingerprinter", ".", "get_fingerprint", "(", "self", ".", "get_file_stream", "(", ")", ")", "url", "=", "self", ".", "url_storage", ".", "get_item", "(", "key", ")", "if", "not", "url", ":", "url", "=", "self", ".", "create_url", "(", ")", "self", ".", "url_storage", ".", "set_item", "(", "key", ",", "url", ")", "return", "url", "else", ":", "return", "self", ".", "create_url", "(", ")" ]
Return the tus upload url. If resumability is enabled, this would try to get the url from storage if available, otherwise it would request a new upload url from the tus server.
[ "Return", "the", "tus", "upload", "url", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L201-L216
1,212
tus/tus-py-client
tusclient/uploader.py
Uploader.create_url
def create_url(self):
    """
    Return upload url.

    Makes request to tus server to create a new upload url for the
    required file upload.
    """
    headers = self.headers
    headers['upload-length'] = str(self.file_size)
    headers['upload-metadata'] = ','.join(self.encode_metadata())
    resp = requests.post(self.client.url, headers=headers)
    url = resp.headers.get("location")
    if url is None:
        msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
        raise TusCommunicationError(msg, resp.status_code, resp.content)
    return urljoin(self.client.url, url)
python
def create_url(self):
    """
    Return upload url.

    Makes request to tus server to create a new upload url for the
    required file upload.
    """
    headers = self.headers
    headers['upload-length'] = str(self.file_size)
    headers['upload-metadata'] = ','.join(self.encode_metadata())
    resp = requests.post(self.client.url, headers=headers)
    url = resp.headers.get("location")
    if url is None:
        msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
        raise TusCommunicationError(msg, resp.status_code, resp.content)
    return urljoin(self.client.url, url)
[ "def", "create_url", "(", "self", ")", ":", "headers", "=", "self", ".", "headers", "headers", "[", "'upload-length'", "]", "=", "str", "(", "self", ".", "file_size", ")", "headers", "[", "'upload-metadata'", "]", "=", "','", ".", "join", "(", "self", ".", "encode_metadata", "(", ")", ")", "resp", "=", "requests", ".", "post", "(", "self", ".", "client", ".", "url", ",", "headers", "=", "headers", ")", "url", "=", "resp", ".", "headers", ".", "get", "(", "\"location\"", ")", "if", "url", "is", "None", ":", "msg", "=", "'Attempt to retrieve create file url with status {}'", ".", "format", "(", "resp", ".", "status_code", ")", "raise", "TusCommunicationError", "(", "msg", ",", "resp", ".", "status_code", ",", "resp", ".", "content", ")", "return", "urljoin", "(", "self", ".", "client", ".", "url", ",", "url", ")" ]
Return upload url. Makes request to tus server to create a new upload url for the required file upload.
[ "Return", "upload", "url", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L219-L233
1,213
tus/tus-py-client
tusclient/uploader.py
Uploader.request_length
def request_length(self):
    """
    Return length of next chunk upload.
    """
    remainder = self.stop_at - self.offset
    return self.chunk_size if remainder > self.chunk_size else remainder
python
def request_length(self):
    """
    Return length of next chunk upload.
    """
    remainder = self.stop_at - self.offset
    return self.chunk_size if remainder > self.chunk_size else remainder
[ "def", "request_length", "(", "self", ")", ":", "remainder", "=", "self", ".", "stop_at", "-", "self", ".", "offset", "return", "self", ".", "chunk_size", "if", "remainder", ">", "self", ".", "chunk_size", "else", "remainder" ]
Return length of next chunk upload.
[ "Return", "length", "of", "next", "chunk", "upload", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L236-L241
1,214
tus/tus-py-client
tusclient/uploader.py
Uploader.verify_upload
def verify_upload(self):
    """
    Confirm that the last upload was successful.
    Raises TusUploadFailed exception if the upload was not successful.
    """
    if self.request.status_code == 204:
        return True
    else:
        raise TusUploadFailed('', self.request.status_code, self.request.response_content)
python
def verify_upload(self):
    """
    Confirm that the last upload was successful.
    Raises TusUploadFailed exception if the upload was not successful.
    """
    if self.request.status_code == 204:
        return True
    else:
        raise TusUploadFailed('', self.request.status_code, self.request.response_content)
[ "def", "verify_upload", "(", "self", ")", ":", "if", "self", ".", "request", ".", "status_code", "==", "204", ":", "return", "True", "else", ":", "raise", "TusUploadFailed", "(", "''", ",", "self", ".", "request", ".", "status_code", ",", "self", ".", "request", ".", "response_content", ")" ]
Confirm that the last upload was successful. Raises TusUploadFailed exception if the upload was not successful.
[ "Confirm", "that", "the", "last", "upload", "was", "successful", ".", "Raises", "TusUploadFailed", "exception", "if", "the", "upload", "was", "not", "successful", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L243-L251
1,215
tus/tus-py-client
tusclient/uploader.py
Uploader.get_file_stream
def get_file_stream(self):
    """
    Return a file stream instance of the upload.
    """
    if self.file_stream:
        self.file_stream.seek(0)
        return self.file_stream
    elif os.path.isfile(self.file_path):
        return open(self.file_path, 'rb')
    else:
        raise ValueError("invalid file {}".format(self.file_path))
python
def get_file_stream(self):
    """
    Return a file stream instance of the upload.
    """
    if self.file_stream:
        self.file_stream.seek(0)
        return self.file_stream
    elif os.path.isfile(self.file_path):
        return open(self.file_path, 'rb')
    else:
        raise ValueError("invalid file {}".format(self.file_path))
[ "def", "get_file_stream", "(", "self", ")", ":", "if", "self", ".", "file_stream", ":", "self", ".", "file_stream", ".", "seek", "(", "0", ")", "return", "self", ".", "file_stream", "elif", "os", ".", "path", ".", "isfile", "(", "self", ".", "file_path", ")", ":", "return", "open", "(", "self", ".", "file_path", ",", "'rb'", ")", "else", ":", "raise", "ValueError", "(", "\"invalid file {}\"", ".", "format", "(", "self", ".", "file_path", ")", ")" ]
Return a file stream instance of the upload.
[ "Return", "a", "file", "stream", "instance", "of", "the", "upload", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L253-L263
1,216
tus/tus-py-client
tusclient/uploader.py
Uploader.file_size
def file_size(self):
    """
    Return size of the file.
    """
    stream = self.get_file_stream()
    stream.seek(0, os.SEEK_END)
    return stream.tell()
python
def file_size(self):
    """
    Return size of the file.
    """
    stream = self.get_file_stream()
    stream.seek(0, os.SEEK_END)
    return stream.tell()
[ "def", "file_size", "(", "self", ")", ":", "stream", "=", "self", ".", "get_file_stream", "(", ")", "stream", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "return", "stream", ".", "tell", "(", ")" ]
Return size of the file.
[ "Return", "size", "of", "the", "file", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L266-L272
1,217
tus/tus-py-client
tusclient/uploader.py
Uploader.upload
def upload(self, stop_at=None):
    """
    Perform file upload.

    Performs continuous upload of chunks of the file. The size uploaded
    at each cycle is the value of the attribute 'chunk_size'.

    :Args:
        - stop_at (Optional[int]):
            Determines at what offset value the upload should stop.
            If not specified this defaults to the file size.
    """
    self.stop_at = stop_at or self.file_size

    while self.offset < self.stop_at:
        self.upload_chunk()
    else:
        if self.log_func:
            self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
python
def upload(self, stop_at=None):
    """
    Perform file upload.

    Performs continuous upload of chunks of the file. The size uploaded
    at each cycle is the value of the attribute 'chunk_size'.

    :Args:
        - stop_at (Optional[int]):
            Determines at what offset value the upload should stop.
            If not specified this defaults to the file size.
    """
    self.stop_at = stop_at or self.file_size

    while self.offset < self.stop_at:
        self.upload_chunk()
    else:
        if self.log_func:
            self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
[ "def", "upload", "(", "self", ",", "stop_at", "=", "None", ")", ":", "self", ".", "stop_at", "=", "stop_at", "or", "self", ".", "file_size", "while", "self", ".", "offset", "<", "self", ".", "stop_at", ":", "self", ".", "upload_chunk", "(", ")", "else", ":", "if", "self", ".", "log_func", ":", "self", ".", "log_func", "(", "\"maximum upload specified({} bytes) has been reached\"", ".", "format", "(", "self", ".", "stop_at", ")", ")" ]
Perform file upload. Performs continuous upload of chunks of the file. The size uploaded at each cycle is the value of the attribute 'chunk_size'. :Args: - stop_at (Optional[int]): Determines at what offset value the upload should stop. If not specified this defaults to the file size.
[ "Perform", "file", "upload", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L274-L292
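The while/else in upload is easy to misread: the else suite runs once, after the loop condition turns false (it is skipped only on break). A tiny demonstration:

offset, stop_at = 0, 3
while offset < stop_at:
    offset += 1
else:
    # Runs exactly once, after the condition fails.
    print("done at offset", offset)  # done at offset 3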
1,218
tus/tus-py-client
tusclient/uploader.py
Uploader.upload_chunk
def upload_chunk(self):
    """
    Upload chunk of file.
    """
    self._retried = 0
    self._do_request()
    self.offset = int(self.request.response_headers.get('upload-offset'))
    if self.log_func:
        msg = '{} bytes uploaded ...'.format(self.offset)
        self.log_func(msg)
python
def upload_chunk(self):
    """
    Upload chunk of file.
    """
    self._retried = 0
    self._do_request()
    self.offset = int(self.request.response_headers.get('upload-offset'))
    if self.log_func:
        msg = '{} bytes uploaded ...'.format(self.offset)
        self.log_func(msg)
[ "def", "upload_chunk", "(", "self", ")", ":", "self", ".", "_retried", "=", "0", "self", ".", "_do_request", "(", ")", "self", ".", "offset", "=", "int", "(", "self", ".", "request", ".", "response_headers", ".", "get", "(", "'upload-offset'", ")", ")", "if", "self", ".", "log_func", ":", "msg", "=", "'{} bytes uploaded ...'", ".", "format", "(", "self", ".", "offset", ")", "self", ".", "log_func", "(", "msg", ")" ]
Upload chunk of file.
[ "Upload", "chunk", "of", "file", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L294-L303
1,219
tus/tus-py-client
tusclient/storage/filestorage.py
FileStorage.get_item
def get_item(self, key):
    """
    Return the tus url of a file, identified by the key specified.

    :Args:
        - key[str]: The unique id for the stored item (in this case, url)
    :Returns: url[str]
    """
    result = self._db.search(self._urls.key == key)
    return result[0].get('url') if result else None
python
def get_item(self, key):
    """
    Return the tus url of a file, identified by the key specified.

    :Args:
        - key[str]: The unique id for the stored item (in this case, url)
    :Returns: url[str]
    """
    result = self._db.search(self._urls.key == key)
    return result[0].get('url') if result else None
[ "def", "get_item", "(", "self", ",", "key", ")", ":", "result", "=", "self", ".", "_db", ".", "search", "(", "self", ".", "_urls", ".", "key", "==", "key", ")", "return", "result", "[", "0", "]", ".", "get", "(", "'url'", ")", "if", "result", "else", "None" ]
Return the tus url of a file, identified by the key specified. :Args: - key[str]: The unique id for the stored item (in this case, url) :Returns: url[str]
[ "Return", "the", "tus", "url", "of", "a", "file", "identified", "by", "the", "key", "specified", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/storage/filestorage.py#L14-L23
1,220
tus/tus-py-client
tusclient/storage/filestorage.py
FileStorage.set_item
def set_item(self, key, url):
    """
    Store the url value under the unique key.

    :Args:
        - key[str]: The unique id to which the item (in this case, url) would be stored.
        - value[str]: The actual url value to be stored.
    """
    if self._db.search(self._urls.key == key):
        self._db.update({'url': url}, self._urls.key == key)
    else:
        self._db.insert({'key': key, 'url': url})
python
def set_item(self, key, url):
    """
    Store the url value under the unique key.

    :Args:
        - key[str]: The unique id to which the item (in this case, url) would be stored.
        - value[str]: The actual url value to be stored.
    """
    if self._db.search(self._urls.key == key):
        self._db.update({'url': url}, self._urls.key == key)
    else:
        self._db.insert({'key': key, 'url': url})
[ "def", "set_item", "(", "self", ",", "key", ",", "url", ")", ":", "if", "self", ".", "_db", ".", "search", "(", "self", ".", "_urls", ".", "key", "==", "key", ")", ":", "self", ".", "_db", ".", "update", "(", "{", "'url'", ":", "url", "}", ",", "self", ".", "_urls", ".", "key", "==", "key", ")", "else", ":", "self", ".", "_db", ".", "insert", "(", "{", "'key'", ":", "key", ",", "'url'", ":", "url", "}", ")" ]
Store the url value under the unique key. :Args: - key[str]: The unique id to which the item (in this case, url) would be stored. - value[str]: The actual url value to be stored.
[ "Store", "the", "url", "value", "under", "the", "unique", "key", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/storage/filestorage.py#L25-L36
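The _db.search(self._urls.key == key) idiom in the two methods above matches TinyDB's query API, which appears to be what backs FileStorage. A hedged sketch under that assumption:

from tinydb import TinyDB, Query

db = TinyDB("resumable_uploads.json")  # hypothetical storage file
urls = Query()

# set_item-style upsert:
if db.search(urls.key == "fp-1"):
    db.update({"url": "https://tus.example.com/files/abc"}, urls.key == "fp-1")
else:
    db.insert({"key": "fp-1", "url": "https://tus.example.com/files/abc"})

# get_item-style lookup:
result = db.search(urls.key == "fp-1")
print(result[0].get("url") if result else None)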
1,221
tus/tus-py-client
tusclient/request.py
TusRequest.perform
def perform(self):
    """
    Perform actual request.
    """
    try:
        host = '{}://{}'.format(self._url.scheme, self._url.netloc)
        path = self._url.geturl().replace(host, '', 1)

        chunk = self.file.read(self._content_length)
        if self._upload_checksum:
            self._request_headers["upload-checksum"] = \
                " ".join((
                    self._checksum_algorithm_name,
                    base64.b64encode(
                        self._checksum_algorithm(chunk).digest()
                    ).decode("ascii"),
                ))
        self.handle.request("PATCH", path, chunk, self._request_headers)
        self._response = self.handle.getresponse()

        self.status_code = self._response.status
        self.response_headers = {k.lower(): v for k, v in self._response.getheaders()}
    except http.client.HTTPException as e:
        raise TusUploadFailed(e)
    # wrap connection related errors not raised by the http.client.HTTP(S)Connection
    # as TusUploadFailed exceptions to enable retries
    except OSError as e:
        if e.errno in (errno.EPIPE, errno.ESHUTDOWN, errno.ECONNABORTED,
                       errno.ECONNREFUSED, errno.ECONNRESET):
            raise TusUploadFailed(e)
        raise e
python
def perform(self):
    """
    Perform actual request.
    """
    try:
        host = '{}://{}'.format(self._url.scheme, self._url.netloc)
        path = self._url.geturl().replace(host, '', 1)

        chunk = self.file.read(self._content_length)
        if self._upload_checksum:
            self._request_headers["upload-checksum"] = \
                " ".join((
                    self._checksum_algorithm_name,
                    base64.b64encode(
                        self._checksum_algorithm(chunk).digest()
                    ).decode("ascii"),
                ))
        self.handle.request("PATCH", path, chunk, self._request_headers)
        self._response = self.handle.getresponse()

        self.status_code = self._response.status
        self.response_headers = {k.lower(): v for k, v in self._response.getheaders()}
    except http.client.HTTPException as e:
        raise TusUploadFailed(e)
    # wrap connection related errors not raised by the http.client.HTTP(S)Connection
    # as TusUploadFailed exceptions to enable retries
    except OSError as e:
        if e.errno in (errno.EPIPE, errno.ESHUTDOWN, errno.ECONNABORTED,
                       errno.ECONNREFUSED, errno.ECONNRESET):
            raise TusUploadFailed(e)
        raise e
[ "def", "perform", "(", "self", ")", ":", "try", ":", "host", "=", "'{}://{}'", ".", "format", "(", "self", ".", "_url", ".", "scheme", ",", "self", ".", "_url", ".", "netloc", ")", "path", "=", "self", ".", "_url", ".", "geturl", "(", ")", ".", "replace", "(", "host", ",", "''", ",", "1", ")", "chunk", "=", "self", ".", "file", ".", "read", "(", "self", ".", "_content_length", ")", "if", "self", ".", "_upload_checksum", ":", "self", ".", "_request_headers", "[", "\"upload-checksum\"", "]", "=", "\" \"", ".", "join", "(", "(", "self", ".", "_checksum_algorithm_name", ",", "base64", ".", "b64encode", "(", "self", ".", "_checksum_algorithm", "(", "chunk", ")", ".", "digest", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", ",", ")", ")", "self", ".", "handle", ".", "request", "(", "\"PATCH\"", ",", "path", ",", "chunk", ",", "self", ".", "_request_headers", ")", "self", ".", "_response", "=", "self", ".", "handle", ".", "getresponse", "(", ")", "self", ".", "status_code", "=", "self", ".", "_response", ".", "status", "self", ".", "response_headers", "=", "{", "k", ".", "lower", "(", ")", ":", "v", "for", "k", ",", "v", "in", "self", ".", "_response", ".", "getheaders", "(", ")", "}", "except", "http", ".", "client", ".", "HTTPException", "as", "e", ":", "raise", "TusUploadFailed", "(", "e", ")", "# wrap connection related errors not raised by the http.client.HTTP(S)Connection", "# as TusUploadFailed exceptions to enable retries", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "in", "(", "errno", ".", "EPIPE", ",", "errno", ".", "ESHUTDOWN", ",", "errno", ".", "ECONNABORTED", ",", "errno", ".", "ECONNREFUSED", ",", "errno", ".", "ECONNRESET", ")", ":", "raise", "TusUploadFailed", "(", "e", ")", "raise", "e" ]
Perform actual request.
[ "Perform", "actual", "request", "." ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/request.py#L56-L84
1,222
tus/tus-py-client
tusclient/fingerprint/fingerprint.py
Fingerprint.get_fingerprint
def get_fingerprint(self, fs):
    """
    Return a unique fingerprint string value based on the file stream received.

    :Args:
        - fs[file]: The file stream instance of the file for which a fingerprint would be generated.
    :Returns: fingerprint[str]
    """
    hasher = hashlib.md5()
    # we encode the content to avoid python 3 unicode errors
    buf = self._encode_data(fs.read(self.BLOCK_SIZE))
    while len(buf) > 0:
        hasher.update(buf)
        buf = fs.read(self.BLOCK_SIZE)
    return 'md5:' + hasher.hexdigest()
python
def get_fingerprint(self, fs):
    """
    Return a unique fingerprint string value based on the file stream received.

    :Args:
        - fs[file]: The file stream instance of the file for which a fingerprint would be generated.
    :Returns: fingerprint[str]
    """
    hasher = hashlib.md5()
    # we encode the content to avoid python 3 unicode errors
    buf = self._encode_data(fs.read(self.BLOCK_SIZE))
    while len(buf) > 0:
        hasher.update(buf)
        buf = fs.read(self.BLOCK_SIZE)
    return 'md5:' + hasher.hexdigest()
[ "def", "get_fingerprint", "(", "self", ",", "fs", ")", ":", "hasher", "=", "hashlib", ".", "md5", "(", ")", "# we encode the content to avoid python 3 unicode errors", "buf", "=", "self", ".", "_encode_data", "(", "fs", ".", "read", "(", "self", ".", "BLOCK_SIZE", ")", ")", "while", "len", "(", "buf", ")", ">", "0", ":", "hasher", ".", "update", "(", "buf", ")", "buf", "=", "fs", ".", "read", "(", "self", ".", "BLOCK_SIZE", ")", "return", "'md5:'", "+", "hasher", ".", "hexdigest", "(", ")" ]
Return a unique fingerprint string value based on the file stream received. :Args: - fs[file]: The file stream instance of the file for which a fingerprint would be generated. :Returns: fingerprint[str]
[ "Return", "a", "unique", "fingerprint", "string", "value", "based", "on", "the", "file", "stream", "received" ]
0e5856efcfae6fc281171359ce38488a70468993
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/fingerprint/fingerprint.py#L15-L29
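The same chunked-md5 idea as a standalone helper, using iter() with a sentinel instead of a manual read loop (a sketch, not the library's code):

import hashlib

def file_md5(path, block_size=65536):
    # Hash in fixed-size blocks so memory stays flat for large files.
    hasher = hashlib.md5()
    with open(path, "rb") as fs:
        for buf in iter(lambda: fs.read(block_size), b""):
            hasher.update(buf)
    return "md5:" + hasher.hexdigest()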
1,223
Yelp/threat_intel
threat_intel/util/http.py
SSLAdapter.init_poolmanager
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
    """Called to initialize the HTTPAdapter when no proxy is used."""
    try:
        pool_kwargs['ssl_version'] = ssl.PROTOCOL_TLS
    except AttributeError:
        pool_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
    return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
python
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
    """Called to initialize the HTTPAdapter when no proxy is used."""
    try:
        pool_kwargs['ssl_version'] = ssl.PROTOCOL_TLS
    except AttributeError:
        pool_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
    return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
[ "def", "init_poolmanager", "(", "self", ",", "connections", ",", "maxsize", ",", "block", "=", "False", ",", "*", "*", "pool_kwargs", ")", ":", "try", ":", "pool_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_TLS", "except", "AttributeError", ":", "pool_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_SSLv23", "return", "super", "(", "SSLAdapter", ",", "self", ")", ".", "init_poolmanager", "(", "connections", ",", "maxsize", ",", "block", ",", "*", "*", "pool_kwargs", ")" ]
Called to initialize the HTTPAdapter when no proxy is used.
[ "Called", "to", "initialize", "the", "HTTPAdapter", "when", "no", "proxy", "is", "used", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L51-L57
1,224
Yelp/threat_intel
threat_intel/util/http.py
SSLAdapter.proxy_manager_for
def proxy_manager_for(self, proxy, **proxy_kwargs):
    """Called to initialize the HTTPAdapter when a proxy is used."""
    try:
        proxy_kwargs['ssl_version'] = ssl.PROTOCOL_TLS
    except AttributeError:
        proxy_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
    return super(SSLAdapter, self).proxy_manager_for(proxy, **proxy_kwargs)
python
def proxy_manager_for(self, proxy, **proxy_kwargs):
    """Called to initialize the HTTPAdapter when a proxy is used."""
    try:
        proxy_kwargs['ssl_version'] = ssl.PROTOCOL_TLS
    except AttributeError:
        proxy_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
    return super(SSLAdapter, self).proxy_manager_for(proxy, **proxy_kwargs)
[ "def", "proxy_manager_for", "(", "self", ",", "proxy", ",", "*", "*", "proxy_kwargs", ")", ":", "try", ":", "proxy_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_TLS", "except", "AttributeError", ":", "proxy_kwargs", "[", "'ssl_version'", "]", "=", "ssl", ".", "PROTOCOL_SSLv23", "return", "super", "(", "SSLAdapter", ",", "self", ")", ".", "proxy_manager_for", "(", "proxy", ",", "*", "*", "proxy_kwargs", ")" ]
Called to initialize the HTTPAdapter when a proxy is used.
[ "Called", "to", "initialize", "the", "HTTPAdapter", "when", "a", "proxy", "is", "used", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L59-L65
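Both adapter methods perform the same feature probe: assigning ssl.PROTOCOL_TLS raises AttributeError on old Pythons that only have the deprecated PROTOCOL_SSLv23 spelling. getattr expresses the same fallback in one line:

import ssl

# PROTOCOL_TLS on newer Pythons, PROTOCOL_SSLv23 as the legacy alias.
ssl_version = getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23)
print(ssl_version)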
1,225
Yelp/threat_intel
threat_intel/util/http.py
RateLimiter.make_calls
def make_calls(self, num_calls=1):
    """Adds appropriate sleep to avoid making too many calls.

    Args:
        num_calls: int the number of calls which will be made
    """
    self._cull()

    while self._outstanding_calls + num_calls > self._max_calls_per_second:
        time.sleep(0)  # yield
        self._cull()

    self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))
    self._outstanding_calls += num_calls
python
def make_calls(self, num_calls=1):
    """Adds appropriate sleep to avoid making too many calls.

    Args:
        num_calls: int the number of calls which will be made
    """
    self._cull()

    while self._outstanding_calls + num_calls > self._max_calls_per_second:
        time.sleep(0)  # yield
        self._cull()

    self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))
    self._outstanding_calls += num_calls
[ "def", "make_calls", "(", "self", ",", "num_calls", "=", "1", ")", ":", "self", ".", "_cull", "(", ")", "while", "self", ".", "_outstanding_calls", "+", "num_calls", ">", "self", ".", "_max_calls_per_second", ":", "time", ".", "sleep", "(", "0", ")", "# yield", "self", ".", "_cull", "(", ")", "self", ".", "_call_times", ".", "append", "(", "self", ".", "CallRecord", "(", "time", "=", "time", ".", "time", "(", ")", ",", "num_calls", "=", "num_calls", ")", ")", "self", ".", "_outstanding_calls", "+=", "num_calls" ]
Adds appropriate sleep to avoid making too many calls. Args: num_calls: int the number of calls which will be made
[ "Adds", "appropriate", "sleep", "to", "avoid", "making", "too", "many", "calls", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L79-L91
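Assumed usage of the limiter; the constructor is not shown in this row, so the max_calls_per_second argument below is inferred from the attribute name:

import time
from threat_intel.util.http import RateLimiter  # module path from this row

limiter = RateLimiter(max_calls_per_second=2)  # assumed signature
for i in range(5):
    limiter.make_calls(num_calls=1)
    print(i, time.time())  # at most 2 stamps per one-second window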
1,226
Yelp/threat_intel
threat_intel/util/http.py
RateLimiter._cull
def _cull(self):
    """Remove calls more than 1 second old from the queue."""
    right_now = time.time()

    cull_from = -1
    for index in range(len(self._call_times)):
        if right_now - self._call_times[index].time >= 1.0:
            cull_from = index
            self._outstanding_calls -= self._call_times[index].num_calls
        else:
            break

    if cull_from > -1:
        self._call_times = self._call_times[cull_from + 1:]
python
def _cull(self):
    """Remove calls more than 1 second old from the queue."""
    right_now = time.time()

    cull_from = -1
    for index in range(len(self._call_times)):
        if right_now - self._call_times[index].time >= 1.0:
            cull_from = index
            self._outstanding_calls -= self._call_times[index].num_calls
        else:
            break

    if cull_from > -1:
        self._call_times = self._call_times[cull_from + 1:]
[ "def", "_cull", "(", "self", ")", ":", "right_now", "=", "time", ".", "time", "(", ")", "cull_from", "=", "-", "1", "for", "index", "in", "range", "(", "len", "(", "self", ".", "_call_times", ")", ")", ":", "if", "right_now", "-", "self", ".", "_call_times", "[", "index", "]", ".", "time", ">=", "1.0", ":", "cull_from", "=", "index", "self", ".", "_outstanding_calls", "-=", "self", ".", "_call_times", "[", "index", "]", ".", "num_calls", "else", ":", "break", "if", "cull_from", ">", "-", "1", ":", "self", ".", "_call_times", "=", "self", ".", "_call_times", "[", "cull_from", "+", "1", ":", "]" ]
Remove calls more than 1 second old from the queue.
[ "Remove", "calls", "more", "than", "1", "second", "old", "from", "the", "queue", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L93-L106
1,227
Yelp/threat_intel
threat_intel/util/http.py
AvailabilityLimiter.map_with_retries
def map_with_retries(self, requests, responses_for_requests):
    """Provides session-based retry functionality

    :param requests: A collection of Request objects.
    :param responses_for_requests: Dictionary mapping of requests to responses
    """
    retries = []
    response_futures = [preq.callable() for preq in requests]

    for request, response_future in zip(requests, response_futures):
        try:
            response = response_future.result()
            if response is not None and response.status_code == 403:
                logging.warning('Request to {} caused a 403 response status code.'.format(request.url))
                raise InvalidRequestError('Access forbidden')
            if response is not None:
                responses_for_requests[request] = response
        except RequestException as re:
            logging.error('An exception was raised for {}: {}'.format(request.url, re))
            if self.total_retries > 0:
                self.total_retries -= 1
                retries.append(request)

    # Recursively retry failed requests with the modified total retry count
    if retries:
        self.map_with_retries(retries, responses_for_requests)
python
def map_with_retries(self, requests, responses_for_requests):
    """Provides session-based retry functionality

    :param requests: A collection of Request objects.
    :param responses_for_requests: Dictionary mapping of requests to responses
    """
    retries = []
    response_futures = [preq.callable() for preq in requests]

    for request, response_future in zip(requests, response_futures):
        try:
            response = response_future.result()
            if response is not None and response.status_code == 403:
                logging.warning('Request to {} caused a 403 response status code.'.format(request.url))
                raise InvalidRequestError('Access forbidden')
            if response is not None:
                responses_for_requests[request] = response
        except RequestException as re:
            logging.error('An exception was raised for {}: {}'.format(request.url, re))
            if self.total_retries > 0:
                self.total_retries -= 1
                retries.append(request)

    # Recursively retry failed requests with the modified total retry count
    if retries:
        self.map_with_retries(retries, responses_for_requests)
[ "def", "map_with_retries", "(", "self", ",", "requests", ",", "responses_for_requests", ")", ":", "retries", "=", "[", "]", "response_futures", "=", "[", "preq", ".", "callable", "(", ")", "for", "preq", "in", "requests", "]", "for", "request", ",", "response_future", "in", "zip", "(", "requests", ",", "response_futures", ")", ":", "try", ":", "response", "=", "response_future", ".", "result", "(", ")", "if", "response", "is", "not", "None", "and", "response", ".", "status_code", "==", "403", ":", "logging", ".", "warning", "(", "'Request to {} caused a 403 response status code.'", ".", "format", "(", "request", ".", "url", ")", ")", "raise", "InvalidRequestError", "(", "'Access forbidden'", ")", "if", "response", "is", "not", "None", ":", "responses_for_requests", "[", "request", "]", "=", "response", "except", "RequestException", "as", "re", ":", "logging", ".", "error", "(", "'An exception was raised for {}: {}'", ".", "format", "(", "request", ".", "url", ",", "re", ")", ")", "if", "self", ".", "total_retries", ">", "0", ":", "self", ".", "total_retries", "-=", "1", "retries", ".", "append", "(", "request", ")", "# Recursively retry failed requests with the modified total retry count", "if", "retries", ":", "self", ".", "map_with_retries", "(", "retries", ",", "responses_for_requests", ")" ]
Provides session-based retry functionality :param requests: A collection of Request objects. :param responses_for_requests: Dictionary mapping of requests to responses
[ "Provides", "session", "-", "based", "retry", "functionality" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L122-L151
1,228
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.multi_get
def multi_get(self, urls, query_params=None, to_json=True):
    """Issue multiple GET requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        to_json - A boolean, should the responses be returned as JSON blobs

    Returns:
        a list of dicts if to_json is set, or a list of requests.response otherwise.

    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    return self._multi_request(
        MultiRequest._VERB_GET, urls, query_params,
        data=None, to_json=to_json,
    )
python
def multi_get(self, urls, query_params=None, to_json=True):
    """Issue multiple GET requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        to_json - A boolean, should the responses be returned as JSON blobs

    Returns:
        a list of dicts if to_json is set, or a list of requests.response otherwise.

    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    return self._multi_request(
        MultiRequest._VERB_GET, urls, query_params,
        data=None, to_json=to_json,
    )
[ "def", "multi_get", "(", "self", ",", "urls", ",", "query_params", "=", "None", ",", "to_json", "=", "True", ")", ":", "return", "self", ".", "_multi_request", "(", "MultiRequest", ".", "_VERB_GET", ",", "urls", ",", "query_params", ",", "data", "=", "None", ",", "to_json", "=", "to_json", ",", ")" ]
Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set, or a list of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
[ "Issue", "multiple", "GET", "requests", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L203-L218
1,229
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.multi_post
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
    """Issue multiple POST requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        data - None, a dict or string, or a list of dicts and strings representing the data body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.

    Returns:
        a list of dicts if to_json is set, or a list of requests.response otherwise.

    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    return self._multi_request(
        MultiRequest._VERB_POST, urls, query_params, data,
        to_json=to_json, send_as_file=send_as_file,
    )
python
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
    """Issue multiple POST requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        data - None, a dict or string, or a list of dicts and strings representing the data body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.

    Returns:
        a list of dicts if to_json is set, or a list of requests.response otherwise.

    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    return self._multi_request(
        MultiRequest._VERB_POST, urls, query_params, data,
        to_json=to_json, send_as_file=send_as_file,
    )
[ "def", "multi_post", "(", "self", ",", "urls", ",", "query_params", "=", "None", ",", "data", "=", "None", ",", "to_json", "=", "True", ",", "send_as_file", "=", "False", ")", ":", "return", "self", ".", "_multi_request", "(", "MultiRequest", ".", "_VERB_POST", ",", "urls", ",", "query_params", ",", "data", ",", "to_json", "=", "to_json", ",", "send_as_file", "=", "send_as_file", ",", ")" ]
Issue multiple POST requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs send_as_file - A boolean, should the data be sent as a file. Returns: a list of dicts if to_json is set, or a list of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
[ "Issue", "multiple", "POST", "requests", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L220-L237
1,230
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._zip_request_params
def _zip_request_params(self, urls, query_params, data): """Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue. It's fine for an input to have 0, 1, or a list of values. If there are two inputs each with a list of values, the cardinality of those lists much match. Args: urls - 1 string URL or a list of URLs query_params - None, 1 dict, or a list of dicts data - None, 1 dict or string, or a list of dicts or strings Returns: A list of 3-tuples (url, query_param, data) Raises: InvalidRequestError - if cardinality of lists does not match """ # Everybody gets to be a list if not isinstance(urls, list): urls = [urls] if not isinstance(query_params, list): query_params = [query_params] if not isinstance(data, list): data = [data] # Counts must not mismatch url_count = len(urls) query_param_count = len(query_params) data_count = len(data) max_count = max(url_count, query_param_count, data_count) if ( max_count > url_count > 1 or max_count > query_param_count > 1 or max_count > data_count > 1 ): raise InvalidRequestError( 'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}', url_count, query_param_count, data_count, max_count, ) # Pad out lists if url_count < max_count: urls = urls * max_count if query_param_count < max_count: query_params = query_params * max_count if data_count < max_count: data = data * max_count return list(zip(urls, query_params, data))
python
def _zip_request_params(self, urls, query_params, data):
        """Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue.

        It's fine for an input to have 0, 1, or a list of values.
        If there are two inputs each with a list of values, the cardinality of those lists must match.

        Args:
            urls - 1 string URL or a list of URLs
            query_params - None, 1 dict, or a list of dicts
            data - None, 1 dict or string, or a list of dicts or strings
        Returns:
            A list of 3-tuples (url, query_param, data)
        Raises:
            InvalidRequestError - if cardinality of lists does not match
        """
        # Everybody gets to be a list
        if not isinstance(urls, list):
            urls = [urls]
        if not isinstance(query_params, list):
            query_params = [query_params]
        if not isinstance(data, list):
            data = [data]

        # Counts must not mismatch
        url_count = len(urls)
        query_param_count = len(query_params)
        data_count = len(data)

        max_count = max(url_count, query_param_count, data_count)

        if (
            max_count > url_count > 1
            or max_count > query_param_count > 1
            or max_count > data_count > 1
        ):
            raise InvalidRequestError(
                'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}',
                url_count, query_param_count, data_count, max_count,
            )

        # Pad out lists
        if url_count < max_count:
            urls = urls * max_count
        if query_param_count < max_count:
            query_params = query_params * max_count
        if data_count < max_count:
            data = data * max_count

        return list(zip(urls, query_params, data))
[ "def", "_zip_request_params", "(", "self", ",", "urls", ",", "query_params", ",", "data", ")", ":", "# Everybody gets to be a list", "if", "not", "isinstance", "(", "urls", ",", "list", ")", ":", "urls", "=", "[", "urls", "]", "if", "not", "isinstance", "(", "query_params", ",", "list", ")", ":", "query_params", "=", "[", "query_params", "]", "if", "not", "isinstance", "(", "data", ",", "list", ")", ":", "data", "=", "[", "data", "]", "# Counts must not mismatch", "url_count", "=", "len", "(", "urls", ")", "query_param_count", "=", "len", "(", "query_params", ")", "data_count", "=", "len", "(", "data", ")", "max_count", "=", "max", "(", "url_count", ",", "query_param_count", ",", "data_count", ")", "if", "(", "max_count", ">", "url_count", ">", "1", "or", "max_count", ">", "query_param_count", ">", "1", "or", "max_count", ">", "data_count", ">", "1", ")", ":", "raise", "InvalidRequestError", "(", "'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}'", ",", "url_count", ",", "query_param_count", ",", "data_count", ",", "max_count", ",", ")", "# Pad out lists", "if", "url_count", "<", "max_count", ":", "urls", "=", "urls", "*", "max_count", "if", "query_param_count", "<", "max_count", ":", "query_params", "=", "query_params", "*", "max_count", "if", "data_count", "<", "max_count", ":", "data", "=", "data", "*", "max_count", "return", "list", "(", "zip", "(", "urls", ",", "query_params", ",", "data", ")", ")" ]
Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue.

        It's fine for an input to have 0, 1, or a list of values.
        If there are two inputs each with a list of values, the cardinality of those lists must match.

        Args:
            urls - 1 string URL or a list of URLs
            query_params - None, 1 dict, or a list of dicts
            data - None, 1 dict or string, or a list of dicts or strings
        Returns:
            A list of 3-tuples (url, query_param, data)
        Raises:
            InvalidRequestError - if cardinality of lists does not match
[ "Massages", "inputs", "and", "returns", "a", "list", "of", "3", "-", "tuples", "zipping", "them", "up", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L272-L322
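The broadcast-or-zip rule that _zip_request_params documents can be shown standalone; this is a simplified sketch (the mismatched-cardinality error check is omitted), not the library's code:

def zip_or_broadcast(urls, query_params=None, data=None):
    # Wrap scalars into single-element lists so everything zips uniformly.
    to_list = lambda x: x if isinstance(x, list) else [x]
    urls, query_params, data = to_list(urls), to_list(query_params), to_list(data)
    n = max(len(urls), len(query_params), len(data))
    # Broadcast length-1 inputs out to the longest list.
    pad = lambda xs: xs * n if len(xs) == 1 else xs
    return list(zip(pad(urls), pad(query_params), pad(data)))

# One URL, three data bodies -> three requests against the same URL.
print(zip_or_broadcast('https://example.com', data=[{'a': 1}, {'a': 2}, {'a': 3}]))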
1,231
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._wait_for_response
def _wait_for_response(self, requests):
        """Issues a batch of requests and waits for the responses.
        If some of the requests fail it will retry the failed ones up to `_max_retry` times.

        Args:
            requests - A list of requests
        Returns:
            A list of `requests.models.Response` objects
        Raises:
            InvalidRequestError - if any of the requests returns "403 Forbidden" response
        """
        failed_requests = []
        responses_for_requests = OrderedDict.fromkeys(requests)

        for retry in range(self._max_retry):
            try:
                logging.debug('Try #{0}'.format(retry + 1))
                self._availability_limiter.map_with_retries(requests, responses_for_requests)

                failed_requests = []
                for request, response in responses_for_requests.items():
                    if self._drop_404s and response is not None and response.status_code == 404:
                        logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))
                    elif not response:
                        failed_requests.append((request, response))

                if not failed_requests:
                    break

                logging.warning('Try #{0}. Expected {1} successful response(s) but only got {2}.'.format(
                    retry + 1, len(requests), len(requests) - len(failed_requests),
                ))

                # retry only for the failed requests
                requests = [fr[0] for fr in failed_requests]
            except InvalidRequestError:
                raise
            except Exception as e:
                # log the exception for informative purposes and move on to the next iteration
                logging.exception('Try #{0}. Exception occurred: {1}. Retrying.'.format(retry + 1, e))
                pass

        if failed_requests:
            logging.warning('Still {0} failed request(s) after {1} retries:'.format(
                len(failed_requests), self._max_retry,
            ))
            for failed_request, failed_response in failed_requests:
                if failed_response is not None:
                    # the response text may contain non-ascii characters
                    failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')
                    logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(
                        failed_request.url, failed_response.status_code, failed_response_text,
                    ))
                else:
                    logging.warning('Request to {0} failed with None response.'.format(failed_request.url))

        return list(responses_for_requests.values())
python
def _wait_for_response(self, requests):
        """Issues a batch of requests and waits for the responses.
        If some of the requests fail it will retry the failed ones up to `_max_retry` times.

        Args:
            requests - A list of requests
        Returns:
            A list of `requests.models.Response` objects
        Raises:
            InvalidRequestError - if any of the requests returns "403 Forbidden" response
        """
        failed_requests = []
        responses_for_requests = OrderedDict.fromkeys(requests)

        for retry in range(self._max_retry):
            try:
                logging.debug('Try #{0}'.format(retry + 1))
                self._availability_limiter.map_with_retries(requests, responses_for_requests)

                failed_requests = []
                for request, response in responses_for_requests.items():
                    if self._drop_404s and response is not None and response.status_code == 404:
                        logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))
                    elif not response:
                        failed_requests.append((request, response))

                if not failed_requests:
                    break

                logging.warning('Try #{0}. Expected {1} successful response(s) but only got {2}.'.format(
                    retry + 1, len(requests), len(requests) - len(failed_requests),
                ))

                # retry only for the failed requests
                requests = [fr[0] for fr in failed_requests]
            except InvalidRequestError:
                raise
            except Exception as e:
                # log the exception for informative purposes and move on to the next iteration
                logging.exception('Try #{0}. Exception occurred: {1}. Retrying.'.format(retry + 1, e))
                pass

        if failed_requests:
            logging.warning('Still {0} failed request(s) after {1} retries:'.format(
                len(failed_requests), self._max_retry,
            ))
            for failed_request, failed_response in failed_requests:
                if failed_response is not None:
                    # the response text may contain non-ascii characters
                    failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')
                    logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(
                        failed_request.url, failed_response.status_code, failed_response_text,
                    ))
                else:
                    logging.warning('Request to {0} failed with None response.'.format(failed_request.url))

        return list(responses_for_requests.values())
[ "def", "_wait_for_response", "(", "self", ",", "requests", ")", ":", "failed_requests", "=", "[", "]", "responses_for_requests", "=", "OrderedDict", ".", "fromkeys", "(", "requests", ")", "for", "retry", "in", "range", "(", "self", ".", "_max_retry", ")", ":", "try", ":", "logging", ".", "debug", "(", "'Try #{0}'", ".", "format", "(", "retry", "+", "1", ")", ")", "self", ".", "_availability_limiter", ".", "map_with_retries", "(", "requests", ",", "responses_for_requests", ")", "failed_requests", "=", "[", "]", "for", "request", ",", "response", "in", "responses_for_requests", ".", "items", "(", ")", ":", "if", "self", ".", "_drop_404s", "and", "response", "is", "not", "None", "and", "response", ".", "status_code", "==", "404", ":", "logging", ".", "warning", "(", "'Request to {0} failed with status code 404, dropping.'", ".", "format", "(", "request", ".", "url", ")", ")", "elif", "not", "response", ":", "failed_requests", ".", "append", "(", "(", "request", ",", "response", ")", ")", "if", "not", "failed_requests", ":", "break", "logging", ".", "warning", "(", "'Try #{0}. Expected {1} successful response(s) but only got {2}.'", ".", "format", "(", "retry", "+", "1", ",", "len", "(", "requests", ")", ",", "len", "(", "requests", ")", "-", "len", "(", "failed_requests", ")", ",", ")", ")", "# retry only for the failed requests", "requests", "=", "[", "fr", "[", "0", "]", "for", "fr", "in", "failed_requests", "]", "except", "InvalidRequestError", ":", "raise", "except", "Exception", "as", "e", ":", "# log the exception for the informative purposes and pass to the next iteration", "logging", ".", "exception", "(", "'Try #{0}. Exception occured: {1}. Retrying.'", ".", "format", "(", "retry", "+", "1", ",", "e", ")", ")", "pass", "if", "failed_requests", ":", "logging", ".", "warning", "(", "'Still {0} failed request(s) after {1} retries:'", ".", "format", "(", "len", "(", "failed_requests", ")", ",", "self", ".", "_max_retry", ",", ")", ")", "for", "failed_request", ",", "failed_response", "in", "failed_requests", ":", "if", "failed_response", "is", "not", "None", ":", "# in case response text does contain some non-ascii characters", "failed_response_text", "=", "failed_response", ".", "text", ".", "encode", "(", "'ascii'", ",", "'xmlcharrefreplace'", ")", "logging", ".", "warning", "(", "'Request to {0} failed with status code {1}. Response text: {2}'", ".", "format", "(", "failed_request", ".", "url", ",", "failed_response", ".", "status_code", ",", "failed_response_text", ",", ")", ")", "else", ":", "logging", ".", "warning", "(", "'Request to {0} failed with None response.'", ".", "format", "(", "failed_request", ".", "url", ")", ")", "return", "list", "(", "responses_for_requests", ".", "values", "(", ")", ")" ]
Issues a batch of requests and waits for the responses. If some of the requests fail it will retry the failed ones up to `_max_retry` times. Args: requests - A list of requests Returns: A list of `requests.models.Response` objects Raises: InvalidRequestError - if any of the requests returns "403 Forbidden" response
[ "Issues", "a", "batch", "of", "requests", "and", "waits", "for", "the", "responses", ".", "If", "some", "of", "the", "requests", "fail", "it", "will", "retry", "the", "failed", "ones", "up", "to", "_max_retry", "times", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L324-L380
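The retry loop in _wait_for_response re-issues only the requests that failed on the previous pass. A self-contained sketch of that pattern, with a stand-in send function instead of real HTTP:

import random

def flaky_send(item):
    # Stand-in for an HTTP call that fails about half the time.
    return item.upper() if random.random() < 0.5 else None

def retry_failed(items, send, max_retry=3):
    results = {item: None for item in items}
    pending = list(items)
    for attempt in range(max_retry):
        still_failed = []
        for item in pending:
            results[item] = send(item)
            if results[item] is None:
                still_failed.append(item)
        if not still_failed:
            break
        pending = still_failed  # retry only the failures
    return [results[item] for item in items]

print(retry_failed(['a', 'b', 'c'], flaky_send))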
1,232
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._convert_to_json
def _convert_to_json(self, response): """Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise. """ try: return response.json() except ValueError: logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format( response.request.url, response.text, )) return None
python
def _convert_to_json(self, response): """Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise. """ try: return response.json() except ValueError: logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format( response.request.url, response.text, )) return None
[ "def", "_convert_to_json", "(", "self", ",", "response", ")", ":", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "logging", ".", "warning", "(", "'Expected response in JSON format from {0} but the actual response text is: {1}'", ".", "format", "(", "response", ".", "request", ".", "url", ",", "response", ".", "text", ",", ")", ")", "return", "None" ]
Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise.
[ "Converts", "response", "to", "JSON", ".", "If", "the", "response", "cannot", "be", "converted", "to", "JSON", "then", "None", "is", "returned", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L382-L397
1,233
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest._multi_request
def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False): """Issues multiple batches of simultaneous HTTP requests and waits for responses. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs Returns: If multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise If a single request is made, the return is not a list Raises: InvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response """ if not urls: raise InvalidRequestError('No URL supplied') # Break the params into batches of request_params request_params = self._zip_request_params(urls, query_params, data) batch_of_params = [ request_params[pos:pos + self._max_requests] for pos in range(0, len(request_params), self._max_requests) ] # Iteratively issue each batch, applying the rate limiter if necessary all_responses = [] for param_batch in batch_of_params: if self._rate_limiter: self._rate_limiter.make_calls(num_calls=len(param_batch)) prepared_requests = [ self._create_request( verb, url, query_params=query_param, data=datum, send_as_file=send_as_file, ) for url, query_param, datum in param_batch ] responses = self._wait_for_response(prepared_requests) for response in responses: if response: all_responses.append(self._convert_to_json(response) if to_json else response) else: all_responses.append(None) return all_responses
python
def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False): """Issues multiple batches of simultaneous HTTP requests and waits for responses. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs Returns: If multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise If a single request is made, the return is not a list Raises: InvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response """ if not urls: raise InvalidRequestError('No URL supplied') # Break the params into batches of request_params request_params = self._zip_request_params(urls, query_params, data) batch_of_params = [ request_params[pos:pos + self._max_requests] for pos in range(0, len(request_params), self._max_requests) ] # Iteratively issue each batch, applying the rate limiter if necessary all_responses = [] for param_batch in batch_of_params: if self._rate_limiter: self._rate_limiter.make_calls(num_calls=len(param_batch)) prepared_requests = [ self._create_request( verb, url, query_params=query_param, data=datum, send_as_file=send_as_file, ) for url, query_param, datum in param_batch ] responses = self._wait_for_response(prepared_requests) for response in responses: if response: all_responses.append(self._convert_to_json(response) if to_json else response) else: all_responses.append(None) return all_responses
[ "def", "_multi_request", "(", "self", ",", "verb", ",", "urls", ",", "query_params", ",", "data", ",", "to_json", "=", "True", ",", "send_as_file", "=", "False", ")", ":", "if", "not", "urls", ":", "raise", "InvalidRequestError", "(", "'No URL supplied'", ")", "# Break the params into batches of request_params", "request_params", "=", "self", ".", "_zip_request_params", "(", "urls", ",", "query_params", ",", "data", ")", "batch_of_params", "=", "[", "request_params", "[", "pos", ":", "pos", "+", "self", ".", "_max_requests", "]", "for", "pos", "in", "range", "(", "0", ",", "len", "(", "request_params", ")", ",", "self", ".", "_max_requests", ")", "]", "# Iteratively issue each batch, applying the rate limiter if necessary", "all_responses", "=", "[", "]", "for", "param_batch", "in", "batch_of_params", ":", "if", "self", ".", "_rate_limiter", ":", "self", ".", "_rate_limiter", ".", "make_calls", "(", "num_calls", "=", "len", "(", "param_batch", ")", ")", "prepared_requests", "=", "[", "self", ".", "_create_request", "(", "verb", ",", "url", ",", "query_params", "=", "query_param", ",", "data", "=", "datum", ",", "send_as_file", "=", "send_as_file", ",", ")", "for", "url", ",", "query_param", ",", "datum", "in", "param_batch", "]", "responses", "=", "self", ".", "_wait_for_response", "(", "prepared_requests", ")", "for", "response", "in", "responses", ":", "if", "response", ":", "all_responses", ".", "append", "(", "self", ".", "_convert_to_json", "(", "response", ")", "if", "to_json", "else", "response", ")", "else", ":", "all_responses", ".", "append", "(", "None", ")", "return", "all_responses" ]
Issues multiple batches of simultaneous HTTP requests and waits for responses. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs Returns: If multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise If a single request is made, the return is not a list Raises: InvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response
[ "Issues", "multiple", "batches", "of", "simultaneous", "HTTP", "requests", "and", "waits", "for", "responses", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L399-L443
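_multi_request slices the zipped request params into fixed-size batches before issuing them, charging the rate limiter one call per request in the batch. The slicing idiom on its own:

def batches(seq, size):
    # Same slicing expression as in _multi_request above.
    return [seq[pos:pos + size] for pos in range(0, len(seq), size)]

params = list(range(7))
for batch in batches(params, 3):
    # A rate limiter, if configured, would be charged len(batch) calls here.
    print(batch)  # [0, 1, 2] then [3, 4, 5] then [6]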
1,234
Yelp/threat_intel
threat_intel/util/http.py
MultiRequest.error_handling
def error_handling(cls, fn): """Decorator to handle errors""" def wrapper(*args, **kwargs): try: result = fn(*args, **kwargs) return result except InvalidRequestError as e: write_exception(e) if hasattr(e, 'request'): write_error_message('request {0}'.format(repr(e.request))) if hasattr(e, 'response'): write_error_message('response {0}'.format(repr(e.response))) raise e return wrapper
python
def error_handling(cls, fn): """Decorator to handle errors""" def wrapper(*args, **kwargs): try: result = fn(*args, **kwargs) return result except InvalidRequestError as e: write_exception(e) if hasattr(e, 'request'): write_error_message('request {0}'.format(repr(e.request))) if hasattr(e, 'response'): write_error_message('response {0}'.format(repr(e.response))) raise e return wrapper
[ "def", "error_handling", "(", "cls", ",", "fn", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "result", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "except", "InvalidRequestError", "as", "e", ":", "write_exception", "(", "e", ")", "if", "hasattr", "(", "e", ",", "'request'", ")", ":", "write_error_message", "(", "'request {0}'", ".", "format", "(", "repr", "(", "e", ".", "request", ")", ")", ")", "if", "hasattr", "(", "e", ",", "'response'", ")", ":", "write_error_message", "(", "'response {0}'", ".", "format", "(", "repr", "(", "e", ".", "response", ")", ")", ")", "raise", "e", "return", "wrapper" ]
Decorator to handle errors
[ "Decorator", "to", "handle", "errors" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L450-L465
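error_handling logs an InvalidRequestError, plus any request/response attached to it, before re-raising. A hedged usage sketch, assuming the decorator is exposed as a classmethod on MultiRequest (which the cls parameter suggests):

from threat_intel.util.http import MultiRequest

@MultiRequest.error_handling
def fetch(api, urls):
    # Any InvalidRequestError raised here is written to the error output
    # (including e.request / e.response when present) and then re-raised.
    return api.multi_get(urls)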
1,235
glasslion/redlock
redlock/lock.py
RedLock.acquire_node
def acquire_node(self, node): """ acquire a single redis node """ try: return node.set(self.resource, self.lock_key, nx=True, px=self.ttl) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): return False
python
def acquire_node(self, node): """ acquire a single redis node """ try: return node.set(self.resource, self.lock_key, nx=True, px=self.ttl) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): return False
[ "def", "acquire_node", "(", "self", ",", "node", ")", ":", "try", ":", "return", "node", ".", "set", "(", "self", ".", "resource", ",", "self", ".", "lock_key", ",", "nx", "=", "True", ",", "px", "=", "self", ".", "ttl", ")", "except", "(", "redis", ".", "exceptions", ".", "ConnectionError", ",", "redis", ".", "exceptions", ".", "TimeoutError", ")", ":", "return", "False" ]
acquire a single redis node
[ "acquire", "a", "single", "redis", "node" ]
7f873cc362eefa7f7adee8d4913e64f87c1fd1c9
https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L135-L142
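acquire_node leans on the atomic Redis SET options: NX only sets the key if it does not already exist, and PX attaches a TTL in milliseconds so a crashed holder cannot keep the lock forever. A standalone redis-py sketch; the host, key, and TTL are placeholders:

import redis

node = redis.StrictRedis(host='localhost', port=6379)

# True only if 'my-resource' was not already held; the key expires
# automatically after 10 seconds.
acquired = node.set('my-resource', 'unique-lock-key', nx=True, px=10000)
print(bool(acquired))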
1,236
glasslion/redlock
redlock/lock.py
RedLock.release_node
def release_node(self, node): """ release a single redis node """ # use the lua script to release the lock in a safe way try: node._release_script(keys=[self.resource], args=[self.lock_key]) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): pass
python
def release_node(self, node): """ release a single redis node """ # use the lua script to release the lock in a safe way try: node._release_script(keys=[self.resource], args=[self.lock_key]) except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError): pass
[ "def", "release_node", "(", "self", ",", "node", ")", ":", "# use the lua script to release the lock in a safe way", "try", ":", "node", ".", "_release_script", "(", "keys", "=", "[", "self", ".", "resource", "]", ",", "args", "=", "[", "self", ".", "lock_key", "]", ")", "except", "(", "redis", ".", "exceptions", ".", "ConnectionError", ",", "redis", ".", "exceptions", ".", "TimeoutError", ")", ":", "pass" ]
release a single redis node
[ "release", "a", "single", "redis", "node" ]
7f873cc362eefa7f7adee8d4913e64f87c1fd1c9
https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L144-L152
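release_node invokes a Lua script registered on the client. The record does not include the script body, but the usual compare-and-delete script for a safe release looks like this (a sketch of the standard Redlock recipe, not necessarily this library's exact script):

import redis

node = redis.StrictRedis(host='localhost', port=6379)

# Delete the key only if it still holds our lock_key; a bare DEL could
# release a lock that another client has acquired in the meantime.
RELEASE_LUA = """
if redis.call("get", KEYS[1]) == ARGV[1] then
    return redis.call("del", KEYS[1])
else
    return 0
end
"""
release_script = node.register_script(RELEASE_LUA)
release_script(keys=['my-resource'], args=['unique-lock-key'])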
1,237
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi._extract_response_xml
def _extract_response_xml(self, domain, response):
        """Extract XML content of an HTTP response into dictionary format.

        Args:
            domain: a string domain name the response describes
            response: HTTP Response object
        Returns:
            A dictionary: {alexa-ranking key : alexa-ranking value}.
        """
        attributes = {}
        alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
        try:
            xml_root = ET.fromstring(response._content)
            for xml_child in xml_root.findall('SD//'):
                if xml_child.tag in alexa_keys and \
                        alexa_keys[xml_child.tag] in xml_child.attrib:
                    attributes[xml_child.tag.lower()] = \
                        xml_child.attrib[alexa_keys[xml_child.tag]]
        except ParseError:
            # Skip ill-formatted XML and return no Alexa attributes
            pass
        attributes['domain'] = domain
        return {'attributes': attributes}
python
def _extract_response_xml(self, domain, response):
        """Extract XML content of an HTTP response into dictionary format.

        Args:
            domain: a string domain name the response describes
            response: HTTP Response object
        Returns:
            A dictionary: {alexa-ranking key : alexa-ranking value}.
        """
        attributes = {}
        alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
        try:
            xml_root = ET.fromstring(response._content)
            for xml_child in xml_root.findall('SD//'):
                if xml_child.tag in alexa_keys and \
                        alexa_keys[xml_child.tag] in xml_child.attrib:
                    attributes[xml_child.tag.lower()] = \
                        xml_child.attrib[alexa_keys[xml_child.tag]]
        except ParseError:
            # Skip ill-formatted XML and return no Alexa attributes
            pass
        attributes['domain'] = domain
        return {'attributes': attributes}
[ "def", "_extract_response_xml", "(", "self", ",", "domain", ",", "response", ")", ":", "attributes", "=", "{", "}", "alexa_keys", "=", "{", "'POPULARITY'", ":", "'TEXT'", ",", "'REACH'", ":", "'RANK'", ",", "'RANK'", ":", "'DELTA'", "}", "try", ":", "xml_root", "=", "ET", ".", "fromstring", "(", "response", ".", "_content", ")", "for", "xml_child", "in", "xml_root", ".", "findall", "(", "'SD//'", ")", ":", "if", "xml_child", ".", "tag", "in", "alexa_keys", "and", "alexa_keys", "[", "xml_child", ".", "tag", "]", "in", "xml_child", ".", "attrib", ":", "attributes", "[", "xml_child", ".", "tag", ".", "lower", "(", ")", "]", "=", "xml_child", ".", "attrib", "[", "alexa_keys", "[", "xml_child", ".", "tag", "]", "]", "except", "ParseError", ":", "# Skip ill-formatted XML and return no Alexa attributes", "pass", "attributes", "[", "'domain'", "]", "=", "domain", "return", "{", "'attributes'", ":", "attributes", "}" ]
Extract XML content of an HTTP response into dictionary format.

        Args:
            domain: a string domain name the response describes
            response: HTTP Response object
        Returns:
            A dictionary: {alexa-ranking key : alexa-ranking value}.
[ "Extract", "XML", "content", "of", "an", "HTTP", "response", "into", "dictionary", "format", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L74-L95
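The attribute extraction above can be exercised against a canned Alexa-style payload instead of a live response; the XML below is invented for illustration, and the path is written 'SD/*' (all children of SD), which is equivalent here to the record's 'SD//':

import xml.etree.ElementTree as ET

xml = '<ALEXA VER="0.9"><SD><POPULARITY TEXT="1234"/><REACH RANK="999"/></SD></ALEXA>'
alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}

attributes = {}
for child in ET.fromstring(xml).findall('SD/*'):
    if child.tag in alexa_keys and alexa_keys[child.tag] in child.attrib:
        attributes[child.tag.lower()] = child.attrib[alexa_keys[child.tag]]
print(attributes)  # {'popularity': '1234', 'reach': '999'}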
1,238
Yelp/threat_intel
threat_intel/alexaranking.py
AlexaRankingApi._bulk_cache_lookup
def _bulk_cache_lookup(self, api_name, keys):
        """Performs a bulk cache lookup and returns a tuple with the results
        found and the keys missing in the cache. If the cache is not
        configured it will return an empty dictionary of found results and
        the initial list of keys.

        Args:
            api_name: a string name of the API.
            keys: an enumerable of string keys.
        Returns:
            A tuple: (responses found, missing keys).
        """
        if self._cache:
            responses = self._cache.bulk_lookup(api_name, keys)
            missing_keys = [key for key in keys if key not in responses.keys()]
            return (responses, missing_keys)

        return ({}, keys)
python
def _bulk_cache_lookup(self, api_name, keys):
        """Performs a bulk cache lookup and returns a tuple with the results
        found and the keys missing in the cache. If the cache is not
        configured it will return an empty dictionary of found results and
        the initial list of keys.

        Args:
            api_name: a string name of the API.
            keys: an enumerable of string keys.
        Returns:
            A tuple: (responses found, missing keys).
        """
        if self._cache:
            responses = self._cache.bulk_lookup(api_name, keys)
            missing_keys = [key for key in keys if key not in responses.keys()]
            return (responses, missing_keys)

        return ({}, keys)
[ "def", "_bulk_cache_lookup", "(", "self", ",", "api_name", ",", "keys", ")", ":", "if", "self", ".", "_cache", ":", "responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "api_name", ",", "keys", ")", "missing_keys", "=", "[", "key", "for", "key", "in", "keys", "if", "key", "not", "in", "responses", ".", "keys", "(", ")", "]", "return", "(", "responses", ",", "missing_keys", ")", "return", "(", "{", "}", ",", "keys", ")" ]
Performs a bulk cache lookup and returns a tuple with the results
        found and the keys missing in the cache. If the cache is not
        configured it will return an empty dictionary of found results and
        the initial list of keys.

        Args:
            api_name: a string name of the API.
            keys: an enumerable of string keys.
        Returns:
            A tuple: (responses found, missing keys).
[ "Performes", "a", "bulk", "cache", "lookup", "and", "returns", "a", "tuple", "with", "the", "results", "found", "and", "the", "keys", "missing", "in", "the", "cache", ".", "If", "cached", "is", "not", "configured", "it", "will", "return", "an", "empty", "dictionary", "of", "found", "results", "and", "the", "initial", "list", "of", "keys", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/alexaranking.py#L97-L114
1,239
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache._write_cache_to_file
def _write_cache_to_file(self): """Write the contents of the cache to a file on disk.""" with(open(self._cache_file_name, 'w')) as fp: fp.write(simplejson.dumps(self._cache))
python
def _write_cache_to_file(self): """Write the contents of the cache to a file on disk.""" with(open(self._cache_file_name, 'w')) as fp: fp.write(simplejson.dumps(self._cache))
[ "def", "_write_cache_to_file", "(", "self", ")", ":", "with", "(", "open", "(", "self", ".", "_cache_file_name", ",", "'w'", ")", ")", "as", "fp", ":", "fp", ".", "write", "(", "simplejson", ".", "dumps", "(", "self", ".", "_cache", ")", ")" ]
Write the contents of the cache to a file on disk.
[ "Write", "the", "contents", "of", "the", "cache", "to", "a", "file", "on", "disk", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L42-L45
1,240
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache._read_cache_from_file
def _read_cache_from_file(self): """Read the contents of the cache from a file on disk.""" cache = {} try: with(open(self._cache_file_name, 'r')) as fp: contents = fp.read() cache = simplejson.loads(contents) except (IOError, JSONDecodeError): # The file could not be read. This is not a problem if the file does not exist. pass return cache
python
def _read_cache_from_file(self): """Read the contents of the cache from a file on disk.""" cache = {} try: with(open(self._cache_file_name, 'r')) as fp: contents = fp.read() cache = simplejson.loads(contents) except (IOError, JSONDecodeError): # The file could not be read. This is not a problem if the file does not exist. pass return cache
[ "def", "_read_cache_from_file", "(", "self", ")", ":", "cache", "=", "{", "}", "try", ":", "with", "(", "open", "(", "self", ".", "_cache_file_name", ",", "'r'", ")", ")", "as", "fp", ":", "contents", "=", "fp", ".", "read", "(", ")", "cache", "=", "simplejson", ".", "loads", "(", "contents", ")", "except", "(", "IOError", ",", "JSONDecodeError", ")", ":", "# The file could not be read. This is not a problem if the file does not exist.", "pass", "return", "cache" ]
Read the contents of the cache from a file on disk.
[ "Read", "the", "contents", "of", "the", "cache", "from", "a", "file", "on", "disk", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L47-L58
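Together, _write_cache_to_file and _read_cache_from_file give ApiCache its persistence: dump the dict as JSON on write, and treat a missing or unparsable file as an empty cache on read. A round-trip sketch using the stdlib json module in place of simplejson; the path is a placeholder:

import json

def write_cache(path, cache):
    with open(path, 'w') as fp:
        fp.write(json.dumps(cache))

def read_cache(path):
    try:
        with open(path, 'r') as fp:
            return json.loads(fp.read())
    except (IOError, ValueError):
        # Missing or corrupt file -> start over with an empty cache.
        return {}

write_cache('/tmp/api_cache.json', {'opendns-security': {'example.com': {}}})
print(read_cache('/tmp/api_cache.json'))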
1,241
Yelp/threat_intel
threat_intel/util/api_cache.py
ApiCache.bulk_lookup
def bulk_lookup(self, api_name, keys): """Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys. """ cached_data = {} for key in keys: value = self.lookup_value(api_name, key) if value is not None: cached_data[key] = value return cached_data
python
def bulk_lookup(self, api_name, keys): """Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys. """ cached_data = {} for key in keys: value = self.lookup_value(api_name, key) if value is not None: cached_data[key] = value return cached_data
[ "def", "bulk_lookup", "(", "self", ",", "api_name", ",", "keys", ")", ":", "cached_data", "=", "{", "}", "for", "key", "in", "keys", ":", "value", "=", "self", ".", "lookup_value", "(", "api_name", ",", "key", ")", "if", "value", "is", "not", "None", ":", "cached_data", "[", "key", "]", "=", "value", "return", "cached_data" ]
Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys.
[ "Perform", "lookup", "on", "an", "enumerable", "of", "keys", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L82-L95
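bulk_lookup returns only the keys that hit the cache, leaving the caller to compute the misses. A self-contained sketch with a tiny in-memory stand-in for ApiCache (whose constructor is outside this excerpt):

class DictCache(object):
    """In-memory stand-in for ApiCache, for illustration only."""

    def __init__(self, data):
        self._data = data

    def lookup_value(self, api_name, key):
        return self._data.get(api_name, {}).get(key)

    def bulk_lookup(self, api_name, keys):
        found = {}
        for key in keys:
            value = self.lookup_value(api_name, key)
            if value is not None:
                found[key] = value
        return found

cache = DictCache({'opendns-security': {'a.com': {'status': 0}}})
keys = ['a.com', 'b.com']
hits = cache.bulk_lookup('opendns-security', keys)
misses = [key for key in keys if key not in hits]
print(hits, misses)  # {'a.com': {'status': 0}} ['b.com']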
1,242
Yelp/threat_intel
threat_intel/opendns.py
_cached_by_domain
def _cached_by_domain(api_name): """A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid. """ def wrapped(func): def decorated(self, domains): if not self._cache: return func(self, domains) all_responses = self._cache.bulk_lookup(api_name, domains) domains = list(set(domains) - set(all_responses)) if domains: response = func(self, domains) if not response: raise ResponseError("No response for uncached domains") for domain in response: self._cache.cache_value(api_name, domain, response[domain]) all_responses[domain] = response[domain] return all_responses return decorated return wrapped
python
def _cached_by_domain(api_name): """A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid. """ def wrapped(func): def decorated(self, domains): if not self._cache: return func(self, domains) all_responses = self._cache.bulk_lookup(api_name, domains) domains = list(set(domains) - set(all_responses)) if domains: response = func(self, domains) if not response: raise ResponseError("No response for uncached domains") for domain in response: self._cache.cache_value(api_name, domain, response[domain]) all_responses[domain] = response[domain] return all_responses return decorated return wrapped
[ "def", "_cached_by_domain", "(", "api_name", ")", ":", "def", "wrapped", "(", "func", ")", ":", "def", "decorated", "(", "self", ",", "domains", ")", ":", "if", "not", "self", ".", "_cache", ":", "return", "func", "(", "self", ",", "domains", ")", "all_responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "api_name", ",", "domains", ")", "domains", "=", "list", "(", "set", "(", "domains", ")", "-", "set", "(", "all_responses", ")", ")", "if", "domains", ":", "response", "=", "func", "(", "self", ",", "domains", ")", "if", "not", "response", ":", "raise", "ResponseError", "(", "\"No response for uncached domains\"", ")", "for", "domain", "in", "response", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "domain", ",", "response", "[", "domain", "]", ")", "all_responses", "[", "domain", "]", "=", "response", "[", "domain", "]", "return", "all_responses", "return", "decorated", "return", "wrapped" ]
A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid.
[ "A", "caching", "wrapper", "for", "functions", "that", "take", "a", "list", "of", "domains", "as", "parameters", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L16-L45
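The cache-first flow behind _cached_by_domain, serving what the cache has, calling through only for the misses, then backfilling, can be sketched without the decorator machinery. This simplified version uses a plain dict and omits the ResponseError raised when the fetch returns nothing:

def cached_by_domain(cache, api_name, fetch):
    def lookup(domains):
        found = {d: cache[api_name][d] for d in domains
                 if d in cache.get(api_name, {})}
        missing = [d for d in domains if d not in found]
        if missing:
            fresh = fetch(missing)
            cache.setdefault(api_name, {}).update(fresh)
            found.update(fresh)
        return found
    return lookup

cache = {}
lookup = cached_by_domain(cache, 'categorization', lambda ds: {d: 'ok' for d in ds})
print(lookup(['a.com', 'b.com']))  # both fetched, now cached
print(lookup(['a.com']))           # served from the cache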
1,243
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.domain_score
def domain_score(self, domains):
        """Calls domain scores endpoint.

        This method is deprecated since the OpenDNS Investigate API endpoint
        is also deprecated.
        """
        warn(
            'OpenDNS Domain Scores endpoint is deprecated. Use '
            'InvestigateApi.categorization() instead', DeprecationWarning,
        )
        url_path = 'domains/score/'
        return self._multi_post(url_path, domains)
python
def domain_score(self, domains):
        """Calls domain scores endpoint.

        This method is deprecated since the OpenDNS Investigate API endpoint
        is also deprecated.
        """
        warn(
            'OpenDNS Domain Scores endpoint is deprecated. Use '
            'InvestigateApi.categorization() instead', DeprecationWarning,
        )
        url_path = 'domains/score/'
        return self._multi_post(url_path, domains)
[ "def", "domain_score", "(", "self", ",", "domains", ")", ":", "warn", "(", "'OpenDNS Domain Scores endpoint is deprecated. Use '", "'InvestigateApi.categorization() instead'", ",", "DeprecationWarning", ",", ")", "url_path", "=", "'domains/score/'", "return", "self", ".", "_multi_post", "(", "url_path", ",", "domains", ")" ]
Calls domain scores endpoint.

        This method is deprecated since the OpenDNS Investigate API endpoint
        is also deprecated.
[ "Calls", "domain", "scores", "endpoint", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L115-L126
1,244
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi._multi_get
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None): """Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result} """ all_responses = {} if self._cache: all_responses = self._cache.bulk_lookup(cache_api_name, url_params) url_params = [key for key in url_params if key not in all_responses.keys()] if len(url_params): urls = self._to_urls(fmt_url_path, url_params) responses = self._requests.multi_get(urls, query_params) for url_param, response in zip(url_params, responses): if self._cache: self._cache.cache_value(cache_api_name, url_param, response) all_responses[url_param] = response return all_responses
python
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None): """Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result} """ all_responses = {} if self._cache: all_responses = self._cache.bulk_lookup(cache_api_name, url_params) url_params = [key for key in url_params if key not in all_responses.keys()] if len(url_params): urls = self._to_urls(fmt_url_path, url_params) responses = self._requests.multi_get(urls, query_params) for url_param, response in zip(url_params, responses): if self._cache: self._cache.cache_value(cache_api_name, url_param, response) all_responses[url_param] = response return all_responses
[ "def", "_multi_get", "(", "self", ",", "cache_api_name", ",", "fmt_url_path", ",", "url_params", ",", "query_params", "=", "None", ")", ":", "all_responses", "=", "{", "}", "if", "self", ".", "_cache", ":", "all_responses", "=", "self", ".", "_cache", ".", "bulk_lookup", "(", "cache_api_name", ",", "url_params", ")", "url_params", "=", "[", "key", "for", "key", "in", "url_params", "if", "key", "not", "in", "all_responses", ".", "keys", "(", ")", "]", "if", "len", "(", "url_params", ")", ":", "urls", "=", "self", ".", "_to_urls", "(", "fmt_url_path", ",", "url_params", ")", "responses", "=", "self", ".", "_requests", ".", "multi_get", "(", "urls", ",", "query_params", ")", "for", "url_param", ",", "response", "in", "zip", "(", "url_params", ",", "responses", ")", ":", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "cache_api_name", ",", "url_param", ",", "response", ")", "all_responses", "[", "url_param", "]", "=", "response", "return", "all_responses" ]
Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result}
[ "Makes", "multiple", "GETs", "to", "an", "OpenDNS", "endpoint", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L129-L154
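The fmt_url_path strings that the following records pass to _multi_get are plain format templates; _to_urls itself is outside this excerpt, but an assumed equivalent is one format() call per parameter:

fmt_url_path = u'security/name/{0}.json'
domains = ['example.com', 'example.org']

urls = [fmt_url_path.format(domain) for domain in domains]
print(urls)  # ['security/name/example.com.json', 'security/name/example.org.json']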
1,245
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.security
def security(self, domains): """Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result} """ api_name = 'opendns-security' fmt_url_path = u'security/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
python
def security(self, domains): """Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result} """ api_name = 'opendns-security' fmt_url_path = u'security/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "security", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-security'", "fmt_url_path", "=", "u'security/name/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result}
[ "Calls", "security", "end", "point", "and", "adds", "an", "is_suspicious", "key", "to", "each", "response", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L156-L166
1,246
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_emails
def whois_emails(self, emails): """Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result} """ api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
python
def whois_emails(self, emails): """Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result} """ api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
[ "def", "whois_emails", "(", "self", ",", "emails", ")", ":", "api_name", "=", "'opendns-whois-emails'", "fmt_url_path", "=", "u'whois/emails/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "emails", ")" ]
Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result}
[ "Calls", "WHOIS", "Email", "end", "point" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L168-L178
1,247
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_nameservers
def whois_nameservers(self, nameservers):
        """Calls WHOIS Nameserver end point

        Args:
            nameservers: An enumerable of nameservers
        Returns:
            A dict of {nameserver: domain_result}
        """
        api_name = 'opendns-whois-nameservers'
        fmt_url_path = u'whois/nameservers/{0}'
        return self._multi_get(api_name, fmt_url_path, nameservers)
python
def whois_nameservers(self, nameservers):
        """Calls WHOIS Nameserver end point

        Args:
            nameservers: An enumerable of nameservers
        Returns:
            A dict of {nameserver: domain_result}
        """
        api_name = 'opendns-whois-nameservers'
        fmt_url_path = u'whois/nameservers/{0}'
        return self._multi_get(api_name, fmt_url_path, nameservers)
[ "def", "whois_nameservers", "(", "self", ",", "nameservers", ")", ":", "api_name", "=", "'opendns-whois-nameservers'", "fmt_url_path", "=", "u'whois/nameservers/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "nameservers", ")" ]
Calls WHOIS Nameserver end point

        Args:
            nameservers: An enumerable of nameservers
        Returns:
            A dict of {nameserver: domain_result}
[ "Calls", "WHOIS", "Nameserver", "end", "point" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L180-L190
1,248
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_domains
def whois_domains(self, domains): """Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result} """ api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
python
def whois_domains(self, domains): """Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result} """ api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "whois_domains", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-whois-domain'", "fmt_url_path", "=", "u'whois/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result}
[ "Calls", "WHOIS", "domain", "end", "point" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L192-L202
1,249
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.whois_domains_history
def whois_domains_history(self, domains): """Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result} """ api_name = 'opendns-whois-domain-history' fmt_url_path = u'whois/{0}/history' return self._multi_get(api_name, fmt_url_path, domains)
python
def whois_domains_history(self, domains): """Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result} """ api_name = 'opendns-whois-domain-history' fmt_url_path = u'whois/{0}/history' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "whois_domains_history", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-whois-domain-history'", "fmt_url_path", "=", "u'whois/{0}/history'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result}
[ "Calls", "WHOIS", "domain", "history", "end", "point" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L204-L214
1,250
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.domain_tag
def domain_tag(self, domains):
        """Get the date range when a domain is part of the OpenDNS block list.

        Args:
            domains: an enumerable of string domain names
        Returns:
            An enumerable of strings with period, category, and url
        """
        api_name = 'opendns-domain_tag'
        fmt_url_path = u'domains/{0}/latest_tags'
        return self._multi_get(api_name, fmt_url_path, domains)
python
def domain_tag(self, domains):
        """Get the date range when a domain is part of the OpenDNS block list.

        Args:
            domains: an enumerable of string domain names
        Returns:
            An enumerable of strings with period, category, and url
        """
        api_name = 'opendns-domain_tag'
        fmt_url_path = u'domains/{0}/latest_tags'
        return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "domain_tag", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-domain_tag'", "fmt_url_path", "=", "u'domains/{0}/latest_tags'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Get the date range when a domain is part of the OpenDNS block list.

        Args:
            domains: an enumerable of string domain names
        Returns:
            An enumerable of strings with period, category, and url
[ "Get", "the", "data", "range", "when", "a", "domain", "is", "part", "of", "OpenDNS", "block", "list", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L228-L238
1,251
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.rr_history
def rr_history(self, ips): """Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features """ api_name = 'opendns-rr_history' fmt_url_path = u'dnsdb/ip/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
python
def rr_history(self, ips): """Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features """ api_name = 'opendns-rr_history' fmt_url_path = u'dnsdb/ip/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
[ "def", "rr_history", "(", "self", ",", "ips", ")", ":", "api_name", "=", "'opendns-rr_history'", "fmt_url_path", "=", "u'dnsdb/ip/a/{0}.json'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "ips", ")" ]
Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features
[ "Get", "the", "domains", "related", "to", "input", "ips", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L253-L263
1,252
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.sample
def sample(self, hashes):
        """Get the information about a sample based on its hash.

        Args:
            hashes: an enumerable of strings as hashes
        Returns:
            An enumerable of arrays which contain the information about the
            original samples
        """
        api_name = 'opendns-sample'
        fmt_url_path = u'sample/{0}'
        return self._multi_get(api_name, fmt_url_path, hashes)
python
def sample(self, hashes):
        """Get the information about a sample based on its hash.

        Args:
            hashes: an enumerable of strings as hashes
        Returns:
            An enumerable of arrays which contain the information about the
            original samples
        """
        api_name = 'opendns-sample'
        fmt_url_path = u'sample/{0}'
        return self._multi_get(api_name, fmt_url_path, hashes)
[ "def", "sample", "(", "self", ",", "hashes", ")", ":", "api_name", "=", "'opendns-sample'", "fmt_url_path", "=", "u'sample/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "hashes", ")" ]
Get the information about a sample based on its hash.

        Args:
            hashes: an enumerable of strings as hashes
        Returns:
            An enumerable of arrays which contain the information about the
            original samples
[ "Get", "the", "information", "about", "a", "sample", "based", "on", "its", "hash", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L289-L300
1,253
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.search
def search(self, patterns, start=30, limit=1000, include_category=False):
        """Performs pattern searches against the Investigate database.

        Args:
            patterns: An enumerable of RegEx domain patterns to search for
            start: How many days back the results extend (max is 30)
            limit: Number of results to show (max is 1000)
            include_category: Include OpenDNS security categories
        Returns:
            An enumerable of matching domain strings
        """
        api_name = 'opendns-patterns'
        fmt_url_path = u'search/{0}'
        start = '-{0}days'.format(start)
        include_category = str(include_category).lower()
        query_params = {
            'start': start,
            'limit': limit,
            'includecategory': include_category,
        }
        return self._multi_get(api_name, fmt_url_path, patterns, query_params)
python
def search(self, patterns, start=30, limit=1000, include_category=False):
        """Performs pattern searches against the Investigate database.

        Args:
            patterns: An enumerable of RegEx domain patterns to search for
            start: How many days back the results extend (max is 30)
            limit: Number of results to show (max is 1000)
            include_category: Include OpenDNS security categories
        Returns:
            An enumerable of matching domain strings
        """
        api_name = 'opendns-patterns'
        fmt_url_path = u'search/{0}'
        start = '-{0}days'.format(start)
        include_category = str(include_category).lower()
        query_params = {
            'start': start,
            'limit': limit,
            'includecategory': include_category,
        }
        return self._multi_get(api_name, fmt_url_path, patterns, query_params)
[ "def", "search", "(", "self", ",", "patterns", ",", "start", "=", "30", ",", "limit", "=", "1000", ",", "include_category", "=", "False", ")", ":", "api_name", "=", "'opendns-patterns'", "fmt_url_path", "=", "u'search/{0}'", "start", "=", "'-{0}days'", ".", "format", "(", "start", ")", "include_category", "=", "str", "(", "include_category", ")", ".", "lower", "(", ")", "query_params", "=", "{", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "'includecategory'", ":", "include_category", ",", "}", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "patterns", ",", "query_params", ")" ]
Performs pattern searches against the Investigate database.

        Args:
            patterns: An enumerable of RegEx domain patterns to search for
            start: How many days back the results extend (max is 30)
            limit: Number of results to show (max is 1000)
            include_category: Include OpenDNS security categories
        Returns:
            An enumerable of matching domain strings
[ "Performs", "pattern", "searches", "against", "the", "Investigate", "database", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L302-L322
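search() is the one record here that also builds query params before delegating to _multi_get; the exact shape it constructs, reproduced standalone:

start, limit, include_category = 30, 1000, False

query_params = {
    'start': '-{0}days'.format(start),                 # '-30days'
    'limit': limit,                                    # 1000
    'includecategory': str(include_category).lower(),  # 'false'
}
print(query_params)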
1,254
Yelp/threat_intel
threat_intel/opendns.py
InvestigateApi.risk_score
def risk_score(self, domains): """Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores """ api_name = 'opendns-risk_score' fmt_url_path = u'domains/risk-score/{0}' return self._multi_get(api_name, fmt_url_path, domains)
python
def risk_score(self, domains): """Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores """ api_name = 'opendns-risk_score' fmt_url_path = u'domains/risk-score/{0}' return self._multi_get(api_name, fmt_url_path, domains)
[ "def", "risk_score", "(", "self", ",", "domains", ")", ":", "api_name", "=", "'opendns-risk_score'", "fmt_url_path", "=", "u'domains/risk-score/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "domains", ")" ]
Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores
[ "Performs", "Umbrella", "risk", "score", "analysis", "on", "the", "input", "domains" ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L324-L334
1,255
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._extract_all_responses
def _extract_all_responses(self, resources, api_endpoint, api_name): """ Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value. """ all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def _extract_all_responses(self, resources, api_endpoint, api_name): """ Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value. """ all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "_extract_all_responses", "(", "self", ",", "resources", ",", "api_endpoint", ",", "api_name", ")", ":", "all_responses", ",", "resources", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "resources", ")", "resource_chunks", "=", "self", ".", "_prepare_resource_chunks", "(", "resources", ")", "response_chunks", "=", "self", ".", "_request_reports", "(", "\"resource\"", ",", "resource_chunks", ",", "api_endpoint", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value.
[ "Aux", "function", "to", "extract", "all", "the", "API", "endpoint", "responses", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L52-L67
1,256
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_url_distribution
def get_url_distribution(self, params=None): """Retrieves a live feed with the latest URLs submitted to VT. Args: params: a dictionary with name and value for optional arguments Returns: A dict with the VT report. """ params = params or {} all_responses = {} api_name = 'virustotal-url-distribution' response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def get_url_distribution(self, params=None): """Retrieves a live feed with the latest URLs submitted to VT. Args: params: a dictionary with name and value for optional arguments Returns: A dict with the VT report. """ params = params or {} all_responses = {} api_name = 'virustotal-url-distribution' response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "get_url_distribution", "(", "self", ",", "params", "=", "None", ")", ":", "params", "=", "params", "or", "{", "}", "all_responses", "=", "{", "}", "api_name", "=", "'virustotal-url-distribution'", "response_chunks", "=", "self", ".", "_request_reports", "(", "list", "(", "params", ".", "keys", "(", ")", ")", ",", "list", "(", "params", ".", "values", "(", ")", ")", ",", "'url/distribution'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Retrieves a live feed with the latest URLs submitted to VT. Args: params: a dictionary with name and value for optional arguments Returns: A dict with the VT report.
[ "Retrieves", "a", "live", "feed", "with", "the", "latest", "URLs", "submitted", "to", "VT", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L128-L143
1,257
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_url_reports
def get_url_reports(self, resources): """Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value. """ api_name = 'virustotal-url-reports' (all_responses, resources) = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources, '\n') response_chunks = self._request_reports("resource", resource_chunks, 'url/report') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
python
def get_url_reports(self, resources): """Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value. """ api_name = 'virustotal-url-reports' (all_responses, resources) = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources, '\n') response_chunks = self._request_reports("resource", resource_chunks, 'url/report') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
[ "def", "get_url_reports", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-url-reports'", "(", "all_responses", ",", "resources", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "resources", ")", "resource_chunks", "=", "self", ".", "_prepare_resource_chunks", "(", "resources", ",", "'\\n'", ")", "response_chunks", "=", "self", ".", "_request_reports", "(", "\"resource\"", ",", "resource_chunks", ",", "'url/report'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response_chunks", ",", "api_name", ")", "return", "all_responses" ]
Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value.
[ "Retrieves", "a", "scan", "report", "on", "a", "given", "URL", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L167-L182
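A similar hedged usage sketch for the VirusTotal records: it assumes VirusTotalApi takes the API key as its first constructor argument, as in the project README; 'positives' is a field of the v2 URL report, read defensively with .get.

from threat_intel.virustotal import VirusTotalApi

vt = VirusTotalApi('my-virustotal-api-key')  # hypothetical key
reports = vt.get_url_reports(['http://example.com/', 'http://example.org/'])
for url, report in reports.items():
    print(url, report.get('positives'))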
1,258
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_ip_reports
def get_ip_reports(self, ips): """Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value. """ api_name = 'virustotal-ip-address-reports' (all_responses, ips) = self._bulk_cache_lookup(api_name, ips) responses = self._request_reports("ip", ips, 'ip-address/report') for ip, response in zip(ips, responses): if self._cache: self._cache.cache_value(api_name, ip, response) all_responses[ip] = response return all_responses
python
def get_ip_reports(self, ips): """Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value. """ api_name = 'virustotal-ip-address-reports' (all_responses, ips) = self._bulk_cache_lookup(api_name, ips) responses = self._request_reports("ip", ips, 'ip-address/report') for ip, response in zip(ips, responses): if self._cache: self._cache.cache_value(api_name, ip, response) all_responses[ip] = response return all_responses
[ "def", "get_ip_reports", "(", "self", ",", "ips", ")", ":", "api_name", "=", "'virustotal-ip-address-reports'", "(", "all_responses", ",", "ips", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "ips", ")", "responses", "=", "self", ".", "_request_reports", "(", "\"ip\"", ",", "ips", ",", "'ip-address/report'", ")", "for", "ip", ",", "response", "in", "zip", "(", "ips", ",", "responses", ")", ":", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "ip", ",", "response", ")", "all_responses", "[", "ip", "]", "=", "response", "return", "all_responses" ]
Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value.
[ "Retrieves", "the", "most", "recent", "VT", "info", "for", "a", "set", "of", "ips", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L185-L203
1,259
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi.get_file_clusters
def get_file_clusters(self, date): """Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report. """ api_name = 'virustotal-file-clusters' (all_responses, resources) = self._bulk_cache_lookup(api_name, date) response = self._request_reports("date", date, 'file/clusters') self._extract_response_chunks(all_responses, response, api_name) return all_responses
python
def get_file_clusters(self, date): """Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report. """ api_name = 'virustotal-file-clusters' (all_responses, resources) = self._bulk_cache_lookup(api_name, date) response = self._request_reports("date", date, 'file/clusters') self._extract_response_chunks(all_responses, response, api_name) return all_responses
[ "def", "get_file_clusters", "(", "self", ",", "date", ")", ":", "api_name", "=", "'virustotal-file-clusters'", "(", "all_responses", ",", "resources", ")", "=", "self", ".", "_bulk_cache_lookup", "(", "api_name", ",", "date", ")", "response", "=", "self", ".", "_request_reports", "(", "\"date\"", ",", "date", ",", "'file/clusters'", ")", "self", ".", "_extract_response_chunks", "(", "all_responses", ",", "response", ",", "api_name", ")", "return", "all_responses" ]
Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report.
[ "Retrieves", "file", "similarity", "clusters", "for", "a", "given", "time", "frame", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L227-L242
1,260
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._prepare_resource_chunks
def _prepare_resource_chunks(self, resources, resource_delim=','): """As in some VirusTotal API methods the call can be made for multiple resources at once, this method prepares a list of concatenated resources according to the maximum number of resources per request. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources. """ return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
python
def _prepare_resource_chunks(self, resources, resource_delim=','): """As in some VirusTotal API methods the call can be made for multiple resources at once, this method prepares a list of concatenated resources according to the maximum number of resources per request. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources. """ return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
[ "def", "_prepare_resource_chunks", "(", "self", ",", "resources", ",", "resource_delim", "=", "','", ")", ":", "return", "[", "self", ".", "_prepare_resource_chunk", "(", "resources", ",", "resource_delim", ",", "pos", ")", "for", "pos", "in", "range", "(", "0", ",", "len", "(", "resources", ")", ",", "self", ".", "_resources_per_req", ")", "]" ]
As in some VirusTotal API methods the call can be made for multiple resources at once, this method prepares a list of concatenated resources according to the maximum number of resources per request. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources.
[ "As", "in", "some", "VirusTotal", "API", "methods", "the", "call", "can", "be", "made", "for", "multiple", "resources", "at", "once", "this", "method", "prepares", "a", "list", "of", "concatenated", "resources", "according", "to", "the", "maximum", "number", "of", "resources", "per", "requests", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L263-L276
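The chunking pattern behind _prepare_resource_chunks, restated as a self-contained sketch (function and argument names here are made up):

def chunk_resources(resources, per_request, delim=','):
    # Join resources into delimiter-separated chunks of at most
    # per_request items, stepping through the list in fixed strides.
    return [delim.join(resources[pos:pos + per_request])
            for pos in range(0, len(resources), per_request)]

print(chunk_resources(['a', 'b', 'c', 'd', 'e'], 2))  # ['a,b', 'c,d', 'e']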
1,261
Yelp/threat_intel
threat_intel/virustotal.py
VirusTotalApi._extract_response_chunks
def _extract_response_chunks(self, all_responses, response_chunks, api_name): """Extracts and caches the responses from the response chunks returned for requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a dict containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API. """ for response_chunk in response_chunks: if not isinstance(response_chunk, list): response_chunk = [response_chunk] for response in response_chunk: if not response: continue if self._cache: self._cache.cache_value(api_name, response['resource'], response) all_responses[response['resource']] = response
python
def _extract_response_chunks(self, all_responses, response_chunks, api_name): """Extracts and caches the responses from the response chunks returned for requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a dict containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API. """ for response_chunk in response_chunks: if not isinstance(response_chunk, list): response_chunk = [response_chunk] for response in response_chunk: if not response: continue if self._cache: self._cache.cache_value(api_name, response['resource'], response) all_responses[response['resource']] = response
[ "def", "_extract_response_chunks", "(", "self", ",", "all_responses", ",", "response_chunks", ",", "api_name", ")", ":", "for", "response_chunk", "in", "response_chunks", ":", "if", "not", "isinstance", "(", "response_chunk", ",", "list", ")", ":", "response_chunk", "=", "[", "response_chunk", "]", "for", "response", "in", "response_chunk", ":", "if", "not", "response", ":", "continue", "if", "self", ".", "_cache", ":", "self", ".", "_cache", ".", "cache_value", "(", "api_name", ",", "response", "[", "'resource'", "]", ",", "response", ")", "all_responses", "[", "response", "[", "'resource'", "]", "]", "=", "response" ]
Extracts and caches the responses from the response chunks returned for requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a dict containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API.
[ "Extracts", "and", "caches", "the", "responses", "from", "the", "response", "chunks", "in", "case", "of", "the", "responses", "for", "the", "requests", "containing", "multiple", "concatenated", "resources", ".", "Extracted", "responses", "are", "added", "to", "the", "already", "cached", "responses", "passed", "in", "the", "all_responses", "parameter", "." ]
60eef841d7cca115ec7857aeb9c553b72b694851
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L295-L315
1,262
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/cms_plugins.py
TextPlugin.get_editor_widget
def get_editor_widget(self, request, plugins, plugin): """ Returns the Django form Widget to be used for the text area """ cancel_url_name = self.get_admin_url_name('delete_on_cancel') cancel_url = reverse('admin:%s' % cancel_url_name) render_plugin_url_name = self.get_admin_url_name('render_plugin') render_plugin_url = reverse('admin:%s' % render_plugin_url_name) action_token = self.get_action_token(request, plugin) # should we delete the text plugin when # the user cancels? delete_text_on_cancel = ( 'delete-on-cancel' in request.GET and # noqa not plugin.get_plugin_instance()[0] ) widget = TextEditorWidget( installed_plugins=plugins, pk=plugin.pk, placeholder=plugin.placeholder, plugin_language=plugin.language, configuration=self.ckeditor_configuration, render_plugin_url=render_plugin_url, cancel_url=cancel_url, action_token=action_token, delete_on_cancel=delete_text_on_cancel, ) return widget
python
def get_editor_widget(self, request, plugins, plugin): """ Returns the Django form Widget to be used for the text area """ cancel_url_name = self.get_admin_url_name('delete_on_cancel') cancel_url = reverse('admin:%s' % cancel_url_name) render_plugin_url_name = self.get_admin_url_name('render_plugin') render_plugin_url = reverse('admin:%s' % render_plugin_url_name) action_token = self.get_action_token(request, plugin) # should we delete the text plugin when # the user cancels? delete_text_on_cancel = ( 'delete-on-cancel' in request.GET and # noqa not plugin.get_plugin_instance()[0] ) widget = TextEditorWidget( installed_plugins=plugins, pk=plugin.pk, placeholder=plugin.placeholder, plugin_language=plugin.language, configuration=self.ckeditor_configuration, render_plugin_url=render_plugin_url, cancel_url=cancel_url, action_token=action_token, delete_on_cancel=delete_text_on_cancel, ) return widget
[ "def", "get_editor_widget", "(", "self", ",", "request", ",", "plugins", ",", "plugin", ")", ":", "cancel_url_name", "=", "self", ".", "get_admin_url_name", "(", "'delete_on_cancel'", ")", "cancel_url", "=", "reverse", "(", "'admin:%s'", "%", "cancel_url_name", ")", "render_plugin_url_name", "=", "self", ".", "get_admin_url_name", "(", "'render_plugin'", ")", "render_plugin_url", "=", "reverse", "(", "'admin:%s'", "%", "render_plugin_url_name", ")", "action_token", "=", "self", ".", "get_action_token", "(", "request", ",", "plugin", ")", "# should we delete the text plugin when", "# the user cancels?", "delete_text_on_cancel", "=", "(", "'delete-on-cancel'", "in", "request", ".", "GET", "and", "# noqa", "not", "plugin", ".", "get_plugin_instance", "(", ")", "[", "0", "]", ")", "widget", "=", "TextEditorWidget", "(", "installed_plugins", "=", "plugins", ",", "pk", "=", "plugin", ".", "pk", ",", "placeholder", "=", "plugin", ".", "placeholder", ",", "plugin_language", "=", "plugin", ".", "language", ",", "configuration", "=", "self", ".", "ckeditor_configuration", ",", "render_plugin_url", "=", "render_plugin_url", ",", "cancel_url", "=", "cancel_url", ",", "action_token", "=", "action_token", ",", "delete_on_cancel", "=", "delete_text_on_cancel", ",", ")", "return", "widget" ]
Returns the Django form Widget to be used for the text area
[ "Returns", "the", "Django", "form", "Widget", "to", "be", "used", "for", "the", "text", "area" ]
a6069096fdac80931fd328055d1d615d168c33df
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/cms_plugins.py#L227-L257
1,263
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/cms_plugins.py
TextPlugin.get_form_class
def get_form_class(self, request, plugins, plugin): """ Returns a subclass of Form to be used by this plugin """ widget = self.get_editor_widget( request=request, plugins=plugins, plugin=plugin, ) instance = plugin.get_plugin_instance()[0] if instance: context = RequestContext(request) context['request'] = request rendered_text = plugin_tags_to_admin_html( text=instance.body, context=context, ) else: rendered_text = None # We avoid mutating the Form declared above by subclassing class TextPluginForm(self.form): body = CharField(widget=widget, required=False) def __init__(self, *args, **kwargs): initial = kwargs.pop('initial', {}) if rendered_text: initial['body'] = rendered_text super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs) return TextPluginForm
python
def get_form_class(self, request, plugins, plugin): """ Returns a subclass of Form to be used by this plugin """ widget = self.get_editor_widget( request=request, plugins=plugins, plugin=plugin, ) instance = plugin.get_plugin_instance()[0] if instance: context = RequestContext(request) context['request'] = request rendered_text = plugin_tags_to_admin_html( text=instance.body, context=context, ) else: rendered_text = None # We avoid mutating the Form declared above by subclassing class TextPluginForm(self.form): body = CharField(widget=widget, required=False) def __init__(self, *args, **kwargs): initial = kwargs.pop('initial', {}) if rendered_text: initial['body'] = rendered_text super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs) return TextPluginForm
[ "def", "get_form_class", "(", "self", ",", "request", ",", "plugins", ",", "plugin", ")", ":", "widget", "=", "self", ".", "get_editor_widget", "(", "request", "=", "request", ",", "plugins", "=", "plugins", ",", "plugin", "=", "plugin", ",", ")", "instance", "=", "plugin", ".", "get_plugin_instance", "(", ")", "[", "0", "]", "if", "instance", ":", "context", "=", "RequestContext", "(", "request", ")", "context", "[", "'request'", "]", "=", "request", "rendered_text", "=", "plugin_tags_to_admin_html", "(", "text", "=", "instance", ".", "body", ",", "context", "=", "context", ",", ")", "else", ":", "rendered_text", "=", "None", "# We avoid mutating the Form declared above by subclassing", "class", "TextPluginForm", "(", "self", ".", "form", ")", ":", "body", "=", "CharField", "(", "widget", "=", "widget", ",", "required", "=", "False", ")", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "initial", "=", "kwargs", ".", "pop", "(", "'initial'", ",", "{", "}", ")", "if", "rendered_text", ":", "initial", "[", "'body'", "]", "=", "rendered_text", "super", "(", "TextPluginForm", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "initial", "=", "initial", ",", "*", "*", "kwargs", ")", "return", "TextPluginForm" ]
Returns a subclass of Form to be used by this plugin
[ "Returns", "a", "subclass", "of", "Form", "to", "be", "used", "by", "this", "plugin" ]
a6069096fdac80931fd328055d1d615d168c33df
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/cms_plugins.py#L259-L291
1,264
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/utils.py
_plugin_tags_to_html
def _plugin_tags_to_html(text, output_func): """ Convert plugin object 'tags' into the form for public site. text is the HTML containing the plugin tags, output_func is called with each plugin object and its regex match to render the replacement markup """ plugins_by_id = get_plugins_from_text(text) def _render_tag(m): try: plugin_id = int(m.groupdict()['pk']) obj = plugins_by_id[plugin_id] except KeyError: # Object must have been deleted. It cannot be rendered to # end user so just remove it from the HTML altogether return u'' else: obj._render_meta.text_enabled = True return output_func(obj, m) return OBJ_ADMIN_RE.sub(_render_tag, text)
python
def _plugin_tags_to_html(text, output_func): """ Convert plugin object 'tags' into the form for public site. text is the HTML containing the plugin tags, output_func is called with each plugin object and its regex match to render the replacement markup """ plugins_by_id = get_plugins_from_text(text) def _render_tag(m): try: plugin_id = int(m.groupdict()['pk']) obj = plugins_by_id[plugin_id] except KeyError: # Object must have been deleted. It cannot be rendered to # end user so just remove it from the HTML altogether return u'' else: obj._render_meta.text_enabled = True return output_func(obj, m) return OBJ_ADMIN_RE.sub(_render_tag, text)
[ "def", "_plugin_tags_to_html", "(", "text", ",", "output_func", ")", ":", "plugins_by_id", "=", "get_plugins_from_text", "(", "text", ")", "def", "_render_tag", "(", "m", ")", ":", "try", ":", "plugin_id", "=", "int", "(", "m", ".", "groupdict", "(", ")", "[", "'pk'", "]", ")", "obj", "=", "plugins_by_id", "[", "plugin_id", "]", "except", "KeyError", ":", "# Object must have been deleted. It cannot be rendered to", "# end user so just remove it from the HTML altogether", "return", "u''", "else", ":", "obj", ".", "_render_meta", ".", "text_enabled", "=", "True", "return", "output_func", "(", "obj", ",", "m", ")", "return", "OBJ_ADMIN_RE", ".", "sub", "(", "_render_tag", ",", "text", ")" ]
Convert plugin object 'tags' into the form for public site. text is the HTML containing the plugin tags, output_func is called with each plugin object and its regex match to render the replacement markup
[ "Convert", "plugin", "object", "tags", "into", "the", "form", "for", "public", "site", "." ]
a6069096fdac80931fd328055d1d615d168c33df
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/utils.py#L91-L110
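The core of _plugin_tags_to_html is re.sub with a callback; a self-contained sketch (the TAG_RE pattern below is illustrative only, the real OBJ_ADMIN_RE is more involved):

import re

TAG_RE = re.compile(r'<cms-plugin id="(?P<pk>\d+)"></cms-plugin>')  # illustrative stand-in

def render_tags(text, plugins_by_id, output_func):
    def _render_tag(m):
        try:
            obj = plugins_by_id[int(m.group('pk'))]
        except KeyError:
            return ''  # plugin was deleted, so drop its tag from the HTML
        return output_func(obj, m)
    return TAG_RE.sub(_render_tag, text)

print(render_tags('<cms-plugin id="1"></cms-plugin>', {1: 'LINK'}, lambda obj, m: obj))  # LINK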
1,265
divio/djangocms-text-ckeditor
djangocms_text_ckeditor/html.py
extract_images
def extract_images(data, plugin): """ Extracts base64 encoded images from drag and drop actions in the browser and saves those images as plugins """ if not settings.TEXT_SAVE_IMAGE_FUNCTION: return data tree_builder = html5lib.treebuilders.getTreeBuilder('dom') parser = html5lib.html5parser.HTMLParser(tree=tree_builder) dom = parser.parse(data) found = False for img in dom.getElementsByTagName('img'): src = img.getAttribute('src') if not src.startswith('data:'): # nothing to do continue width = img.getAttribute('width') height = img.getAttribute('height') # extract the image data data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)') m = data_re.search(src) dr = m.groupdict() mime_type = dr['mime_type'] image_data = dr['data'] if ';' in mime_type: mime_type = mime_type.split(';')[0] try: image_data = base64.b64decode(image_data) except Exception: image_data = base64.urlsafe_b64decode(image_data) try: image_type = mime_type.split('/')[1] except IndexError: # No image type specified -- will convert to jpg below if it's valid image data image_type = '' image = BytesIO(image_data) # generate filename and normalize image format if image_type == 'jpg' or image_type == 'jpeg': file_ending = 'jpg' elif image_type == 'png': file_ending = 'png' elif image_type == 'gif': file_ending = 'gif' else: # try to convert any non-"web-safe" image format to jpg im = Image.open(image) new_image = BytesIO() file_ending = 'jpg' im.save(new_image, 'JPEG') new_image.seek(0) image = new_image filename = u'%s.%s' % (uuid.uuid4(), file_ending) # transform image into a cms plugin image_plugin = img_data_to_plugin( filename, image, parent_plugin=plugin, width=width, height=height ) # render the new html for the plugin new_img_html = plugin_to_tag(image_plugin) # replace the original image node with the newly created cms plugin html img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img) found = True if found: return u''.join([y.toxml() for y in dom.getElementsByTagName('body')[0].childNodes]) else: return data
python
def extract_images(data, plugin): """ Extracts base64 encoded images from drag and drop actions in the browser and saves those images as plugins """ if not settings.TEXT_SAVE_IMAGE_FUNCTION: return data tree_builder = html5lib.treebuilders.getTreeBuilder('dom') parser = html5lib.html5parser.HTMLParser(tree=tree_builder) dom = parser.parse(data) found = False for img in dom.getElementsByTagName('img'): src = img.getAttribute('src') if not src.startswith('data:'): # nothing to do continue width = img.getAttribute('width') height = img.getAttribute('height') # extract the image data data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)') m = data_re.search(src) dr = m.groupdict() mime_type = dr['mime_type'] image_data = dr['data'] if ';' in mime_type: mime_type = mime_type.split(';')[0] try: image_data = base64.b64decode(image_data) except Exception: image_data = base64.urlsafe_b64decode(image_data) try: image_type = mime_type.split('/')[1] except IndexError: # No image type specified -- will convert to jpg below if it's valid image data image_type = '' image = BytesIO(image_data) # generate filename and normalize image format if image_type == 'jpg' or image_type == 'jpeg': file_ending = 'jpg' elif image_type == 'png': file_ending = 'png' elif image_type == 'gif': file_ending = 'gif' else: # try to convert any non-"web-safe" image format to jpg im = Image.open(image) new_image = BytesIO() file_ending = 'jpg' im.save(new_image, 'JPEG') new_image.seek(0) image = new_image filename = u'%s.%s' % (uuid.uuid4(), file_ending) # transform image into a cms plugin image_plugin = img_data_to_plugin( filename, image, parent_plugin=plugin, width=width, height=height ) # render the new html for the plugin new_img_html = plugin_to_tag(image_plugin) # replace the original image node with the newly created cms plugin html img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img) found = True if found: return u''.join([y.toxml() for y in dom.getElementsByTagName('body')[0].childNodes]) else: return data
[ "def", "extract_images", "(", "data", ",", "plugin", ")", ":", "if", "not", "settings", ".", "TEXT_SAVE_IMAGE_FUNCTION", ":", "return", "data", "tree_builder", "=", "html5lib", ".", "treebuilders", ".", "getTreeBuilder", "(", "'dom'", ")", "parser", "=", "html5lib", ".", "html5parser", ".", "HTMLParser", "(", "tree", "=", "tree_builder", ")", "dom", "=", "parser", ".", "parse", "(", "data", ")", "found", "=", "False", "for", "img", "in", "dom", ".", "getElementsByTagName", "(", "'img'", ")", ":", "src", "=", "img", ".", "getAttribute", "(", "'src'", ")", "if", "not", "src", ".", "startswith", "(", "'data:'", ")", ":", "# nothing to do", "continue", "width", "=", "img", ".", "getAttribute", "(", "'width'", ")", "height", "=", "img", ".", "getAttribute", "(", "'height'", ")", "# extract the image data", "data_re", "=", "re", ".", "compile", "(", "r'data:(?P<mime_type>[^\"]*);(?P<encoding>[^\"]*),(?P<data>[^\"]*)'", ")", "m", "=", "data_re", ".", "search", "(", "src", ")", "dr", "=", "m", ".", "groupdict", "(", ")", "mime_type", "=", "dr", "[", "'mime_type'", "]", "image_data", "=", "dr", "[", "'data'", "]", "if", "mime_type", ".", "find", "(", "';'", ")", ":", "mime_type", "=", "mime_type", ".", "split", "(", "';'", ")", "[", "0", "]", "try", ":", "image_data", "=", "base64", ".", "b64decode", "(", "image_data", ")", "except", "Exception", ":", "image_data", "=", "base64", ".", "urlsafe_b64decode", "(", "image_data", ")", "try", ":", "image_type", "=", "mime_type", ".", "split", "(", "'/'", ")", "[", "1", "]", "except", "IndexError", ":", "# No image type specified -- will convert to jpg below if it's valid image data", "image_type", "=", "''", "image", "=", "BytesIO", "(", "image_data", ")", "# genarate filename and normalize image format", "if", "image_type", "==", "'jpg'", "or", "image_type", "==", "'jpeg'", ":", "file_ending", "=", "'jpg'", "elif", "image_type", "==", "'png'", ":", "file_ending", "=", "'png'", "elif", "image_type", "==", "'gif'", ":", "file_ending", "=", "'gif'", "else", ":", "# any not \"web-safe\" image format we try to convert to jpg", "im", "=", "Image", ".", "open", "(", "image", ")", "new_image", "=", "BytesIO", "(", ")", "file_ending", "=", "'jpg'", "im", ".", "save", "(", "new_image", ",", "'JPEG'", ")", "new_image", ".", "seek", "(", "0", ")", "image", "=", "new_image", "filename", "=", "u'%s.%s'", "%", "(", "uuid", ".", "uuid4", "(", ")", ",", "file_ending", ")", "# transform image into a cms plugin", "image_plugin", "=", "img_data_to_plugin", "(", "filename", ",", "image", ",", "parent_plugin", "=", "plugin", ",", "width", "=", "width", ",", "height", "=", "height", ")", "# render the new html for the plugin", "new_img_html", "=", "plugin_to_tag", "(", "image_plugin", ")", "# replace the original image node with the newly created cms plugin html", "img", ".", "parentNode", ".", "replaceChild", "(", "parser", ".", "parseFragment", "(", "new_img_html", ")", ".", "childNodes", "[", "0", "]", ",", "img", ")", "found", "=", "True", "if", "found", ":", "return", "u''", ".", "join", "(", "[", "y", ".", "toxml", "(", ")", "for", "y", "in", "dom", ".", "getElementsByTagName", "(", "'body'", ")", "[", "0", "]", ".", "childNodes", "]", ")", "else", ":", "return", "data" ]
extracts base64 encoded images from drag and drop actions in browser and saves those images as plugins
[ "extracts", "base64", "encoded", "images", "from", "drag", "and", "drop", "actions", "in", "browser", "and", "saves", "those", "images", "as", "plugins" ]
a6069096fdac80931fd328055d1d615d168c33df
https://github.com/divio/djangocms-text-ckeditor/blob/a6069096fdac80931fd328055d1d615d168c33df/djangocms_text_ckeditor/html.py#L76-L140
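The data-URI parsing step of extract_images can be exercised on its own; a sketch using the same regex and a tiny illustrative payload (not a real PNG):

import base64
import re
from io import BytesIO

DATA_RE = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)')

src = 'data:image/png;base64,aGVsbG8='  # payload decodes to b'hello', illustration only
m = DATA_RE.search(src)
mime_type = m.group('mime_type').split(';')[0]
image = BytesIO(base64.b64decode(m.group('data')))
print(mime_type, image.getvalue())  # image/png b'hello'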
1,266
edx/i18n-tools
i18n/config.py
Configuration.default_config_filename
def default_config_filename(root_dir=None): """ Returns the default name of the configuration file. """ root_dir = Path(root_dir) if root_dir else Path('.').abspath() locale_dir = root_dir / 'locale' if not os.path.exists(locale_dir): locale_dir = root_dir / 'conf' / 'locale' return locale_dir / BASE_CONFIG_FILENAME
python
def default_config_filename(root_dir=None): """ Returns the default name of the configuration file. """ root_dir = Path(root_dir) if root_dir else Path('.').abspath() locale_dir = root_dir / 'locale' if not os.path.exists(locale_dir): locale_dir = root_dir / 'conf' / 'locale' return locale_dir / BASE_CONFIG_FILENAME
[ "def", "default_config_filename", "(", "root_dir", "=", "None", ")", ":", "root_dir", "=", "Path", "(", "root_dir", ")", "if", "root_dir", "else", "Path", "(", "'.'", ")", ".", "abspath", "(", ")", "locale_dir", "=", "root_dir", "/", "'locale'", "if", "not", "os", ".", "path", ".", "exists", "(", "locale_dir", ")", ":", "locale_dir", "=", "root_dir", "/", "'conf'", "/", "'locale'", "return", "locale_dir", "/", "BASE_CONFIG_FILENAME" ]
Returns the default name of the configuration file.
[ "Returns", "the", "default", "name", "of", "the", "configuration", "file", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/config.py#L47-L55
1,267
edx/i18n-tools
i18n/config.py
Configuration.rtl_langs
def rtl_langs(self): """ Returns the set of translated RTL language codes present in self.locales. Ignores source locale. """ def is_rtl(lang): """ Returns True if lang is an RTL language Args: lang (str): The language to be checked Returns: True if lang is an RTL language. """ # Base RTL langs are Arabic, Farsi, Hebrew, and Urdu base_rtl = ['ar', 'fa', 'he', 'ur'] # do this to capture both 'fa' and 'fa_IR' return any([lang.startswith(base_code) for base_code in base_rtl]) return sorted(set([lang for lang in self.translated_locales if is_rtl(lang)]))
python
def rtl_langs(self): """ Returns the set of translated RTL language codes present in self.locales. Ignores source locale. """ def is_rtl(lang): """ Returns True if lang is an RTL language Args: lang (str): The language to be checked Returns: True if lang is an RTL language. """ # Base RTL langs are Arabic, Farsi, Hebrew, and Urdu base_rtl = ['ar', 'fa', 'he', 'ur'] # do this to capture both 'fa' and 'fa_IR' return any([lang.startswith(base_code) for base_code in base_rtl]) return sorted(set([lang for lang in self.translated_locales if is_rtl(lang)]))
[ "def", "rtl_langs", "(", "self", ")", ":", "def", "is_rtl", "(", "lang", ")", ":", "\"\"\"\n Returns True if lang is a RTL language\n\n args:\n lang (str): The language to be checked\n\n Returns:\n True if lang is an RTL language.\n \"\"\"", "# Base RTL langs are Arabic, Farsi, Hebrew, and Urdu", "base_rtl", "=", "[", "'ar'", ",", "'fa'", ",", "'he'", ",", "'ur'", "]", "# do this to capture both 'fa' and 'fa_IR'", "return", "any", "(", "[", "lang", ".", "startswith", "(", "base_code", ")", "for", "base_code", "in", "base_rtl", "]", ")", "return", "sorted", "(", "set", "(", "[", "lang", "for", "lang", "in", "self", ".", "translated_locales", "if", "is_rtl", "(", "lang", ")", "]", ")", ")" ]
Returns the set of translated RTL language codes present in self.locales. Ignores source locale.
[ "Returns", "the", "set", "of", "translated", "RTL", "language", "codes", "present", "in", "self", ".", "locales", ".", "Ignores", "source", "locale", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/config.py#L94-L115
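The prefix matching used by rtl_langs, verified standalone:

base_rtl = ['ar', 'fa', 'he', 'ur']
locales = ['en', 'fa_IR', 'ar', 'eo', 'he']
rtl = sorted({lang for lang in locales if any(lang.startswith(code) for code in base_rtl)})
print(rtl)  # ['ar', 'fa_IR', 'he']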
1,268
edx/i18n-tools
i18n/branch_cleanup.py
BranchCleanup.clean_conf_folder
def clean_conf_folder(self, locale): """Remove the configuration directory for `locale`""" dirname = self.configuration.get_messages_dir(locale) dirname.removedirs_p()
python
def clean_conf_folder(self, locale): """Remove the configuration directory for `locale`""" dirname = self.configuration.get_messages_dir(locale) dirname.removedirs_p()
[ "def", "clean_conf_folder", "(", "self", ",", "locale", ")", ":", "dirname", "=", "self", ".", "configuration", ".", "get_messages_dir", "(", "locale", ")", "dirname", ".", "removedirs_p", "(", ")" ]
Remove the configuration directory for `locale`
[ "Remove", "the", "configuration", "directory", "for", "locale" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/branch_cleanup.py#L27-L30
1,269
edx/i18n-tools
i18n/segment.py
segment_pofiles
def segment_pofiles(configuration, locale): """Segment all the pofiles for `locale`. Returns a set of filenames, all the segment files written. """ files_written = set() for filename, segments in configuration.segment.items(): filename = configuration.get_messages_dir(locale) / filename files_written.update(segment_pofile(filename, segments)) return files_written
python
def segment_pofiles(configuration, locale): """Segment all the pofiles for `locale`. Returns a set of filenames, all the segment files written. """ files_written = set() for filename, segments in configuration.segment.items(): filename = configuration.get_messages_dir(locale) / filename files_written.update(segment_pofile(filename, segments)) return files_written
[ "def", "segment_pofiles", "(", "configuration", ",", "locale", ")", ":", "files_written", "=", "set", "(", ")", "for", "filename", ",", "segments", "in", "configuration", ".", "segment", ".", "items", "(", ")", ":", "filename", "=", "configuration", ".", "get_messages_dir", "(", "locale", ")", "/", "filename", "files_written", ".", "update", "(", "segment_pofile", "(", "filename", ",", "segments", ")", ")", "return", "files_written" ]
Segment all the pofiles for `locale`. Returns a set of filenames, all the segment files written.
[ "Segment", "all", "the", "pofiles", "for", "locale", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L20-L30
1,270
edx/i18n-tools
i18n/segment.py
segment_pofile
def segment_pofile(filename, segments): """Segment a .po file using patterns in `segments`. The .po file at `filename` is read, and the occurrence locations of its messages are examined. `segments` is a dictionary: the keys are segment .po filenames, the values are lists of patterns:: { 'django-studio.po': [ 'cms/*', 'some-other-studio-place/*', ], 'django-weird.po': [ '*/weird_*.*', ], } If all a message's occurrences match the patterns for a segment, then that message is written to the new segmented .po file. Any message that matches no segments, or more than one, is written back to the original file. Arguments: filename (path.path): a path object referring to the original .po file. segments (dict): specification of the segments to create. Returns: a set of path objects, all the segment files written. """ reading_msg = "Reading {num} entries from {file}" writing_msg = "Writing {num} entries to {file}" source_po = polib.pofile(filename) LOG.info(reading_msg.format(file=filename, num=len(source_po))) # pylint: disable=logging-format-interpolation # A new pofile just like the source, but with no messages. We'll put # anything not segmented into this file. remaining_po = copy.deepcopy(source_po) remaining_po[:] = [] # Turn the segments dictionary into two structures: segment_patterns is a # list of (pattern, segmentfile) pairs. segment_po_files is a dict mapping # segment file names to pofile objects of their contents. segment_po_files = {filename: remaining_po} segment_patterns = [] for segmentfile, patterns in segments.items(): segment_po_files[segmentfile] = copy.deepcopy(remaining_po) segment_patterns.extend((pat, segmentfile) for pat in patterns) # Examine each message in the source file. If all of its occurrences match # a pattern for the same segment, it goes in that segment. Otherwise, it # goes in remaining. for msg in source_po: msg_segments = set() for occ_file, _ in msg.occurrences: for pat, segment_file in segment_patterns: if fnmatch.fnmatch(occ_file, pat): msg_segments.add(segment_file) break else: msg_segments.add(filename) assert msg_segments if len(msg_segments) == 1: # This message belongs in this segment. segment_file = msg_segments.pop() segment_po_files[segment_file].append(msg) else: # It's in more than one segment, so put it back in the main file. remaining_po.append(msg) # Write out the results. files_written = set() for segment_file, pofile in segment_po_files.items(): out_file = filename.dirname() / segment_file if not pofile: LOG.error("No messages to write to %s, did you run segment twice?", out_file) else: LOG.info(writing_msg.format(file=out_file, num=len(pofile))) # pylint: disable=logging-format-interpolation pofile.save(out_file) files_written.add(out_file) return files_written
python
def segment_pofile(filename, segments): """Segment a .po file using patterns in `segments`. The .po file at `filename` is read, and the occurrence locations of its messages are examined. `segments` is a dictionary: the keys are segment .po filenames, the values are lists of patterns:: { 'django-studio.po': [ 'cms/*', 'some-other-studio-place/*', ], 'django-weird.po': [ '*/weird_*.*', ], } If all a message's occurrences match the patterns for a segment, then that message is written to the new segmented .po file. Any message that matches no segments, or more than one, is written back to the original file. Arguments: filename (path.path): a path object referring to the original .po file. segments (dict): specification of the segments to create. Returns: a set of path objects, all the segment files written. """ reading_msg = "Reading {num} entries from {file}" writing_msg = "Writing {num} entries to {file}" source_po = polib.pofile(filename) LOG.info(reading_msg.format(file=filename, num=len(source_po))) # pylint: disable=logging-format-interpolation # A new pofile just like the source, but with no messages. We'll put # anything not segmented into this file. remaining_po = copy.deepcopy(source_po) remaining_po[:] = [] # Turn the segments dictionary into two structures: segment_patterns is a # list of (pattern, segmentfile) pairs. segment_po_files is a dict mapping # segment file names to pofile objects of their contents. segment_po_files = {filename: remaining_po} segment_patterns = [] for segmentfile, patterns in segments.items(): segment_po_files[segmentfile] = copy.deepcopy(remaining_po) segment_patterns.extend((pat, segmentfile) for pat in patterns) # Examine each message in the source file. If all of its occurrences match # a pattern for the same segment, it goes in that segment. Otherwise, it # goes in remaining. for msg in source_po: msg_segments = set() for occ_file, _ in msg.occurrences: for pat, segment_file in segment_patterns: if fnmatch.fnmatch(occ_file, pat): msg_segments.add(segment_file) break else: msg_segments.add(filename) assert msg_segments if len(msg_segments) == 1: # This message belongs in this segment. segment_file = msg_segments.pop() segment_po_files[segment_file].append(msg) else: # It's in more than one segment, so put it back in the main file. remaining_po.append(msg) # Write out the results. files_written = set() for segment_file, pofile in segment_po_files.items(): out_file = filename.dirname() / segment_file if not pofile: LOG.error("No messages to write to %s, did you run segment twice?", out_file) else: LOG.info(writing_msg.format(file=out_file, num=len(pofile))) # pylint: disable=logging-format-interpolation pofile.save(out_file) files_written.add(out_file) return files_written
[ "def", "segment_pofile", "(", "filename", ",", "segments", ")", ":", "reading_msg", "=", "\"Reading {num} entries from {file}\"", "writing_msg", "=", "\"Writing {num} entries to {file}\"", "source_po", "=", "polib", ".", "pofile", "(", "filename", ")", "LOG", ".", "info", "(", "reading_msg", ".", "format", "(", "file", "=", "filename", ",", "num", "=", "len", "(", "source_po", ")", ")", ")", "# pylint: disable=logging-format-interpolation", "# A new pofile just like the source, but with no messages. We'll put", "# anything not segmented into this file.", "remaining_po", "=", "copy", ".", "deepcopy", "(", "source_po", ")", "remaining_po", "[", ":", "]", "=", "[", "]", "# Turn the segments dictionary into two structures: segment_patterns is a", "# list of (pattern, segmentfile) pairs. segment_po_files is a dict mapping", "# segment file names to pofile objects of their contents.", "segment_po_files", "=", "{", "filename", ":", "remaining_po", "}", "segment_patterns", "=", "[", "]", "for", "segmentfile", ",", "patterns", "in", "segments", ".", "items", "(", ")", ":", "segment_po_files", "[", "segmentfile", "]", "=", "copy", ".", "deepcopy", "(", "remaining_po", ")", "segment_patterns", ".", "extend", "(", "(", "pat", ",", "segmentfile", ")", "for", "pat", "in", "patterns", ")", "# Examine each message in the source file. If all of its occurrences match", "# a pattern for the same segment, it goes in that segment. Otherwise, it", "# goes in remaining.", "for", "msg", "in", "source_po", ":", "msg_segments", "=", "set", "(", ")", "for", "occ_file", ",", "_", "in", "msg", ".", "occurrences", ":", "for", "pat", ",", "segment_file", "in", "segment_patterns", ":", "if", "fnmatch", ".", "fnmatch", "(", "occ_file", ",", "pat", ")", ":", "msg_segments", ".", "add", "(", "segment_file", ")", "break", "else", ":", "msg_segments", ".", "add", "(", "filename", ")", "assert", "msg_segments", "if", "len", "(", "msg_segments", ")", "==", "1", ":", "# This message belongs in this segment.", "segment_file", "=", "msg_segments", ".", "pop", "(", ")", "segment_po_files", "[", "segment_file", "]", ".", "append", "(", "msg", ")", "else", ":", "# It's in more than one segment, so put it back in the main file.", "remaining_po", ".", "append", "(", "msg", ")", "# Write out the results.", "files_written", "=", "set", "(", ")", "for", "segment_file", ",", "pofile", "in", "segment_po_files", ".", "items", "(", ")", ":", "out_file", "=", "filename", ".", "dirname", "(", ")", "/", "segment_file", "if", "not", "pofile", ":", "LOG", ".", "error", "(", "\"No messages to write to %s, did you run segment twice?\"", ",", "out_file", ")", "else", ":", "LOG", ".", "info", "(", "writing_msg", ".", "format", "(", "file", "=", "out_file", ",", "num", "=", "len", "(", "pofile", ")", ")", ")", "# pylint: disable=logging-format-interpolation", "pofile", ".", "save", "(", "out_file", ")", "files_written", ".", "add", "(", "out_file", ")", "return", "files_written" ]
Segment a .po file using patterns in `segments`. The .po file at `filename` is read, and the occurrence locations of its messages are examined. `segments` is a dictionary: the keys are segment .po filenames, the values are lists of patterns:: { 'django-studio.po': [ 'cms/*', 'some-other-studio-place/*', ], 'django-weird.po': [ '*/weird_*.*', ], } If all a message's occurrences match the patterns for a segment, then that message is written to the new segmented .po file. Any message that matches no segments, or more than one, is written back to the original file. Arguments: filename (path.path): a path object referring to the original .po file. segments (dict): specification of the segments to create. Returns: a set of path objects, all the segment files written.
[ "Segment", "a", ".", "po", "file", "using", "patterns", "in", "segments", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/segment.py#L33-L116
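The segment-assignment rule of segment_pofile (a message moves only when all of its occurrences agree on a single segment) as a self-contained sketch with made-up patterns:

import fnmatch

patterns = [('cms/*', 'django-studio.po'), ('*/weird_*.*', 'django-weird.po')]

def segment_for(occurrence_files, main_file='django.po'):
    segments = set()
    for occ_file in occurrence_files:
        for pat, segment_file in patterns:
            if fnmatch.fnmatch(occ_file, pat):
                segments.add(segment_file)
                break
        else:
            segments.add(main_file)  # no pattern matched this occurrence
    # Unambiguous only when exactly one segment claimed every occurrence.
    return segments.pop() if len(segments) == 1 else main_file

print(segment_for(['cms/views.py']))                  # django-studio.po
print(segment_for(['cms/views.py', 'lms/views.py']))  # django.po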
1,271
edx/i18n-tools
i18n/extract.py
fix_header
def fix_header(pofile): """ Replace default headers with edX headers """ # By default, django-admin.py makemessages creates this header: # # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. pofile.metadata_is_fuzzy = [] # remove [u'fuzzy'] header = pofile.header fixes = ( ('SOME DESCRIPTIVE TITLE', EDX_MARKER), ('Translations template for PROJECT.', EDX_MARKER), ('YEAR', str(datetime.utcnow().year)), ('ORGANIZATION', 'edX'), ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"), ( 'This file is distributed under the same license as the PROJECT project.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ( 'This file is distributed under the same license as the PACKAGE package.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'), ) for src, dest in fixes: header = header.replace(src, dest) pofile.header = header
python
def fix_header(pofile): """ Replace default headers with edX headers """ # By default, django-admin.py makemessages creates this header: # # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. pofile.metadata_is_fuzzy = [] # remove [u'fuzzy'] header = pofile.header fixes = ( ('SOME DESCRIPTIVE TITLE', EDX_MARKER), ('Translations template for PROJECT.', EDX_MARKER), ('YEAR', str(datetime.utcnow().year)), ('ORGANIZATION', 'edX'), ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"), ( 'This file is distributed under the same license as the PROJECT project.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ( 'This file is distributed under the same license as the PACKAGE package.', 'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.' ), ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'), ) for src, dest in fixes: header = header.replace(src, dest) pofile.header = header
[ "def", "fix_header", "(", "pofile", ")", ":", "# By default, django-admin.py makemessages creates this header:", "#", "# SOME DESCRIPTIVE TITLE.", "# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER", "# This file is distributed under the same license as the PACKAGE package.", "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.", "pofile", ".", "metadata_is_fuzzy", "=", "[", "]", "# remove [u'fuzzy']", "header", "=", "pofile", ".", "header", "fixes", "=", "(", "(", "'SOME DESCRIPTIVE TITLE'", ",", "EDX_MARKER", ")", ",", "(", "'Translations template for PROJECT.'", ",", "EDX_MARKER", ")", ",", "(", "'YEAR'", ",", "str", "(", "datetime", ".", "utcnow", "(", ")", ".", "year", ")", ")", ",", "(", "'ORGANIZATION'", ",", "'edX'", ")", ",", "(", "\"THE PACKAGE'S COPYRIGHT HOLDER\"", ",", "\"EdX\"", ")", ",", "(", "'This file is distributed under the same license as the PROJECT project.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'This file is distributed under the same license as the PACKAGE package.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'FIRST AUTHOR <EMAIL@ADDRESS>'", ",", "'EdX Team <[email protected]>'", ")", ",", ")", "for", "src", ",", "dest", "in", "fixes", ":", "header", "=", "header", ".", "replace", "(", "src", ",", "dest", ")", "pofile", ".", "header", "=", "header" ]
Replace default headers with edX headers
[ "Replace", "default", "headers", "with", "edX", "headers" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L184-L216
1,272
edx/i18n-tools
i18n/extract.py
strip_key_strings
def strip_key_strings(pofile): """ Removes all entries in PO which are key strings. These entries should appear only in messages.po, not in any other po files. """ newlist = [entry for entry in pofile if not is_key_string(entry.msgid)] del pofile[:] pofile += newlist
python
def strip_key_strings(pofile): """ Removes all entries in PO which are key strings. These entries should appear only in messages.po, not in any other po files. """ newlist = [entry for entry in pofile if not is_key_string(entry.msgid)] del pofile[:] pofile += newlist
[ "def", "strip_key_strings", "(", "pofile", ")", ":", "newlist", "=", "[", "entry", "for", "entry", "in", "pofile", "if", "not", "is_key_string", "(", "entry", ".", "msgid", ")", "]", "del", "pofile", "[", ":", "]", "pofile", "+=", "newlist" ]
Removes all entries in PO which are key strings. These entries should appear only in messages.po, not in any other po files.
[ "Removes", "all", "entries", "in", "PO", "which", "are", "key", "strings", ".", "These", "entries", "should", "appear", "only", "in", "messages", ".", "po", "not", "in", "any", "other", "po", "files", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L249-L256
1,273
edx/i18n-tools
i18n/extract.py
Extract.rename_source_file
def rename_source_file(self, src, dst): """ Rename a file in the source directory. """ try: os.rename(self.source_msgs_dir.joinpath(src), self.source_msgs_dir.joinpath(dst)) except OSError: pass
python
def rename_source_file(self, src, dst): """ Rename a file in the source directory. """ try: os.rename(self.source_msgs_dir.joinpath(src), self.source_msgs_dir.joinpath(dst)) except OSError: pass
[ "def", "rename_source_file", "(", "self", ",", "src", ",", "dst", ")", ":", "try", ":", "os", ".", "rename", "(", "self", ".", "source_msgs_dir", ".", "joinpath", "(", "src", ")", ",", "self", ".", "source_msgs_dir", ".", "joinpath", "(", "dst", ")", ")", "except", "OSError", ":", "pass" ]
Rename a file in the source directory.
[ "Rename", "a", "file", "in", "the", "source", "directory", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L55-L62
1,274
edx/i18n-tools
i18n/main.py
get_valid_commands
def get_valid_commands(): """ Returns valid commands. Returns: commands (list): List of valid commands """ modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')] commands = [] for modname in modules: if modname == 'main': continue mod = importlib.import_module('i18n.%s' % modname) if hasattr(mod, 'main'): commands.append(modname) return commands
python
def get_valid_commands(): """ Returns valid commands. Returns: commands (list): List of valid commands """ modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')] commands = [] for modname in modules: if modname == 'main': continue mod = importlib.import_module('i18n.%s' % modname) if hasattr(mod, 'main'): commands.append(modname) return commands
[ "def", "get_valid_commands", "(", ")", ":", "modules", "=", "[", "m", ".", "basename", "(", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "m", "in", "Path", "(", "__file__", ")", ".", "dirname", "(", ")", ".", "files", "(", "'*.py'", ")", "]", "commands", "=", "[", "]", "for", "modname", "in", "modules", ":", "if", "modname", "==", "'main'", ":", "continue", "mod", "=", "importlib", ".", "import_module", "(", "'i18n.%s'", "%", "modname", ")", "if", "hasattr", "(", "mod", ",", "'main'", ")", ":", "commands", ".", "append", "(", "modname", ")", "return", "commands" ]
Returns valid commands. Returns: commands (list): List of valid commands
[ "Returns", "valid", "commands", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L11-L26
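A pathlib-based restatement of the discovery loop in get_valid_commands (the original uses the path.py library); a sketch, assuming an importable i18n package:

import importlib
import pathlib

def valid_commands(package_dir, package='i18n'):
    # Scan the package's modules and keep those exposing a main() callable.
    commands = []
    for mod_path in pathlib.Path(package_dir).glob('*.py'):
        name = mod_path.stem
        if name == 'main':
            continue
        mod = importlib.import_module('%s.%s' % (package, name))
        if hasattr(mod, 'main'):
            commands.append(name)
    return commands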
1,275
edx/i18n-tools
i18n/main.py
error_message
def error_message(): """ Writes out error message specifying the valid commands. Returns: Failure code for system exit """ sys.stderr.write('valid commands:\n') for cmd in get_valid_commands(): sys.stderr.write('\t%s\n' % cmd) return -1
python
def error_message(): """ Writes out error message specifying the valid commands. Returns: Failure code for system exit """ sys.stderr.write('valid commands:\n') for cmd in get_valid_commands(): sys.stderr.write('\t%s\n' % cmd) return -1
[ "def", "error_message", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'valid commands:\\n'", ")", "for", "cmd", "in", "get_valid_commands", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'\\t%s\\n'", "%", "cmd", ")", "return", "-", "1" ]
Writes out error message specifying the valid commands. Returns: Failure code for system exit
[ "Writes", "out", "error", "message", "specifying", "the", "valid", "commands", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L29-L39
1,276
edx/i18n-tools
i18n/main.py
main
def main(): """ Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid. """ try: command = sys.argv[1] except IndexError: return error_message() try: module = importlib.import_module('i18n.%s' % command) module.main.args = sys.argv[2:] except (ImportError, AttributeError): return error_message() return module.main()
python
def main(): """ Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid. """ try: command = sys.argv[1] except IndexError: return error_message() try: module = importlib.import_module('i18n.%s' % command) module.main.args = sys.argv[2:] except (ImportError, AttributeError): return error_message() return module.main()
[ "def", "main", "(", ")", ":", "try", ":", "command", "=", "sys", ".", "argv", "[", "1", "]", "except", "IndexError", ":", "return", "error_message", "(", ")", "try", ":", "module", "=", "importlib", ".", "import_module", "(", "'i18n.%s'", "%", "command", ")", "module", ".", "main", ".", "args", "=", "sys", ".", "argv", "[", "2", ":", "]", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "return", "error_message", "(", ")", "return", "module", ".", "main", "(", ")" ]
Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid.
[ "Executes", "the", "given", "command", ".", "Returns", "error_message", "if", "command", "is", "not", "valid", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/main.py#L42-L60
1,277
edx/i18n-tools
i18n/validate.py
validate_po_files
def validate_po_files(configuration, locale_dir, root_dir=None, report_empty=False, check_all=False): """ Validate all of the po files found in the root directory that are not the product of a merge. Returns a boolean indicating whether or not problems were found. """ found_problems = False # List of .po files that are the product of a merge (see generate.py). merged_files = configuration.generate_merge.keys() for dirpath, __, filenames in os.walk(root_dir if root_dir else locale_dir): for name in filenames: __, ext = os.path.splitext(name) filename = os.path.join(dirpath, name) # Validate only .po files that are not the product of a merge (see generate.py) unless check_all is true. # If django-partial.po has a problem, then django.po will also, so don't report it. if ext.lower() == '.po' and (check_all or os.path.basename(filename) not in merged_files): # First validate the format of this file if msgfmt_check_po_file(locale_dir, filename): found_problems = True # Check that the translated strings are valid, and optionally # check for empty translations. But don't check English. if "/locale/en/" not in filename: problems = check_messages(filename, report_empty) if problems: report_problems(filename, problems) found_problems = True dup_filename = filename.replace('.po', '.dup') has_duplicates = os.path.exists(dup_filename) if has_duplicates: log.warning(" Duplicates found in %s, details in .dup file", dup_filename) found_problems = True if not (problems or has_duplicates): log.info(" No problems found in %s", filename) return found_problems
python
def validate_po_files(configuration, locale_dir, root_dir=None, report_empty=False, check_all=False): """ Validate all of the po files found in the root directory that are not the product of a merge. Returns a boolean indicating whether or not problems were found. """ found_problems = False # List of .po files that are the product of a merge (see generate.py). merged_files = configuration.generate_merge.keys() for dirpath, __, filenames in os.walk(root_dir if root_dir else locale_dir): for name in filenames: __, ext = os.path.splitext(name) filename = os.path.join(dirpath, name) # Validate only .po files that are not the product of a merge (see generate.py) unless check_all is true. # If django-partial.po has a problem, then django.po will also, so don't report it. if ext.lower() == '.po' and (check_all or os.path.basename(filename) not in merged_files): # First validate the format of this file if msgfmt_check_po_file(locale_dir, filename): found_problems = True # Check that the translated strings are valid, and optionally # check for empty translations. But don't check English. if "/locale/en/" not in filename: problems = check_messages(filename, report_empty) if problems: report_problems(filename, problems) found_problems = True dup_filename = filename.replace('.po', '.dup') has_duplicates = os.path.exists(dup_filename) if has_duplicates: log.warning(" Duplicates found in %s, details in .dup file", dup_filename) found_problems = True if not (problems or has_duplicates): log.info(" No problems found in %s", filename) return found_problems
[ "def", "validate_po_files", "(", "configuration", ",", "locale_dir", ",", "root_dir", "=", "None", ",", "report_empty", "=", "False", ",", "check_all", "=", "False", ")", ":", "found_problems", "=", "False", "# List of .po files that are the product of a merge (see generate.py).", "merged_files", "=", "configuration", ".", "generate_merge", ".", "keys", "(", ")", "for", "dirpath", ",", "__", ",", "filenames", "in", "os", ".", "walk", "(", "root_dir", "if", "root_dir", "else", "locale_dir", ")", ":", "for", "name", "in", "filenames", ":", "__", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", "# Validate only .po files that are not product of a merge (see generate.py) unless check_all is true.", "# If django-partial.po has a problem, then django.po will also, so don't report it.", "if", "ext", ".", "lower", "(", ")", "==", "'.po'", "and", "(", "check_all", "or", "os", ".", "path", ".", "basename", "(", "filename", ")", "not", "in", "merged_files", ")", ":", "# First validate the format of this file", "if", "msgfmt_check_po_file", "(", "locale_dir", ",", "filename", ")", ":", "found_problems", "=", "True", "# Check that the translated strings are valid, and optionally", "# check for empty translations. But don't check English.", "if", "\"/locale/en/\"", "not", "in", "filename", ":", "problems", "=", "check_messages", "(", "filename", ",", "report_empty", ")", "if", "problems", ":", "report_problems", "(", "filename", ",", "problems", ")", "found_problems", "=", "True", "dup_filename", "=", "filename", ".", "replace", "(", "'.po'", ",", "'.dup'", ")", "has_duplicates", "=", "os", ".", "path", ".", "exists", "(", "dup_filename", ")", "if", "has_duplicates", ":", "log", ".", "warning", "(", "\" Duplicates found in %s, details in .dup file\"", ",", "dup_filename", ")", "found_problems", "=", "True", "if", "not", "(", "problems", "or", "has_duplicates", ")", ":", "log", ".", "info", "(", "\" No problems found in %s\"", ",", "filename", ")", "return", "found_problems" ]
Validate all of the po files found in the root directory that are not the product of a merge. Returns a boolean indicating whether or not problems were found.
[ "Validate", "all", "of", "the", "po", "files", "found", "in", "the", "root", "directory", "that", "are", "not", "product", "of", "a", "merge", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L22-L63
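The directory walk in validate_po_files reduces to a reusable filter: collect .po files under a root while skipping merge targets. A small self-contained sketch of that selection logic; the directory and excluded names below are illustrative:

import os

def find_po_files(root, excluded=frozenset()):
    # Yield every .po file under ``root`` whose basename is not a merge
    # target, mirroring the filtering in validate_po_files above.
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if name.lower().endswith('.po') and name not in excluded:
                yield os.path.join(dirpath, name)

for path in find_po_files('conf/locale', excluded={'django.po', 'djangojs.po'}):
    print(path)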
1,278
edx/i18n-tools
i18n/validate.py
msgfmt_check_po_file
def msgfmt_check_po_file(locale_dir, filename): """ Call GNU msgfmt -c on each .po file to validate its format. Any errors caught by msgfmt are logged to log. Returns a boolean indicating whether or not problems were found. """ found_problems = False # Use relative paths to make output less noisy. rfile = os.path.relpath(filename, locale_dir) out, err = call('msgfmt -c -o /dev/null {}'.format(rfile), working_directory=locale_dir) if err: log.info(u'\n' + out.decode('utf8')) log.warning(u'\n' + err.decode('utf8')) found_problems = True return found_problems
python
def msgfmt_check_po_file(locale_dir, filename): """ Call GNU msgfmt -c on each .po file to validate its format. Any errors caught by msgfmt are logged to log. Returns a boolean indicating whether or not problems were found. """ found_problems = False # Use relative paths to make output less noisy. rfile = os.path.relpath(filename, locale_dir) out, err = call('msgfmt -c -o /dev/null {}'.format(rfile), working_directory=locale_dir) if err: log.info(u'\n' + out.decode('utf8')) log.warning(u'\n' + err.decode('utf8')) found_problems = True return found_problems
[ "def", "msgfmt_check_po_file", "(", "locale_dir", ",", "filename", ")", ":", "found_problems", "=", "False", "# Use relative paths to make output less noisy.", "rfile", "=", "os", ".", "path", ".", "relpath", "(", "filename", ",", "locale_dir", ")", "out", ",", "err", "=", "call", "(", "'msgfmt -c -o /dev/null {}'", ".", "format", "(", "rfile", ")", ",", "working_directory", "=", "locale_dir", ")", "if", "err", ":", "log", ".", "info", "(", "u'\\n'", "+", "out", ".", "decode", "(", "'utf8'", ")", ")", "log", ".", "warning", "(", "u'\\n'", "+", "err", ".", "decode", "(", "'utf8'", ")", ")", "found_problems", "=", "True", "return", "found_problems" ]
Call GNU msgfmt -c on each .po file to validate its format. Any errors caught by msgfmt are logged to log. Returns a boolean indicating whether or not problems were found.
[ "Call", "GNU", "msgfmt", "-", "c", "on", "each", ".", "po", "file", "to", "validate", "its", "format", ".", "Any", "errors", "caught", "by", "msgfmt", "are", "logged", "to", "log", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L66-L83
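msgfmt_check_po_file shells out to GNU gettext's msgfmt in check mode. A standalone sketch of the same check using subprocess directly; it assumes msgfmt is on PATH and a POSIX /dev/null:

import subprocess

def po_file_is_valid(po_path):
    # msgfmt -c compiles the catalog and reports format errors;
    # a zero exit status means the file is well-formed.
    result = subprocess.run(
        ['msgfmt', '-c', '-o', '/dev/null', po_path],
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        print(result.stderr)
    return result.returncode == 0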
1,279
edx/i18n-tools
i18n/validate.py
tags_in_string
def tags_in_string(msg): """ Return the set of tags in a message string. Tags include HTML tags, data placeholders, etc. Skips tags that might change due to translations: HTML entities, <abbr>, and so on. """ def is_linguistic_tag(tag): """Is this tag one that can change with the language?""" if tag.startswith("&"): return True if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]): return True return False __, tags = Converter().detag_string(msg) return set(t for t in tags if not is_linguistic_tag(t))
python
def tags_in_string(msg): """ Return the set of tags in a message string. Tags include HTML tags, data placeholders, etc. Skips tags that might change due to translations: HTML entities, <abbr>, and so on. """ def is_linguistic_tag(tag): """Is this tag one that can change with the language?""" if tag.startswith("&"): return True if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]): return True return False __, tags = Converter().detag_string(msg) return set(t for t in tags if not is_linguistic_tag(t))
[ "def", "tags_in_string", "(", "msg", ")", ":", "def", "is_linguistic_tag", "(", "tag", ")", ":", "\"\"\"Is this tag one that can change with the language?\"\"\"", "if", "tag", ".", "startswith", "(", "\"&\"", ")", ":", "return", "True", "if", "any", "(", "x", "in", "tag", "for", "x", "in", "[", "\"<abbr>\"", ",", "\"<abbr \"", ",", "\"</abbr>\"", "]", ")", ":", "return", "True", "return", "False", "__", ",", "tags", "=", "Converter", "(", ")", ".", "detag_string", "(", "msg", ")", "return", "set", "(", "t", "for", "t", "in", "tags", "if", "not", "is_linguistic_tag", "(", "t", ")", ")" ]
Return the set of tags in a message string. Tags include HTML tags, data placeholders, etc. Skips tags that might change due to translations: HTML entities, <abbr>, and so on.
[ "Return", "the", "set", "of", "tags", "in", "a", "message", "string", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L86-L105
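The point of extracting a tag set is to diff it between a source string and its translation; any non-linguistic tag missing from the translation signals dropped markup. A simplified illustration with a rough stand-in regex (the real pattern lives in Converter.tag_pattern and is broader):

import re

TAG_RE = re.compile(r'<[^>]+>|\{[^}]+\}')  # HTML tags and {placeholders}

def tag_set(msg):
    return set(TAG_RE.findall(msg))

source = 'Press <b>{button}</b> to continue.'
translation = 'Appuyez sur {button} pour continuer.'
print(tag_set(source) - tag_set(translation))  # {'<b>', '</b>'}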
1,280
edx/i18n-tools
i18n/validate.py
astral
def astral(msg): """Does `msg` have characters outside the Basic Multilingual Plane?""" # Python2 narrow builds present astral characters as surrogate pairs. # By encoding as utf32, and decoding DWORDS, we can get at the real code # points. utf32 = msg.encode("utf32")[4:] # [4:] to drop the bom code_points = struct.unpack("%dI" % (len(utf32) / 4), utf32) return any(cp > 0xFFFF for cp in code_points)
python
def astral(msg): """Does `msg` have characters outside the Basic Multilingual Plane?""" # Python2 narrow builds present astral characters as surrogate pairs. # By encoding as utf32, and decoding DWORDS, we can get at the real code # points. utf32 = msg.encode("utf32")[4:] # [4:] to drop the bom code_points = struct.unpack("%dI" % (len(utf32) / 4), utf32) return any(cp > 0xFFFF for cp in code_points)
[ "def", "astral", "(", "msg", ")", ":", "# Python2 narrow builds present astral characters as surrogate pairs.", "# By encoding as utf32, and decoding DWORDS, we can get at the real code", "# points.", "utf32", "=", "msg", ".", "encode", "(", "\"utf32\"", ")", "[", "4", ":", "]", "# [4:] to drop the bom", "code_points", "=", "struct", ".", "unpack", "(", "\"%dI\"", "%", "(", "len", "(", "utf32", ")", "/", "4", ")", ",", "utf32", ")", "return", "any", "(", "cp", ">", "0xFFFF", "for", "cp", "in", "code_points", ")" ]
Does `msg` have characters outside the Basic Multilingual Plane?
[ "Does", "msg", "have", "characters", "outside", "the", "Basic", "Multilingual", "Plane?" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L108-L115
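The surrogate-pair workaround above is only needed on Python 2 narrow builds. On Python 3, str always stores full code points, so the check collapses to a one-liner:

def astral_py3(msg):
    # True if any character lies outside the Basic Multilingual Plane.
    return any(ord(ch) > 0xFFFF for ch in msg)

assert astral_py3('snowman: \u2603') is False
assert astral_py3('cat face: \U0001F431') is True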
1,281
edx/i18n-tools
i18n/validate.py
report_problems
def report_problems(filename, problems): """ Report on the problems found in `filename`. `problems` is a list of tuples as returned by `check_messages`. """ problem_file = filename.replace(".po", ".prob") id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9) tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9) with codecs.open(problem_file, "w", encoding="utf8") as prob_file: for problem in problems: desc, msgid = problem[:2] prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid))) info = u"{}\n{}\n".format(desc, id_filler.fill(msgid)) for translation in problem[2:]: prob_file.write(u"{}\n".format(tx_filler.fill(translation))) info += u"{}\n".format(tx_filler.fill(translation)) log.info(info) prob_file.write(u"\n") log.error(" %s problems in %s, details in .prob file", len(problems), filename)
python
def report_problems(filename, problems): """ Report on the problems found in `filename`. `problems` is a list of tuples as returned by `check_messages`. """ problem_file = filename.replace(".po", ".prob") id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9) tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9) with codecs.open(problem_file, "w", encoding="utf8") as prob_file: for problem in problems: desc, msgid = problem[:2] prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid))) info = u"{}\n{}\n".format(desc, id_filler.fill(msgid)) for translation in problem[2:]: prob_file.write(u"{}\n".format(tx_filler.fill(translation))) info += u"{}\n".format(tx_filler.fill(translation)) log.info(info) prob_file.write(u"\n") log.error(" %s problems in %s, details in .prob file", len(problems), filename)
[ "def", "report_problems", "(", "filename", ",", "problems", ")", ":", "problem_file", "=", "filename", ".", "replace", "(", "\".po\"", ",", "\".prob\"", ")", "id_filler", "=", "textwrap", ".", "TextWrapper", "(", "width", "=", "79", ",", "initial_indent", "=", "\" msgid: \"", ",", "subsequent_indent", "=", "\" \"", "*", "9", ")", "tx_filler", "=", "textwrap", ".", "TextWrapper", "(", "width", "=", "79", ",", "initial_indent", "=", "\" -----> \"", ",", "subsequent_indent", "=", "\" \"", "*", "9", ")", "with", "codecs", ".", "open", "(", "problem_file", ",", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "prob_file", ":", "for", "problem", "in", "problems", ":", "desc", ",", "msgid", "=", "problem", "[", ":", "2", "]", "prob_file", ".", "write", "(", "u\"{}\\n{}\\n\"", ".", "format", "(", "desc", ",", "id_filler", ".", "fill", "(", "msgid", ")", ")", ")", "info", "=", "u\"{}\\n{}\\n\"", ".", "format", "(", "desc", ",", "id_filler", ".", "fill", "(", "msgid", ")", ")", "for", "translation", "in", "problem", "[", "2", ":", "]", ":", "prob_file", ".", "write", "(", "u\"{}\\n\"", ".", "format", "(", "tx_filler", ".", "fill", "(", "translation", ")", ")", ")", "info", "+=", "u\"{}\\n\"", ".", "format", "(", "tx_filler", ".", "fill", "(", "translation", ")", ")", "log", ".", "info", "(", "info", ")", "prob_file", ".", "write", "(", "u\"\\n\"", ")", "log", ".", "error", "(", "\" %s problems in %s, details in .prob file\"", ",", "len", "(", "problems", ")", ",", "filename", ")" ]
Report on the problems found in `filename`. `problems` is a list of tuples as returned by `check_messages`.
[ "Report", "on", "the", "problems", "found", "in", "filename", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L182-L203
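The two TextWrapper instances above produce the aligned "msgid:" / "----->" layout of the .prob file. A quick demonstration of how initial_indent and subsequent_indent cooperate:

import textwrap

id_filler = textwrap.TextWrapper(width=40, initial_indent="  msgid: ",
                                 subsequent_indent=" " * 9)
print(id_filler.fill("A fairly long message id that needs to wrap."))
# Output:
#   msgid: A fairly long message id that
#          needs to wrap.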
1,282
edx/i18n-tools
i18n/generate.py
merge
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True): """ For the given locale, merge the `sources` files to become the `target` file. Note that the target file might also be one of the sources. If fail_if_missing is true, and the files to be merged are missing, throw an Exception, otherwise return silently. If fail_if_missing is false, and the files to be merged are missing, just return silently. """ LOG.info('Merging %s locale %s', target, locale) locale_directory = configuration.get_messages_dir(locale) try: validate_files(locale_directory, sources) except Exception: # pylint: disable=broad-except if not fail_if_missing: return raise # merged file is merged.po merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources) execute(merge_cmd, working_directory=locale_directory) # clean up redundancies in the metadata merged_filename = locale_directory.joinpath('merged.po') duplicate_entries = clean_pofile(merged_filename) # rename merged.po -> django.po (default) target_filename = locale_directory.joinpath(target) os.rename(merged_filename, target_filename) # Write duplicate messages to a file if duplicate_entries: dup_file = target_filename.replace(".po", ".dup") with codecs.open(dup_file, "w", encoding="utf8") as dfile: for (entry, translations) in duplicate_entries: dfile.write(u"{}\n".format(entry)) dfile.write(u"Translations found were:\n\t{}\n\n".format(translations)) LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
python
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True): """ For the given locale, merge the `sources` files to become the `target` file. Note that the target file might also be one of the sources. If fail_if_missing is true, and the files to be merged are missing, throw an Exception, otherwise return silently. If fail_if_missing is false, and the files to be merged are missing, just return silently. """ LOG.info('Merging %s locale %s', target, locale) locale_directory = configuration.get_messages_dir(locale) try: validate_files(locale_directory, sources) except Exception: # pylint: disable=broad-except if not fail_if_missing: return raise # merged file is merged.po merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources) execute(merge_cmd, working_directory=locale_directory) # clean up redundancies in the metadata merged_filename = locale_directory.joinpath('merged.po') duplicate_entries = clean_pofile(merged_filename) # rename merged.po -> django.po (default) target_filename = locale_directory.joinpath(target) os.rename(merged_filename, target_filename) # Write duplicate messages to a file if duplicate_entries: dup_file = target_filename.replace(".po", ".dup") with codecs.open(dup_file, "w", encoding="utf8") as dfile: for (entry, translations) in duplicate_entries: dfile.write(u"{}\n".format(entry)) dfile.write(u"Translations found were:\n\t{}\n\n".format(translations)) LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
[ "def", "merge", "(", "configuration", ",", "locale", ",", "target", "=", "'django.po'", ",", "sources", "=", "(", "'django-partial.po'", ",", ")", ",", "fail_if_missing", "=", "True", ")", ":", "LOG", ".", "info", "(", "'Merging %s locale %s'", ",", "target", ",", "locale", ")", "locale_directory", "=", "configuration", ".", "get_messages_dir", "(", "locale", ")", "try", ":", "validate_files", "(", "locale_directory", ",", "sources", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "if", "not", "fail_if_missing", ":", "return", "raise", "# merged file is merged.po", "merge_cmd", "=", "'msgcat -o merged.po '", "+", "' '", ".", "join", "(", "sources", ")", "execute", "(", "merge_cmd", ",", "working_directory", "=", "locale_directory", ")", "# clean up redunancies in the metadata", "merged_filename", "=", "locale_directory", ".", "joinpath", "(", "'merged.po'", ")", "duplicate_entries", "=", "clean_pofile", "(", "merged_filename", ")", "# rename merged.po -> django.po (default)", "target_filename", "=", "locale_directory", ".", "joinpath", "(", "target", ")", "os", ".", "rename", "(", "merged_filename", ",", "target_filename", ")", "# Write duplicate messages to a file", "if", "duplicate_entries", ":", "dup_file", "=", "target_filename", ".", "replace", "(", "\".po\"", ",", "\".dup\"", ")", "with", "codecs", ".", "open", "(", "dup_file", ",", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "dfile", ":", "for", "(", "entry", ",", "translations", ")", "in", "duplicate_entries", ":", "dfile", ".", "write", "(", "u\"{}\\n\"", ".", "format", "(", "entry", ")", ")", "dfile", ".", "write", "(", "u\"Translations found were:\\n\\t{}\\n\\n\"", ".", "format", "(", "translations", ")", ")", "LOG", ".", "warning", "(", "\" %s duplicates in %s, details in .dup file\"", ",", "len", "(", "duplicate_entries", ")", ",", "target_filename", ")" ]
For the given locale, merge the `sources` files to become the `target` file. Note that the target file might also be one of the sources. If fail_if_missing is true, and the files to be merged are missing, throw an Exception, otherwise return silently. If fail_if_missing is false, and the files to be merged are missing, just return silently.
[ "For", "the", "given", "locale", "merge", "the", "sources", "files", "to", "become", "the", "target", "file", ".", "Note", "that", "the", "target", "file", "might", "also", "be", "one", "of", "the", "sources", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L32-L72
1,283
edx/i18n-tools
i18n/generate.py
merge_files
def merge_files(configuration, locale, fail_if_missing=True): """ Merge all the files in `locale`, as specified in config.yaml. """ for target, sources in configuration.generate_merge.items(): merge(configuration, locale, target, sources, fail_if_missing)
python
def merge_files(configuration, locale, fail_if_missing=True): """ Merge all the files in `locale`, as specified in config.yaml. """ for target, sources in configuration.generate_merge.items(): merge(configuration, locale, target, sources, fail_if_missing)
[ "def", "merge_files", "(", "configuration", ",", "locale", ",", "fail_if_missing", "=", "True", ")", ":", "for", "target", ",", "sources", "in", "configuration", ".", "generate_merge", ".", "items", "(", ")", ":", "merge", "(", "configuration", ",", "locale", ",", "target", ",", "sources", ",", "fail_if_missing", ")" ]
Merge all the files in `locale`, as specified in config.yaml.
[ "Merge", "all", "the", "files", "in", "locale", "as", "specified", "in", "config", ".", "yaml", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L75-L80
1,284
edx/i18n-tools
i18n/generate.py
clean_pofile
def clean_pofile(pofile_path): """ Clean various aspects of a .po file. Fixes: - Removes the fuzzy flag on metadata. - Removes occurrence line numbers so that the generated files don't generate a lot of line noise when they're committed. Returns a list of any duplicate entries found. """ # Reading in the .po file and saving it again fixes redundancies. pomsgs = pofile(pofile_path) # The msgcat tool marks the metadata as fuzzy, but it's ok as it is. pomsgs.metadata_is_fuzzy = False duplicate_entries = [] for entry in pomsgs: # Remove line numbers entry.occurrences = [(filename, None) for filename, __ in entry.occurrences] # Check for merge conflicts. Pick the first, and emit a warning. if 'fuzzy' in entry.flags: # Remove fuzzy from flags entry.flags = [f for f in entry.flags if f != 'fuzzy'] # Save a warning message dup_msg = 'Multiple translations found for single string.\n\tString "{0}"\n\tPresent in files {1}'.format( entry.msgid, [f for (f, __) in entry.occurrences] ) duplicate_entries.append((dup_msg, entry.msgstr)) # Pick the first entry for msgstr in DUPLICATE_ENTRY_PATTERN.split(entry.msgstr): # Ignore any empty strings that may result from the split call if msgstr: # Set the first one we find to be the right one. Strip to remove extraneous # new lines that exist. entry.msgstr = msgstr.strip() # Raise error if there's new lines starting or ending the id string. if entry.msgid.startswith('\n') or entry.msgid.endswith('\n'): raise ValueError( u'{} starts or ends with a new line character, which is not allowed. ' 'Please fix before continuing. Source string is found in {}'.format( entry.msgid, entry.occurrences ).encode('utf-8') ) break pomsgs.save() return duplicate_entries
python
def clean_pofile(pofile_path): """ Clean various aspects of a .po file. Fixes: - Removes the fuzzy flag on metadata. - Removes occurrence line numbers so that the generated files don't generate a lot of line noise when they're committed. Returns a list of any duplicate entries found. """ # Reading in the .po file and saving it again fixes redundancies. pomsgs = pofile(pofile_path) # The msgcat tool marks the metadata as fuzzy, but it's ok as it is. pomsgs.metadata_is_fuzzy = False duplicate_entries = [] for entry in pomsgs: # Remove line numbers entry.occurrences = [(filename, None) for filename, __ in entry.occurrences] # Check for merge conflicts. Pick the first, and emit a warning. if 'fuzzy' in entry.flags: # Remove fuzzy from flags entry.flags = [f for f in entry.flags if f != 'fuzzy'] # Save a warning message dup_msg = 'Multiple translations found for single string.\n\tString "{0}"\n\tPresent in files {1}'.format( entry.msgid, [f for (f, __) in entry.occurrences] ) duplicate_entries.append((dup_msg, entry.msgstr)) # Pick the first entry for msgstr in DUPLICATE_ENTRY_PATTERN.split(entry.msgstr): # Ignore any empty strings that may result from the split call if msgstr: # Set the first one we find to be the right one. Strip to remove extraneous # new lines that exist. entry.msgstr = msgstr.strip() # Raise error if there's new lines starting or ending the id string. if entry.msgid.startswith('\n') or entry.msgid.endswith('\n'): raise ValueError( u'{} starts or ends with a new line character, which is not allowed. ' 'Please fix before continuing. Source string is found in {}'.format( entry.msgid, entry.occurrences ).encode('utf-8') ) break pomsgs.save() return duplicate_entries
[ "def", "clean_pofile", "(", "pofile_path", ")", ":", "# Reading in the .po file and saving it again fixes redundancies.", "pomsgs", "=", "pofile", "(", "pofile_path", ")", "# The msgcat tool marks the metadata as fuzzy, but it's ok as it is.", "pomsgs", ".", "metadata_is_fuzzy", "=", "False", "duplicate_entries", "=", "[", "]", "for", "entry", "in", "pomsgs", ":", "# Remove line numbers", "entry", ".", "occurrences", "=", "[", "(", "filename", ",", "None", ")", "for", "filename", ",", "__", "in", "entry", ".", "occurrences", "]", "# Check for merge conflicts. Pick the first, and emit a warning.", "if", "'fuzzy'", "in", "entry", ".", "flags", ":", "# Remove fuzzy from flags", "entry", ".", "flags", "=", "[", "f", "for", "f", "in", "entry", ".", "flags", "if", "f", "!=", "'fuzzy'", "]", "# Save a warning message", "dup_msg", "=", "'Multiple translations found for single string.\\n\\tString \"{0}\"\\n\\tPresent in files {1}'", ".", "format", "(", "entry", ".", "msgid", ",", "[", "f", "for", "(", "f", ",", "__", ")", "in", "entry", ".", "occurrences", "]", ")", "duplicate_entries", ".", "append", "(", "(", "dup_msg", ",", "entry", ".", "msgstr", ")", ")", "# Pick the first entry", "for", "msgstr", "in", "DUPLICATE_ENTRY_PATTERN", ".", "split", "(", "entry", ".", "msgstr", ")", ":", "# Ignore any empty strings that may result from the split call", "if", "msgstr", ":", "# Set the first one we find to be the right one. Strip to remove extraneous", "# new lines that exist.", "entry", ".", "msgstr", "=", "msgstr", ".", "strip", "(", ")", "# Raise error if there's new lines starting or ending the id string.", "if", "entry", ".", "msgid", ".", "startswith", "(", "'\\n'", ")", "or", "entry", ".", "msgid", ".", "endswith", "(", "'\\n'", ")", ":", "raise", "ValueError", "(", "u'{} starts or ends with a new line character, which is not allowed. '", "'Please fix before continuing. Source string is found in {}'", ".", "format", "(", "entry", ".", "msgid", ",", "entry", ".", "occurrences", ")", ".", "encode", "(", "'utf-8'", ")", ")", "break", "pomsgs", ".", "save", "(", ")", "return", "duplicate_entries" ]
Clean various aspects of a .po file. Fixes: - Removes the fuzzy flag on metadata. - Removes occurrence line numbers so that the generated files don't generate a lot of line noise when they're committed. Returns a list of any duplicate entries found.
[ "Clean", "various", "aspect", "of", "a", ".", "po", "file", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L83-L135
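clean_pofile leans on polib: parse the catalog, mutate entries in place, save. A minimal sketch of just the line-number-stripping step, assuming polib is installed; the path below is hypothetical:

import polib

def strip_line_numbers(po_path):
    # Rewrite each entry's occurrences as (source_file, None) so commits
    # aren't polluted by shifting line numbers, as clean_pofile does above.
    po = polib.pofile(po_path)
    for entry in po:
        entry.occurrences = [(src, None) for src, _ in entry.occurrences]
    po.save()

strip_line_numbers('conf/locale/fr/LC_MESSAGES/django.po')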
1,285
edx/i18n-tools
i18n/dummy.py
new_filename
def new_filename(original_filename, new_locale): """Returns a filename derived from original_filename, using new_locale as the locale""" orig_file = Path(original_filename) new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name return new_file.abspath()
python
def new_filename(original_filename, new_locale): """Returns a filename derived from original_filename, using new_locale as the locale""" orig_file = Path(original_filename) new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name return new_file.abspath()
[ "def", "new_filename", "(", "original_filename", ",", "new_locale", ")", ":", "orig_file", "=", "Path", "(", "original_filename", ")", "new_file", "=", "orig_file", ".", "parent", ".", "parent", ".", "parent", "/", "new_locale", "/", "orig_file", ".", "parent", ".", "name", "/", "orig_file", ".", "name", "return", "new_file", ".", "abspath", "(", ")" ]
Returns a filename derived from original_filename, using new_locale as the locale
[ "Returns", "a", "filename", "derived", "from", "original_filename", "using", "new_locale", "as", "the", "locale" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L217-L221
1,286
edx/i18n-tools
i18n/dummy.py
DummyCommand.run
def run(self, args): """ Generate dummy strings for all source po files. """ configuration = self.configuration source_messages_dir = configuration.source_messages_dir for locale, converter in zip(configuration.dummy_locales, [Dummy(), Dummy2(), ArabicDummy()]): print('Processing source language files into dummy strings, locale "{}"'.format(locale)) for source_file in configuration.source_messages_dir.walkfiles('*.po'): if args.verbose: print(' ', source_file.relpath()) make_dummy(source_messages_dir.joinpath(source_file), locale, converter) if args.verbose: print()
python
def run(self, args): """ Generate dummy strings for all source po files. """ configuration = self.configuration source_messages_dir = configuration.source_messages_dir for locale, converter in zip(configuration.dummy_locales, [Dummy(), Dummy2(), ArabicDummy()]): print('Processing source language files into dummy strings, locale "{}"'.format(locale)) for source_file in configuration.source_messages_dir.walkfiles('*.po'): if args.verbose: print(' ', source_file.relpath()) make_dummy(source_messages_dir.joinpath(source_file), locale, converter) if args.verbose: print()
[ "def", "run", "(", "self", ",", "args", ")", ":", "configuration", "=", "self", ".", "configuration", "source_messages_dir", "=", "configuration", ".", "source_messages_dir", "for", "locale", ",", "converter", "in", "zip", "(", "configuration", ".", "dummy_locales", ",", "[", "Dummy", "(", ")", ",", "Dummy2", "(", ")", ",", "ArabicDummy", "(", ")", "]", ")", ":", "print", "(", "'Processing source language files into dummy strings, locale \"{}\"'", ".", "format", "(", "locale", ")", ")", "for", "source_file", "in", "configuration", ".", "source_messages_dir", ".", "walkfiles", "(", "'*.po'", ")", ":", "if", "args", ".", "verbose", ":", "print", "(", "' '", ",", "source_file", ".", "relpath", "(", ")", ")", "make_dummy", "(", "source_messages_dir", ".", "joinpath", "(", "source_file", ")", ",", "locale", ",", "converter", ")", "if", "args", ".", "verbose", ":", "print", "(", ")" ]
Generate dummy strings for all source po files.
[ "Generate", "dummy", "strings", "for", "all", "source", "po", "files", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/dummy.py#L235-L248
1,287
edx/i18n-tools
i18n/execute.py
execute
def execute(command, working_directory=config.BASE_DIR, stderr=sp.STDOUT): """ Executes shell command in a given working_directory. Command is a string to pass to the shell. Output is ignored. """ LOG.info("Executing in %s ...", working_directory) LOG.info(command) sp.check_call(command, cwd=working_directory, stderr=stderr, shell=True)
python
def execute(command, working_directory=config.BASE_DIR, stderr=sp.STDOUT): """ Executes shell command in a given working_directory. Command is a string to pass to the shell. Output is ignored. """ LOG.info("Executing in %s ...", working_directory) LOG.info(command) sp.check_call(command, cwd=working_directory, stderr=stderr, shell=True)
[ "def", "execute", "(", "command", ",", "working_directory", "=", "config", ".", "BASE_DIR", ",", "stderr", "=", "sp", ".", "STDOUT", ")", ":", "LOG", ".", "info", "(", "\"Executing in %s ...\"", ",", "working_directory", ")", "LOG", ".", "info", "(", "command", ")", "sp", ".", "check_call", "(", "command", ",", "cwd", "=", "working_directory", ",", "stderr", "=", "stderr", ",", "shell", "=", "True", ")" ]
Executes shell command in a given working_directory. Command is a string to pass to the shell. Output is ignored.
[ "Executes", "shell", "command", "in", "a", "given", "working_directory", ".", "Command", "is", "a", "string", "to", "pass", "to", "the", "shell", ".", "Output", "is", "ignored", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/execute.py#L13-L21
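execute is a thin wrapper over subprocess.check_call with shell=True, so a call such as execute(merge_cmd, working_directory=locale_directory) boils down to the following; file and directory names here are hypothetical:

import subprocess

subprocess.check_call(
    'msgcat -o merged.po django-partial.po',  # the command string, run via the shell
    cwd='conf/locale/fr/LC_MESSAGES',         # working_directory
    shell=True,
)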
1,288
edx/i18n-tools
i18n/execute.py
remove_file
def remove_file(filename, verbose=True): """ Attempt to delete filename. verbose is a boolean. If true, removal is logged. Log a warning if file does not exist. Logging filenames are relative to config.BASE_DIR to cut down on noise in output. """ if verbose: LOG.info('Deleting file %s', os.path.relpath(filename, config.BASE_DIR)) if not os.path.exists(filename): LOG.warning("File does not exist: %s", os.path.relpath(filename, config.BASE_DIR)) else: os.remove(filename)
python
def remove_file(filename, verbose=True): """ Attempt to delete filename. verbose is a boolean. If true, removal is logged. Log a warning if file does not exist. Logging filenames are relative to config.BASE_DIR to cut down on noise in output. """ if verbose: LOG.info('Deleting file %s', os.path.relpath(filename, config.BASE_DIR)) if not os.path.exists(filename): LOG.warning("File does not exist: %s", os.path.relpath(filename, config.BASE_DIR)) else: os.remove(filename)
[ "def", "remove_file", "(", "filename", ",", "verbose", "=", "True", ")", ":", "if", "verbose", ":", "LOG", ".", "info", "(", "'Deleting file %s'", ",", "os", ".", "path", ".", "relpath", "(", "filename", ",", "config", ".", "BASE_DIR", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "LOG", ".", "warning", "(", "\"File does not exist: %s\"", ",", "os", ".", "path", ".", "relpath", "(", "filename", ",", "config", ".", "BASE_DIR", ")", ")", "else", ":", "os", ".", "remove", "(", "filename", ")" ]
Attempt to delete filename. verbose is a boolean. If true, removal is logged. Log a warning if file does not exist. Logging filenames are relative to config.BASE_DIR to cut down on noise in output.
[ "Attempt", "to", "delete", "filename", ".", "log", "is", "boolean", ".", "If", "true", "removal", "is", "logged", ".", "Log", "a", "warning", "if", "file", "does", "not", "exist", ".", "Logging", "filenames", "are", "relative", "to", "config", ".", "BASE_DIR", "to", "cut", "down", "on", "noise", "in", "output", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/execute.py#L37-L49
1,289
edx/i18n-tools
i18n/transifex.py
push
def push(*resources): """ Push translation source English files to Transifex. Arguments name specific resources to push. Otherwise, push all the source files. """ cmd = 'tx push -s' if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd)
python
def push(*resources): """ Push translation source English files to Transifex. Arguments name specific resources to push. Otherwise, push all the source files. """ cmd = 'tx push -s' if resources: for resource in resources: execute(cmd + ' -r {resource}'.format(resource=resource)) else: execute(cmd)
[ "def", "push", "(", "*", "resources", ")", ":", "cmd", "=", "'tx push -s'", "if", "resources", ":", "for", "resource", "in", "resources", ":", "execute", "(", "cmd", "+", "' -r {resource}'", ".", "format", "(", "resource", "=", "resource", ")", ")", "else", ":", "execute", "(", "cmd", ")" ]
Push translation source English files to Transifex. Arguments name specific resources to push. Otherwise, push all the source files.
[ "Push", "translation", "source", "English", "files", "to", "Transifex", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L17-L29
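Because the loop above turns each named resource into its own tx invocation, calls compose as follows; the resource slug is hypothetical:

push()                              # runs: tx push -s
push('my-project.django-partial')   # runs: tx push -s -r my-project.django-partial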
1,290
edx/i18n-tools
i18n/transifex.py
pull_all_ltr
def pull_all_ltr(configuration): """ Pulls all translations - reviewed or not - for LTR languages """ print("Pulling all translated LTR languages from transifex...") for lang in configuration.ltr_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.ltr_langs)
python
def pull_all_ltr(configuration): """ Pulls all translations - reviewed or not - for LTR languages """ print("Pulling all translated LTR languages from transifex...") for lang in configuration.ltr_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.ltr_langs)
[ "def", "pull_all_ltr", "(", "configuration", ")", ":", "print", "(", "\"Pulling all translated LTR languages from transifex...\"", ")", "for", "lang", "in", "configuration", ".", "ltr_langs", ":", "print", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'tx pull -l '", "+", "lang", ")", "clean_translated_locales", "(", "configuration", ",", "langs", "=", "configuration", ".", "ltr_langs", ")" ]
Pulls all translations - reviewed or not - for LTR languages
[ "Pulls", "all", "translations", "-", "reviewed", "or", "not", "-", "for", "LTR", "languages" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L79-L88
1,291
edx/i18n-tools
i18n/transifex.py
pull_all_rtl
def pull_all_rtl(configuration): """ Pulls all translations - reviewed or not - for RTL languages """ print("Pulling all translated RTL languages from transifex...") for lang in configuration.rtl_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.rtl_langs)
python
def pull_all_rtl(configuration): """ Pulls all translations - reviewed or not - for RTL languages """ print("Pulling all translated RTL languages from transifex...") for lang in configuration.rtl_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.rtl_langs)
[ "def", "pull_all_rtl", "(", "configuration", ")", ":", "print", "(", "\"Pulling all translated RTL languages from transifex...\"", ")", "for", "lang", "in", "configuration", ".", "rtl_langs", ":", "print", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'tx pull -l '", "+", "lang", ")", "clean_translated_locales", "(", "configuration", ",", "langs", "=", "configuration", ".", "rtl_langs", ")" ]
Pulls all translations - reviewed or not - for RTL languages
[ "Pulls", "all", "translations", "-", "reviewed", "or", "not", "-", "for", "RTL", "languages" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L91-L100
1,292
edx/i18n-tools
i18n/transifex.py
clean_translated_locales
def clean_translated_locales(configuration, langs=None): """ Strips out the warning from all translated po files about being an English source file. """ if not langs: langs = configuration.translated_locales for locale in langs: clean_locale(configuration, locale)
python
def clean_translated_locales(configuration, langs=None): """ Strips out the warning from all translated po files about being an English source file. """ if not langs: langs = configuration.translated_locales for locale in langs: clean_locale(configuration, locale)
[ "def", "clean_translated_locales", "(", "configuration", ",", "langs", "=", "None", ")", ":", "if", "not", "langs", ":", "langs", "=", "configuration", ".", "translated_locales", "for", "locale", "in", "langs", ":", "clean_locale", "(", "configuration", ",", "locale", ")" ]
Strips out the warning from all translated po files about being an English source file.
[ "Strips", "out", "the", "warning", "from", "all", "translated", "po", "files", "about", "being", "an", "English", "source", "file", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L103-L111
1,293
edx/i18n-tools
i18n/transifex.py
clean_locale
def clean_locale(configuration, locale): """ Strips out the warning from all of a locale's translated po files about being an English source file. Iterates over machine-generated files. """ dirname = configuration.get_messages_dir(locale) if not dirname.exists(): # Happens when we have a supported locale that doesn't exist in Transifex return for filename in dirname.files('*.po'): clean_file(configuration, dirname.joinpath(filename))
python
def clean_locale(configuration, locale): """ Strips out the warning from all of a locale's translated po files about being an English source file. Iterates over machine-generated files. """ dirname = configuration.get_messages_dir(locale) if not dirname.exists(): # Happens when we have a supported locale that doesn't exist in Transifex return for filename in dirname.files('*.po'): clean_file(configuration, dirname.joinpath(filename))
[ "def", "clean_locale", "(", "configuration", ",", "locale", ")", ":", "dirname", "=", "configuration", ".", "get_messages_dir", "(", "locale", ")", "if", "not", "dirname", ".", "exists", "(", ")", ":", "# Happens when we have a supported locale that doesn't exist in Transifex", "return", "for", "filename", "in", "dirname", ".", "files", "(", "'*.po'", ")", ":", "clean_file", "(", "configuration", ",", "dirname", ".", "joinpath", "(", "filename", ")", ")" ]
Strips out the warning from all of a locale's translated po files about being an English source file. Iterates over machine-generated files.
[ "Strips", "out", "the", "warning", "from", "all", "of", "a", "locale", "s", "translated", "po", "files", "about", "being", "an", "English", "source", "file", ".", "Iterates", "over", "machine", "-", "generated", "files", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L114-L125
1,294
edx/i18n-tools
i18n/transifex.py
clean_file
def clean_file(configuration, filename): """ Strips out the warning from a translated po file about being an English source file. Replaces warning with a note about coming from Transifex. """ pofile = polib.pofile(filename) if pofile.header.find(EDX_MARKER) != -1: new_header = get_new_header(configuration, pofile) new = pofile.header.replace(EDX_MARKER, new_header) pofile.header = new pofile.save()
python
def clean_file(configuration, filename): """ Strips out the warning from a translated po file about being an English source file. Replaces warning with a note about coming from Transifex. """ pofile = polib.pofile(filename) if pofile.header.find(EDX_MARKER) != -1: new_header = get_new_header(configuration, pofile) new = pofile.header.replace(EDX_MARKER, new_header) pofile.header = new pofile.save()
[ "def", "clean_file", "(", "configuration", ",", "filename", ")", ":", "pofile", "=", "polib", ".", "pofile", "(", "filename", ")", "if", "pofile", ".", "header", ".", "find", "(", "EDX_MARKER", ")", "!=", "-", "1", ":", "new_header", "=", "get_new_header", "(", "configuration", ",", "pofile", ")", "new", "=", "pofile", ".", "header", ".", "replace", "(", "EDX_MARKER", ",", "new_header", ")", "pofile", ".", "header", "=", "new", "pofile", ".", "save", "(", ")" ]
Strips out the warning from a translated po file about being an English source file. Replaces warning with a note about coming from Transifex.
[ "Strips", "out", "the", "warning", "from", "a", "translated", "po", "file", "about", "being", "an", "English", "source", "file", ".", "Replaces", "warning", "with", "a", "note", "about", "coming", "from", "Transifex", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L128-L139
1,295
edx/i18n-tools
i18n/transifex.py
get_new_header
def get_new_header(configuration, pofile): """ Insert info about edX into the po file headers """ team = pofile.metadata.get('Language-Team', None) if not team: return TRANSIFEX_HEADER.format(configuration.TRANSIFEX_URL) return TRANSIFEX_HEADER.format(team)
python
def get_new_header(configuration, pofile): """ Insert info about edX into the po file headers """ team = pofile.metadata.get('Language-Team', None) if not team: return TRANSIFEX_HEADER.format(configuration.TRANSIFEX_URL) return TRANSIFEX_HEADER.format(team)
[ "def", "get_new_header", "(", "configuration", ",", "pofile", ")", ":", "team", "=", "pofile", ".", "metadata", ".", "get", "(", "'Language-Team'", ",", "None", ")", "if", "not", "team", ":", "return", "TRANSIFEX_HEADER", ".", "format", "(", "configuration", ".", "TRANSIFEX_URL", ")", "return", "TRANSIFEX_HEADER", ".", "format", "(", "team", ")" ]
Insert info about edX into the po file headers
[ "Insert", "info", "about", "edX", "into", "the", "po", "file", "headers" ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L142-L149
1,296
edx/i18n-tools
i18n/converter.py
Converter.detag_string
def detag_string(self, string): """Extracts tags from string. returns (string, list) where string: string has tags replaced by indices (<BR>... => <0>, <1>, <2>, etc.) list: list of the removed tags ('<BR>', '<I>', '</I>') """ counter = itertools.count(0) count = lambda m: '<%s>' % next(counter) tags = self.tag_pattern.findall(string) tags = [''.join(tag) for tag in tags] (new, nfound) = self.tag_pattern.subn(count, string) if len(tags) != nfound: raise Exception('tags dont match:' + string) return (new, tags)
python
def detag_string(self, string): """Extracts tags from string. returns (string, list) where string: string has tags replaced by indices (<BR>... => <0>, <1>, <2>, etc.) list: list of the removed tags ('<BR>', '<I>', '</I>') """ counter = itertools.count(0) count = lambda m: '<%s>' % next(counter) tags = self.tag_pattern.findall(string) tags = [''.join(tag) for tag in tags] (new, nfound) = self.tag_pattern.subn(count, string) if len(tags) != nfound: raise Exception('tags dont match:' + string) return (new, tags)
[ "def", "detag_string", "(", "self", ",", "string", ")", ":", "counter", "=", "itertools", ".", "count", "(", "0", ")", "count", "=", "lambda", "m", ":", "'<%s>'", "%", "next", "(", "counter", ")", "tags", "=", "self", ".", "tag_pattern", ".", "findall", "(", "string", ")", "tags", "=", "[", "''", ".", "join", "(", "tag", ")", "for", "tag", "in", "tags", "]", "(", "new", ",", "nfound", ")", "=", "self", ".", "tag_pattern", ".", "subn", "(", "count", ",", "string", ")", "if", "len", "(", "tags", ")", "!=", "nfound", ":", "raise", "Exception", "(", "'tags dont match:'", "+", "string", ")", "return", "(", "new", ",", "tags", ")" ]
Extracts tags from string. returns (string, list) where string: string has tags replaced by indices (<BR>... => <0>, <1>, <2>, etc.) list: list of the removed tags ('<BR>', '<I>', '</I>')
[ "Extracts", "tags", "from", "string", "." ]
99b20c17d1a0ca07a8839f33e0e9068248a581e5
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/converter.py#L51-L65
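The core trick in detag_string is re.subn with a replacement callable fed by itertools.count, which numbers tags in the order they appear. A self-contained sketch with a simplified tag pattern (the class's real tag_pattern also covers placeholders):

import itertools
import re

tag_pattern = re.compile(r'<[^>]+>')  # simplified pattern
counter = itertools.count(0)
new, nfound = tag_pattern.subn(lambda m: '<%s>' % next(counter),
                               'Hello <b>world</b>!')
print(new, nfound)  # Hello <0>world<1>! 2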
1,297
protream/iquery
iquery/utils.py
Args.options
def options(self): """Train tickets query options.""" arg = self.get(0) if arg.startswith('-') and not self.is_asking_for_help: return arg[1:] return ''.join(x for x in arg if x in 'dgktz')
python
def options(self): """Train tickets query options.""" arg = self.get(0) if arg.startswith('-') and not self.is_asking_for_help: return arg[1:] return ''.join(x for x in arg if x in 'dgktz')
[ "def", "options", "(", "self", ")", ":", "arg", "=", "self", ".", "get", "(", "0", ")", "if", "arg", ".", "startswith", "(", "'-'", ")", "and", "not", "self", ".", "is_asking_for_help", ":", "return", "arg", "[", "1", ":", "]", "return", "''", ".", "join", "(", "x", "for", "x", "in", "arg", "if", "x", "in", "'dgktz'", ")" ]
Train tickets query options.
[ "Train", "tickets", "query", "options", "." ]
7272e68af610f1dd63cf695209cfa44b75adc0e6
https://github.com/protream/iquery/blob/7272e68af610f1dd63cf695209cfa44b75adc0e6/iquery/utils.py#L79-L84
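Stripped of the help-flag check, the option parsing above reduces to: a leading dash means "take everything after it", otherwise keep only the train-type letters dgktz. A simplified sketch:

def parse_options(arg):
    # '-dg'  -> 'dg'   (explicit option flag)
    # 'gz12' -> 'gz'   (keep only characters in 'dgktz')
    if arg.startswith('-'):
        return arg[1:]
    return ''.join(x for x in arg if x in 'dgktz')

assert parse_options('-dg') == 'dg'
assert parse_options('gz12') == 'gz'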
1,298
protream/iquery
iquery/trains.py
TrainsCollection.trains
def trains(self): """Filter rows according to `headers`""" for row in self._rows: train_no = row.get('station_train_code') initial = train_no[0].lower() if not self._opts or initial in self._opts: train = [ # Column: '车次' train_no, # Column: '车站' '\n'.join([ colored.green(row.get('from_station_name')), colored.red(row.get('to_station_name')), ]), # Column: '时间' '\n'.join([ colored.green(row.get('start_time')), colored.red(row.get('arrive_time')), ]), # Column: '历时' self._get_duration(row), # Column: '商务' row.get('swz_num'), # Column: '一等' row.get('zy_num'), # Column: '二等' row.get('ze_num'), # Column: '软卧' row.get('rw_num'), # Column: '硬卧' row.get('yw_num'), # Column: '软座' row.get('rz_num'), # Column: '硬座' row.get('yz_num'), # Column: '无座' row.get('wz_num') ] yield train
python
def trains(self): """Filter rows according to `headers`""" for row in self._rows: train_no = row.get('station_train_code') initial = train_no[0].lower() if not self._opts or initial in self._opts: train = [ # Column: '车次' train_no, # Column: '车站' '\n'.join([ colored.green(row.get('from_station_name')), colored.red(row.get('to_station_name')), ]), # Column: '时间' '\n'.join([ colored.green(row.get('start_time')), colored.red(row.get('arrive_time')), ]), # Column: '历时' self._get_duration(row), # Column: '商务' row.get('swz_num'), # Column: '一等' row.get('zy_num'), # Column: '二等' row.get('ze_num'), # Column: '软卧' row.get('rw_num'), # Column: '硬卧' row.get('yw_num'), # Column: '软座' row.get('rz_num'), # Column: '硬座' row.get('yz_num'), # Column: '无座' row.get('wz_num') ] yield train
[ "def", "trains", "(", "self", ")", ":", "for", "row", "in", "self", ".", "_rows", ":", "train_no", "=", "row", ".", "get", "(", "'station_train_code'", ")", "initial", "=", "train_no", "[", "0", "]", ".", "lower", "(", ")", "if", "not", "self", ".", "_opts", "or", "initial", "in", "self", ".", "_opts", ":", "train", "=", "[", "# Column: '车次'", "train_no", ",", "# Column: '车站'", "'\\n'", ".", "join", "(", "[", "colored", ".", "green", "(", "row", ".", "get", "(", "'from_station_name'", ")", ")", ",", "colored", ".", "red", "(", "row", ".", "get", "(", "'to_station_name'", ")", ")", ",", "]", ")", ",", "# Column: '时间'", "'\\n'", ".", "join", "(", "[", "colored", ".", "green", "(", "row", ".", "get", "(", "'start_time'", ")", ")", ",", "colored", ".", "red", "(", "row", ".", "get", "(", "'arrive_time'", ")", ")", ",", "]", ")", ",", "# Column: '历时'", "self", ".", "_get_duration", "(", "row", ")", ",", "# Column: '商务'", "row", ".", "get", "(", "'swz_num'", ")", ",", "# Column: '一等'", "row", ".", "get", "(", "'zy_num'", ")", ",", "# Column: '二等'", "row", ".", "get", "(", "'ze_num'", ")", ",", "# Column: '软卧'", "row", ".", "get", "(", "'rw_num'", ")", ",", "# Column: '硬卧'", "row", ".", "get", "(", "'yw_num'", ")", ",", "# Column: '软座'", "row", ".", "get", "(", "'rz_num'", ")", ",", "# Column: '硬座'", "row", ".", "get", "(", "'yz_num'", ")", ",", "# Column: '无座'", "row", ".", "get", "(", "'wz_num'", ")", "]", "yield", "train" ]
Filter rows according to `headers`
[ "Filter", "rows", "according", "to", "headers" ]
7272e68af610f1dd63cf695209cfa44b75adc0e6
https://github.com/protream/iquery/blob/7272e68af610f1dd63cf695209cfa44b75adc0e6/iquery/trains.py#L63-L101
1,299
protream/iquery
iquery/trains.py
TrainsCollection.pretty_print
def pretty_print(self): """Use `PrettyTable` to produce formatted output.""" pt = PrettyTable() if len(self) == 0: pt._set_field_names(['Sorry,']) pt.add_row([TRAIN_NOT_FOUND]) else: pt._set_field_names(self.headers) for train in self.trains: pt.add_row(train) print(pt)
python
def pretty_print(self): """Use `PrettyTable` to produce formatted output.""" pt = PrettyTable() if len(self) == 0: pt._set_field_names(['Sorry,']) pt.add_row([TRAIN_NOT_FOUND]) else: pt._set_field_names(self.headers) for train in self.trains: pt.add_row(train) print(pt)
[ "def", "pretty_print", "(", "self", ")", ":", "pt", "=", "PrettyTable", "(", ")", "if", "len", "(", "self", ")", "==", "0", ":", "pt", ".", "_set_field_names", "(", "[", "'Sorry,'", "]", ")", "pt", ".", "add_row", "(", "[", "TRAIN_NOT_FOUND", "]", ")", "else", ":", "pt", ".", "_set_field_names", "(", "self", ".", "headers", ")", "for", "train", "in", "self", ".", "trains", ":", "pt", ".", "add_row", "(", "train", ")", "print", "(", "pt", ")" ]
Use `PrettyTable` to produce formatted output.
[ "Use", "PrettyTable", "to", "perform", "formatted", "outprint", "." ]
7272e68af610f1dd63cf695209cfa44b75adc0e6
https://github.com/protream/iquery/blob/7272e68af610f1dd63cf695209cfa44b75adc0e6/iquery/trains.py#L103-L113
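_set_field_names is a private PrettyTable method; current releases of prettytable expose the same thing through the public field_names property. A sketch of the equivalent output path, with illustrative English headers in place of the Chinese column names above:

from prettytable import PrettyTable

pt = PrettyTable()
pt.field_names = ['Train', 'Stations', 'Time']  # public replacement for _set_field_names
pt.add_row(['G1', 'Beijing\nShanghai', '09:00\n13:28'])  # \n yields multi-line cells
print(pt)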