Column            Type      Value range
id                int32     0 to 252k
repo              string    lengths 7 to 55
path              string    lengths 4 to 127
func_name         string    lengths 1 to 88
original_string   string    lengths 75 to 19.8k
language          string    1 class
code              string    lengths 51 to 19.8k
code_tokens       sequence
docstring         string    lengths 3 to 17.3k
docstring_tokens  sequence
sha               string    lengths 40 to 40
url               string    lengths 87 to 242
id: 4200
repo: architv/chcli
path: challenges/writers.py
func_name: challenge
def challenge(): """Creates an enum for contest type""" enums = dict( ACTIVE="active", UPCOMING="upcoming", HIRING="hiring", ALL="all", SHORT="short", ) return type('Enum', (), enums)
python
def challenge(): enums = dict( ACTIVE="active", UPCOMING="upcoming", HIRING="hiring", ALL="all", SHORT="short", ) return type('Enum', (), enums)
[ "def", "challenge", "(", ")", ":", "enums", "=", "dict", "(", "ACTIVE", "=", "\"active\"", ",", "UPCOMING", "=", "\"upcoming\"", ",", "HIRING", "=", "\"hiring\"", ",", "ALL", "=", "\"all\"", ",", "SHORT", "=", "\"short\"", ",", ")", "return", "type", "(", "'Enum'", ",", "(", ")", ",", "enums", ")" ]
docstring: Creates an enum for contest type

docstring_tokens:
[ "Creates", "an", "enum", "for", "contest", "type" ]
sha: e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7
url: https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/writers.py#L20-L30
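
A minimal usage sketch (mine, not part of the record): type('Enum', (), enums) builds a plain class whose attributes act as the enum members, so with challenge() from the record in scope:

    ContestType = challenge()
    print(ContestType.ACTIVE)    # -> active
    print(ContestType.UPCOMING)  # -> upcoming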
id: 4201
repo: architv/chcli
path: challenges/utilities.py
func_name: time_difference
original_string:

    def time_difference(target_time):
        """Calculate the difference between the current time and the given time"""
        TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])
        time_diff = format_date(target_time) - datetime.utcnow()
        hours, remainder = divmod(time_diff.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds)

language: python

code:

    def time_difference(target_time):
        TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])
        time_diff = format_date(target_time) - datetime.utcnow()
        hours, remainder = divmod(time_diff.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds)

code_tokens:
[ "def", "time_difference", "(", "target_time", ")", ":", "TimeDiff", "=", "namedtuple", "(", "\"TimeDiff\"", ",", "[", "\"days\"", ",", "\"hours\"", ",", "\"minutes\"", ",", "\"seconds\"", "]", ")", "time_diff", "=", "format_date", "(", "target_time", ")", "-", "datetime", ".", "utcnow", "(", ")", "hours", ",", "remainder", "=", "divmod", "(", "time_diff", ".", "seconds", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remainder", ",", "60", ")", "return", "TimeDiff", "(", "days", "=", "time_diff", ".", "days", ",", "hours", "=", "hours", ",", "minutes", "=", "minutes", ",", "seconds", "=", "seconds", ")" ]
docstring: Calculate the difference between the current time and the given time

docstring_tokens:
[ "Calculate", "the", "difference", "between", "the", "current", "time", "and", "the", "given", "time" ]
sha: e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7
url: https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/utilities.py#L12-L18
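
A runnable re-sketch of the same divmod decomposition (mine, not part of the dataset). chcli's format_date() is an internal helper, so this version takes a datetime directly:

    from collections import namedtuple
    from datetime import datetime, timedelta

    TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])

    def demo_time_difference(target_time):
        # Same decomposition as the record above, minus format_date().
        time_diff = target_time - datetime.utcnow()
        hours, remainder = divmod(time_diff.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return TimeDiff(time_diff.days, hours, minutes, seconds)

    print(demo_time_difference(datetime.utcnow() + timedelta(days=1, hours=2)))
    # -> roughly TimeDiff(days=1, hours=1, minutes=59, seconds=59)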
id: 4202
repo: manjitkumar/drf-url-filters
path: filters/validations.py
func_name: CSVofIntegers
original_string:

    def CSVofIntegers(msg=None):
        '''
        Checks whether a value is list of integers.
        Returns list of integers or just one integer in
        list if there is only one element in given CSV string.
        '''
        def fn(value):
            try:
                if isinstance(value, basestring):
                    if ',' in value:
                        value = list(map(
                            int, filter(
                                bool,
                                list(map(
                                    lambda x: x.strip(),
                                    value.split(',')
                                ))
                            )
                        ))
                        return value
                    else:
                        return [int(value)]
                else:
                    raise ValueError
            except ValueError:
                raise Invalid(
                    '<{0}> is not a valid csv of integers'.format(value)
                )
        return fn

language: python

code:

    def CSVofIntegers(msg=None):
        '''
        Checks whether a value is list of integers.
        Returns list of integers or just one integer in
        list if there is only one element in given CSV string.
        '''
        def fn(value):
            try:
                if isinstance(value, basestring):
                    if ',' in value:
                        value = list(map(
                            int, filter(
                                bool,
                                list(map(
                                    lambda x: x.strip(),
                                    value.split(',')
                                ))
                            )
                        ))
                        return value
                    else:
                        return [int(value)]
                else:
                    raise ValueError
            except ValueError:
                raise Invalid(
                    '<{0}> is not a valid csv of integers'.format(value)
                )
        return fn

code_tokens:
[ "def", "CSVofIntegers", "(", "msg", "=", "None", ")", ":", "def", "fn", "(", "value", ")", ":", "try", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "if", "','", "in", "value", ":", "value", "=", "list", "(", "map", "(", "int", ",", "filter", "(", "bool", ",", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "value", ".", "split", "(", "','", ")", ")", ")", ")", ")", ")", "return", "value", "else", ":", "return", "[", "int", "(", "value", ")", "]", "else", ":", "raise", "ValueError", "except", "ValueError", ":", "raise", "Invalid", "(", "'<{0}> is not a valid csv of integers'", ".", "format", "(", "value", ")", ")", "return", "fn" ]
docstring: Checks whether a value is list of integers. Returns list of integers or just one integer in list if there is only one element in given CSV string.

docstring_tokens:
[ "Checks", "whether", "a", "value", "is", "list", "of", "integers", ".", "Returns", "list", "of", "integers", "or", "just", "one", "integer", "in", "list", "if", "there", "is", "only", "one", "element", "in", "given", "CSV", "string", "." ]
sha: ebac358729bcd9aa70537247b2ccd6005f5678c1
url: https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L95-L121
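
A Python 3 re-sketch of the validator's logic (mine, not from the repo): str stands in for the Python 2 basestring, and a plain ValueError for voluptuous' Invalid:

    def csv_of_integers(value):
        # Parse "1, 2, 3" into [1, 2, 3]; a lone "42" becomes [42].
        if not isinstance(value, str):
            raise ValueError('<{0}> is not a valid csv of integers'.format(value))
        try:
            return [int(part) for part in map(str.strip, value.split(',')) if part]
        except ValueError:
            raise ValueError('<{0}> is not a valid csv of integers'.format(value))

    print(csv_of_integers('1, 2, 3'))  # -> [1, 2, 3]
    print(csv_of_integers('42'))       # -> [42]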
id: 4203
repo: manjitkumar/drf-url-filters
path: example_app/views.py
func_name: TeamsViewSet.get_queryset
original_string:

    def get_queryset(self):
        """
        Optionally restricts the queryset by filtering against
        query parameters in the URL.
        """
        query_params = self.request.query_params
        url_params = self.kwargs

        # get queryset_filters from FilterMixin
        queryset_filters = self.get_db_filters(url_params, query_params)

        # This dict will hold filter kwargs to pass in to Django ORM calls.
        db_filters = queryset_filters['db_filters']

        # This dict will hold exclude kwargs to pass in to Django ORM calls.
        db_excludes = queryset_filters['db_excludes']

        queryset = Team.objects.prefetch_related(
            'players'
        ).all()

        return queryset.filter(**db_filters).exclude(**db_excludes)

language: python

code:

    def get_queryset(self):
        query_params = self.request.query_params
        url_params = self.kwargs

        # get queryset_filters from FilterMixin
        queryset_filters = self.get_db_filters(url_params, query_params)

        # This dict will hold filter kwargs to pass in to Django ORM calls.
        db_filters = queryset_filters['db_filters']

        # This dict will hold exclude kwargs to pass in to Django ORM calls.
        db_excludes = queryset_filters['db_excludes']

        queryset = Team.objects.prefetch_related(
            'players'
        ).all()

        return queryset.filter(**db_filters).exclude(**db_excludes)

code_tokens:
[ "def", "get_queryset", "(", "self", ")", ":", "query_params", "=", "self", ".", "request", ".", "query_params", "url_params", "=", "self", ".", "kwargs", "# get queryset_filters from FilterMixin", "queryset_filters", "=", "self", ".", "get_db_filters", "(", "url_params", ",", "query_params", ")", "# This dict will hold filter kwargs to pass in to Django ORM calls.", "db_filters", "=", "queryset_filters", "[", "'db_filters'", "]", "# This dict will hold exclude kwargs to pass in to Django ORM calls.", "db_excludes", "=", "queryset_filters", "[", "'db_excludes'", "]", "queryset", "=", "Team", ".", "objects", ".", "prefetch_related", "(", "'players'", ")", ".", "all", "(", ")", "return", "queryset", ".", "filter", "(", "*", "*", "db_filters", ")", ".", "exclude", "(", "*", "*", "db_excludes", ")" ]
docstring: Optionally restricts the queryset by filtering against query parameters in the URL.

docstring_tokens:
[ "Optionally", "restricts", "the", "queryset", "by", "filtering", "against", "query", "parameters", "in", "the", "URL", "." ]
sha: ebac358729bcd9aa70537247b2ccd6005f5678c1
url: https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/example_app/views.py#L90-L112
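
Team and get_db_filters() come from the drf-url-filters example app; the stand-in below (mine, not part of the dataset) shows just the keyword-argument expansion the final line relies on, with illustrative lookup keys:

    class FakeQuerySet:
        # Minimal stand-in for a Django queryset, to show the call shape.
        def filter(self, **kwargs):
            print('filter:', kwargs)
            return self

        def exclude(self, **kwargs):
            print('exclude:', kwargs)
            return self

    db_filters = {'players__score__gte': 10}    # illustrative keys
    db_excludes = {'name__icontains': 'test'}
    FakeQuerySet().filter(**db_filters).exclude(**db_excludes)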
id: 4204
repo: nimbis/cmsplugin-newsplus
path: cmsplugin_newsplus/settings.py
func_name: get_setting
original_string:

    def get_setting(name, default):
        """
        A little helper for fetching global settings with a common prefix.
        """
        parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
        return getattr(django_settings, parent_name, default)

language: python

code:

    def get_setting(name, default):
        parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
        return getattr(django_settings, parent_name, default)

code_tokens:
[ "def", "get_setting", "(", "name", ",", "default", ")", ":", "parent_name", "=", "\"CMSPLUGIN_NEWS_{0}\"", ".", "format", "(", "name", ")", "return", "getattr", "(", "django_settings", ",", "parent_name", ",", "default", ")" ]
docstring: A little helper for fetching global settings with a common prefix.

docstring_tokens:
[ "A", "little", "helper", "for", "fetching", "global", "settings", "with", "a", "common", "prefix", "." ]
sha: 1787fb674faa7800845f18ce782154e290f6be27
url: https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/settings.py#L5-L10
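
A runnable sketch (mine, not from the repo), with SimpleNamespace standing in for django.conf.settings and a hypothetical CMSPLUGIN_NEWS_PAGE_SIZE setting:

    from types import SimpleNamespace

    django_settings = SimpleNamespace(CMSPLUGIN_NEWS_PAGE_SIZE=25)  # stand-in

    def get_setting(name, default):
        parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
        return getattr(django_settings, parent_name, default)

    print(get_setting('PAGE_SIZE', 10))  # -> 25 (setting present)
    print(get_setting('FEED_SIZE', 10))  # -> 10 (falls back to default)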
id: 4205
repo: nimbis/cmsplugin-newsplus
path: cmsplugin_newsplus/admin.py
func_name: NewsAdmin.make_published
original_string:

    def make_published(self, request, queryset):
        """
        Marks selected news items as published
        """
        rows_updated = queryset.update(is_published=True)
        self.message_user(request,
                          ungettext('%(count)d newsitem was published',
                                    '%(count)d newsitems were published',
                                    rows_updated) % {'count': rows_updated})

language: python

code:

    def make_published(self, request, queryset):
        rows_updated = queryset.update(is_published=True)
        self.message_user(request,
                          ungettext('%(count)d newsitem was published',
                                    '%(count)d newsitems were published',
                                    rows_updated) % {'count': rows_updated})

code_tokens:
[ "def", "make_published", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "is_published", "=", "True", ")", "self", ".", "message_user", "(", "request", ",", "ungettext", "(", "'%(count)d newsitem was published'", ",", "'%(count)d newsitems were published'", ",", "rows_updated", ")", "%", "{", "'count'", ":", "rows_updated", "}", ")" ]
docstring: Marks selected news items as published

docstring_tokens:
[ "Marks", "selected", "news", "items", "as", "published" ]
sha: 1787fb674faa7800845f18ce782154e290f6be27
url: https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/admin.py#L38-L46
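
The message hinges on ungettext choosing the singular or plural template by count; a runnable stand-in (mine) using the stdlib equivalent, gettext.ngettext:

    from gettext import ngettext

    for rows_updated in (1, 3):
        print(ngettext('%(count)d newsitem was published',
                       '%(count)d newsitems were published',
                       rows_updated) % {'count': rows_updated})
    # -> 1 newsitem was published
    # -> 3 newsitems were published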
id: 4206
repo: nimbis/cmsplugin-newsplus
path: cmsplugin_newsplus/admin.py
func_name: NewsAdmin.make_unpublished
original_string:

    def make_unpublished(self, request, queryset):
        """
        Marks selected news items as unpublished
        """
        rows_updated = queryset.update(is_published=False)
        self.message_user(request,
                          ungettext('%(count)d newsitem was unpublished',
                                    '%(count)d newsitems were unpublished',
                                    rows_updated) % {'count': rows_updated})

language: python

code:

    def make_unpublished(self, request, queryset):
        rows_updated = queryset.update(is_published=False)
        self.message_user(request,
                          ungettext('%(count)d newsitem was unpublished',
                                    '%(count)d newsitems were unpublished',
                                    rows_updated) % {'count': rows_updated})

code_tokens:
[ "def", "make_unpublished", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "is_published", "=", "False", ")", "self", ".", "message_user", "(", "request", ",", "ungettext", "(", "'%(count)d newsitem was unpublished'", ",", "'%(count)d newsitems were unpublished'", ",", "rows_updated", ")", "%", "{", "'count'", ":", "rows_updated", "}", ")" ]
docstring: Marks selected news items as unpublished

docstring_tokens:
[ "Marks", "selected", "news", "items", "as", "unpublished" ]
sha: 1787fb674faa7800845f18ce782154e290f6be27
url: https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/admin.py#L49-L57
id: 4207
repo: tutorcruncher/pydf
path: pydf/wkhtmltopdf.py
func_name: _execute_wk
original_string:

    def _execute_wk(*args, input=None):
        """
        Generate path for the wkhtmltopdf binary and execute command.

        :param args: args to pass straight to subprocess.Popen
        :return: stdout, stderr
        """
        wk_args = (WK_PATH,) + args
        return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

language: python

code:

    def _execute_wk(*args, input=None):
        wk_args = (WK_PATH,) + args
        return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

code_tokens:
[ "def", "_execute_wk", "(", "*", "args", ",", "input", "=", "None", ")", ":", "wk_args", "=", "(", "WK_PATH", ",", ")", "+", "args", "return", "subprocess", ".", "run", "(", "wk_args", ",", "input", "=", "input", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")" ]
docstring:

    Generate path for the wkhtmltopdf binary and execute command.

    :param args: args to pass straight to subprocess.Popen
    :return: stdout, stderr

docstring_tokens:
[ "Generate", "path", "for", "the", "wkhtmltopdf", "binary", "and", "execute", "command", "." ]
sha: 53dd030f02f112593ed6e2655160a40b892a23c0
url: https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L22-L30
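
A runnable sketch (mine, not from the repo): echo stands in for the resolved wkhtmltopdf path so the call shape can be exercised anywhere:

    import subprocess

    WK_PATH = 'echo'  # stand-in for the real wkhtmltopdf binary path

    def _execute_wk(*args, input=None):
        wk_args = (WK_PATH,) + args
        return subprocess.run(wk_args, input=input,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    p = _execute_wk('hello')
    print(p.returncode, p.stdout)  # -> 0 b'hello\n'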
id: 4208
repo: tutorcruncher/pydf
path: pydf/wkhtmltopdf.py
func_name: generate_pdf
original_string:

    def generate_pdf(html, *,
                     cache_dir: Path=DFT_CACHE_DIR,
                     grayscale: bool=False,
                     lowquality: bool=False,
                     margin_bottom: str=None,
                     margin_left: str=None,
                     margin_right: str=None,
                     margin_top: str=None,
                     orientation: str=None,
                     page_height: str=None,
                     page_width: str=None,
                     page_size: str=None,
                     image_dpi: str=None,
                     image_quality: str=None,
                     **extra_kwargs):
        """
        Generate a pdf from either a url or a html string.

        After the html and url arguments all other arguments are
        passed straight to wkhtmltopdf

        For details on extra arguments see the output of get_help()
        and get_extended_help()

        All arguments whether specified or caught with extra_kwargs are converted
        to command line args with "'--' + original_name.replace('_', '-')"

        Arguments which are True are passed with no value eg. just --quiet, False
        and None arguments are missed, everything else is passed with str(value).

        :param html: html string to generate pdf from
        :param grayscale: bool
        :param lowquality: bool
        :param margin_bottom: string eg. 10mm
        :param margin_left: string eg. 10mm
        :param margin_right: string eg. 10mm
        :param margin_top: string eg. 10mm
        :param orientation: Portrait or Landscape
        :param page_height: string eg. 10mm
        :param page_width: string eg. 10mm
        :param page_size: string: A4, Letter, etc.
        :param image_dpi: int default 600
        :param image_quality: int default 94
        :param extra_kwargs: any exotic extra options for wkhtmltopdf
        :return: string representing pdf
        """
        if not cache_dir.exists():
            Path.mkdir(cache_dir)
        py_args = dict(
            cache_dir=cache_dir,
            grayscale=grayscale,
            lowquality=lowquality,
            margin_bottom=margin_bottom,
            margin_left=margin_left,
            margin_right=margin_right,
            margin_top=margin_top,
            orientation=orientation,
            page_height=page_height,
            page_width=page_width,
            page_size=page_size,
            image_dpi=image_dpi,
            image_quality=image_quality,
        )
        py_args.update(extra_kwargs)
        cmd_args = _convert_args(**py_args)

        p = _execute_wk(*cmd_args, input=html.encode())
        pdf_content = p.stdout

        # it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
        # seem to have generated a pdf
        if p.returncode != 0 and pdf_content[:4] != b'%PDF':
            raise RuntimeError('error running wkhtmltopdf, command: {!r}\n'
                               'response: "{}"'.format(cmd_args, p.stderr.decode().strip()))
        return pdf_content

language: python

code:

    def generate_pdf(html, *,
                     cache_dir: Path=DFT_CACHE_DIR,
                     grayscale: bool=False,
                     lowquality: bool=False,
                     margin_bottom: str=None,
                     margin_left: str=None,
                     margin_right: str=None,
                     margin_top: str=None,
                     orientation: str=None,
                     page_height: str=None,
                     page_width: str=None,
                     page_size: str=None,
                     image_dpi: str=None,
                     image_quality: str=None,
                     **extra_kwargs):
        if not cache_dir.exists():
            Path.mkdir(cache_dir)
        py_args = dict(
            cache_dir=cache_dir,
            grayscale=grayscale,
            lowquality=lowquality,
            margin_bottom=margin_bottom,
            margin_left=margin_left,
            margin_right=margin_right,
            margin_top=margin_top,
            orientation=orientation,
            page_height=page_height,
            page_width=page_width,
            page_size=page_size,
            image_dpi=image_dpi,
            image_quality=image_quality,
        )
        py_args.update(extra_kwargs)
        cmd_args = _convert_args(**py_args)

        p = _execute_wk(*cmd_args, input=html.encode())
        pdf_content = p.stdout

        # it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
        # seem to have generated a pdf
        if p.returncode != 0 and pdf_content[:4] != b'%PDF':
            raise RuntimeError('error running wkhtmltopdf, command: {!r}\n'
                               'response: "{}"'.format(cmd_args, p.stderr.decode().strip()))
        return pdf_content

code_tokens:
[ "def", "generate_pdf", "(", "html", ",", "*", ",", "cache_dir", ":", "Path", "=", "DFT_CACHE_DIR", ",", "grayscale", ":", "bool", "=", "False", ",", "lowquality", ":", "bool", "=", "False", ",", "margin_bottom", ":", "str", "=", "None", ",", "margin_left", ":", "str", "=", "None", ",", "margin_right", ":", "str", "=", "None", ",", "margin_top", ":", "str", "=", "None", ",", "orientation", ":", "str", "=", "None", ",", "page_height", ":", "str", "=", "None", ",", "page_width", ":", "str", "=", "None", ",", "page_size", ":", "str", "=", "None", ",", "image_dpi", ":", "str", "=", "None", ",", "image_quality", ":", "str", "=", "None", ",", "*", "*", "extra_kwargs", ")", ":", "if", "not", "cache_dir", ".", "exists", "(", ")", ":", "Path", ".", "mkdir", "(", "cache_dir", ")", "py_args", "=", "dict", "(", "cache_dir", "=", "cache_dir", ",", "grayscale", "=", "grayscale", ",", "lowquality", "=", "lowquality", ",", "margin_bottom", "=", "margin_bottom", ",", "margin_left", "=", "margin_left", ",", "margin_right", "=", "margin_right", ",", "margin_top", "=", "margin_top", ",", "orientation", "=", "orientation", ",", "page_height", "=", "page_height", ",", "page_width", "=", "page_width", ",", "page_size", "=", "page_size", ",", "image_dpi", "=", "image_dpi", ",", "image_quality", "=", "image_quality", ",", ")", "py_args", ".", "update", "(", "extra_kwargs", ")", "cmd_args", "=", "_convert_args", "(", "*", "*", "py_args", ")", "p", "=", "_execute_wk", "(", "*", "cmd_args", ",", "input", "=", "html", ".", "encode", "(", ")", ")", "pdf_content", "=", "p", ".", "stdout", "# it seems wkhtmltopdf's error codes can be false, we'll ignore them if we", "# seem to have generated a pdf", "if", "p", ".", "returncode", "!=", "0", "and", "pdf_content", "[", ":", "4", "]", "!=", "b'%PDF'", ":", "raise", "RuntimeError", "(", "'error running wkhtmltopdf, command: {!r}\\n'", "'response: \"{}\"'", ".", "format", "(", "cmd_args", ",", "p", ".", "stderr", ".", "decode", "(", ")", ".", "strip", "(", ")", ")", ")", "return", "pdf_content" ]
docstring:

    Generate a pdf from either a url or a html string.

    After the html and url arguments all other arguments are
    passed straight to wkhtmltopdf

    For details on extra arguments see the output of get_help()
    and get_extended_help()

    All arguments whether specified or caught with extra_kwargs are converted
    to command line args with "'--' + original_name.replace('_', '-')"

    Arguments which are True are passed with no value eg. just --quiet, False
    and None arguments are missed, everything else is passed with str(value).

    :param html: html string to generate pdf from
    :param grayscale: bool
    :param lowquality: bool
    :param margin_bottom: string eg. 10mm
    :param margin_left: string eg. 10mm
    :param margin_right: string eg. 10mm
    :param margin_top: string eg. 10mm
    :param orientation: Portrait or Landscape
    :param page_height: string eg. 10mm
    :param page_width: string eg. 10mm
    :param page_size: string: A4, Letter, etc.
    :param image_dpi: int default 600
    :param image_quality: int default 94
    :param extra_kwargs: any exotic extra options for wkhtmltopdf
    :return: string representing pdf

docstring_tokens:
[ "Generate", "a", "pdf", "from", "either", "a", "url", "or", "a", "html", "string", "." ]
sha: 53dd030f02f112593ed6e2655160a40b892a23c0
url: https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L78-L153
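
A typical call assembled from the docstring above (a sketch, not from the repo's docs; it needs a wkhtmltopdf binary installed to actually run):

    pdf_content = generate_pdf(
        '<h1>Invoice #1</h1>',
        page_size='A4',
        orientation='Portrait',
        margin_top='10mm',
        margin_bottom='10mm',
    )
    with open('invoice.pdf', 'wb') as f:
        f.write(pdf_content)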
id: 4209
repo: tutorcruncher/pydf
path: pydf/wkhtmltopdf.py
func_name: get_version
original_string:

    def get_version():
        """
        Get version of pydf and wkhtmltopdf binary

        :return: version string
        """
        try:
            wk_version = _string_execute('-V')
        except Exception as e:
            # we catch all errors here to make sure we get a version no matter what
            wk_version = '%s: %s' % (e.__class__.__name__, e)
        return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)

language: python

code:

    def get_version():
        try:
            wk_version = _string_execute('-V')
        except Exception as e:
            # we catch all errors here to make sure we get a version no matter what
            wk_version = '%s: %s' % (e.__class__.__name__, e)
        return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)

code_tokens:
[ "def", "get_version", "(", ")", ":", "try", ":", "wk_version", "=", "_string_execute", "(", "'-V'", ")", "except", "Exception", "as", "e", ":", "# we catch all errors here to make sure we get a version no matter what", "wk_version", "=", "'%s: %s'", "%", "(", "e", ".", "__class__", ".", "__name__", ",", "e", ")", "return", "'pydf version: %s\\nwkhtmltopdf version: %s'", "%", "(", "VERSION", ",", "wk_version", ")" ]
docstring:

    Get version of pydf and wkhtmltopdf binary

    :return: version string

docstring_tokens:
[ "Get", "version", "of", "pydf", "and", "wkhtmltopdf", "binary" ]
sha: 53dd030f02f112593ed6e2655160a40b892a23c0
url: https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L160-L171
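
The catch-all except is the point here: version reporting can never itself raise. The same fallback pattern in isolation (mine, not from the repo):

    def describe_failure(fn):
        # Always return a string, never propagate the exception.
        try:
            return fn()
        except Exception as e:
            return '%s: %s' % (e.__class__.__name__, e)

    print(describe_failure(lambda: 1 / 0))
    # -> ZeroDivisionError: division by zero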
id: 4210
repo: PiotrDabkowski/pyjsparser
path: pyjsparser/parser.py
func_name: PyJsParser._interpret_regexp
original_string:

    def _interpret_regexp(self, string, flags):
        '''Perform sctring escape - for regexp literals'''
        self.index = 0
        self.length = len(string)
        self.source = string
        self.lineNumber = 0
        self.lineStart = 0
        octal = False
        st = ''
        inside_square = 0
        while (self.index < self.length):
            template = '[%s]' if not inside_square else '%s'
            ch = self.source[self.index]
            self.index += 1
            if ch == '\\':
                ch = self.source[self.index]
                self.index += 1
                if (not isLineTerminator(ch)):
                    if ch == 'u':
                        digs = self.source[self.index:self.index + 4]
                        if len(digs) == 4 and all(isHexDigit(d) for d in digs):
                            st += template % unichr(int(digs, 16))
                            self.index += 4
                        else:
                            st += 'u'
                    elif ch == 'x':
                        digs = self.source[self.index:self.index + 2]
                        if len(digs) == 2 and all(isHexDigit(d) for d in digs):
                            st += template % unichr(int(digs, 16))
                            self.index += 2
                        else:
                            st += 'x'
                    # special meaning - single char.
                    elif ch == '0':
                        st += '\\0'
                    elif ch == 'n':
                        st += '\\n'
                    elif ch == 'r':
                        st += '\\r'
                    elif ch == 't':
                        st += '\\t'
                    elif ch == 'f':
                        st += '\\f'
                    elif ch == 'v':
                        st += '\\v'
                    # unescape special single characters like . so that they are interpreted literally
                    elif ch in REGEXP_SPECIAL_SINGLE:
                        st += '\\' + ch
                    # character groups
                    elif ch == 'b':
                        st += '\\b'
                    elif ch == 'B':
                        st += '\\B'
                    elif ch == 'w':
                        st += '\\w'
                    elif ch == 'W':
                        st += '\\W'
                    elif ch == 'd':
                        st += '\\d'
                    elif ch == 'D':
                        st += '\\D'
                    elif ch == 's':
                        st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
                    elif ch == 'S':
                        st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
                    else:
                        if isDecimalDigit(ch):
                            num = ch
                            while self.index < self.length and isDecimalDigit(
                                    self.source[self.index]):
                                num += self.source[self.index]
                                self.index += 1
                            st += '\\' + num
                        else:
                            st += ch  # DONT ESCAPE!!!
                else:
                    self.lineNumber += 1
                    if (ch == '\r' and self.source[self.index] == '\n'):
                        self.index += 1
                    self.lineStart = self.index
            else:
                if ch == '[':
                    inside_square = True
                elif ch == ']':
                    inside_square = False
                st += ch
        # print string, 'was transformed to', st
        return st

language: python

code:

    def _interpret_regexp(self, string, flags):
        '''Perform sctring escape - for regexp literals'''
        self.index = 0
        self.length = len(string)
        self.source = string
        self.lineNumber = 0
        self.lineStart = 0
        octal = False
        st = ''
        inside_square = 0
        while (self.index < self.length):
            template = '[%s]' if not inside_square else '%s'
            ch = self.source[self.index]
            self.index += 1
            if ch == '\\':
                ch = self.source[self.index]
                self.index += 1
                if (not isLineTerminator(ch)):
                    if ch == 'u':
                        digs = self.source[self.index:self.index + 4]
                        if len(digs) == 4 and all(isHexDigit(d) for d in digs):
                            st += template % unichr(int(digs, 16))
                            self.index += 4
                        else:
                            st += 'u'
                    elif ch == 'x':
                        digs = self.source[self.index:self.index + 2]
                        if len(digs) == 2 and all(isHexDigit(d) for d in digs):
                            st += template % unichr(int(digs, 16))
                            self.index += 2
                        else:
                            st += 'x'
                    # special meaning - single char.
                    elif ch == '0':
                        st += '\\0'
                    elif ch == 'n':
                        st += '\\n'
                    elif ch == 'r':
                        st += '\\r'
                    elif ch == 't':
                        st += '\\t'
                    elif ch == 'f':
                        st += '\\f'
                    elif ch == 'v':
                        st += '\\v'
                    # unescape special single characters like . so that they are interpreted literally
                    elif ch in REGEXP_SPECIAL_SINGLE:
                        st += '\\' + ch
                    # character groups
                    elif ch == 'b':
                        st += '\\b'
                    elif ch == 'B':
                        st += '\\B'
                    elif ch == 'w':
                        st += '\\w'
                    elif ch == 'W':
                        st += '\\W'
                    elif ch == 'd':
                        st += '\\d'
                    elif ch == 'D':
                        st += '\\D'
                    elif ch == 's':
                        st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
                    elif ch == 'S':
                        st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
                    else:
                        if isDecimalDigit(ch):
                            num = ch
                            while self.index < self.length and isDecimalDigit(
                                    self.source[self.index]):
                                num += self.source[self.index]
                                self.index += 1
                            st += '\\' + num
                        else:
                            st += ch  # DONT ESCAPE!!!
                else:
                    self.lineNumber += 1
                    if (ch == '\r' and self.source[self.index] == '\n'):
                        self.index += 1
                    self.lineStart = self.index
            else:
                if ch == '[':
                    inside_square = True
                elif ch == ']':
                    inside_square = False
                st += ch
        # print string, 'was transformed to', st
        return st

code_tokens:
[ "def", "_interpret_regexp", "(", "self", ",", "string", ",", "flags", ")", ":", "self", ".", "index", "=", "0", "self", ".", "length", "=", "len", "(", "string", ")", "self", ".", "source", "=", "string", "self", ".", "lineNumber", "=", "0", "self", ".", "lineStart", "=", "0", "octal", "=", "False", "st", "=", "''", "inside_square", "=", "0", "while", "(", "self", ".", "index", "<", "self", ".", "length", ")", ":", "template", "=", "'[%s]'", "if", "not", "inside_square", "else", "'%s'", "ch", "=", "self", ".", "source", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "if", "ch", "==", "'\\\\'", ":", "ch", "=", "self", ".", "source", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "if", "(", "not", "isLineTerminator", "(", "ch", ")", ")", ":", "if", "ch", "==", "'u'", ":", "digs", "=", "self", ".", "source", "[", "self", ".", "index", ":", "self", ".", "index", "+", "4", "]", "if", "len", "(", "digs", ")", "==", "4", "and", "all", "(", "isHexDigit", "(", "d", ")", "for", "d", "in", "digs", ")", ":", "st", "+=", "template", "%", "unichr", "(", "int", "(", "digs", ",", "16", ")", ")", "self", ".", "index", "+=", "4", "else", ":", "st", "+=", "'u'", "elif", "ch", "==", "'x'", ":", "digs", "=", "self", ".", "source", "[", "self", ".", "index", ":", "self", ".", "index", "+", "2", "]", "if", "len", "(", "digs", ")", "==", "2", "and", "all", "(", "isHexDigit", "(", "d", ")", "for", "d", "in", "digs", ")", ":", "st", "+=", "template", "%", "unichr", "(", "int", "(", "digs", ",", "16", ")", ")", "self", ".", "index", "+=", "2", "else", ":", "st", "+=", "'x'", "# special meaning - single char.", "elif", "ch", "==", "'0'", ":", "st", "+=", "'\\\\0'", "elif", "ch", "==", "'n'", ":", "st", "+=", "'\\\\n'", "elif", "ch", "==", "'r'", ":", "st", "+=", "'\\\\r'", "elif", "ch", "==", "'t'", ":", "st", "+=", "'\\\\t'", "elif", "ch", "==", "'f'", ":", "st", "+=", "'\\\\f'", "elif", "ch", "==", "'v'", ":", "st", "+=", "'\\\\v'", "# unescape special single characters like . 
so that they are interpreted literally", "elif", "ch", "in", "REGEXP_SPECIAL_SINGLE", ":", "st", "+=", "'\\\\'", "+", "ch", "# character groups", "elif", "ch", "==", "'b'", ":", "st", "+=", "'\\\\b'", "elif", "ch", "==", "'B'", ":", "st", "+=", "'\\\\B'", "elif", "ch", "==", "'w'", ":", "st", "+=", "'\\\\w'", "elif", "ch", "==", "'W'", ":", "st", "+=", "'\\\\W'", "elif", "ch", "==", "'d'", ":", "st", "+=", "'\\\\d'", "elif", "ch", "==", "'D'", ":", "st", "+=", "'\\\\D'", "elif", "ch", "==", "'s'", ":", "st", "+=", "template", "%", "u' \\f\\n\\r\\t\\v\\u00a0\\u1680\\u180e\\u2000-\\u200a\\u2028\\u2029\\u202f\\u205f\\u3000\\ufeff'", "elif", "ch", "==", "'S'", ":", "st", "+=", "template", "%", "u'\\u0000-\\u0008\\u000e-\\u001f\\u0021-\\u009f\\u00a1-\\u167f\\u1681-\\u180d\\u180f-\\u1fff\\u200b-\\u2027\\u202a-\\u202e\\u2030-\\u205e\\u2060-\\u2fff\\u3001-\\ufefe\\uff00-\\uffff'", "else", ":", "if", "isDecimalDigit", "(", "ch", ")", ":", "num", "=", "ch", "while", "self", ".", "index", "<", "self", ".", "length", "and", "isDecimalDigit", "(", "self", ".", "source", "[", "self", ".", "index", "]", ")", ":", "num", "+=", "self", ".", "source", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "st", "+=", "'\\\\'", "+", "num", "else", ":", "st", "+=", "ch", "# DONT ESCAPE!!!", "else", ":", "self", ".", "lineNumber", "+=", "1", "if", "(", "ch", "==", "'\\r'", "and", "self", ".", "source", "[", "self", ".", "index", "]", "==", "'\\n'", ")", ":", "self", ".", "index", "+=", "1", "self", ".", "lineStart", "=", "self", ".", "index", "else", ":", "if", "ch", "==", "'['", ":", "inside_square", "=", "True", "elif", "ch", "==", "']'", ":", "inside_square", "=", "False", "st", "+=", "ch", "# print string, 'was transformed to', st", "return", "st" ]
docstring: Perform sctring escape - for regexp literals

docstring_tokens:
[ "Perform", "sctring", "escape", "-", "for", "regexp", "literals" ]
sha: 5465d037b30e334cb0997f2315ec1e451b8ad4c1
url: https://github.com/PiotrDabkowski/pyjsparser/blob/5465d037b30e334cb0997f2315ec1e451b8ad4c1/pyjsparser/parser.py#L518-L608
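
An illustrative before/after (mine, not from the dataset): outside a character class the translator wraps each resolved escape in brackets, so the JS escape \x41 ('A') in a pattern like \d+\x41 comes out as the class [A]. The literal below is written by hand to mirror that output:

    import re

    translated = r'\d+[A]'  # hand-written mirror of _interpret_regexp(r'\d+\x41', '')
    print(re.fullmatch(translated, '123A') is not None)  # -> True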
id: 4211
repo: sckott/habanero
path: habanero/crossref/crossref.py
func_name: Crossref.works
original_string:

    def works(self, ids = None, query = None, filter = None, offset = None,
              limit = None, sample = None, sort = None,
              order = None, facet = None, select = None, cursor = None,
              cursor_max = 5000, **kwargs):
        '''
        Search Crossref works

        :param ids: [Array] DOIs (digital object identifier) or other identifiers
        :param query: [String] A query string
        :param filter: [Hash] Filter options. See examples for usage.
            Accepts a dict, with filter names and their values. For repeating
            filter names pass in a list of the values to that filter name,
            e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
            See https://github.com/CrossRef/rest-api-doc#filter-names for
            filter names and their descriptions and
            :func:`~habanero.Crossref.filter_names` and
            :func:`~habanero.Crossref.filter_details`
        :param offset: [Fixnum] Number of record to start at, from 1 to 10000
        :param limit: [Fixnum] Number of results to return. Not relavant when
            searching with specific dois. Default: 20. Max: 1000
        :param sample: [Fixnum] Number of random results to return. when you use
            the sample parameter, the limit and offset parameters are ignored.
            Max: 100
        :param sort: [String] Field to sort on. Note: If the API call includes a
            query, then the sort order will be by the relevance score. If no
            query is included, then the sort order will be by DOI update date.
            See sorting_ for possible values.
        :param order: [String] Sort order, one of 'asc' or 'desc'
        :param facet: [Boolean/String] Set to `true` to include facet results
            (default: false). Optionally, pass a query string, e.g.,
            `facet=type-name:*` or `facet=license=*`. See Facets_ for options.
        :param select: [String/list(Strings)] Crossref metadata records can be
            quite large. Sometimes you just want a few elements from the schema.
            You can "select" a subset of elements to return. This can make your
            API calls much more efficient. Not clear yet which fields are
            allowed here.
        :param cursor: [String] Cursor character string to do deep paging.
            Default is None. Pass in '*' to start deep paging. Any combination
            of query, filters and facets may be used with deep paging cursors.
            While rows may be specified along with cursor, offset and sample
            cannot be used. See
            https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
        :param cursor_max: [Fixnum] Max records to retrieve. Only used when
            cursor param used. Because deep paging can result in continuous
            requests until all are retrieved, use this parameter to set a
            maximum number of records. Of course, if there are less records
            found than this value, you will get only those found.
        :param kwargs: additional named arguments passed on to `requests.get`,
            e.g., field queries (see examples and FieldQueries_)

        :return: A dict

        Usage::

            from habanero import Crossref
            cr = Crossref()
            cr.works()
            cr.works(ids = '10.1371/journal.pone.0033693')
            dois = ['10.1371/journal.pone.0033693', ]
            cr.works(ids = dois)
            x = cr.works(query = "ecology")
            x['status']
            x['message-type']
            x['message-version']
            x['message']
            x['message']['total-results']
            x['message']['items-per-page']
            x['message']['query']
            x['message']['items']

            # Get full text links
            x = cr.works(filter = {'has_full_text': True})
            x

            # Parse output to various data pieces
            x = cr.works(filter = {'has_full_text': True})
            ## get doi for each item
            [ z['DOI'] for z in x['message']['items'] ]
            ## get doi and url for each item
            [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
            ### print every doi
            for i in x['message']['items']:
                print i['DOI']

            # filters - pass in as a dict
            ## see https://github.com/CrossRef/rest-api-doc#filter-names
            cr.works(filter = {'has_full_text': True})
            cr.works(filter = {'has_funder': True, 'has_full_text': True})
            cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
            ## to repeat a filter name, pass in a list
            x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
            map(lambda z:z['funder'][0]['DOI'], x['message']['items'])

            # Deep paging, using the cursor parameter
            ## this search should lead to only ~215 results
            cr.works(query = "widget", cursor = "*", cursor_max = 100)
            ## this search should lead to only ~2500 results, in chunks of 500
            res = cr.works(query = "octopus", cursor = "*", limit = 500)
            sum([ len(z['message']['items']) for z in res ])
            ## about 167 results
            res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
            sum([ len(z['message']['items']) for z in res ])
            ## cursor_max to get back only a maximum set of results
            res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
            sum([ len(z['message']['items']) for z in res ])
            ## cursor_max - especially useful when a request could be very large
            ### e.g., "ecology" results in ~275K records, lets max at 10,000
            ### with 1000 at a time
            res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
            sum([ len(z['message']['items']) for z in res ])
            items = [ z['message']['items'] for z in res ]
            items = [ item for sublist in items for item in sublist ]
            [ z['DOI'] for z in items ][0:50]

            # field queries
            res = cr.works(query = "ecology", query_author = 'carl boettiger')
            [ x['author'][0]['family'] for x in res['message']['items'] ]

            # select certain fields to return
            ## as a comma separated string
            cr.works(query = "ecology", select = "DOI,title")
            ## or as a list
            cr.works(query = "ecology", select = ["DOI","title"])
        '''
        if ids.__class__.__name__ != 'NoneType':
            return request(self.mailto, self.base_url, "/works/", ids,
                query, filter, offset, limit, sample, sort, order,
                facet, select, None, None, None, None, **kwargs)
        else:
            return Request(self.mailto, self.base_url, "/works/",
                query, filter, offset, limit, sample, sort, order,
                facet, select, cursor, cursor_max, None, **kwargs).do_request()

language: python

code:

    def works(self, ids = None, query = None, filter = None, offset = None,
              limit = None, sample = None, sort = None,
              order = None, facet = None, select = None, cursor = None,
              cursor_max = 5000, **kwargs):
        '''
        Search Crossref works

        :param ids: [Array] DOIs (digital object identifier) or other identifiers
        :param query: [String] A query string
        :param filter: [Hash] Filter options. See examples for usage.
            Accepts a dict, with filter names and their values. For repeating
            filter names pass in a list of the values to that filter name,
            e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
            See https://github.com/CrossRef/rest-api-doc#filter-names for
            filter names and their descriptions and
            :func:`~habanero.Crossref.filter_names` and
            :func:`~habanero.Crossref.filter_details`
        :param offset: [Fixnum] Number of record to start at, from 1 to 10000
        :param limit: [Fixnum] Number of results to return. Not relavant when
            searching with specific dois. Default: 20. Max: 1000
        :param sample: [Fixnum] Number of random results to return. when you use
            the sample parameter, the limit and offset parameters are ignored.
            Max: 100
        :param sort: [String] Field to sort on. Note: If the API call includes a
            query, then the sort order will be by the relevance score. If no
            query is included, then the sort order will be by DOI update date.
            See sorting_ for possible values.
        :param order: [String] Sort order, one of 'asc' or 'desc'
        :param facet: [Boolean/String] Set to `true` to include facet results
            (default: false). Optionally, pass a query string, e.g.,
            `facet=type-name:*` or `facet=license=*`. See Facets_ for options.
        :param select: [String/list(Strings)] Crossref metadata records can be
            quite large. Sometimes you just want a few elements from the schema.
            You can "select" a subset of elements to return. This can make your
            API calls much more efficient. Not clear yet which fields are
            allowed here.
        :param cursor: [String] Cursor character string to do deep paging.
            Default is None. Pass in '*' to start deep paging. Any combination
            of query, filters and facets may be used with deep paging cursors.
            While rows may be specified along with cursor, offset and sample
            cannot be used. See
            https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
        :param cursor_max: [Fixnum] Max records to retrieve. Only used when
            cursor param used. Because deep paging can result in continuous
            requests until all are retrieved, use this parameter to set a
            maximum number of records. Of course, if there are less records
            found than this value, you will get only those found.
        :param kwargs: additional named arguments passed on to `requests.get`,
            e.g., field queries (see examples and FieldQueries_)

        :return: A dict

        Usage::

            from habanero import Crossref
            cr = Crossref()
            cr.works()
            cr.works(ids = '10.1371/journal.pone.0033693')
            dois = ['10.1371/journal.pone.0033693', ]
            cr.works(ids = dois)
            x = cr.works(query = "ecology")
            x['status']
            x['message-type']
            x['message-version']
            x['message']
            x['message']['total-results']
            x['message']['items-per-page']
            x['message']['query']
            x['message']['items']

            # Get full text links
            x = cr.works(filter = {'has_full_text': True})
            x

            # Parse output to various data pieces
            x = cr.works(filter = {'has_full_text': True})
            ## get doi for each item
            [ z['DOI'] for z in x['message']['items'] ]
            ## get doi and url for each item
            [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
            ### print every doi
            for i in x['message']['items']:
                print i['DOI']

            # filters - pass in as a dict
            ## see https://github.com/CrossRef/rest-api-doc#filter-names
            cr.works(filter = {'has_full_text': True})
            cr.works(filter = {'has_funder': True, 'has_full_text': True})
            cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
            ## to repeat a filter name, pass in a list
            x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
            map(lambda z:z['funder'][0]['DOI'], x['message']['items'])

            # Deep paging, using the cursor parameter
            ## this search should lead to only ~215 results
            cr.works(query = "widget", cursor = "*", cursor_max = 100)
            ## this search should lead to only ~2500 results, in chunks of 500
            res = cr.works(query = "octopus", cursor = "*", limit = 500)
            sum([ len(z['message']['items']) for z in res ])
            ## about 167 results
            res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
            sum([ len(z['message']['items']) for z in res ])
            ## cursor_max to get back only a maximum set of results
            res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
            sum([ len(z['message']['items']) for z in res ])
            ## cursor_max - especially useful when a request could be very large
            ### e.g., "ecology" results in ~275K records, lets max at 10,000
            ### with 1000 at a time
            res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
            sum([ len(z['message']['items']) for z in res ])
            items = [ z['message']['items'] for z in res ]
            items = [ item for sublist in items for item in sublist ]
            [ z['DOI'] for z in items ][0:50]

            # field queries
            res = cr.works(query = "ecology", query_author = 'carl boettiger')
            [ x['author'][0]['family'] for x in res['message']['items'] ]

            # select certain fields to return
            ## as a comma separated string
            cr.works(query = "ecology", select = "DOI,title")
            ## or as a list
            cr.works(query = "ecology", select = ["DOI","title"])
        '''
        if ids.__class__.__name__ != 'NoneType':
            return request(self.mailto, self.base_url, "/works/", ids,
                query, filter, offset, limit, sample, sort, order,
                facet, select, None, None, None, None, **kwargs)
        else:
            return Request(self.mailto, self.base_url, "/works/",
                query, filter, offset, limit, sample, sort, order,
                facet, select, cursor, cursor_max, None, **kwargs).do_request()

code_tokens:
[ "def", "works", "(", "self", ",", "ids", "=", "None", ",", "query", "=", "None", ",", "filter", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "sample", "=", "None", ",", "sort", "=", "None", ",", "order", "=", "None", ",", "facet", "=", "None", ",", "select", "=", "None", ",", "cursor", "=", "None", ",", "cursor_max", "=", "5000", ",", "*", "*", "kwargs", ")", ":", "if", "ids", ".", "__class__", ".", "__name__", "!=", "'NoneType'", ":", "return", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/works/\"", ",", "ids", ",", "query", ",", "filter", ",", "offset", ",", "limit", ",", "sample", ",", "sort", ",", "order", ",", "facet", ",", "select", ",", "None", ",", "None", ",", "None", ",", "None", ",", "*", "*", "kwargs", ")", "else", ":", "return", "Request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/works/\"", ",", "query", ",", "filter", ",", "offset", ",", "limit", ",", "sample", ",", "sort", ",", "order", ",", "facet", ",", "select", ",", "cursor", ",", "cursor_max", ",", "None", ",", "*", "*", "kwargs", ")", ".", "do_request", "(", ")" ]
docstring:

    Search Crossref works

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param query: [String] A query string
    :param filter: [Hash] Filter options. See examples for usage.
        Accepts a dict, with filter names and their values. For repeating
        filter names pass in a list of the values to that filter name,
        e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
        See https://github.com/CrossRef/rest-api-doc#filter-names for
        filter names and their descriptions and
        :func:`~habanero.Crossref.filter_names` and
        :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relavant when
        searching with specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return. when you use
        the sample parameter, the limit and offset parameters are ignored.
        Max: 100
    :param sort: [String] Field to sort on. Note: If the API call includes a
        query, then the sort order will be by the relevance score. If no
        query is included, then the sort order will be by DOI update date.
        See sorting_ for possible values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results
        (default: false). Optionally, pass a query string, e.g.,
        `facet=type-name:*` or `facet=license=*`. See Facets_ for options.
    :param select: [String/list(Strings)] Crossref metadata records can be
        quite large. Sometimes you just want a few elements from the schema.
        You can "select" a subset of elements to return. This can make your
        API calls much more efficient. Not clear yet which fields are
        allowed here.
    :param cursor: [String] Cursor character string to do deep paging.
        Default is None. Pass in '*' to start deep paging. Any combination
        of query, filters and facets may be used with deep paging cursors.
        While rows may be specified along with cursor, offset and sample
        cannot be used. See
        https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
    :param cursor_max: [Fixnum] Max records to retrieve. Only used when
        cursor param used. Because deep paging can result in continuous
        requests until all are retrieved, use this parameter to set a
        maximum number of records. Of course, if there are less records
        found than this value, you will get only those found.
    :param kwargs: additional named arguments passed on to `requests.get`,
        e.g., field queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.works()
        cr.works(ids = '10.1371/journal.pone.0033693')
        dois = ['10.1371/journal.pone.0033693', ]
        cr.works(ids = dois)
        x = cr.works(query = "ecology")
        x['status']
        x['message-type']
        x['message-version']
        x['message']
        x['message']['total-results']
        x['message']['items-per-page']
        x['message']['query']
        x['message']['items']

        # Get full text links
        x = cr.works(filter = {'has_full_text': True})
        x

        # Parse output to various data pieces
        x = cr.works(filter = {'has_full_text': True})
        ## get doi for each item
        [ z['DOI'] for z in x['message']['items'] ]
        ## get doi and url for each item
        [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
        ### print every doi
        for i in x['message']['items']:
            print i['DOI']

        # filters - pass in as a dict
        ## see https://github.com/CrossRef/rest-api-doc#filter-names
        cr.works(filter = {'has_full_text': True})
        cr.works(filter = {'has_funder': True, 'has_full_text': True})
        cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
        ## to repeat a filter name, pass in a list
        x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
        map(lambda z:z['funder'][0]['DOI'], x['message']['items'])

        # Deep paging, using the cursor parameter
        ## this search should lead to only ~215 results
        cr.works(query = "widget", cursor = "*", cursor_max = 100)
        ## this search should lead to only ~2500 results, in chunks of 500
        res = cr.works(query = "octopus", cursor = "*", limit = 500)
        sum([ len(z['message']['items']) for z in res ])
        ## about 167 results
        res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
        sum([ len(z['message']['items']) for z in res ])
        ## cursor_max to get back only a maximum set of results
        res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
        sum([ len(z['message']['items']) for z in res ])
        ## cursor_max - especially useful when a request could be very large
        ### e.g., "ecology" results in ~275K records, lets max at 10,000
        ### with 1000 at a time
        res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
        sum([ len(z['message']['items']) for z in res ])
        items = [ z['message']['items'] for z in res ]
        items = [ item for sublist in items for item in sublist ]
        [ z['DOI'] for z in items ][0:50]

        # field queries
        res = cr.works(query = "ecology", query_author = 'carl boettiger')
        [ x['author'][0]['family'] for x in res['message']['items'] ]

        # select certain fields to return
        ## as a comma separated string
        cr.works(query = "ecology", select = "DOI,title")
        ## or as a list
        cr.works(query = "ecology", select = ["DOI","title"])

docstring_tokens:
[ "Search", "Crossref", "works" ]
sha: a17d87070378786bbb138e1c9712ecad9aacf38e
url: https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L171-L296
id: 4212
repo: sckott/habanero
path: habanero/crossref/crossref.py
func_name: Crossref.prefixes
original_string:

    def prefixes(self, ids = None, filter = None, offset = None,
                 limit = None, sample = None, sort = None,
                 order = None, facet = None, works = False, select = None,
                 cursor = None, cursor_max = 5000, **kwargs):
        '''
        Search Crossref prefixes

        :param ids: [Array] DOIs (digital object identifier) or other identifiers
        :param filter: [Hash] Filter options. See examples for usage.
            Accepts a dict, with filter names and their values. For repeating
            filter names pass in a list of the values to that filter name,
            e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
            See https://github.com/CrossRef/rest-api-doc#filter-names for
            filter names and their descriptions and
            :func:`~habanero.Crossref.filter_names` and
            :func:`~habanero.Crossref.filter_details`
        :param offset: [Fixnum] Number of record to start at, from 1 to 10000
        :param limit: [Fixnum] Number of results to return. Not relevant when
            searching with specific dois. Default: 20. Max: 1000
        :param sample: [Fixnum] Number of random results to return. when you use
            the sample parameter, the limit and offset parameters are ignored.
            This parameter only used when works requested. Max: 100
        :param sort: [String] Field to sort on. Note: If the API call includes a
            query, then the sort order will be by the relevance score. If no
            query is included, then the sort order will be by DOI update date.
            See sorting_ for possible values.
        :param order: [String] Sort order, one of 'asc' or 'desc'
        :param facet: [Boolean/String] Set to `true` to include facet results
            (default: false). Optionally, pass a query string, e.g.,
            `facet=type-name:*` or `facet=license=*` See Facets_ for options.
        :param select: [String/list(Strings)] Crossref metadata records can be
            quite large. Sometimes you just want a few elements from the schema.
            You can "select" a subset of elements to return. This can make your
            API calls much more efficient. Not clear yet which fields are
            allowed here.
        :param works: [Boolean] If true, works returned as well. Default: false
        :param kwargs: additional named arguments passed on to `requests.get`,
            e.g., field queries (see examples and FieldQueries_)

        :return: A dict

        Usage::

            from habanero import Crossref
            cr = Crossref()
            cr.prefixes(ids = "10.1016")
            cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])

            # get works
            cr.prefixes(ids = "10.1016", works = True)

            # Limit number of results
            cr.prefixes(ids = "10.1016", works = True, limit = 3)

            # Sort and order
            cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")

            # cursor - deep paging
            res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
            sum([ len(z['message']['items']) for z in res ])
            items = [ z['message']['items'] for z in res ]
            items = [ item for sublist in items for item in sublist ]
            [ z['DOI'] for z in items ][0:50]

            # field queries
            res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper',
                filter = {'type': 'journal-article'})
            eds = [ x.get('editor') for x in res['message']['items'] ]
            [ z for z in eds if z is not None ]
        '''
        check_kwargs(["query"], kwargs)
        return request(self.mailto, self.base_url, "/prefixes/", ids,
            query = None, filter = filter, offset = offset, limit = limit,
            sample = sample, sort = sort, order = order, facet = facet,
            select = select, works = works, cursor = cursor,
            cursor_max = cursor_max, **kwargs)

language: python

code:

    def prefixes(self, ids = None, filter = None, offset = None,
                 limit = None, sample = None, sort = None,
                 order = None, facet = None, works = False, select = None,
                 cursor = None, cursor_max = 5000, **kwargs):
        '''
        Search Crossref prefixes

        :param ids: [Array] DOIs (digital object identifier) or other identifiers
        :param filter: [Hash] Filter options. See examples for usage.
            Accepts a dict, with filter names and their values. For repeating
            filter names pass in a list of the values to that filter name,
            e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
            See https://github.com/CrossRef/rest-api-doc#filter-names for
            filter names and their descriptions and
            :func:`~habanero.Crossref.filter_names` and
            :func:`~habanero.Crossref.filter_details`
        :param offset: [Fixnum] Number of record to start at, from 1 to 10000
        :param limit: [Fixnum] Number of results to return. Not relevant when
            searching with specific dois. Default: 20. Max: 1000
        :param sample: [Fixnum] Number of random results to return. when you use
            the sample parameter, the limit and offset parameters are ignored.
            This parameter only used when works requested. Max: 100
        :param sort: [String] Field to sort on. Note: If the API call includes a
            query, then the sort order will be by the relevance score. If no
            query is included, then the sort order will be by DOI update date.
            See sorting_ for possible values.
        :param order: [String] Sort order, one of 'asc' or 'desc'
        :param facet: [Boolean/String] Set to `true` to include facet results
            (default: false). Optionally, pass a query string, e.g.,
            `facet=type-name:*` or `facet=license=*` See Facets_ for options.
        :param select: [String/list(Strings)] Crossref metadata records can be
            quite large. Sometimes you just want a few elements from the schema.
            You can "select" a subset of elements to return. This can make your
            API calls much more efficient. Not clear yet which fields are
            allowed here.
        :param works: [Boolean] If true, works returned as well. Default: false
        :param kwargs: additional named arguments passed on to `requests.get`,
            e.g., field queries (see examples and FieldQueries_)

        :return: A dict

        Usage::

            from habanero import Crossref
            cr = Crossref()
            cr.prefixes(ids = "10.1016")
            cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])

            # get works
            cr.prefixes(ids = "10.1016", works = True)

            # Limit number of results
            cr.prefixes(ids = "10.1016", works = True, limit = 3)

            # Sort and order
            cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")

            # cursor - deep paging
            res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
            sum([ len(z['message']['items']) for z in res ])
            items = [ z['message']['items'] for z in res ]
            items = [ item for sublist in items for item in sublist ]
            [ z['DOI'] for z in items ][0:50]

            # field queries
            res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper',
                filter = {'type': 'journal-article'})
            eds = [ x.get('editor') for x in res['message']['items'] ]
            [ z for z in eds if z is not None ]
        '''
        check_kwargs(["query"], kwargs)
        return request(self.mailto, self.base_url, "/prefixes/", ids,
            query = None, filter = filter, offset = offset, limit = limit,
            sample = sample, sort = sort, order = order, facet = facet,
            select = select, works = works, cursor = cursor,
            cursor_max = cursor_max, **kwargs)

code_tokens:
[ "def", "prefixes", "(", "self", ",", "ids", "=", "None", ",", "filter", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "sample", "=", "None", ",", "sort", "=", "None", ",", "order", "=", "None", ",", "facet", "=", "None", ",", "works", "=", "False", ",", "select", "=", "None", ",", "cursor", "=", "None", ",", "cursor_max", "=", "5000", ",", "*", "*", "kwargs", ")", ":", "check_kwargs", "(", "[", "\"query\"", "]", ",", "kwargs", ")", "return", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/prefixes/\"", ",", "ids", ",", "query", "=", "None", ",", "filter", "=", "filter", ",", "offset", "=", "offset", ",", "limit", "=", "limit", ",", "sample", "=", "sample", ",", "sort", "=", "sort", ",", "order", "=", "order", ",", "facet", "=", "facet", ",", "select", "=", "select", ",", "works", "=", "works", ",", "cursor", "=", "cursor", ",", "cursor_max", "=", "cursor_max", ",", "*", "*", "kwargs", ")" ]
docstring:

    Search Crossref prefixes

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param filter: [Hash] Filter options. See examples for usage.
        Accepts a dict, with filter names and their values. For repeating
        filter names pass in a list of the values to that filter name,
        e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
        See https://github.com/CrossRef/rest-api-doc#filter-names for
        filter names and their descriptions and
        :func:`~habanero.Crossref.filter_names` and
        :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relevant when
        searching with specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return. when you use
        the sample parameter, the limit and offset parameters are ignored.
        This parameter only used when works requested. Max: 100
    :param sort: [String] Field to sort on. Note: If the API call includes a
        query, then the sort order will be by the relevance score. If no
        query is included, then the sort order will be by DOI update date.
        See sorting_ for possible values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results
        (default: false). Optionally, pass a query string, e.g.,
        `facet=type-name:*` or `facet=license=*` See Facets_ for options.
    :param select: [String/list(Strings)] Crossref metadata records can be
        quite large. Sometimes you just want a few elements from the schema.
        You can "select" a subset of elements to return. This can make your
        API calls much more efficient. Not clear yet which fields are
        allowed here.
    :param works: [Boolean] If true, works returned as well. Default: false
    :param kwargs: additional named arguments passed on to `requests.get`,
        e.g., field queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.prefixes(ids = "10.1016")
        cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])

        # get works
        cr.prefixes(ids = "10.1016", works = True)

        # Limit number of results
        cr.prefixes(ids = "10.1016", works = True, limit = 3)

        # Sort and order
        cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")

        # cursor - deep paging
        res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
        sum([ len(z['message']['items']) for z in res ])
        items = [ z['message']['items'] for z in res ]
        items = [ item for sublist in items for item in sublist ]
        [ z['DOI'] for z in items ][0:50]

        # field queries
        res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper',
            filter = {'type': 'journal-article'})
        eds = [ x.get('editor') for x in res['message']['items'] ]
        [ z for z in eds if z is not None ]

docstring_tokens:
[ "Search", "Crossref", "prefixes" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L361-L430
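A minimal consumption sketch for the prefixes route above, assuming habanero is installed and the Crossref API is reachable; the mailto address is a placeholder, not something taken from the record:

from habanero import Crossref

cr = Crossref(mailto="you@example.com")  # placeholder polite-pool contact address
res = cr.prefixes(ids="10.1371", works=True, limit=3)
# with works=True the payload nests results under message/items
for item in res["message"]["items"]:
    print(item["DOI"], item.get("type"))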
4,213
sckott/habanero
habanero/crossref/crossref.py
Crossref.types
def types(self, ids = None, query = None, filter = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, works = False, select = None, cursor = None, cursor_max = 5000, **kwargs): ''' Search Crossref types :param ids: [Array] Type identifier, e.g., journal :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param works: [Boolean] If true, works returned as well. Default: false :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.types() cr.types(ids = "journal") cr.types(ids = "journal-article") cr.types(ids = "journal", works = True) # field queries res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100) [ x.get('title') for x in res['message']['items'] ] ''' return request(self.mailto, self.base_url, "/types/", ids, query, filter, offset, limit, sample, sort, order, facet, select, works, cursor, cursor_max, **kwargs)
python
def types(self, ids = None, query = None, filter = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, works = False, select = None, cursor = None, cursor_max = 5000, **kwargs): ''' Search Crossref types :param ids: [Array] Type identifier, e.g., journal :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param works: [Boolean] If true, works returned as well. Default: false :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.types() cr.types(ids = "journal") cr.types(ids = "journal-article") cr.types(ids = "journal", works = True) # field queries res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100) [ x.get('title') for x in res['message']['items'] ] ''' return request(self.mailto, self.base_url, "/types/", ids, query, filter, offset, limit, sample, sort, order, facet, select, works, cursor, cursor_max, **kwargs)
[ "def", "types", "(", "self", ",", "ids", "=", "None", ",", "query", "=", "None", ",", "filter", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "sample", "=", "None", ",", "sort", "=", "None", ",", "order", "=", "None", ",", "facet", "=", "None", ",", "works", "=", "False", ",", "select", "=", "None", ",", "cursor", "=", "None", ",", "cursor_max", "=", "5000", ",", "*", "*", "kwargs", ")", ":", "return", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/types/\"", ",", "ids", ",", "query", ",", "filter", ",", "offset", ",", "limit", ",", "sample", ",", "sort", ",", "order", ",", "facet", ",", "select", ",", "works", ",", "cursor", ",", "cursor_max", ",", "*", "*", "kwargs", ")" ]
Search Crossref types :param ids: [Array] Type identifier, e.g., journal :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param works: [Boolean] If true, works returned as well. Default: false :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.types() cr.types(ids = "journal") cr.types(ids = "journal-article") cr.types(ids = "journal", works = True) # field queries res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100) [ x.get('title') for x in res['message']['items'] ]
[ "Search", "Crossref", "types" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L573-L625
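A short sketch of the types route above; a single-id lookup returns one record under message (the "label" field is assumed from the Crossref REST API, not from this record):

from habanero import Crossref

cr = Crossref()
res = cr.types(ids="journal-article")
print(res["message"]["label"])  # e.g. "Journal Article"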
4,214
sckott/habanero
habanero/crossref/crossref.py
Crossref.licenses
def licenses(self, query = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, **kwargs): ''' Search Crossref licenses :param query: [String] A query string :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.licenses() cr.licenses(query = "creative") ''' check_kwargs(["ids", "filter", "works"], kwargs) res = request(self.mailto, self.base_url, "/licenses/", None, query, None, offset, limit, None, sort, order, facet, None, None, None, None, **kwargs) return res
python
def licenses(self, query = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, **kwargs): ''' Search Crossref licenses :param query: [String] A query string :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.licenses() cr.licenses(query = "creative") ''' check_kwargs(["ids", "filter", "works"], kwargs) res = request(self.mailto, self.base_url, "/licenses/", None, query, None, offset, limit, None, sort, order, facet, None, None, None, None, **kwargs) return res
[ "def", "licenses", "(", "self", ",", "query", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "sample", "=", "None", ",", "sort", "=", "None", ",", "order", "=", "None", ",", "facet", "=", "None", ",", "*", "*", "kwargs", ")", ":", "check_kwargs", "(", "[", "\"ids\"", ",", "\"filter\"", ",", "\"works\"", "]", ",", "kwargs", ")", "res", "=", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/licenses/\"", ",", "None", ",", "query", ",", "None", ",", "offset", ",", "limit", ",", "None", ",", "sort", ",", "order", ",", "facet", ",", "None", ",", "None", ",", "None", ",", "None", ",", "*", "*", "kwargs", ")", "return", "res" ]
Search Crossref licenses :param query: [String] A query string :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*` See Facets_ for options. :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.licenses() cr.licenses(query = "creative")
[ "Search", "Crossref", "licenses" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L627-L659
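A brief sketch of licenses; the item fields ("URL", "work-count") are assumptions about the Crossref REST API response, not part of the record above:

from habanero import Crossref

cr = Crossref()
res = cr.licenses(query="creative")
for lic in res["message"]["items"][:5]:
    print(lic["URL"], lic["work-count"])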
4,215
sckott/habanero
habanero/crossref/crossref.py
Crossref.registration_agency
def registration_agency(self, ids, **kwargs): ''' Determine registration agency for DOIs :param ids: [Array] DOIs (digital object identifier) or other identifiers :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: list of DOI minting agencies Usage:: from habanero import Crossref cr = Crossref() cr.registration_agency('10.1371/journal.pone.0033693') cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993']) ''' check_kwargs(["query", "filter", "offset", "limit", "sample", "sort", "order", "facet", "works"], kwargs) res = request(self.mailto, self.base_url, "/works/", ids, None, None, None, None, None, None, None, None, None, None, None, None, True, **kwargs) if res.__class__ != list: k = [] k.append(res) else: k = res return [ z['message']['agency']['label'] for z in k ]
python
def registration_agency(self, ids, **kwargs): ''' Determine registration agency for DOIs :param ids: [Array] DOIs (digital object identifier) or other identifiers :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: list of DOI minting agencies Usage:: from habanero import Crossref cr = Crossref() cr.registration_agency('10.1371/journal.pone.0033693') cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993']) ''' check_kwargs(["query", "filter", "offset", "limit", "sample", "sort", "order", "facet", "works"], kwargs) res = request(self.mailto, self.base_url, "/works/", ids, None, None, None, None, None, None, None, None, None, None, None, None, True, **kwargs) if res.__class__ != list: k = [] k.append(res) else: k = res return [ z['message']['agency']['label'] for z in k ]
[ "def", "registration_agency", "(", "self", ",", "ids", ",", "*", "*", "kwargs", ")", ":", "check_kwargs", "(", "[", "\"query\"", ",", "\"filter\"", ",", "\"offset\"", ",", "\"limit\"", ",", "\"sample\"", ",", "\"sort\"", ",", "\"order\"", ",", "\"facet\"", ",", "\"works\"", "]", ",", "kwargs", ")", "res", "=", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/works/\"", ",", "ids", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "True", ",", "*", "*", "kwargs", ")", "if", "res", ".", "__class__", "!=", "list", ":", "k", "=", "[", "]", "k", ".", "append", "(", "res", ")", "else", ":", "k", "=", "res", "return", "[", "z", "[", "'message'", "]", "[", "'agency'", "]", "[", "'label'", "]", "for", "z", "in", "k", "]" ]
Determine registration agency for DOIs :param ids: [Array] DOIs (digital object identifier) or other identifiers :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: list of DOI minting agencies Usage:: from habanero import Crossref cr = Crossref() cr.registration_agency('10.1371/journal.pone.0033693') cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
[ "Determine", "registration", "agency", "for", "DOIs" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L661-L688
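A sketch of registration_agency, which unwraps the response down to a plain list of agency labels (the printed value is illustrative):

from habanero import Crossref

cr = Crossref()
agencies = cr.registration_agency(ids=["10.1371/journal.pone.0033693"])
print(agencies)  # e.g. ['Crossref']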
4,216
sckott/habanero
habanero/crossref/crossref.py
Crossref.random_dois
def random_dois(self, sample = 10, **kwargs): ''' Get a random set of DOIs :param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100 :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: [Array] of DOIs Usage:: from habanero import Crossref cr = Crossref() cr.random_dois(1) cr.random_dois(10) cr.random_dois(50) cr.random_dois(100) ''' res = request(self.mailto, self.base_url, "/works/", None, None, None, None, None, sample, None, None, None, None, True, None, None, None, **kwargs) return [ z['DOI'] for z in res['message']['items'] ]
python
def random_dois(self, sample = 10, **kwargs): ''' Get a random set of DOIs :param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100 :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: [Array] of DOIs Usage:: from habanero import Crossref cr = Crossref() cr.random_dois(1) cr.random_dois(10) cr.random_dois(50) cr.random_dois(100) ''' res = request(self.mailto, self.base_url, "/works/", None, None, None, None, None, sample, None, None, None, None, True, None, None, None, **kwargs) return [ z['DOI'] for z in res['message']['items'] ]
[ "def", "random_dois", "(", "self", ",", "sample", "=", "10", ",", "*", "*", "kwargs", ")", ":", "res", "=", "request", "(", "self", ".", "mailto", ",", "self", ".", "base_url", ",", "\"/works/\"", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "sample", ",", "None", ",", "None", ",", "None", ",", "None", ",", "True", ",", "None", ",", "None", ",", "None", ",", "*", "*", "kwargs", ")", "return", "[", "z", "[", "'DOI'", "]", "for", "z", "in", "res", "[", "'message'", "]", "[", "'items'", "]", "]" ]
Get a random set of DOIs :param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100 :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples) :return: [Array] of DOIs Usage:: from habanero import Crossref cr = Crossref() cr.random_dois(1) cr.random_dois(10) cr.random_dois(50) cr.random_dois(100)
[ "Get", "a", "random", "set", "of", "DOIs" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L690-L712
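A sketch of random_dois, which returns a bare list of DOI strings rather than the usual message envelope:

from habanero import Crossref

cr = Crossref()
dois = cr.random_dois(sample=5)
print(len(dois), dois[0])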
4,217
sckott/habanero
habanero/cn/cn.py
content_negotiation
def content_negotiation(ids = None, format = "bibtex", style = 'apa', locale = "en-US", url = None, **kwargs): ''' Get citations in various formats from CrossRef :param ids: [str] Search by a single DOI or many DOIs, each a string. If many passed in, do so in a list :param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json", "citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml", "datacite-xml","bibentry", or "crossref-tdm" :param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles` for options. Default: "apa". If there's a style that CrossRef doesn't support you'll get a `(500) Internal Server Error` :param locale: [str] Language locale. See `locale.locale_alias` :param url: [str] Base URL for the content negotiation request. Default: `https://doi.org` :param kwargs: any additional arguments will be passed on to `requests.get` :return: string, which can be parsed to various formats depending on what format you request (e.g., JSON vs. XML vs. bibtex) Usage:: from habanero import cn cn.content_negotiation(ids = '10.1126/science.169.3946.635') # get citeproc-json cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json") # some other formats cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text") # return an R bibentry type cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry") cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry") # return an apa style citation cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos") # Using DataCite DOIs ## some formats don't work # cn.content_negotiation(ids = "10.5284/1011335", format = "text") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm") ## But most do work cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "turtle") cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json") cn.content_negotiation(ids = "10.5284/1011335", format = "ris") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") # many DOIs dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217'] x = cn.content_negotiation(ids = dois) # Use a different base url url = "http://dx.doi.org" cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url) cn.content_negotiation(ids = "10.5284/1011335", url = url) ''' if url is None: url = cn_base_url return CNRequest(url, ids, format, style, locale, **kwargs)
python
def content_negotiation(ids = None, format = "bibtex", style = 'apa', locale = "en-US", url = None, **kwargs): ''' Get citations in various formats from CrossRef :param ids: [str] Search by a single DOI or many DOIs, each a string. If many passed in, do so in a list :param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json", "citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml", "datacite-xml","bibentry", or "crossref-tdm" :param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles` for options. Default: "apa". If there's a style that CrossRef doesn't support you'll get a `(500) Internal Server Error` :param locale: [str] Language locale. See `locale.locale_alias` :param url: [str] Base URL for the content negotiation request. Default: `https://doi.org` :param kwargs: any additional arguments will be passed on to `requests.get` :return: string, which can be parsed to various formats depending on what format you request (e.g., JSON vs. XML vs. bibtex) Usage:: from habanero import cn cn.content_negotiation(ids = '10.1126/science.169.3946.635') # get citeproc-json cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json") # some other formats cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text") # return an R bibentry type cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry") cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry") # return an apa style citation cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos") # Using DataCite DOIs ## some formats don't work # cn.content_negotiation(ids = "10.5284/1011335", format = "text") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm") ## But most do work cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "turtle") cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json") cn.content_negotiation(ids = "10.5284/1011335", format = "ris") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") # many DOIs dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217'] x = cn.content_negotiation(ids = dois) # Use a different base url url = "http://dx.doi.org" cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url) cn.content_negotiation(ids = "10.5284/1011335", url = url) ''' if url is None: url = cn_base_url return CNRequest(url, ids, format, style, locale, **kwargs)
[ "def", "content_negotiation", "(", "ids", "=", "None", ",", "format", "=", "\"bibtex\"", ",", "style", "=", "'apa'", ",", "locale", "=", "\"en-US\"", ",", "url", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "url", "is", "None", ":", "url", "=", "cn_base_url", "return", "CNRequest", "(", "url", ",", "ids", ",", "format", ",", "style", ",", "locale", ",", "*", "*", "kwargs", ")" ]
Get citations in various formats from CrossRef :param ids: [str] Search by a single DOI or many DOIs, each a string. If many passed in, do so in a list :param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json", "citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml", "datacite-xml","bibentry", or "crossref-tdm" :param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles` for options. Default: "apa". If there's a style that CrossRef doesn't support you'll get a `(500) Internal Server Error` :param locale: [str] Language locale. See `locale.locale_alias` :param url: [str] Base URL for the content negotiation request. Default: `https://doi.org` :param kwargs: any additional arguments will be passed on to `requests.get` :return: string, which can be parsed to various formats depending on what format you request (e.g., JSON vs. XML vs. bibtex) Usage:: from habanero import cn cn.content_negotiation(ids = '10.1126/science.169.3946.635') # get citeproc-json cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json") # some other formats cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text") # return an R bibentry type cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry") cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry") # return an apa style citation cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity") cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos") # Using DataCite DOIs ## some formats don't work # cn.content_negotiation(ids = "10.5284/1011335", format = "text") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml") # cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm") ## But most do work cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml") cn.content_negotiation(ids = "10.5284/1011335", format = "turtle") cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json") cn.content_negotiation(ids = "10.5284/1011335", format = "ris") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry") cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex") # many DOIs dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217'] x = cn.content_negotiation(ids = dois) # Use a different base url url = "http://dx.doi.org" cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url) cn.content_negotiation(ids = "10.5284/1011335", url = url)
[ "Get", "citations", "in", "various", "formats", "from", "CrossRef" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/cn/cn.py#L4-L76
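A sketch of content_negotiation; with the default format="bibtex" the return value is a single BibTeX string:

from habanero import cn

bib = cn.content_negotiation(ids="10.1126/science.169.3946.635")
print(bib.splitlines()[0])  # first line of the BibTeX entry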
4,218
sckott/habanero
habanero/counts/counts.py
citation_count
def citation_count(doi, url = "http://www.crossref.org/openurl/", key = "[email protected]", **kwargs): ''' Get a citation count with a DOI :param doi: [String] DOI, digital object identifier :param url: [String] the API url for the function (should be left to default) :param key: [String] your API key See http://labs.crossref.org/openurl/ for more info on this Crossref API service. Usage:: from habanero import counts counts.citation_count(doi = "10.1371/journal.pone.0042793") counts.citation_count(doi = "10.1016/j.fbr.2012.01.001") # DOI not found ## FIXME counts.citation_count(doi = "10.1016/j.fbr.2012") ''' args = {"id": "doi:" + doi, "pid": key, "noredirect": True} args = dict((k, v) for k, v in args.items() if v) res = requests.get(url, params = args, headers = make_ua(), **kwargs) xmldoc = minidom.parseString(res.content) val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value return int(str(val))
python
def citation_count(doi, url = "http://www.crossref.org/openurl/", key = "[email protected]", **kwargs): ''' Get a citation count with a DOI :param doi: [String] DOI, digital object identifier :param url: [String] the API url for the function (should be left to default) :param key: [String] your API key See http://labs.crossref.org/openurl/ for more info on this Crossref API service. Usage:: from habanero import counts counts.citation_count(doi = "10.1371/journal.pone.0042793") counts.citation_count(doi = "10.1016/j.fbr.2012.01.001") # DOI not found ## FIXME counts.citation_count(doi = "10.1016/j.fbr.2012") ''' args = {"id": "doi:" + doi, "pid": key, "noredirect": True} args = dict((k, v) for k, v in args.items() if v) res = requests.get(url, params = args, headers = make_ua(), **kwargs) xmldoc = minidom.parseString(res.content) val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value return int(str(val))
[ "def", "citation_count", "(", "doi", ",", "url", "=", "\"http://www.crossref.org/openurl/\"", ",", "key", "=", "\"[email protected]\"", ",", "*", "*", "kwargs", ")", ":", "args", "=", "{", "\"id\"", ":", "\"doi:\"", "+", "doi", ",", "\"pid\"", ":", "key", ",", "\"noredirect\"", ":", "True", "}", "args", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", "if", "v", ")", "res", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "args", ",", "headers", "=", "make_ua", "(", ")", ",", "*", "*", "kwargs", ")", "xmldoc", "=", "minidom", ".", "parseString", "(", "res", ".", "content", ")", "val", "=", "xmldoc", ".", "getElementsByTagName", "(", "'query'", ")", "[", "0", "]", ".", "attributes", "[", "'fl_count'", "]", ".", "value", "return", "int", "(", "str", "(", "val", ")", ")" ]
Get a citation count with a DOI :param doi: [String] DOI, digital object identifier :param url: [String] the API url for the function (should be left to default) :param key: [String] your API key See http://labs.crossref.org/openurl/ for more info on this Crossref API service. Usage:: from habanero import counts counts.citation_count(doi = "10.1371/journal.pone.0042793") counts.citation_count(doi = "10.1016/j.fbr.2012.01.001") # DOI not found ## FIXME counts.citation_count(doi = "10.1016/j.fbr.2012")
[ "Get", "a", "citation", "count", "with", "a", "DOI" ]
a17d87070378786bbb138e1c9712ecad9aacf38e
https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/counts/counts.py#L5-L30
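A sketch of citation_count; the function parses the OpenURL XML response down to a plain int:

from habanero import counts

n = counts.citation_count(doi="10.1371/journal.pone.0042793")
print(type(n), n)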
4,219
alvinwan/tex2py
tex2py/tex2py.py
TreeOfContents.findHierarchy
def findHierarchy(self, max_subs=10): """Find hierarchy for the LaTeX source. >>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy() ('section', 'subsection') >>> TOC.fromLatex( ... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy() ('subsubsection', 'subsubsubsection') >>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\ ... \subsection{subh2}').findHierarchy() ('section', 'subsection') """ hierarchy = [] defaults = TOC.default_hierarchy + tuple( '%ssection' % ('sub'*i) for i in range(2, max_subs)) for level in defaults: if getattr(self.source, level, False): hierarchy.append(level) return tuple(hierarchy)
python
def findHierarchy(self, max_subs=10): hierarchy = [] defaults = TOC.default_hierarchy + tuple( '%ssection' % ('sub'*i) for i in range(2, max_subs)) for level in defaults: if getattr(self.source, level, False): hierarchy.append(level) return tuple(hierarchy)
[ "def", "findHierarchy", "(", "self", ",", "max_subs", "=", "10", ")", ":", "hierarchy", "=", "[", "]", "defaults", "=", "TOC", ".", "default_hierarchy", "+", "tuple", "(", "'%ssection'", "%", "(", "'sub'", "*", "i", ")", "for", "i", "in", "range", "(", "2", ",", "max_subs", ")", ")", "for", "level", "in", "defaults", ":", "if", "getattr", "(", "self", ".", "source", ",", "level", ",", "False", ")", ":", "hierarchy", ".", "append", "(", "level", ")", "return", "tuple", "(", "hierarchy", ")" ]
Find hierarchy for the LaTeX source. >>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy() ('section', 'subsection') >>> TOC.fromLatex( ... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy() ('subsubsection', 'subsubsubsection') >>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\ ... \subsection{subh2}').findHierarchy() ('section', 'subsection')
[ "Find", "hierarchy", "for", "the", "LaTeX", "source", "." ]
85ce4a23ad8dbeb49a360171877dd14d099b3e9a
https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L69-L87
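A sketch of findHierarchy via the package's entry point, assuming tex2py exposes a tex2py() function that returns a TreeOfContents, as its README suggests:

from tex2py import tex2py

toc = tex2py(r'\section{Intro}\subsection{Background}\section{Methods}')
print(toc.findHierarchy())  # expected: ('section', 'subsection')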
4,220
alvinwan/tex2py
tex2py/tex2py.py
TreeOfContents.getHeadingLevel
def getHeadingLevel(ts, hierarchy=default_hierarchy): """Extract heading level for a particular Tex element, given a specified hierarchy. >>> ts = TexSoup(r'\section{Hello}').section >>> TOC.getHeadingLevel(ts) 2 >>> ts2 = TexSoup(r'\chapter{hello again}').chapter >>> TOC.getHeadingLevel(ts2) 1 >>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection >>> TOC.getHeadingLevel(ts3) 6 """ try: return hierarchy.index(ts.name)+1 except ValueError: if ts.name.endswith('section'): i, name = 0, ts.name while name.startswith('sub'): name, i = name[3:], i+1 if name == 'section': return i+2 return float('inf') except (AttributeError, TypeError): return float('inf')
python
def getHeadingLevel(ts, hierarchy=default_hierarchy): try: return hierarchy.index(ts.name)+1 except ValueError: if ts.name.endswith('section'): i, name = 0, ts.name while name.startswith('sub'): name, i = name[3:], i+1 if name == 'section': return i+2 return float('inf') except (AttributeError, TypeError): return float('inf')
[ "def", "getHeadingLevel", "(", "ts", ",", "hierarchy", "=", "default_hierarchy", ")", ":", "try", ":", "return", "hierarchy", ".", "index", "(", "ts", ".", "name", ")", "+", "1", "except", "ValueError", ":", "if", "ts", ".", "name", ".", "endswith", "(", "'section'", ")", ":", "i", ",", "name", "=", "0", ",", "ts", ".", "name", "while", "name", ".", "startswith", "(", "'sub'", ")", ":", "name", ",", "i", "=", "name", "[", "3", ":", "]", ",", "i", "+", "1", "if", "name", "==", "'section'", ":", "return", "i", "+", "2", "return", "float", "(", "'inf'", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "return", "float", "(", "'inf'", ")" ]
Extract heading level for a particular Tex element, given a specified hierarchy. >>> ts = TexSoup(r'\section{Hello}').section >>> TOC.getHeadingLevel(ts) 2 >>> ts2 = TexSoup(r'\chapter{hello again}').chapter >>> TOC.getHeadingLevel(ts2) 1 >>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection >>> TOC.getHeadingLevel(ts3) 6
[ "Extract", "heading", "level", "for", "a", "particular", "Tex", "element", "given", "a", "specified", "hierarchy", "." ]
85ce4a23ad8dbeb49a360171877dd14d099b3e9a
https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L90-L115
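A sketch of getHeadingLevel, mirroring the doctests above; the TreeOfContents import path is an assumption based on the file path in this record:

from TexSoup import TexSoup
from tex2py import TreeOfContents as TOC

ts = TexSoup(r'\subsection{Hi}').subsection
print(TOC.getHeadingLevel(ts))  # 3 under the default chapter/section/subsection hierarchy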
4,221
alvinwan/tex2py
tex2py/tex2py.py
TreeOfContents.parseTopDepth
def parseTopDepth(self, descendants=()): """Parse tex for highest tag in hierarchy >>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth() 1 >>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}' >>> TOC.fromLatex(s).parseTopDepth() 1 >>> h = ('section', 'subsubsection', 'subsubsubsection') >>> TOC.fromLatex(s, hierarchy=h).parseTopDepth() 2 """ descendants = list(descendants) or \ list(getattr(self.source, 'descendants', descendants)) if not descendants: return -1 return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants)
python
def parseTopDepth(self, descendants=()): descendants = list(descendants) or \ list(getattr(self.source, 'descendants', descendants)) if not descendants: return -1 return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants)
[ "def", "parseTopDepth", "(", "self", ",", "descendants", "=", "(", ")", ")", ":", "descendants", "=", "list", "(", "descendants", ")", "or", "list", "(", "getattr", "(", "self", ".", "source", ",", "'descendants'", ",", "descendants", ")", ")", "if", "not", "descendants", ":", "return", "-", "1", "return", "min", "(", "TOC", ".", "getHeadingLevel", "(", "e", ",", "self", ".", "hierarchy", ")", "for", "e", "in", "descendants", ")" ]
Parse tex for highest tag in hierarchy >>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth() 1 >>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}' >>> TOC.fromLatex(s).parseTopDepth() 1 >>> h = ('section', 'subsubsection', 'subsubsubsection') >>> TOC.fromLatex(s, hierarchy=h).parseTopDepth() 2
[ "Parse", "tex", "for", "highest", "tag", "in", "hierarchy" ]
85ce4a23ad8dbeb49a360171877dd14d099b3e9a
https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L117-L133
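A sketch of parseTopDepth reproducing the first doctest: levels are computed against the hierarchy detected for the document, so the shallowest tag present maps to 1:

from tex2py import TreeOfContents as TOC  # import path assumed, as above

toc = TOC.fromLatex('\\section{Hah}\\subsection{No}')
print(toc.parseTopDepth())  # 1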
4,222
alvinwan/tex2py
tex2py/tex2py.py
TreeOfContents.fromLatex
def fromLatex(tex, *args, **kwargs): """Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object """ source = TexSoup(tex) return TOC('[document]', source=source, descendants=list(source.descendants), *args, **kwargs)
python
def fromLatex(tex, *args, **kwargs): source = TexSoup(tex) return TOC('[document]', source=source, descendants=list(source.descendants), *args, **kwargs)
[ "def", "fromLatex", "(", "tex", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "TexSoup", "(", "tex", ")", "return", "TOC", "(", "'[document]'", ",", "source", "=", "source", ",", "descendants", "=", "list", "(", "source", ".", "descendants", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Creates abstraction using Latex :param str tex: Latex :return: TreeOfContents object
[ "Creates", "abstraction", "using", "Latex" ]
85ce4a23ad8dbeb49a360171877dd14d099b3e9a
https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L224-L232
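A sketch of fromLatex, which wraps TexSoup parsing and hands the parsed descendants to the TOC constructor:

from tex2py import TreeOfContents as TOC  # import path assumed, as above

toc = TOC.fromLatex(r'\section{A}\subsection{A.1}')
print(toc.findHierarchy())  # ('section', 'subsection')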
4,223
mar10/pyftpsync
ftpsync/synchronizers.py
process_options
def process_options(opts): """Check and prepare options dict.""" # Convert match and exclude args into pattern lists match = opts.get("match") if match and type(match) is str: opts["match"] = [pat.strip() for pat in match.split(",")] elif match: assert type(match) is list else: opts["match"] = [] exclude = opts.get("exclude") if exclude and type(exclude) is str: opts["exclude"] = [pat.strip() for pat in exclude.split(",")] elif exclude: assert type(exclude) is list else: # opts["exclude"] = DEFAULT_OMIT opts["exclude"] = []
python
def process_options(opts): # Convert match and exclude args into pattern lists match = opts.get("match") if match and type(match) is str: opts["match"] = [pat.strip() for pat in match.split(",")] elif match: assert type(match) is list else: opts["match"] = [] exclude = opts.get("exclude") if exclude and type(exclude) is str: opts["exclude"] = [pat.strip() for pat in exclude.split(",")] elif exclude: assert type(exclude) is list else: # opts["exclude"] = DEFAULT_OMIT opts["exclude"] = []
[ "def", "process_options", "(", "opts", ")", ":", "# Convert match and exclude args into pattern lists", "match", "=", "opts", ".", "get", "(", "\"match\"", ")", "if", "match", "and", "type", "(", "match", ")", "is", "str", ":", "opts", "[", "\"match\"", "]", "=", "[", "pat", ".", "strip", "(", ")", "for", "pat", "in", "match", ".", "split", "(", "\",\"", ")", "]", "elif", "match", ":", "assert", "type", "(", "match", ")", "is", "list", "else", ":", "opts", "[", "\"match\"", "]", "=", "[", "]", "exclude", "=", "opts", ".", "get", "(", "\"exclude\"", ")", "if", "exclude", "and", "type", "(", "exclude", ")", "is", "str", ":", "opts", "[", "\"exclude\"", "]", "=", "[", "pat", ".", "strip", "(", ")", "for", "pat", "in", "exclude", ".", "split", "(", "\",\"", ")", "]", "elif", "exclude", ":", "assert", "type", "(", "exclude", ")", "is", "list", "else", ":", "# opts[\"exclude\"] = DEFAULT_OMIT", "opts", "[", "\"exclude\"", "]", "=", "[", "]" ]
Check and prepare options dict.
[ "Check", "and", "prepare", "options", "dict", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L41-L59
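A sketch of the process_options normalization, assuming the helper is importable at module level per the path above:

from ftpsync.synchronizers import process_options

opts = {"match": "*.py, *.txt", "exclude": None}
process_options(opts)
print(opts["match"])    # ['*.py', '*.txt'] -- CSV string split into a pattern list
print(opts["exclude"])  # []                -- falsy value normalized to an empty list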
4,224
mar10/pyftpsync
ftpsync/synchronizers.py
match_path
def match_path(entry, opts): """Return True if `path` matches `match` and `exclude` options.""" if entry.name in ALWAYS_OMIT: return False # TODO: currently we use fnmatch syntax and match against names. # We also might allow glob syntax and match against the whole relative path instead # path = entry.get_rel_path() path = entry.name ok = True match = opts.get("match") exclude = opts.get("exclude") if entry.is_file() and match: assert type(match) is list ok = False for pat in match: if fnmatch.fnmatch(path, pat): ok = True break if ok and exclude: assert type(exclude) is list for pat in exclude: if fnmatch.fnmatch(path, pat): ok = False break # write("match", ok, entry) return ok
python
def match_path(entry, opts): if entry.name in ALWAYS_OMIT: return False # TODO: currently we use fnmatch syntax and match against names. # We also might allow glob syntax and match against the whole relative path instead # path = entry.get_rel_path() path = entry.name ok = True match = opts.get("match") exclude = opts.get("exclude") if entry.is_file() and match: assert type(match) is list ok = False for pat in match: if fnmatch.fnmatch(path, pat): ok = True break if ok and exclude: assert type(exclude) is list for pat in exclude: if fnmatch.fnmatch(path, pat): ok = False break # write("match", ok, entry) return ok
[ "def", "match_path", "(", "entry", ",", "opts", ")", ":", "if", "entry", ".", "name", "in", "ALWAYS_OMIT", ":", "return", "False", "# TODO: currently we use fnmatch syntax and match against names.", "# We also might allow glob syntax and match against the whole relative path instead", "# path = entry.get_rel_path()", "path", "=", "entry", ".", "name", "ok", "=", "True", "match", "=", "opts", ".", "get", "(", "\"match\"", ")", "exclude", "=", "opts", ".", "get", "(", "\"exclude\"", ")", "if", "entry", ".", "is_file", "(", ")", "and", "match", ":", "assert", "type", "(", "match", ")", "is", "list", "ok", "=", "False", "for", "pat", "in", "match", ":", "if", "fnmatch", ".", "fnmatch", "(", "path", ",", "pat", ")", ":", "ok", "=", "True", "break", "if", "ok", "and", "exclude", ":", "assert", "type", "(", "exclude", ")", "is", "list", "for", "pat", "in", "exclude", ":", "if", "fnmatch", ".", "fnmatch", "(", "path", ",", "pat", ")", ":", "ok", "=", "False", "break", "# write(\"match\", ok, entry)", "return", "ok" ]
Return True if `path` matches `match` and `exclude` options.
[ "Return", "True", "if", "path", "matches", "match", "and", "exclude", "options", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L63-L88
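A sketch of match_path using a hypothetical stand-in for pyftpsync's entry objects; only the attributes the function actually touches are stubbed:

from ftpsync.synchronizers import match_path  # assumed importable, per the path above

class FakeEntry:  # hypothetical stand-in, not a pyftpsync class
    def __init__(self, name):
        self.name = name
    def is_file(self):
        return True

opts = {"match": ["*.txt"], "exclude": ["tmp*"]}
print(match_path(FakeEntry("notes.txt"), opts))      # True: matched, not excluded
print(match_path(FakeEntry("tmp_notes.txt"), opts))  # False: excluded by tmp*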
4,225
mar10/pyftpsync
ftpsync/synchronizers.py
BaseSynchronizer._tick
def _tick(self): """Write progress info and move cursor to beginning of line.""" if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"): stats = self.get_stats() prefix = DRY_RUN_PREFIX if self.dry_run else "" sys.stdout.write( "{}Touched {}/{} entries in {} directories...\r".format( prefix, stats["entries_touched"], stats["entries_seen"], stats["local_dirs"], ) ) sys.stdout.flush() return
python
def _tick(self): if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"): stats = self.get_stats() prefix = DRY_RUN_PREFIX if self.dry_run else "" sys.stdout.write( "{}Touched {}/{} entries in {} directories...\r".format( prefix, stats["entries_touched"], stats["entries_seen"], stats["local_dirs"], ) ) sys.stdout.flush() return
[ "def", "_tick", "(", "self", ")", ":", "if", "(", "self", ".", "verbose", ">=", "3", "and", "not", "IS_REDIRECTED", ")", "or", "self", ".", "options", ".", "get", "(", "\"progress\"", ")", ":", "stats", "=", "self", ".", "get_stats", "(", ")", "prefix", "=", "DRY_RUN_PREFIX", "if", "self", ".", "dry_run", "else", "\"\"", "sys", ".", "stdout", ".", "write", "(", "\"{}Touched {}/{} entries in {} directories...\\r\"", ".", "format", "(", "prefix", ",", "stats", "[", "\"entries_touched\"", "]", ",", "stats", "[", "\"entries_seen\"", "]", ",", "stats", "[", "\"local_dirs\"", "]", ",", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return" ]
Write progress info and move cursor to beginning of line.
[ "Write", "progress", "info", "and", "move", "cursor", "to", "beginning", "of", "line", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L437-L451
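The carriage-return idiom _tick relies on, shown standalone (no pyftpsync required): ending the write with \r instead of \n rewrites one status line in place:

import sys
import time

for i in range(1, 4):
    sys.stdout.write("Touched {}/3 entries...\r".format(i))
    sys.stdout.flush()
    time.sleep(0.1)
sys.stdout.write("\n")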
4,226
mar10/pyftpsync
ftpsync/synchronizers.py
BaseSynchronizer._sync_dir
def _sync_dir(self): """Traverse the local folder structure and remote peers. This is the core algorithm that generates calls to self.sync_XXX() handler methods. _sync_dir() is called by self.run(). """ local_entries = self.local.get_dir() # Convert into a dict {name: FileEntry, ...} local_entry_map = dict(map(lambda e: (e.name, e), local_entries)) remote_entries = self.remote.get_dir() # Convert into a dict {name: FileEntry, ...} remote_entry_map = dict(map(lambda e: (e.name, e), remote_entries)) entry_pair_list = [] # 1. Loop over all local files and classify the relationship to the # peer entries. for local_entry in local_entries: if isinstance(local_entry, DirectoryEntry): self._inc_stat("local_dirs") else: self._inc_stat("local_files") if not self._before_sync(local_entry): # TODO: currently, if a file is skipped, it will not be # considered for deletion on the peer target continue # TODO: case insensitive? # We should use os.path.normcase() to convert to lowercase on windows # (i.e. if the FTP server is based on Windows) remote_entry = remote_entry_map.get(local_entry.name) entry_pair = EntryPair(local_entry, remote_entry) entry_pair_list.append(entry_pair) # TODO: renaming could be triggered, if we find an existing # entry.unique with a different entry.name # 2. Collect all remote entries that do NOT exist on the local target. for remote_entry in remote_entries: if isinstance(remote_entry, DirectoryEntry): self._inc_stat("remote_dirs") else: self._inc_stat("remote_files") if not self._before_sync(remote_entry): continue if remote_entry.name not in local_entry_map: entry_pair = EntryPair(None, remote_entry) entry_pair_list.append(entry_pair) # print("NOT IN LOCAL") # print(remote_entry.name) # print(self.remote.get_id()) # print(local_entry_map.keys()) # print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id())) # 3. Classify all entries and pairs. # We pass the additional meta data here peer_dir_meta = self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()) for pair in entry_pair_list: pair.classify(peer_dir_meta) # 4. Perform (or schedule) resulting file operations for pair in entry_pair_list: # print(pair) # Let synchronizer modify the default operation (e.g. apply `--force` option) hook_result = self.re_classify_pair(pair) # Let synchronizer implement special handling of unmatched entries # (e.g. `--delete_unmatched`) if not self._match(pair.any_entry): self.on_mismatch(pair) # ... do not call operation handler... elif hook_result is not False: handler = getattr(self, "on_" + pair.operation, None) # print(handler) if handler: try: res = handler(pair) except Exception as e: if self.on_error(e, pair) is not True: raise else: # write("NO HANDLER") raise NotImplementedError("No handler for {}".format(pair)) if pair.is_conflict(): self._inc_stat("conflict_files") # 5. Let the target provider write its meta data for the files in the # current directory. self.local.flush_meta() self.remote.flush_meta() # 6. Finally visit all local sub-directories recursively that also # exist on the remote target. for local_dir in local_entries: # write("local_dir(%s, %s)" % (local_dir, local_dir)) if not local_dir.is_dir(): continue elif not self._before_sync(local_dir): continue remote_dir = remote_entry_map.get(local_dir.name) if remote_dir: # write("sync_equal_dir(%s, %s)" % (local_dir, remote_dir)) # self._log_call("sync_equal_dir(%s, %s)" % (local_dir, remote_dir)) # res = self.sync_equal_dir(local_dir, remote_dir) # res = self.on_equal(local_dir, remote_dir) if res is not False: self.local.cwd(local_dir.name) self.remote.cwd(local_dir.name) self._sync_dir() self.local.cwd("..") self.remote.cwd("..") return True
python
def _sync_dir(self): local_entries = self.local.get_dir() # Convert into a dict {name: FileEntry, ...} local_entry_map = dict(map(lambda e: (e.name, e), local_entries)) remote_entries = self.remote.get_dir() # Convert into a dict {name: FileEntry, ...} remote_entry_map = dict(map(lambda e: (e.name, e), remote_entries)) entry_pair_list = [] # 1. Loop over all local files and classify the relationship to the # peer entries. for local_entry in local_entries: if isinstance(local_entry, DirectoryEntry): self._inc_stat("local_dirs") else: self._inc_stat("local_files") if not self._before_sync(local_entry): # TODO: currently, if a file is skipped, it will not be # considered for deletion on the peer target continue # TODO: case insensitive? # We should use os.path.normcase() to convert to lowercase on windows # (i.e. if the FTP server is based on Windows) remote_entry = remote_entry_map.get(local_entry.name) entry_pair = EntryPair(local_entry, remote_entry) entry_pair_list.append(entry_pair) # TODO: renaming could be triggered, if we find an existing # entry.unique with a different entry.name # 2. Collect all remote entries that do NOT exist on the local target. for remote_entry in remote_entries: if isinstance(remote_entry, DirectoryEntry): self._inc_stat("remote_dirs") else: self._inc_stat("remote_files") if not self._before_sync(remote_entry): continue if remote_entry.name not in local_entry_map: entry_pair = EntryPair(None, remote_entry) entry_pair_list.append(entry_pair) # print("NOT IN LOCAL") # print(remote_entry.name) # print(self.remote.get_id()) # print(local_entry_map.keys()) # print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id())) # 3. Classify all entries and pairs. # We pass the additional meta data here peer_dir_meta = self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()) for pair in entry_pair_list: pair.classify(peer_dir_meta) # 4. Perform (or schedule) resulting file operations for pair in entry_pair_list: # print(pair) # Let synchronizer modify the default operation (e.g. apply `--force` option) hook_result = self.re_classify_pair(pair) # Let synchronizer implement special handling of unmatched entries # (e.g. `--delete_unmatched`) if not self._match(pair.any_entry): self.on_mismatch(pair) # ... do not call operation handler... elif hook_result is not False: handler = getattr(self, "on_" + pair.operation, None) # print(handler) if handler: try: res = handler(pair) except Exception as e: if self.on_error(e, pair) is not True: raise else: # write("NO HANDLER") raise NotImplementedError("No handler for {}".format(pair)) if pair.is_conflict(): self._inc_stat("conflict_files") # 5. Let the target provider write its meta data for the files in the # current directory. self.local.flush_meta() self.remote.flush_meta() # 6. Finally visit all local sub-directories recursively that also # exist on the remote target. for local_dir in local_entries: # write("local_dir(%s, %s)" % (local_dir, local_dir)) if not local_dir.is_dir(): continue elif not self._before_sync(local_dir): continue remote_dir = remote_entry_map.get(local_dir.name) if remote_dir: # write("sync_equal_dir(%s, %s)" % (local_dir, remote_dir)) # self._log_call("sync_equal_dir(%s, %s)" % (local_dir, remote_dir)) # res = self.sync_equal_dir(local_dir, remote_dir) # res = self.on_equal(local_dir, remote_dir) if res is not False: self.local.cwd(local_dir.name) self.remote.cwd(local_dir.name) self._sync_dir() self.local.cwd("..") self.remote.cwd("..") return True
[ "def", "_sync_dir", "(", "self", ")", ":", "local_entries", "=", "self", ".", "local", ".", "get_dir", "(", ")", "# Convert into a dict {name: FileEntry, ...}", "local_entry_map", "=", "dict", "(", "map", "(", "lambda", "e", ":", "(", "e", ".", "name", ",", "e", ")", ",", "local_entries", ")", ")", "remote_entries", "=", "self", ".", "remote", ".", "get_dir", "(", ")", "# Convert into a dict {name: FileEntry, ...}", "remote_entry_map", "=", "dict", "(", "map", "(", "lambda", "e", ":", "(", "e", ".", "name", ",", "e", ")", ",", "remote_entries", ")", ")", "entry_pair_list", "=", "[", "]", "# 1. Loop over all local files and classify the relationship to the", "# peer entries.", "for", "local_entry", "in", "local_entries", ":", "if", "isinstance", "(", "local_entry", ",", "DirectoryEntry", ")", ":", "self", ".", "_inc_stat", "(", "\"local_dirs\"", ")", "else", ":", "self", ".", "_inc_stat", "(", "\"local_files\"", ")", "if", "not", "self", ".", "_before_sync", "(", "local_entry", ")", ":", "# TODO: currently, if a file is skipped, it will not be", "# considered for deletion on the peer target", "continue", "# TODO: case insensitive?", "# We should use os.path.normcase() to convert to lowercase on windows", "# (i.e. if the FTP server is based on Windows)", "remote_entry", "=", "remote_entry_map", ".", "get", "(", "local_entry", ".", "name", ")", "entry_pair", "=", "EntryPair", "(", "local_entry", ",", "remote_entry", ")", "entry_pair_list", ".", "append", "(", "entry_pair", ")", "# TODO: renaming could be triggered, if we find an existing", "# entry.unique with a different entry.name", "# 2. Collect all remote entries that do NOT exist on the local target.", "for", "remote_entry", "in", "remote_entries", ":", "if", "isinstance", "(", "remote_entry", ",", "DirectoryEntry", ")", ":", "self", ".", "_inc_stat", "(", "\"remote_dirs\"", ")", "else", ":", "self", ".", "_inc_stat", "(", "\"remote_files\"", ")", "if", "not", "self", ".", "_before_sync", "(", "remote_entry", ")", ":", "continue", "if", "remote_entry", ".", "name", "not", "in", "local_entry_map", ":", "entry_pair", "=", "EntryPair", "(", "None", ",", "remote_entry", ")", "entry_pair_list", ".", "append", "(", "entry_pair", ")", "# print(\"NOT IN LOCAL\")", "# print(remote_entry.name)", "# print(self.remote.get_id())", "# print(local_entry_map.keys())", "# print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()))", "# 3. Classify all entries and pairs.", "# We pass the additional meta data here", "peer_dir_meta", "=", "self", ".", "local", ".", "cur_dir_meta", ".", "peer_sync", ".", "get", "(", "self", ".", "remote", ".", "get_id", "(", ")", ")", "for", "pair", "in", "entry_pair_list", ":", "pair", ".", "classify", "(", "peer_dir_meta", ")", "# 4. Perform (or schedule) resulting file operations", "for", "pair", "in", "entry_pair_list", ":", "# print(pair)", "# Let synchronizer modify the default operation (e.g. apply `--force` option)", "hook_result", "=", "self", ".", "re_classify_pair", "(", "pair", ")", "# Let synchronizer implement special handling of unmatched entries", "# (e.g. `--delete_unmatched`)", "if", "not", "self", ".", "_match", "(", "pair", ".", "any_entry", ")", ":", "self", ".", "on_mismatch", "(", "pair", ")", "# ... 
do not call operation handler...", "elif", "hook_result", "is", "not", "False", ":", "handler", "=", "getattr", "(", "self", ",", "\"on_\"", "+", "pair", ".", "operation", ",", "None", ")", "# print(handler)", "if", "handler", ":", "try", ":", "res", "=", "handler", "(", "pair", ")", "except", "Exception", "as", "e", ":", "if", "self", ".", "on_error", "(", "e", ",", "pair", ")", "is", "not", "True", ":", "raise", "else", ":", "# write(\"NO HANDLER\")", "raise", "NotImplementedError", "(", "\"No handler for {}\"", ".", "format", "(", "pair", ")", ")", "if", "pair", ".", "is_conflict", "(", ")", ":", "self", ".", "_inc_stat", "(", "\"conflict_files\"", ")", "# 5. Let the target provider write its meta data for the files in the", "# current directory.", "self", ".", "local", ".", "flush_meta", "(", ")", "self", ".", "remote", ".", "flush_meta", "(", ")", "# 6. Finally visit all local sub-directories recursively that also", "# exist on the remote target.", "for", "local_dir", "in", "local_entries", ":", "# write(\"local_dir(%s, %s)\" % (local_dir, local_dir))", "if", "not", "local_dir", ".", "is_dir", "(", ")", ":", "continue", "elif", "not", "self", ".", "_before_sync", "(", "local_dir", ")", ":", "continue", "remote_dir", "=", "remote_entry_map", ".", "get", "(", "local_dir", ".", "name", ")", "if", "remote_dir", ":", "# write(\"sync_equal_dir(%s, %s)\" % (local_dir, remote_dir))", "# self._log_call(\"sync_equal_dir(%s, %s)\" % (local_dir, remote_dir))", "# res = self.sync_equal_dir(local_dir, remote_dir)", "# res = self.on_equal(local_dir, remote_dir)", "if", "res", "is", "not", "False", ":", "self", ".", "local", ".", "cwd", "(", "local_dir", ".", "name", ")", "self", ".", "remote", ".", "cwd", "(", "local_dir", ".", "name", ")", "self", ".", "_sync_dir", "(", ")", "self", ".", "local", ".", "cwd", "(", "\"..\"", ")", "self", ".", "remote", ".", "cwd", "(", "\"..\"", ")", "return", "True" ]
Traverse the local folder structure and remote peers. This is the core algorithm that generates calls to self.sync_XXX() handler methods. _sync_dir() is called by self.run().
[ "Traverse", "the", "local", "folder", "structure", "and", "remote", "peers", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L474-L596
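The `_sync_dir` record above is the heart of the library: pair entries by name, classify each pair, then dispatch to an `on_<operation>` handler. As a rough illustration of that pair-and-dispatch shape (names and classifications below are simplified stand-ins, not pyftpsync's actual EntryPair logic), a sketch might look like:

# Hypothetical sketch of the pairing pattern from _sync_dir above;
# real pyftpsync works on FileEntry/DirectoryEntry objects, not strings.
local = {"a.txt": "local-entry-a", "b.txt": "local-entry-b"}
remote = {"b.txt": "remote-entry-b", "c.txt": "remote-entry-c"}

pairs = [(name, local.get(name), remote.get(name))
         for name in sorted(set(local) | set(remote))]

for name, loc, rem in pairs:
    if loc and rem:
        operation = "need_compare"   # exists on both sides
    elif loc:
        operation = "copy_local"     # only local: candidate for upload
    else:
        operation = "copy_remote"    # only remote: candidate for download
    handler_name = "on_" + operation  # dispatch pattern used in step 4 above
    print(name, "->", handler_name)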
4,227
mar10/pyftpsync
ftpsync/synchronizers.py
BaseSynchronizer.on_copy_local
def on_copy_local(self, pair): """Called when the local resource should be copied to remote.""" status = pair.remote_classification self._log_action("copy", status, ">", pair.local)
python
def on_copy_local(self, pair): status = pair.remote_classification self._log_action("copy", status, ">", pair.local)
[ "def", "on_copy_local", "(", "self", ",", "pair", ")", ":", "status", "=", "pair", ".", "remote_classification", "self", ".", "_log_action", "(", "\"copy\"", ",", "status", ",", "\">\"", ",", "pair", ".", "local", ")" ]
Called when the local resource should be copied to remote.
[ "Called", "when", "the", "local", "resource", "should", "be", "copied", "to", "remote", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L626-L629
4,228
mar10/pyftpsync
ftpsync/synchronizers.py
BaseSynchronizer.on_copy_remote
def on_copy_remote(self, pair): """Called when the remote resource should be copied to local.""" status = pair.local_classification self._log_action("copy", status, "<", pair.remote)
python
def on_copy_remote(self, pair): status = pair.local_classification self._log_action("copy", status, "<", pair.remote)
[ "def", "on_copy_remote", "(", "self", ",", "pair", ")", ":", "status", "=", "pair", ".", "local_classification", "self", ".", "_log_action", "(", "\"copy\"", ",", "status", ",", "\"<\"", ",", "pair", ".", "remote", ")" ]
Called when the remote resource should be copied to local.
[ "Called", "when", "the", "remote", "resource", "should", "be", "copied", "to", "local", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L631-L634
4,229
mar10/pyftpsync
ftpsync/synchronizers.py
BiDirSynchronizer.on_need_compare
def on_need_compare(self, pair):
    """Re-classify pair based on file attributes and options."""
    # print("on_need_compare", pair)
    # If no metadata is available, we could only classify file entries as
    # 'existing'.
    # Now we use peer information to improve this classification.
    c_pair = (pair.local_classification, pair.remote_classification)
    org_pair = c_pair
    org_operation = pair.operation
    # print("need_compare", pair)
    if pair.is_dir:
        # For directories, we cannot compare existing peer entries.
        # Instead, we simply log (and traverse the children later).
        pair.local_classification = pair.remote_classification = "existing"
        pair.operation = "equal"
        self._log_action("", "visit", "?", pair.local, min_level=4)
        # self._log_action("", "equal", "=", pair.local, min_level=4)
        return
    elif c_pair == ("existing", "existing"):
        # Naive classification derived from file time and size
        time_cmp = eps_compare(
            pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
        )
        if time_cmp < 0:
            c_pair = ("unmodified", "modified")  # remote is newer
        elif time_cmp > 0:
            c_pair = ("modified", "unmodified")  # local is newer
        elif pair.local.size == pair.remote.size:
            c_pair = ("unmodified", "unmodified")  # equal
        else:
            c_pair = ("modified", "modified")  # conflict!
    elif c_pair == ("new", "new"):
        # Naive classification derived from file time and size
        time_cmp = eps_compare(
            pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
        )
        if time_cmp == 0 and pair.local.size == pair.remote.size:
            c_pair = ("unmodified", "unmodified")  # equal
        else:
            c_pair = ("modified", "modified")  # conflict!
    # elif c_pair == ("unmodified", "unmodified"):
    pair.local_classification = c_pair[0]
    pair.remote_classification = c_pair[1]
    pair.operation = operation_map.get(c_pair)
    # print("on_need_compare {} => {}".format(org_pair, pair))
    if not pair.operation:
        raise RuntimeError(
            "Undefined operation for pair classification {}".format(c_pair)
        )
    elif pair.operation == org_operation:
        raise RuntimeError("Could not re-classify {}".format(org_pair))
    handler = getattr(self, "on_" + pair.operation, None)
    res = handler(pair)
    # self._log_action("", "different", "?", pair.local, min_level=2)
    return res
python
def on_need_compare(self, pair):
    # print("on_need_compare", pair)
    # If no metadata is available, we could only classify file entries as
    # 'existing'.
    # Now we use peer information to improve this classification.
    c_pair = (pair.local_classification, pair.remote_classification)
    org_pair = c_pair
    org_operation = pair.operation
    # print("need_compare", pair)
    if pair.is_dir:
        # For directories, we cannot compare existing peer entries.
        # Instead, we simply log (and traverse the children later).
        pair.local_classification = pair.remote_classification = "existing"
        pair.operation = "equal"
        self._log_action("", "visit", "?", pair.local, min_level=4)
        # self._log_action("", "equal", "=", pair.local, min_level=4)
        return
    elif c_pair == ("existing", "existing"):
        # Naive classification derived from file time and size
        time_cmp = eps_compare(
            pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
        )
        if time_cmp < 0:
            c_pair = ("unmodified", "modified")  # remote is newer
        elif time_cmp > 0:
            c_pair = ("modified", "unmodified")  # local is newer
        elif pair.local.size == pair.remote.size:
            c_pair = ("unmodified", "unmodified")  # equal
        else:
            c_pair = ("modified", "modified")  # conflict!
    elif c_pair == ("new", "new"):
        # Naive classification derived from file time and size
        time_cmp = eps_compare(
            pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
        )
        if time_cmp == 0 and pair.local.size == pair.remote.size:
            c_pair = ("unmodified", "unmodified")  # equal
        else:
            c_pair = ("modified", "modified")  # conflict!
    # elif c_pair == ("unmodified", "unmodified"):
    pair.local_classification = c_pair[0]
    pair.remote_classification = c_pair[1]
    pair.operation = operation_map.get(c_pair)
    # print("on_need_compare {} => {}".format(org_pair, pair))
    if not pair.operation:
        raise RuntimeError(
            "Undefined operation for pair classification {}".format(c_pair)
        )
    elif pair.operation == org_operation:
        raise RuntimeError("Could not re-classify {}".format(org_pair))
    handler = getattr(self, "on_" + pair.operation, None)
    res = handler(pair)
    # self._log_action("", "different", "?", pair.local, min_level=2)
    return res
[ "def", "on_need_compare", "(", "self", ",", "pair", ")", ":", "# print(\"on_need_compare\", pair)", "# If no metadata is available, we could only classify file entries as", "# 'existing'.", "# Now we use peer information to improve this classification.", "c_pair", "=", "(", "pair", ".", "local_classification", ",", "pair", ".", "remote_classification", ")", "org_pair", "=", "c_pair", "org_operation", "=", "pair", ".", "operation", "# print(\"need_compare\", pair)", "if", "pair", ".", "is_dir", ":", "# For directores, we cannot compare existing peer entries.", "# Instead, we simply log (and traverse the children later).", "pair", ".", "local_classification", "=", "pair", ".", "remote_classification", "=", "\"existing\"", "pair", ".", "operation", "=", "\"equal\"", "self", ".", "_log_action", "(", "\"\"", ",", "\"visit\"", ",", "\"?\"", ",", "pair", ".", "local", ",", "min_level", "=", "4", ")", "# self._log_action(\"\", \"equal\", \"=\", pair.local, min_level=4)", "return", "elif", "c_pair", "==", "(", "\"existing\"", ",", "\"existing\"", ")", ":", "# Naive classification derived from file time and size", "time_cmp", "=", "eps_compare", "(", "pair", ".", "local", ".", "mtime", ",", "pair", ".", "remote", ".", "mtime", ",", "FileEntry", ".", "EPS_TIME", ")", "if", "time_cmp", "<", "0", ":", "c_pair", "=", "(", "\"unmodified\"", ",", "\"modified\"", ")", "# remote is newer", "elif", "time_cmp", ">", "0", ":", "c_pair", "=", "(", "\"modified\"", ",", "\"unmodified\"", ")", "# local is newer", "elif", "pair", ".", "local", ".", "size", "==", "pair", ".", "remote", ".", "size", ":", "c_pair", "=", "(", "\"unmodified\"", ",", "\"unmodified\"", ")", "# equal", "else", ":", "c_pair", "=", "(", "\"modified\"", ",", "\"modified\"", ")", "# conflict!", "elif", "c_pair", "==", "(", "\"new\"", ",", "\"new\"", ")", ":", "# Naive classification derived from file time and size", "time_cmp", "=", "eps_compare", "(", "pair", ".", "local", ".", "mtime", ",", "pair", ".", "remote", ".", "mtime", ",", "FileEntry", ".", "EPS_TIME", ")", "if", "time_cmp", "==", "0", "and", "pair", ".", "local", ".", "size", "==", "pair", ".", "remote", ".", "size", ":", "c_pair", "=", "(", "\"unmodified\"", ",", "\"unmodified\"", ")", "# equal", "else", ":", "c_pair", "=", "(", "\"modified\"", ",", "\"modified\"", ")", "# conflict!", "# elif c_pair == (\"unmodified\", \"unmodified\"):", "pair", ".", "local_classification", "=", "c_pair", "[", "0", "]", "pair", ".", "remote_classification", "=", "c_pair", "[", "1", "]", "pair", ".", "operation", "=", "operation_map", ".", "get", "(", "c_pair", ")", "# print(\"on_need_compare {} => {}\".format(org_pair, pair))", "if", "not", "pair", ".", "operation", ":", "raise", "RuntimeError", "(", "\"Undefined operation for pair classification {}\"", ".", "format", "(", "c_pair", ")", ")", "elif", "pair", ".", "operation", "==", "org_operation", ":", "raise", "RuntimeError", "(", "\"Could not re-classify {}\"", ".", "format", "(", "org_pair", ")", ")", "handler", "=", "getattr", "(", "self", ",", "\"on_\"", "+", "pair", ".", "operation", ",", "None", ")", "res", "=", "handler", "(", "pair", ")", "# self._log_action(\"\", \"different\", \"?\", pair.local, min_level=2)", "return", "res" ]
Re-classify pair based on file attributes and options.
[ "Re", "-", "classify", "pair", "based", "on", "file", "attributes", "and", "options", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L871-L934
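The time/size heuristic above reduces to a small decision table. Below is a simplified standalone version; the 2-second EPS_TIME is an illustrative guess at FAT-style mtime resolution, not necessarily the value of pyftpsync's FileEntry.EPS_TIME:

# Simplified re-implementation of the naive classification in on_need_compare.
EPS_TIME = 2.0  # assumed tolerance; FAT filesystems store mtimes with 2 s resolution

def classify(local_mtime, local_size, remote_mtime, remote_size):
    diff = local_mtime - remote_mtime
    if abs(diff) <= EPS_TIME:
        if local_size == remote_size:
            return ("unmodified", "unmodified")  # treated as equal
        return ("modified", "modified")          # same mtime, different size: conflict
    if diff < 0:
        return ("unmodified", "modified")        # remote is newer
    return ("modified", "unmodified")            # local is newer

print(classify(1000.0, 10, 1001.0, 10))  # ('unmodified', 'unmodified')
print(classify(1000.0, 10, 1010.0, 10))  # ('unmodified', 'modified')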
4,230
mar10/pyftpsync
ftpsync/synchronizers.py
BiDirSynchronizer.on_conflict
def on_conflict(self, pair): """Return False to prevent visiting of children.""" # self._log_action("skip", "conflict", "!", pair.local, min_level=2) # print("on_conflict", pair) any_entry = pair.any_entry if not self._test_match_or_print(any_entry): return resolve = self._interactive_resolve(pair) if resolve == "skip": self._log_action("skip", "conflict", "*?*", any_entry) self._inc_stat("conflict_files_skipped") return if pair.local and pair.remote: assert pair.local.is_file() is_newer = pair.local > pair.remote if ( resolve == "local" or (is_newer and resolve == "new") or (not is_newer and resolve == "old") ): self._log_action("copy", "conflict", "*>*", pair.local) self._copy_file(self.local, self.remote, pair.local) elif ( resolve == "remote" or (is_newer and resolve == "old") or (not is_newer and resolve == "new") ): self._log_action("copy", "conflict", "*<*", pair.local) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError elif pair.local: assert pair.local.is_file() if resolve == "local": self._log_action("restore", "conflict", "*>x", pair.local) self._copy_file(self.local, self.remote, pair.local) elif resolve == "remote": self._log_action("delete", "conflict", "*<x", pair.local) self._remove_file(pair.local) else: raise NotImplementedError else: assert pair.remote.is_file() if resolve == "local": self._log_action("delete", "conflict", "x>*", pair.remote) self._remove_file(pair.remote) elif resolve == "remote": self._log_action("restore", "conflict", "x<*", pair.remote) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError return
python
def on_conflict(self, pair): # self._log_action("skip", "conflict", "!", pair.local, min_level=2) # print("on_conflict", pair) any_entry = pair.any_entry if not self._test_match_or_print(any_entry): return resolve = self._interactive_resolve(pair) if resolve == "skip": self._log_action("skip", "conflict", "*?*", any_entry) self._inc_stat("conflict_files_skipped") return if pair.local and pair.remote: assert pair.local.is_file() is_newer = pair.local > pair.remote if ( resolve == "local" or (is_newer and resolve == "new") or (not is_newer and resolve == "old") ): self._log_action("copy", "conflict", "*>*", pair.local) self._copy_file(self.local, self.remote, pair.local) elif ( resolve == "remote" or (is_newer and resolve == "old") or (not is_newer and resolve == "new") ): self._log_action("copy", "conflict", "*<*", pair.local) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError elif pair.local: assert pair.local.is_file() if resolve == "local": self._log_action("restore", "conflict", "*>x", pair.local) self._copy_file(self.local, self.remote, pair.local) elif resolve == "remote": self._log_action("delete", "conflict", "*<x", pair.local) self._remove_file(pair.local) else: raise NotImplementedError else: assert pair.remote.is_file() if resolve == "local": self._log_action("delete", "conflict", "x>*", pair.remote) self._remove_file(pair.remote) elif resolve == "remote": self._log_action("restore", "conflict", "x<*", pair.remote) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError return
[ "def", "on_conflict", "(", "self", ",", "pair", ")", ":", "# self._log_action(\"skip\", \"conflict\", \"!\", pair.local, min_level=2)", "# print(\"on_conflict\", pair)", "any_entry", "=", "pair", ".", "any_entry", "if", "not", "self", ".", "_test_match_or_print", "(", "any_entry", ")", ":", "return", "resolve", "=", "self", ".", "_interactive_resolve", "(", "pair", ")", "if", "resolve", "==", "\"skip\"", ":", "self", ".", "_log_action", "(", "\"skip\"", ",", "\"conflict\"", ",", "\"*?*\"", ",", "any_entry", ")", "self", ".", "_inc_stat", "(", "\"conflict_files_skipped\"", ")", "return", "if", "pair", ".", "local", "and", "pair", ".", "remote", ":", "assert", "pair", ".", "local", ".", "is_file", "(", ")", "is_newer", "=", "pair", ".", "local", ">", "pair", ".", "remote", "if", "(", "resolve", "==", "\"local\"", "or", "(", "is_newer", "and", "resolve", "==", "\"new\"", ")", "or", "(", "not", "is_newer", "and", "resolve", "==", "\"old\"", ")", ")", ":", "self", ".", "_log_action", "(", "\"copy\"", ",", "\"conflict\"", ",", "\"*>*\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "local", ",", "self", ".", "remote", ",", "pair", ".", "local", ")", "elif", "(", "resolve", "==", "\"remote\"", "or", "(", "is_newer", "and", "resolve", "==", "\"old\"", ")", "or", "(", "not", "is_newer", "and", "resolve", "==", "\"new\"", ")", ")", ":", "self", ".", "_log_action", "(", "\"copy\"", ",", "\"conflict\"", ",", "\"*<*\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "remote", ",", "self", ".", "local", ",", "pair", ".", "remote", ")", "else", ":", "raise", "NotImplementedError", "elif", "pair", ".", "local", ":", "assert", "pair", ".", "local", ".", "is_file", "(", ")", "if", "resolve", "==", "\"local\"", ":", "self", ".", "_log_action", "(", "\"restore\"", ",", "\"conflict\"", ",", "\"*>x\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "local", ",", "self", ".", "remote", ",", "pair", ".", "local", ")", "elif", "resolve", "==", "\"remote\"", ":", "self", ".", "_log_action", "(", "\"delete\"", ",", "\"conflict\"", ",", "\"*<x\"", ",", "pair", ".", "local", ")", "self", ".", "_remove_file", "(", "pair", ".", "local", ")", "else", ":", "raise", "NotImplementedError", "else", ":", "assert", "pair", ".", "remote", ".", "is_file", "(", ")", "if", "resolve", "==", "\"local\"", ":", "self", ".", "_log_action", "(", "\"delete\"", ",", "\"conflict\"", ",", "\"x>*\"", ",", "pair", ".", "remote", ")", "self", ".", "_remove_file", "(", "pair", ".", "remote", ")", "elif", "resolve", "==", "\"remote\"", ":", "self", ".", "_log_action", "(", "\"restore\"", ",", "\"conflict\"", ",", "\"x<*\"", ",", "pair", ".", "remote", ")", "self", ".", "_copy_file", "(", "self", ".", "remote", ",", "self", ".", "local", ",", "pair", ".", "remote", ")", "else", ":", "raise", "NotImplementedError", "return" ]
Return False to prevent visiting of children.
[ "Return", "False", "to", "prevent", "visiting", "of", "children", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L936-L990
4,231
mar10/pyftpsync
ftpsync/synchronizers.py
DownloadSynchronizer._interactive_resolve
def _interactive_resolve(self, pair): """Return 'local', 'remote', or 'skip' to use local, remote resource or skip.""" if self.resolve_all: if self.verbose >= 5: self._print_pair_diff(pair) return self.resolve_all resolve = self.options.get("resolve", "skip") assert resolve in ("remote", "ask", "skip") if resolve == "ask" or self.verbose >= 5: self._print_pair_diff(pair) if resolve in ("remote", "skip"): # self.resolve_all = resolve return resolve # RED = ansi_code("Fore.LIGHTRED_EX") M = ansi_code("Style.BRIGHT") + ansi_code("Style.UNDERLINE") R = ansi_code("Style.RESET_ALL") # self._print_pair_diff(pair) self._inc_stat("interactive_ask") while True: prompt = ( "Use " + M + "R" + R + "emote, " + M + "S" + R + "kip, " + M + "B" + R + "inary compare, " + M + "H" + R + "elp? " ) r = compat.console_input(prompt).strip() if r in ("h", "H", "?"): print("The following keys are supported:") print(" 'b': Binary compare") print(" 'r': Download remote file") print(" 's': Skip this file (leave both targets unchanged)") print( "Hold Shift (upper case letters) to apply choice for all " "remaining conflicts." ) print("Hit Ctrl+C to abort.") continue elif r in ("B", "b"): self._compare_file(pair.local, pair.remote) continue elif r in ("R", "S"): r = self._resolve_shortcuts[r.lower()] self.resolve_all = r break elif r in ("r", "s"): r = self._resolve_shortcuts[r] break return r
python
def _interactive_resolve(self, pair): if self.resolve_all: if self.verbose >= 5: self._print_pair_diff(pair) return self.resolve_all resolve = self.options.get("resolve", "skip") assert resolve in ("remote", "ask", "skip") if resolve == "ask" or self.verbose >= 5: self._print_pair_diff(pair) if resolve in ("remote", "skip"): # self.resolve_all = resolve return resolve # RED = ansi_code("Fore.LIGHTRED_EX") M = ansi_code("Style.BRIGHT") + ansi_code("Style.UNDERLINE") R = ansi_code("Style.RESET_ALL") # self._print_pair_diff(pair) self._inc_stat("interactive_ask") while True: prompt = ( "Use " + M + "R" + R + "emote, " + M + "S" + R + "kip, " + M + "B" + R + "inary compare, " + M + "H" + R + "elp? " ) r = compat.console_input(prompt).strip() if r in ("h", "H", "?"): print("The following keys are supported:") print(" 'b': Binary compare") print(" 'r': Download remote file") print(" 's': Skip this file (leave both targets unchanged)") print( "Hold Shift (upper case letters) to apply choice for all " "remaining conflicts." ) print("Hit Ctrl+C to abort.") continue elif r in ("B", "b"): self._compare_file(pair.local, pair.remote) continue elif r in ("R", "S"): r = self._resolve_shortcuts[r.lower()] self.resolve_all = r break elif r in ("r", "s"): r = self._resolve_shortcuts[r] break return r
[ "def", "_interactive_resolve", "(", "self", ",", "pair", ")", ":", "if", "self", ".", "resolve_all", ":", "if", "self", ".", "verbose", ">=", "5", ":", "self", ".", "_print_pair_diff", "(", "pair", ")", "return", "self", ".", "resolve_all", "resolve", "=", "self", ".", "options", ".", "get", "(", "\"resolve\"", ",", "\"skip\"", ")", "assert", "resolve", "in", "(", "\"remote\"", ",", "\"ask\"", ",", "\"skip\"", ")", "if", "resolve", "==", "\"ask\"", "or", "self", ".", "verbose", ">=", "5", ":", "self", ".", "_print_pair_diff", "(", "pair", ")", "if", "resolve", "in", "(", "\"remote\"", ",", "\"skip\"", ")", ":", "# self.resolve_all = resolve", "return", "resolve", "# RED = ansi_code(\"Fore.LIGHTRED_EX\")", "M", "=", "ansi_code", "(", "\"Style.BRIGHT\"", ")", "+", "ansi_code", "(", "\"Style.UNDERLINE\"", ")", "R", "=", "ansi_code", "(", "\"Style.RESET_ALL\"", ")", "# self._print_pair_diff(pair)", "self", ".", "_inc_stat", "(", "\"interactive_ask\"", ")", "while", "True", ":", "prompt", "=", "(", "\"Use \"", "+", "M", "+", "\"R\"", "+", "R", "+", "\"emote, \"", "+", "M", "+", "\"S\"", "+", "R", "+", "\"kip, \"", "+", "M", "+", "\"B\"", "+", "R", "+", "\"inary compare, \"", "+", "M", "+", "\"H\"", "+", "R", "+", "\"elp? \"", ")", "r", "=", "compat", ".", "console_input", "(", "prompt", ")", ".", "strip", "(", ")", "if", "r", "in", "(", "\"h\"", ",", "\"H\"", ",", "\"?\"", ")", ":", "print", "(", "\"The following keys are supported:\"", ")", "print", "(", "\" 'b': Binary compare\"", ")", "print", "(", "\" 'r': Download remote file\"", ")", "print", "(", "\" 's': Skip this file (leave both targets unchanged)\"", ")", "print", "(", "\"Hold Shift (upper case letters) to apply choice for all \"", "\"remaining conflicts.\"", ")", "print", "(", "\"Hit Ctrl+C to abort.\"", ")", "continue", "elif", "r", "in", "(", "\"B\"", ",", "\"b\"", ")", ":", "self", ".", "_compare_file", "(", "pair", ".", "local", ",", "pair", ".", "remote", ")", "continue", "elif", "r", "in", "(", "\"R\"", ",", "\"S\"", ")", ":", "r", "=", "self", ".", "_resolve_shortcuts", "[", "r", ".", "lower", "(", ")", "]", "self", ".", "resolve_all", "=", "r", "break", "elif", "r", "in", "(", "\"r\"", ",", "\"s\"", ")", ":", "r", "=", "self", ".", "_resolve_shortcuts", "[", "r", "]", "break", "return", "r" ]
Return 'local', 'remote', or 'skip' to use local, remote resource or skip.
[ "Return", "local", "remote", "or", "skip", "to", "use", "local", "remote", "resource", "or", "skip", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L1194-L1263
4,232
mar10/pyftpsync
ftpsync/util.py
set_pyftpsync_logger
def set_pyftpsync_logger(logger=True): """Define target for common output. Args: logger (bool | None | logging.Logger): Pass None to use `print()` to stdout instead of logging. Pass True to create a simple standard logger. """ global _logger prev_logger = _logger if logger is True: logging.basicConfig(level=logging.INFO) _logger = logging.getLogger("pyftpsync") _logger.setLevel(logging.DEBUG) else: _logger = logger return prev_logger
python
def set_pyftpsync_logger(logger=True): global _logger prev_logger = _logger if logger is True: logging.basicConfig(level=logging.INFO) _logger = logging.getLogger("pyftpsync") _logger.setLevel(logging.DEBUG) else: _logger = logger return prev_logger
[ "def", "set_pyftpsync_logger", "(", "logger", "=", "True", ")", ":", "global", "_logger", "prev_logger", "=", "_logger", "if", "logger", "is", "True", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "_logger", "=", "logging", ".", "getLogger", "(", "\"pyftpsync\"", ")", "_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "_logger", "=", "logger", "return", "prev_logger" ]
Define target for common output. Args: logger (bool | None | logging.Logger): Pass None to use `print()` to stdout instead of logging. Pass True to create a simple standard logger.
[ "Define", "target", "for", "common", "output", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L31-L47
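Going by the docstring and code above, switching pyftpsync's output channel could look like this (an untested usage sketch):

import logging
from ftpsync.util import set_pyftpsync_logger, write

set_pyftpsync_logger(None)   # route write() to plain print() (write_error() uses stderr)
write("now printed to stdout")

custom = logging.getLogger("myapp.sync")
prev = set_pyftpsync_logger(custom)   # the previously active logger is returned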
4,233
mar10/pyftpsync
ftpsync/util.py
write
def write(*args, **kwargs): """Redirectable wrapper for print statements.""" debug = kwargs.pop("debug", None) warning = kwargs.pop("warning", None) if _logger: kwargs.pop("end", None) kwargs.pop("file", None) if debug: _logger.debug(*args, **kwargs) elif warning: _logger.warning(*args, **kwargs) else: _logger.info(*args, **kwargs) else: print(*args, **kwargs)
python
def write(*args, **kwargs): debug = kwargs.pop("debug", None) warning = kwargs.pop("warning", None) if _logger: kwargs.pop("end", None) kwargs.pop("file", None) if debug: _logger.debug(*args, **kwargs) elif warning: _logger.warning(*args, **kwargs) else: _logger.info(*args, **kwargs) else: print(*args, **kwargs)
[ "def", "write", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "debug", "=", "kwargs", ".", "pop", "(", "\"debug\"", ",", "None", ")", "warning", "=", "kwargs", ".", "pop", "(", "\"warning\"", ",", "None", ")", "if", "_logger", ":", "kwargs", ".", "pop", "(", "\"end\"", ",", "None", ")", "kwargs", ".", "pop", "(", "\"file\"", ",", "None", ")", "if", "debug", ":", "_logger", ".", "debug", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "warning", ":", "_logger", ".", "warning", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "_logger", ".", "info", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Redirectable wrapper for print statements.
[ "Redirectable", "wrapper", "for", "print", "statements", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L54-L68
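For the keyword routing above, a usage sketch (the debug/warning kwargs are popped before printing, so they are safe even without a configured logger):

from ftpsync.util import write

write("downloading foo.txt")                   # logger.info(), or print() without a logger
write("raw FTP listing ...", debug=True)       # logger.debug() when a logger is set
write("mtime looks suspicious", warning=True)  # logger.warning()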
4,234
mar10/pyftpsync
ftpsync/util.py
write_error
def write_error(*args, **kwargs):
    """Redirectable wrapper for printing to sys.stderr."""
    if _logger:
        kwargs.pop("end", None)
        kwargs.pop("file", None)
        _logger.error(*args, **kwargs)
    else:
        print(*args, file=sys.stderr, **kwargs)
python
def write_error(*args, **kwargs): if _logger: kwargs.pop("end", None) kwargs.pop("file", None) _logger.error(*args, **kwargs) else: print(*args, file=sys.stderr, **kwargs)
[ "def", "write_error", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "_logger", ":", "kwargs", ".", "pop", "(", "\"end\"", ",", "None", ")", "kwargs", ".", "pop", "(", "\"file\"", ",", "None", ")", "_logger", ".", "error", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "print", "(", "*", "args", ",", "file", "=", "sys", ".", "stderr", ",", "*", "*", "kwargs", ")" ]
Redirectable wrapper for printing to sys.stderr.
[ "Redirectable", "wrapper", "for", "print", "sys", ".", "stderr", "statements", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L71-L78
4,235
mar10/pyftpsync
ftpsync/util.py
namespace_to_dict
def namespace_to_dict(o): """Convert an argparse namespace object to a dictionary.""" d = {} for k, v in o.__dict__.items(): if not callable(v): d[k] = v return d
python
def namespace_to_dict(o): d = {} for k, v in o.__dict__.items(): if not callable(v): d[k] = v return d
[ "def", "namespace_to_dict", "(", "o", ")", ":", "d", "=", "{", "}", "for", "k", ",", "v", "in", "o", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "callable", "(", "v", ")", ":", "d", "[", "k", "]", "=", "v", "return", "d" ]
Convert an argparse namespace object to a dictionary.
[ "Convert", "an", "argparse", "namespace", "object", "to", "a", "dictionary", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L109-L115
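A quick usage sketch for the helper above, paired with argparse:

import argparse
from ftpsync.util import namespace_to_dict

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=int, default=3)
args = parser.parse_args(["--verbose", "4"])

opts = namespace_to_dict(args)   # plain dict, ready for dict-style option lookups
print(opts["verbose"])           # 4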
4,236
mar10/pyftpsync
ftpsync/util.py
eps_compare
def eps_compare(f1, f2, eps):
    """Return 0 if |f1-f2| <= eps, -1 if f1 < f2, else 1."""
    res = f1 - f2
    if abs(res) <= eps:  # '<=', so eps == 0 works as expected
        return 0
    elif res < 0:
        return -1
    return 1
python
def eps_compare(f1, f2, eps):
    res = f1 - f2
    if abs(res) <= eps:  # '<=', so eps == 0 works as expected
        return 0
    elif res < 0:
        return -1
    return 1
[ "def", "eps_compare", "(", "f1", ",", "f2", ",", "eps", ")", ":", "res", "=", "f1", "-", "f2", "if", "abs", "(", "res", ")", "<=", "eps", ":", "# '<=',so eps == 0 works as expected", "return", "0", "elif", "res", "<", "0", ":", "return", "-", "1", "return", "1" ]
Return 0 if |f1-f2| <= eps, -1 if f1 < f2, else 1.
[ "Return", "true", "if", "|f1", "-", "f2|", "<", "=", "eps", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L118-L125
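eps_compare is a tolerance-aware three-way comparison, not a boolean test; a usage sketch:

from ftpsync.util import eps_compare

# Two mtimes less than 2 seconds apart compare as equal:
print(eps_compare(1000.0, 1001.5, 2.0))  # 0
print(eps_compare(1000.0, 1005.0, 2.0))  # -1 (first value is smaller/older)
print(eps_compare(1005.0, 1000.0, 2.0))  # 1  (first value is larger/newer)
print(eps_compare(1.0, 1.0, 0.0))        # 0  (eps == 0 still matches exact equality)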
4,237
mar10/pyftpsync
ftpsync/util.py
get_option
def get_option(env_name, section, opt_name, default=None): """Return a configuration setting from environment var or .pyftpsyncrc""" val = os.environ.get(env_name) if val is None: try: val = _pyftpsyncrc_parser.get(section, opt_name) except (compat.configparser.NoSectionError, compat.configparser.NoOptionError): pass if val is None: val = default return val
python
def get_option(env_name, section, opt_name, default=None): val = os.environ.get(env_name) if val is None: try: val = _pyftpsyncrc_parser.get(section, opt_name) except (compat.configparser.NoSectionError, compat.configparser.NoOptionError): pass if val is None: val = default return val
[ "def", "get_option", "(", "env_name", ",", "section", ",", "opt_name", ",", "default", "=", "None", ")", ":", "val", "=", "os", ".", "environ", ".", "get", "(", "env_name", ")", "if", "val", "is", "None", ":", "try", ":", "val", "=", "_pyftpsyncrc_parser", ".", "get", "(", "section", ",", "opt_name", ")", "except", "(", "compat", ".", "configparser", ".", "NoSectionError", ",", "compat", ".", "configparser", ".", "NoOptionError", ")", ":", "pass", "if", "val", "is", "None", ":", "val", "=", "default", "return", "val" ]
Return a configuration setting from environment var or .pyftpsyncrc
[ "Return", "a", "configuration", "setting", "from", "environment", "var", "or", ".", "pyftpsyncrc" ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L139-L149
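The lookup order above is: environment variable first, then the [section] option in .pyftpsyncrc, then the hard-coded default. A sketch with made-up names (the real pyftpsync variable and section names are not shown in this record):

import os
from ftpsync.util import get_option

os.environ["MYAPP_TIMEOUT"] = "30"   # hypothetical variable name
timeout = get_option("MYAPP_TIMEOUT", "connection", "timeout", default="10")
print(timeout)   # "30" -- note that values come back as strings, not ints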
4,238
mar10/pyftpsync
ftpsync/util.py
prompt_for_password
def prompt_for_password(url, user=None, default_user=None): """Prompt for username and password. If a user name is passed, only prompt for a password. Args: url (str): hostname user (str, optional): Pass a valid name to skip prompting for a user name default_user (str, optional): Pass a valid name that is used as default when prompting for a user name Raises: KeyboardInterrupt if user hits Ctrl-C Returns: (username, password) or None """ if user is None: default_user = default_user or getpass.getuser() while user is None: user = compat.console_input( "Enter username for {} [{}]: ".format(url, default_user) ) if user.strip() == "" and default_user: user = default_user if user: pw = getpass.getpass( "Enter password for {}@{} (Ctrl+C to abort): ".format(user, url) ) if pw or pw == "": return (user, pw) return None
python
def prompt_for_password(url, user=None, default_user=None): if user is None: default_user = default_user or getpass.getuser() while user is None: user = compat.console_input( "Enter username for {} [{}]: ".format(url, default_user) ) if user.strip() == "" and default_user: user = default_user if user: pw = getpass.getpass( "Enter password for {}@{} (Ctrl+C to abort): ".format(user, url) ) if pw or pw == "": return (user, pw) return None
[ "def", "prompt_for_password", "(", "url", ",", "user", "=", "None", ",", "default_user", "=", "None", ")", ":", "if", "user", "is", "None", ":", "default_user", "=", "default_user", "or", "getpass", ".", "getuser", "(", ")", "while", "user", "is", "None", ":", "user", "=", "compat", ".", "console_input", "(", "\"Enter username for {} [{}]: \"", ".", "format", "(", "url", ",", "default_user", ")", ")", "if", "user", ".", "strip", "(", ")", "==", "\"\"", "and", "default_user", ":", "user", "=", "default_user", "if", "user", ":", "pw", "=", "getpass", ".", "getpass", "(", "\"Enter password for {}@{} (Ctrl+C to abort): \"", ".", "format", "(", "user", ",", "url", ")", ")", "if", "pw", "or", "pw", "==", "\"\"", ":", "return", "(", "user", ",", "pw", ")", "return", "None" ]
Prompt for username and password. If a user name is passed, only prompt for a password. Args: url (str): hostname user (str, optional): Pass a valid name to skip prompting for a user name default_user (str, optional): Pass a valid name that is used as default when prompting for a user name Raises: KeyboardInterrupt if user hits Ctrl-C Returns: (username, password) or None
[ "Prompt", "for", "username", "and", "password", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L169-L199
4,239
mar10/pyftpsync
ftpsync/util.py
get_credentials_for_url
def get_credentials_for_url(url, opts, force_user=None): """Lookup credentials for a given target in keyring and .netrc. Optionally prompts for credentials if not found. Returns: 2-tuple (username, password) or None """ creds = None verbose = int(opts.get("verbose")) force_prompt = opts.get("prompt", False) allow_prompt = not opts.get("no_prompt", True) allow_keyring = not opts.get("no_keyring", False) and not force_user allow_netrc = not opts.get("no_netrc", False) and not force_user # print("get_credentials_for_url", force_user, allow_prompt) if force_user and not allow_prompt: raise RuntimeError( "Cannot get credentials for a distinct user ({}) from keyring or .netrc and " "prompting is disabled.".format(force_user) ) # Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x home_path = os.path.expanduser("~") file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE) if os.path.isfile(file_path): raise RuntimeError( "Custom password files are no longer supported. Delete {} and use .netrc instead.".format( file_path ) ) # Query keyring database if creds is None and keyring and allow_keyring: try: # Note: we pass the url as `username` and username:password as `password` c = keyring.get_password("pyftpsync", url) if c is not None: creds = c.split(":", 1) write( "Using credentials from keyring('pyftpsync', '{}'): {}:***.".format( url, creds[0] ) ) else: if verbose >= 4: write( "No credentials found in keyring('pyftpsync', '{}').".format( url ) ) # except keyring.errors.TransientKeyringError: except Exception as e: # e.g. user clicked 'no' write_error("Could not get password from keyring {}".format(e)) # Query .netrc file # print(opts) if creds is None and allow_netrc: try: authenticators = None authenticators = netrc.netrc().authenticators(url) except CompatFileNotFoundError: if verbose >= 4: write("Could not get password (no .netrc file).") except Exception as e: write_error("Could not read .netrc: {}.".format(e)) if authenticators: creds = (authenticators[0], authenticators[2]) write("Using credentials from .netrc file: {}:***.".format(creds[0])) else: if verbose >= 4: write("Could not find entry for '{}' in .netrc file.".format(url)) # Prompt for password if we don't have credentials yet, or --prompt was set. if allow_prompt: if creds is None: creds = prompt_for_password(url) elif force_prompt: # --prompt was set but we can provide a default for the user name creds = prompt_for_password(url, default_user=creds[0]) return creds
python
def get_credentials_for_url(url, opts, force_user=None): creds = None verbose = int(opts.get("verbose")) force_prompt = opts.get("prompt", False) allow_prompt = not opts.get("no_prompt", True) allow_keyring = not opts.get("no_keyring", False) and not force_user allow_netrc = not opts.get("no_netrc", False) and not force_user # print("get_credentials_for_url", force_user, allow_prompt) if force_user and not allow_prompt: raise RuntimeError( "Cannot get credentials for a distinct user ({}) from keyring or .netrc and " "prompting is disabled.".format(force_user) ) # Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x home_path = os.path.expanduser("~") file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE) if os.path.isfile(file_path): raise RuntimeError( "Custom password files are no longer supported. Delete {} and use .netrc instead.".format( file_path ) ) # Query keyring database if creds is None and keyring and allow_keyring: try: # Note: we pass the url as `username` and username:password as `password` c = keyring.get_password("pyftpsync", url) if c is not None: creds = c.split(":", 1) write( "Using credentials from keyring('pyftpsync', '{}'): {}:***.".format( url, creds[0] ) ) else: if verbose >= 4: write( "No credentials found in keyring('pyftpsync', '{}').".format( url ) ) # except keyring.errors.TransientKeyringError: except Exception as e: # e.g. user clicked 'no' write_error("Could not get password from keyring {}".format(e)) # Query .netrc file # print(opts) if creds is None and allow_netrc: try: authenticators = None authenticators = netrc.netrc().authenticators(url) except CompatFileNotFoundError: if verbose >= 4: write("Could not get password (no .netrc file).") except Exception as e: write_error("Could not read .netrc: {}.".format(e)) if authenticators: creds = (authenticators[0], authenticators[2]) write("Using credentials from .netrc file: {}:***.".format(creds[0])) else: if verbose >= 4: write("Could not find entry for '{}' in .netrc file.".format(url)) # Prompt for password if we don't have credentials yet, or --prompt was set. if allow_prompt: if creds is None: creds = prompt_for_password(url) elif force_prompt: # --prompt was set but we can provide a default for the user name creds = prompt_for_password(url, default_user=creds[0]) return creds
[ "def", "get_credentials_for_url", "(", "url", ",", "opts", ",", "force_user", "=", "None", ")", ":", "creds", "=", "None", "verbose", "=", "int", "(", "opts", ".", "get", "(", "\"verbose\"", ")", ")", "force_prompt", "=", "opts", ".", "get", "(", "\"prompt\"", ",", "False", ")", "allow_prompt", "=", "not", "opts", ".", "get", "(", "\"no_prompt\"", ",", "True", ")", "allow_keyring", "=", "not", "opts", ".", "get", "(", "\"no_keyring\"", ",", "False", ")", "and", "not", "force_user", "allow_netrc", "=", "not", "opts", ".", "get", "(", "\"no_netrc\"", ",", "False", ")", "and", "not", "force_user", "# print(\"get_credentials_for_url\", force_user, allow_prompt)", "if", "force_user", "and", "not", "allow_prompt", ":", "raise", "RuntimeError", "(", "\"Cannot get credentials for a distinct user ({}) from keyring or .netrc and \"", "\"prompting is disabled.\"", ".", "format", "(", "force_user", ")", ")", "# Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x", "home_path", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "file_path", "=", "os", ".", "path", ".", "join", "(", "home_path", ",", "DEFAULT_CREDENTIAL_STORE", ")", "if", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "raise", "RuntimeError", "(", "\"Custom password files are no longer supported. Delete {} and use .netrc instead.\"", ".", "format", "(", "file_path", ")", ")", "# Query keyring database", "if", "creds", "is", "None", "and", "keyring", "and", "allow_keyring", ":", "try", ":", "# Note: we pass the url as `username` and username:password as `password`", "c", "=", "keyring", ".", "get_password", "(", "\"pyftpsync\"", ",", "url", ")", "if", "c", "is", "not", "None", ":", "creds", "=", "c", ".", "split", "(", "\":\"", ",", "1", ")", "write", "(", "\"Using credentials from keyring('pyftpsync', '{}'): {}:***.\"", ".", "format", "(", "url", ",", "creds", "[", "0", "]", ")", ")", "else", ":", "if", "verbose", ">=", "4", ":", "write", "(", "\"No credentials found in keyring('pyftpsync', '{}').\"", ".", "format", "(", "url", ")", ")", "# except keyring.errors.TransientKeyringError:", "except", "Exception", "as", "e", ":", "# e.g. 
user clicked 'no'", "write_error", "(", "\"Could not get password from keyring {}\"", ".", "format", "(", "e", ")", ")", "# Query .netrc file", "# print(opts)", "if", "creds", "is", "None", "and", "allow_netrc", ":", "try", ":", "authenticators", "=", "None", "authenticators", "=", "netrc", ".", "netrc", "(", ")", ".", "authenticators", "(", "url", ")", "except", "CompatFileNotFoundError", ":", "if", "verbose", ">=", "4", ":", "write", "(", "\"Could not get password (no .netrc file).\"", ")", "except", "Exception", "as", "e", ":", "write_error", "(", "\"Could not read .netrc: {}.\"", ".", "format", "(", "e", ")", ")", "if", "authenticators", ":", "creds", "=", "(", "authenticators", "[", "0", "]", ",", "authenticators", "[", "2", "]", ")", "write", "(", "\"Using credentials from .netrc file: {}:***.\"", ".", "format", "(", "creds", "[", "0", "]", ")", ")", "else", ":", "if", "verbose", ">=", "4", ":", "write", "(", "\"Could not find entry for '{}' in .netrc file.\"", ".", "format", "(", "url", ")", ")", "# Prompt for password if we don't have credentials yet, or --prompt was set.", "if", "allow_prompt", ":", "if", "creds", "is", "None", ":", "creds", "=", "prompt_for_password", "(", "url", ")", "elif", "force_prompt", ":", "# --prompt was set but we can provide a default for the user name", "creds", "=", "prompt_for_password", "(", "url", ",", "default_user", "=", "creds", "[", "0", "]", ")", "return", "creds" ]
Lookup credentials for a given target in keyring and .netrc. Optionally prompts for credentials if not found. Returns: 2-tuple (username, password) or None
[ "Lookup", "credentials", "for", "a", "given", "target", "in", "keyring", "and", ".", "netrc", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L202-L285
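The code comments above pin down the keyring convention: service name "pyftpsync", the target URL stored in the keyring account slot, and "username:password" stored as the secret. Assuming the optional keyring package is installed, seeding an entry by hand could look like this (host and credentials are placeholders):

import keyring   # optional dependency of pyftpsync

# Convention from the record above: service="pyftpsync", account=<url>,
# secret="<username>:<password>".
keyring.set_password("pyftpsync", "ftp.example.com", "alice:s3cret")

stored = keyring.get_password("pyftpsync", "ftp.example.com")
user, _, password = stored.partition(":")   # mirrors creds = c.split(":", 1)
print(user)   # "alice"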
4,240
mar10/pyftpsync
ftpsync/util.py
save_password
def save_password(url, username, password): """Store credentials in keyring.""" if keyring: if ":" in username: raise RuntimeError( "Unable to store credentials if username contains a ':' ({}).".format( username ) ) try: # Note: we pass the url as `username` and username:password as `password` if password is None: keyring.delete_password("pyftpsync", url) write("Delete credentials from keyring ({})".format(url)) else: keyring.set_password( "pyftpsync", url, "{}:{}".format(username, password) ) write( "Store credentials in keyring ({}, {}:***).".format(url, username) ) # except keyring.errors.TransientKeyringError: except Exception as e: write("Could not delete/set password {}.".format(e)) pass # e.g. user clicked 'no' else: write("Could not store credentials (missing keyring support).") return
python
def save_password(url, username, password): if keyring: if ":" in username: raise RuntimeError( "Unable to store credentials if username contains a ':' ({}).".format( username ) ) try: # Note: we pass the url as `username` and username:password as `password` if password is None: keyring.delete_password("pyftpsync", url) write("Delete credentials from keyring ({})".format(url)) else: keyring.set_password( "pyftpsync", url, "{}:{}".format(username, password) ) write( "Store credentials in keyring ({}, {}:***).".format(url, username) ) # except keyring.errors.TransientKeyringError: except Exception as e: write("Could not delete/set password {}.".format(e)) pass # e.g. user clicked 'no' else: write("Could not store credentials (missing keyring support).") return
[ "def", "save_password", "(", "url", ",", "username", ",", "password", ")", ":", "if", "keyring", ":", "if", "\":\"", "in", "username", ":", "raise", "RuntimeError", "(", "\"Unable to store credentials if username contains a ':' ({}).\"", ".", "format", "(", "username", ")", ")", "try", ":", "# Note: we pass the url as `username` and username:password as `password`", "if", "password", "is", "None", ":", "keyring", ".", "delete_password", "(", "\"pyftpsync\"", ",", "url", ")", "write", "(", "\"Delete credentials from keyring ({})\"", ".", "format", "(", "url", ")", ")", "else", ":", "keyring", ".", "set_password", "(", "\"pyftpsync\"", ",", "url", ",", "\"{}:{}\"", ".", "format", "(", "username", ",", "password", ")", ")", "write", "(", "\"Store credentials in keyring ({}, {}:***).\"", ".", "format", "(", "url", ",", "username", ")", ")", "# except keyring.errors.TransientKeyringError:", "except", "Exception", "as", "e", ":", "write", "(", "\"Could not delete/set password {}.\"", ".", "format", "(", "e", ")", ")", "pass", "# e.g. user clicked 'no'", "else", ":", "write", "(", "\"Could not store credentials (missing keyring support).\"", ")", "return" ]
Store credentials in keyring.
[ "Store", "credentials", "in", "keyring", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L288-L316
4,241
mar10/pyftpsync
ftpsync/util.py
str_to_bool
def str_to_bool(val):
    """Return a boolean for '0', 'false', 'on', ..."""
    val = str(val).lower().strip()
    if val in ("1", "true", "on", "yes"):
        return True
    elif val in ("0", "false", "off", "no"):
        return False
    raise ValueError(
        "Invalid value '{}' "
        "(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').".format(val)
    )
python
def str_to_bool(val):
    val = str(val).lower().strip()
    if val in ("1", "true", "on", "yes"):
        return True
    elif val in ("0", "false", "off", "no"):
        return False
    raise ValueError(
        "Invalid value '{}' "
        "(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').".format(val)
    )
[ "def", "str_to_bool", "(", "val", ")", ":", "val", "=", "str", "(", "val", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "val", "in", "(", "\"1\"", ",", "\"true\"", ",", "\"on\"", ",", "\"yes\"", ")", ":", "return", "True", "elif", "val", "in", "(", "\"0\"", ",", "\"false\"", ",", "\"off\"", ",", "\"no\"", ")", ":", "return", "False", "raise", "ValueError", "(", "\"Invalid value '{}'\"", "\"(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').\"", ".", "format", "(", "val", ")", ")" ]
Return a boolean for '0', 'false', 'on', ...
[ "Return", "a", "boolean", "for", "0", "false", "on", "..." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L319-L329
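Usage sketch for the parser above:

from ftpsync.util import str_to_bool

assert str_to_bool("Yes") is True      # case-insensitive, surrounding whitespace ignored
assert str_to_bool(" off ") is False
assert str_to_bool(0) is False         # non-strings go through str() first
str_to_bool("maybe")                   # raises ValueError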
4,242
mar10/pyftpsync
ftpsync/util.py
ansi_code
def ansi_code(name): """Return ansi color or style codes or '' if colorama is not available.""" try: obj = colorama for part in name.split("."): obj = getattr(obj, part) return obj except AttributeError: return ""
python
def ansi_code(name): try: obj = colorama for part in name.split("."): obj = getattr(obj, part) return obj except AttributeError: return ""
[ "def", "ansi_code", "(", "name", ")", ":", "try", ":", "obj", "=", "colorama", "for", "part", "in", "name", ".", "split", "(", "\".\"", ")", ":", "obj", "=", "getattr", "(", "obj", ",", "part", ")", "return", "obj", "except", "AttributeError", ":", "return", "\"\"" ]
Return ansi color or style codes or '' if colorama is not available.
[ "Return", "ansi", "color", "or", "style", "codes", "or", "if", "colorama", "is", "not", "available", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L332-L340
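Because ansi_code falls back to "" when colorama is missing, colored output degrades gracefully; a sketch:

from ftpsync.util import ansi_code

RED = ansi_code("Fore.LIGHTRED_EX")    # "" if colorama is not installed
RESET = ansi_code("Style.RESET_ALL")
print(RED + "conflict!" + RESET)       # plain "conflict!" without colorama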
4,243
mar10/pyftpsync
ftpsync/targets.py
make_target
def make_target(url, extra_opts=None): """Factory that creates `_Target` objects from URLs. FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS. Note: TLS is only supported on Python 2.7/3.2+. Args: url (str): extra_opts (dict, optional): Passed to Target constructor. Default: None. Returns: :class:`_Target` """ # debug = extra_opts.get("debug", 1) parts = compat.urlparse(url, allow_fragments=False) # scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986 scheme = parts.scheme.lower() if scheme in ["ftp", "ftps"]: creds = parts.username, parts.password tls = scheme == "ftps" from ftpsync import ftp_target target = ftp_target.FtpTarget( parts.path, parts.hostname, parts.port, username=creds[0], password=creds[1], tls=tls, timeout=None, extra_opts=extra_opts, ) else: target = FsTarget(url, extra_opts) return target
python
def make_target(url, extra_opts=None): # debug = extra_opts.get("debug", 1) parts = compat.urlparse(url, allow_fragments=False) # scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986 scheme = parts.scheme.lower() if scheme in ["ftp", "ftps"]: creds = parts.username, parts.password tls = scheme == "ftps" from ftpsync import ftp_target target = ftp_target.FtpTarget( parts.path, parts.hostname, parts.port, username=creds[0], password=creds[1], tls=tls, timeout=None, extra_opts=extra_opts, ) else: target = FsTarget(url, extra_opts) return target
[ "def", "make_target", "(", "url", ",", "extra_opts", "=", "None", ")", ":", "# debug = extra_opts.get(\"debug\", 1)", "parts", "=", "compat", ".", "urlparse", "(", "url", ",", "allow_fragments", "=", "False", ")", "# scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986", "scheme", "=", "parts", ".", "scheme", ".", "lower", "(", ")", "if", "scheme", "in", "[", "\"ftp\"", ",", "\"ftps\"", "]", ":", "creds", "=", "parts", ".", "username", ",", "parts", ".", "password", "tls", "=", "scheme", "==", "\"ftps\"", "from", "ftpsync", "import", "ftp_target", "target", "=", "ftp_target", ".", "FtpTarget", "(", "parts", ".", "path", ",", "parts", ".", "hostname", ",", "parts", ".", "port", ",", "username", "=", "creds", "[", "0", "]", ",", "password", "=", "creds", "[", "1", "]", ",", "tls", "=", "tls", ",", "timeout", "=", "None", ",", "extra_opts", "=", "extra_opts", ",", ")", "else", ":", "target", "=", "FsTarget", "(", "url", ",", "extra_opts", ")", "return", "target" ]
Factory that creates `_Target` objects from URLs. FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS. Note: TLS is only supported on Python 2.7/3.2+. Args: url (str): extra_opts (dict, optional): Passed to Target constructor. Default: None. Returns: :class:`_Target`
[ "Factory", "that", "creates", "_Target", "objects", "from", "URLs", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L24-L59
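Per the factory's docstring, a plain path yields an FsTarget while ftp:// / ftps:// URLs yield an FtpTarget; credentials can ride along in the URL. A sketch (host and credentials are placeholders; the "encoding" option is the one read by _get_encoding_opt in the next record):

from ftpsync.targets import make_target

local = make_target("/tmp/sync_root")    # filesystem target
remote = make_target(
    "ftps://alice:s3cret@ftp.example.com/www",   # FTP over TLS
    extra_opts={"encoding": "utf-8"},
)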
4,244
mar10/pyftpsync
ftpsync/targets.py
_get_encoding_opt
def _get_encoding_opt(synchronizer, extra_opts, default): """Helper to figure out encoding setting inside constructors.""" encoding = default # if synchronizer and "encoding" in synchronizer.options: # encoding = synchronizer.options.get("encoding") if extra_opts and "encoding" in extra_opts: encoding = extra_opts.get("encoding") if encoding: # Normalize name (e.g. 'UTF8' => 'utf-8') encoding = codecs.lookup(encoding).name # print("_get_encoding_opt", encoding) return encoding or None
python
def _get_encoding_opt(synchronizer, extra_opts, default): encoding = default # if synchronizer and "encoding" in synchronizer.options: # encoding = synchronizer.options.get("encoding") if extra_opts and "encoding" in extra_opts: encoding = extra_opts.get("encoding") if encoding: # Normalize name (e.g. 'UTF8' => 'utf-8') encoding = codecs.lookup(encoding).name # print("_get_encoding_opt", encoding) return encoding or None
[ "def", "_get_encoding_opt", "(", "synchronizer", ",", "extra_opts", ",", "default", ")", ":", "encoding", "=", "default", "# if synchronizer and \"encoding\" in synchronizer.options:", "# encoding = synchronizer.options.get(\"encoding\")", "if", "extra_opts", "and", "\"encoding\"", "in", "extra_opts", ":", "encoding", "=", "extra_opts", ".", "get", "(", "\"encoding\"", ")", "if", "encoding", ":", "# Normalize name (e.g. 'UTF8' => 'utf-8')", "encoding", "=", "codecs", ".", "lookup", "(", "encoding", ")", ".", "name", "# print(\"_get_encoding_opt\", encoding)", "return", "encoding", "or", "None" ]
Helper to figure out encoding setting inside constructors.
[ "Helper", "to", "figure", "out", "encoding", "setting", "inside", "constructors", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L62-L73
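The codecs.lookup() normalization above is what maps spelling variants onto canonical codec names:

import codecs

print(codecs.lookup("UTF8").name)      # 'utf-8'
print(codecs.lookup("latin_1").name)   # 'iso8859-1'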
4,245
mar10/pyftpsync
ftpsync/targets.py
_Target.walk
def walk(self, pred=None, recursive=True): """Iterate over all target entries recursively. Args: pred (function, optional): Callback(:class:`ftpsync.resources._Resource`) should return `False` to ignore entry. Default: `None`. recursive (bool, optional): Pass `False` to generate top level entries only. Default: `True`. Yields: :class:`ftpsync.resources._Resource` """ for entry in self.get_dir(): if pred and pred(entry) is False: continue yield entry if recursive: if isinstance(entry, DirectoryEntry): self.cwd(entry.name) for e in self.walk(pred): yield e self.cwd("..") return
python
def walk(self, pred=None, recursive=True): for entry in self.get_dir(): if pred and pred(entry) is False: continue yield entry if recursive: if isinstance(entry, DirectoryEntry): self.cwd(entry.name) for e in self.walk(pred): yield e self.cwd("..") return
[ "def", "walk", "(", "self", ",", "pred", "=", "None", ",", "recursive", "=", "True", ")", ":", "for", "entry", "in", "self", ".", "get_dir", "(", ")", ":", "if", "pred", "and", "pred", "(", "entry", ")", "is", "False", ":", "continue", "yield", "entry", "if", "recursive", ":", "if", "isinstance", "(", "entry", ",", "DirectoryEntry", ")", ":", "self", ".", "cwd", "(", "entry", ".", "name", ")", "for", "e", "in", "self", ".", "walk", "(", "pred", ")", ":", "yield", "e", "self", ".", "cwd", "(", "\"..\"", ")", "return" ]
Iterate over all target entries recursively. Args: pred (function, optional): Callback(:class:`ftpsync.resources._Resource`) should return `False` to ignore entry. Default: `None`. recursive (bool, optional): Pass `False` to generate top level entries only. Default: `True`. Yields: :class:`ftpsync.resources._Resource`
[ "Iterate", "over", "all", "target", "entries", "recursively", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L255-L279
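A hypothetical walk over a filesystem target, skipping dot-files. Note that a predicate returning exactly False also prunes recursion into that directory, since the continue above skips both the yield and the cwd/recurse step; the open()/close() calls are assumed to work as when the synchronizer drives the target:

from ftpsync.targets import make_target

def skip_hidden(entry):
    return not entry.name.startswith(".")

target = make_target("/tmp/sync_root")
target.open()
for entry in target.walk(pred=skip_hidden):
    print(entry.name)
target.close()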
4,246
mar10/pyftpsync
ftpsync/targets.py
FsTarget.set_mtime
def set_mtime(self, name, mtime, size): """Set modification time on file.""" self.check_write(name) os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
python
def set_mtime(self, name, mtime, size): self.check_write(name) os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
[ "def", "set_mtime", "(", "self", ",", "name", ",", "mtime", ",", "size", ")", ":", "self", ".", "check_write", "(", "name", ")", "os", ".", "utime", "(", "os", ".", "path", ".", "join", "(", "self", ".", "cur_dir", ",", "name", ")", ",", "(", "-", "1", ",", "mtime", ")", ")" ]
Set modification time on file.
[ "Set", "modification", "time", "on", "file", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L480-L483
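The (-1, mtime) tuple above passes -1 as the new access time to os.utime(). A variant that keeps the file's current atime instead (a standalone sketch, not pyftpsync code):

import os

def set_mtime_keep_atime(path, mtime):
    # os.utime() expects (atime, mtime); reuse the current atime.
    st = os.stat(path)
    os.utime(path, (st.st_atime, mtime))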
4,247
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._lock
def _lock(self, break_existing=False): """Write a special file to the target root folder.""" # write("_lock") data = {"lock_time": time.time(), "lock_holder": None} try: assert self.cur_dir == self.root_dir self.write_text(DirMetadata.LOCK_FILE_NAME, json.dumps(data)) self.lock_data = data self.lock_write_time = time.time() except Exception as e: errmsg = "{}".format(e) write_error("Could not write lock file: {}".format(errmsg)) if errmsg.startswith("550") and self.ftp.passiveserver: try: self.ftp.makepasv() except Exception: write_error( "The server probably requires FTP Active mode. " "Try passing the --ftp-active option." ) # Set to False, so we don't try to remove later self.lock_data = False
python
def _lock(self, break_existing=False): # write("_lock") data = {"lock_time": time.time(), "lock_holder": None} try: assert self.cur_dir == self.root_dir self.write_text(DirMetadata.LOCK_FILE_NAME, json.dumps(data)) self.lock_data = data self.lock_write_time = time.time() except Exception as e: errmsg = "{}".format(e) write_error("Could not write lock file: {}".format(errmsg)) if errmsg.startswith("550") and self.ftp.passiveserver: try: self.ftp.makepasv() except Exception: write_error( "The server probably requires FTP Active mode. " "Try passing the --ftp-active option." ) # Set to False, so we don't try to remove later self.lock_data = False
[ "def", "_lock", "(", "self", ",", "break_existing", "=", "False", ")", ":", "# write(\"_lock\")\r", "data", "=", "{", "\"lock_time\"", ":", "time", ".", "time", "(", ")", ",", "\"lock_holder\"", ":", "None", "}", "try", ":", "assert", "self", ".", "cur_dir", "==", "self", ".", "root_dir", "self", ".", "write_text", "(", "DirMetadata", ".", "LOCK_FILE_NAME", ",", "json", ".", "dumps", "(", "data", ")", ")", "self", ".", "lock_data", "=", "data", "self", ".", "lock_write_time", "=", "time", ".", "time", "(", ")", "except", "Exception", "as", "e", ":", "errmsg", "=", "\"{}\"", ".", "format", "(", "e", ")", "write_error", "(", "\"Could not write lock file: {}\"", ".", "format", "(", "errmsg", ")", ")", "if", "errmsg", ".", "startswith", "(", "\"550\"", ")", "and", "self", ".", "ftp", ".", "passiveserver", ":", "try", ":", "self", ".", "ftp", ".", "makepasv", "(", ")", "except", "Exception", ":", "write_error", "(", "\"The server probably requires FTP Active mode. \"", "\"Try passing the --ftp-active option.\"", ")", "# Set to False, so we don't try to remove later\r", "self", ".", "lock_data", "=", "False" ]
Write a special file to the target root folder.
[ "Write", "a", "special", "file", "to", "the", "target", "root", "folder", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L282-L305
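The lock payload itself is a two-key JSON document; a local sketch of what _lock() writes (the lock file name below is an assumption -- the real value comes from DirMetadata.LOCK_FILE_NAME):

import json
import time

LOCK_FILE_NAME = "_pyftpsync-lock.json"  # assumed; see DirMetadata in ftpsync/metadata.py

data = {"lock_time": time.time(), "lock_holder": None}
with open(LOCK_FILE_NAME, "w") as f:
    f.write(json.dumps(data))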
4,248
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._unlock
def _unlock(self, closing=False): """Remove lock file to the target root folder. """ # write("_unlock", closing) try: if self.cur_dir != self.root_dir: if closing: write( "Changing to ftp root folder to remove lock file: {}".format( self.root_dir ) ) self.cwd(self.root_dir) else: write_error( "Could not remove lock file, because CWD != ftp root: {}".format( self.cur_dir ) ) return if self.lock_data is False: if self.get_option("verbose", 3) >= 4: write("Skip remove lock file (was not written).") else: # direct delete, without updating metadata or checking for target access: try: self.ftp.delete(DirMetadata.LOCK_FILE_NAME) # self.remove_file(DirMetadata.LOCK_FILE_NAME) except Exception as e: # I have seen '226 Closing data connection' responses here, # probably when a previous command threw another error. # However here, 2xx response should be Ok(?): # A 226 reply code is sent by the server before closing the # data connection after successfully processing the previous client command if e.args[0][:3] == "226": write_error("Ignoring 226 response for ftp.delete() lockfile") else: raise self.lock_data = None except Exception as e: write_error("Could not remove lock file: {}".format(e)) raise
python
def _unlock(self, closing=False): # write("_unlock", closing) try: if self.cur_dir != self.root_dir: if closing: write( "Changing to ftp root folder to remove lock file: {}".format( self.root_dir ) ) self.cwd(self.root_dir) else: write_error( "Could not remove lock file, because CWD != ftp root: {}".format( self.cur_dir ) ) return if self.lock_data is False: if self.get_option("verbose", 3) >= 4: write("Skip remove lock file (was not written).") else: # direct delete, without updating metadata or checking for target access: try: self.ftp.delete(DirMetadata.LOCK_FILE_NAME) # self.remove_file(DirMetadata.LOCK_FILE_NAME) except Exception as e: # I have seen '226 Closing data connection' responses here, # probably when a previous command threw another error. # However here, 2xx response should be Ok(?): # A 226 reply code is sent by the server before closing the # data connection after successfully processing the previous client command if e.args[0][:3] == "226": write_error("Ignoring 226 response for ftp.delete() lockfile") else: raise self.lock_data = None except Exception as e: write_error("Could not remove lock file: {}".format(e)) raise
[ "def", "_unlock", "(", "self", ",", "closing", "=", "False", ")", ":", "# write(\"_unlock\", closing)\r", "try", ":", "if", "self", ".", "cur_dir", "!=", "self", ".", "root_dir", ":", "if", "closing", ":", "write", "(", "\"Changing to ftp root folder to remove lock file: {}\"", ".", "format", "(", "self", ".", "root_dir", ")", ")", "self", ".", "cwd", "(", "self", ".", "root_dir", ")", "else", ":", "write_error", "(", "\"Could not remove lock file, because CWD != ftp root: {}\"", ".", "format", "(", "self", ".", "cur_dir", ")", ")", "return", "if", "self", ".", "lock_data", "is", "False", ":", "if", "self", ".", "get_option", "(", "\"verbose\"", ",", "3", ")", ">=", "4", ":", "write", "(", "\"Skip remove lock file (was not written).\"", ")", "else", ":", "# direct delete, without updating metadata or checking for target access:\r", "try", ":", "self", ".", "ftp", ".", "delete", "(", "DirMetadata", ".", "LOCK_FILE_NAME", ")", "# self.remove_file(DirMetadata.LOCK_FILE_NAME)\r", "except", "Exception", "as", "e", ":", "# I have seen '226 Closing data connection' responses here,\r", "# probably when a previous command threw another error.\r", "# However here, 2xx response should be Ok(?):\r", "# A 226 reply code is sent by the server before closing the\r", "# data connection after successfully processing the previous client command\r", "if", "e", ".", "args", "[", "0", "]", "[", ":", "3", "]", "==", "\"226\"", ":", "write_error", "(", "\"Ignoring 226 response for ftp.delete() lockfile\"", ")", "else", ":", "raise", "self", ".", "lock_data", "=", "None", "except", "Exception", "as", "e", ":", "write_error", "(", "\"Could not remove lock file: {}\"", ".", "format", "(", "e", ")", ")", "raise" ]
Remove lock file from the target root folder.
[ "Remove", "lock", "file", "to", "the", "target", "root", "folder", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L307-L351
4,249
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._probe_lock_file
def _probe_lock_file(self, reported_mtime): """Called by get_dir""" delta = reported_mtime - self.lock_data["lock_time"] # delta2 = reported_mtime - self.lock_write_time self.server_time_ofs = delta if self.get_option("verbose", 3) >= 4: write("Server time offset: {:.2f} seconds.".format(delta))
python
def _probe_lock_file(self, reported_mtime): delta = reported_mtime - self.lock_data["lock_time"] # delta2 = reported_mtime - self.lock_write_time self.server_time_ofs = delta if self.get_option("verbose", 3) >= 4: write("Server time offset: {:.2f} seconds.".format(delta))
[ "def", "_probe_lock_file", "(", "self", ",", "reported_mtime", ")", ":", "delta", "=", "reported_mtime", "-", "self", ".", "lock_data", "[", "\"lock_time\"", "]", "# delta2 = reported_mtime - self.lock_write_time\r", "self", ".", "server_time_ofs", "=", "delta", "if", "self", ".", "get_option", "(", "\"verbose\"", ",", "3", ")", ">=", "4", ":", "write", "(", "\"Server time offset: {:.2f} seconds.\"", ".", "format", "(", "delta", ")", ")" ]
Called by get_dir
[ "Called", "by", "get_dir" ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L353-L359
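The offset estimate is plain arithmetic: the server's reported mtime for the lock file minus the client clock at write time. A self-contained illustration with made-up numbers:

import time

lock_time = time.time()             # client clock when the lock file was written
reported_mtime = lock_time + 90.0   # mtime the server reports back (made up)

server_time_ofs = reported_mtime - lock_time
print("Server time offset: {:.2f} seconds.".format(server_time_ofs))  # 90.00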
4,250
mar10/pyftpsync
ftpsync/resources.py
EntryPair.is_same_time
def is_same_time(self): """Return True if local.mtime == remote.mtime.""" return ( self.local and self.remote and FileEntry._eps_compare(self.local.mtime, self.remote.mtime) == 0 )
python
def is_same_time(self): return ( self.local and self.remote and FileEntry._eps_compare(self.local.mtime, self.remote.mtime) == 0 )
[ "def", "is_same_time", "(", "self", ")", ":", "return", "(", "self", ".", "local", "and", "self", ".", "remote", "and", "FileEntry", ".", "_eps_compare", "(", "self", ".", "local", ".", "mtime", ",", "self", ".", "remote", ".", "mtime", ")", "==", "0", ")" ]
Return True if local.mtime == remote.mtime.
[ "Return", "True", "if", "local", ".", "mtime", "==", "remote", ".", "mtime", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L118-L124
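FileEntry._eps_compare is not part of this excerpt; a common shape for it is a three-way comparison that treats timestamps within a small tolerance as equal, since FAT volumes and many FTP servers store coarse mtimes. A sketch under that assumption (the 2-second tolerance is illustrative, not pyftpsync's actual constant):

def eps_compare(t1, t2, eps=2.0):
    # 0 if equal within eps, -1 if t1 < t2, +1 if t1 > t2
    delta = t1 - t2
    if abs(delta) <= eps:
        return 0
    return -1 if delta < 0 else 1

assert eps_compare(1000.0, 1001.5) == 0   # within tolerance: "same time"
assert eps_compare(1000.0, 1005.0) == -1  # clearly older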
4,251
mar10/pyftpsync
ftpsync/resources.py
EntryPair.override_operation
def override_operation(self, operation, reason): """Re-Classify entry pair.""" prev_class = (self.local_classification, self.remote_classification) prev_op = self.operation assert operation != prev_op assert operation in PAIR_OPERATIONS if self.any_entry.target.synchronizer.verbose > 3: write( "override_operation({}, {}) -> {} ({})".format( prev_class, prev_op, operation, reason ), debug=True, ) self.operation = operation self.re_class_reason = reason
python
def override_operation(self, operation, reason): prev_class = (self.local_classification, self.remote_classification) prev_op = self.operation assert operation != prev_op assert operation in PAIR_OPERATIONS if self.any_entry.target.synchronizer.verbose > 3: write( "override_operation({}, {}) -> {} ({})".format( prev_class, prev_op, operation, reason ), debug=True, ) self.operation = operation self.re_class_reason = reason
[ "def", "override_operation", "(", "self", ",", "operation", ",", "reason", ")", ":", "prev_class", "=", "(", "self", ".", "local_classification", ",", "self", ".", "remote_classification", ")", "prev_op", "=", "self", ".", "operation", "assert", "operation", "!=", "prev_op", "assert", "operation", "in", "PAIR_OPERATIONS", "if", "self", ".", "any_entry", ".", "target", ".", "synchronizer", ".", "verbose", ">", "3", ":", "write", "(", "\"override_operation({}, {}) -> {} ({})\"", ".", "format", "(", "prev_class", ",", "prev_op", ",", "operation", ",", "reason", ")", ",", "debug", "=", "True", ",", ")", "self", ".", "operation", "=", "operation", "self", ".", "re_class_reason", "=", "reason" ]
Re-Classify entry pair.
[ "Re", "-", "Classify", "entry", "pair", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L126-L140
4,252
mar10/pyftpsync
ftpsync/resources.py
EntryPair.classify
def classify(self, peer_dir_meta): """Classify entry pair.""" assert self.operation is None # write("CLASSIFIY", self, peer_dir_meta) # Note: We pass False if the entry is not listed in the metadata. # We pass None if we don't have metadata all. peer_entry_meta = peer_dir_meta.get(self.name, False) if peer_dir_meta else None # write("=>", self, peer_entry_meta) if self.local: self.local.classify(peer_dir_meta) self.local_classification = self.local.classification elif peer_entry_meta: self.local_classification = "deleted" else: self.local_classification = "missing" if self.remote: self.remote.classify(peer_dir_meta) self.remote_classification = self.remote.classification elif peer_entry_meta: self.remote_classification = "deleted" else: self.remote_classification = "missing" c_pair = (self.local_classification, self.remote_classification) self.operation = operation_map.get(c_pair) if not self.operation: raise RuntimeError( "Undefined operation for pair classification {}".format(c_pair) ) if PRINT_CLASSIFICATIONS: write("classify {}".format(self)) # if not entry.meta: # assert self.classification in PAIR_CLASSIFICATIONS assert self.operation in PAIR_OPERATIONS return self.operation
python
def classify(self, peer_dir_meta): assert self.operation is None # write("CLASSIFIY", self, peer_dir_meta) # Note: We pass False if the entry is not listed in the metadata. # We pass None if we don't have metadata all. peer_entry_meta = peer_dir_meta.get(self.name, False) if peer_dir_meta else None # write("=>", self, peer_entry_meta) if self.local: self.local.classify(peer_dir_meta) self.local_classification = self.local.classification elif peer_entry_meta: self.local_classification = "deleted" else: self.local_classification = "missing" if self.remote: self.remote.classify(peer_dir_meta) self.remote_classification = self.remote.classification elif peer_entry_meta: self.remote_classification = "deleted" else: self.remote_classification = "missing" c_pair = (self.local_classification, self.remote_classification) self.operation = operation_map.get(c_pair) if not self.operation: raise RuntimeError( "Undefined operation for pair classification {}".format(c_pair) ) if PRINT_CLASSIFICATIONS: write("classify {}".format(self)) # if not entry.meta: # assert self.classification in PAIR_CLASSIFICATIONS assert self.operation in PAIR_OPERATIONS return self.operation
[ "def", "classify", "(", "self", ",", "peer_dir_meta", ")", ":", "assert", "self", ".", "operation", "is", "None", "# write(\"CLASSIFIY\", self, peer_dir_meta)", "# Note: We pass False if the entry is not listed in the metadata.", "# We pass None if we don't have metadata all.", "peer_entry_meta", "=", "peer_dir_meta", ".", "get", "(", "self", ".", "name", ",", "False", ")", "if", "peer_dir_meta", "else", "None", "# write(\"=>\", self, peer_entry_meta)", "if", "self", ".", "local", ":", "self", ".", "local", ".", "classify", "(", "peer_dir_meta", ")", "self", ".", "local_classification", "=", "self", ".", "local", ".", "classification", "elif", "peer_entry_meta", ":", "self", ".", "local_classification", "=", "\"deleted\"", "else", ":", "self", ".", "local_classification", "=", "\"missing\"", "if", "self", ".", "remote", ":", "self", ".", "remote", ".", "classify", "(", "peer_dir_meta", ")", "self", ".", "remote_classification", "=", "self", ".", "remote", ".", "classification", "elif", "peer_entry_meta", ":", "self", ".", "remote_classification", "=", "\"deleted\"", "else", ":", "self", ".", "remote_classification", "=", "\"missing\"", "c_pair", "=", "(", "self", ".", "local_classification", ",", "self", ".", "remote_classification", ")", "self", ".", "operation", "=", "operation_map", ".", "get", "(", "c_pair", ")", "if", "not", "self", ".", "operation", ":", "raise", "RuntimeError", "(", "\"Undefined operation for pair classification {}\"", ".", "format", "(", "c_pair", ")", ")", "if", "PRINT_CLASSIFICATIONS", ":", "write", "(", "\"classify {}\"", ".", "format", "(", "self", ")", ")", "# if not entry.meta:", "# assert self.classification in PAIR_CLASSIFICATIONS", "assert", "self", ".", "operation", "in", "PAIR_OPERATIONS", "return", "self", ".", "operation" ]
Classify entry pair.
[ "Classify", "entry", "pair", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L142-L179
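operation_map is defined elsewhere in resources.py; the pattern is a plain dict lookup from the (local, remote) classification pair to an operation name. An abridged, hypothetical version of that table:

# Abridged sketch; the real operation_map covers every classification pair.
operation_map = {
    ("new", "missing"): "copy_local",
    ("missing", "new"): "copy_remote",
    ("unmodified", "unmodified"): "equal",
    ("modified", "unmodified"): "copy_local",
    ("modified", "modified"): "conflict",
}

c_pair = ("modified", "unmodified")
operation = operation_map.get(c_pair)
if not operation:
    raise RuntimeError("Undefined operation for pair classification {}".format(c_pair))
print(operation)  # copy_local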
4,253
mar10/pyftpsync
ftpsync/resources.py
_Resource.classify
def classify(self, peer_dir_meta): """Classify this entry as 'new', 'unmodified', or 'modified'.""" assert self.classification is None peer_entry_meta = None if peer_dir_meta: # Metadata is generally available, so we can detect 'new' or 'modified' peer_entry_meta = peer_dir_meta.get(self.name, False) if self.is_dir(): # Directories are considered 'unmodified' (would require deep traversal # to check otherwise) if peer_entry_meta: self.classification = "unmodified" else: self.classification = "new" elif peer_entry_meta: # File entries can be classified as modified/unmodified self.ps_size = peer_entry_meta.get("s") self.ps_mtime = peer_entry_meta.get("m") self.ps_utime = peer_entry_meta.get("u") if ( self.size == self.ps_size and FileEntry._eps_compare(self.mtime, self.ps_mtime) == 0 ): self.classification = "unmodified" else: self.classification = "modified" else: # A new file entry self.classification = "new" else: # No metadata available: if self.is_dir(): # Directories are considered 'unmodified' (would require deep traversal # to check otherwise) self.classification = "unmodified" else: # That's all we know, but EntryPair.classify() may adjust this self.classification = "existing" if PRINT_CLASSIFICATIONS: write("classify {}".format(self)) assert self.classification in ENTRY_CLASSIFICATIONS return self.classification
python
def classify(self, peer_dir_meta): assert self.classification is None peer_entry_meta = None if peer_dir_meta: # Metadata is generally available, so we can detect 'new' or 'modified' peer_entry_meta = peer_dir_meta.get(self.name, False) if self.is_dir(): # Directories are considered 'unmodified' (would require deep traversal # to check otherwise) if peer_entry_meta: self.classification = "unmodified" else: self.classification = "new" elif peer_entry_meta: # File entries can be classified as modified/unmodified self.ps_size = peer_entry_meta.get("s") self.ps_mtime = peer_entry_meta.get("m") self.ps_utime = peer_entry_meta.get("u") if ( self.size == self.ps_size and FileEntry._eps_compare(self.mtime, self.ps_mtime) == 0 ): self.classification = "unmodified" else: self.classification = "modified" else: # A new file entry self.classification = "new" else: # No metadata available: if self.is_dir(): # Directories are considered 'unmodified' (would require deep traversal # to check otherwise) self.classification = "unmodified" else: # That's all we know, but EntryPair.classify() may adjust this self.classification = "existing" if PRINT_CLASSIFICATIONS: write("classify {}".format(self)) assert self.classification in ENTRY_CLASSIFICATIONS return self.classification
[ "def", "classify", "(", "self", ",", "peer_dir_meta", ")", ":", "assert", "self", ".", "classification", "is", "None", "peer_entry_meta", "=", "None", "if", "peer_dir_meta", ":", "# Metadata is generally available, so we can detect 'new' or 'modified'", "peer_entry_meta", "=", "peer_dir_meta", ".", "get", "(", "self", ".", "name", ",", "False", ")", "if", "self", ".", "is_dir", "(", ")", ":", "# Directories are considered 'unmodified' (would require deep traversal", "# to check otherwise)", "if", "peer_entry_meta", ":", "self", ".", "classification", "=", "\"unmodified\"", "else", ":", "self", ".", "classification", "=", "\"new\"", "elif", "peer_entry_meta", ":", "# File entries can be classified as modified/unmodified", "self", ".", "ps_size", "=", "peer_entry_meta", ".", "get", "(", "\"s\"", ")", "self", ".", "ps_mtime", "=", "peer_entry_meta", ".", "get", "(", "\"m\"", ")", "self", ".", "ps_utime", "=", "peer_entry_meta", ".", "get", "(", "\"u\"", ")", "if", "(", "self", ".", "size", "==", "self", ".", "ps_size", "and", "FileEntry", ".", "_eps_compare", "(", "self", ".", "mtime", ",", "self", ".", "ps_mtime", ")", "==", "0", ")", ":", "self", ".", "classification", "=", "\"unmodified\"", "else", ":", "self", ".", "classification", "=", "\"modified\"", "else", ":", "# A new file entry", "self", ".", "classification", "=", "\"new\"", "else", ":", "# No metadata available:", "if", "self", ".", "is_dir", "(", ")", ":", "# Directories are considered 'unmodified' (would require deep traversal", "# to check otherwise)", "self", ".", "classification", "=", "\"unmodified\"", "else", ":", "# That's all we know, but EntryPair.classify() may adjust this", "self", ".", "classification", "=", "\"existing\"", "if", "PRINT_CLASSIFICATIONS", ":", "write", "(", "\"classify {}\"", ".", "format", "(", "self", ")", ")", "assert", "self", ".", "classification", "in", "ENTRY_CLASSIFICATIONS", "return", "self", ".", "classification" ]
Classify this entry as 'new', 'unmodified', or 'modified'.
[ "Classify", "this", "entry", "as", "new", "unmodified", "or", "modified", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L288-L331
4,254
mar10/pyftpsync
ftpsync/resources.py
FileEntry.was_modified_since_last_sync
def was_modified_since_last_sync(self): """Return True if this resource was modified since last sync. None is returned if we don't know (because of missing meta data). """ info = self.get_sync_info() if not info: return None if self.size != info["s"]: return True if self.mtime > info["m"]: return True return False
python
def was_modified_since_last_sync(self): info = self.get_sync_info() if not info: return None if self.size != info["s"]: return True if self.mtime > info["m"]: return True return False
[ "def", "was_modified_since_last_sync", "(", "self", ")", ":", "info", "=", "self", ".", "get_sync_info", "(", ")", "if", "not", "info", ":", "return", "None", "if", "self", ".", "size", "!=", "info", "[", "\"s\"", "]", ":", "return", "True", "if", "self", ".", "mtime", ">", "info", "[", "\"m\"", "]", ":", "return", "True", "return", "False" ]
Return True if this resource was modified since last sync. None is returned if we don't know (because of missing meta data).
[ "Return", "True", "if", "this", "resource", "was", "modified", "since", "last", "sync", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L377-L389
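Since the method is three-valued (True / False / None), callers must not collapse "unknown" into "unchanged". A stand-alone sketch with a stubbed sync-info dict shaped like the metadata records ({"s": size, "m": mtime}):

def was_modified(size, mtime, info):
    if not info:
        return None                 # no meta data: we simply don't know
    if size != info["s"]:
        return True                 # size changed
    if mtime > info["m"]:
        return True                 # newer than at last sync
    return False

print(was_modified(10, 500.0, None))                   # None
print(was_modified(10, 500.0, {"s": 10, "m": 500.0}))  # False
print(was_modified(12, 700.0, {"s": 10, "m": 500.0}))  # True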
4,255
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.set_mtime
def set_mtime(self, filename, mtime, size): """Store real file mtime in meta data. This is needed on FTP targets, because FTP servers don't allow to set file mtime, but use to the upload time instead. We also record size and upload time, so we can detect if the file was changed by other means and we have to discard our meta data. """ ut = time.time() # UTC time stamp if self.target.server_time_ofs: # We add the estimated time offset, so the stored 'u' time stamp matches # better the mtime value that the server will generate for that file ut += self.target.server_time_ofs self.list[filename] = {"m": mtime, "s": size, "u": ut} if self.PRETTY: self.list[filename].update( {"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)} ) # print("set_mtime", self.list[filename]) self.modified_list = True
python
def set_mtime(self, filename, mtime, size): ut = time.time() # UTC time stamp if self.target.server_time_ofs: # We add the estimated time offset, so the stored 'u' time stamp matches # better the mtime value that the server will generate for that file ut += self.target.server_time_ofs self.list[filename] = {"m": mtime, "s": size, "u": ut} if self.PRETTY: self.list[filename].update( {"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)} ) # print("set_mtime", self.list[filename]) self.modified_list = True
[ "def", "set_mtime", "(", "self", ",", "filename", ",", "mtime", ",", "size", ")", ":", "ut", "=", "time", ".", "time", "(", ")", "# UTC time stamp", "if", "self", ".", "target", ".", "server_time_ofs", ":", "# We add the estimated time offset, so the stored 'u' time stamp matches", "# better the mtime value that the server will generate for that file", "ut", "+=", "self", ".", "target", ".", "server_time_ofs", "self", ".", "list", "[", "filename", "]", "=", "{", "\"m\"", ":", "mtime", ",", "\"s\"", ":", "size", ",", "\"u\"", ":", "ut", "}", "if", "self", ".", "PRETTY", ":", "self", ".", "list", "[", "filename", "]", ".", "update", "(", "{", "\"mtime_str\"", ":", "pretty_stamp", "(", "mtime", ")", ",", "\"uploaded_str\"", ":", "pretty_stamp", "(", "ut", ")", "}", ")", "# print(\"set_mtime\", self.list[filename])", "self", ".", "modified_list", "=", "True" ]
Store real file mtime in meta data. This is needed on FTP targets, because FTP servers don't allow setting the file mtime and use the upload time instead. We also record size and upload time, so we can detect if the file was changed by other means and we have to discard our meta data.
[ "Store", "real", "file", "mtime", "in", "meta", "data", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L70-L90
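The stored record is a small dict per file name; with PRETTY enabled, two human-readable stamps are added next to the raw numbers. A sketch (pretty_stamp here is a stand-in for the helper imported in metadata.py):

import time

def pretty_stamp(t):
    # stand-in for ftpsync's pretty_stamp() helper
    return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(t))

server_time_ofs = 90.0              # assumed estimate from the lock-file probe
mtime, size = 1500000000.0, 1234    # made-up file attributes

ut = time.time() + server_time_ofs  # expected server-side upload stamp
record = {"m": mtime, "s": size, "u": ut}
record.update({"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)})
print(record)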
4,256
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.remove
def remove(self, filename): """Remove any data for the given file name.""" if self.list.pop(filename, None): self.modified_list = True if self.target.peer: # otherwise `scan` command if self.target.is_local(): remote_target = self.target.peer if remote_target.get_id() in self.dir["peer_sync"]: rid = remote_target.get_id() self.modified_sync = bool( self.dir["peer_sync"][rid].pop(filename, None) ) return
python
def remove(self, filename): if self.list.pop(filename, None): self.modified_list = True if self.target.peer: # otherwise `scan` command if self.target.is_local(): remote_target = self.target.peer if remote_target.get_id() in self.dir["peer_sync"]: rid = remote_target.get_id() self.modified_sync = bool( self.dir["peer_sync"][rid].pop(filename, None) ) return
[ "def", "remove", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "list", ".", "pop", "(", "filename", ",", "None", ")", ":", "self", ".", "modified_list", "=", "True", "if", "self", ".", "target", ".", "peer", ":", "# otherwise `scan` command", "if", "self", ".", "target", ".", "is_local", "(", ")", ":", "remote_target", "=", "self", ".", "target", ".", "peer", "if", "remote_target", ".", "get_id", "(", ")", "in", "self", ".", "dir", "[", "\"peer_sync\"", "]", ":", "rid", "=", "remote_target", ".", "get_id", "(", ")", "self", ".", "modified_sync", "=", "bool", "(", "self", ".", "dir", "[", "\"peer_sync\"", "]", "[", "rid", "]", ".", "pop", "(", "filename", ",", "None", ")", ")", "return" ]
Remove any data for the given file name.
[ "Remove", "any", "data", "for", "the", "given", "file", "name", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L113-L125
4,257
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.read
def read(self): """Initialize self from .pyftpsync-meta.json file.""" assert self.path == self.target.cur_dir try: self.modified_list = False self.modified_sync = False is_valid_file = False s = self.target.read_text(self.filename) # print("s", s) if self.target.synchronizer: self.target.synchronizer._inc_stat("meta_bytes_read", len(s)) self.was_read = True # True if a file exists (even invalid) self.dir = json.loads(s) # import pprint # print("dir") # print(pprint.pformat(self.dir)) self.dir = make_native_dict_keys(self.dir) # print(pprint.pformat(self.dir)) self.list = self.dir["mtimes"] self.peer_sync = self.dir["peer_sync"] is_valid_file = True # write"DirMetadata: read(%s)" % (self.filename, ), self.dir) # except IncompatibleMetadataVersion: # raise # We want version errors to terminate the app except Exception as e: write_error("Could not read meta info {}: {!r}".format(self, e)) # If the version is incompatible, we stop, unless: # if --migrate is set, we simply ignore this file (and probably replace it # with a current version) if is_valid_file and self.dir.get("_file_version", 0) != self.VERSION: if not self.target or not self.target.get_option("migrate"): raise IncompatibleMetadataVersion( "Invalid meta data version: {} (expected {}).\n" "Consider passing --migrate to discard old data.".format( self.dir.get("_file_version"), self.VERSION ) ) # write( "Migrating meta data version from {} to {} (discarding old): {}".format( self.dir.get("_file_version"), self.VERSION, self.filename ) ) self.list = {} self.peer_sync = {} return
python
def read(self): assert self.path == self.target.cur_dir try: self.modified_list = False self.modified_sync = False is_valid_file = False s = self.target.read_text(self.filename) # print("s", s) if self.target.synchronizer: self.target.synchronizer._inc_stat("meta_bytes_read", len(s)) self.was_read = True # True if a file exists (even invalid) self.dir = json.loads(s) # import pprint # print("dir") # print(pprint.pformat(self.dir)) self.dir = make_native_dict_keys(self.dir) # print(pprint.pformat(self.dir)) self.list = self.dir["mtimes"] self.peer_sync = self.dir["peer_sync"] is_valid_file = True # write"DirMetadata: read(%s)" % (self.filename, ), self.dir) # except IncompatibleMetadataVersion: # raise # We want version errors to terminate the app except Exception as e: write_error("Could not read meta info {}: {!r}".format(self, e)) # If the version is incompatible, we stop, unless: # if --migrate is set, we simply ignore this file (and probably replace it # with a current version) if is_valid_file and self.dir.get("_file_version", 0) != self.VERSION: if not self.target or not self.target.get_option("migrate"): raise IncompatibleMetadataVersion( "Invalid meta data version: {} (expected {}).\n" "Consider passing --migrate to discard old data.".format( self.dir.get("_file_version"), self.VERSION ) ) # write( "Migrating meta data version from {} to {} (discarding old): {}".format( self.dir.get("_file_version"), self.VERSION, self.filename ) ) self.list = {} self.peer_sync = {} return
[ "def", "read", "(", "self", ")", ":", "assert", "self", ".", "path", "==", "self", ".", "target", ".", "cur_dir", "try", ":", "self", ".", "modified_list", "=", "False", "self", ".", "modified_sync", "=", "False", "is_valid_file", "=", "False", "s", "=", "self", ".", "target", ".", "read_text", "(", "self", ".", "filename", ")", "# print(\"s\", s)", "if", "self", ".", "target", ".", "synchronizer", ":", "self", ".", "target", ".", "synchronizer", ".", "_inc_stat", "(", "\"meta_bytes_read\"", ",", "len", "(", "s", ")", ")", "self", ".", "was_read", "=", "True", "# True if a file exists (even invalid)", "self", ".", "dir", "=", "json", ".", "loads", "(", "s", ")", "# import pprint", "# print(\"dir\")", "# print(pprint.pformat(self.dir))", "self", ".", "dir", "=", "make_native_dict_keys", "(", "self", ".", "dir", ")", "# print(pprint.pformat(self.dir))", "self", ".", "list", "=", "self", ".", "dir", "[", "\"mtimes\"", "]", "self", ".", "peer_sync", "=", "self", ".", "dir", "[", "\"peer_sync\"", "]", "is_valid_file", "=", "True", "# write\"DirMetadata: read(%s)\" % (self.filename, ), self.dir)", "# except IncompatibleMetadataVersion:", "# raise # We want version errors to terminate the app", "except", "Exception", "as", "e", ":", "write_error", "(", "\"Could not read meta info {}: {!r}\"", ".", "format", "(", "self", ",", "e", ")", ")", "# If the version is incompatible, we stop, unless:", "# if --migrate is set, we simply ignore this file (and probably replace it", "# with a current version)", "if", "is_valid_file", "and", "self", ".", "dir", ".", "get", "(", "\"_file_version\"", ",", "0", ")", "!=", "self", ".", "VERSION", ":", "if", "not", "self", ".", "target", "or", "not", "self", ".", "target", ".", "get_option", "(", "\"migrate\"", ")", ":", "raise", "IncompatibleMetadataVersion", "(", "\"Invalid meta data version: {} (expected {}).\\n\"", "\"Consider passing --migrate to discard old data.\"", ".", "format", "(", "self", ".", "dir", ".", "get", "(", "\"_file_version\"", ")", ",", "self", ".", "VERSION", ")", ")", "#", "write", "(", "\"Migrating meta data version from {} to {} (discarding old): {}\"", ".", "format", "(", "self", ".", "dir", ".", "get", "(", "\"_file_version\"", ")", ",", "self", ".", "VERSION", ",", "self", ".", "filename", ")", ")", "self", ".", "list", "=", "{", "}", "self", ".", "peer_sync", "=", "{", "}", "return" ]
Initialize self from .pyftpsync-meta.json file.
[ "Initialize", "self", "from", ".", "pyftpsync", "-", "meta", ".", "json", "file", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L127-L175
4,258
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.flush
def flush(self): """Write self to .pyftpsync-meta.json.""" # We DO write meta files even on read-only targets, but not in dry-run mode # if self.target.readonly: # write("DirMetadata.flush(%s): read-only; nothing to do" % self.target) # return assert self.path == self.target.cur_dir if self.target.dry_run: # write("DirMetadata.flush(%s): dry-run; nothing to do" % self.target) pass elif self.was_read and len(self.list) == 0 and len(self.peer_sync) == 0: write("Remove empty meta data file: {}".format(self.target)) self.target.remove_file(self.filename) elif not self.modified_list and not self.modified_sync: # write("DirMetadata.flush(%s): unmodified; nothing to do" % self.target) pass else: self.dir["_disclaimer"] = "Generated by https://github.com/mar10/pyftpsync" self.dir["_time_str"] = pretty_stamp(time.time()) self.dir["_file_version"] = self.VERSION self.dir["_version"] = __version__ self.dir["_time"] = time.mktime(time.gmtime()) # We always save utf-8 encoded. # `ensure_ascii` would escape all bytes >127 as `\x12` or `\u1234`, # which makes it hard to read, so we set it to false. # `sort_keys` converts binary keys to unicode using utf-8, so we # must make sure that we don't pass cp1225 or other encoded data. data = self.dir opts = {"indent": 4, "sort_keys": True, "ensure_ascii": False} if compat.PY2: # The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3 # opts["encoding"] = "utf-8" # Python 2 has problems with mixed keys (str/unicode) data = decode_dict_keys(data, "utf-8") if not self.PRETTY: opts["indent"] = None opts["separators"] = (",", ":") s = json.dumps(data, **opts) self.target.write_text(self.filename, s) if self.target.synchronizer: self.target.synchronizer._inc_stat("meta_bytes_written", len(s)) self.modified_list = False self.modified_sync = False
python
def flush(self): # We DO write meta files even on read-only targets, but not in dry-run mode # if self.target.readonly: # write("DirMetadata.flush(%s): read-only; nothing to do" % self.target) # return assert self.path == self.target.cur_dir if self.target.dry_run: # write("DirMetadata.flush(%s): dry-run; nothing to do" % self.target) pass elif self.was_read and len(self.list) == 0 and len(self.peer_sync) == 0: write("Remove empty meta data file: {}".format(self.target)) self.target.remove_file(self.filename) elif not self.modified_list and not self.modified_sync: # write("DirMetadata.flush(%s): unmodified; nothing to do" % self.target) pass else: self.dir["_disclaimer"] = "Generated by https://github.com/mar10/pyftpsync" self.dir["_time_str"] = pretty_stamp(time.time()) self.dir["_file_version"] = self.VERSION self.dir["_version"] = __version__ self.dir["_time"] = time.mktime(time.gmtime()) # We always save utf-8 encoded. # `ensure_ascii` would escape all bytes >127 as `\x12` or `\u1234`, # which makes it hard to read, so we set it to false. # `sort_keys` converts binary keys to unicode using utf-8, so we # must make sure that we don't pass cp1225 or other encoded data. data = self.dir opts = {"indent": 4, "sort_keys": True, "ensure_ascii": False} if compat.PY2: # The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3 # opts["encoding"] = "utf-8" # Python 2 has problems with mixed keys (str/unicode) data = decode_dict_keys(data, "utf-8") if not self.PRETTY: opts["indent"] = None opts["separators"] = (",", ":") s = json.dumps(data, **opts) self.target.write_text(self.filename, s) if self.target.synchronizer: self.target.synchronizer._inc_stat("meta_bytes_written", len(s)) self.modified_list = False self.modified_sync = False
[ "def", "flush", "(", "self", ")", ":", "# We DO write meta files even on read-only targets, but not in dry-run mode", "# if self.target.readonly:", "# write(\"DirMetadata.flush(%s): read-only; nothing to do\" % self.target)", "# return", "assert", "self", ".", "path", "==", "self", ".", "target", ".", "cur_dir", "if", "self", ".", "target", ".", "dry_run", ":", "# write(\"DirMetadata.flush(%s): dry-run; nothing to do\" % self.target)", "pass", "elif", "self", ".", "was_read", "and", "len", "(", "self", ".", "list", ")", "==", "0", "and", "len", "(", "self", ".", "peer_sync", ")", "==", "0", ":", "write", "(", "\"Remove empty meta data file: {}\"", ".", "format", "(", "self", ".", "target", ")", ")", "self", ".", "target", ".", "remove_file", "(", "self", ".", "filename", ")", "elif", "not", "self", ".", "modified_list", "and", "not", "self", ".", "modified_sync", ":", "# write(\"DirMetadata.flush(%s): unmodified; nothing to do\" % self.target)", "pass", "else", ":", "self", ".", "dir", "[", "\"_disclaimer\"", "]", "=", "\"Generated by https://github.com/mar10/pyftpsync\"", "self", ".", "dir", "[", "\"_time_str\"", "]", "=", "pretty_stamp", "(", "time", ".", "time", "(", ")", ")", "self", ".", "dir", "[", "\"_file_version\"", "]", "=", "self", ".", "VERSION", "self", ".", "dir", "[", "\"_version\"", "]", "=", "__version__", "self", ".", "dir", "[", "\"_time\"", "]", "=", "time", ".", "mktime", "(", "time", ".", "gmtime", "(", ")", ")", "# We always save utf-8 encoded.", "# `ensure_ascii` would escape all bytes >127 as `\\x12` or `\\u1234`,", "# which makes it hard to read, so we set it to false.", "# `sort_keys` converts binary keys to unicode using utf-8, so we", "# must make sure that we don't pass cp1225 or other encoded data.", "data", "=", "self", ".", "dir", "opts", "=", "{", "\"indent\"", ":", "4", ",", "\"sort_keys\"", ":", "True", ",", "\"ensure_ascii\"", ":", "False", "}", "if", "compat", ".", "PY2", ":", "# The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3", "# opts[\"encoding\"] = \"utf-8\"", "# Python 2 has problems with mixed keys (str/unicode)", "data", "=", "decode_dict_keys", "(", "data", ",", "\"utf-8\"", ")", "if", "not", "self", ".", "PRETTY", ":", "opts", "[", "\"indent\"", "]", "=", "None", "opts", "[", "\"separators\"", "]", "=", "(", "\",\"", ",", "\":\"", ")", "s", "=", "json", ".", "dumps", "(", "data", ",", "*", "*", "opts", ")", "self", ".", "target", ".", "write_text", "(", "self", ".", "filename", ",", "s", ")", "if", "self", ".", "target", ".", "synchronizer", ":", "self", ".", "target", ".", "synchronizer", ".", "_inc_stat", "(", "\"meta_bytes_written\"", ",", "len", "(", "s", ")", ")", "self", ".", "modified_list", "=", "False", "self", ".", "modified_sync", "=", "False" ]
Write self to .pyftpsync-meta.json.
[ "Write", "self", "to", ".", "pyftpsync", "-", "meta", ".", "json", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L177-L228
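The pretty/compact switch at the end of flush() is standard json.dumps tuning and easy to demonstrate in isolation:

import json

data = {"mtimes": {"a.txt": {"m": 1.0, "s": 3}}, "_file_version": 2}

pretty = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)
compact = json.dumps(data, sort_keys=True, ensure_ascii=False,
                     indent=None, separators=(",", ":"))

print(len(compact) < len(pretty))  # True: the compact form drops all whitespace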
4,259
mar10/pyftpsync
ftpsync/scan_command.py
scan_handler
def scan_handler(parser, args): """Implement `scan` sub-command.""" opts = namespace_to_dict(args) opts.update({"ftp_debug": args.verbose >= 6}) target = make_target(args.target, opts) target.readonly = True root_depth = target.root_dir.count("/") start = time.time() dir_count = 1 file_count = 0 processed_files = set() opts = namespace_to_dict(args) process_options(opts) def _pred(entry): """Walker predicate that check match/exclude options.""" if not match_path(entry, opts): return False try: target.open() for e in target.walk(recursive=args.recursive, pred=_pred): is_dir = isinstance(e, DirectoryEntry) indent = " " * (target.cur_dir.count("/") - root_depth) if is_dir: dir_count += 1 else: file_count += 1 if args.list: if is_dir: print(indent, "[{e.name}]".format(e=e)) else: delta = e.mtime_org - e.mtime dt_modified = pretty_stamp(e.mtime) if delta: prefix = "+" if delta > 0 else "" print( indent, "{e.name:<40} {dt_modified} (system: {prefix}{delta})".format( e=e, prefix=prefix, delta=timedelta(seconds=delta), dt_modified=dt_modified, ), ) else: print( indent, "{e.name:<40} {dt_modified}".format( e=e, dt_modified=dt_modified ), ) if ( args.remove_meta and target.cur_dir_meta and target.cur_dir_meta.was_read ): fspec = target.cur_dir_meta.get_full_path() if fspec not in processed_files: processed_files.add(fspec) print("DELETE {}".format(fspec)) if ( args.remove_locks and not is_dir and e.name == DirMetadata.LOCK_FILE_NAME ): fspec = e.get_rel_path() print("DELETE {}".format(fspec)) finally: target.close() print( "Scanning {:,} files in {:,} directories took {:02.2f} seconds.".format( file_count, dir_count, time.time() - start ) )
python
def scan_handler(parser, args): opts = namespace_to_dict(args) opts.update({"ftp_debug": args.verbose >= 6}) target = make_target(args.target, opts) target.readonly = True root_depth = target.root_dir.count("/") start = time.time() dir_count = 1 file_count = 0 processed_files = set() opts = namespace_to_dict(args) process_options(opts) def _pred(entry): """Walker predicate that check match/exclude options.""" if not match_path(entry, opts): return False try: target.open() for e in target.walk(recursive=args.recursive, pred=_pred): is_dir = isinstance(e, DirectoryEntry) indent = " " * (target.cur_dir.count("/") - root_depth) if is_dir: dir_count += 1 else: file_count += 1 if args.list: if is_dir: print(indent, "[{e.name}]".format(e=e)) else: delta = e.mtime_org - e.mtime dt_modified = pretty_stamp(e.mtime) if delta: prefix = "+" if delta > 0 else "" print( indent, "{e.name:<40} {dt_modified} (system: {prefix}{delta})".format( e=e, prefix=prefix, delta=timedelta(seconds=delta), dt_modified=dt_modified, ), ) else: print( indent, "{e.name:<40} {dt_modified}".format( e=e, dt_modified=dt_modified ), ) if ( args.remove_meta and target.cur_dir_meta and target.cur_dir_meta.was_read ): fspec = target.cur_dir_meta.get_full_path() if fspec not in processed_files: processed_files.add(fspec) print("DELETE {}".format(fspec)) if ( args.remove_locks and not is_dir and e.name == DirMetadata.LOCK_FILE_NAME ): fspec = e.get_rel_path() print("DELETE {}".format(fspec)) finally: target.close() print( "Scanning {:,} files in {:,} directories took {:02.2f} seconds.".format( file_count, dir_count, time.time() - start ) )
[ "def", "scan_handler", "(", "parser", ",", "args", ")", ":", "opts", "=", "namespace_to_dict", "(", "args", ")", "opts", ".", "update", "(", "{", "\"ftp_debug\"", ":", "args", ".", "verbose", ">=", "6", "}", ")", "target", "=", "make_target", "(", "args", ".", "target", ",", "opts", ")", "target", ".", "readonly", "=", "True", "root_depth", "=", "target", ".", "root_dir", ".", "count", "(", "\"/\"", ")", "start", "=", "time", ".", "time", "(", ")", "dir_count", "=", "1", "file_count", "=", "0", "processed_files", "=", "set", "(", ")", "opts", "=", "namespace_to_dict", "(", "args", ")", "process_options", "(", "opts", ")", "def", "_pred", "(", "entry", ")", ":", "\"\"\"Walker predicate that check match/exclude options.\"\"\"", "if", "not", "match_path", "(", "entry", ",", "opts", ")", ":", "return", "False", "try", ":", "target", ".", "open", "(", ")", "for", "e", "in", "target", ".", "walk", "(", "recursive", "=", "args", ".", "recursive", ",", "pred", "=", "_pred", ")", ":", "is_dir", "=", "isinstance", "(", "e", ",", "DirectoryEntry", ")", "indent", "=", "\" \"", "*", "(", "target", ".", "cur_dir", ".", "count", "(", "\"/\"", ")", "-", "root_depth", ")", "if", "is_dir", ":", "dir_count", "+=", "1", "else", ":", "file_count", "+=", "1", "if", "args", ".", "list", ":", "if", "is_dir", ":", "print", "(", "indent", ",", "\"[{e.name}]\"", ".", "format", "(", "e", "=", "e", ")", ")", "else", ":", "delta", "=", "e", ".", "mtime_org", "-", "e", ".", "mtime", "dt_modified", "=", "pretty_stamp", "(", "e", ".", "mtime", ")", "if", "delta", ":", "prefix", "=", "\"+\"", "if", "delta", ">", "0", "else", "\"\"", "print", "(", "indent", ",", "\"{e.name:<40} {dt_modified} (system: {prefix}{delta})\"", ".", "format", "(", "e", "=", "e", ",", "prefix", "=", "prefix", ",", "delta", "=", "timedelta", "(", "seconds", "=", "delta", ")", ",", "dt_modified", "=", "dt_modified", ",", ")", ",", ")", "else", ":", "print", "(", "indent", ",", "\"{e.name:<40} {dt_modified}\"", ".", "format", "(", "e", "=", "e", ",", "dt_modified", "=", "dt_modified", ")", ",", ")", "if", "(", "args", ".", "remove_meta", "and", "target", ".", "cur_dir_meta", "and", "target", ".", "cur_dir_meta", ".", "was_read", ")", ":", "fspec", "=", "target", ".", "cur_dir_meta", ".", "get_full_path", "(", ")", "if", "fspec", "not", "in", "processed_files", ":", "processed_files", ".", "add", "(", "fspec", ")", "print", "(", "\"DELETE {}\"", ".", "format", "(", "fspec", ")", ")", "if", "(", "args", ".", "remove_locks", "and", "not", "is_dir", "and", "e", ".", "name", "==", "DirMetadata", ".", "LOCK_FILE_NAME", ")", ":", "fspec", "=", "e", ".", "get_rel_path", "(", ")", "print", "(", "\"DELETE {}\"", ".", "format", "(", "fspec", ")", ")", "finally", ":", "target", ".", "close", "(", ")", "print", "(", "\"Scanning {:,} files in {:,} directories took {:02.2f} seconds.\"", ".", "format", "(", "file_count", ",", "dir_count", ",", "time", ".", "time", "(", ")", "-", "start", ")", ")" ]
Implement `scan` sub-command.
[ "Implement", "scan", "sub", "-", "command", "." ]
bbdc94186975cdc1cc4f678474bdce08bce7bb76
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/scan_command.py#L61-L141
4,260
weblyzard/inscriptis
src/inscriptis/table_engine.py
TableCell.get_format_spec
def get_format_spec(self): ''' The format specification according to the values of `align` and `width` ''' return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
python
def get_format_spec(self): ''' The format specification according to the values of `align` and `width` ''' return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
[ "def", "get_format_spec", "(", "self", ")", ":", "return", "u\"{{:{align}{width}}}\"", ".", "format", "(", "align", "=", "self", ".", "align", ",", "width", "=", "self", ".", "width", ")" ]
The format specification according to the values of `align` and `width`
[ "The", "format", "specification", "according", "to", "the", "values", "of", "align", "and", "width" ]
0d04f81e69d643bb5f470f33b4ca67b62fc1037c
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L31-L35
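The returned string is an ordinary str.format() specification, so the effect is easy to check interactively (align "<" and width 10 are arbitrary sample values):

spec = u"{{:{align}{width}}}".format(align="<", width=10)
print(spec)                        # {:<10}
print(spec.format("cell") + "|")   # cell      | -- left-aligned, padded to 10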
4,261
weblyzard/inscriptis
src/inscriptis/table_engine.py
Table.compute_column_width_and_height
def compute_column_width_and_height(self): ''' compute and set the column width for all colls in the table ''' # skip tables with no row if not self.rows: return # determine row height for row in self.rows: max_row_height = max((len(cell.get_cell_lines()) for cell in row.columns)) if row.columns else 1 for cell in row.columns: cell.height = max_row_height # determine maximum number of columns max_columns = max([len(row.columns) for row in self.rows]) for column_idx in range(max_columns): # determine max_column_width row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows] max_column_width = max((len(line) for line in chain(*row_cell_lines))) # set column width in all rows for row in self.rows: if len(row.columns) > column_idx: row.columns[column_idx].width = max_column_width
python
def compute_column_width_and_height(self): ''' compute and set the column width for all colls in the table ''' # skip tables with no row if not self.rows: return # determine row height for row in self.rows: max_row_height = max((len(cell.get_cell_lines()) for cell in row.columns)) if row.columns else 1 for cell in row.columns: cell.height = max_row_height # determine maximum number of columns max_columns = max([len(row.columns) for row in self.rows]) for column_idx in range(max_columns): # determine max_column_width row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows] max_column_width = max((len(line) for line in chain(*row_cell_lines))) # set column width in all rows for row in self.rows: if len(row.columns) > column_idx: row.columns[column_idx].width = max_column_width
[ "def", "compute_column_width_and_height", "(", "self", ")", ":", "# skip tables with no row", "if", "not", "self", ".", "rows", ":", "return", "# determine row height", "for", "row", "in", "self", ".", "rows", ":", "max_row_height", "=", "max", "(", "(", "len", "(", "cell", ".", "get_cell_lines", "(", ")", ")", "for", "cell", "in", "row", ".", "columns", ")", ")", "if", "row", ".", "columns", "else", "1", "for", "cell", "in", "row", ".", "columns", ":", "cell", ".", "height", "=", "max_row_height", "# determine maximum number of columns", "max_columns", "=", "max", "(", "[", "len", "(", "row", ".", "columns", ")", "for", "row", "in", "self", ".", "rows", "]", ")", "for", "column_idx", "in", "range", "(", "max_columns", ")", ":", "# determine max_column_width", "row_cell_lines", "=", "[", "row", ".", "get_cell_lines", "(", "column_idx", ")", "for", "row", "in", "self", ".", "rows", "]", "max_column_width", "=", "max", "(", "(", "len", "(", "line", ")", "for", "line", "in", "chain", "(", "*", "row_cell_lines", ")", ")", ")", "# set column width in all rows", "for", "row", "in", "self", ".", "rows", ":", "if", "len", "(", "row", ".", "columns", ")", ">", "column_idx", ":", "row", ".", "columns", "[", "column_idx", "]", ".", "width", "=", "max_column_width" ]
compute and set the column width and row height for all cells in the table
[ "compute", "and", "set", "the", "column", "width", "for", "all", "colls", "in", "the", "table" ]
0d04f81e69d643bb5f470f33b4ca67b62fc1037c
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L66-L91
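The width pass reduces to a max() over every cell line of a column; a stripped-down illustration using plain lists of strings instead of row objects:

from itertools import chain

# rows[r][c] is the list of pre-rendered text lines of one cell
rows = [
    [["name"], ["description"]],
    [["inscriptis"], ["HTML to", "text"]],
]

max_columns = max(len(row) for row in rows)
for column_idx in range(max_columns):
    column_lines = [row[column_idx] for row in rows if len(row) > column_idx]
    width = max(len(line) for line in chain(*column_lines))
    print(column_idx, width)   # 0 10  /  1 11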
4,262
weblyzard/inscriptis
scripts/inscript.py
get_parser
def get_parser(): """ Parses the arguments if script is run directly via console """ parser = argparse.ArgumentParser(description='Converts HTML from file or url to a clean text version') parser.add_argument('input', nargs='?', default=None, help='Html input either from a file or an url (default:stdin)') parser.add_argument('-o', '--output', type=str, help='Output file (default:stdout).') parser.add_argument('-e', '--encoding', type=str, help='Content encoding for files (default:utf-8)', default='utf-8') parser.add_argument('-i', '--display-image-captions', action='store_true', default=False, help='Display image captions (default:false).') parser.add_argument('-l', '--display-link-targets', action='store_true', default=False, help='Display link targets (default:false).') parser.add_argument('-d', '--deduplicate-image-captions', action='store_true', default=False, help='Deduplicate image captions (default:false).') return parser
python
def get_parser(): parser = argparse.ArgumentParser(description='Converts HTML from file or url to a clean text version') parser.add_argument('input', nargs='?', default=None, help='Html input either from a file or an url (default:stdin)') parser.add_argument('-o', '--output', type=str, help='Output file (default:stdout).') parser.add_argument('-e', '--encoding', type=str, help='Content encoding for files (default:utf-8)', default='utf-8') parser.add_argument('-i', '--display-image-captions', action='store_true', default=False, help='Display image captions (default:false).') parser.add_argument('-l', '--display-link-targets', action='store_true', default=False, help='Display link targets (default:false).') parser.add_argument('-d', '--deduplicate-image-captions', action='store_true', default=False, help='Deduplicate image captions (default:false).') return parser
[ "def", "get_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Converts HTML from file or url to a clean text version'", ")", "parser", ".", "add_argument", "(", "'input'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'Html input either from a file or an url (default:stdin)'", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "type", "=", "str", ",", "help", "=", "'Output file (default:stdout).'", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "'--encoding'", ",", "type", "=", "str", ",", "help", "=", "'Content encoding for files (default:utf-8)'", ",", "default", "=", "'utf-8'", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "'--display-image-captions'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Display image captions (default:false).'", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--display-link-targets'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Display link targets (default:false).'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--deduplicate-image-captions'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Deduplicate image captions (default:false).'", ")", "return", "parser" ]
Parses the arguments when the script is run directly via the console
[ "Parses", "the", "arguments", "if", "script", "is", "run", "directly", "via", "console" ]
0d04f81e69d643bb5f470f33b4ca67b62fc1037c
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/scripts/inscript.py#L28-L37
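Typical use of the parser above (argument values are made up; assumes argparse is imported and get_parser() from this listing is in scope):

parser = get_parser()
args = parser.parse_args(["page.html", "-o", "page.txt", "--display-link-targets"])
print(args.input, args.output, args.display_link_targets)
# page.html page.txt True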
4,263
weblyzard/inscriptis
src/inscriptis/html_engine.py
Inscriptis.write_line
def write_line(self, force=False): ''' Writes the current line to the buffer, provided that there is any data to write. ::returns: True, if a line has been writer, otherwise False ''' # only break the line if there is any relevant content if not force and (not self.current_line[-1].content or self.current_line[-1].content.isspace()): self.current_line[-1].margin_before = max(self.current_line[-1].margin_before, self.current_tag[-1].margin_before) return False line = self.current_line[-1].get_text() self.clean_text_lines[-1].append(line) self.current_line[-1] = self.next_line[-1] self.next_line[-1] = Line() return True
python
def write_line(self, force=False): ''' Writes the current line to the buffer, provided that there is any data to write. ::returns: True, if a line has been writer, otherwise False ''' # only break the line if there is any relevant content if not force and (not self.current_line[-1].content or self.current_line[-1].content.isspace()): self.current_line[-1].margin_before = max(self.current_line[-1].margin_before, self.current_tag[-1].margin_before) return False line = self.current_line[-1].get_text() self.clean_text_lines[-1].append(line) self.current_line[-1] = self.next_line[-1] self.next_line[-1] = Line() return True
[ "def", "write_line", "(", "self", ",", "force", "=", "False", ")", ":", "# only break the line if there is any relevant content", "if", "not", "force", "and", "(", "not", "self", ".", "current_line", "[", "-", "1", "]", ".", "content", "or", "self", ".", "current_line", "[", "-", "1", "]", ".", "content", ".", "isspace", "(", ")", ")", ":", "self", ".", "current_line", "[", "-", "1", "]", ".", "margin_before", "=", "max", "(", "self", ".", "current_line", "[", "-", "1", "]", ".", "margin_before", ",", "self", ".", "current_tag", "[", "-", "1", "]", ".", "margin_before", ")", "return", "False", "line", "=", "self", ".", "current_line", "[", "-", "1", "]", ".", "get_text", "(", ")", "self", ".", "clean_text_lines", "[", "-", "1", "]", ".", "append", "(", "line", ")", "self", ".", "current_line", "[", "-", "1", "]", "=", "self", ".", "next_line", "[", "-", "1", "]", "self", ".", "next_line", "[", "-", "1", "]", "=", "Line", "(", ")", "return", "True" ]
Writes the current line to the buffer, provided that there is any data to write. ::returns: True, if a line has been written, otherwise False
[ "Writes", "the", "current", "line", "to", "the", "buffer", "provided", "that", "there", "is", "any", "data", "to", "write", "." ]
0d04f81e69d643bb5f470f33b4ca67b62fc1037c
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/html_engine.py#L114-L132
4,264
weblyzard/inscriptis
src/inscriptis/css.py
CssParse._attr_display
def _attr_display(value, html_element): ''' Set the display value ''' if value == 'block': html_element.display = Display.block elif value == 'none': html_element.display = Display.none else: html_element.display = Display.inline
python
def _attr_display(value, html_element): ''' Set the display value ''' if value == 'block': html_element.display = Display.block elif value == 'none': html_element.display = Display.none else: html_element.display = Display.inline
[ "def", "_attr_display", "(", "value", ",", "html_element", ")", ":", "if", "value", "==", "'block'", ":", "html_element", ".", "display", "=", "Display", ".", "block", "elif", "value", "==", "'none'", ":", "html_element", ".", "display", "=", "Display", ".", "none", "else", ":", "html_element", ".", "display", "=", "Display", ".", "inline" ]
Set the display value
[ "Set", "the", "display", "value" ]
0d04f81e69d643bb5f470f33b4ca67b62fc1037c
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L114-L123
4,265
fabiocaccamo/django-maintenance-mode
maintenance_mode/io.py
read_file
def read_file(file_path, default_content=''): """ Read file at the specified path. If file doesn't exist, it will be created with default-content. Returns the file content. """ if not os.path.exists(file_path): write_file(file_path, default_content) handler = open(file_path, 'r') content = handler.read() handler.close() return content or default_content
python
def read_file(file_path, default_content=''): if not os.path.exists(file_path): write_file(file_path, default_content) handler = open(file_path, 'r') content = handler.read() handler.close() return content or default_content
[ "def", "read_file", "(", "file_path", ",", "default_content", "=", "''", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "write_file", "(", "file_path", ",", "default_content", ")", "handler", "=", "open", "(", "file_path", ",", "'r'", ")", "content", "=", "handler", ".", "read", "(", ")", "handler", ".", "close", "(", ")", "return", "content", "or", "default_content" ]
Read file at the specified path. If file doesn't exist, it will be created with default-content. Returns the file content.
[ "Read", "file", "at", "the", "specified", "path", ".", "If", "file", "doesn", "t", "exist", "it", "will", "be", "created", "with", "default", "-", "content", ".", "Returns", "the", "file", "content", "." ]
008221a6b8a687667c2480fa799c7a4228598441
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/io.py#L6-L18
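A short usage sketch (the file name is made up): because read_file falls back to write_file for a missing file, the first call both creates the file and returns the default content:

import os

from maintenance_mode.io import read_file  # module path as listed in this record

content = read_file('state.txt', default_content='0')  # creates state.txt if absent
assert content == '0' and os.path.isfile('state.txt')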
4,266
fabiocaccamo/django-maintenance-mode
maintenance_mode/io.py
write_file
def write_file(file_path, content): """ Write file at the specified path with content. If file exists, it will be overwritten. """ handler = open(file_path, 'w+') handler.write(content) handler.close()
python
def write_file(file_path, content): handler = open(file_path, 'w+') handler.write(content) handler.close()
[ "def", "write_file", "(", "file_path", ",", "content", ")", ":", "handler", "=", "open", "(", "file_path", ",", "'w+'", ")", "handler", ".", "write", "(", "content", ")", "handler", ".", "close", "(", ")" ]
Write file at the specified path with content. If file exists, it will be overwritten.
[ "Write", "file", "at", "the", "specified", "path", "with", "content", ".", "If", "file", "exists", "it", "will", "be", "overwritten", "." ]
008221a6b8a687667c2480fa799c7a4228598441
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/io.py#L21-L28
4,267
fabiocaccamo/django-maintenance-mode
maintenance_mode/core.py
set_maintenance_mode
def set_maintenance_mode(value): """ Set maintenance_mode state to state file. """ # If maintenance mode is defined in settings, it can't be changed. if settings.MAINTENANCE_MODE is not None: raise ImproperlyConfigured( 'Maintenance mode cannot be set dynamically ' 'if defined in settings.') if not isinstance(value, bool): raise TypeError('value argument type is not boolean') backend = get_maintenance_mode_backend() backend.set_value(value)
python
def set_maintenance_mode(value): # If maintenance mode is defined in settings, it can't be changed. if settings.MAINTENANCE_MODE is not None: raise ImproperlyConfigured( 'Maintenance mode cannot be set dynamically ' 'if defined in settings.') if not isinstance(value, bool): raise TypeError('value argument type is not boolean') backend = get_maintenance_mode_backend() backend.set_value(value)
[ "def", "set_maintenance_mode", "(", "value", ")", ":", "# If maintenance mode is defined in settings, it can't be changed.", "if", "settings", ".", "MAINTENANCE_MODE", "is", "not", "None", ":", "raise", "ImproperlyConfigured", "(", "'Maintenance mode cannot be set dynamically '", "'if defined in settings.'", ")", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "TypeError", "(", "'value argument type is not boolean'", ")", "backend", "=", "get_maintenance_mode_backend", "(", ")", "backend", ".", "set_value", "(", "value", ")" ]
Set maintenance_mode state to state file.
[ "Set", "maintenance_mode", "state", "to", "state", "file", "." ]
008221a6b8a687667c2480fa799c7a4228598441
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/core.py#L60-L75
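A usage sketch: dynamic toggling only works when settings.MAINTENANCE_MODE is left at None, otherwise the function raises ImproperlyConfigured, and a non-boolean argument raises TypeError:

from maintenance_mode.core import set_maintenance_mode  # module path as listed above

set_maintenance_mode(True)   # enter maintenance mode
set_maintenance_mode(False)  # leave maintenance mode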
4,268
fabiocaccamo/django-maintenance-mode
maintenance_mode/http.py
get_maintenance_response
def get_maintenance_response(request): """ Return a '503 Service Unavailable' maintenance response. """ if settings.MAINTENANCE_MODE_REDIRECT_URL: return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL) context = {} if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT: try: get_request_context_func = import_string( settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT) except ImportError: raise ImproperlyConfigured( 'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT ' 'is not a valid function path.' ) context = get_request_context_func(request=request) if django.VERSION < (1, 8): kwargs = {'context_instance': RequestContext(request, context)} else: kwargs = {'context': context} response = render(request, settings.MAINTENANCE_MODE_TEMPLATE, status=settings.MAINTENANCE_MODE_STATUS_CODE, **kwargs) response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER add_never_cache_headers(response) return response
python
def get_maintenance_response(request): if settings.MAINTENANCE_MODE_REDIRECT_URL: return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL) context = {} if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT: try: get_request_context_func = import_string( settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT) except ImportError: raise ImproperlyConfigured( 'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT ' 'is not a valid function path.' ) context = get_request_context_func(request=request) if django.VERSION < (1, 8): kwargs = {'context_instance': RequestContext(request, context)} else: kwargs = {'context': context} response = render(request, settings.MAINTENANCE_MODE_TEMPLATE, status=settings.MAINTENANCE_MODE_STATUS_CODE, **kwargs) response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER add_never_cache_headers(response) return response
[ "def", "get_maintenance_response", "(", "request", ")", ":", "if", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ":", "return", "redirect", "(", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ")", "context", "=", "{", "}", "if", "settings", ".", "MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT", ":", "try", ":", "get_request_context_func", "=", "import_string", "(", "settings", ".", "MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '", "'is not a valid function path.'", ")", "context", "=", "get_request_context_func", "(", "request", "=", "request", ")", "if", "django", ".", "VERSION", "<", "(", "1", ",", "8", ")", ":", "kwargs", "=", "{", "'context_instance'", ":", "RequestContext", "(", "request", ",", "context", ")", "}", "else", ":", "kwargs", "=", "{", "'context'", ":", "context", "}", "response", "=", "render", "(", "request", ",", "settings", ".", "MAINTENANCE_MODE_TEMPLATE", ",", "status", "=", "settings", ".", "MAINTENANCE_MODE_STATUS_CODE", ",", "*", "*", "kwargs", ")", "response", "[", "'Retry-After'", "]", "=", "settings", ".", "MAINTENANCE_MODE_RETRY_AFTER", "add_never_cache_headers", "(", "response", ")", "return", "response" ]
Return a '503 Service Unavailable' maintenance response.
[ "Return", "a", "503", "Service", "Unavailable", "maintenance", "response", "." ]
008221a6b8a687667c2480fa799c7a4228598441
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/http.py#L34-L65
4,269
fabiocaccamo/django-maintenance-mode
maintenance_mode/http.py
need_maintenance_response
def need_maintenance_response(request): """ Tells if the given request needs a maintenance response or not. """ try: view_match = resolve(request.path) view_func = view_match[0] view_dict = view_func.__dict__ view_force_maintenance_mode_off = view_dict.get( 'force_maintenance_mode_off', False) if view_force_maintenance_mode_off: # view has 'force_maintenance_mode_off' decorator return False view_force_maintenance_mode_on = view_dict.get( 'force_maintenance_mode_on', False) if view_force_maintenance_mode_on: # view has 'force_maintenance_mode_on' decorator return True except Resolver404: pass if not get_maintenance_mode(): return False try: url_off = reverse('maintenance_mode_off') resolve(url_off) if url_off == request.path_info: return False except NoReverseMatch: # maintenance_mode.urls not added pass if hasattr(request, 'user'): if django.VERSION < (1, 10): if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous(): return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated(): return False else: if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous: return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated: return False if settings.MAINTENANCE_MODE_IGNORE_STAFF \ and request.user.is_staff: return False if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \ and request.user.is_superuser: return False if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE: try: request_path = request.path if request.path else '' if not request_path.endswith('/'): request_path += '/' admin_url = reverse('admin:index') if request_path.startswith(admin_url): return False except NoReverseMatch: # admin.urls not added pass if settings.MAINTENANCE_MODE_IGNORE_TESTS: is_testing = False if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \ or (len(sys.argv) > 1 and sys.argv[1] == 'test'): # python runtests.py | python manage.py test | python # setup.py test | django-admin.py test is_testing = True if is_testing: return False if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS: try: get_client_ip_address_func = import_string( settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS) except ImportError: raise ImproperlyConfigured( 'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS ' 'is not a valid function path.') else: client_ip_address = get_client_ip_address_func(request) else: client_ip_address = get_client_ip_address(request) for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: ip_address_re = re.compile(ip_address) if ip_address_re.match(client_ip_address): return False if settings.MAINTENANCE_MODE_IGNORE_URLS: for url in settings.MAINTENANCE_MODE_IGNORE_URLS: if not isinstance(url, pattern_class): url = str(url) url_re = re.compile(url) if url_re.match(request.path_info): return False if settings.MAINTENANCE_MODE_REDIRECT_URL: redirect_url_re = re.compile( settings.MAINTENANCE_MODE_REDIRECT_URL) if redirect_url_re.match(request.path_info): return False return True
python
def need_maintenance_response(request): try: view_match = resolve(request.path) view_func = view_match[0] view_dict = view_func.__dict__ view_force_maintenance_mode_off = view_dict.get( 'force_maintenance_mode_off', False) if view_force_maintenance_mode_off: # view has 'force_maintenance_mode_off' decorator return False view_force_maintenance_mode_on = view_dict.get( 'force_maintenance_mode_on', False) if view_force_maintenance_mode_on: # view has 'force_maintenance_mode_on' decorator return True except Resolver404: pass if not get_maintenance_mode(): return False try: url_off = reverse('maintenance_mode_off') resolve(url_off) if url_off == request.path_info: return False except NoReverseMatch: # maintenance_mode.urls not added pass if hasattr(request, 'user'): if django.VERSION < (1, 10): if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous(): return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated(): return False else: if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous: return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated: return False if settings.MAINTENANCE_MODE_IGNORE_STAFF \ and request.user.is_staff: return False if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \ and request.user.is_superuser: return False if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE: try: request_path = request.path if request.path else '' if not request_path.endswith('/'): request_path += '/' admin_url = reverse('admin:index') if request_path.startswith(admin_url): return False except NoReverseMatch: # admin.urls not added pass if settings.MAINTENANCE_MODE_IGNORE_TESTS: is_testing = False if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \ or (len(sys.argv) > 1 and sys.argv[1] == 'test'): # python runtests.py | python manage.py test | python # setup.py test | django-admin.py test is_testing = True if is_testing: return False if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS: try: get_client_ip_address_func = import_string( settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS) except ImportError: raise ImproperlyConfigured( 'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS ' 'is not a valid function path.') else: client_ip_address = get_client_ip_address_func(request) else: client_ip_address = get_client_ip_address(request) for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: ip_address_re = re.compile(ip_address) if ip_address_re.match(client_ip_address): return False if settings.MAINTENANCE_MODE_IGNORE_URLS: for url in settings.MAINTENANCE_MODE_IGNORE_URLS: if not isinstance(url, pattern_class): url = str(url) url_re = re.compile(url) if url_re.match(request.path_info): return False if settings.MAINTENANCE_MODE_REDIRECT_URL: redirect_url_re = re.compile( settings.MAINTENANCE_MODE_REDIRECT_URL) if redirect_url_re.match(request.path_info): return False return True
[ "def", "need_maintenance_response", "(", "request", ")", ":", "try", ":", "view_match", "=", "resolve", "(", "request", ".", "path", ")", "view_func", "=", "view_match", "[", "0", "]", "view_dict", "=", "view_func", ".", "__dict__", "view_force_maintenance_mode_off", "=", "view_dict", ".", "get", "(", "'force_maintenance_mode_off'", ",", "False", ")", "if", "view_force_maintenance_mode_off", ":", "# view has 'force_maintenance_mode_off' decorator", "return", "False", "view_force_maintenance_mode_on", "=", "view_dict", ".", "get", "(", "'force_maintenance_mode_on'", ",", "False", ")", "if", "view_force_maintenance_mode_on", ":", "# view has 'force_maintenance_mode_on' decorator", "return", "True", "except", "Resolver404", ":", "pass", "if", "not", "get_maintenance_mode", "(", ")", ":", "return", "False", "try", ":", "url_off", "=", "reverse", "(", "'maintenance_mode_off'", ")", "resolve", "(", "url_off", ")", "if", "url_off", "==", "request", ".", "path_info", ":", "return", "False", "except", "NoReverseMatch", ":", "# maintenance_mode.urls not added", "pass", "if", "hasattr", "(", "request", ",", "'user'", ")", ":", "if", "django", ".", "VERSION", "<", "(", "1", ",", "10", ")", ":", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER", "and", "request", ".", "user", ".", "is_anonymous", "(", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "return", "False", "else", ":", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER", "and", "request", ".", "user", ".", "is_anonymous", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER", "and", "request", ".", "user", ".", "is_authenticated", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_STAFF", "and", "request", ".", "user", ".", "is_staff", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_SUPERUSER", "and", "request", ".", "user", ".", "is_superuser", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ADMIN_SITE", ":", "try", ":", "request_path", "=", "request", ".", "path", "if", "request", ".", "path", "else", "''", "if", "not", "request_path", ".", "endswith", "(", "'/'", ")", ":", "request_path", "+=", "'/'", "admin_url", "=", "reverse", "(", "'admin:index'", ")", "if", "request_path", ".", "startswith", "(", "admin_url", ")", ":", "return", "False", "except", "NoReverseMatch", ":", "# admin.urls not added", "pass", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_TESTS", ":", "is_testing", "=", "False", "if", "(", "len", "(", "sys", ".", "argv", ")", ">", "0", "and", "'runtests'", "in", "sys", ".", "argv", "[", "0", "]", ")", "or", "(", "len", "(", "sys", ".", "argv", ")", ">", "1", "and", "sys", ".", "argv", "[", "1", "]", "==", "'test'", ")", ":", "# python runtests.py | python manage.py test | python", "# setup.py test | django-admin.py test", "is_testing", "=", "True", "if", "is_testing", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_IP_ADDRESSES", ":", "if", "settings", ".", "MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS", ":", "try", ":", "get_client_ip_address_func", "=", "import_string", "(", "settings", ".", "MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS '", "'is not a valid function path.'", ")", "else", ":", "client_ip_address", "=", 
"get_client_ip_address_func", "(", "request", ")", "else", ":", "client_ip_address", "=", "get_client_ip_address", "(", "request", ")", "for", "ip_address", "in", "settings", ".", "MAINTENANCE_MODE_IGNORE_IP_ADDRESSES", ":", "ip_address_re", "=", "re", ".", "compile", "(", "ip_address", ")", "if", "ip_address_re", ".", "match", "(", "client_ip_address", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_URLS", ":", "for", "url", "in", "settings", ".", "MAINTENANCE_MODE_IGNORE_URLS", ":", "if", "not", "isinstance", "(", "url", ",", "pattern_class", ")", ":", "url", "=", "str", "(", "url", ")", "url_re", "=", "re", ".", "compile", "(", "url", ")", "if", "url_re", ".", "match", "(", "request", ".", "path_info", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ":", "redirect_url_re", "=", "re", ".", "compile", "(", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ")", "if", "redirect_url_re", ".", "match", "(", "request", ".", "path_info", ")", ":", "return", "False", "return", "True" ]
Tells if the given request needs a maintenance response or not.
[ "Tells", "if", "the", "given", "request", "needs", "a", "maintenance", "response", "or", "not", "." ]
008221a6b8a687667c2480fa799c7a4228598441
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/http.py#L68-L204
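A hedged sketch of how the two http helpers above compose; django-maintenance-mode ships its own middleware, so this Django-style class is only an illustration of the control flow:

from maintenance_mode.http import (get_maintenance_response,
                                   need_maintenance_response)

class MaintenanceModeMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # serve the 503 page only when none of the ignore rules above match
        if need_maintenance_response(request):
            return get_maintenance_response(request)
        return self.get_response(request)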
4,270
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.format_results
def format_results(self, results): """ Format the ldap results object into something that is reasonable """ if not results: return None userdn = results[0][0] userobj = results[0][1] userobj['dn'] = userdn keymap = self.config.get('KEY_MAP') if keymap: return {key:scalar(userobj.get(value)) for key, value in keymap.items() if _is_utf8(scalar(userobj.get(value))) } else: return {key:scalar(value) for key, value in userobj.items() if _is_utf8(scalar(value)) }
python
def format_results(self, results): if not results: return None userdn = results[0][0] userobj = results[0][1] userobj['dn'] = userdn keymap = self.config.get('KEY_MAP') if keymap: return {key:scalar(userobj.get(value)) for key, value in keymap.items() if _is_utf8(scalar(userobj.get(value))) } else: return {key:scalar(value) for key, value in userobj.items() if _is_utf8(scalar(value)) }
[ "def", "format_results", "(", "self", ",", "results", ")", ":", "if", "not", "results", ":", "return", "None", "userdn", "=", "results", "[", "0", "]", "[", "0", "]", "userobj", "=", "results", "[", "0", "]", "[", "1", "]", "userobj", "[", "'dn'", "]", "=", "userdn", "keymap", "=", "self", ".", "config", ".", "get", "(", "'KEY_MAP'", ")", "if", "keymap", ":", "return", "{", "key", ":", "scalar", "(", "userobj", ".", "get", "(", "value", ")", ")", "for", "key", ",", "value", "in", "keymap", ".", "items", "(", ")", "if", "_is_utf8", "(", "scalar", "(", "userobj", ".", "get", "(", "value", ")", ")", ")", "}", "else", ":", "return", "{", "key", ":", "scalar", "(", "value", ")", "for", "key", ",", "value", "in", "userobj", ".", "items", "(", ")", "if", "_is_utf8", "(", "scalar", "(", "value", ")", ")", "}" ]
Format the ldap results object into something that is reasonable
[ "Format", "the", "ldap", "results", "object", "into", "somthing", "that", "is", "reasonable" ]
09a08be45f861823cb08f95883ee1e092a618c37
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L113-L127
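A sketch of the data flow, with a made-up LDAP search result and KEY_MAP; the scalar helper below mimics the one the method relies on (its real definition is not shown in this record):

results = [('uid=jdoe,ou=people,dc=example,dc=com',
            {'cn': [b'John Doe'], 'mail': [b'jdoe@example.com']})]
key_map = {'name': 'cn', 'email': 'mail'}  # output key -> ldap attribute

def scalar(value):
    # unwrap single-element attribute lists, as the method expects
    return value[0] if isinstance(value, (list, tuple)) else value

userdn, userobj = results[0]
userobj['dn'] = userdn
print({key: scalar(userobj.get(attr)) for key, attr in key_map.items()})
# {'name': b'John Doe', 'email': b'jdoe@example.com'}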
4,271
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.attrlist
def attrlist(self): 'Transform the KEY_MAP parameter into an attrlist for ldap filters' keymap = self.config.get('KEY_MAP') if keymap: # https://github.com/ContinuumIO/flask-ldap-login/issues/11 # https://continuumsupport.zendesk.com/agent/tickets/393 return [s.encode('utf-8') for s in keymap.values()] else: return None
python
def attrlist(self): 'Transform the KEY_MAP parameter into an attrlist for ldap filters' keymap = self.config.get('KEY_MAP') if keymap: # https://github.com/ContinuumIO/flask-ldap-login/issues/11 # https://continuumsupport.zendesk.com/agent/tickets/393 return [s.encode('utf-8') for s in keymap.values()] else: return None
[ "def", "attrlist", "(", "self", ")", ":", "keymap", "=", "self", ".", "config", ".", "get", "(", "'KEY_MAP'", ")", "if", "keymap", ":", "# https://github.com/ContinuumIO/flask-ldap-login/issues/11", "# https://continuumsupport.zendesk.com/agent/tickets/393", "return", "[", "s", ".", "encode", "(", "'utf-8'", ")", "for", "s", "in", "keymap", ".", "values", "(", ")", "]", "else", ":", "return", "None" ]
Transform the KEY_MAP parameter into an attrlist for ldap filters
[ "Transform", "the", "KEY_MAP", "paramiter", "into", "an", "attrlist", "for", "ldap", "filters" ]
09a08be45f861823cb08f95883ee1e092a618c37
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L146-L154
4,272
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.connect
def connect(self): 'initialize ldap connection and set options' log.debug("Connecting to ldap server %s" % self.config['URI']) self.conn = ldap.initialize(self.config['URI']) # There are some settings that can't be changed at runtime without a context restart. # It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX # to 0, but this needs to be the last option set, and since the config dictionary is not # sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX # is present, it is applied last. options = self.config.get('OPTIONS', {}).items() options.sort(key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX') for opt, value in options: if isinstance(opt, str): opt = getattr(ldap, opt) try: if isinstance(value, str): value = getattr(ldap, value) except AttributeError: pass self.conn.set_option(opt, value) if self.config.get('START_TLS'): log.debug("Starting TLS") self.conn.start_tls_s()
python
def connect(self): 'initialize ldap connection and set options' log.debug("Connecting to ldap server %s" % self.config['URI']) self.conn = ldap.initialize(self.config['URI']) # There are some settings that can't be changed at runtime without a context restart. # It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX # to 0, but this needs to be the last option set, and since the config dictionary is not # sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX # is present, it is applied last. options = self.config.get('OPTIONS', {}).items() options.sort(key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX') for opt, value in options: if isinstance(opt, str): opt = getattr(ldap, opt) try: if isinstance(value, str): value = getattr(ldap, value) except AttributeError: pass self.conn.set_option(opt, value) if self.config.get('START_TLS'): log.debug("Starting TLS") self.conn.start_tls_s()
[ "def", "connect", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Connecting to ldap server %s\"", "%", "self", ".", "config", "[", "'URI'", "]", ")", "self", ".", "conn", "=", "ldap", ".", "initialize", "(", "self", ".", "config", "[", "'URI'", "]", ")", "# There are some settings that can't be changed at runtime without a context restart.", "# It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX", "# to 0, but this needs to be the last option set, and since the config dictionary is not", "# sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX", "# is present, it is applied last.", "options", "=", "self", ".", "config", ".", "get", "(", "'OPTIONS'", ",", "{", "}", ")", ".", "items", "(", ")", "options", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", "==", "'OPT_X_TLS_NEWCTX'", ")", "for", "opt", ",", "value", "in", "options", ":", "if", "isinstance", "(", "opt", ",", "str", ")", ":", "opt", "=", "getattr", "(", "ldap", ",", "opt", ")", "try", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "getattr", "(", "ldap", ",", "value", ")", "except", "AttributeError", ":", "pass", "self", ".", "conn", ".", "set_option", "(", "opt", ",", "value", ")", "if", "self", ".", "config", ".", "get", "(", "'START_TLS'", ")", ":", "log", ".", "debug", "(", "\"Starting TLS\"", ")", "self", ".", "conn", ".", "start_tls_s", "(", ")" ]
initialize ldap connection and set options
[ "initialize", "ldap", "connection", "and", "set", "options" ]
09a08be45f861823cb08f95883ee1e092a618c37
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L236-L262
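The option ordering above relies on list.sort, which assumes the Python 2 behaviour of dict.items(); a minimal sketch of the same last-wins ordering for OPT_X_TLS_NEWCTX that also runs on Python 3 (the option values are made up):

options = {
    'OPT_X_TLS_REQUIRE_CERT': 'OPT_X_TLS_NEVER',  # made-up example values
    'OPT_X_TLS_NEWCTX': 0,
}
# False sorts before True, so OPT_X_TLS_NEWCTX always comes out last
ordered = sorted(options.items(), key=lambda item: item[0] == 'OPT_X_TLS_NEWCTX')
assert ordered[-1][0] == 'OPT_X_TLS_NEWCTX'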
4,273
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.ldap_login
def ldap_login(self, username, password): """ Authenticate a user using ldap. This will return a userdata dict if successful. ldap_login will return None if the user does not exist or if the credentials are invalid """ self.connect() if self.config.get('USER_SEARCH'): result = self.bind_search(username, password) else: result = self.direct_bind(username, password) return result
python
def ldap_login(self, username, password): self.connect() if self.config.get('USER_SEARCH'): result = self.bind_search(username, password) else: result = self.direct_bind(username, password) return result
[ "def", "ldap_login", "(", "self", ",", "username", ",", "password", ")", ":", "self", ".", "connect", "(", ")", "if", "self", ".", "config", ".", "get", "(", "'USER_SEARCH'", ")", ":", "result", "=", "self", ".", "bind_search", "(", "username", ",", "password", ")", "else", ":", "result", "=", "self", ".", "direct_bind", "(", "username", ",", "password", ")", "return", "result" ]
Authenticate a user using ldap. This will return a userdata dict if successful. ldap_login will return None if the user does not exist or if the credentials are invalid
[ "Authenticate", "a", "user", "using", "ldap", ".", "This", "will", "return", "a", "userdata", "dict", "if", "successfull", ".", "ldap_login", "will", "return", "None", "if", "the", "user", "does", "not", "exist", "or", "if", "the", "credentials", "are", "invalid" ]
09a08be45f861823cb08f95883ee1e092a618c37
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L264-L276
4,274
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP.address
def address(self): """ IP Address using bacpypes Address format """ port = "" if self._port: port = ":{}".format(self._port) return Address( "{}/{}{}".format( self.interface.ip.compressed, self.interface.exploded.split("/")[-1], port, ) )
python
def address(self): port = "" if self._port: port = ":{}".format(self._port) return Address( "{}/{}{}".format( self.interface.ip.compressed, self.interface.exploded.split("/")[-1], port, ) )
[ "def", "address", "(", "self", ")", ":", "port", "=", "\"\"", "if", "self", ".", "_port", ":", "port", "=", "\":{}\"", ".", "format", "(", "self", ".", "_port", ")", "return", "Address", "(", "\"{}/{}{}\"", ".", "format", "(", "self", ".", "interface", ".", "ip", ".", "compressed", ",", "self", ".", "interface", ".", "exploded", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ",", "port", ",", ")", ")" ]
IP Address using bacpypes Address format
[ "IP", "Address", "using", "bacpypes", "Address", "format" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L53-L66
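A self-contained sketch of the string this property assembles, using only the stdlib ipaddress module; 47808 is the conventional BACnet/IP port, used here purely as an example:

import ipaddress

iface = ipaddress.ip_interface('192.168.1.10/24')  # made-up interface
port = 47808
addr = '{}/{}{}'.format(iface.ip.compressed,
                        iface.exploded.split('/')[-1],
                        ':{}'.format(port))
print(addr)  # 192.168.1.10/24:47808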
4,275
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP._findIPAddr
def _findIPAddr(self): """ Retrieve the IP address connected to the internet... used as a default IP address when defining Script :returns: IP Address as String """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(("google.com", 0)) addr = s.getsockname()[0] # print('Using ip : {addr}'.format(addr=addr)) s.close() except socket.error: raise NetworkInterfaceException( "Impossible to retrieve IP, please provide one manually" ) return addr
python
def _findIPAddr(self): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(("google.com", 0)) addr = s.getsockname()[0] # print('Using ip : {addr}'.format(addr=addr)) s.close() except socket.error: raise NetworkInterfaceException( "Impossible to retrieve IP, please provide one manually" ) return addr
[ "def", "_findIPAddr", "(", "self", ")", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "try", ":", "s", ".", "connect", "(", "(", "\"google.com\"", ",", "0", ")", ")", "addr", "=", "s", ".", "getsockname", "(", ")", "[", "0", "]", "# print('Using ip : {addr}'.format(addr=addr))", "s", ".", "close", "(", ")", "except", "socket", ".", "error", ":", "raise", "NetworkInterfaceException", "(", "\"Impossible to retrieve IP, please provide one manually\"", ")", "return", "addr" ]
Retrieve the IP address connected to the internet... used as a default IP address when defining Script :returns: IP Address as String
[ "Retrieve", "the", "IP", "address", "connected", "to", "internet", "...", "used", "as", "a", "default", "IP", "address", "when", "defining", "Script" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L82-L99
4,276
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP._findSubnetMask
def _findSubnetMask(self, ip): """ Retrieve the broadcast IP address connected to the internet... used as a default IP address when defining Script :param ip: (str) optional IP address. If not provided, defaults to getIPAddr() :param mask: (str) optional subnet mask. If not provided, will try to find one using ipconfig (Windows) or ifconfig (Linux or macOS) :returns: broadcast IP Address as String """ ip = ip if "win32" in sys.platform: try: proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE) while True: line = proc.stdout.readline() if ip.encode() in line: break mask = ( proc.stdout.readline() .rstrip() .split(b":")[-1] .replace(b" ", b"") .decode() ) except: raise NetworkInterfaceException("Cannot read IP parameters from OS") else: """ This procedure could use more direct way of obtaining the broadcast IP as it is really simple in Unix ifconfig gives Bcast directly for example or use something like : iface = "eth0" socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24]) """ pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})") try: proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE) while True: line = proc.stdout.readline() if ip.encode() in line: break mask = re.findall(pattern, line.decode())[0] except: mask = "255.255.255.255" # self._log.debug('Mask found : %s' % mask) return mask
python
def _findSubnetMask(self, ip): ip = ip if "win32" in sys.platform: try: proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE) while True: line = proc.stdout.readline() if ip.encode() in line: break mask = ( proc.stdout.readline() .rstrip() .split(b":")[-1] .replace(b" ", b"") .decode() ) except: raise NetworkInterfaceException("Cannot read IP parameters from OS") else: """ This procedure could use more direct way of obtaining the broadcast IP as it is really simple in Unix ifconfig gives Bcast directly for example or use something like : iface = "eth0" socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24]) """ pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})") try: proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE) while True: line = proc.stdout.readline() if ip.encode() in line: break mask = re.findall(pattern, line.decode())[0] except: mask = "255.255.255.255" # self._log.debug('Mask found : %s' % mask) return mask
[ "def", "_findSubnetMask", "(", "self", ",", "ip", ")", ":", "ip", "=", "ip", "if", "\"win32\"", "in", "sys", ".", "platform", ":", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "\"ipconfig\"", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "True", ":", "line", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "if", "ip", ".", "encode", "(", ")", "in", "line", ":", "break", "mask", "=", "(", "proc", ".", "stdout", ".", "readline", "(", ")", ".", "rstrip", "(", ")", ".", "split", "(", "b\":\"", ")", "[", "-", "1", "]", ".", "replace", "(", "b\" \"", ",", "b\"\"", ")", ".", "decode", "(", ")", ")", "except", ":", "raise", "NetworkInterfaceException", "(", "\"Cannot read IP parameters from OS\"", ")", "else", ":", "\"\"\"\n This procedure could use more direct way of obtaining the broadcast IP\n as it is really simple in Unix\n ifconfig gives Bcast directly for example\n or use something like :\n iface = \"eth0\"\n socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])\n \"\"\"", "pattern", "=", "re", ".", "compile", "(", "r\"(255.\\d{1,3}.\\d{1,3}.\\d{1,3})\"", ")", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "\"ifconfig\"", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "True", ":", "line", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "if", "ip", ".", "encode", "(", ")", "in", "line", ":", "break", "mask", "=", "re", ".", "findall", "(", "pattern", ",", "line", ".", "decode", "(", ")", ")", "[", "0", "]", "except", ":", "mask", "=", "\"255.255.255.255\"", "# self._log.debug('Mask found : %s' % mask)", "return", "mask" ]
Retrieve the broadcast IP address connected to the internet... used as a default IP address when defining Script :param ip: (str) optional IP address. If not provided, defaults to getIPAddr() :param mask: (str) optional subnet mask. If not provided, will try to find one using ipconfig (Windows) or ifconfig (Linux or macOS) :returns: broadcast IP Address as String
[ "Retrieve", "the", "broadcast", "IP", "address", "connected", "to", "internet", "...", "used", "as", "a", "default", "IP", "address", "when", "defining", "Script" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L101-L150
4,277
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin._read_from_sql
def _read_from_sql(self, request, db_name): """ Using contextlib, I hope to close the connection to the database when not in use """ with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con: return sql.read_sql(sql=request, con=con)
python
def _read_from_sql(self, request, db_name): with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con: return sql.read_sql(sql=request, con=con)
[ "def", "_read_from_sql", "(", "self", ",", "request", ",", "db_name", ")", ":", "with", "contextlib", ".", "closing", "(", "sqlite3", ".", "connect", "(", "\"{}.db\"", ".", "format", "(", "db_name", ")", ")", ")", "as", "con", ":", "return", "sql", ".", "read_sql", "(", "sql", "=", "request", ",", "con", "=", "con", ")" ]
Using contextlib, I hope to close the connection to the database when not in use
[ "Using", "the", "contextlib", "I", "hope", "to", "close", "the", "connection", "to", "database", "when", "not", "in", "use" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L42-L48
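A standalone sketch of the contextlib.closing pattern used above; pd.read_sql is the top-level equivalent of the pandas sql.read_sql call, and the table and file names are made up:

import contextlib
import sqlite3

import pandas as pd

with contextlib.closing(sqlite3.connect('demo.db')) as con:
    con.execute('CREATE TABLE IF NOT EXISTS history ("index" TEXT, value REAL)')
    con.execute("INSERT INTO history VALUES ('2024-01-01 00:00:00', 21.5)")
    con.commit()
    df = pd.read_sql(sql='select * from "history"', con=con)
print(df)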
4,278
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.backup_histories_df
def backup_histories_df(self): """ Build a dataframe of the point histories """ backup = {} for point in self.points: if point.history.dtypes == object: backup[point.properties.name] = ( point.history.replace(["inactive", "active"], [0, 1]) .resample("1s") .mean() ) else: backup[point.properties.name] = point.history.resample("1s").mean() df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()])) return df.fillna(method="ffill")
python
def backup_histories_df(self): backup = {} for point in self.points: if point.history.dtypes == object: backup[point.properties.name] = ( point.history.replace(["inactive", "active"], [0, 1]) .resample("1s") .mean() ) else: backup[point.properties.name] = point.history.resample("1s").mean() df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()])) return df.fillna(method="ffill")
[ "def", "backup_histories_df", "(", "self", ")", ":", "backup", "=", "{", "}", "for", "point", "in", "self", ".", "points", ":", "if", "point", ".", "history", ".", "dtypes", "==", "object", ":", "backup", "[", "point", ".", "properties", ".", "name", "]", "=", "(", "point", ".", "history", ".", "replace", "(", "[", "\"inactive\"", ",", "\"active\"", "]", ",", "[", "0", ",", "1", "]", ")", ".", "resample", "(", "\"1s\"", ")", ".", "mean", "(", ")", ")", "else", ":", "backup", "[", "point", ".", "properties", ".", "name", "]", "=", "point", ".", "history", ".", "resample", "(", "\"1s\"", ")", ".", "mean", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "dict", "(", "[", "(", "k", ",", "pd", ".", "Series", "(", "v", ")", ")", "for", "k", ",", "v", "in", "backup", ".", "items", "(", ")", "]", ")", ")", "return", "df", ".", "fillna", "(", "method", "=", "\"ffill\"", ")" ]
Build a dataframe of the point histories
[ "Build", "a", "dataframe", "of", "the", "point", "histories" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L72-L88
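A small runnable sketch of the binary-point normalization above: map 'inactive'/'active' to 0/1, then resample to one-second means (timestamps are made up):

import pandas as pd

idx = pd.to_datetime(['2024-01-01 00:00:00.2',
                      '2024-01-01 00:00:00.7',
                      '2024-01-01 00:00:01.4'])
s = pd.Series(['inactive', 'active', 'active'], index=idx)
print(s.replace(['inactive', 'active'], [0, 1]).resample('1s').mean())
# 00:00:00 -> 0.5, 00:00:01 -> 1.0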
4,279
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.save
def save(self, filename=None): """ Save the point histories to sqlite3 database. Save the device object properties to a pickle file so the device can be reloaded. """ if filename: if ".db" in filename: filename = filename.split(".")[0] self.properties.db_name = filename else: self.properties.db_name = "{}".format(self.properties.name) # Does file exist? If so, append data if os.path.isfile("{}.db".format(self.properties.db_name)): his = self._read_from_sql( 'select * from "{}"'.format("history"), self.properties.db_name ) his.index = his["index"].apply(Timestamp) try: last = his.index[-1] df_to_backup = self.backup_histories_df()[last:] except IndexError: df_to_backup = self.backup_histories_df() else: self._log.debug("Creating a new backup database") df_to_backup = self.backup_histories_df() # DataFrames that will be saved to SQL with contextlib.closing( sqlite3.connect("{}.db".format(self.properties.db_name)) ) as con: sql.to_sql( df_to_backup, name="history", con=con, index_label="index", index=True, if_exists="append", ) # Saving other properties to a pickle file... prop_backup = {} prop_backup["device"] = self.dev_properties_df() prop_backup["points"] = self.points_properties_df() with open("{}.bin".format(self.properties.db_name), "wb") as file: pickle.dump(prop_backup, file) self._log.info("Device saved to {}.db".format(self.properties.db_name))
python
def save(self, filename=None): if filename: if ".db" in filename: filename = filename.split(".")[0] self.properties.db_name = filename else: self.properties.db_name = "{}".format(self.properties.name) # Does file exist? If so, append data if os.path.isfile("{}.db".format(self.properties.db_name)): his = self._read_from_sql( 'select * from "{}"'.format("history"), self.properties.db_name ) his.index = his["index"].apply(Timestamp) try: last = his.index[-1] df_to_backup = self.backup_histories_df()[last:] except IndexError: df_to_backup = self.backup_histories_df() else: self._log.debug("Creating a new backup database") df_to_backup = self.backup_histories_df() # DataFrames that will be saved to SQL with contextlib.closing( sqlite3.connect("{}.db".format(self.properties.db_name)) ) as con: sql.to_sql( df_to_backup, name="history", con=con, index_label="index", index=True, if_exists="append", ) # Saving other properties to a pickle file... prop_backup = {} prop_backup["device"] = self.dev_properties_df() prop_backup["points"] = self.points_properties_df() with open("{}.bin".format(self.properties.db_name), "wb") as file: pickle.dump(prop_backup, file) self._log.info("Device saved to {}.db".format(self.properties.db_name))
[ "def", "save", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", ":", "if", "\".db\"", "in", "filename", ":", "filename", "=", "filename", ".", "split", "(", "\".\"", ")", "[", "0", "]", "self", ".", "properties", ".", "db_name", "=", "filename", "else", ":", "self", ".", "properties", ".", "db_name", "=", "\"{}\"", ".", "format", "(", "self", ".", "properties", ".", "name", ")", "# Does file exist? If so, append data", "if", "os", ".", "path", ".", "isfile", "(", "\"{}.db\"", ".", "format", "(", "self", ".", "properties", ".", "db_name", ")", ")", ":", "his", "=", "self", ".", "_read_from_sql", "(", "'select * from \"{}\"'", ".", "format", "(", "\"history\"", ")", ",", "self", ".", "properties", ".", "db_name", ")", "his", ".", "index", "=", "his", "[", "\"index\"", "]", ".", "apply", "(", "Timestamp", ")", "try", ":", "last", "=", "his", ".", "index", "[", "-", "1", "]", "df_to_backup", "=", "self", ".", "backup_histories_df", "(", ")", "[", "last", ":", "]", "except", "IndexError", ":", "df_to_backup", "=", "self", ".", "backup_histories_df", "(", ")", "else", ":", "self", ".", "_log", ".", "debug", "(", "\"Creating a new backup database\"", ")", "df_to_backup", "=", "self", ".", "backup_histories_df", "(", ")", "# DataFrames that will be saved to SQL", "with", "contextlib", ".", "closing", "(", "sqlite3", ".", "connect", "(", "\"{}.db\"", ".", "format", "(", "self", ".", "properties", ".", "db_name", ")", ")", ")", "as", "con", ":", "sql", ".", "to_sql", "(", "df_to_backup", ",", "name", "=", "\"history\"", ",", "con", "=", "con", ",", "index_label", "=", "\"index\"", ",", "index", "=", "True", ",", "if_exists", "=", "\"append\"", ",", ")", "# Saving other properties to a pickle file...", "prop_backup", "=", "{", "}", "prop_backup", "[", "\"device\"", "]", "=", "self", ".", "dev_properties_df", "(", ")", "prop_backup", "[", "\"points\"", "]", "=", "self", ".", "points_properties_df", "(", ")", "with", "open", "(", "\"{}.bin\"", ".", "format", "(", "self", ".", "properties", ".", "db_name", ")", ",", "\"wb\"", ")", "as", "file", ":", "pickle", ".", "dump", "(", "prop_backup", ",", "file", ")", "self", ".", "_log", ".", "info", "(", "\"Device saved to {}.db\"", ".", "format", "(", "self", ".", "properties", ".", "db_name", ")", ")" ]
Save the point histories to sqlite3 database. Save the device object properties to a pickle file so the device can be reloaded.
[ "Save", "the", "point", "histories", "to", "sqlite3", "database", ".", "Save", "the", "device", "object", "properties", "to", "a", "pickle", "file", "so", "the", "device", "can", "be", "reloaded", "." ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L90-L138
4,280
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.points_from_sql
def points_from_sql(self, db_name): """ Retrieve point list from SQL database """ points = self._read_from_sql("SELECT * FROM history;", db_name) return list(points.columns.values)[1:]
python
def points_from_sql(self, db_name): points = self._read_from_sql("SELECT * FROM history;", db_name) return list(points.columns.values)[1:]
[ "def", "points_from_sql", "(", "self", ",", "db_name", ")", ":", "points", "=", "self", ".", "_read_from_sql", "(", "\"SELECT * FROM history;\"", ",", "db_name", ")", "return", "list", "(", "points", ".", "columns", ".", "values", ")", "[", "1", ":", "]" ]
Retrieve point list from SQL database
[ "Retrieve", "point", "list", "from", "SQL", "database" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L140-L145
4,281
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.his_from_sql
def his_from_sql(self, db_name, point): """ Retrieve point histories from SQL database """ his = self._read_from_sql('select * from "%s"' % "history", db_name) his.index = his["index"].apply(Timestamp) return his.set_index("index")[point]
python
def his_from_sql(self, db_name, point): his = self._read_from_sql('select * from "%s"' % "history", db_name) his.index = his["index"].apply(Timestamp) return his.set_index("index")[point]
[ "def", "his_from_sql", "(", "self", ",", "db_name", ",", "point", ")", ":", "his", "=", "self", ".", "_read_from_sql", "(", "'select * from \"%s\"'", "%", "\"history\"", ",", "db_name", ")", "his", ".", "index", "=", "his", "[", "\"index\"", "]", ".", "apply", "(", "Timestamp", ")", "return", "his", ".", "set_index", "(", "\"index\"", ")", "[", "point", "]" ]
Retrieve point histories from SQL database
[ "Retrive", "point", "histories", "from", "SQL", "database" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L147-L153
4,282
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.read_point_prop
def read_point_prop(self, device_name, point): """ Points properties retrieved from pickle """ with open("%s.bin" % device_name, "rb") as file: return pickle.load(file)["points"][point]
python
def read_point_prop(self, device_name, point): with open("%s.bin" % device_name, "rb") as file: return pickle.load(file)["points"][point]
[ "def", "read_point_prop", "(", "self", ",", "device_name", ",", "point", ")", ":", "with", "open", "(", "\"%s.bin\"", "%", "device_name", ",", "\"rb\"", ")", "as", "file", ":", "return", "pickle", ".", "load", "(", "file", ")", "[", "\"points\"", "]", "[", "point", "]" ]
Points properties retrieved from pickle
[ "Points", "properties", "retrieved", "from", "pickle" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L161-L166
4,283
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.read_dev_prop
def read_dev_prop(self, device_name): """ Device properties retrieved from pickle """ with open("{}.bin".format(device_name), "rb") as file: return pickle.load(file)["device"]
python
def read_dev_prop(self, device_name): with open("{}.bin".format(device_name), "rb") as file: return pickle.load(file)["device"]
[ "def", "read_dev_prop", "(", "self", ",", "device_name", ")", ":", "with", "open", "(", "\"{}.bin\"", ".", "format", "(", "device_name", ")", ",", "\"rb\"", ")", "as", "file", ":", "return", "pickle", ".", "load", "(", "file", ")", "[", "\"device\"", "]" ]
Device properties retrieved from pickle
[ "Device", "properties", "retrieved", "from", "pickle" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L168-L173
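A self-contained sketch of the pickle layout that save, read_point_prop and read_dev_prop share: a dict with 'device' and 'points' keys (the contents here are made up):

import pickle

prop_backup = {'device': {'name': 'demo'},
               'points': {'temp': {'units': 'degC'}}}
with open('demo_device.bin', 'wb') as f:
    pickle.dump(prop_backup, f)
with open('demo_device.bin', 'rb') as f:
    assert pickle.load(f)['points']['temp'] == {'units': 'degC'}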
4,284
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
BooleanPoint.value
def value(self): """ Read the value from the BACnet network """ try: res = self.properties.device.properties.network.read( "{} {} {} presentValue".format( self.properties.device.properties.address, self.properties.type, str(self.properties.address), ) ) self._trend(res) except Exception: raise Exception("Problem reading : {}".format(self.properties.name)) if res == "inactive": self._key = 0 self._boolKey = False else: self._key = 1 self._boolKey = True return res
python
def value(self): try: res = self.properties.device.properties.network.read( "{} {} {} presentValue".format( self.properties.device.properties.address, self.properties.type, str(self.properties.address), ) ) self._trend(res) except Exception: raise Exception("Problem reading : {}".format(self.properties.name)) if res == "inactive": self._key = 0 self._boolKey = False else: self._key = 1 self._boolKey = True return res
[ "def", "value", "(", "self", ")", ":", "try", ":", "res", "=", "self", ".", "properties", ".", "device", ".", "properties", ".", "network", ".", "read", "(", "\"{} {} {} presentValue\"", ".", "format", "(", "self", ".", "properties", ".", "device", ".", "properties", ".", "address", ",", "self", ".", "properties", ".", "type", ",", "str", "(", "self", ".", "properties", ".", "address", ")", ",", ")", ")", "self", ".", "_trend", "(", "res", ")", "except", "Exception", ":", "raise", "Exception", "(", "\"Problem reading : {}\"", ".", "format", "(", "self", ".", "properties", ".", "name", ")", ")", "if", "res", "==", "\"inactive\"", ":", "self", ".", "_key", "=", "0", "self", ".", "_boolKey", "=", "False", "else", ":", "self", ".", "_key", "=", "1", "self", ".", "_boolKey", "=", "True", "return", "res" ]
Read the value from the BACnet network
[ "Read", "the", "value", "from", "BACnet", "network" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L628-L651
4,285
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
EnumPointOffline.value
def value(self): """ Take last known value as the value """ try: value = self.lastValue except IndexError: value = "NaN" except ValueError: value = "NaN" return value
python
def value(self): try: value = self.lastValue except IndexError: value = "NaN" except ValueError: value = "NaN" return value
[ "def", "value", "(", "self", ")", ":", "try", ":", "value", "=", "self", ".", "lastValue", "except", "IndexError", ":", "value", "=", "\"NaN\"", "except", "ValueError", ":", "value", "=", "\"NaN\"", "return", "value" ]
Take last known value as the value
[ "Take", "last", "known", "value", "as", "the", "value" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L902-L912
4,286
ChristianTremblay/BAC0
BAC0/scripts/Complete.py
Stats_Mixin.network_stats
def network_stats(self): """ Used by Flask to show information about the network """ statistics = {} mstp_networks = [] mstp_map = {} ip_devices = [] bacoids = [] mstp_devices = [] for address, bacoid in self.whois_answer[0].keys(): if ":" in address: net, mac = address.split(":") mstp_networks.append(net) mstp_devices.append(mac) try: mstp_map[net].append(mac) except KeyError: mstp_map[net] = [] mstp_map[net].append(mac) else: net = "ip" mac = address ip_devices.append(address) bacoids.append((bacoid, address)) mstpnetworks = sorted(set(mstp_networks)) statistics["mstp_networks"] = mstpnetworks statistics["ip_devices"] = sorted(ip_devices) statistics["bacoids"] = sorted(bacoids) statistics["mstp_map"] = mstp_map statistics["timestamp"] = str(datetime.now()) statistics["number_of_devices"] = self.number_of_devices statistics["number_of_registered_devices"] = len(self.registered_devices) statistics["print_mstpnetworks"] = self.print_list(mstpnetworks) return statistics
python
def network_stats(self): statistics = {} mstp_networks = [] mstp_map = {} ip_devices = [] bacoids = [] mstp_devices = [] for address, bacoid in self.whois_answer[0].keys(): if ":" in address: net, mac = address.split(":") mstp_networks.append(net) mstp_devices.append(mac) try: mstp_map[net].append(mac) except KeyError: mstp_map[net] = [] mstp_map[net].append(mac) else: net = "ip" mac = address ip_devices.append(address) bacoids.append((bacoid, address)) mstpnetworks = sorted(set(mstp_networks)) statistics["mstp_networks"] = mstpnetworks statistics["ip_devices"] = sorted(ip_devices) statistics["bacoids"] = sorted(bacoids) statistics["mstp_map"] = mstp_map statistics["timestamp"] = str(datetime.now()) statistics["number_of_devices"] = self.number_of_devices statistics["number_of_registered_devices"] = len(self.registered_devices) statistics["print_mstpnetworks"] = self.print_list(mstpnetworks) return statistics
[ "def", "network_stats", "(", "self", ")", ":", "statistics", "=", "{", "}", "mstp_networks", "=", "[", "]", "mstp_map", "=", "{", "}", "ip_devices", "=", "[", "]", "bacoids", "=", "[", "]", "mstp_devices", "=", "[", "]", "for", "address", ",", "bacoid", "in", "self", ".", "whois_answer", "[", "0", "]", ".", "keys", "(", ")", ":", "if", "\":\"", "in", "address", ":", "net", ",", "mac", "=", "address", ".", "split", "(", "\":\"", ")", "mstp_networks", ".", "append", "(", "net", ")", "mstp_devices", ".", "append", "(", "mac", ")", "try", ":", "mstp_map", "[", "net", "]", ".", "append", "(", "mac", ")", "except", "KeyError", ":", "mstp_map", "[", "net", "]", "=", "[", "]", "mstp_map", "[", "net", "]", ".", "append", "(", "mac", ")", "else", ":", "net", "=", "\"ip\"", "mac", "=", "address", "ip_devices", ".", "append", "(", "address", ")", "bacoids", ".", "append", "(", "(", "bacoid", ",", "address", ")", ")", "mstpnetworks", "=", "sorted", "(", "set", "(", "mstp_networks", ")", ")", "statistics", "[", "\"mstp_networks\"", "]", "=", "mstpnetworks", "statistics", "[", "\"ip_devices\"", "]", "=", "sorted", "(", "ip_devices", ")", "statistics", "[", "\"bacoids\"", "]", "=", "sorted", "(", "bacoids", ")", "statistics", "[", "\"mstp_map\"", "]", "=", "mstp_map", "statistics", "[", "\"timestamp\"", "]", "=", "str", "(", "datetime", ".", "now", "(", ")", ")", "statistics", "[", "\"number_of_devices\"", "]", "=", "self", ".", "number_of_devices", "statistics", "[", "\"number_of_registered_devices\"", "]", "=", "len", "(", "self", ".", "registered_devices", ")", "statistics", "[", "\"print_mstpnetworks\"", "]", "=", "self", ".", "print_list", "(", "mstpnetworks", ")", "return", "statistics" ]
Used by Flask to show information about the network
[ "Used", "by", "Flask", "to", "show", "informations", "on", "the", "network" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/scripts/Complete.py#L101-L135
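The bookkeeping above hinges on the 'network:mac' address convention for MS/TP devices; a self-contained sketch with made-up addresses (setdefault condenses the try/except KeyError used in the original):

addresses = ['192.168.1.20', '2:5', '2:8', '3:1']  # made-up BACnet addresses
mstp_map, ip_devices = {}, []
for address in addresses:
    if ':' in address:
        net, mac = address.split(':')
        mstp_map.setdefault(net, []).append(mac)
    else:
        ip_devices.append(address)
print(sorted(mstp_map))  # ['2', '3']
print(mstp_map['2'])     # ['5', '8']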
4,287
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected.connect
def connect(self, *, db=None): """ A connected device can be switched to 'database mode' where the device will not use the BACnet network but instead obtain its contents from a previously stored database. """ if db: self.poll(command="stop") self.properties.db_name = db.split(".")[0] self.new_state(DeviceFromDB) else: self._log.warning( "Already connected, provide db arg if you want to connect to db" )
python
def connect(self, *, db=None): if db: self.poll(command="stop") self.properties.db_name = db.split(".")[0] self.new_state(DeviceFromDB) else: self._log.warning( "Already connected, provide db arg if you want to connect to db" )
[ "def", "connect", "(", "self", ",", "*", ",", "db", "=", "None", ")", ":", "if", "db", ":", "self", ".", "poll", "(", "command", "=", "\"stop\"", ")", "self", ".", "properties", ".", "db_name", "=", "db", ".", "split", "(", "\".\"", ")", "[", "0", "]", "self", ".", "new_state", "(", "DeviceFromDB", ")", "else", ":", "self", ".", "_log", ".", "warning", "(", "\"Already connected, provide db arg if you want to connect to db\"", ")" ]
A connected device can be switched to 'database mode' where the device will not use the BACnet network but instead obtain its contents from a previously stored database.
[ "A", "connected", "device", "can", "be", "switched", "to", "database", "mode", "where", "the", "device", "will", "not", "use", "the", "BACnet", "network", "but", "instead", "obtain", "its", "contents", "from", "a", "previously", "stored", "database", "." ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L453-L466
4,288
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected.df
def df(self, list_of_points, force_read=True): """ When connected, calling DF should force a reading on the network. """ his = [] for point in list_of_points: try: his.append(self._findPoint(point, force_read=force_read).history) except ValueError as ve: self._log.error("{}".format(ve)) continue if not _PANDAS: return dict(zip(list_of_points, his)) return pd.DataFrame(dict(zip(list_of_points, his)))
python
def df(self, list_of_points, force_read=True): his = [] for point in list_of_points: try: his.append(self._findPoint(point, force_read=force_read).history) except ValueError as ve: self._log.error("{}".format(ve)) continue if not _PANDAS: return dict(zip(list_of_points, his)) return pd.DataFrame(dict(zip(list_of_points, his)))
[ "def", "df", "(", "self", ",", "list_of_points", ",", "force_read", "=", "True", ")", ":", "his", "=", "[", "]", "for", "point", "in", "list_of_points", ":", "try", ":", "his", ".", "append", "(", "self", ".", "_findPoint", "(", "point", ",", "force_read", "=", "force_read", ")", ".", "history", ")", "except", "ValueError", "as", "ve", ":", "self", ".", "_log", ".", "error", "(", "\"{}\"", ".", "format", "(", "ve", ")", ")", "continue", "if", "not", "_PANDAS", ":", "return", "dict", "(", "zip", "(", "list_of_points", ",", "his", ")", ")", "return", "pd", ".", "DataFrame", "(", "dict", "(", "zip", "(", "list_of_points", ",", "his", ")", ")", ")" ]
When connected, calling DF should force a reading on the network.
[ "When", "connected", "calling", "DF", "should", "force", "a", "reading", "on", "the", "network", "." ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L468-L482
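A usage sketch for the history frame above; controller stands for a connected device, and the point names are illustrative. Without pandas the same call falls back to a plain dict.

    histories = controller.df(["ZN-T", "ZN-SP"], force_read=True)
    # DataFrame when pandas is importable, otherwise {name: history, ...};
    # unknown point names are logged as errors and skipped rather than raised.
    print(histories)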
4,289
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected._buildPointList
def _buildPointList(self): """ Upon connection, build the device point list and properties. """ try: self.properties.pss.value = self.properties.network.read( "{} device {} protocolServicesSupported".format( self.properties.address, self.properties.device_id ) ) except NoResponseFromController as error: self._log.error("Controller not found, aborting. ({})".format(error)) return ("Not Found", "", [], []) except SegmentationNotSupported as error: self._log.warning("Segmentation not supported") self.segmentation_supported = False self.new_state(DeviceDisconnected) self.properties.name = self.properties.network.read( "{} device {} objectName".format( self.properties.address, self.properties.device_id ) ) self._log.info( "Device {}:[{}] found... building points list".format( self.properties.device_id, self.properties.name ) ) try: self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints( self.custom_object_list ) if self.properties.pollDelay > 0: self.poll(delay=self.properties.pollDelay) except NoResponseFromController as error: self._log.error("Cannot retrieve object list, disconnecting...") self.segmentation_supported = False self.new_state(DeviceDisconnected) except IndexError as error: self._log.error("Device creation failed... disconnecting") self.new_state(DeviceDisconnected)
python
def _buildPointList(self): try: self.properties.pss.value = self.properties.network.read( "{} device {} protocolServicesSupported".format( self.properties.address, self.properties.device_id ) ) except NoResponseFromController as error: self._log.error("Controller not found, aborting. ({})".format(error)) return ("Not Found", "", [], []) except SegmentationNotSupported as error: self._log.warning("Segmentation not supported") self.segmentation_supported = False self.new_state(DeviceDisconnected) self.properties.name = self.properties.network.read( "{} device {} objectName".format( self.properties.address, self.properties.device_id ) ) self._log.info( "Device {}:[{}] found... building points list".format( self.properties.device_id, self.properties.name ) ) try: self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints( self.custom_object_list ) if self.properties.pollDelay > 0: self.poll(delay=self.properties.pollDelay) except NoResponseFromController as error: self._log.error("Cannot retrieve object list, disconnecting...") self.segmentation_supported = False self.new_state(DeviceDisconnected) except IndexError as error: self._log.error("Device creation failed... disconnecting") self.new_state(DeviceDisconnected)
[ "def", "_buildPointList", "(", "self", ")", ":", "try", ":", "self", ".", "properties", ".", "pss", ".", "value", "=", "self", ".", "properties", ".", "network", ".", "read", "(", "\"{} device {} protocolServicesSupported\"", ".", "format", "(", "self", ".", "properties", ".", "address", ",", "self", ".", "properties", ".", "device_id", ")", ")", "except", "NoResponseFromController", "as", "error", ":", "self", ".", "_log", ".", "error", "(", "\"Controller not found, aborting. ({})\"", ".", "format", "(", "error", ")", ")", "return", "(", "\"Not Found\"", ",", "\"\"", ",", "[", "]", ",", "[", "]", ")", "except", "SegmentationNotSupported", "as", "error", ":", "self", ".", "_log", ".", "warning", "(", "\"Segmentation not supported\"", ")", "self", ".", "segmentation_supported", "=", "False", "self", ".", "new_state", "(", "DeviceDisconnected", ")", "self", ".", "properties", ".", "name", "=", "self", ".", "properties", ".", "network", ".", "read", "(", "\"{} device {} objectName\"", ".", "format", "(", "self", ".", "properties", ".", "address", ",", "self", ".", "properties", ".", "device_id", ")", ")", "self", ".", "_log", ".", "info", "(", "\"Device {}:[{}] found... building points list\"", ".", "format", "(", "self", ".", "properties", ".", "device_id", ",", "self", ".", "properties", ".", "name", ")", ")", "try", ":", "self", ".", "properties", ".", "objects_list", ",", "self", ".", "points", ",", "self", ".", "trendlogs", "=", "self", ".", "_discoverPoints", "(", "self", ".", "custom_object_list", ")", "if", "self", ".", "properties", ".", "pollDelay", ">", "0", ":", "self", ".", "poll", "(", "delay", "=", "self", ".", "properties", ".", "pollDelay", ")", "except", "NoResponseFromController", "as", "error", ":", "self", ".", "_log", ".", "error", "(", "\"Cannot retrieve object list, disconnecting...\"", ")", "self", ".", "segmentation_supported", "=", "False", "self", ".", "new_state", "(", "DeviceDisconnected", ")", "except", "IndexError", "as", "error", ":", "self", ".", "_log", ".", "error", "(", "\"Device creation failed... disconnecting\"", ")", "self", ".", "new_state", "(", "DeviceDisconnected", ")" ]
Upon connection, build the device point list and properties.
[ "Upon", "connection", "to", "build", "the", "device", "point", "list", "and", "properties", "." ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L484-L527
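The method above leans on a small fallback pattern: a failed network read demotes the device to a disconnected state instead of letting the exception propagate. A runnable toy of just that pattern, with plain dicts standing in for the BAC0 objects:

    class NoResponseFromController(Exception):
        pass

    def read_or_disconnect(device, read):
        # Try the read; on silence from the controller, record the failure
        # and fall back to the disconnected state instead of raising.
        try:
            return read()
        except NoResponseFromController:
            device["segmentation_supported"] = False
            device["state"] = "DeviceDisconnected"

    def silent_read():
        raise NoResponseFromController()

    dev = {"segmentation_supported": True, "state": "DeviceConnected"}
    read_or_disconnect(dev, silent_read)
    print(dev)  # {'segmentation_supported': False, 'state': 'DeviceDisconnected'}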
4,290
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected._findPoint
def _findPoint(self, name, force_read=True): """ Used by getter and setter functions """ for point in self.points: if point.properties.name == name: if force_read: point.value return point raise ValueError("{} doesn't exist in controller".format(name))
python
def _findPoint(self, name, force_read=True): for point in self.points: if point.properties.name == name: if force_read: point.value return point raise ValueError("{} doesn't exist in controller".format(name))
[ "def", "_findPoint", "(", "self", ",", "name", ",", "force_read", "=", "True", ")", ":", "for", "point", "in", "self", ".", "points", ":", "if", "point", ".", "properties", ".", "name", "==", "name", ":", "if", "force_read", ":", "point", ".", "value", "return", "point", "raise", "ValueError", "(", "\"{} doesn't exist in controller\"", ".", "format", "(", "name", ")", ")" ]
Used by getter and setter functions
[ "Used", "by", "getter", "and", "setter", "functions" ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L629-L638
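Callers of the lookup above get either a live point or a ValueError; a hypothetical use, with an illustrative point name:

    try:
        point = device._findPoint("ZN-T", force_read=False)  # skip the refresh read
        print(point.properties.name)
    except ValueError as err:
        print(err)  # "ZN-T doesn't exist in controller"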
4,291
ChristianTremblay/BAC0
BAC0/core/functions/discoverPoints.py
discoverPoints
def discoverPoints(bacnetapp, address, devID): """ Discover the BACnet points in a BACnet device. :param bacnetapp: The app itself so we can call read :param address: address of the device as a string (ex. '2:5') :param devID: device ID of the BACnet device as a string (ex. '1001') :returns: a tuple with deviceName, pss, objList, df * *deviceName* : name of the device * *pss* : protocol services supported * *objList* : list of BACnet objects (ex. analogInput, 1) * *df* : a DataFrame containing pointType, pointAddress, pointName, description, presentValue and units If pandas can't be found, df will be a simple array """ pss = bacnetapp.read( "{} device {} protocolServicesSupported".format(address, devID) ) deviceName = bacnetapp.read("{} device {} objectName".format(address, devID)) # print('Device {}- building points list'.format(deviceName)) objList = bacnetapp.read("{} device {} objectList".format(address, devID)) newLine = [] result = [] points = [] for pointType, pointAddr in objList: if "binary" in pointType: # BI/BO/BV newLine = [pointType, pointAddr] infos = bacnetapp.readMultiple( "{} {} {} objectName description presentValue inactiveText activeText".format( address, pointType, pointAddr ) ) newLine.extend(infos[:-2]) newLine.extend([infos[-2:]]) newPoint = BooleanPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "multiState" in pointType: # MI/MV/MO newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue stateText".format( address, pointType, pointAddr ) ) ) newPoint = EnumPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "analog" in pointType: # AI/AO/AV newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue units".format( address, pointType, pointAddr ) ) ) newPoint = NumericPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) else: continue # skip result.append(newLine) points.append(newPoint) if _PANDA: df = pd.DataFrame( result, columns=[ "pointType", "pointAddress", "pointName", "description", "presentValue", "units_state", ], ).set_index(["pointName"]) else: df = result # print('Ready!') return (deviceName, pss, objList, df, points)
python
def discoverPoints(bacnetapp, address, devID): pss = bacnetapp.read( "{} device {} protocolServicesSupported".format(address, devID) ) deviceName = bacnetapp.read("{} device {} objectName".format(address, devID)) # print('Device {}- building points list'.format(deviceName)) objList = bacnetapp.read("{} device {} objectList".format(address, devID)) newLine = [] result = [] points = [] for pointType, pointAddr in objList: if "binary" in pointType: # BI/BO/BV newLine = [pointType, pointAddr] infos = bacnetapp.readMultiple( "{} {} {} objectName description presentValue inactiveText activeText".format( address, pointType, pointAddr ) ) newLine.extend(infos[:-2]) newLine.extend([infos[-2:]]) newPoint = BooleanPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "multiState" in pointType: # MI/MV/MO newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue stateText".format( address, pointType, pointAddr ) ) ) newPoint = EnumPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "analog" in pointType: # AI/AO/AV newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue units".format( address, pointType, pointAddr ) ) ) newPoint = NumericPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) else: continue # skip result.append(newLine) points.append(newPoint) if _PANDA: df = pd.DataFrame( result, columns=[ "pointType", "pointAddress", "pointName", "description", "presentValue", "units_state", ], ).set_index(["pointName"]) else: df = result # print('Ready!') return (deviceName, pss, objList, df, points)
[ "def", "discoverPoints", "(", "bacnetapp", ",", "address", ",", "devID", ")", ":", "pss", "=", "bacnetapp", ".", "read", "(", "\"{} device {} protocolServicesSupported\"", ".", "format", "(", "address", ",", "devID", ")", ")", "deviceName", "=", "bacnetapp", ".", "read", "(", "\"{} device {} objectName\"", ".", "format", "(", "address", ",", "devID", ")", ")", "# print('Device {}- building points list'.format(deviceName))", "objList", "=", "bacnetapp", ".", "read", "(", "\"{} device {] objectList\"", ".", "format", "(", "address", ",", "devID", ")", ")", "newLine", "=", "[", "]", "result", "=", "[", "]", "points", "=", "[", "]", "for", "pointType", ",", "pointAddr", "in", "objList", ":", "if", "\"binary\"", "in", "pointType", ":", "# BI/BO/BV", "newLine", "=", "[", "pointType", ",", "pointAddr", "]", "infos", "=", "bacnetapp", ".", "readMultiple", "(", "\"{} {} {} objectName description presentValue inactiveText activeText\"", ".", "format", "(", "address", ",", "pointType", ",", "pointAddr", ")", ")", "newLine", ".", "extend", "(", "infos", "[", ":", "-", "2", "]", ")", "newLine", ".", "extend", "(", "[", "infos", "[", "-", "2", ":", "]", "]", ")", "newPoint", "=", "BooleanPoint", "(", "pointType", "=", "newLine", "[", "0", "]", ",", "pointAddress", "=", "newLine", "[", "1", "]", ",", "pointName", "=", "newLine", "[", "2", "]", ",", "description", "=", "newLine", "[", "3", "]", ",", "presentValue", "=", "newLine", "[", "4", "]", ",", "units_state", "=", "newLine", "[", "5", "]", ",", ")", "elif", "\"multiState\"", "in", "pointType", ":", "# MI/MV/MO", "newLine", "=", "[", "pointType", ",", "pointAddr", "]", "newLine", ".", "extend", "(", "bacnetapp", ".", "readMultiple", "(", "\"{} {} {} objectName description presentValue stateText\"", ".", "format", "(", "address", ",", "pointType", ",", "pointAddr", ")", ")", ")", "newPoint", "=", "EnumPoint", "(", "pointType", "=", "newLine", "[", "0", "]", ",", "pointAddress", "=", "newLine", "[", "1", "]", ",", "pointName", "=", "newLine", "[", "2", "]", ",", "description", "=", "newLine", "[", "3", "]", ",", "presentValue", "=", "newLine", "[", "4", "]", ",", "units_state", "=", "newLine", "[", "5", "]", ",", ")", "elif", "\"analog\"", "in", "pointType", ":", "# AI/AO/AV", "newLine", "=", "[", "pointType", ",", "pointAddr", "]", "newLine", ".", "extend", "(", "bacnetapp", ".", "readMultiple", "(", "\"{} {} {} objectName description presentValue units\"", ".", "format", "(", "address", ",", "pointType", ",", "pointAddr", ")", ")", ")", "newPoint", "=", "NumericPoint", "(", "pointType", "=", "newLine", "[", "0", "]", ",", "pointAddress", "=", "newLine", "[", "1", "]", ",", "pointName", "=", "newLine", "[", "2", "]", ",", "description", "=", "newLine", "[", "3", "]", ",", "presentValue", "=", "newLine", "[", "4", "]", ",", "units_state", "=", "newLine", "[", "5", "]", ",", ")", "else", ":", "continue", "# skip", "result", ".", "append", "(", "newLine", ")", "points", ".", "append", "(", "newPoint", ")", "if", "_PANDA", ":", "df", "=", "pd", ".", "DataFrame", "(", "result", ",", "columns", "=", "[", "\"pointType\"", ",", "\"pointAddress\"", ",", "\"pointName\"", ",", "\"description\"", ",", "\"presentValue\"", ",", "\"units_state\"", ",", "]", ",", ")", ".", "set_index", "(", "[", "\"pointName\"", "]", ")", "else", ":", "df", "=", "result", "# print('Ready!')", "return", "(", "deviceName", ",", "pss", ",", "objList", ",", "df", ",", "points", ")" ]
Discover the BACnet points in a BACnet device. :param bacnetapp: The app itself so we can call read :param address: address of the device as a string (ex. '2:5') :param devID: device ID of the BACnet device as a string (ex. '1001') :returns: a tuple with deviceName, pss, objList, df * *deviceName* : name of the device * *pss* : protocol services supported * *objList* : list of BACnet objects (ex. analogInput, 1) * *df* : a DataFrame containing pointType, pointAddress, pointName, description, presentValue and units If pandas can't be found, df will be a simple array
[ "Discover", "the", "BACnet", "points", "in", "a", "BACnet", "device", "." ]
8d95b065ea068524a08f5b0c34322ebeeba95d06
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/discoverPoints.py#L28-L139
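A usage sketch matching the address and device-ID formats given in the docstring; bacnet stands for the running BACnet application object passed as the first argument.

    deviceName, pss, objList, df, points = discoverPoints(bacnet, "2:5", "1001")
    print(deviceName)
    if hasattr(df, "head"):   # pandas present: df is a DataFrame indexed by pointName
        print(df.head())
    else:                     # pandas absent: df is a plain list of rows
        print(df[:5])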
4,292
regebro/svg.path
src/svg/path/path.py
CubicBezier.point
def point(self, pos): """Calculate the x,y position at a certain position of the path""" return ((1 - pos) ** 3 * self.start) + \ (3 * (1 - pos) ** 2 * pos * self.control1) + \ (3 * (1 - pos) * pos ** 2 * self.control2) + \ (pos ** 3 * self.end)
python
def point(self, pos): return ((1 - pos) ** 3 * self.start) + \ (3 * (1 - pos) ** 2 * pos * self.control1) + \ (3 * (1 - pos) * pos ** 2 * self.control2) + \ (pos ** 3 * self.end)
[ "def", "point", "(", "self", ",", "pos", ")", ":", "return", "(", "(", "1", "-", "pos", ")", "**", "3", "*", "self", ".", "start", ")", "+", "(", "3", "*", "(", "1", "-", "pos", ")", "**", "2", "*", "pos", "*", "self", ".", "control1", ")", "+", "(", "3", "*", "(", "1", "-", "pos", ")", "*", "pos", "**", "2", "*", "self", ".", "control2", ")", "+", "(", "pos", "**", "3", "*", "self", ".", "end", ")" ]
Calculate the x,y position at a certain position of the path
[ "Calculate", "the", "x", "y", "position", "at", "a", "certain", "position", "of", "the", "path" ]
cb58e104e5aa3472be205c75da59690db30aecc9
https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L91-L96
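svg.path represents 2-D points as complex numbers (x + y*1j), so the cubic Bernstein form above can be checked directly; the anchor and control coordinates here are illustrative.

    from svg.path import CubicBezier

    curve = CubicBezier(start=0j, control1=1 + 2j, control2=3 + 2j, end=4 + 0j)
    mid = curve.point(0.5)      # pos runs from 0.0 (start) to 1.0 (end)
    print(mid.real, mid.imag)   # 2.0 1.5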
4,293
zeekay/flask-uwsgi-websocket
flask_uwsgi_websocket/websocket.py
WebSocket.register_blueprint
def register_blueprint(self, blueprint, **options): ''' Registers a blueprint on the WebSockets. ''' first_registration = False if blueprint.name in self.blueprints: assert self.blueprints[blueprint.name] is blueprint, \ 'A blueprint\'s name collision occurred between %r and ' \ '%r. Both share the same name "%s". Blueprints that ' \ 'are created on the fly need unique names.' % \ (blueprint, self.blueprints[blueprint.name], blueprint.name) else: self.blueprints[blueprint.name] = blueprint first_registration = True blueprint.register(self, options, first_registration)
python
def register_blueprint(self, blueprint, **options): ''' Registers a blueprint on the WebSockets. ''' first_registration = False if blueprint.name in self.blueprints: assert self.blueprints[blueprint.name] is blueprint, \ 'A blueprint\'s name collision occurred between %r and ' \ '%r. Both share the same name "%s". Blueprints that ' \ 'are created on the fly need unique names.' % \ (blueprint, self.blueprints[blueprint.name], blueprint.name) else: self.blueprints[blueprint.name] = blueprint first_registration = True blueprint.register(self, options, first_registration)
[ "def", "register_blueprint", "(", "self", ",", "blueprint", ",", "*", "*", "options", ")", ":", "first_registration", "=", "False", "if", "blueprint", ".", "name", "in", "self", ".", "blueprints", ":", "assert", "self", ".", "blueprints", "[", "blueprint", ".", "name", "]", "is", "blueprint", ",", "'A blueprint\\'s name collision occurred between %r and '", "'%r. Both share the same name \"%s\". Blueprints that '", "'are created on the fly need unique names.'", "%", "(", "blueprint", ",", "self", ".", "blueprints", "[", "blueprint", ".", "name", "]", ",", "blueprint", ".", "name", ")", "else", ":", "self", ".", "blueprints", "[", "blueprint", ".", "name", "]", "=", "blueprint", "first_registration", "=", "True", "blueprint", ".", "register", "(", "self", ",", "options", ",", "first_registration", ")" ]
Registers a blueprint on the WebSockets.
[ "Registers", "a", "blueprint", "on", "the", "WebSockets", "." ]
d0264d220d570a37100ef01be10a0f01fef1e9df
https://github.com/zeekay/flask-uwsgi-websocket/blob/d0264d220d570a37100ef01be10a0f01fef1e9df/flask_uwsgi_websocket/websocket.py#L151-L165
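The interesting part of the method above is the collision guard: re-registering the very same blueprint object is tolerated, while a different object reusing the name trips the assert. A runnable toy of just that logic (not the library's API):

    blueprints = {}

    def register(name, bp):
        first = name not in blueprints
        if not first:
            # a repeated name is only legal when it is the very same object
            assert blueprints[name] is bp, "name collision for %r" % name
        else:
            blueprints[name] = bp
        return first

    chat = object()
    print(register("chat", chat))   # True: first registration
    print(register("chat", chat))   # False: same object, tolerated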
4,294
mgedmin/findimports
findimports.py
adjust_lineno
def adjust_lineno(filename, lineno, name): """Adjust the line number of an import. Needed because import statements can span multiple lines, and our lineno is always the first line number. """ line = linecache.getline(filename, lineno) # Hack warning: might be fooled by comments rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]') while line and not rx.search(line): lineno += 1 line = linecache.getline(filename, lineno) return lineno
python
def adjust_lineno(filename, lineno, name): line = linecache.getline(filename, lineno) # Hack warning: might be fooled by comments rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]') while line and not rx.search(line): lineno += 1 line = linecache.getline(filename, lineno) return lineno
[ "def", "adjust_lineno", "(", "filename", ",", "lineno", ",", "name", ")", ":", "line", "=", "linecache", ".", "getline", "(", "filename", ",", "lineno", ")", "# Hack warning: might be fooled by comments", "rx", "=", "re", ".", "compile", "(", "r'\\b%s\\b'", "%", "re", ".", "escape", "(", "name", ")", "if", "name", "!=", "'*'", "else", "'[*]'", ")", "while", "line", "and", "not", "rx", ".", "search", "(", "line", ")", ":", "lineno", "+=", "1", "line", "=", "linecache", ".", "getline", "(", "filename", ",", "lineno", ")", "return", "lineno" ]
Adjust the line number of an import. Needed because import statements can span multiple lines, and our lineno is always the first line number.
[ "Adjust", "the", "line", "number", "of", "an", "import", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L89-L101
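A throwaway demonstration, assuming the adjust_lineno definition above is in scope: the import statement starts on line 1 of the generated file, but the bound name b only appears on line 2, so the reported line number is nudged forward.

    import tempfile

    src = "from pkg import (\n    b,\n)\n"
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(src)
        path = f.name

    print(adjust_lineno(path, 1, "b"))   # 2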
4,295
mgedmin/findimports
findimports.py
ModuleGraph.parsePathname
def parsePathname(self, pathname): """Parse one or more source files. ``pathname`` may be a file name or a directory name. """ if os.path.isdir(pathname): for root, dirs, files in os.walk(pathname): dirs.sort() files.sort() for fn in files: # ignore emacsish junk if fn.endswith('.py') and not fn.startswith('.#'): self.parseFile(os.path.join(root, fn)) elif pathname.endswith('.importcache'): self.readCache(pathname) else: self.parseFile(pathname)
python
def parsePathname(self, pathname): if os.path.isdir(pathname): for root, dirs, files in os.walk(pathname): dirs.sort() files.sort() for fn in files: # ignore emacsish junk if fn.endswith('.py') and not fn.startswith('.#'): self.parseFile(os.path.join(root, fn)) elif pathname.endswith('.importcache'): self.readCache(pathname) else: self.parseFile(pathname)
[ "def", "parsePathname", "(", "self", ",", "pathname", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "pathname", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "pathname", ")", ":", "dirs", ".", "sort", "(", ")", "files", ".", "sort", "(", ")", "for", "fn", "in", "files", ":", "# ignore emacsish junk", "if", "fn", ".", "endswith", "(", "'.py'", ")", "and", "not", "fn", ".", "startswith", "(", "'.#'", ")", ":", "self", ".", "parseFile", "(", "os", ".", "path", ".", "join", "(", "root", ",", "fn", ")", ")", "elif", "pathname", ".", "endswith", "(", "'.importcache'", ")", ":", "self", ".", "readCache", "(", "pathname", ")", "else", ":", "self", ".", "parseFile", "(", "pathname", ")" ]
Parse one or more source files. ``pathname`` may be a file name or a directory name.
[ "Parse", "one", "or", "more", "source", "files", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L417-L433
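A hypothetical use of the dispatch above; the paths are illustrative and constructing ModuleGraph with no arguments is assumed to work.

    graph = ModuleGraph()
    graph.parsePathname("mypackage/")          # directory: walks *.py files in sorted order
    graph.parsePathname("one_module.py")       # single source file
    graph.parsePathname("saved.importcache")   # reload a pickled cache written earlier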
4,296
mgedmin/findimports
findimports.py
ModuleGraph.writeCache
def writeCache(self, filename): """Write the graph to a cache file.""" with open(filename, 'wb') as f: pickle.dump(self.modules, f)
python
def writeCache(self, filename): with open(filename, 'wb') as f: pickle.dump(self.modules, f)
[ "def", "writeCache", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "self", ".", "modules", ",", "f", ")" ]
Write the graph to a cache file.
[ "Write", "the", "graph", "to", "a", "cache", "file", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L435-L438
4,297
mgedmin/findimports
findimports.py
ModuleGraph.readCache
def readCache(self, filename): """Load the graph from a cache file.""" with open(filename, 'rb') as f: self.modules = pickle.load(f)
python
def readCache(self, filename): with open(filename, 'rb') as f: self.modules = pickle.load(f)
[ "def", "readCache", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "self", ".", "modules", "=", "pickle", ".", "load", "(", "f", ")" ]
Load the graph from a cache file.
[ "Load", "the", "graph", "from", "a", "cache", "file", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L440-L443
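The two methods above form a matched pair around pickle; a round-trip sketch with illustrative file names, assuming a graph populated as in the earlier examples.

    graph.writeCache("project.importcache")

    restored = ModuleGraph()
    restored.readCache("project.importcache")
    # the restored graph carries the same module table as the original
    print(sorted(restored.modules) == sorted(graph.modules))   # True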
4,298
mgedmin/findimports
findimports.py
ModuleGraph.parseFile
def parseFile(self, filename): """Parse a single file.""" modname = self.filenameToModname(filename) module = Module(modname, filename) self.modules[modname] = module if self.trackUnusedNames: module.imported_names, module.unused_names = \ find_imports_and_track_names(filename, self.warn_about_duplicates, self.verbose) else: module.imported_names = find_imports(filename) module.unused_names = None dir = os.path.dirname(filename) module.imports = set( [self.findModuleOfName(imp.name, imp.level, filename, dir) for imp in module.imported_names])
python
def parseFile(self, filename): modname = self.filenameToModname(filename) module = Module(modname, filename) self.modules[modname] = module if self.trackUnusedNames: module.imported_names, module.unused_names = \ find_imports_and_track_names(filename, self.warn_about_duplicates, self.verbose) else: module.imported_names = find_imports(filename) module.unused_names = None dir = os.path.dirname(filename) module.imports = set( [self.findModuleOfName(imp.name, imp.level, filename, dir) for imp in module.imported_names])
[ "def", "parseFile", "(", "self", ",", "filename", ")", ":", "modname", "=", "self", ".", "filenameToModname", "(", "filename", ")", "module", "=", "Module", "(", "modname", ",", "filename", ")", "self", ".", "modules", "[", "modname", "]", "=", "module", "if", "self", ".", "trackUnusedNames", ":", "module", ".", "imported_names", ",", "module", ".", "unused_names", "=", "find_imports_and_track_names", "(", "filename", ",", "self", ".", "warn_about_duplicates", ",", "self", ".", "verbose", ")", "else", ":", "module", ".", "imported_names", "=", "find_imports", "(", "filename", ")", "module", ".", "unused_names", "=", "None", "dir", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "module", ".", "imports", "=", "set", "(", "[", "self", ".", "findModuleOfName", "(", "imp", ".", "name", ",", "imp", ".", "level", ",", "filename", ",", "dir", ")", "for", "imp", "in", "module", ".", "imported_names", "]", ")" ]
Parse a single file.
[ "Parse", "a", "single", "file", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L445-L461
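After the parse above, each Module records its resolved imports and, when tracking is enabled, the imported names that were never used. A hypothetical inspection; treating trackUnusedNames as a plain attribute toggle is an assumption here.

    graph = ModuleGraph()
    graph.trackUnusedNames = True
    graph.parseFile("one_module.py")    # illustrative file name
    mod = graph.modules["one_module"]
    print(mod.imports)        # set of module names this file imports
    print(mod.unused_names)   # only populated when tracking is on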
4,299
mgedmin/findimports
findimports.py
ModuleGraph.filenameToModname
def filenameToModname(self, filename): """Convert a filename to a module name.""" for ext in reversed(self._exts): if filename.endswith(ext): filename = filename[:-len(ext)] break else: self.warn(filename, '%s: unknown file name extension', filename) filename = os.path.abspath(filename) elements = filename.split(os.path.sep) modname = [] while elements: modname.append(elements[-1]) del elements[-1] if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])): break modname.reverse() modname = ".".join(modname) return modname
python
def filenameToModname(self, filename): for ext in reversed(self._exts): if filename.endswith(ext): filename = filename[:-len(ext)] break else: self.warn(filename, '%s: unknown file name extension', filename) filename = os.path.abspath(filename) elements = filename.split(os.path.sep) modname = [] while elements: modname.append(elements[-1]) del elements[-1] if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])): break modname.reverse() modname = ".".join(modname) return modname
[ "def", "filenameToModname", "(", "self", ",", "filename", ")", ":", "for", "ext", "in", "reversed", "(", "self", ".", "_exts", ")", ":", "if", "filename", ".", "endswith", "(", "ext", ")", ":", "filename", "=", "filename", "[", ":", "-", "len", "(", "ext", ")", "]", "break", "else", ":", "self", ".", "warn", "(", "filename", ",", "'%s: unknown file name extension'", ",", "filename", ")", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "elements", "=", "filename", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "modname", "=", "[", "]", "while", "elements", ":", "modname", ".", "append", "(", "elements", "[", "-", "1", "]", ")", "del", "elements", "[", "-", "1", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "sep", ".", "join", "(", "elements", "+", "[", "'__init__.py'", "]", ")", ")", ":", "break", "modname", ".", "reverse", "(", ")", "modname", "=", "\".\"", ".", "join", "(", "modname", ")", "return", "modname" ]
Convert a filename to a module name.
[ "Convert", "a", "filename", "to", "a", "module", "name", "." ]
c20a50b497390fed15aa3835476f4fad57313e8a
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L463-L481
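A throwaway demonstration of the climb above: the conversion strips a recognized extension, then prepends one directory per enclosing __init__.py (that '.py' is among the recognized extensions is assumed from parsePathname).

    import os
    import tempfile

    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "pkg"))
    open(os.path.join(root, "pkg", "__init__.py"), "w").close()
    open(os.path.join(root, "pkg", "sub.py"), "w").close()

    graph = ModuleGraph()
    print(graph.filenameToModname(os.path.join(root, "pkg", "sub.py")))  # pkg.sub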