_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---|
q4200
|
challenge
|
train
|
def challenge():
"""Creates an enum for contest type"""
enums = dict(
ACTIVE="active",
UPCOMING="upcoming",
HIRING="hiring",
ALL="all",
SHORT="short",
)
return type('Enum', (), enums)
|
python
|
{
"resource": ""
}
|
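A brief usage sketch (not part of the dataset row above): the three-argument `type('Enum', (), enums)` call builds a plain class whose attributes are the dict entries, so callers read the values as class attributes. The variable names below are illustrative only.

# Illustrative usage of the enum-style class returned by challenge()
ContestType = challenge()
print(ContestType.ACTIVE)    # "active"
print(ContestType.UPCOMING)  # "upcoming"
print(ContestType.ALL)       # "all"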
q4201
|
time_difference
|
train
|
def time_difference(target_time):
"""Calculate the difference between the current time and the given time"""
TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])
time_diff = format_date(target_time) - datetime.utcnow()
hours, remainder = divmod(time_diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds)
|
python
|
{
"resource": ""
}
|
q4202
|
CSVofIntegers
|
train
|
def CSVofIntegers(msg=None):
'''
Checks whether a value is a list of integers.
Returns a list of integers, or a single-element list
if there is only one element in the given CSV string.
'''
def fn(value):
try:
if isinstance(value, basestring):
if ',' in value:
value = list(map(
int, filter(
bool, list(map(
lambda x: x.strip(), value.split(',')
))
)
))
return value
else:
return [int(value)]
else:
raise ValueError
except ValueError:
raise Invalid(
'<{0}> is not a valid csv of integers'.format(value)
)
return fn
|
python
|
{
"resource": ""
}
|
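A hedged usage sketch for the validator factory above: `CSVofIntegers()` returns a callable, and `basestring`/`Invalid` are assumed to come from the snippet's original Python 2 schema-validation context, so only the success path is shown here.

# Illustrative only: the factory returns a validator callable
validate = CSVofIntegers()
print(validate('1, 2, 3'))  # [1, 2, 3]
print(validate('7'))        # [7]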
q4203
|
TeamsViewSet.get_queryset
|
train
|
def get_queryset(self):
"""
Optionally restricts the queryset by filtering against
query parameters in the URL.
"""
query_params = self.request.query_params
url_params = self.kwargs
# get queryset_filters from FilterMixin
queryset_filters = self.get_db_filters(url_params, query_params)
# This dict will hold filter kwargs to pass in to Django ORM calls.
db_filters = queryset_filters['db_filters']
# This dict will hold exclude kwargs to pass in to Django ORM calls.
db_excludes = queryset_filters['db_excludes']
queryset = Team.objects.prefetch_related(
'players'
).all()
return queryset.filter(**db_filters).exclude(**db_excludes)
|
python
|
{
"resource": ""
}
|
q4204
|
get_setting
|
train
|
def get_setting(name, default):
"""
A little helper for fetching global settings with a common prefix.
"""
parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
return getattr(django_settings, parent_name, default)
|
python
|
{
"resource": ""
}
|
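A minimal sketch of how this helper would be called, assuming `django_settings` is the Django settings module imported by the original file; the setting name below is hypothetical.

# Illustrative only: resolves CMSPLUGIN_NEWS_PAGE_SIZE, falling back to 10
PAGE_SIZE = get_setting('PAGE_SIZE', 10)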
q4205
|
NewsAdmin.make_published
|
train
|
def make_published(self, request, queryset):
"""
Marks selected news items as published
"""
rows_updated = queryset.update(is_published=True)
self.message_user(request,
ungettext('%(count)d newsitem was published',
'%(count)d newsitems were published',
rows_updated) % {'count': rows_updated})
|
python
|
{
"resource": ""
}
|
q4206
|
NewsAdmin.make_unpublished
|
train
|
def make_unpublished(self, request, queryset):
"""
Marks selected news items as unpublished
"""
rows_updated = queryset.update(is_published=False)
self.message_user(request,
ungettext('%(count)d newsitem was unpublished',
'%(count)d newsitems were unpublished',
rows_updated) % {'count': rows_updated})
|
python
|
{
"resource": ""
}
|
q4207
|
_execute_wk
|
train
|
def _execute_wk(*args, input=None):
"""
Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: subprocess.CompletedProcess (stdout and stderr are captured)
"""
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
python
|
{
"resource": ""
}
|
q4208
|
generate_pdf
|
train
|
def generate_pdf(html, *,
cache_dir: Path=DFT_CACHE_DIR,
grayscale: bool=False,
lowquality: bool=False,
margin_bottom: str=None,
margin_left: str=None,
margin_right: str=None,
margin_top: str=None,
orientation: str=None,
page_height: str=None,
page_width: str=None,
page_size: str=None,
image_dpi: str=None,
image_quality: str=None,
**extra_kwargs):
"""
Generate a pdf from either a url or a html string.
After the html and url arguments all other arguments are
passed straight to wkhtmltopdf
For details on extra arguments see the output of get_help()
and get_extended_help()
All arguments whether specified or caught with extra_kwargs are converted
to command line args with "'--' + original_name.replace('_', '-')"
Arguments which are True are passed with no value, e.g. just --quiet; False
and None arguments are omitted; everything else is passed with str(value).
:param html: html string to generate pdf from
:param grayscale: bool
:param lowquality: bool
:param margin_bottom: string eg. 10mm
:param margin_left: string eg. 10mm
:param margin_right: string eg. 10mm
:param margin_top: string eg. 10mm
:param orientation: Portrait or Landscape
:param page_height: string eg. 10mm
:param page_width: string eg. 10mm
:param page_size: string: A4, Letter, etc.
:param image_dpi: int default 600
:param image_quality: int default 94
:param extra_kwargs: any exotic extra options for wkhtmltopdf
:return: string representing pdf
"""
if not cache_dir.exists():
Path.mkdir(cache_dir)
py_args = dict(
cache_dir=cache_dir,
grayscale=grayscale,
lowquality=lowquality,
margin_bottom=margin_bottom,
margin_left=margin_left,
margin_right=margin_right,
margin_top=margin_top,
orientation=orientation,
page_height=page_height,
page_width=page_width,
page_size=page_size,
image_dpi=image_dpi,
image_quality=image_quality,
)
py_args.update(extra_kwargs)
cmd_args = _convert_args(**py_args)
p = _execute_wk(*cmd_args, input=html.encode())
pdf_content = p.stdout
# it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
# seem to have generated a pdf
if p.returncode != 0 and pdf_content[:4] != b'%PDF':
raise RuntimeError('error running wkhtmltopdf, command: {!r}\n'
'response: "{}"'.format(cmd_args, p.stderr.decode().strip()))
return pdf_content
|
python
|
{
"resource": ""
}
|
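A minimal calling sketch for generate_pdf, assuming the wkhtmltopdf binary and the module's helpers (DFT_CACHE_DIR, _convert_args, _execute_wk) are available exactly as in the snippet; the HTML fragment and file name are illustrative.

# Illustrative only: render a small HTML fragment to PDF bytes
pdf_bytes = generate_pdf(
    '<h1>Invoice</h1><p>Hello wkhtmltopdf</p>',
    page_size='A4',
    margin_top='10mm',
    margin_bottom='10mm',
)
with open('invoice.pdf', 'wb') as f:
    f.write(pdf_bytes)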
q4209
|
get_version
|
train
|
def get_version():
"""
Get version of pydf and wkhtmltopdf binary
:return: version string
"""
try:
wk_version = _string_execute('-V')
except Exception as e:
# we catch all errors here to make sure we get a version no matter what
wk_version = '%s: %s' % (e.__class__.__name__, e)
return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)
|
python
|
{
"resource": ""
}
|
q4210
|
PyJsParser._interpret_regexp
|
train
|
def _interpret_regexp(self, string, flags):
'''Perform string escape - for regexp literals'''
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st = ''
inside_square = 0
while (self.index < self.length):
template = '[%s]' if not inside_square else '%s'
ch = self.source[self.index]
self.index += 1
if ch == '\\':
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch == 'u':
digs = self.source[self.index:self.index + 4]
if len(digs) == 4 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 4
else:
st += 'u'
elif ch == 'x':
digs = self.source[self.index:self.index + 2]
if len(digs) == 2 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 2
else:
st += 'x'
# special meaning - single char.
elif ch == '0':
st += '\\0'
elif ch == 'n':
st += '\\n'
elif ch == 'r':
st += '\\r'
elif ch == 't':
st += '\\t'
elif ch == 'f':
st += '\\f'
elif ch == 'v':
st += '\\v'
# unescape special single characters like . so that they are interpreted literally
elif ch in REGEXP_SPECIAL_SINGLE:
st += '\\' + ch
# character groups
elif ch == 'b':
st += '\\b'
elif ch == 'B':
st += '\\B'
elif ch == 'w':
st += '\\w'
elif ch == 'W':
st += '\\W'
elif ch == 'd':
st += '\\d'
elif ch == 'D':
st += '\\D'
elif ch == 's':
st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
elif ch == 'S':
st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
else:
if isDecimalDigit(ch):
num = ch
while self.index < self.length and isDecimalDigit(
self.source[self.index]):
num += self.source[self.index]
self.index += 1
st += '\\' + num
else:
st += ch # DONT ESCAPE!!!
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
else:
if ch == '[':
inside_square = True
elif ch == ']':
inside_square = False
st += ch
# print string, 'was transformed to', st
return st
|
python
|
{
"resource": ""
}
|
q4211
|
Crossref.works
|
train
|
def works(self, ids = None, query = None, filter = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, select = None, cursor = None,
cursor_max = 5000, **kwargs):
'''
Search Crossref works
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois.
Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. When you use the sample parameter,
the limit and offset parameters are ignored. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`.
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param cursor: [String] Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used.
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
parameter to set a maximum number of records. Of course, if fewer records are
found than this value, you will get only those found.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.works()
cr.works(ids = '10.1371/journal.pone.0033693')
dois = ['10.1371/journal.pone.0033693', ]
cr.works(ids = dois)
x = cr.works(query = "ecology")
x['status']
x['message-type']
x['message-version']
x['message']
x['message']['total-results']
x['message']['items-per-page']
x['message']['query']
x['message']['items']
# Get full text links
x = cr.works(filter = {'has_full_text': True})
x
# Parse output to various data pieces
x = cr.works(filter = {'has_full_text': True})
## get doi for each item
[ z['DOI'] for z in x['message']['items'] ]
## get doi and url for each item
[ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
### print every doi
for i in x['message']['items']:
print(i['DOI'])
# filters - pass in as a dict
## see https://github.com/CrossRef/rest-api-doc#filter-names
cr.works(filter = {'has_full_text': True})
cr.works(filter = {'has_funder': True, 'has_full_text': True})
cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
## to repeat a filter name, pass in a list
x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
map(lambda z:z['funder'][0]['DOI'], x['message']['items'])
# Deep paging, using the cursor parameter
## this search should lead to only ~215 results
cr.works(query = "widget", cursor = "*", cursor_max = 100)
## this search should lead to only ~2500 results, in chunks of 500
res = cr.works(query = "octopus", cursor = "*", limit = 500)
sum([ len(z['message']['items']) for z in res ])
## about 167 results
res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
sum([ len(z['message']['items']) for z in res ])
## cursor_max to get back only a maximum set of results
res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
sum([ len(z['message']['items']) for z in res ])
## cursor_max - especially useful when a request could be very large
### e.g., "ecology" results in ~275K records, lets max at 10,000
### with 1000 at a time
res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.works(query = "ecology", query_author = 'carl boettiger')
[ x['author'][0]['family'] for x in res['message']['items'] ]
# select certain fields to return
## as a comma separated string
cr.works(query = "ecology", select = "DOI,title")
## or as a list
cr.works(query = "ecology", select = ["DOI","title"])
'''
if ids.__class__.__name__ != 'NoneType':
return request(self.mailto, self.base_url, "/works/", ids,
query, filter, offset, limit, sample, sort,
order, facet, select, None, None, None, None, **kwargs)
else:
return Request(self.mailto, self.base_url, "/works/",
query, filter, offset, limit, sample, sort,
order, facet, select, cursor, cursor_max, None, **kwargs).do_request()
|
python
|
{
"resource": ""
}
|
q4212
|
Crossref.prefixes
|
train
|
def prefixes(self, ids = None, filter = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, works = False, select = None,
cursor = None, cursor_max = 5000, **kwargs):
'''
Search Crossref prefixes
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. When you use the sample parameter,
the limit and offset parameters are ignored. This parameter is only used when works is requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.prefixes(ids = "10.1016")
cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])
# get works
cr.prefixes(ids = "10.1016", works = True)
# Limit number of results
cr.prefixes(ids = "10.1016", works = True, limit = 3)
# Sort and order
cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")
# cursor - deep paging
res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper', filter = {'type': 'journal-article'})
eds = [ x.get('editor') for x in res['message']['items'] ]
[ z for z in eds if z is not None ]
'''
check_kwargs(["query"], kwargs)
return request(self.mailto, self.base_url, "/prefixes/", ids,
query = None, filter = filter, offset = offset, limit = limit,
sample = sample, sort = sort, order = order, facet = facet,
select = select, works = works, cursor = cursor, cursor_max = cursor_max,
**kwargs)
|
python
|
{
"resource": ""
}
|
q4213
|
Crossref.types
|
train
|
def types(self, ids = None, query = None, filter = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, works = False, select = None,
cursor = None, cursor_max = 5000, **kwargs):
'''
Search Crossref types
:param ids: [Array] Type identifier, e.g., journal
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. When you use the sample parameter,
the limit and offset parameters are ignored. This parameter is only used when works is requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.types()
cr.types(ids = "journal")
cr.types(ids = "journal-article")
cr.types(ids = "journal", works = True)
# field queries
res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100)
[ x.get('title') for x in res['message']['items'] ]
'''
return request(self.mailto, self.base_url, "/types/", ids,
query, filter, offset, limit, sample, sort,
order, facet, select, works, cursor, cursor_max, **kwargs)
|
python
|
{
"resource": ""
}
|
q4214
|
Crossref.licenses
|
train
|
def licenses(self, query = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, **kwargs):
'''
Search Crossref licenses
:param query: [String] A query string
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.licenses()
cr.licenses(query = "creative")
'''
check_kwargs(["ids", "filter", "works"], kwargs)
res = request(self.mailto, self.base_url, "/licenses/", None,
query, None, offset, limit, None, sort,
order, facet, None, None, None, None, **kwargs)
return res
|
python
|
{
"resource": ""
}
|
q4215
|
Crossref.registration_agency
|
train
|
def registration_agency(self, ids, **kwargs):
'''
Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
'''
check_kwargs(["query", "filter", "offset", "limit", "sample", "sort",
"order", "facet", "works"], kwargs)
res = request(self.mailto, self.base_url, "/works/", ids,
None, None, None, None, None, None,
None, None, None, None, None, None, True, **kwargs)
if res.__class__ != list:
k = []
k.append(res)
else:
k = res
return [ z['message']['agency']['label'] for z in k ]
|
python
|
{
"resource": ""
}
|
q4216
|
Crossref.random_dois
|
train
|
def random_dois(self, sample = 10, **kwargs):
'''
Get a random set of DOIs
:param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: [Array] of DOIs
Usage::
from habanero import Crossref
cr = Crossref()
cr.random_dois(1)
cr.random_dois(10)
cr.random_dois(50)
cr.random_dois(100)
'''
res = request(self.mailto, self.base_url, "/works/", None,
None, None, None, None, sample, None,
None, None, None, True, None, None, None, **kwargs)
return [ z['DOI'] for z in res['message']['items'] ]
|
python
|
{
"resource": ""
}
|
q4217
|
content_negotiation
|
train
|
def content_negotiation(ids = None, format = "bibtex", style = 'apa',
locale = "en-US", url = None, **kwargs):
'''
Get citations in various formats from CrossRef
:param ids: [str] Search by a single DOI or many DOIs, each a string. If many
passed in, do so in a list
:param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json",
"citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml",
"datacite-xml","bibentry", or "crossref-tdm"
:param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles`
for options. Default: "apa". If there's a style that CrossRef doesn't support
you'll get a `(500) Internal Server Error`
:param locale: [str] Language locale. See `locale.locale_alias`
:param url: [str] Base URL for the content negotiation request. Default: `https://doi.org`
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: string, which can be parsed to various formats depending on what
format you request (e.g., JSON vs. XML vs. bibtex)
Usage::
from habanero import cn
cn.content_negotiation(ids = '10.1126/science.169.3946.635')
# get citeproc-json
cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json")
# some other formats
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text")
# return an R bibentry type
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry")
cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry")
# return an apa style citation
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos")
# Using DataCite DOIs
## some formats don't work
# cn.content_negotiation(ids = "10.5284/1011335", format = "text")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm")
## But most do work
cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "turtle")
cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json")
cn.content_negotiation(ids = "10.5284/1011335", format = "ris")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
# many DOIs
dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217']
x = cn.content_negotiation(ids = dois)
# Use a different base url
url = "http://dx.doi.org"
cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url)
cn.content_negotiation(ids = "10.5284/1011335", url = url)
'''
if url is None:
url = cn_base_url
return CNRequest(url, ids, format, style, locale, **kwargs)
|
python
|
{
"resource": ""
}
|
q4218
|
citation_count
|
train
|
def citation_count(doi, url = "http://www.crossref.org/openurl/",
key = "[email protected]", **kwargs):
'''
Get a citation count with a DOI
:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param key: [String] your API key
See http://labs.crossref.org/openurl/ for more info on this Crossref API service.
Usage::
from habanero import counts
counts.citation_count(doi = "10.1371/journal.pone.0042793")
counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
# DOI not found
## FIXME
counts.citation_count(doi = "10.1016/j.fbr.2012")
'''
args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
args = dict((k, v) for k, v in args.items() if v)
res = requests.get(url, params = args, headers = make_ua(), **kwargs)
xmldoc = minidom.parseString(res.content)
val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value
return int(str(val))
|
python
|
{
"resource": ""
}
|
q4219
|
TreeOfContents.findHierarchy
|
train
|
def findHierarchy(self, max_subs=10):
"""Find hierarchy for the LaTeX source.
>>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy()
('section', 'subsection')
>>> TOC.fromLatex(
... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy()
('subsubsection', 'subsubsubsection')
>>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}').findHierarchy()
('section', 'subsection')
"""
hierarchy = []
defaults = TOC.default_hierarchy + tuple(
'%ssection' % ('sub'*i) for i in range(2, max_subs))
for level in defaults:
if getattr(self.source, level, False):
hierarchy.append(level)
return tuple(hierarchy)
|
python
|
{
"resource": ""
}
|
q4220
|
TreeOfContents.getHeadingLevel
|
train
|
def getHeadingLevel(ts, hierarchy=default_hierarchy):
"""Extract heading level for a particular Tex element, given a specified
hierarchy.
>>> ts = TexSoup(r'\section{Hello}').section
>>> TOC.getHeadingLevel(ts)
2
>>> ts2 = TexSoup(r'\chapter{hello again}').chapter
>>> TOC.getHeadingLevel(ts2)
1
>>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection
>>> TOC.getHeadingLevel(ts3)
6
"""
try:
return hierarchy.index(ts.name)+1
except ValueError:
if ts.name.endswith('section'):
i, name = 0, ts.name
while name.startswith('sub'):
name, i = name[3:], i+1
if name == 'section':
return i+2
return float('inf')
except (AttributeError, TypeError):
return float('inf')
|
python
|
{
"resource": ""
}
|
q4221
|
TreeOfContents.parseTopDepth
|
train
|
def parseTopDepth(self, descendants=()):
"""Parse tex for highest tag in hierarchy
>>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth()
1
>>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}'
>>> TOC.fromLatex(s).parseTopDepth()
1
>>> h = ('section', 'subsubsection', 'subsubsubsection')
>>> TOC.fromLatex(s, hierarchy=h).parseTopDepth()
2
"""
descendants = list(descendants) or \
list(getattr(self.source, 'descendants', descendants))
if not descendants:
return -1
return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants)
|
python
|
{
"resource": ""
}
|
q4222
|
TreeOfContents.fromLatex
|
train
|
def fromLatex(tex, *args, **kwargs):
"""Creates abstraction using Latex
:param str tex: Latex
:return: TreeOfContents object
"""
source = TexSoup(tex)
return TOC('[document]', source=source,
descendants=list(source.descendants), *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q4223
|
process_options
|
train
|
def process_options(opts):
"""Check and prepare options dict."""
# Convert match and exclude args into pattern lists
match = opts.get("match")
if match and type(match) is str:
opts["match"] = [pat.strip() for pat in match.split(",")]
elif match:
assert type(match) is list
else:
opts["match"] = []
exclude = opts.get("exclude")
if exclude and type(exclude) is str:
opts["exclude"] = [pat.strip() for pat in exclude.split(",")]
elif exclude:
assert type(exclude) is list
else:
# opts["exclude"] = DEFAULT_OMIT
opts["exclude"] = []
|
python
|
{
"resource": ""
}
|
q4224
|
match_path
|
train
|
def match_path(entry, opts):
"""Return True if `path` matches `match` and `exclude` options."""
if entry.name in ALWAYS_OMIT:
return False
# TODO: currently we use fnmatch syntax and match against names.
# We also might allow glob syntax and match against the whole relative path instead
# path = entry.get_rel_path()
path = entry.name
ok = True
match = opts.get("match")
exclude = opts.get("exclude")
if entry.is_file() and match:
assert type(match) is list
ok = False
for pat in match:
if fnmatch.fnmatch(path, pat):
ok = True
break
if ok and exclude:
assert type(exclude) is list
for pat in exclude:
if fnmatch.fnmatch(path, pat):
ok = False
break
# write("match", ok, entry)
return ok
|
python
|
{
"resource": ""
}
|
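A self-contained sketch of the include/exclude idea used in match_path above, reduced to plain fnmatch on names (the FileEntry and options machinery from the snippet is assumed away, so only the pattern logic is shown).

import fnmatch

def matches(name, match_pats, exclude_pats):
    # include if any match pattern fits (or no match patterns are given) ...
    ok = any(fnmatch.fnmatch(name, p) for p in match_pats) if match_pats else True
    # ... then drop anything hit by an exclude pattern
    if ok and any(fnmatch.fnmatch(name, p) for p in exclude_pats):
        ok = False
    return ok

print(matches('report.txt', ['*.txt'], ['draft*']))  # True
print(matches('draft1.txt', ['*.txt'], ['draft*']))  # False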
q4225
|
BaseSynchronizer._tick
|
train
|
def _tick(self):
"""Write progress info and move cursor to beginning of line."""
if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"):
stats = self.get_stats()
prefix = DRY_RUN_PREFIX if self.dry_run else ""
sys.stdout.write(
"{}Touched {}/{} entries in {} directories...\r".format(
prefix,
stats["entries_touched"],
stats["entries_seen"],
stats["local_dirs"],
)
)
sys.stdout.flush()
return
|
python
|
{
"resource": ""
}
|
q4226
|
BaseSynchronizer._sync_dir
|
train
|
def _sync_dir(self):
"""Traverse the local folder structure and remote peers.
This is the core algorithm that generates calls to self.sync_XXX()
handler methods.
_sync_dir() is called by self.run().
"""
local_entries = self.local.get_dir()
# Convert into a dict {name: FileEntry, ...}
local_entry_map = dict(map(lambda e: (e.name, e), local_entries))
remote_entries = self.remote.get_dir()
# Convert into a dict {name: FileEntry, ...}
remote_entry_map = dict(map(lambda e: (e.name, e), remote_entries))
entry_pair_list = []
# 1. Loop over all local files and classify the relationship to the
# peer entries.
for local_entry in local_entries:
if isinstance(local_entry, DirectoryEntry):
self._inc_stat("local_dirs")
else:
self._inc_stat("local_files")
if not self._before_sync(local_entry):
# TODO: currently, if a file is skipped, it will not be
# considered for deletion on the peer target
continue
# TODO: case insensitive?
# We should use os.path.normcase() to convert to lowercase on windows
# (i.e. if the FTP server is based on Windows)
remote_entry = remote_entry_map.get(local_entry.name)
entry_pair = EntryPair(local_entry, remote_entry)
entry_pair_list.append(entry_pair)
# TODO: renaming could be triggered, if we find an existing
# entry.unique with a different entry.name
# 2. Collect all remote entries that do NOT exist on the local target.
for remote_entry in remote_entries:
if isinstance(remote_entry, DirectoryEntry):
self._inc_stat("remote_dirs")
else:
self._inc_stat("remote_files")
if not self._before_sync(remote_entry):
continue
if remote_entry.name not in local_entry_map:
entry_pair = EntryPair(None, remote_entry)
entry_pair_list.append(entry_pair)
# print("NOT IN LOCAL")
# print(remote_entry.name)
# print(self.remote.get_id())
# print(local_entry_map.keys())
# print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()))
# 3. Classify all entries and pairs.
# We pass the additional meta data here
peer_dir_meta = self.local.cur_dir_meta.peer_sync.get(self.remote.get_id())
for pair in entry_pair_list:
pair.classify(peer_dir_meta)
# 4. Perform (or schedule) resulting file operations
for pair in entry_pair_list:
# print(pair)
# Let synchronizer modify the default operation (e.g. apply `--force` option)
hook_result = self.re_classify_pair(pair)
# Let synchronizer implement special handling of unmatched entries
# (e.g. `--delete_unmatched`)
if not self._match(pair.any_entry):
self.on_mismatch(pair)
# ... do not call operation handler...
elif hook_result is not False:
handler = getattr(self, "on_" + pair.operation, None)
# print(handler)
if handler:
try:
res = handler(pair)
except Exception as e:
if self.on_error(e, pair) is not True:
raise
else:
# write("NO HANDLER")
raise NotImplementedError("No handler for {}".format(pair))
if pair.is_conflict():
self._inc_stat("conflict_files")
# 5. Let the target provider write its meta data for the files in the
# current directory.
self.local.flush_meta()
self.remote.flush_meta()
# 6. Finally visit all local sub-directories recursively that also
# exist on the remote target.
for local_dir in local_entries:
# write("local_dir(%s, %s)" % (local_dir, local_dir))
if not local_dir.is_dir():
continue
elif not self._before_sync(local_dir):
continue
remote_dir = remote_entry_map.get(local_dir.name)
if remote_dir:
# write("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# self._log_call("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# res = self.sync_equal_dir(local_dir, remote_dir)
# res = self.on_equal(local_dir, remote_dir)
if res is not False:
self.local.cwd(local_dir.name)
self.remote.cwd(local_dir.name)
self._sync_dir()
self.local.cwd("..")
self.remote.cwd("..")
return True
|
python
|
{
"resource": ""
}
|
q4227
|
BaseSynchronizer.on_copy_local
|
train
|
def on_copy_local(self, pair):
"""Called when the local resource should be copied to remote."""
status = pair.remote_classification
self._log_action("copy", status, ">", pair.local)
|
python
|
{
"resource": ""
}
|
q4228
|
BaseSynchronizer.on_copy_remote
|
train
|
def on_copy_remote(self, pair):
"""Called when the remote resource should be copied to local."""
status = pair.local_classification
self._log_action("copy", status, "<", pair.remote)
|
python
|
{
"resource": ""
}
|
q4229
|
BiDirSynchronizer.on_need_compare
|
train
|
def on_need_compare(self, pair):
"""Re-classify pair based on file attributes and options."""
# print("on_need_compare", pair)
# If no metadata is available, we could only classify file entries as
# 'existing'.
# Now we use peer information to improve this classification.
c_pair = (pair.local_classification, pair.remote_classification)
org_pair = c_pair
org_operation = pair.operation
# print("need_compare", pair)
if pair.is_dir:
# For directories, we cannot compare existing peer entries.
# Instead, we simply log (and traverse the children later).
pair.local_classification = pair.remote_classification = "existing"
pair.operation = "equal"
self._log_action("", "visit", "?", pair.local, min_level=4)
# self._log_action("", "equal", "=", pair.local, min_level=4)
return
elif c_pair == ("existing", "existing"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp < 0:
c_pair = ("unmodified", "modified") # remote is newer
elif time_cmp > 0:
c_pair = ("modified", "unmodified") # local is newer
elif pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
elif c_pair == ("new", "new"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp == 0 and pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
# elif c_pair == ("unmodified", "unmodified"):
pair.local_classification = c_pair[0]
pair.remote_classification = c_pair[1]
pair.operation = operation_map.get(c_pair)
# print("on_need_compare {} => {}".format(org_pair, pair))
if not pair.operation:
raise RuntimeError(
"Undefined operation for pair classification {}".format(c_pair)
)
elif pair.operation == org_operation:
raise RuntimeError("Could not re-classify {}".format(org_pair))
handler = getattr(self, "on_" + pair.operation, None)
res = handler(pair)
# self._log_action("", "different", "?", pair.local, min_level=2)
return res
|
python
|
{
"resource": ""
}
|
q4230
|
BiDirSynchronizer.on_conflict
|
train
|
def on_conflict(self, pair):
"""Return False to prevent visiting of children."""
# self._log_action("skip", "conflict", "!", pair.local, min_level=2)
# print("on_conflict", pair)
any_entry = pair.any_entry
if not self._test_match_or_print(any_entry):
return
resolve = self._interactive_resolve(pair)
if resolve == "skip":
self._log_action("skip", "conflict", "*?*", any_entry)
self._inc_stat("conflict_files_skipped")
return
if pair.local and pair.remote:
assert pair.local.is_file()
is_newer = pair.local > pair.remote
if (
resolve == "local"
or (is_newer and resolve == "new")
or (not is_newer and resolve == "old")
):
self._log_action("copy", "conflict", "*>*", pair.local)
self._copy_file(self.local, self.remote, pair.local)
elif (
resolve == "remote"
or (is_newer and resolve == "old")
or (not is_newer and resolve == "new")
):
self._log_action("copy", "conflict", "*<*", pair.local)
self._copy_file(self.remote, self.local, pair.remote)
else:
raise NotImplementedError
elif pair.local:
assert pair.local.is_file()
if resolve == "local":
self._log_action("restore", "conflict", "*>x", pair.local)
self._copy_file(self.local, self.remote, pair.local)
elif resolve == "remote":
self._log_action("delete", "conflict", "*<x", pair.local)
self._remove_file(pair.local)
else:
raise NotImplementedError
else:
assert pair.remote.is_file()
if resolve == "local":
self._log_action("delete", "conflict", "x>*", pair.remote)
self._remove_file(pair.remote)
elif resolve == "remote":
self._log_action("restore", "conflict", "x<*", pair.remote)
self._copy_file(self.remote, self.local, pair.remote)
else:
raise NotImplementedError
return
|
python
|
{
"resource": ""
}
|
q4231
|
DownloadSynchronizer._interactive_resolve
|
train
|
def _interactive_resolve(self, pair):
"""Return 'local', 'remote', or 'skip' to use local, remote resource or skip."""
if self.resolve_all:
if self.verbose >= 5:
self._print_pair_diff(pair)
return self.resolve_all
resolve = self.options.get("resolve", "skip")
assert resolve in ("remote", "ask", "skip")
if resolve == "ask" or self.verbose >= 5:
self._print_pair_diff(pair)
if resolve in ("remote", "skip"):
# self.resolve_all = resolve
return resolve
# RED = ansi_code("Fore.LIGHTRED_EX")
M = ansi_code("Style.BRIGHT") + ansi_code("Style.UNDERLINE")
R = ansi_code("Style.RESET_ALL")
# self._print_pair_diff(pair)
self._inc_stat("interactive_ask")
while True:
prompt = (
"Use "
+ M
+ "R"
+ R
+ "emote, "
+ M
+ "S"
+ R
+ "kip, "
+ M
+ "B"
+ R
+ "inary compare, "
+ M
+ "H"
+ R
+ "elp? "
)
r = compat.console_input(prompt).strip()
if r in ("h", "H", "?"):
print("The following keys are supported:")
print(" 'b': Binary compare")
print(" 'r': Download remote file")
print(" 's': Skip this file (leave both targets unchanged)")
print(
"Hold Shift (upper case letters) to apply choice for all "
"remaining conflicts."
)
print("Hit Ctrl+C to abort.")
continue
elif r in ("B", "b"):
self._compare_file(pair.local, pair.remote)
continue
elif r in ("R", "S"):
r = self._resolve_shortcuts[r.lower()]
self.resolve_all = r
break
elif r in ("r", "s"):
r = self._resolve_shortcuts[r]
break
return r
|
python
|
{
"resource": ""
}
|
q4232
|
set_pyftpsync_logger
|
train
|
def set_pyftpsync_logger(logger=True):
"""Define target for common output.
Args:
logger (bool | None | logging.Logger):
Pass None to use `print()` to stdout instead of logging.
Pass True to create a simple standard logger.
"""
global _logger
prev_logger = _logger
if logger is True:
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger("pyftpsync")
_logger.setLevel(logging.DEBUG)
else:
_logger = logger
return prev_logger
|
python
|
{
"resource": ""
}
|
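A short wiring sketch, assuming the module-level write() helper shown in a later row routes through the same _logger global; the logger name below is illustrative.

import logging

# Illustrative only: redirect pyftpsync output to a custom logger
logging.basicConfig(level=logging.INFO)
my_logger = logging.getLogger('my_app.ftpsync')
prev = set_pyftpsync_logger(my_logger)
write('sync started')        # now emitted via my_logger.info()
set_pyftpsync_logger(prev)   # restore the previous target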
q4233
|
write
|
train
|
def write(*args, **kwargs):
"""Redirectable wrapper for print statements."""
debug = kwargs.pop("debug", None)
warning = kwargs.pop("warning", None)
if _logger:
kwargs.pop("end", None)
kwargs.pop("file", None)
if debug:
_logger.debug(*args, **kwargs)
elif warning:
_logger.warning(*args, **kwargs)
else:
_logger.info(*args, **kwargs)
else:
print(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q4234
|
write_error
|
train
|
def write_error(*args, **kwargs):
"""Redirectable wrapper for print sys.stderr statements."""
if _logger:
kwargs.pop("end", None)
kwargs.pop("file", None)
_logger.error(*args, **kwargs)
else:
print(*args, file=sys.stderr, **kwargs)
|
python
|
{
"resource": ""
}
|
q4235
|
namespace_to_dict
|
train
|
def namespace_to_dict(o):
"""Convert an argparse namespace object to a dictionary."""
d = {}
for k, v in o.__dict__.items():
if not callable(v):
d[k] = v
return d
|
python
|
{
"resource": ""
}
|
q4236
|
eps_compare
|
train
|
def eps_compare(f1, f2, eps):
"""Return true if |f1-f2| <= eps."""
res = f1 - f2
if abs(res) <= eps: # '<=',so eps == 0 works as expected
return 0
elif res < 0:
return -1
return 1
|
python
|
{
"resource": ""
}
|
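A worked example of the three-way epsilon comparison above, applied to file mtimes the way the synchronizer uses it; the EPS value and timestamps are illustrative.

EPS_TIME = 2.0
print(eps_compare(1000.0, 1001.5, EPS_TIME))  # 0  -> equal within tolerance
print(eps_compare(1000.0, 1005.0, EPS_TIME))  # -1 -> first value is older
print(eps_compare(1010.0, 1005.0, EPS_TIME))  # 1  -> first value is newer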
q4237
|
get_option
|
train
|
def get_option(env_name, section, opt_name, default=None):
"""Return a configuration setting from environment var or .pyftpsyncrc"""
val = os.environ.get(env_name)
if val is None:
try:
val = _pyftpsyncrc_parser.get(section, opt_name)
except (compat.configparser.NoSectionError, compat.configparser.NoOptionError):
pass
if val is None:
val = default
return val
|
python
|
{
"resource": ""
}
|
q4238
|
prompt_for_password
|
train
|
def prompt_for_password(url, user=None, default_user=None):
"""Prompt for username and password.
If a user name is passed, only prompt for a password.
Args:
url (str): hostname
user (str, optional):
Pass a valid name to skip prompting for a user name
default_user (str, optional):
Pass a valid name that is used as default when prompting
for a user name
Raises:
KeyboardInterrupt if user hits Ctrl-C
Returns:
(username, password) or None
"""
if user is None:
default_user = default_user or getpass.getuser()
while user is None:
user = compat.console_input(
"Enter username for {} [{}]: ".format(url, default_user)
)
if user.strip() == "" and default_user:
user = default_user
if user:
pw = getpass.getpass(
"Enter password for {}@{} (Ctrl+C to abort): ".format(user, url)
)
if pw or pw == "":
return (user, pw)
return None
|
python
|
{
"resource": ""
}
|
q4239
|
get_credentials_for_url
|
train
|
def get_credentials_for_url(url, opts, force_user=None):
"""Lookup credentials for a given target in keyring and .netrc.
Optionally prompts for credentials if not found.
Returns:
2-tuple (username, password) or None
"""
creds = None
verbose = int(opts.get("verbose"))
force_prompt = opts.get("prompt", False)
allow_prompt = not opts.get("no_prompt", True)
allow_keyring = not opts.get("no_keyring", False) and not force_user
allow_netrc = not opts.get("no_netrc", False) and not force_user
# print("get_credentials_for_url", force_user, allow_prompt)
if force_user and not allow_prompt:
raise RuntimeError(
"Cannot get credentials for a distinct user ({}) from keyring or .netrc and "
"prompting is disabled.".format(force_user)
)
# Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x
home_path = os.path.expanduser("~")
file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE)
if os.path.isfile(file_path):
raise RuntimeError(
"Custom password files are no longer supported. Delete {} and use .netrc instead.".format(
file_path
)
)
# Query keyring database
if creds is None and keyring and allow_keyring:
try:
# Note: we pass the url as `username` and username:password as `password`
c = keyring.get_password("pyftpsync", url)
if c is not None:
creds = c.split(":", 1)
write(
"Using credentials from keyring('pyftpsync', '{}'): {}:***.".format(
url, creds[0]
)
)
else:
if verbose >= 4:
write(
"No credentials found in keyring('pyftpsync', '{}').".format(
url
)
)
# except keyring.errors.TransientKeyringError:
except Exception as e:
# e.g. user clicked 'no'
write_error("Could not get password from keyring {}".format(e))
# Query .netrc file
# print(opts)
if creds is None and allow_netrc:
try:
authenticators = None
authenticators = netrc.netrc().authenticators(url)
except CompatFileNotFoundError:
if verbose >= 4:
write("Could not get password (no .netrc file).")
except Exception as e:
write_error("Could not read .netrc: {}.".format(e))
if authenticators:
creds = (authenticators[0], authenticators[2])
write("Using credentials from .netrc file: {}:***.".format(creds[0]))
else:
if verbose >= 4:
write("Could not find entry for '{}' in .netrc file.".format(url))
# Prompt for password if we don't have credentials yet, or --prompt was set.
if allow_prompt:
if creds is None:
creds = prompt_for_password(url)
elif force_prompt:
# --prompt was set but we can provide a default for the user name
creds = prompt_for_password(url, default_user=creds[0])
return creds
|
python
|
{
"resource": ""
}
|
q4240
|
save_password
|
train
|
def save_password(url, username, password):
"""Store credentials in keyring."""
if keyring:
if ":" in username:
raise RuntimeError(
"Unable to store credentials if username contains a ':' ({}).".format(
username
)
)
try:
# Note: we pass the url as `username` and username:password as `password`
if password is None:
keyring.delete_password("pyftpsync", url)
write("Delete credentials from keyring ({})".format(url))
else:
keyring.set_password(
"pyftpsync", url, "{}:{}".format(username, password)
)
write(
"Store credentials in keyring ({}, {}:***).".format(url, username)
)
# except keyring.errors.TransientKeyringError:
except Exception as e:
write("Could not delete/set password {}.".format(e))
pass # e.g. user clicked 'no'
else:
write("Could not store credentials (missing keyring support).")
return
|
python
|
{
"resource": ""
}
|
q4241
|
str_to_bool
|
train
|
def str_to_bool(val):
"""Return a boolean for '0', 'false', 'on', ..."""
val = str(val).lower().strip()
if val in ("1", "true", "on", "yes"):
return True
elif val in ("0", "false", "off", "no"):
return False
raise ValueError(
"Invalid value '{}'"
"(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').".format(val)
)
|
python
|
{
"resource": ""
}
|
q4242
|
ansi_code
|
train
|
def ansi_code(name):
"""Return ansi color or style codes or '' if colorama is not available."""
try:
obj = colorama
for part in name.split("."):
obj = getattr(obj, part)
return obj
except AttributeError:
return ""
|
python
|
{
"resource": ""
}
|
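A hedged sketch of how ansi_code is used to build the highlighted prompt seen in _interactive_resolve above; per its docstring it degrades to empty strings when colorama is not available.

# Illustrative only: emphasis degrades gracefully without colorama
M = ansi_code('Style.BRIGHT') + ansi_code('Style.UNDERLINE')
R = ansi_code('Style.RESET_ALL')
print('Use ' + M + 'R' + R + 'emote, ' + M + 'S' + R + 'kip?')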
q4243
|
make_target
|
train
|
def make_target(url, extra_opts=None):
"""Factory that creates `_Target` objects from URLs.
FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.
Note:
TLS is only supported on Python 2.7/3.2+.
Args:
url (str):
extra_opts (dict, optional): Passed to Target constructor. Default: None.
Returns:
:class:`_Target`
"""
# debug = extra_opts.get("debug", 1)
parts = compat.urlparse(url, allow_fragments=False)
# scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986
scheme = parts.scheme.lower()
if scheme in ["ftp", "ftps"]:
creds = parts.username, parts.password
tls = scheme == "ftps"
from ftpsync import ftp_target
target = ftp_target.FtpTarget(
parts.path,
parts.hostname,
parts.port,
username=creds[0],
password=creds[1],
tls=tls,
timeout=None,
extra_opts=extra_opts,
)
else:
target = FsTarget(url, extra_opts)
return target
|
python
|
{
"resource": ""
}
|
q4244
|
_get_encoding_opt
|
train
|
def _get_encoding_opt(synchronizer, extra_opts, default):
"""Helper to figure out encoding setting inside constructors."""
encoding = default
# if synchronizer and "encoding" in synchronizer.options:
# encoding = synchronizer.options.get("encoding")
if extra_opts and "encoding" in extra_opts:
encoding = extra_opts.get("encoding")
if encoding:
# Normalize name (e.g. 'UTF8' => 'utf-8')
encoding = codecs.lookup(encoding).name
# print("_get_encoding_opt", encoding)
return encoding or None
|
python
|
{
"resource": ""
}
|
q4245
|
_Target.walk
|
train
|
def walk(self, pred=None, recursive=True):
"""Iterate over all target entries recursively.
Args:
pred (function, optional):
Callback(:class:`ftpsync.resources._Resource`) should return `False` to
ignore entry. Default: `None`.
recursive (bool, optional):
Pass `False` to generate top level entries only. Default: `True`.
Yields:
:class:`ftpsync.resources._Resource`
"""
for entry in self.get_dir():
if pred and pred(entry) is False:
continue
yield entry
if recursive:
if isinstance(entry, DirectoryEntry):
self.cwd(entry.name)
for e in self.walk(pred):
yield e
self.cwd("..")
return
|
python
|
{
"resource": ""
}
|
q4246
|
FsTarget.set_mtime
|
train
|
def set_mtime(self, name, mtime, size):
"""Set modification time on file."""
self.check_write(name)
os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
|
python
|
{
"resource": ""
}
|
q4247
|
FtpTarget._lock
|
train
|
def _lock(self, break_existing=False):
"""Write a special file to the target root folder."""
# write("_lock")
data = {"lock_time": time.time(), "lock_holder": None}
try:
assert self.cur_dir == self.root_dir
self.write_text(DirMetadata.LOCK_FILE_NAME, json.dumps(data))
self.lock_data = data
self.lock_write_time = time.time()
except Exception as e:
errmsg = "{}".format(e)
write_error("Could not write lock file: {}".format(errmsg))
if errmsg.startswith("550") and self.ftp.passiveserver:
try:
self.ftp.makepasv()
except Exception:
write_error(
"The server probably requires FTP Active mode. "
"Try passing the --ftp-active option."
)
# Set to False, so we don't try to remove later
self.lock_data = False
|
python
|
{
"resource": ""
}
|
q4248
|
FtpTarget._unlock
|
train
|
def _unlock(self, closing=False):
"""Remove lock file to the target root folder.
"""
# write("_unlock", closing)
try:
if self.cur_dir != self.root_dir:
if closing:
write(
"Changing to ftp root folder to remove lock file: {}".format(
self.root_dir
)
)
self.cwd(self.root_dir)
else:
write_error(
"Could not remove lock file, because CWD != ftp root: {}".format(
self.cur_dir
)
)
return
if self.lock_data is False:
if self.get_option("verbose", 3) >= 4:
write("Skip remove lock file (was not written).")
else:
# direct delete, without updating metadata or checking for target access:
try:
self.ftp.delete(DirMetadata.LOCK_FILE_NAME)
# self.remove_file(DirMetadata.LOCK_FILE_NAME)
except Exception as e:
# I have seen '226 Closing data connection' responses here,
# probably when a previous command threw another error.
# However here, 2xx response should be Ok(?):
# A 226 reply code is sent by the server before closing the
# data connection after successfully processing the previous client command
if e.args[0][:3] == "226":
write_error("Ignoring 226 response for ftp.delete() lockfile")
else:
raise
self.lock_data = None
except Exception as e:
write_error("Could not remove lock file: {}".format(e))
raise
|
python
|
{
"resource": ""
}
|
q4249
|
FtpTarget._probe_lock_file
|
train
|
def _probe_lock_file(self, reported_mtime):
"""Called by get_dir"""
delta = reported_mtime - self.lock_data["lock_time"]
# delta2 = reported_mtime - self.lock_write_time
self.server_time_ofs = delta
if self.get_option("verbose", 3) >= 4:
write("Server time offset: {:.2f} seconds.".format(delta))
|
python
|
{
"resource": ""
}
|
q4250
|
EntryPair.is_same_time
|
train
|
def is_same_time(self):
"""Return True if local.mtime == remote.mtime."""
return (
self.local
and self.remote
and FileEntry._eps_compare(self.local.mtime, self.remote.mtime) == 0
)
|
python
|
{
"resource": ""
}
|
q4251
|
EntryPair.override_operation
|
train
|
def override_operation(self, operation, reason):
"""Re-Classify entry pair."""
prev_class = (self.local_classification, self.remote_classification)
prev_op = self.operation
assert operation != prev_op
assert operation in PAIR_OPERATIONS
if self.any_entry.target.synchronizer.verbose > 3:
write(
"override_operation({}, {}) -> {} ({})".format(
prev_class, prev_op, operation, reason
),
debug=True,
)
self.operation = operation
self.re_class_reason = reason
|
python
|
{
"resource": ""
}
|
q4252
|
EntryPair.classify
|
train
|
def classify(self, peer_dir_meta):
"""Classify entry pair."""
assert self.operation is None
# write("CLASSIFIY", self, peer_dir_meta)
# Note: We pass False if the entry is not listed in the metadata.
# We pass None if we don't have metadata at all.
peer_entry_meta = peer_dir_meta.get(self.name, False) if peer_dir_meta else None
# write("=>", self, peer_entry_meta)
if self.local:
self.local.classify(peer_dir_meta)
self.local_classification = self.local.classification
elif peer_entry_meta:
self.local_classification = "deleted"
else:
self.local_classification = "missing"
if self.remote:
self.remote.classify(peer_dir_meta)
self.remote_classification = self.remote.classification
elif peer_entry_meta:
self.remote_classification = "deleted"
else:
self.remote_classification = "missing"
c_pair = (self.local_classification, self.remote_classification)
self.operation = operation_map.get(c_pair)
if not self.operation:
raise RuntimeError(
"Undefined operation for pair classification {}".format(c_pair)
)
if PRINT_CLASSIFICATIONS:
write("classify {}".format(self))
# if not entry.meta:
# assert self.classification in PAIR_CLASSIFICATIONS
assert self.operation in PAIR_OPERATIONS
return self.operation
|
python
|
{
"resource": ""
}
|
q4253
|
_Resource.classify
|
train
|
def classify(self, peer_dir_meta):
"""Classify this entry as 'new', 'unmodified', or 'modified'."""
assert self.classification is None
peer_entry_meta = None
if peer_dir_meta:
# Metadata is generally available, so we can detect 'new' or 'modified'
peer_entry_meta = peer_dir_meta.get(self.name, False)
if self.is_dir():
# Directories are considered 'unmodified' (would require deep traversal
# to check otherwise)
if peer_entry_meta:
self.classification = "unmodified"
else:
self.classification = "new"
elif peer_entry_meta:
# File entries can be classified as modified/unmodified
self.ps_size = peer_entry_meta.get("s")
self.ps_mtime = peer_entry_meta.get("m")
self.ps_utime = peer_entry_meta.get("u")
if (
self.size == self.ps_size
and FileEntry._eps_compare(self.mtime, self.ps_mtime) == 0
):
self.classification = "unmodified"
else:
self.classification = "modified"
else:
# A new file entry
self.classification = "new"
else:
# No metadata available:
if self.is_dir():
# Directories are considered 'unmodified' (would require deep traversal
# to check otherwise)
self.classification = "unmodified"
else:
# That's all we know, but EntryPair.classify() may adjust this
self.classification = "existing"
if PRINT_CLASSIFICATIONS:
write("classify {}".format(self))
assert self.classification in ENTRY_CLASSIFICATIONS
return self.classification
|
python
|
{
"resource": ""
}
|
q4254
|
FileEntry.was_modified_since_last_sync
|
train
|
def was_modified_since_last_sync(self):
"""Return True if this resource was modified since last sync.
None is returned if we don't know (because of missing meta data).
"""
info = self.get_sync_info()
if not info:
return None
if self.size != info["s"]:
return True
if self.mtime > info["m"]:
return True
return False
|
python
|
{
"resource": ""
}
|
q4255
|
DirMetadata.set_mtime
|
train
|
def set_mtime(self, filename, mtime, size):
"""Store real file mtime in meta data.
        This is needed on FTP targets, because FTP servers don't allow setting
        the file mtime, but use the upload time instead.
We also record size and upload time, so we can detect if the file was
changed by other means and we have to discard our meta data.
"""
ut = time.time() # UTC time stamp
if self.target.server_time_ofs:
# We add the estimated time offset, so the stored 'u' time stamp matches
# better the mtime value that the server will generate for that file
ut += self.target.server_time_ofs
self.list[filename] = {"m": mtime, "s": size, "u": ut}
if self.PRETTY:
self.list[filename].update(
{"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)}
)
# print("set_mtime", self.list[filename])
self.modified_list = True
|
python
|
{
"resource": ""
}
|
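DirMetadata.set_mtime() above records mtime, size, and an offset-corrected upload time so that externally changed files can be detected later. The following hedged sketch shows how such a record could be compared against a fresh directory listing; the helper name, inputs, and tolerance are illustrative assumptions, not part of pyftpsync.

# Sketch only: mirrors the {"m": mtime, "s": size, "u": upload_time} record written
# by DirMetadata.set_mtime() above. The helper name and inputs are hypothetical;
# pyftpsync's own check lives in FileEntry/EntryPair.
def was_changed_externally(meta_record, entry_mtime, entry_size, eps=0.01):
    """Return True if the listed entry no longer matches the stored record."""
    if meta_record is None:
        return True  # no record at all -> treat as changed/unknown
    if entry_size != meta_record["s"]:
        return True
    # mtimes from FTP listings are coarse, so compare with a small tolerance
    return abs(entry_mtime - meta_record["m"]) > eps

record = {"m": 1_700_000_000.0, "s": 1024, "u": 1_700_000_005.0}
print(was_changed_externally(record, entry_mtime=1_700_000_000.0, entry_size=1024))  # False
print(was_changed_externally(record, entry_mtime=1_700_000_900.0, entry_size=1024))  # True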
q4256
|
DirMetadata.remove
|
train
|
def remove(self, filename):
"""Remove any data for the given file name."""
if self.list.pop(filename, None):
self.modified_list = True
if self.target.peer: # otherwise `scan` command
if self.target.is_local():
remote_target = self.target.peer
if remote_target.get_id() in self.dir["peer_sync"]:
rid = remote_target.get_id()
self.modified_sync = bool(
self.dir["peer_sync"][rid].pop(filename, None)
)
return
|
python
|
{
"resource": ""
}
|
q4257
|
DirMetadata.read
|
train
|
def read(self):
"""Initialize self from .pyftpsync-meta.json file."""
assert self.path == self.target.cur_dir
try:
self.modified_list = False
self.modified_sync = False
is_valid_file = False
s = self.target.read_text(self.filename)
# print("s", s)
if self.target.synchronizer:
self.target.synchronizer._inc_stat("meta_bytes_read", len(s))
self.was_read = True # True if a file exists (even invalid)
self.dir = json.loads(s)
# import pprint
# print("dir")
# print(pprint.pformat(self.dir))
self.dir = make_native_dict_keys(self.dir)
# print(pprint.pformat(self.dir))
self.list = self.dir["mtimes"]
self.peer_sync = self.dir["peer_sync"]
is_valid_file = True
# write"DirMetadata: read(%s)" % (self.filename, ), self.dir)
# except IncompatibleMetadataVersion:
# raise # We want version errors to terminate the app
except Exception as e:
write_error("Could not read meta info {}: {!r}".format(self, e))
# If the version is incompatible, we stop, unless:
# if --migrate is set, we simply ignore this file (and probably replace it
# with a current version)
if is_valid_file and self.dir.get("_file_version", 0) != self.VERSION:
if not self.target or not self.target.get_option("migrate"):
raise IncompatibleMetadataVersion(
"Invalid meta data version: {} (expected {}).\n"
"Consider passing --migrate to discard old data.".format(
self.dir.get("_file_version"), self.VERSION
)
)
#
write(
"Migrating meta data version from {} to {} (discarding old): {}".format(
self.dir.get("_file_version"), self.VERSION, self.filename
)
)
self.list = {}
self.peer_sync = {}
return
|
python
|
{
"resource": ""
}
|
q4258
|
DirMetadata.flush
|
train
|
def flush(self):
"""Write self to .pyftpsync-meta.json."""
# We DO write meta files even on read-only targets, but not in dry-run mode
# if self.target.readonly:
# write("DirMetadata.flush(%s): read-only; nothing to do" % self.target)
# return
assert self.path == self.target.cur_dir
if self.target.dry_run:
# write("DirMetadata.flush(%s): dry-run; nothing to do" % self.target)
pass
elif self.was_read and len(self.list) == 0 and len(self.peer_sync) == 0:
write("Remove empty meta data file: {}".format(self.target))
self.target.remove_file(self.filename)
elif not self.modified_list and not self.modified_sync:
# write("DirMetadata.flush(%s): unmodified; nothing to do" % self.target)
pass
else:
self.dir["_disclaimer"] = "Generated by https://github.com/mar10/pyftpsync"
self.dir["_time_str"] = pretty_stamp(time.time())
self.dir["_file_version"] = self.VERSION
self.dir["_version"] = __version__
self.dir["_time"] = time.mktime(time.gmtime())
# We always save utf-8 encoded.
# `ensure_ascii` would escape all bytes >127 as `\x12` or `\u1234`,
# which makes it hard to read, so we set it to false.
# `sort_keys` converts binary keys to unicode using utf-8, so we
            # must make sure that we don't pass cp1252 or other encoded data.
data = self.dir
opts = {"indent": 4, "sort_keys": True, "ensure_ascii": False}
if compat.PY2:
# The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3
# opts["encoding"] = "utf-8"
# Python 2 has problems with mixed keys (str/unicode)
data = decode_dict_keys(data, "utf-8")
if not self.PRETTY:
opts["indent"] = None
opts["separators"] = (",", ":")
s = json.dumps(data, **opts)
self.target.write_text(self.filename, s)
if self.target.synchronizer:
self.target.synchronizer._inc_stat("meta_bytes_written", len(s))
self.modified_list = False
self.modified_sync = False
|
python
|
{
"resource": ""
}
|
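flush() above switches between a pretty, indented JSON layout and a compact one depending on DirMetadata.PRETTY. A minimal standalone sketch of those two json.dumps configurations (the sample dict is made up and only approximates the real .pyftpsync-meta.json content):

import json

sample = {"_file_version": 2, "mtimes": {"a.txt": {"m": 1.0, "s": 3, "u": 2.0}}, "peer_sync": {}}

# PRETTY mode: indented and sorted; ensure_ascii=False keeps non-ASCII characters readable
pretty = json.dumps(sample, indent=4, sort_keys=True, ensure_ascii=False)

# compact mode: no indentation and tight separators, as used when PRETTY is False
compact = json.dumps(sample, indent=None, separators=(",", ":"), sort_keys=True, ensure_ascii=False)

print(len(pretty), len(compact))  # the compact form is noticeably smaller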
q4259
|
scan_handler
|
train
|
def scan_handler(parser, args):
"""Implement `scan` sub-command."""
opts = namespace_to_dict(args)
opts.update({"ftp_debug": args.verbose >= 6})
target = make_target(args.target, opts)
target.readonly = True
root_depth = target.root_dir.count("/")
start = time.time()
dir_count = 1
file_count = 0
processed_files = set()
opts = namespace_to_dict(args)
process_options(opts)
def _pred(entry):
"""Walker predicate that check match/exclude options."""
if not match_path(entry, opts):
return False
try:
target.open()
for e in target.walk(recursive=args.recursive, pred=_pred):
is_dir = isinstance(e, DirectoryEntry)
indent = " " * (target.cur_dir.count("/") - root_depth)
if is_dir:
dir_count += 1
else:
file_count += 1
if args.list:
if is_dir:
print(indent, "[{e.name}]".format(e=e))
else:
delta = e.mtime_org - e.mtime
dt_modified = pretty_stamp(e.mtime)
if delta:
prefix = "+" if delta > 0 else ""
print(
indent,
"{e.name:<40} {dt_modified} (system: {prefix}{delta})".format(
e=e,
prefix=prefix,
delta=timedelta(seconds=delta),
dt_modified=dt_modified,
),
)
else:
print(
indent,
"{e.name:<40} {dt_modified}".format(
e=e, dt_modified=dt_modified
),
)
if (
args.remove_meta
and target.cur_dir_meta
and target.cur_dir_meta.was_read
):
fspec = target.cur_dir_meta.get_full_path()
if fspec not in processed_files:
processed_files.add(fspec)
print("DELETE {}".format(fspec))
if (
args.remove_locks
and not is_dir
and e.name == DirMetadata.LOCK_FILE_NAME
):
fspec = e.get_rel_path()
print("DELETE {}".format(fspec))
finally:
target.close()
print(
"Scanning {:,} files in {:,} directories took {:02.2f} seconds.".format(
file_count, dir_count, time.time() - start
)
)
|
python
|
{
"resource": ""
}
|
q4260
|
TableCell.get_format_spec
|
train
|
def get_format_spec(self):
'''
The format specification according to the values of `align` and `width`
'''
return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
|
python
|
{
"resource": ""
}
|
q4261
|
Table.compute_column_width_and_height
|
train
|
def compute_column_width_and_height(self):
'''
        Compute and set the column width and row height for all cells in the table
'''
# skip tables with no row
if not self.rows:
return
# determine row height
for row in self.rows:
max_row_height = max((len(cell.get_cell_lines()) for cell in row.columns)) if row.columns else 1
for cell in row.columns:
cell.height = max_row_height
# determine maximum number of columns
max_columns = max([len(row.columns) for row in self.rows])
for column_idx in range(max_columns):
# determine max_column_width
row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows]
max_column_width = max((len(line) for line in chain(*row_cell_lines)))
# set column width in all rows
for row in self.rows:
if len(row.columns) > column_idx:
row.columns[column_idx].width = max_column_width
|
python
|
{
"resource": ""
}
|
q4262
|
get_parser
|
train
|
def get_parser():
""" Parses the arguments if script is run directly via console """
parser = argparse.ArgumentParser(description='Converts HTML from file or url to a clean text version')
parser.add_argument('input', nargs='?', default=None, help='Html input either from a file or an url (default:stdin)')
parser.add_argument('-o', '--output', type=str, help='Output file (default:stdout).')
parser.add_argument('-e', '--encoding', type=str, help='Content encoding for files (default:utf-8)', default='utf-8')
parser.add_argument('-i', '--display-image-captions', action='store_true', default=False, help='Display image captions (default:false).')
parser.add_argument('-l', '--display-link-targets', action='store_true', default=False, help='Display link targets (default:false).')
parser.add_argument('-d', '--deduplicate-image-captions', action='store_true', default=False, help='Deduplicate image captions (default:false).')
return parser
|
python
|
{
"resource": ""
}
|
q4263
|
Inscriptis.write_line
|
train
|
def write_line(self, force=False):
'''
Writes the current line to the buffer, provided that there is any
data to write.
        :returns:
            True, if a line has been written, otherwise False
'''
# only break the line if there is any relevant content
if not force and (not self.current_line[-1].content or self.current_line[-1].content.isspace()):
self.current_line[-1].margin_before = max(self.current_line[-1].margin_before,
self.current_tag[-1].margin_before)
return False
line = self.current_line[-1].get_text()
self.clean_text_lines[-1].append(line)
self.current_line[-1] = self.next_line[-1]
self.next_line[-1] = Line()
return True
|
python
|
{
"resource": ""
}
|
q4264
|
CssParse._attr_display
|
train
|
def _attr_display(value, html_element):
'''
Set the display value
'''
if value == 'block':
html_element.display = Display.block
elif value == 'none':
html_element.display = Display.none
else:
html_element.display = Display.inline
|
python
|
{
"resource": ""
}
|
q4265
|
read_file
|
train
|
def read_file(file_path, default_content=''):
"""
Read file at the specified path.
If file doesn't exist, it will be created with default-content.
Returns the file content.
"""
if not os.path.exists(file_path):
write_file(file_path, default_content)
handler = open(file_path, 'r')
content = handler.read()
handler.close()
return content or default_content
|
python
|
{
"resource": ""
}
|
q4266
|
write_file
|
train
|
def write_file(file_path, content):
"""
Write file at the specified path with content.
If file exists, it will be overwritten.
"""
handler = open(file_path, 'w+')
handler.write(content)
handler.close()
|
python
|
{
"resource": ""
}
|
q4267
|
set_maintenance_mode
|
train
|
def set_maintenance_mode(value):
"""
Set maintenance_mode state to state file.
"""
# If maintenance mode is defined in settings, it can't be changed.
if settings.MAINTENANCE_MODE is not None:
raise ImproperlyConfigured(
'Maintenance mode cannot be set dynamically '
'if defined in settings.')
if not isinstance(value, bool):
raise TypeError('value argument type is not boolean')
backend = get_maintenance_mode_backend()
backend.set_value(value)
|
python
|
{
"resource": ""
}
|
q4268
|
get_maintenance_response
|
train
|
def get_maintenance_response(request):
"""
Return a '503 Service Unavailable' maintenance response.
"""
if settings.MAINTENANCE_MODE_REDIRECT_URL:
return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL)
context = {}
if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
try:
get_request_context_func = import_string(
settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
except ImportError:
raise ImproperlyConfigured(
'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '
'is not a valid function path.'
)
context = get_request_context_func(request=request)
if django.VERSION < (1, 8):
kwargs = {'context_instance': RequestContext(request, context)}
else:
kwargs = {'context': context}
response = render(request, settings.MAINTENANCE_MODE_TEMPLATE,
status=settings.MAINTENANCE_MODE_STATUS_CODE,
**kwargs)
response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER
add_never_cache_headers(response)
return response
|
python
|
{
"resource": ""
}
|
q4269
|
need_maintenance_response
|
train
|
def need_maintenance_response(request):
"""
Tells if the given request needs a maintenance response or not.
"""
try:
view_match = resolve(request.path)
view_func = view_match[0]
view_dict = view_func.__dict__
view_force_maintenance_mode_off = view_dict.get(
'force_maintenance_mode_off', False)
if view_force_maintenance_mode_off:
# view has 'force_maintenance_mode_off' decorator
return False
view_force_maintenance_mode_on = view_dict.get(
'force_maintenance_mode_on', False)
if view_force_maintenance_mode_on:
# view has 'force_maintenance_mode_on' decorator
return True
except Resolver404:
pass
if not get_maintenance_mode():
return False
try:
url_off = reverse('maintenance_mode_off')
resolve(url_off)
if url_off == request.path_info:
return False
except NoReverseMatch:
# maintenance_mode.urls not added
pass
if hasattr(request, 'user'):
if django.VERSION < (1, 10):
if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
and request.user.is_anonymous():
return False
if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
and request.user.is_authenticated():
return False
else:
if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
and request.user.is_anonymous:
return False
if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
and request.user.is_authenticated:
return False
if settings.MAINTENANCE_MODE_IGNORE_STAFF \
and request.user.is_staff:
return False
if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \
and request.user.is_superuser:
return False
if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE:
try:
request_path = request.path if request.path else ''
if not request_path.endswith('/'):
request_path += '/'
admin_url = reverse('admin:index')
if request_path.startswith(admin_url):
return False
except NoReverseMatch:
# admin.urls not added
pass
if settings.MAINTENANCE_MODE_IGNORE_TESTS:
is_testing = False
if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \
or (len(sys.argv) > 1 and sys.argv[1] == 'test'):
# python runtests.py | python manage.py test | python
# setup.py test | django-admin.py test
is_testing = True
if is_testing:
return False
if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS:
try:
get_client_ip_address_func = import_string(
settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS)
except ImportError:
raise ImproperlyConfigured(
'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS '
'is not a valid function path.')
else:
client_ip_address = get_client_ip_address_func(request)
else:
client_ip_address = get_client_ip_address(request)
for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
ip_address_re = re.compile(ip_address)
if ip_address_re.match(client_ip_address):
return False
if settings.MAINTENANCE_MODE_IGNORE_URLS:
for url in settings.MAINTENANCE_MODE_IGNORE_URLS:
if not isinstance(url, pattern_class):
url = str(url)
url_re = re.compile(url)
if url_re.match(request.path_info):
return False
if settings.MAINTENANCE_MODE_REDIRECT_URL:
redirect_url_re = re.compile(
settings.MAINTENANCE_MODE_REDIRECT_URL)
if redirect_url_re.match(request.path_info):
return False
return True
|
python
|
{
"resource": ""
}
|
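need_maintenance_response() above first looks for 'force_maintenance_mode_off' / 'force_maintenance_mode_on' flags in the resolved view's __dict__. django-maintenance-mode ships its own decorators for this; the snippet below is only a hedged sketch of the flag mechanism being checked, with a hypothetical health_check view.

# Sketch of the flag mechanism checked above -- not the project's actual decorators.
def force_maintenance_mode_off(view_func):
    view_func.force_maintenance_mode_off = True
    return view_func

def force_maintenance_mode_on(view_func):
    view_func.force_maintenance_mode_on = True
    return view_func

@force_maintenance_mode_off
def health_check(request):
    # stays reachable even while the rest of the site returns 503
    ...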
q4270
|
LDAPLoginManager.format_results
|
train
|
def format_results(self, results):
"""
        Format the ldap results object into something that is reasonable
"""
if not results:
return None
userdn = results[0][0]
userobj = results[0][1]
userobj['dn'] = userdn
keymap = self.config.get('KEY_MAP')
if keymap:
return {key:scalar(userobj.get(value)) for key, value in keymap.items() if _is_utf8(scalar(userobj.get(value))) }
else:
return {key:scalar(value) for key, value in userobj.items() if _is_utf8(scalar(value)) }
|
python
|
{
"resource": ""
}
|
q4271
|
LDAPLoginManager.attrlist
|
train
|
def attrlist(self):
        'Transform the KEY_MAP parameter into an attrlist for ldap filters'
keymap = self.config.get('KEY_MAP')
if keymap:
# https://github.com/ContinuumIO/flask-ldap-login/issues/11
# https://continuumsupport.zendesk.com/agent/tickets/393
return [s.encode('utf-8') for s in keymap.values()]
else:
return None
|
python
|
{
"resource": ""
}
|
q4272
|
LDAPLoginManager.connect
|
train
|
def connect(self):
'initialize ldap connection and set options'
log.debug("Connecting to ldap server %s" % self.config['URI'])
self.conn = ldap.initialize(self.config['URI'])
# There are some settings that can't be changed at runtime without a context restart.
# It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX
# to 0, but this needs to be the last option set, and since the config dictionary is not
# sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX
# is present, it is applied last.
        # Use sorted() rather than list.sort() so this also works with Python 3 dict views.
        options = sorted(
            self.config.get('OPTIONS', {}).items(),
            key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX',
        )
for opt, value in options:
if isinstance(opt, str):
opt = getattr(ldap, opt)
try:
if isinstance(value, str):
value = getattr(ldap, value)
except AttributeError:
pass
self.conn.set_option(opt, value)
if self.config.get('START_TLS'):
log.debug("Starting TLS")
self.conn.start_tls_s()
|
python
|
{
"resource": ""
}
|
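For context on the option-ordering logic in connect() above, here is a hedged example of what the relevant configuration might look like. The option names are standard python-ldap constants, but the URI, file paths, and values are placeholders.

# Illustrative configuration for LDAPLoginManager.connect() (all values are placeholders).
# OPT_X_TLS_NEWCTX is included on purpose: connect() sorts it to the end so the TLS
# context is refreshed only after every other TLS option has been applied.
LDAP = {
    'URI': 'ldaps://ldap.example.com',
    'START_TLS': False,
    'OPTIONS': {
        'OPT_PROTOCOL_VERSION': 3,
        'OPT_X_TLS_REQUIRE_CERT': 'OPT_X_TLS_DEMAND',  # resolved via getattr(ldap, ...)
        'OPT_X_TLS_CACERTFILE': '/etc/ssl/certs/ca.pem',
        'OPT_X_TLS_NEWCTX': 0,  # applied last by the sort in connect()
    },
}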
q4273
|
LDAPLoginManager.ldap_login
|
train
|
def ldap_login(self, username, password):
"""
Authenticate a user using ldap. This will return a userdata dict
        if successful.
ldap_login will return None if the user does not exist or if the credentials are invalid
"""
self.connect()
if self.config.get('USER_SEARCH'):
result = self.bind_search(username, password)
else:
result = self.direct_bind(username, password)
return result
|
python
|
{
"resource": ""
}
|
q4274
|
HostIP.address
|
train
|
def address(self):
"""
IP Address using bacpypes Address format
"""
port = ""
if self._port:
port = ":{}".format(self._port)
return Address(
"{}/{}{}".format(
self.interface.ip.compressed,
self.interface.exploded.split("/")[-1],
port,
)
)
|
python
|
{
"resource": ""
}
|
q4275
|
HostIP._findIPAddr
|
train
|
def _findIPAddr(self):
"""
Retrieve the IP address connected to internet... used as
a default IP address when defining Script
        :returns: IP Address as String
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(("google.com", 0))
addr = s.getsockname()[0]
# print('Using ip : {addr}'.format(addr=addr))
s.close()
except socket.error:
raise NetworkInterfaceException(
"Impossible to retrieve IP, please provide one manually"
)
return addr
|
python
|
{
"resource": ""
}
|
q4276
|
HostIP._findSubnetMask
|
train
|
def _findSubnetMask(self, ip):
"""
        Retrieve the subnet mask of the interface bound to the given IP address,
        using ipconfig (Windows) or ifconfig (Linux or macOS)... used to derive
        the broadcast address when defining the script.
        :param ip: (str) IP address of the interface to inspect
        :returns: subnet mask as String
"""
ip = ip
if "win32" in sys.platform:
try:
proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = (
proc.stdout.readline()
.rstrip()
.split(b":")[-1]
.replace(b" ", b"")
.decode()
)
except:
raise NetworkInterfaceException("Cannot read IP parameters from OS")
else:
"""
This procedure could use more direct way of obtaining the broadcast IP
as it is really simple in Unix
ifconfig gives Bcast directly for example
or use something like :
iface = "eth0"
socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])
"""
pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})")
try:
proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = re.findall(pattern, line.decode())[0]
except:
mask = "255.255.255.255"
# self._log.debug('Mask found : %s' % mask)
return mask
|
python
|
{
"resource": ""
}
|
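The inline comment in _findSubnetMask() above notes that Unix offers a more direct route via fcntl.ioctl. A hedged, Linux-only sketch of that approach (0x891b is SIOCGIFNETMASK; the interface name is an assumption for illustration -- on Windows or macOS the subprocess parsing above is still needed):

import fcntl
import socket
import struct

def netmask_for_interface(iface="eth0"):
    """Return the dotted-quad netmask of a network interface (Linux only)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(s.fileno(), 0x891B, struct.pack("256s", iface.encode()[:15]))
        return socket.inet_ntoa(packed[20:24])
    finally:
        s.close()

# print(netmask_for_interface("eth0"))  # e.g. '255.255.255.0'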
q4277
|
SQLMixin._read_from_sql
|
train
|
def _read_from_sql(self, request, db_name):
"""
        Use contextlib.closing so the database connection is closed as soon as
        the query completes.
"""
with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con:
return sql.read_sql(sql=request, con=con)
|
python
|
{
"resource": ""
}
|
q4278
|
SQLMixin.backup_histories_df
|
train
|
def backup_histories_df(self):
"""
Build a dataframe of the point histories
"""
backup = {}
for point in self.points:
if point.history.dtypes == object:
backup[point.properties.name] = (
point.history.replace(["inactive", "active"], [0, 1])
.resample("1s")
.mean()
)
else:
backup[point.properties.name] = point.history.resample("1s").mean()
df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()]))
return df.fillna(method="ffill")
|
python
|
{
"resource": ""
}
|
q4279
|
SQLMixin.save
|
train
|
def save(self, filename=None):
"""
Save the point histories to sqlite3 database.
Save the device object properties to a pickle file so the device can be reloaded.
"""
if filename:
if ".db" in filename:
filename = filename.split(".")[0]
self.properties.db_name = filename
else:
self.properties.db_name = "{}".format(self.properties.name)
# Does file exist? If so, append data
if os.path.isfile("{}.db".format(self.properties.db_name)):
his = self._read_from_sql(
'select * from "{}"'.format("history"), self.properties.db_name
)
his.index = his["index"].apply(Timestamp)
try:
last = his.index[-1]
df_to_backup = self.backup_histories_df()[last:]
except IndexError:
df_to_backup = self.backup_histories_df()
else:
self._log.debug("Creating a new backup database")
df_to_backup = self.backup_histories_df()
# DataFrames that will be saved to SQL
with contextlib.closing(
sqlite3.connect("{}.db".format(self.properties.db_name))
) as con:
sql.to_sql(
df_to_backup,
name="history",
con=con,
index_label="index",
index=True,
if_exists="append",
)
# Saving other properties to a pickle file...
prop_backup = {}
prop_backup["device"] = self.dev_properties_df()
prop_backup["points"] = self.points_properties_df()
with open("{}.bin".format(self.properties.db_name), "wb") as file:
pickle.dump(prop_backup, file)
self._log.info("Device saved to {}.db".format(self.properties.db_name))
|
python
|
{
"resource": ""
}
|
q4280
|
SQLMixin.points_from_sql
|
train
|
def points_from_sql(self, db_name):
"""
Retrieve point list from SQL database
"""
points = self._read_from_sql("SELECT * FROM history;", db_name)
return list(points.columns.values)[1:]
|
python
|
{
"resource": ""
}
|
q4281
|
SQLMixin.his_from_sql
|
train
|
def his_from_sql(self, db_name, point):
"""
        Retrieve point histories from SQL database
"""
his = self._read_from_sql('select * from "%s"' % "history", db_name)
his.index = his["index"].apply(Timestamp)
return his.set_index("index")[point]
|
python
|
{
"resource": ""
}
|
q4282
|
SQLMixin.read_point_prop
|
train
|
def read_point_prop(self, device_name, point):
"""
Points properties retrieved from pickle
"""
with open("%s.bin" % device_name, "rb") as file:
return pickle.load(file)["points"][point]
|
python
|
{
"resource": ""
}
|
q4283
|
SQLMixin.read_dev_prop
|
train
|
def read_dev_prop(self, device_name):
"""
Device properties retrieved from pickle
"""
with open("{}.bin".format(device_name), "rb") as file:
return pickle.load(file)["device"]
|
python
|
{
"resource": ""
}
|
q4284
|
BooleanPoint.value
|
train
|
def value(self):
"""
Read the value from BACnet network
"""
try:
res = self.properties.device.properties.network.read(
"{} {} {} presentValue".format(
self.properties.device.properties.address,
self.properties.type,
str(self.properties.address),
)
)
self._trend(res)
except Exception:
raise Exception("Problem reading : {}".format(self.properties.name))
if res == "inactive":
self._key = 0
self._boolKey = False
else:
self._key = 1
self._boolKey = True
return res
|
python
|
{
"resource": ""
}
|
q4285
|
EnumPointOffline.value
|
train
|
def value(self):
"""
Take last known value as the value
"""
try:
value = self.lastValue
except IndexError:
value = "NaN"
except ValueError:
value = "NaN"
return value
|
python
|
{
"resource": ""
}
|
q4286
|
Stats_Mixin.network_stats
|
train
|
def network_stats(self):
"""
        Used by Flask to show information on the network
"""
statistics = {}
mstp_networks = []
mstp_map = {}
ip_devices = []
bacoids = []
mstp_devices = []
for address, bacoid in self.whois_answer[0].keys():
if ":" in address:
net, mac = address.split(":")
mstp_networks.append(net)
mstp_devices.append(mac)
try:
mstp_map[net].append(mac)
except KeyError:
mstp_map[net] = []
mstp_map[net].append(mac)
else:
net = "ip"
mac = address
ip_devices.append(address)
bacoids.append((bacoid, address))
mstpnetworks = sorted(set(mstp_networks))
statistics["mstp_networks"] = mstpnetworks
statistics["ip_devices"] = sorted(ip_devices)
statistics["bacoids"] = sorted(bacoids)
statistics["mstp_map"] = mstp_map
statistics["timestamp"] = str(datetime.now())
statistics["number_of_devices"] = self.number_of_devices
statistics["number_of_registered_devices"] = len(self.registered_devices)
statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
return statistics
|
python
|
{
"resource": ""
}
|
q4287
|
DeviceConnected.connect
|
train
|
def connect(self, *, db=None):
"""
A connected device can be switched to 'database mode' where the device will
not use the BACnet network but instead obtain its contents from a previously
stored database.
"""
if db:
self.poll(command="stop")
self.properties.db_name = db.split(".")[0]
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Already connected, provide db arg if you want to connect to db"
)
|
python
|
{
"resource": ""
}
|
q4288
|
DeviceConnected.df
|
train
|
def df(self, list_of_points, force_read=True):
"""
When connected, calling DF should force a reading on the network.
"""
his = []
for point in list_of_points:
try:
his.append(self._findPoint(point, force_read=force_read).history)
except ValueError as ve:
self._log.error("{}".format(ve))
continue
if not _PANDAS:
return dict(zip(list_of_points, his))
return pd.DataFrame(dict(zip(list_of_points, his)))
|
python
|
{
"resource": ""
}
|
q4289
|
DeviceConnected._buildPointList
|
train
|
def _buildPointList(self):
"""
        Upon connection, build the device point list and read the device properties.
"""
try:
self.properties.pss.value = self.properties.network.read(
"{} device {} protocolServicesSupported".format(
self.properties.address, self.properties.device_id
)
)
except NoResponseFromController as error:
self._log.error("Controller not found, aborting. ({})".format(error))
return ("Not Found", "", [], [])
except SegmentationNotSupported as error:
self._log.warning("Segmentation not supported")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
self.properties.name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
self._log.info(
"Device {}:[{}] found... building points list".format(
self.properties.device_id, self.properties.name
)
)
try:
self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
self.custom_object_list
)
if self.properties.pollDelay > 0:
self.poll(delay=self.properties.pollDelay)
except NoResponseFromController as error:
self._log.error("Cannot retrieve object list, disconnecting...")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
except IndexError as error:
self._log.error("Device creation failed... disconnecting")
self.new_state(DeviceDisconnected)
|
python
|
{
"resource": ""
}
|
q4290
|
DeviceConnected._findPoint
|
train
|
def _findPoint(self, name, force_read=True):
"""
Used by getter and setter functions
"""
for point in self.points:
if point.properties.name == name:
if force_read:
point.value
return point
raise ValueError("{} doesn't exist in controller".format(name))
|
python
|
{
"resource": ""
}
|
q4291
|
discoverPoints
|
train
|
def discoverPoints(bacnetapp, address, devID):
"""
Discover the BACnet points in a BACnet device.
    :param bacnetapp: the BAC0/bacpypes application object, used to issue read requests
    :param address: address of the device as a string (ex. '2:5')
    :param devID: device ID of the bacnet device as a string (ex. '1001')
    :returns: a tuple of (deviceName, pss, objList, df, points)
        * *deviceName* : name of the device
        * *pss* : protocol services supported
        * *objList* : list of bacnet objects (ex. analogInput, 1)
        * *df* : a DataFrame containing pointType, pointAddress, pointName, description,
          presentValue and units. If pandas can't be found, df will be a simple array
        * *points* : list of Point objects (BooleanPoint, EnumPoint or NumericPoint)
"""
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
    objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points)
|
python
|
{
"resource": ""
}
|
q4292
|
CubicBezier.point
|
train
|
def point(self, pos):
"""Calculate the x,y position at a certain position of the path"""
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end)
|
python
|
{
"resource": ""
}
|
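svg.path represents 2D points as complex numbers (x + y*1j), so the expression in point() above is plain complex arithmetic. A quick standalone check of the cubic Bezier polynomial with made-up control points:

# Standalone check of the cubic Bezier polynomial used in point() above.
# These control points are made up for illustration.
start, c1, c2, end = 0 + 0j, 1 + 2j, 3 + 2j, 4 + 0j

def cubic_point(pos):
    return ((1 - pos) ** 3 * start
            + 3 * (1 - pos) ** 2 * pos * c1
            + 3 * (1 - pos) * pos ** 2 * c2
            + pos ** 3 * end)

print(cubic_point(0.0))  # (0+0j)   -> the start point
print(cubic_point(0.5))  # (2+1.5j) -> midpoint of this symmetric curve
print(cubic_point(1.0))  # (4+0j)   -> the end point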
q4293
|
WebSocket.register_blueprint
|
train
|
def register_blueprint(self, blueprint, **options):
'''
Registers a blueprint on the WebSockets.
'''
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
|
python
|
{
"resource": ""
}
|
q4294
|
adjust_lineno
|
train
|
def adjust_lineno(filename, lineno, name):
"""Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number.
"""
line = linecache.getline(filename, lineno)
# Hack warning: might be fooled by comments
rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]')
while line and not rx.search(line):
lineno += 1
line = linecache.getline(filename, lineno)
return lineno
|
python
|
{
"resource": ""
}
|
q4295
|
ModuleGraph.parsePathname
|
train
|
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
|
python
|
{
"resource": ""
}
|
q4296
|
ModuleGraph.writeCache
|
train
|
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
|
python
|
{
"resource": ""
}
|
q4297
|
ModuleGraph.readCache
|
train
|
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
|
python
|
{
"resource": ""
}
|
q4298
|
ModuleGraph.parseFile
|
train
|
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
|
python
|
{
"resource": ""
}
|
q4299
|
ModuleGraph.filenameToModname
|
train
|
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
|
python
|
{
"resource": ""
}
|
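filenameToModname() above converts a file path into a dotted module name by climbing the directory tree while each level contains an __init__.py. A hedged standalone re-statement of that walk (the path in the trailing comment is illustrative):

import os

def filename_to_modname(filename, exts=(".py",)):
    """Turn a source file path into a dotted module name by walking up packages."""
    for ext in exts:
        if filename.endswith(ext):
            filename = filename[: -len(ext)]
            break
    parts = os.path.abspath(filename).split(os.path.sep)
    modname = [parts.pop()]
    # keep climbing while the parent directory is a package (has an __init__.py)
    while parts and os.path.exists(os.path.sep.join(parts + ["__init__.py"])):
        modname.append(parts.pop())
    return ".".join(reversed(modname))

# e.g. /src/mypkg/sub/mod.py -> "mypkg.sub.mod" when mypkg/ and sub/ contain __init__.py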