_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q2600
|
SMB2QueryDirectoryRequest.unpack_response
|
train
|
def unpack_response(file_information_class, buffer):
"""
Pass in the buffer value from the response object to unpack it and
return a list of query response structures for the request.
:param buffer: The raw bytes value of the SMB2QueryDirectoryResponse
buffer field.
:return: List of query_info.* structures based on the
FileInformationClass used in the initial query request.
"""
structs = smbprotocol.query_info
resp_structure = {
FileInformationClass.FILE_DIRECTORY_INFORMATION:
structs.FileDirectoryInformation,
FileInformationClass.FILE_NAMES_INFORMATION:
structs.FileNamesInformation,
FileInformationClass.FILE_BOTH_DIRECTORY_INFORMATION:
structs.FileBothDirectoryInformation,
FileInformationClass.FILE_ID_BOTH_DIRECTORY_INFORMATION:
structs.FileIdBothDirectoryInformation,
FileInformationClass.FILE_FULL_DIRECTORY_INFORMATION:
structs.FileFullDirectoryInformation,
FileInformationClass.FILE_ID_FULL_DIRECTORY_INFORMATION:
structs.FileIdFullDirectoryInformation,
}[file_information_class]
query_results = []
current_offset = 0
is_next = True
while is_next:
result = resp_structure()
result.unpack(buffer[current_offset:])
query_results.append(result)
current_offset += result['next_entry_offset'].get_value()
is_next = result['next_entry_offset'].get_value() != 0
return query_results
|
python
|
{
"resource": ""
}
|
q2601
|
Open.read
|
train
|
def read(self, offset, length, min_length=0, unbuffered=False, wait=True,
send=True):
"""
Reads from an opened file or pipe.
Supports an out of band send function; call this function with send=False
to return a tuple of (SMB2ReadRequest, receive_func) instead of
sending the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to send it out of band.
:param offset: The offset to start the read of the file.
:param length: The number of bytes to read from the offset.
:param min_length: The minimum number of bytes to be read for a
successful operation.
:param unbuffered: Whether to request that the server does not cache the
read data at intermediate layers, only valid for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: A byte string of the bytes read
"""
if length > self.connection.max_read_size:
raise SMBException("The requested read length %d is greater than "
"the maximum negotiated read size %d"
% (length, self.connection.max_read_size))
read = SMB2ReadRequest()
read['length'] = length
read['offset'] = offset
read['minimum_count'] = min_length
read['file_id'] = self.file_id
read['padding'] = b"\x50"
if unbuffered:
if self.connection.dialect < Dialects.SMB_3_0_2:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_3_0_2,
"SMB2_READFLAG_READ_UNBUFFERED",
True)
read['flags'].set_flag(ReadFlags.SMB2_READFLAG_READ_UNBUFFERED)
if not send:
return read, self._read_response
log.info("Session: %s, Tree Connect ID: %s - sending SMB2 Read "
"Request for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(read))
request = self.connection.send(read,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._read_response(request, wait)
|
python
|
{
"resource": ""
}
|
q2602
|
Open.write
|
train
|
def write(self, data, offset=0, write_through=False, unbuffered=False,
wait=True, send=True):
"""
Writes data to an opened file.
Supports an out of band send function; call this function with send=False
to return a tuple of (SMB2WriteRequest, receive_func) instead of
sending the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to send it out of band.
:param data: The bytes data to write.
:param offset: The offset in the file to write the bytes at
:param write_through: Whether written data is persisted to the
underlying storage, not valid for SMB 2.0.2.
:param unbuffered: Whether to request that the server does not cache the
write data at intermediate layers, only valid for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The number of bytes written
"""
data_len = len(data)
if data_len > self.connection.max_write_size:
raise SMBException("The requested write length %d is greater than "
"the maximum negotiated write size %d"
% (data_len, self.connection.max_write_size))
write = SMB2WriteRequest()
write['length'] = len(data)
write['offset'] = offset
write['file_id'] = self.file_id
write['buffer'] = data
if write_through:
if self.connection.dialect < Dialects.SMB_2_1_0:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_2_1_0,
"SMB2_WRITEFLAG_WRITE_THROUGH",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_THROUGH)
if unbuffered:
if self.connection.dialect < Dialects.SMB_3_0_2:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_3_0_2,
"SMB2_WRITEFLAG_WRITE_UNBUFFERED",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_UNBUFFERED)
if not send:
return write, self._write_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Write Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(write))
request = self.connection.send(write,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._write_response(request, wait)
|
python
|
{
"resource": ""
}
|
q2603
|
Open.flush
|
train
|
def flush(self, send=True):
"""
A command sent by the client to request that a server flush all cached
file information for the opened file.
Supports an out of band send function; call this function with send=False
to return a tuple of (SMB2FlushRequest, receive_func) instead of
sending the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to send it out of band.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The SMB2FlushResponse received from the server
"""
flush = SMB2FlushRequest()
flush['file_id'] = self.file_id
if not send:
return flush, self._flush_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Flush Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(flush))
request = self.connection.send(flush,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._flush_response(request)
|
python
|
{
"resource": ""
}
|
q2604
|
Open.close
|
train
|
def close(self, get_attributes=False, send=True):
"""
Closes an opened file.
Supports an out of band send function; call this function with send=False
to return a tuple of (SMB2CloseRequest, receive_func) instead of
sending the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to send it out of band.
:param get_attributes: (Bool) whether to get the latest attributes on
the close and set them on the Open object
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: SMB2CloseResponse message received from the server
"""
# it is already closed and this isn't for an out of band request
if not self._connected and send:
return
close = SMB2CloseRequest()
close['file_id'] = self.file_id
if get_attributes:
close['flags'] = CloseFlags.SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB
if not send:
return close, self._close_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Close Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(close))
request = self.connection.send(close,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._close_response(request)
|
python
|
{
"resource": ""
}
|
q2605
|
SIDPacket.from_string
|
train
|
def from_string(self, sid_string):
"""
Used to set the structure parameters based on the input string
:param sid_string: String of the sid in S-x-x-x-x form
"""
if not sid_string.startswith("S-"):
raise ValueError("A SID string must start with S-")
sid_entries = sid_string.split("-")
if len(sid_entries) < 3:
raise ValueError("A SID string must start with S and contain a "
"revision and identifier authority, e.g. S-1-0")
revision = int(sid_entries[1])
id_authority = int(sid_entries[2])
sub_authorities = [int(i) for i in sid_entries[3:]]
self['revision'].set_value(revision)
self['identifier_authority'].set_value(id_authority)
self['sub_authorities'] = sub_authorities
|
python
|
{
"resource": ""
}
|
q2606
|
Pushover.sounds
|
train
|
def sounds(self):
"""Return a dictionary of sounds recognized by Pushover and that can be
used in a notification message.
"""
if not Pushover._SOUNDS:
request = Request("get", SOUND_URL, {"token": self.token})
Pushover._SOUNDS = request.answer["sounds"]
return Pushover._SOUNDS
|
python
|
{
"resource": ""
}
|
q2607
|
Pushover.message
|
train
|
def message(self, user, message, **kwargs):
"""Send `message` to the user specified by `user`. It is possible
to specify additional properties of the message by passing keyword
arguments. The list of valid keywords is ``title, priority, sound,
callback, timestamp, url, url_title, device, retry, expire and html``
which are described in the Pushover API documentation.
For convenience, you can simply set ``timestamp=True`` to set the
timestamp to the current timestamp.
An image can be attached to a message by passing a file-like object
to the `attachment` keyword argument.
This method returns a :class:`MessageRequest` object.
"""
payload = {"message": message, "user": user, "token": self.token}
for key, value in kwargs.iteritems():
if key not in Pushover.message_keywords:
raise ValueError("{0}: invalid message parameter".format(key))
elif key == "timestamp" and value is True:
payload[key] = int(time.time())
elif key == "sound" and value not in self.sounds:
raise ValueError("{0}: invalid sound".format(value))
else:
payload[key] = value
return MessageRequest(payload)
|
python
|
{
"resource": ""
}
|
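A minimal usage sketch for Pushover.message above; the Pushover("app-token") constructor call and the user key are assumptions, since the class constructor is not part of these rows:

# Hypothetical usage sketch; the constructor and keys are assumed, not shown above.
client = Pushover("app-token")
req = client.message("user-key", "Backup finished",
                     title="nightly job", timestamp=True)
# message() returns a MessageRequest object describing the queued API call.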
q2608
|
Pushover.glance
|
train
|
def glance(self, user, **kwargs):
"""Send a glance to the user. The default property is ``text``, as this
is used on most glances, however a valid glance does not need to
require text and can be constructed using any combination of valid
keyword properties. The list of valid keywords is ``title, text,
subtext, count, percent and device`` which are described in the
Pushover Glance API documentation.
This method returns a :class:`GlanceRequest` object.
"""
payload = {"user": user, "token": self.token}
for key, value in kwargs.iteritems():
if key not in Pushover.glance_keywords:
raise ValueError("{0}: invalid glance parameter".format(key))
else:
payload[key] = value
return Request("post", GLANCE_URL, payload)
|
python
|
{
"resource": ""
}
|
q2609
|
mswe
|
train
|
def mswe(w, v):
"""
Calculate mean squared weight error between estimated and true filter
coefficients, with respect to iterations.
Parameters
----------
v : array-like
True coefficients used to generate desired signal, must be a
one-dimensional array.
w : array-like
Estimated coefficients from adaptive filtering algorithm. Must be an
N x M matrix where N is the number of iterations, and M is the number
of filter coefficients.
Returns
-------
mswe : numpy.array
One-dimensional array containing the mean-squared weight error for
every iteration.
Raises
------
TypeError
If inputs have wrong dimensions
Note
----
To use this function with the adaptive filter functions set the optional
parameter returnCoeffs to True. This will return a coefficient matrix w
corresponding with the input-parameter w.
"""
# Ensure inputs are numpy arrays
w = np.array(w)
v = np.array(v)
# Check dimensions
if(len(w.shape) != 2):
raise TypeError('Estimated coefficients must be in NxM matrix')
if(len(v.shape) != 1):
raise TypeError('Real coefficients must be in 1d array')
# Ensure equal length between estimated and real coeffs
N, M = w.shape
L = v.size
if(M < L):
v = v[:-(L-M)]
elif(M > L):
v = np.concatenate((v, np.zeros(M-L)))
# Calculate and return MSWE
mswe = np.mean((w - v)**2, axis=1)
return mswe
|
python
|
{
"resource": ""
}
|
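A small self-contained illustration of calling mswe from the row above (the coefficient values are made up for the example):

import numpy as np

v_true = np.array([0.5, -0.2])            # true filter coefficients
w_hist = np.array([[0.0,  0.0],           # one row of estimates per iteration
                   [0.4, -0.1],
                   [0.5, -0.2]])
print(mswe(w_hist, v_true))               # -> [0.145 0.01  0.  ]; the last estimate is exact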
q2610
|
BaseAutoCompleteField.has_changed
|
train
|
def has_changed(self, initial, data):
"Detects if the data was changed. This is added in 1.6."
if initial is None and data is None:
return False
if data and not hasattr(data, '__iter__'):
data = self.widget.decompress(data)
initial = self.to_python(initial)
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
if isinstance(data, Model) and isinstance(initial, Model):
return model_vars(data) != model_vars(initial)
else:
return data != initial
|
python
|
{
"resource": ""
}
|
q2611
|
results_decorator
|
train
|
def results_decorator(func):
"""
Helper for constructing simple decorators around Lookup.results.
func is a function which takes a request as the first parameter. If func
returns an HttpResponse it is returned, otherwise the original Lookup.results
is returned.
"""
# Wrap function to maintain the original doc string, etc
@wraps(func)
def decorator(lookup_cls):
# Construct a class decorator from the original function
original = lookup_cls.results
def inner(self, request):
# Wrap lookup_cls.results by first calling func and checking the result
result = func(request)
if isinstance(result, HttpResponse):
return result
return original(self, request)
# Replace original lookup_cls.results with wrapped version
lookup_cls.results = inner
return lookup_cls
# Return the constructed decorator
return decorator
|
python
|
{
"resource": ""
}
|
q2612
|
login_required
|
train
|
def login_required(request):
"Lookup decorator to require the user to be authenticated."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401)
|
python
|
{
"resource": ""
}
|
q2613
|
staff_member_required
|
train
|
def staff_member_required(request):
"Lookup decorator to require the user is a staff member."
user = getattr(request, 'user', None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) # Unauthorized
elif not user.is_staff:
return HttpResponseForbidden()
|
python
|
{
"resource": ""
}
|
q2614
|
LookupBase.format_item
|
train
|
def format_item(self, item):
"Construct result dictionary for the match item."
result = {
'id': self.get_item_id(item),
'value': self.get_item_value(item),
'label': self.get_item_label(item),
}
for key in settings.SELECTABLE_ESCAPED_KEYS:
if key in result:
result[key] = conditional_escape(result[key])
return result
|
python
|
{
"resource": ""
}
|
q2615
|
LookupBase.paginate_results
|
train
|
def paginate_results(self, results, options):
"Return a django.core.paginator.Page of results."
limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
paginator = Paginator(results, limit)
page = options.get('page', 1)
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
return results
|
python
|
{
"resource": ""
}
|
q2616
|
LookupBase.results
|
train
|
def results(self, request):
"Match results to given term and return the serialized HttpResponse."
results = {}
form = self.form(request.GET)
if form.is_valid():
options = form.cleaned_data
term = options.get('term', '')
raw_data = self.get_query(request, term)
results = self.format_results(raw_data, options)
return self.response(results)
|
python
|
{
"resource": ""
}
|
q2617
|
LookupBase.format_results
|
train
|
def format_results(self, raw_data, options):
'''
Returns a python structure that later gets serialized.
raw_data
full list of objects matching the search term
options
a dictionary of the given options
'''
page_data = self.paginate_results(raw_data, options)
results = {}
meta = options.copy()
meta['more'] = _('Show more results')
if page_data and page_data.has_next():
meta['next_page'] = page_data.next_page_number()
if page_data and page_data.has_previous():
meta['prev_page'] = page_data.previous_page_number()
results['data'] = [self.format_item(item) for item in page_data.object_list]
results['meta'] = meta
return results
|
python
|
{
"resource": ""
}
|
q2618
|
import_lookup_class
|
train
|
def import_lookup_class(lookup_class):
"""
Import lookup_class as a dotted base and ensure it extends LookupBase
"""
from selectable.base import LookupBase
if isinstance(lookup_class, string_types):
mod_str, cls_str = lookup_class.rsplit('.', 1)
mod = import_module(mod_str)
lookup_class = getattr(mod, cls_str)
if not issubclass(lookup_class, LookupBase):
raise TypeError('lookup_class must extend from selectable.base.LookupBase')
return lookup_class
|
python
|
{
"resource": ""
}
|
q2619
|
BaseLookupForm.clean_limit
|
train
|
def clean_limit(self):
"Ensure given limit is less than default if defined"
limit = self.cleaned_data.get('limit', None)
if (settings.SELECTABLE_MAX_LIMIT is not None and
(not limit or limit > settings.SELECTABLE_MAX_LIMIT)):
limit = settings.SELECTABLE_MAX_LIMIT
return limit
|
python
|
{
"resource": ""
}
|
q2620
|
AccessRateWatcher.waitAccessAsync
|
train
|
async def waitAccessAsync(self):
""" Wait the needed time before sending a request to honor rate limit. """
async with self.lock:
while True:
last_access_ts = self.__getLastAccess()
if last_access_ts is not None:
now = time.time()
last_access_ts = last_access_ts[0]
time_since_last_access = now - last_access_ts
if time_since_last_access < self.min_delay_between_accesses:
time_to_wait = self.min_delay_between_accesses - time_since_last_access
if self.jitter_range_ms is not None:
time_to_wait += random.randint(*self.jitter_range_ms) / 1000
self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000,
self.domain))
await asyncio.sleep(time_to_wait)
access_time = time.time()
self.__access(access_time)
# now we should be good... except if another process did the same query at the same time
# the database serves as an atomic lock, query again to be sure the last row is the one
# we just inserted
last_access_ts = self.__getLastAccess()
if last_access_ts[0] == access_time:
break
|
python
|
{
"resource": ""
}
|
q2621
|
AccessRateWatcher.__access
|
train
|
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain))
|
python
|
{
"resource": ""
}
|
q2622
|
aiohttp_socket_timeout
|
train
|
def aiohttp_socket_timeout(socket_timeout_s):
""" Return a aiohttp.ClientTimeout object with only socket timeouts set. """
return aiohttp.ClientTimeout(total=None,
connect=None,
sock_connect=socket_timeout_s,
sock_read=socket_timeout_s)
|
python
|
{
"resource": ""
}
|
q2623
|
Http.isReachable
|
train
|
async def isReachable(self, url, *, headers=None, verify=True, response_headers=None, cache=None):
""" Send a HEAD request with short timeout or get data from cache, return True if ressource has 2xx status code, False instead. """
if (cache is not None) and (url in cache):
# try from cache first
self.logger.debug("Got headers for URL '%s' from cache" % (url))
resp_ok, response_headers = pickle.loads(cache[url])
return resp_ok
domain_rate_watcher = rate_watcher.AccessRateWatcher(self.watcher_db_filepath,
url,
self.min_delay_between_accesses,
jitter_range_ms=self.jitter_range_ms,
logger=self.logger)
resp_ok = True
try:
for attempt, time_to_sleep in enumerate(redo.retrier(max_attempts=HTTP_MAX_ATTEMPTS,
sleeptime=0.5,
max_sleeptime=HTTP_MAX_RETRY_SLEEP_SHORT_S,
sleepscale=1.5),
1):
await domain_rate_watcher.waitAccessAsync()
try:
async with self.session.head(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify) as response:
pass
except (asyncio.TimeoutError, aiohttp.ClientError) as e:
self.logger.warning("Probing '%s' failed (attempt %u/%u): %s %s" % (url,
attempt,
HTTP_MAX_ATTEMPTS,
e.__class__.__qualname__,
e))
if attempt == HTTP_MAX_ATTEMPTS:
resp_ok = False
else:
self.logger.debug("Retrying in %.3fs" % (time_to_sleep))
await asyncio.sleep(time_to_sleep)
else:
response.raise_for_status()
if response_headers is not None:
response_headers.update(response.headers)
break # http retry loop
except aiohttp.ClientResponseError as e:
self.logger.debug("Probing '%s' failed: %s %s" % (url, e.__class__.__qualname__, e))
resp_ok = False
if cache is not None:
# store in cache
cache[url] = pickle.dumps((resp_ok, response_headers))
return resp_ok
|
python
|
{
"resource": ""
}
|
q2624
|
Http.fastStreamedQuery
|
train
|
async def fastStreamedQuery(self, url, *, headers=None, verify=True):
""" Send a GET request with short timeout, do not retry, and return streamed response. """
response = await self.session.get(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify)
response.raise_for_status()
return response
|
python
|
{
"resource": ""
}
|
q2625
|
LastFmCoverSource.processQueryString
|
train
|
def processQueryString(self, s):
""" See CoverSource.processQueryString. """
char_blacklist = set(string.punctuation)
char_blacklist.remove("'")
char_blacklist.remove("&")
char_blacklist = frozenset(char_blacklist)
return __class__.unpunctuate(s.lower(), char_blacklist=char_blacklist)
|
python
|
{
"resource": ""
}
|
q2626
|
search_and_download
|
train
|
async def search_and_download(album, artist, format, size, out_filepath, *, size_tolerance_prct, amazon_tlds, no_lq_sources,
async_loop):
""" Search and download a cover, return True if success, False instead. """
# register sources
source_args = (size, size_tolerance_prct)
cover_sources = [sources.LastFmCoverSource(*source_args),
sources.AmazonCdCoverSource(*source_args),
sources.AmazonDigitalCoverSource(*source_args)]
for tld in amazon_tlds:
cover_sources.append(sources.AmazonCdCoverSource(*source_args, tld=tld))
if not no_lq_sources:
cover_sources.append(sources.GoogleImagesWebScrapeCoverSource(*source_args))
# schedule search work
search_futures = []
for cover_source in cover_sources:
coroutine = cover_source.search(album, artist)
future = asyncio.ensure_future(coroutine, loop=async_loop)
search_futures.append(future)
# wait for it
await asyncio.wait(search_futures, loop=async_loop)
# get results
results = []
for future in search_futures:
source_results = future.result()
results.extend(source_results)
# sort results
results = await CoverSourceResult.preProcessForComparison(results, size, size_tolerance_prct)
results.sort(reverse=True,
key=functools.cmp_to_key(functools.partial(CoverSourceResult.compare,
target_size=size,
size_tolerance_prct=size_tolerance_prct)))
if not results:
logging.getLogger("Main").info("No results")
# download
for result in results:
try:
await result.get(format, size, size_tolerance_prct, out_filepath)
except Exception as e:
logging.getLogger("Main").warning("Download of %s failed: %s %s" % (result,
e.__class__.__qualname__,
e))
continue
else:
return True
return False
|
python
|
{
"resource": ""
}
|
q2627
|
AmazonDigitalCoverSource.generateImgUrls
|
train
|
def generateImgUrls(self, product_id, dynapi_key, format_id, slice_count):
""" Generate URLs for slice_count^2 subimages of a product. """
for x in range(slice_count):
for y in range(slice_count):
yield ("http://z2-ec2.images-amazon.com/R/1/a=" + product_id +
"+c=" + dynapi_key +
"+d=_SCR%28" + str(format_id) + "," + str(x) + "," + str(y) + "%29_=.jpg")
|
python
|
{
"resource": ""
}
|
q2628
|
retrier
|
train
|
def retrier(*, max_attempts, sleeptime, max_sleeptime, sleepscale=1.5, jitter=0.2):
""" Generator yielding time to wait for, after the attempt, if it failed. """
assert(max_attempts > 1)
assert(sleeptime >= 0)
assert(0 <= jitter <= sleeptime)
assert(sleepscale >= 1)
cur_sleeptime = min(max_sleeptime, sleeptime)
for attempt in range(max_attempts):
cur_jitter = random.randint(int(-jitter * 1000), int(jitter * 1000)) / 1000
yield max(0, cur_sleeptime + cur_jitter)
cur_sleeptime = min(max_sleeptime, cur_sleeptime * sleepscale)
|
python
|
{
"resource": ""
}
|
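The retrier generator above is meant to drive a retry loop; a minimal sketch, where do_request stands in for any callable that raises on failure:

import time

def fetch_with_retries(do_request, max_attempts=4):
    # do_request: hypothetical callable that raises an exception on failure
    for attempt, sleep_s in enumerate(retrier(max_attempts=max_attempts,
                                              sleeptime=0.5,
                                              max_sleeptime=5), 1):
        try:
            return do_request()
        except Exception:
            if attempt == max_attempts:
                raise
            time.sleep(sleep_s)  # back off before the next attempt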
q2629
|
CoverSourceResult.get
|
train
|
async def get(self, target_format, target_size, size_tolerance_prct, out_filepath):
""" Download cover and process it. """
if self.source_quality.value <= CoverSourceQuality.LOW.value:
logging.getLogger("Cover").warning("Cover is from a potentially unreliable source and may be unrelated to the search")
images_data = []
for i, url in enumerate(self.urls):
# download
logging.getLogger("Cover").info("Downloading cover '%s' (part %u/%u)..." % (url, i + 1, len(self.urls)))
headers = {}
self.source.updateHttpHeaders(headers)
async def pre_cache_callback(img_data):
return await __class__.crunch(img_data, self.format)
store_in_cache_callback, image_data = await self.source.http.query(url,
headers=headers,
verify=False,
cache=__class__.image_cache,
pre_cache_callback=pre_cache_callback)
# store immediately in cache
await store_in_cache_callback()
# append for multi images
images_data.append(image_data)
need_format_change = (self.format != target_format)
need_size_change = ((max(self.size) > target_size) and
(abs(max(self.size) - target_size) >
target_size * size_tolerance_prct / 100))
need_join = len(images_data) > 1
if need_join or need_format_change or need_size_change:
# post process
image_data = self.postProcess(images_data,
target_format if need_format_change else None,
target_size if need_size_change else None)
# crunch image again
image_data = await __class__.crunch(image_data, target_format)
# write it
with open(out_filepath, "wb") as file:
file.write(image_data)
|
python
|
{
"resource": ""
}
|
q2630
|
CoverSourceResult.setFormatMetadata
|
train
|
def setFormatMetadata(self, format):
""" Set format image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.FORMAT)) or
(self.format is format))
self.format = format
self.check_metadata &= ~CoverImageMetadata.FORMAT
|
python
|
{
"resource": ""
}
|
q2631
|
CoverSourceResult.setSizeMetadata
|
train
|
def setSizeMetadata(self, size):
""" Set size image metadata to what has been reliably identified. """
assert((self.needMetadataUpdate(CoverImageMetadata.SIZE)) or
(self.size == size))
self.size = size
self.check_metadata &= ~CoverImageMetadata.SIZE
|
python
|
{
"resource": ""
}
|
q2632
|
CoverSourceResult.updateSignature
|
train
|
async def updateSignature(self):
""" Calculate a cover's "signature" using its thumbnail url. """
assert(self.thumbnail_sig is None)
if self.thumbnail_url is None:
logging.getLogger("Cover").warning("No thumbnail available for %s" % (self))
return
# download
logging.getLogger("Cover").debug("Downloading cover thumbnail '%s'..." % (self.thumbnail_url))
headers = {}
self.source.updateHttpHeaders(headers)
async def pre_cache_callback(img_data):
return await __class__.crunch(img_data, CoverImageFormat.JPEG, silent=True)
try:
store_in_cache_callback, image_data = await self.source.http.query(self.thumbnail_url,
cache=__class__.image_cache,
headers=headers,
pre_cache_callback=pre_cache_callback)
except Exception as e:
logging.getLogger("Cover").warning("Download of '%s' failed: %s %s" % (self.thumbnail_url,
e.__class__.__qualname__,
e))
return
# compute sig
logging.getLogger("Cover").debug("Computing signature of %s..." % (self))
try:
self.thumbnail_sig = __class__.computeImgSignature(image_data)
except Exception as e:
logging.getLogger("Cover").warning("Failed to compute signature of '%s': %s %s" % (self,
e.__class__.__qualname__,
e))
else:
await store_in_cache_callback()
|
python
|
{
"resource": ""
}
|
q2633
|
CoverSourceResult.crunch
|
train
|
async def crunch(image_data, format, silent=False):
""" Crunch image data, and return the processed data, or orignal data if operation failed. """
if (((format is CoverImageFormat.PNG) and (not HAS_OPTIPNG)) or
((format is CoverImageFormat.JPEG) and (not HAS_JPEGOPTIM))):
return image_data
with mkstemp_ctx.mkstemp(suffix=".%s" % (format.name.lower())) as tmp_out_filepath:
if not silent:
logging.getLogger("Cover").info("Crunching %s image..." % (format.name.upper()))
with open(tmp_out_filepath, "wb") as tmp_out_file:
tmp_out_file.write(image_data)
size_before = len(image_data)
if format is CoverImageFormat.PNG:
cmd = ["optipng", "-quiet", "-o1"]
elif format is CoverImageFormat.JPEG:
cmd = ["jpegoptim", "-q", "--strip-all"]
cmd.append(tmp_out_filepath)
p = await asyncio.create_subprocess_exec(*cmd,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL)
await p.wait()
if p.returncode != 0:
if not silent:
logging.getLogger("Cover").warning("Crunching image failed")
return image_data
with open(tmp_out_filepath, "rb") as tmp_out_file:
crunched_image_data = tmp_out_file.read()
size_after = len(crunched_image_data)
pct_saved = 100 * (size_before - size_after) / size_before
if not silent:
logging.getLogger("Cover").debug("Crunching image saved %.2f%% filesize" % (pct_saved))
return crunched_image_data
|
python
|
{
"resource": ""
}
|
q2634
|
CoverSourceResult.guessImageMetadataFromData
|
train
|
def guessImageMetadataFromData(img_data):
""" Identify an image format and size from its first bytes. """
format, width, height = None, None, None
img_stream = io.BytesIO(img_data)
try:
img = PIL.Image.open(img_stream)
except IOError:
format = imghdr.what(None, h=img_data)
format = SUPPORTED_IMG_FORMATS.get(format, None)
else:
format = img.format.lower()
format = SUPPORTED_IMG_FORMATS.get(format, None)
width, height = img.size
return format, width, height
|
python
|
{
"resource": ""
}
|
q2635
|
CoverSourceResult.guessImageMetadataFromHttpData
|
train
|
async def guessImageMetadataFromHttpData(response):
""" Identify an image format and size from the beginning of its HTTP data. """
metadata = None
img_data = bytearray()
while len(img_data) < CoverSourceResult.MAX_FILE_METADATA_PEEK_SIZE:
new_img_data = await response.content.read(__class__.METADATA_PEEK_SIZE_INCREMENT)
if not new_img_data:
break
img_data.extend(new_img_data)
metadata = __class__.guessImageMetadataFromData(img_data)
if (metadata is not None) and all(metadata):
return metadata
return metadata
|
python
|
{
"resource": ""
}
|
q2636
|
CoverSourceResult.guessImageFormatFromHttpResponse
|
train
|
def guessImageFormatFromHttpResponse(response):
""" Guess file format from HTTP response, return format or None. """
extensions = []
# try to guess extension from response content-type header
try:
content_type = response.headers["Content-Type"]
except KeyError:
pass
else:
ext = mimetypes.guess_extension(content_type, strict=False)
if ext is not None:
extensions.append(ext)
# try to extract extension from URL
urls = list(response.history) + [response.url]
for url in map(str, urls):
ext = os.path.splitext(urllib.parse.urlsplit(url).path)[-1]
if (ext is not None) and (ext not in extensions):
extensions.append(ext)
# now guess from the extensions
for ext in extensions:
try:
return SUPPORTED_IMG_FORMATS[ext[1:]]
except KeyError:
pass
|
python
|
{
"resource": ""
}
|
q2637
|
CoverSourceResult.preProcessForComparison
|
train
|
async def preProcessForComparison(results, target_size, size_tolerance_prct):
""" Process results to prepare them for future comparison and sorting. """
# find reference (=image most likely to match target cover ignoring factors like size and format)
reference = None
for result in results:
if result.source_quality is CoverSourceQuality.REFERENCE:
if ((reference is None) or
(CoverSourceResult.compare(result,
reference,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) > 0)):
reference = result
# remove results that are only refs
results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results))
# remove duplicates
no_dup_results = []
for result in results:
is_dup = False
for result_comp in results:
if ((result_comp is not result) and
(result_comp.urls == result.urls) and
(__class__.compare(result,
result_comp,
target_size=target_size,
size_tolerance_prct=size_tolerance_prct) < 0)):
is_dup = True
break
if not is_dup:
no_dup_results.append(result)
dup_count = len(results) - len(no_dup_results)
if dup_count > 0:
logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count))
results = no_dup_results
if reference is not None:
logging.getLogger("Cover").info("Reference is: %s" % (reference))
reference.is_similar_to_reference = True
# calculate sigs
futures = []
for result in results:
coroutine = result.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if reference.is_only_reference:
assert(reference not in results)
coroutine = reference.updateSignature()
future = asyncio.ensure_future(coroutine)
futures.append(future)
if futures:
await asyncio.wait(futures)
for future in futures:
future.result() # raise pending exception if any
# compare other results to reference
for result in results:
if ((result is not reference) and
(result.thumbnail_sig is not None) and
(reference.thumbnail_sig is not None)):
result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig,
reference.thumbnail_sig)
if result.is_similar_to_reference:
logging.getLogger("Cover").debug("%s is similar to reference" % (result))
else:
logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result))
else:
logging.getLogger("Cover").warning("No reference result found")
return results
|
python
|
{
"resource": ""
}
|
q2638
|
CoverSourceResult.computeImgSignature
|
train
|
def computeImgSignature(image_data):
"""
Calculate an image signature.
This is similar to ahash but uses 3 color components
See: https://github.com/JohannesBuchner/imagehash/blob/4.0/imagehash/__init__.py#L125
"""
parser = PIL.ImageFile.Parser()
parser.feed(image_data)
img = parser.close()
target_size = (__class__.IMG_SIG_SIZE, __class__.IMG_SIG_SIZE)
img.thumbnail(target_size, PIL.Image.BICUBIC)
if img.size != target_size:
logging.getLogger("Cover").debug("Non square thumbnail after resize to %ux%u, unable to compute signature" % target_size)
return None
img = img.convert(mode="RGB")
pixels = img.getdata()
pixel_count = target_size[0] * target_size[1]
color_count = 3
r = bitarray.bitarray(pixel_count * color_count)
r.setall(False)
for ic in range(color_count):
mean = sum(p[ic] for p in pixels) // pixel_count
for ip, p in enumerate(pixels):
if p[ic] > mean:
r[pixel_count * ic + ip] = True
return r
|
python
|
{
"resource": ""
}
|
q2639
|
get_metadata
|
train
|
def get_metadata(audio_filepaths):
""" Return a tuple of album, artist, has_embedded_album_art from a list of audio files. """
artist, album, has_embedded_album_art = None, None, None
for audio_filepath in audio_filepaths:
try:
mf = mutagen.File(audio_filepath)
except Exception:
continue
if mf is None:
continue
# artist
for key in ("albumartist", "artist", # ogg
"TPE1", "TPE2", # mp3
"aART", "\xa9ART"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
artist = val[-1]
break
# album
for key in ("_album", "album", # ogg
"TALB", # mp3
"\xa9alb"): # mp4
try:
val = mf.get(key, None)
except ValueError:
val = None
if val is not None:
album = val[-1]
break
if artist and album:
# album art
if isinstance(mf, mutagen.ogg.OggFileType):
has_embedded_album_art = "metadata_block_picture" in mf
elif isinstance(mf, mutagen.mp3.MP3):
has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"), mf.keys()))
elif isinstance(mf, mutagen.mp4.MP4):
has_embedded_album_art = "covr" in mf
# stop at the first file that succeeds (for performance)
break
return artist, album, has_embedded_album_art
|
python
|
{
"resource": ""
}
|
q2640
|
embed_album_art
|
train
|
def embed_album_art(cover_filepath, path):
""" Embed album art into audio files. """
with open(cover_filepath, "rb") as f:
cover_data = f.read()
for filename in os.listdir(path):
try:
ext = os.path.splitext(filename)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
filepath = os.path.join(path, filename)
mf = mutagen.File(filepath)
if (isinstance(mf.tags, mutagen._vorbis.VComment) or
isinstance(mf, mutagen.ogg.OggFileType)):
picture = mutagen.flac.Picture()
picture.data = cover_data
picture.type = mutagen.id3.PictureType.COVER_FRONT
picture.mime = "image/jpeg"
encoded_data = base64.b64encode(picture.write())
mf["metadata_block_picture"] = encoded_data.decode("ascii")
elif (isinstance(mf.tags, mutagen.id3.ID3) or
isinstance(mf, mutagen.id3.ID3FileType)):
mf.tags.add(mutagen.id3.APIC(mime="image/jpeg",
type=mutagen.id3.PictureType.COVER_FRONT,
data=cover_data))
elif (isinstance(mf.tags, mutagen.mp4.MP4Tags) or
isinstance(mf, mutagen.mp4.MP4)):
mf["covr"] = [mutagen.mp4.MP4Cover(cover_data,
imageformat=mutagen.mp4.AtomDataType.JPEG)]
mf.save()
|
python
|
{
"resource": ""
}
|
q2641
|
ichunk
|
train
|
def ichunk(iterable, n):
""" Split an iterable into n-sized chunks. """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
python
|
{
"resource": ""
}
|
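For example:

list(ichunk(range(7), 3))   # -> [(0, 1, 2), (3, 4, 5), (6,)]
list(ichunk([], 3))         # -> []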
q2642
|
redirect_logging
|
train
|
def redirect_logging(tqdm_obj, logger=logging.getLogger()):
""" Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original. """
# remove current handler
assert(len(logger.handlers) == 1)
prev_handler = logger.handlers[0]
logger.removeHandler(prev_handler)
# add tqdm handler
tqdm_handler = TqdmLoggingHandler(tqdm_obj)
if prev_handler.formatter is not None:
tqdm_handler.setFormatter(prev_handler.formatter)
logger.addHandler(tqdm_handler)
try:
yield
finally:
# restore handler
logger.removeHandler(tqdm_handler)
logger.addHandler(prev_handler)
|
python
|
{
"resource": ""
}
|
q2643
|
CoverSource.probeUrl
|
train
|
async def probeUrl(self, url, response_headers=None):
""" Probe URL reachability from cache or HEAD request. """
self.logger.debug("Probing URL '%s'..." % (url))
headers = {}
self.updateHttpHeaders(headers)
resp_headers = {}
resp_ok = await self.http.isReachable(url,
headers=headers,
response_headers=resp_headers,
cache=__class__.probe_cache)
if response_headers is not None:
response_headers.update(resp_headers)
return resp_ok
|
python
|
{
"resource": ""
}
|
q2644
|
CoverSource.unaccentuate
|
train
|
def unaccentuate(s):
""" Replace accentuated chars in string by their non accentuated equivalent. """
return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c))
|
python
|
{
"resource": ""
}
|
q2645
|
CoverSource.unpunctuate
|
train
|
def unpunctuate(s, *, char_blacklist=string.punctuation):
""" Remove punctuation from string s. """
# remove punctuation
s = "".join(c for c in s if c not in char_blacklist)
# remove consecutive spaces
return " ".join(filter(None, s.split(" ")))
|
python
|
{
"resource": ""
}
|
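Both string helpers above (unaccentuate from the previous row and unpunctuate here) are plain transforms and can be exercised directly:

unaccentuate("Björk")              # -> "Bjork"
unpunctuate("Hello,   world!!")    # -> "Hello world"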
q2646
|
_glfw_get_version
|
train
|
def _glfw_get_version(filename):
'''
Queries and returns the library version tuple or None by using a
subprocess.
'''
version_checker_source = """
import sys
import ctypes
def get_version(library_handle):
'''
Queries and returns the library version tuple or None.
'''
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
if hasattr(library_handle, 'glfwGetVersion'):
library_handle.glfwGetVersion(major, minor, rev)
version = (major_value.value,
minor_value.value,
rev_value.value)
return version
else:
return None
try:
input_func = raw_input
except NameError:
input_func = input
filename = input_func().strip()
try:
library_handle = ctypes.CDLL(filename)
except OSError:
pass
else:
version = get_version(library_handle)
print(version)
"""
args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
process = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = process.communicate(_to_char_p(filename))[0]
out = out.strip()
if out:
return eval(out)
else:
return None
|
python
|
{
"resource": ""
}
|
q2647
|
set_error_callback
|
train
|
def set_error_callback(cbfun):
'''
Sets the error callback.
Wrapper for:
GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
'''
global _error_callback
previous_callback = _error_callback
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWerrorfun(cbfun)
_error_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetErrorCallback(cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
|
python
|
{
"resource": ""
}
|
q2648
|
destroy_window
|
train
|
def destroy_window(window):
'''
Destroys the specified window and its context.
Wrapper for:
void glfwDestroyWindow(GLFWwindow* window);
'''
_glfw.glfwDestroyWindow(window)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_ulong)).contents.value
for callback_repository in _callback_repositories:
if window_addr in callback_repository:
del callback_repository[window_addr]
|
python
|
{
"resource": ""
}
|
q2649
|
normalize
|
train
|
def normalize(vector):
'''Normalizes the `vector` so that its length is 1. `vector` can have
any number of components.
'''
d = sum(x * x for x in vector) ** 0.5
return tuple(x / d for x in vector)
|
python
|
{
"resource": ""
}
|
q2650
|
distance
|
train
|
def distance(p1, p2):
'''Computes and returns the distance between two points, `p1` and `p2`.
The points can have any number of components.
'''
return sum((a - b) ** 2 for a, b in zip(p1, p2)) ** 0.5
|
python
|
{
"resource": ""
}
|
q2651
|
cross
|
train
|
def cross(v1, v2):
'''Computes the cross product of two vectors.
'''
return (
v1[1] * v2[2] - v1[2] * v2[1],
v1[2] * v2[0] - v1[0] * v2[2],
v1[0] * v2[1] - v1[1] * v2[0],
)
|
python
|
{
"resource": ""
}
|
q2652
|
dot
|
train
|
def dot(v1, v2):
'''Computes the dot product of two vectors.
'''
x1, y1, z1 = v1
x2, y2, z2 = v2
return x1 * x2 + y1 * y2 + z1 * z2
|
python
|
{
"resource": ""
}
|
q2653
|
add
|
train
|
def add(v1, v2):
'''Adds two vectors.
'''
return tuple(a + b for a, b in zip(v1, v2))
|
python
|
{
"resource": ""
}
|
q2654
|
sub
|
train
|
def sub(v1, v2):
'''Subtracts two vectors.
'''
return tuple(a - b for a, b in zip(v1, v2))
|
python
|
{
"resource": ""
}
|
q2655
|
interpolate
|
train
|
def interpolate(v1, v2, t):
'''Interpolate from one vector to another.
'''
return add(v1, mul(sub(v2, v1), t))
|
python
|
{
"resource": ""
}
|
q2656
|
normal_from_points
|
train
|
def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d)
|
python
|
{
"resource": ""
}
|
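The small vector helpers in the rows above compose naturally; for example, the normal of a triangle lying in the X-Y plane points along +Z:

n = normal_from_points((0, 0, 0), (1, 0, 0), (0, 1, 0))    # -> (0.0, 0.0, 1.0)
cross((1, 0, 0), (0, 1, 0))                                # -> (0, 0, 1)
dot(n, normalize((0, 0, 2)))                               # -> 1.0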
q2657
|
smooth_normals
|
train
|
def smooth_normals(positions, normals):
'''Assigns an averaged normal to each position based on all of the normals
originally used for the position.
'''
lookup = defaultdict(list)
for position, normal in zip(positions, normals):
lookup[position].append(normal)
result = []
for position in positions:
tx = ty = tz = 0
for x, y, z in lookup[position]:
tx += x
ty += y
tz += z
d = (tx * tx + ty * ty + tz * tz) ** 0.5
result.append((tx / d, ty / d, tz / d))
return result
|
python
|
{
"resource": ""
}
|
q2658
|
bounding_box
|
train
|
def bounding_box(positions):
'''Computes the bounding box for a list of 3-dimensional points.
'''
(x0, y0, z0) = (x1, y1, z1) = positions[0]
for x, y, z in positions:
x0 = min(x0, x)
y0 = min(y0, y)
z0 = min(z0, z)
x1 = max(x1, x)
y1 = max(y1, y)
z1 = max(z1, z)
return (x0, y0, z0), (x1, y1, z1)
|
python
|
{
"resource": ""
}
|
q2659
|
recenter
|
train
|
def recenter(positions):
'''Returns a list of new positions centered around the origin.
'''
(x0, y0, z0), (x1, y1, z1) = bounding_box(positions)
dx = x1 - (x1 - x0) / 2.0
dy = y1 - (y1 - y0) / 2.0
dz = z1 - (z1 - z0) / 2.0
result = []
for x, y, z in positions:
result.append((x - dx, y - dy, z - dz))
return result
|
python
|
{
"resource": ""
}
|
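For example, recentering a two-point cloud moves its bounding-box centre to the origin:

recenter([(0, 0, 0), (2, 2, 2)])
# -> [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)]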
q2660
|
interleave
|
train
|
def interleave(*args):
'''Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
'''
result = []
for array in zip(*args):
result.append(tuple(flatten(array)))
return result
|
python
|
{
"resource": ""
}
|
q2661
|
distinct
|
train
|
def distinct(iterable, keyfunc=None):
'''Yields distinct items from `iterable` in the order that they appear.
'''
seen = set()
for item in iterable:
key = item if keyfunc is None else keyfunc(item)
if key not in seen:
seen.add(key)
yield item
|
python
|
{
"resource": ""
}
|
q2662
|
ray_triangle_intersection
|
train
|
def ray_triangle_intersection(v1, v2, v3, o, d):
'''Computes the distance from a point to a triangle given a ray.
'''
eps = 1e-6
e1 = sub(v2, v1)
e2 = sub(v3, v1)
p = cross(d, e2)
det = dot(e1, p)
if abs(det) < eps:
return None
inv = 1.0 / det
t = sub(o, v1)
u = dot(t, p) * inv
if u < 0 or u > 1:
return None
q = cross(t, e1)
v = dot(d, q) * inv
if v < 0 or v > 1:
return None
t = dot(e2, q) * inv
if t > eps:
return t
return None
|
python
|
{
"resource": ""
}
|
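A quick sanity check for ray_triangle_intersection, firing a ray straight down at a unit triangle in the z = 0 plane (uses sub, cross and dot from the earlier rows):

v1, v2, v3 = (0, 0, 0), (1, 0, 0), (0, 1, 0)
down = (0, 0, -1)
ray_triangle_intersection(v1, v2, v3, (0.25, 0.25, 1.0), down)   # -> 1.0 (hit at distance 1)
ray_triangle_intersection(v1, v2, v3, (5.0, 5.0, 1.0), down)     # -> None (miss)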
q2663
|
pack_list
|
train
|
def pack_list(fmt, data):
'''Convert a Python list into a ctypes buffer.
This appears to be faster than the typical method of creating a ctypes
array, e.g. (c_float * len(data))(*data)
'''
func = struct.Struct(fmt).pack
return create_string_buffer(''.join([func(x) for x in data]))
|
python
|
{
"resource": ""
}
|
q2664
|
Clickable.click
|
train
|
def click(self, jquery=False):
"""
Click via the WebElement, or via JQuery if jquery=True
"""
if jquery:
e = JQuery(self)
e.click()
else:
super(Clickable, self).click()
|
python
|
{
"resource": ""
}
|
q2665
|
add_cookies_to_web_driver
|
train
|
def add_cookies_to_web_driver(driver, cookies):
"""
Sets cookies in an existing WebDriver session.
"""
for cookie in cookies:
driver.add_cookie(convert_cookie_to_dict(cookie))
return driver
|
python
|
{
"resource": ""
}
|
q2666
|
BrowserCloserPlugin.configure
|
train
|
def configure(self, options, conf):
"""Configure plugin. Plugin is enabled by default.
"""
self.conf = conf
self.when = options.browser_closer_when
|
python
|
{
"resource": ""
}
|
q2667
|
SymbolIndex.index_path
|
train
|
def index_path(self, root):
"""Index a path.
:param root: Either a package directory, a .so or a .py module.
"""
basename = os.path.basename(root)
if os.path.splitext(basename)[0] != '__init__' and basename.startswith('_'):
return
location = self._determine_location_for(root)
if os.path.isfile(root):
self._index_module(root, location)
elif os.path.isdir(root) and os.path.exists(os.path.join(root, '__init__.py')):
self._index_package(root, location)
|
python
|
{
"resource": ""
}
|
q2668
|
SymbolIndex.get_or_create_index
|
train
|
def get_or_create_index(self, paths=None, name=None, refresh=False):
"""
Get the index with the given name from the cache. Create it if it doesn't exist.
"""
if not paths:
paths = sys.path
if not name:
name = 'default'
self._name = name
idx_dir = get_cache_dir()
idx_file = os.path.join(idx_dir, name + '.json')
if os.path.exists(idx_file) and not refresh:
with open(idx_file) as fd:
self.deserialize(fd)
else:
self.build_index(paths)
with open(idx_file, 'w') as fd:
self.serialize(fd)
return self
|
python
|
{
"resource": ""
}
|
q2669
|
SymbolIndex.symbol_scores
|
train
|
def symbol_scores(self, symbol):
"""Find matches for symbol.
:param symbol: A . separated symbol. eg. 'os.path.basename'
:returns: A list of tuples of (score, package, reference|None),
ordered by score from highest to lowest.
"""
scores = []
path = []
# sys.path sys path -> import sys
# os.path.basename os.path basename -> import os.path
# basename os.path basename -> from os.path import basename
# path.basename os.path basename -> from os import path
def fixup(module, variable):
prefix = module.split('.')
if variable is not None:
prefix.append(variable)
seeking = symbol.split('.')
new_module = []
while prefix and seeking[0] != prefix[0]:
new_module.append(prefix.pop(0))
if new_module:
module, variable = '.'.join(new_module), prefix[0]
else:
variable = None
return module, variable
def score_walk(scope, scale):
sub_path, score = self._score_key(scope, full_key)
if score > 0.1:
try:
i = sub_path.index(None)
sub_path, from_symbol = sub_path[:i], '.'.join(sub_path[i + 1:])
except ValueError:
from_symbol = None
package_path = '.'.join(path + sub_path)
package_path, from_symbol = fixup(package_path, from_symbol)
scores.append((score * scale, package_path, from_symbol))
for key, subscope in scope._tree.items():
if type(subscope) is not float:
path.append(key)
score_walk(subscope, subscope.score * scale - 0.1)
path.pop()
full_key = symbol.split('.')
score_walk(self, 1.0)
scores.sort(reverse=True)
return scores
|
python
|
{
"resource": ""
}
|
q2670
|
SymbolIndex.find
|
train
|
def find(self, path):
"""Return the node for a path, or None."""
path = path.split('.')
node = self
while node._parent:
node = node._parent
for name in path:
node = node._tree.get(name, None)
if node is None or type(node) is float:
return None
return node
|
python
|
{
"resource": ""
}
|
q2671
|
SymbolIndex.location_for
|
train
|
def location_for(self, path):
"""Return the location code for a path."""
path = path.split('.')
node = self
while node._parent:
node = node._parent
location = node.location
for name in path:
tree = node._tree.get(name, None)
if tree is None or type(tree) is float:
return location
location = tree.location
return location
|
python
|
{
"resource": ""
}
|
q2672
|
Select.select_option
|
train
|
def select_option(self, option):
"""
Performs selection of provided item from Web List
@params option - string item name
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
|
python
|
{
"resource": ""
}
|
q2673
|
Select.get_attribute_selected
|
train
|
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
|
python
|
{
"resource": ""
}
|
q2674
|
Select.select_by_visible_text
|
train
|
def select_by_visible_text(self, text):
"""
Performs selection of an item from the Web List by its visible text
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
|
python
|
{
"resource": ""
}
|
q2675
|
parse_ast
|
train
|
def parse_ast(source, filename=None):
"""Parse source into a Python AST, taking care of encoding."""
if isinstance(source, text_type) and sys.version_info[0] == 2:
# ast.parse() on Python 2 doesn't like encoding declarations
# in Unicode strings
source = CODING_COOKIE_RE.sub(r'\1', source, 1)
return ast.parse(source, filename or '<unknown>')
|
python
|
{
"resource": ""
}
|
q2676
|
Scope.find_unresolved_and_unreferenced_symbols
|
train
|
def find_unresolved_and_unreferenced_symbols(self):
"""Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced})
"""
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced,
frozenset(self._definitions), start=True)
return unresolved, unreferenced - Scope.ALL_BUILTINS
|
python
|
{
"resource": ""
}
|
q2677
|
get_item
|
train
|
def get_item(key):
"""Return content in cached file in JSON format"""
CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
try:
return json.loads(open(CACHED_KEY_FILE, "rb").read().decode('UTF-8'))["_"]
except (IOError, ValueError):
return None
|
python
|
{
"resource": ""
}
|
q2678
|
set_item
|
train
|
def set_item(key,value):
"""Write JSON content from value argument to cached file and return"""
CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
open(CACHED_KEY_FILE, "wb").write(json.dumps({"_": value}).encode('UTF-8'))
return value
|
python
|
{
"resource": ""
}
|
q2679
|
delete_item
|
train
|
def delete_item(key):
"""Delete cached file if present"""
CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
if os.path.isfile(CACHED_KEY_FILE):
os.remove(CACHED_KEY_FILE)
|
python
|
{
"resource": ""
}
|
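The three cache helpers above share the module-level CURRENT_DIR; a minimal round trip, assuming CURRENT_DIR points at a writable directory:

set_item("token", {"value": 42})    # writes CURRENT_DIR/token
get_item("token")                   # -> {"value": 42}
delete_item("token")                # removes the cached file again
get_item("token")                   # -> None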
q2680
|
JsonQ.__parse_json_data
|
train
|
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
|
python
|
{
"resource": ""
}
|
q2681
|
JsonQ.__parse_json_file
|
train
|
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
|
python
|
{
"resource": ""
}
|
q2682
|
JsonQ.__get_value_from_data
|
train
|
def __get_value_from_data(self, key, data):
"""Find value from json data
:@param key
:@type key: string
:@param data
:@type data: dict
:@return object
:@throws KeyError
"""
if key.isdigit():
return data[int(key)]
if key not in data:
raise KeyError("Key not exists")
return data.get(key)
|
python
|
{
"resource": ""
}
|
q2683
|
JsonQ.at
|
train
|
def at(self, root):
"""Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
"""
leafs = root.strip(" ").split('.')
for leaf in leafs:
if leaf:
self._json_data = self.__get_value_from_data(leaf, self._json_data)
return self
|
python
|
{
"resource": ""
}
|
q2684
|
JsonQ.reset
|
train
|
def reset(self, data={}):
"""JsonQuery object cen be reset to new data
according to given data or previously given raw Json data
:@param data: {}
:@type data: json/dict
:@return self
"""
if data and (isinstance(data, dict) or isinstance(data, list)):
self._json_data = data
else:
self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
|
python
|
{
"resource": ""
}
|
q2685
|
JsonQ.__execute_queries
|
train
|
def __execute_queries(self):
"""Execute all condition and filter result data"""
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
query.get('operator'),
query.get('value')
)
or_check |= and_check
return or_check
self._json_data = list(filter(lambda item: func(item), self._json_data))
|
python
|
{
"resource": ""
}
|
q2686
|
JsonQ.or_where
|
train
|
def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
|
python
|
{
"resource": ""
}
|
q2687
|
JsonQ.nth
|
train
|
def nth(self, index):
"""Getting the nth element of the collection
:@param index
:@type index: int
:@return object
"""
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
|
python
|
{
"resource": ""
}
|
q2688
|
JsonQ.sum
|
train
|
def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
|
python
|
{
"resource": ""
}
|
q2689
|
JsonQ.max
|
train
|
def max(self, property):
"""Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
|
python
|
{
"resource": ""
}
|
q2690
|
JsonQ.avg
|
train
|
def avg(self, property):
"""Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
"""
self.__prepare()
return self.sum(property) / self.count()
|
python
|
{
"resource": ""
}
|
q2691
|
JsonQ.chunk
|
train
|
def chunk(self, size=0):
"""Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List
"""
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
|
python
|
{
"resource": ""
}
|
q2692
|
JsonQ.group_by
|
train
|
def group_by(self, property):
"""Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
"""
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
|
python
|
{
"resource": ""
}
|
q2693
|
JsonQ.sort
|
train
|
def sort(self, order="asc"):
"""Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
|
python
|
{
"resource": ""
}
|
q2694
|
JsonQ.sort_by
|
train
|
def sort_by(self, property, order="asc"):
"""Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
python
|
{
"resource": ""
}
|
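A hedged sketch of how the JsonQ methods above chain on an instance; the JsonQ(data=...) constructor call is an assumption, as it is not shown in these rows:

data = {"users": [{"name": "Ada", "age": 36}, {"name": "Lin", "age": 29}]}
q = JsonQ(data=data)                      # constructor signature assumed
q.at("users").sort_by("age").nth(0)       # -> {"name": "Lin", "age": 29}
q.reset().at("users").avg("age")          # -> 32.5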
q2695
|
Matcher._match
|
train
|
def _match(self, x, op, y):
"""Compare the given `x` and `y` based on `op`
:@param x, y, op
:@type x, y: mixed
:@type op: string
:@return bool
:@throws ValueError
"""
if (op not in self.condition_mapper):
raise ValueError('Invalid where condition given')
func = getattr(self, self.condition_mapper.get(op))
return func(x, y)
|
python
|
{
"resource": ""
}
|
q2696
|
overrides
|
train
|
def overrides(method):
"""Decorator to indicate that the decorated method overrides a method in
superclass.
The decorator code is executed while the class is loaded. Using this method
should have minimal runtime performance implications.
This is based on my idea about how to do this and fwc's highly improved
algorithm for the implementation.
fwc's algorithm: http://stackoverflow.com/a/14631397/308189
my answer: http://stackoverflow.com/a/8313042/308189
How to use:
from overrides import overrides
class SuperClass(object):
def method(self):
return 2
class SubClass(SuperClass):
@overrides
def method(self):
return 1
:raises AssertionError: if no super class has a method with this name
:return: the method, with the docstring from the super class added if the
method doesn't have one of its own
"""
for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
if hasattr(super_class, method.__name__):
super_method = getattr(super_class, method.__name__)
if hasattr(super_method, "__finalized__"):
finalized = getattr(super_method, "__finalized__")
if finalized:
raise AssertionError('Method "%s" is finalized' %
method.__name__)
if not method.__doc__:
method.__doc__ = super_method.__doc__
return method
raise AssertionError('No super class method found for "%s"' %
method.__name__)
|
python
|
{
"resource": ""
}
|
q2697
|
_get_base_class_names
|
train
|
def _get_base_class_names(frame):
""" Get baseclass names from the code object """
co, lasti = frame.f_code, frame.f_lasti
code = co.co_code
extends = []
for (op, oparg) in op_stream(code, lasti):
if op in dis.hasconst:
if type(co.co_consts[oparg]) == str:
extends = []
elif op in dis.hasname:
if dis.opname[op] == 'LOAD_NAME':
extends.append(('name', co.co_names[oparg]))
if dis.opname[op] == 'LOAD_ATTR':
extends.append(('attr', co.co_names[oparg]))
if dis.opname[op] == 'LOAD_GLOBAL':
extends.append(('name', co.co_names[oparg]))
items = []
previous_item = []
for t, s in extends:
if t == 'name':
if previous_item:
items.append(previous_item)
previous_item = [s]
else:
previous_item += [s]
if previous_item:
items.append(previous_item)
return items
|
python
|
{
"resource": ""
}
|
q2698
|
load_tlds
|
train
|
def load_tlds():
"""Load all legal TLD extensions from assets
"""
file = os.path.join(os.path.dirname(__file__),
'assets',
'tlds-alpha-by-domain.txt')
with open(file) as fobj:
return [elem for elem in fobj.read().lower().splitlines()[1:]
if "--" not in elem]
|
python
|
{
"resource": ""
}
|
q2699
|
parse_text_urls
|
train
|
def parse_text_urls(mesg):
"""Parse a block of text, splitting it into its url and non-url
components."""
rval = []
loc = 0
for match in URLRE.finditer(mesg):
if loc < match.start():
rval.append(Chunk(mesg[loc:match.start()], None))
# Turn email addresses into mailto: links
email = match.group("email")
if email and "mailto" not in email:
mailto = "mailto:{}".format(email)
else:
mailto = match.group(1)
rval.append(Chunk(None, mailto))
loc = match.end()
if loc < len(mesg):
rval.append(Chunk(mesg[loc:], None))
return rval
|
python
|
{
"resource": ""
}
|