<SYSTEM_TASK:>
Insert the data in several steps and commit when each step finishes, or roll back if there is a problem.
<END_TASK>
<USER_TASK:>
Description:
def putBlock(self, blockcontent, migration=False):
"""
Insert the data in several steps and commit when each step finishes, or roll back if there is a problem.
""" |
#YG
try:
#1 insert configuration
self.logger.debug("insert configuration")
configList = self.insertOutputModuleConfig(
blockcontent['dataset_conf_list'], migration)
#2 insert dataset
self.logger.debug("insert dataset")
datasetId = self.insertDataset(blockcontent, configList, migration)
#3 insert block & files
self.logger.debug("insert block & files.")
self.insertBlockFile(blockcontent, datasetId, migration)
except KeyError as ex:
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/putBlock: \
KeyError exception: %s. " %ex.args[0], self.logger.exception,
"DBSBlockInsert/putBlock: KeyError exception: %s. " %ex.args[0] )
except Exception as ex:
raise |
<SYSTEM_TASK:>
Helper function to check user input before sending it to the server
<END_TASK>
<USER_TASK:>
Description:
def checkInputParameter(method, parameters, validParameters, requiredParameters=None):
"""
Helper function to check user input before sending it to the server
:param method: Name of the API
:type method: str
:param parameters: Parameters provided by the user
:type parameters: iterable
:param validParameters: Allowed parameters for the API call
:type validParameters: list
:param requiredParameters: Required parameters for the API call (Default: None)
:type requiredParameters: list
""" |
for parameter in parameters:
if parameter not in validParameters:
raise dbsClientException("Invalid input",
"API %s does not support parameter %s. Supported parameters are %s" \
% (method, parameter, validParameters))
if requiredParameters is not None:
if 'multiple' in requiredParameters:
match = False
for requiredParameter in requiredParameters['multiple']:
if requiredParameter!='detail' and requiredParameter in parameters:
match = True
break
if not match:
raise dbsClientException("Invalid input",
"API %s does require one of the parameters %s" \
% (method, requiredParameters['multiple']))
if 'forced' in requiredParameters:
for requiredParameter in requiredParameters['forced']:
if requiredParameter not in parameters:
raise dbsClientException("Invalid input",
"API %s does require the parameter %s. Forced required parameters are %s" \
% (method, requiredParameter, requiredParameters['forced']))
if 'standalone' in requiredParameters:
overlap = []
for requiredParameter in requiredParameters['standalone']:
if requiredParameter in parameters:
overlap.append(requiredParameter)
if len(overlap) != 1:
raise dbsClientException("Invalid input",
"API %s does requires only *one* of the parameters %s." \
% (method, requiredParameters['standalone'])) |
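To make the three `requiredParameters` modes concrete, here is a hypothetical call (the API name and parameters are made up for illustration): 'forced' entries must all be present, 'multiple' needs at least one entry besides 'detail', and 'standalone' needs exactly one of its entries.

checkInputParameter(
    method="listExample",
    parameters=['dataset', 'detail'],
    validParameters=['dataset', 'block_name', 'detail'],
    requiredParameters={'multiple': ['dataset', 'block_name']})
# passes: 'dataset' satisfies the 'multiple' requirement; a parameter
# outside validParameters would raise dbsClientException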
<SYSTEM_TASK:>
Decorator to split up server calls for methods using url parameters, due to the length
<END_TASK>
<USER_TASK:>
Description:
def split_calls(func):
"""
Decorator to split up server calls for methods using url parameters, due to the length
limitation of the URI in Apache (8190 bytes by default).
""" |
def wrapper(*args, **kwargs):
#The size limit is 8190 bytes minus the url and api to call
#For example (https://cmsweb-testbed.cern.ch:8443/dbs/prod/global/filechildren), so a 190 byte margin should be safe.
size_limit = 8000
encoded_url = urllib.urlencode(kwargs)
if len(encoded_url) > size_limit:
for key, value in kwargs.iteritems():
### only one (the first) list at a time is split;
### currently only file lists are supported
if key in ('logical_file_name', 'block_name', 'lumi_list', 'run_num') and isinstance(value, list):
ret_val = []
for splitted_param in list_parameter_splitting(data=dict(kwargs), #make a copy, since it is manipulated
key=key,
size_limit=size_limit):
try:
ret_val.extend(func(*args, **splitted_param))
except (TypeError, AttributeError): # update function calls do not return lists
ret_val = []
return ret_val
raise dbsClientException("Invalid input",
"The lenght of the urlencoded parameters to API %s \
is exceeding %s bytes and cannot be splitted." % (func.__name__, size_limit))
else:
return func(*args, **kwargs)
return wrapper |
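A hypothetical usage sketch: the decorator wraps a client method whose keyword arguments are url-encoded, so an oversized list parameter is transparently split into several server calls and the partial results concatenated (the method and endpoint names are made up; `__callServer` is the private helper shown further below):

class DbsApi(object):
    @split_calls
    def listFileLumis(self, **kwargs):
        # a 'logical_file_name' list too long for one URL is split
        # into several GET calls by the decorator
        return self.__callServer("filelumis", params=kwargs)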
<SYSTEM_TASK:>
A private method to make HTTP call to the DBS Server
<END_TASK>
<USER_TASK:>
Description:
def __callServer(self, method="", params={}, data={}, callmethod='GET', content='application/json'):
"""
A private method to make HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str
""" |
UserID = os.environ['USER']+'@'+socket.gethostname()
try:
UserAgent = "DBSClient/"+os.environ['DBS3_CLIENT_VERSION']+"/"+ self.userAgent
except KeyError:
UserAgent = "DBSClient/Unknown"+"/"+ self.userAgent
request_headers = {"Content-Type": content, "Accept": content, "UserID": UserID, "User-Agent":UserAgent }
method_func = getattr(self.rest_api, callmethod.lower())
data = cjson.encode(data)
try:
self.http_response = method_func(self.url, method, params, data, request_headers)
except HTTPError as http_error:
self.__parseForException(http_error)
if content != "application/json":
return self.http_response.body
try:
json_ret=cjson.decode(self.http_response.body)
except cjson.DecodeError:
print("The server output is not a valid json, most probably you have a typo in the url.\n%s.\n" % self.url, file=sys.stderr)
raise dbsClientException("Invalid url", "Possible urls are %s" %self.http_response.body)
return json_ret |
<SYSTEM_TASK:>
An internal method, should not be used by clients
<END_TASK>
<USER_TASK:>
Description:
def __parseForException(self, http_error):
"""
An internal method, should not be used by clients
:param http_error: The HTTPError thrown by the server
""" |
data = http_error.body
try:
if isinstance(data, str):
data = cjson.decode(data)
except:
raise http_error
if isinstance(data, dict) and 'exception' in data:# re-raise with more details
raise HTTPError(http_error.url, data['exception'], data['message'], http_error.header, http_error.body)
raise http_error |
<SYSTEM_TASK:>
Returns the time needed to process the request by the frontend server in microseconds
<END_TASK>
<USER_TASK:>
Description:
def requestTimingInfo(self):
"""
Returns the time needed to process the request by the frontend server in microseconds
and the EPOC timestamp of the request in microseconds.
:rtype: tuple containing processing time and timestamp
""" |
try:
return tuple(item.split('=')[1] for item in self.http_response.header.get('CMS-Server-Time').split())
except AttributeError:
return None, None |
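For illustration, assuming the frontend sets a header of the form below (the numbers are made up), the generator expression splits on whitespace and then on '=':

header = 'D=1620 t=1430160000000000'  # hypothetical CMS-Server-Time value
tuple(item.split('=')[1] for item in header.split())
# -> ('1620', '1430160000000000'): processing time and EPOC timestamp,
# both in microseconds and both returned as strings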
<SYSTEM_TASK:>
API to list file parents using lumi section info.
<END_TASK>
<USER_TASK:>
Description:
def listFileParentsByLumi(self, **kwargs):
"""
API to list file parents using lumi section info.
:param block_name: name of the block whose files' parents need to be found (Required)
:type block_name: str
:param logical_file_name: if not all file parentages under the block are needed, this LFN list restricts the lookup to the given files (optional).
:type logical_file_name: list of strings
:returns: List of dictionaries containing the following keys [cid, pid]
:rtype: list of dicts
""" |
validParameters = ['block_name', 'logical_file_name']
requiredParameters = {'forced': ['block_name']}
checkInputParameter(method="listFileParentsByLumi", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("fileparentsbylumi", data=kwargs, callmethod='POST') |
<SYSTEM_TASK:>
API to list block parents.
<END_TASK>
<USER_TASK:>
Description:
def listBlockParents(self, **kwargs):
"""
API to list block parents.
:param block_name: name of the block whose parents need to be found (Required)
:type block_name: str
:returns: List of dictionaries containing the following keys (block_name)
:rtype: list of dicts
""" |
validParameters = ['block_name']
requiredParameters = {'forced': validParameters}
checkInputParameter(method="listBlockParents", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
if isinstance(kwargs["block_name"], list):
return self.__callServer("blockparents", data=kwargs, callmethod='POST')
else:
return self.__callServer("blockparents", params=kwargs) |
<SYSTEM_TASK:>
API to list datasets in DBS.
<END_TASK>
<USER_TASK:>
Description:
def listDatasetArray(self, **kwargs):
"""
API to list datasets in DBS.
:param dataset: list of datasets [dataset1,dataset2,..,dataset_n] (Required if dataset_id is not present), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset_ids that are the primary keys of the datasets table: [dataset_id1,dataset_id2,..,dataset_idn] (Required if dataset is not present), Max length 1000.
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: detailed list (True/1) or brief list (False/0) (Optional, default False)
:type detail: bool
:returns: List of dictionaries containing the following key (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
""" |
validParameters = ['dataset', 'dataset_access_type', 'detail', 'dataset_id']
requiredParameters = {'multiple': ['dataset', 'dataset_id']}
checkInputParameter(method="listDatasetArray", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
return self.__callServer("datasetlist", data=kwargs, callmethod='POST') |
<SYSTEM_TASK:>
Return a list of dictionaries. Each dictionary represents one device.
<END_TASK>
<USER_TASK:>
Description:
def find_devices():
"""Return a list of dictionaries. Each dictionary represents one device.
The dictionary contains the following keys: port, serial_number and in_use.
`port` can be used with :func:`open`. `serial_number` is the serial number
of the device (and can also be used with :func:`open`) and `in_use`
indicates whether the device is already open and therefore currently
unavailable.
.. note::
There is no guarantee that the returned information is still valid
when you open the device. In particular, if you open a device by its
port, you may get a different device than expected, e.g. because the
original device was disconnected from the machine after you called
:func:`find_devices` but before you called :func:`open`.
To open a device by its serial number, you should use :func:`open`
with the `serial_number` parameter.
""" |
# first fetch the number of attached devices, so we can create a buffer
# with the exact amount of entries. api expects array of u16
num_devices = api.py_aa_find_devices(0, array.array('H'))
_raise_error_if_negative(num_devices)
# return an empty list if no device is connected
if num_devices == 0:
return list()
ports = array.array('H', (0,) * num_devices)
unique_ids = array.array('I', (0,) * num_devices)
num_devices = api.py_aa_find_devices_ext(len(ports), len(unique_ids),
ports, unique_ids)
_raise_error_if_negative(num_devices)
if num_devices == 0:
return list()
del ports[num_devices:]
del unique_ids[num_devices:]
devices = list()
for port, uid in zip(ports, unique_ids):
in_use = bool(port & PORT_NOT_FREE)
dev = dict(
port=port & ~PORT_NOT_FREE,
serial_number=_unique_id_str(uid),
in_use=in_use)
devices.append(dev)
return devices |
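A minimal usage sketch:

# enumerate attached adapters and show which ones are free
for dev in find_devices():
    print('port=%d serial=%s in_use=%s'
          % (dev['port'], dev['serial_number'], dev['in_use']))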
<SYSTEM_TASK:>
I2C bitrate in kHz. Not every bitrate is supported by the host
<END_TASK>
<USER_TASK:>
Description:
def i2c_bitrate(self):
"""I2C bitrate in kHz. Not every bitrate is supported by the host
adapter. Therefore, the actual bitrate may be less than the value which
is set.
The power-on default value is 100 kHz.
""" |
ret = api.py_aa_i2c_bitrate(self.handle, 0)
_raise_error_if_negative(ret)
return ret |
<SYSTEM_TASK:>
Setting this to `True` will enable the I2C pullup resistors. If set
<END_TASK>
<USER_TASK:>
Description:
def i2c_pullups(self):
"""Setting this to `True` will enable the I2C pullup resistors. If set
to `False` the pullup resistors will be disabled.
Raises an :exc:`IOError` if the hardware adapter does not support
pullup resistors.
""" |
ret = api.py_aa_i2c_pullup(self.handle, I2C_PULLUP_QUERY)
_raise_error_if_negative(ret)
return ret |
<SYSTEM_TASK:>
I2C bus lock timeout in ms.
<END_TASK>
<USER_TASK:>
Description:
def i2c_bus_timeout(self):
"""I2C bus lock timeout in ms.
Minimum value is 10 ms and the maximum value is 450 ms. Not every value
can be set and will be rounded to the next possible number. You can
read back the property to get the actual value.
The power-on default value is 200 ms.
""" |
ret = api.py_aa_i2c_bus_timeout(self.handle, 0)
_raise_error_if_negative(ret)
return ret |
<SYSTEM_TASK:>
Make an I2C write access.
<END_TASK>
<USER_TASK:>
Description:
def i2c_master_write(self, i2c_address, data, flags=I2C_NO_FLAGS):
"""Make an I2C write access.
The given I2C device is addressed and data given as a string is
written. The transaction is finished with an I2C stop condition unless
I2C_NO_STOP is set in the flags.
10 bit addresses are supported if the I2C_10_BIT_ADDR flag is set.
""" |
data = array.array('B', data)
status, _ = api.py_aa_i2c_write_ext(self.handle, i2c_address, flags,
len(data), data)
_raise_i2c_status_code_error_if_failure(status) |
<SYSTEM_TASK:>
Make an I2C read access.
<END_TASK>
<USER_TASK:>
Description:
def i2c_master_read(self, addr, length, flags=I2C_NO_FLAGS):
"""Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
""" |
data = array.array('B', (0,) * length)
status, rx_len = api.py_aa_i2c_read_ext(self.handle, addr, flags,
length, data)
_raise_i2c_status_code_error_if_failure(status)
del data[rx_len:]
return bytes(data) |
<SYSTEM_TASK:>
Wait for an event to occur.
<END_TASK>
<USER_TASK:>
Description:
def poll(self, timeout=None):
"""Wait for an event to occur.
If `timeout` is given, it specifies the length of time in milliseconds
for which the function will wait for events before returning. If `timeout`
is omitted, negative or None, the call will block until there is an
event.
Returns a list of events. In case no event is pending, an empty list is
returned.
""" |
if timeout is None:
timeout = -1
ret = api.py_aa_async_poll(self.handle, timeout)
_raise_error_if_negative(ret)
events = list()
for event in (POLL_I2C_READ, POLL_I2C_WRITE, POLL_SPI,
POLL_I2C_MONITOR):
if ret & event:
events.append(event)
return events |
<SYSTEM_TASK:>
Enable I2C slave mode.
<END_TASK>
<USER_TASK:>
Description:
def enable_i2c_slave(self, slave_address):
"""Enable I2C slave mode.
The device will respond to the specified slave_address if it is
addressed.
You can wait for the data with :func:`poll` and get it with
`i2c_slave_read`.
""" |
ret = api.py_aa_i2c_slave_enable(self.handle, slave_address,
self.BUFFER_SIZE, self.BUFFER_SIZE)
_raise_error_if_negative(ret) |
<SYSTEM_TASK:>
Read the bytes from an I2C slave reception.
<END_TASK>
<USER_TASK:>
Description:
def i2c_slave_read(self):
"""Read the bytes from an I2C slave reception.
The bytes are returned as a string object.
""" |
data = array.array('B', (0,) * self.BUFFER_SIZE)
status, addr, rx_len = api.py_aa_i2c_slave_read_ext(self.handle,
self.BUFFER_SIZE, data)
_raise_i2c_status_code_error_if_failure(status)
# In case of general call, actually return the general call address
if addr == 0x80:
addr = 0x00
del data[rx_len:]
return (addr, bytes(data)) |
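A hypothetical slave-mode loop combining :func:`poll` with `i2c_slave_read` (the slave address and timeout are made up; this assumes `POLL_I2C_READ` signals pending slave data):

adapter.enable_i2c_slave(0x50)
while True:
    # wait up to one second for bus activity
    for event in adapter.poll(1000):
        if event == POLL_I2C_READ:
            addr, data = adapter.i2c_slave_read()
            print('received %d bytes from address 0x%02x' % (len(data), addr))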
<SYSTEM_TASK:>
Returns the number of bytes transmitted by the slave.
<END_TASK>
<USER_TASK:>
Description:
def i2c_slave_last_transmit_size(self):
"""Returns the number of bytes transmitted by the slave.""" |
ret = api.py_aa_i2c_slave_write_stats(self.handle)
_raise_error_if_negative(ret)
return ret |
<SYSTEM_TASK:>
Retrieve any data fetched by the monitor.
<END_TASK>
<USER_TASK:>
Description:
def i2c_monitor_read(self):
"""Retrieved any data fetched by the monitor.
This function has an integrated timeout mechanism. You should use
:func:`poll` to determine if there is any data available.
Returns a list of data bytes and special symbols. There are three
special symbols: `I2C_MONITOR_NACK`, I2C_MONITOR_START and
I2C_MONITOR_STOP.
""" |
data = array.array('H', (0,) * self.BUFFER_SIZE)
ret = api.py_aa_i2c_monitor_read(self.handle, self.BUFFER_SIZE,
data)
_raise_error_if_negative(ret)
del data[ret:]
return data.tolist() |
<SYSTEM_TASK:>
SPI bitrate in kHz. Not every bitrate is supported by the host
<END_TASK>
<USER_TASK:>
Description:
def spi_bitrate(self):
"""SPI bitrate in kHz. Not every bitrate is supported by the host
adapter. Therefore, the actual bitrate may be less than the value which
is set. The slowest bitrate supported is 125kHz. Any smaller value will
be rounded up to 125kHz.
The power-on default value is 1000 kHz.
""" |
ret = api.py_aa_spi_bitrate(self.handle, 0)
_raise_error_if_negative(ret)
return ret |
<SYSTEM_TASK:>
Configure the SPI interface by the well known SPI modes.
<END_TASK>
<USER_TASK:>
Description:
def spi_configure_mode(self, spi_mode):
"""Configure the SPI interface by the well known SPI modes.""" |
if spi_mode == SPI_MODE_0:
self.spi_configure(SPI_POL_RISING_FALLING,
SPI_PHASE_SAMPLE_SETUP, SPI_BITORDER_MSB)
elif spi_mode == SPI_MODE_3:
self.spi_configure(SPI_POL_FALLING_RISING,
SPI_PHASE_SETUP_SAMPLE, SPI_BITORDER_MSB)
else:
raise RuntimeError('SPI Mode not supported') |
<SYSTEM_TASK:>
Write a stream of bytes to a SPI device.
<END_TASK>
<USER_TASK:>
Description:
def spi_write(self, data):
"""Write a stream of bytes to a SPI device.""" |
data_out = array.array('B', data)
data_in = array.array('B', (0,) * len(data_out))
ret = api.py_aa_spi_write(self.handle, len(data_out), data_out,
len(data_in), data_in)
_raise_error_if_negative(ret)
return bytes(data_in) |
<SYSTEM_TASK:>
Change the output polarity on the SS line.
<END_TASK>
<USER_TASK:>
Description:
def spi_ss_polarity(self, polarity):
"""Change the ouput polarity on the SS line.
Please note, that this only affects the master functions.
""" |
ret = api.py_aa_spi_master_ss_polarity(self.handle, polarity)
_raise_error_if_negative(ret) |
<SYSTEM_TASK:>
Return an instance of schema for given verb.
<END_TASK>
<USER_TASK:>
Description:
def _schema_from_verb(verb, partial=False):
"""Return an instance of schema for given verb.""" |
from .verbs import Verbs
return getattr(Verbs, verb)(partial=partial) |
<SYSTEM_TASK:>
Deserialize a data structure to an object.
<END_TASK>
<USER_TASK:>
Description:
def load(self, data, many=None, partial=None):
"""Deserialize a data structure to an object.""" |
result = super(ResumptionTokenSchema, self).load(
data, many=many, partial=partial
)
result.data.update(
result.data.get('resumptionToken', {}).get('kwargs', {})
)
return result |
<SYSTEM_TASK:>
Validate arguments in incoming request.
<END_TASK>
<USER_TASK:>
Description:
def make_request_validator(request):
"""Validate arguments in incomming request.""" |
verb = request.values.get('verb', '', type=str)
resumption_token = request.values.get('resumptionToken', None)
schema = Verbs if resumption_token is None else ResumptionVerbs
return getattr(schema, verb, OAISchema)(partial=False) |
<SYSTEM_TASK:>
Parse an ISO8601-formatted datetime and return a datetime object.
<END_TASK>
<USER_TASK:>
Description:
def from_iso_permissive(datestring, use_dateutil=True):
"""Parse an ISO8601-formatted datetime and return a datetime object.
Inspired by the marshmallow.utils.from_iso function, but also accepts
datestrings that don't contain the time.
""" |
dateutil_available = False
try:
from dateutil import parser
dateutil_available = True
except ImportError:
dateutil_available = False
import datetime
# Use dateutil's parser if possible
if dateutil_available and use_dateutil:
return parser.parse(datestring)
else:
# Strip off timezone info.
return datetime.datetime.strptime(datestring[:19],
'%Y-%m-%dT%H:%M:%S') |
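For example, with dateutil installed both forms parse; the date-only form relies on dateutil, since the strptime fallback would reject it:

from_iso_permissive('2015-03-17T12:34:56')  # -> datetime(2015, 3, 17, 12, 34, 56)
from_iso_permissive('2015-03-17')           # -> datetime(2015, 3, 17, 0, 0)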
<SYSTEM_TASK:>
Check range between dates under keys ``from_`` and ``until``.
<END_TASK>
<USER_TASK:>
Description:
def validate(self, data):
"""Check range between dates under keys ``from_`` and ``until``.""" |
if 'verb' in data and data['verb'] != self.__class__.__name__:
raise ValidationError(
# FIXME encode data
'This is not a valid OAI-PMH verb:{0}'.format(data['verb']),
field_names=['verb'],
)
if 'from_' in data and 'until' in data and \
data['from_'] > data['until']:
raise ValidationError('Date "from" must be before "until".')
extra = set(request.values.keys()) - set([
f.load_from or f.name for f in self.fields.values()
])
if extra:
raise ValidationError('You have passed too many arguments.') |
<SYSTEM_TASK:>
Extracts the values of a set of parameters, recursing into nested dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def extract_params(params):
"""
Extracts the values of a set of parameters, recursing into nested dictionaries.
""" |
values = []
if isinstance(params, dict):
for key, value in params.items():
values.extend(extract_params(value))
elif isinstance(params, list):
for value in params:
values.extend(extract_params(value))
else:
values.append(params)
return values |
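A worked example on a nested structure:

extract_params({'ids': [1, 2], 'meta': {'tag': 'x'}})
# -> [1, 2, 'x'] (ordering follows dict iteration order)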
<SYSTEM_TASK:>
Get detailed metadata information about a list.
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, list_name, options=None):
"""
Get detailed metadata information about a list.
""" |
options = options or {}
data = {'list': list_name}
data.update(options)
return self.api_get('list', data) |
<SYSTEM_TASK:>
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
<END_TASK>
<USER_TASK:>
Description:
def import_contacts(self, email, password, include_name=False):
"""
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
""" |
data = {'email': email,
'password': password}
if include_name:
data['names'] = 1
return self.api_post('contacts', data) |
<SYSTEM_TASK:>
Push a new piece of content to Sailthru.
<END_TASK>
<USER_TASK:>
Description:
def push_content(self, title, url,
images=None, date=None, expire_date=None,
description=None, location=None, price=None,
tags=None,
author=None, site_name=None,
spider=None, vars=None):
"""
Push a new piece of content to Sailthru.
Expected names for the `images` argument's map are "full" and "thumb"
Expected format for `location` should be [longitude,latitude]
@param title: title string for the content
@param url: URL string for the content
@param images: map of image names
@param date: date string
@param expire_date: date string for when the content expires
@param description: description for the content
@param location: location of the content
@param price: price for the content
@param tags: list or comma separated string values
@param author: author for the content
@param site_name: site name for the content
@param spider: truthy value to force respidering content
@param vars: replaceable vars dictionary
""" |
vars = vars or {}
data = {'title': title,
'url': url}
if images is not None:
data['images'] = images
if date is not None:
data['date'] = date
if expire_date is not None:
data['expire_date'] = expire_date
if location is not None:
data['location'] = location
if price is not None:
data['price'] = price
if description is not None:
data['description'] = description
if site_name is not None:
data['site_name'] = site_name
if author is not None:
data['author'] = author
if spider:
data['spider'] = 1
if tags is not None:
data['tags'] = ",".join(tags) if isinstance(tags, list) else tags
if len(vars) > 0:
data['vars'] = vars.copy()
return self.api_post('content', data) |
<SYSTEM_TASK:>
Retrieve information about a purchase using the system's unique ID or a client's ID
<END_TASK>
<USER_TASK:>
Description:
def get_purchase(self, purchase_id, purchase_key='sid'):
"""
Retrieve information about a purchase using the system's unique ID or a client's ID
@param purchase_id: a string that represents a unique_id or an extid.
@param purchase_key: a string that is either 'sid' or 'extid'.
""" |
data = {'purchase_id': purchase_id,
'purchase_key': purchase_key}
return self.api_get('purchase', data) |
<SYSTEM_TASK:>
Returns true if the incoming request is an authenticated verify post.
<END_TASK>
<USER_TASK:>
Description:
def receive_verify_post(self, post_params):
"""
Returns true if the incoming request is an authenticated verify post.
""" |
if isinstance(post_params, dict):
required_params = ['action', 'email', 'send_id', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'verify':
return False
sig = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if sig != get_signature_hash(post_params, self.secret):
return False
send_response = self.get_send(post_params['send_id'])
try:
send_body = send_response.get_body()
send_json = json.loads(send_body)
if 'email' not in send_body:
return False
if send_json['email'] != post_params['email']:
return False
except ValueError:
return False
return True |
<SYSTEM_TASK:>
checks if post_params contains the required keys
<END_TASK>
<USER_TASK:>
Description:
def check_for_valid_postback_actions(self, required_keys, post_params):
"""
checks if post_params contains the required keys
""" |
for key in required_keys:
if key not in post_params:
return False
return True |
<SYSTEM_TASK:>
Perform an HTTP GET request, using the shared-secret auth hash.
<END_TASK>
<USER_TASK:>
Description:
def api_get(self, action, data, headers=None):
"""
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
""" |
return self._api_request(action, data, 'GET', headers) |
<SYSTEM_TASK:>
Perform an HTTP POST request, using the shared-secret auth hash.
<END_TASK>
<USER_TASK:>
Description:
def api_post(self, action, data, binary_data_param=None):
"""
Perform an HTTP POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
""" |
binary_data_param = binary_data_param or []
if binary_data_param:
return self.api_post_multipart(action, data, binary_data_param)
else:
return self._api_request(action, data, 'POST') |
<SYSTEM_TASK:>
Perform an HTTP Multipart POST request, using the shared-secret auth hash.
<END_TASK>
<USER_TASK:>
Description:
def api_post_multipart(self, action, data, binary_data_param):
"""
Perform an HTTP Multipart POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
@param binary_data_param: array of multipart keys
""" |
binary_data = {}
data = data.copy()
try:
file_handles = []
for param in binary_data_param:
if param in data:
binary_data[param] = file_handle = open(data[param], 'rb')
file_handles.append(file_handle)
del data[param]
json_payload = self._prepare_json_payload(data)
return self._http_request(action, json_payload, "POST", binary_data)
finally:
for file_handle in file_handles:
file_handle.close() |
<SYSTEM_TASK:>
Make Request to Sailthru API with given data and api key, format and signature hash
<END_TASK>
<USER_TASK:>
Description:
def _api_request(self, action, data, request_type, headers=None):
"""
Make Request to Sailthru API with given data and api key, format and signature hash
""" |
if 'file' in data:
file_data = {'file': open(data['file'], 'rb')}
else:
file_data = None
return self._http_request(action, self._prepare_json_payload(data), request_type, file_data, headers) |
<SYSTEM_TASK:>
Update mappings with the percolator field.
<END_TASK>
<USER_TASK:>
Description:
def _create_percolator_mapping(index, doc_type):
"""Update mappings with the percolator field.
.. note::
This is only needed from ElasticSearch v5 onwards, because percolators
are now just a special type of field inside mappings.
""" |
if ES_VERSION[0] >= 5:
current_search_client.indices.put_mapping(
index=index, doc_type=doc_type,
body=PERCOLATOR_MAPPING, ignore=[400, 404]) |
<SYSTEM_TASK:>
Create new percolator associated with the new set.
<END_TASK>
<USER_TASK:>
Description:
def _new_percolator(spec, search_pattern):
"""Create new percolator associated with the new set.""" |
if spec and search_pattern:
query = query_string_parser(search_pattern=search_pattern).to_dict()
for index in current_search.mappings.keys():
# Create the percolator doc_type in the existing index for >= ES5
# TODO: Consider doing this only once in app initialization
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
current_search_client.index(
index=index, doc_type=percolator_doc_type,
id='oaiset-{}'.format(spec),
body={'query': query}
) |
<SYSTEM_TASK:>
Delete percolator associated with the new oaiset.
<END_TASK>
<USER_TASK:>
Description:
def _delete_percolator(spec, search_pattern):
"""Delete percolator associated with the new oaiset.""" |
if spec:
for index in current_search.mappings.keys():
# Create the percolator doc_type in the existing index for >= ES5
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
current_search_client.delete(
index=index, doc_type=percolator_doc_type,
id='oaiset-{}'.format(spec), ignore=[404]
) |
<SYSTEM_TASK:>
Update all affected records by OAISet change.
<END_TASK>
<USER_TASK:>
Description:
def update_affected_records(spec=None, search_pattern=None):
"""Update all affected records by OAISet change.
:param spec: The record spec.
:param search_pattern: The search pattern.
""" |
chunk_size = current_app.config['OAISERVER_CELERY_TASK_CHUNK_SIZE']
record_ids = get_affected_records(spec=spec, search_pattern=search_pattern)
group(
update_records_sets.s(list(filter(None, chunk)))
for chunk in zip_longest(*[iter(record_ids)] * chunk_size)
)() |
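The `zip_longest` idiom above chunks an iterator into fixed-size groups, padding the final group with `None`, which `filter(None, ...)` then strips. A standalone sketch of the same pattern:

from itertools import zip_longest
record_ids = range(1, 6)
[list(filter(None, chunk)) for chunk in zip_longest(*[iter(record_ids)] * 2)]
# -> [[1, 2], [3, 4], [5]]
# note that filter(None, ...) would also drop falsy ids such as 0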
<SYSTEM_TASK:>
Create OAI-PMH envelope for response with verb.
<END_TASK>
<USER_TASK:>
Description:
def verb(**kwargs):
"""Create OAI-PMH envelope for response with verb.""" |
e_tree, e_oaipmh = envelope(**kwargs)
e_element = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, kwargs['verb']))
return e_tree, e_element |
<SYSTEM_TASK:>
Fetch a record's identifier.
<END_TASK>
<USER_TASK:>
Description:
def oaiid_fetcher(record_uuid, data):
"""Fetch a record's identifier.
:param record_uuid: The record UUID.
:param data: The record data.
:returns: A :class:`invenio_pidstore.fetchers.FetchedPID` instance.
""" |
pid_value = data.get('_oai', {}).get('id')
if pid_value is None:
raise PersistentIdentifierError()
return FetchedPID(
provider=OAIIDProvider,
pid_type=OAIIDProvider.pid_type,
pid_value=str(pid_value),
) |
<SYSTEM_TASK:>
Add a record to the OAISet.
<END_TASK>
<USER_TASK:>
Description:
def add_record(self, record):
"""Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
""" |
record.setdefault('_oai', {}).setdefault('sets', [])
assert not self.has_record(record)
record['_oai']['sets'].append(self.spec) |
<SYSTEM_TASK:>
Remove a record from the OAISet.
<END_TASK>
<USER_TASK:>
Description:
def remove_record(self, record):
"""Remove a record from the OAISet.
:param record: Record to be removed.
:type record: `invenio_records.api.Record` or derivative.
""" |
assert self.has_record(record)
record['_oai']['sets'] = [
s for s in record['_oai']['sets'] if s != self.spec] |
<SYSTEM_TASK:>
Dump MARC21 compatible record.
<END_TASK>
<USER_TASK:>
Description:
def dumps_etree(pid, record, **kwargs):
"""Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
""" |
from dojson.contrib.to_marc21 import to_marc21
from dojson.contrib.to_marc21.utils import dumps_etree
return dumps_etree(to_marc21.do(record['_source']), **kwargs) |
<SYSTEM_TASK:>
Generate the eprints element for the identify response.
<END_TASK>
<USER_TASK:>
Description:
def eprints_description(metadataPolicy, dataPolicy,
submissionPolicy=None, content=None):
"""Generate the eprints element for the identify response.
The eprints container is used by the e-print community to describe
the content and policies of repositories.
For the full specification and schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-eprints.htm
""" |
eprints = Element(etree.QName(NS_EPRINTS[None], 'eprints'),
nsmap=NS_EPRINTS)
eprints.set(etree.QName(ns['xsi'], 'schemaLocation'),
'{0} {1}'.format(EPRINTS_SCHEMA_LOCATION,
EPRINTS_SCHEMA_LOCATION_XSD))
if content:
contentElement = etree.Element('content')
for key, value in content.items():
contentElement.append(E(key, value))
eprints.append(contentElement)
metadataPolicyElement = etree.Element('metadataPolicy')
for key, value in metadataPolicy.items():
metadataPolicyElement.append(E(key, value))
eprints.append(metadataPolicyElement)
dataPolicyElement = etree.Element('dataPolicy')
for key, value in dataPolicy.items():
dataPolicyElement.append(E(key, value))
eprints.append(dataPolicyElement)
if submissionPolicy:
submissionPolicyElement = etree.Element('submissionPolicy')
for key, value in submissionPolicy.items():
submissionPolicyElement.append(E(key, value))
eprints.append(submissionPolicyElement)
return etree.tostring(eprints, pretty_print=True) |
<SYSTEM_TASK:>
Generate the oai-identifier element for the identify response.
<END_TASK>
<USER_TASK:>
Description:
def oai_identifier_description(scheme, repositoryIdentifier,
delimiter, sampleIdentifier):
"""Generate the oai-identifier element for the identify response.
The OAI identifier format is intended to provide persistent resource
identifiers for items in repositories that implement OAI-PMH.
For the full specification and schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-oai-identifier.htm
""" |
oai_identifier = Element(etree.QName(NS_OAI_IDENTIFIER[None],
'oai_identifier'),
nsmap=NS_OAI_IDENTIFIER)
oai_identifier.set(etree.QName(ns['xsi'], 'schemaLocation'),
'{0} {1}'.format(OAI_IDENTIFIER_SCHEMA_LOCATION,
OAI_IDENTIFIER_SCHEMA_LOCATION_XSD))
oai_identifier.append(E('scheme', scheme))
oai_identifier.append(E('repositoryIdentifier', repositoryIdentifier))
oai_identifier.append(E('delimiter', delimiter))
oai_identifier.append(E('sampleIdentifier', sampleIdentifier))
return etree.tostring(oai_identifier, pretty_print=True) |
<SYSTEM_TASK:>
Generate the friends element for the identify response.
<END_TASK>
<USER_TASK:>
Description:
def friends_description(baseURLs):
"""Generate the friends element for the identify response.
The friends container is recommended for use by repositories
to list confederate repositories.
For the schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-friends.htm
""" |
friends = Element(etree.QName(NS_FRIENDS[None], 'friends'),
nsmap=NS_FRIENDS)
friends.set(etree.QName(ns['xsi'], 'schemaLocation'),
'{0} {1}'.format(FRIENDS_SCHEMA_LOCATION,
FRIENDS_SCHEMA_LOCATION_XSD))
for baseURL in baseURLs:
friends.append(E('baseURL', baseURL))
return etree.tostring(friends, pretty_print=True) |
<SYSTEM_TASK:>
Get list of affected records.
<END_TASK>
<USER_TASK:>
Description:
def get_affected_records(spec=None, search_pattern=None):
"""Get list of affected records.
:param spec: The record spec.
:param search_pattern: The search pattern.
:returns: An iterator to lazily find results.
""" |
# spec pattern query
# ---------- ---------- -------
# None None None
# None Y Y
# X None X
# X '' X
# X Y X OR Y
if spec is None and search_pattern is None:
return
queries = []
if spec is not None:
queries.append(Q('match', **{'_oai.sets': spec}))
if search_pattern:
queries.append(query_string_parser(search_pattern=search_pattern))
search = OAIServerSearch(
index=current_app.config['OAISERVER_RECORD_INDEX'],
).query(Q('bool', should=queries))
for result in search.scan():
yield result.meta.id |
<SYSTEM_TASK:>
Look for an existing path matching filename.
<END_TASK>
<USER_TASK:>
Description:
def get_file_path(filename, local=True, relative_to_module=None, my_dir=my_dir):
"""
Look for an existing path matching filename.
Try to resolve relative to the module location if the path cannot be found
using "normal" resolution.
""" |
# override my_dir if module is provided
if relative_to_module is not None:
my_dir = os.path.dirname(relative_to_module.__file__)
user_path = result = filename
if local:
user_path = os.path.expanduser(filename)
result = os.path.abspath(user_path)
if os.path.exists(result):
return result # The file was found normally
# otherwise look relative to the module.
result = os.path.join(my_dir, filename)
assert os.path.exists(result), "no such file " + repr((filename, result, user_path))
return result |
<SYSTEM_TASK:>
Load a javascript file to the Jupyter notebook context,
<END_TASK>
<USER_TASK:>
Description:
def load_if_not_loaded(widget, filenames, verbose=False, delay=0.1, force=False, local=True, evaluator=None):
"""
Load a javascript file to the Jupyter notebook context,
unless it was already loaded.
""" |
if evaluator is None:
evaluator = EVALUATOR # default if not specified.
for filename in filenames:
loaded = False
if force or not filename in LOADED_JAVASCRIPT:
js_text = get_text_from_file_name(filename, local)
if verbose:
print("loading javascript file", filename, "with", evaluator)
evaluator(widget, js_text)
LOADED_JAVASCRIPT.add(filename)
loaded = True
else:
if verbose:
print ("not reloading javascript file", filename)
if loaded and delay > 0:
if verbose:
print ("delaying to allow JS interpreter to sync.")
time.sleep(delay) |
<SYSTEM_TASK:>
Determine the base url for a root element.
<END_TASK>
<USER_TASK:>
Description:
def base_url(root):
"""Determine the base url for a root element.""" |
for attr, value in root.attrib.iteritems():
if attr.endswith('base') and 'http' in value:
return value
return None |
<SYSTEM_TASK:>
Return a tag and its namespace separately.
<END_TASK>
<USER_TASK:>
Description:
def clean_ns(tag):
"""Return a tag and its namespace separately.""" |
if '}' in tag:
split = tag.split('}')
return split[0].strip('{'), split[-1]
return '', tag |
<SYSTEM_TASK:>
A safe xpath that only uses namespaces if available.
<END_TASK>
<USER_TASK:>
Description:
def xpath(node, query, namespaces={}):
"""A safe xpath that only uses namespaces if available.""" |
if namespaces and 'None' not in namespaces:
return node.xpath(query, namespaces=namespaces)
return node.xpath(query) |
<SYSTEM_TASK:>
Return the inner text of a node. If a node has no sub elements, this
<END_TASK>
<USER_TASK:>
Description:
def innertext(node):
"""Return the inner text of a node. If a node has no sub elements, this
is just node.text. Otherwise, it's node.text + sub-element-text +
node.tail.""" |
if not len(node):
return node.text
return (node.text or '') + ''.join([etree.tostring(c) for c in node]) + (node.tail or '') |
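A worked example (Python 2, where `etree.tostring` returns a `str`; note that `tostring` on a child element includes that child's tail):

node = etree.fromstring('<p>hello <b>bold</b> world</p>')
innertext(node)  # -> 'hello <b>bold</b> world'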
<SYSTEM_TASK:>
Parse a document and return a feedparser dictionary with attr key access.
<END_TASK>
<USER_TASK:>
Description:
def parse(document, clean_html=True, unix_timestamp=False, encoding=None):
"""Parse a document and return a feedparser dictionary with attr key access.
If clean_html is False, the html in the feed will not be cleaned. If
clean_html is True, a sane version of lxml.html.clean.Cleaner will be used.
If it is a Cleaner object, that cleaner will be used. If unix_timestamp is
True, the date information will be a numerical unix timestamp rather than a
struct_time. If encoding is provided, the encoding of the document will be
manually set to that.""" |
if isinstance(clean_html, bool):
cleaner = default_cleaner if clean_html else fake_cleaner
else:
cleaner = clean_html
result = feedparser.FeedParserDict()
result['feed'] = feedparser.FeedParserDict()
result['entries'] = []
result['bozo'] = 0
try:
parser = SpeedParser(document, cleaner, unix_timestamp, encoding)
parser.update(result)
except Exception as e:
if isinstance(e, UnicodeDecodeError) and encoding is True:
encoding = chardet.detect(document)['encoding']
document = document.decode(encoding, 'replace').encode('utf-8')
return parse(document, clean_html, unix_timestamp, encoding)
import traceback
result['bozo'] = 1
result['bozo_exception'] = e
result['bozo_tb'] = traceback.format_exc()
return result |
<SYSTEM_TASK:>
wrapper to allow output redirects for handle_chunk.
<END_TASK>
<USER_TASK:>
Description:
def handle_chunk_wrapper(self, status, name, content, file_info):
"""wrapper to allow output redirects for handle_chunk.""" |
out = self.output
if out is not None:
with out:
print("handling chunk " + repr(type(content)))
self.handle_chunk(status, name, content, file_info)
else:
self.handle_chunk(status, name, content, file_info) |
<SYSTEM_TASK:>
Search the ORCID database.
<END_TASK>
<USER_TASK:>
Description:
def search(self, query, method="lucene", start=None,
rows=None, access_token=None):
"""Search the ORCID database.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param start: string
Index of the first record requested. Use for pagination.
:param rows: string
Number of records requested. Use for pagination.
:param access_token: string
If obtained before, the access token to use to pass through
authorization. Note that if this argument is not provided,
the function will take more time.
Returns
-------
:returns: dict
Search result with error description available. The results can
be obtained by accessing key 'result'. To get the number
of all results, access the key 'num-found'.
""" |
if access_token is None:
access_token = self.get_search_token_from_orcid()
headers = {'Accept': 'application/orcid+json',
'Authorization': 'Bearer %s' % access_token}
return self._search(query, method, start, rows, headers,
self._endpoint) |
<SYSTEM_TASK:>
Search the ORCID database with a generator.
<END_TASK>
<USER_TASK:>
Description:
def search_generator(self, query, method="lucene",
pagination=10, access_token=None):
"""Search the ORCID database with a generator.
The generator will yield every result.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param pagination: integer
How many papers should be fetched with the request.
:param access_token: string
If obtained before, the access token to use to pass through
authorization. Note that if this argument is not provided,
the function will take more time.
Yields
-------
:yields: dict
Single profile from the search results.
""" |
if access_token is None:
access_token = self.get_search_token_from_orcid()
headers = {'Accept': 'application/orcid+json',
'Authorization': 'Bearer %s' % access_token}
index = 0
while True:
paginated_result = self._search(query, method, index, pagination,
headers, self._endpoint)
if not paginated_result['result']:
return
for result in paginated_result['result']:
yield result
index += pagination |
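A hypothetical usage sketch (the query string is illustrative):

# lazily iterate over all matching profiles, 50 per request
for profile in api.search_generator('family-name:Smith', pagination=50):
    print(profile)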
<SYSTEM_TASK:>
Get a token for searching ORCID records.
<END_TASK>
<USER_TASK:>
Description:
def get_search_token_from_orcid(self, scope='/read-public'):
"""Get a token for searching ORCID records.
Parameters
----------
:param scope: string
/read-public or /read-member
Returns
-------
:returns: string
The token.
""" |
payload = {'client_id': self._key,
'client_secret': self._secret,
'scope': scope,
'grant_type': 'client_credentials'
}
url = "%s/oauth/token" % self._endpoint
headers = {'Accept': 'application/json'}
response = requests.post(url, data=payload, headers=headers,
timeout=self._timeout)
response.raise_for_status()
if self.do_store_raw_response:
self.raw_response = response
return response.json()['access_token'] |
<SYSTEM_TASK:>
Like `get_token`, but using an OAuth 2 authorization code.
<END_TASK>
<USER_TASK:>
Description:
def get_token_from_authorization_code(self,
authorization_code, redirect_uri):
"""Like `get_token`, but using an OAuth 2 authorization code.
Use this method if you run a webserver that serves as an endpoint for
the redirect URI. The webserver can retrieve the authorization code
from the URL that is requested by ORCID.
Parameters
----------
:param redirect_uri: string
The redirect uri of the institution.
:param authorization_code: string
The authorization code.
Returns
-------
:returns: dict
All data of the access token. The access token itself is in the
``"access_token"`` key.
""" |
token_dict = {
"client_id": self._key,
"client_secret": self._secret,
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": redirect_uri,
}
response = requests.post(self._token_url, data=token_dict,
headers={'Accept': 'application/json'},
timeout=self._timeout)
response.raise_for_status()
if self.do_store_raw_response:
self.raw_response = response
return json.loads(response.text) |
<SYSTEM_TASK:>
Get the public info about the researcher.
<END_TASK>
<USER_TASK:>
Description:
def read_record_public(self, orcid_id, request_type, token, put_code=None,
accept_type='application/orcid+json'):
"""Get the public info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
For example: 'record'.
See https://members.orcid.org/api/tutorial/read-orcid-records
for possible values.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param put_code: string | list of strings
The id of the queried work. In case of 'works' request_type
might be a list of strings
:param accept_type: expected MIME type of received data
Returns
-------
:returns: dict | lxml.etree._Element
Record(s) in JSON-compatible dictionary representation or
in XML E-tree, depending on accept_type specified.
""" |
return self._get_info(orcid_id, self._get_public_info, request_type,
token, put_code, accept_type) |
<SYSTEM_TASK:>
Get the user orcid from authentication process.
<END_TASK>
<USER_TASK:>
Description:
def get_user_orcid(self, user_id, password, redirect_uri):
"""Get the user orcid from authentication process.
Parameters
----------
:param user_id: string
The id of the user used for authentication.
:param password: string
The user password.
:param redirect_uri: string
The redirect uri of the institution.
Returns
-------
:returns: string
The orcid.
""" |
response = self._authenticate(user_id, password, redirect_uri,
'/authenticate')
return response['orcid'] |
<SYSTEM_TASK:>
Get the member info about the researcher.
<END_TASK>
<USER_TASK:>
Description:
def read_record_member(self, orcid_id, request_type, token, put_code=None,
accept_type='application/orcid+json'):
"""Get the member info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
For example: 'record'.
See https://members.orcid.org/api/tutorial/read-orcid-records
for possible values.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param put_code: string | list of strings
The id of the queried work. In case of 'works' request_type
might be a list of strings
:param accept_type: expected MIME type of received data
Returns
-------
:returns: dict | lxml.etree._Element
Record(s) in JSON-compatible dictionary representation or
in XML E-tree, depending on accept_type specified.
""" |
return self._get_info(orcid_id, self._get_member_info, request_type,
token, put_code, accept_type) |
<SYSTEM_TASK:>
From the authorization code, get the "access token" and the "refresh token" from Box.
<END_TASK>
<USER_TASK:>
Description:
def get_access_tokens(self, authorization_code):
"""From the authorization code, get the "access token" and the "refresh token" from Box.
Args:
authorization_code (str). Authorization code emitted by Box at the URL provided by the function :func:`get_authorization_url`.
Returns:
tuple. (access_token, refresh_token)
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
response = self.box_request.get_access_token(authorization_code)
try:
att = response.json()
except Exception as ex:
raise BoxHttpResponseError(ex)
if response.status_code >= 400:
raise BoxError(response.status_code, att)
return att['access_token'], att['refresh_token'] |
<SYSTEM_TASK:>
STOMP acknowledge command.
<END_TASK>
<USER_TASK:>
Description:
def ack(messageid, transactionid=None):
"""STOMP acknowledge command.
Acknowledge receipt of a specific message from the server.
messageid:
This is the id of the message we are acknowledging,
what else could it be? ;)
transactionid:
This is the id that all actions in this transaction
will have. If this is not given then a random UUID
will be generated for this.
""" |
header = 'message-id: %s' % messageid
if transactionid:
header = 'message-id: %s\ntransaction: %s' % (messageid, transactionid)
return "ACK\n%s\n\n\x00\n" % header |
<SYSTEM_TASK:>
STOMP send command.
<END_TASK>
<USER_TASK:>
Description:
def send(dest, msg, transactionid=None):
"""STOMP send command.
dest:
This is the channel we wish to subscribe to
msg:
This is the message body to be sent.
transactionid:
This is an optional field and is not needed
by default.
""" |
transheader = ''
if transactionid:
transheader = 'transaction: %s\n' % transactionid
return "SEND\ndestination: %s\n%s\n%s\x00\n" % (dest, transheader, msg) |
<SYSTEM_TASK:>
Check the cmd is valid; FrameError will be raised if it's not.
<END_TASK>
<USER_TASK:>
Description:
def setCmd(self, cmd):
"""Check the cmd is valid, FrameError will be raised if its not.""" |
cmd = cmd.upper()
if cmd not in VALID_COMMANDS:
raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
cmd, VALID_COMMANDS, STOMP_VERSION)
)
else:
self._cmd = cmd |
<SYSTEM_TASK:>
Called to create a STOMP message from the internal values.
<END_TASK>
<USER_TASK:>
Description:
def pack(self):
"""Called to create a STOMP message from the internal values.
""" |
headers = ''.join(
['%s:%s\n' % (f, v) for f, v in sorted(self.headers.items())]
)
stomp_message = "%s\n%s\n%s%s\n" % (self._cmd, headers, self.body, NULL)
# import pprint
# print "stomp_message: ", pprint.pprint(stomp_message)
return stomp_message |
<SYSTEM_TASK:>
Called to extract a STOMP message into this instance.
<END_TASK>
<USER_TASK:>
Description:
def unpack(self, message):
"""Called to extract a STOMP message into this instance.
message:
This is a text string representing a valid
STOMP (v1.0) message.
This method uses unpack_frame(...) to extract the
information, before it is assigned internally.
returned:
The result of the unpack_frame(...) call.
""" |
if not message:
raise FrameError("Unpack error! The given message isn't valid '%s'!" % message)
msg = unpack_frame(message)
self.cmd = msg['cmd']
self.headers = msg['headers']
# Assign directly as the message will have the null
# character in the message already.
self.body = msg['body']
return msg |
<SYSTEM_TASK:>
Called to provide a response to a message if needed.
<END_TASK>
<USER_TASK:>
Description:
def react(self, msg):
"""Called to provide a response to a message if needed.
msg:
This is a dictionary as returned by unpack_frame(...)
or it can be a straight STOMP message. This function
will attempt to determine which and deal with it accordingly.
returned:
A message to return or an empty string.
""" |
returned = ""
# If its not a string assume its a dict.
mtype = type(msg)
if mtype in stringTypes:
msg = unpack_frame(msg)
elif mtype == dict:
pass
else:
raise FrameError("Unknown message type '%s', I don't know what to do with this!" % mtype)
if msg['cmd'] in self.states:
# print("reacting to message - %s" % msg['cmd'])
returned = self.states[msg['cmd']](msg)
return returned |
<SYSTEM_TASK:>
Called to handle an error message received from the server.
<END_TASK>
<USER_TASK:>
Description:
def error(self, msg):
"""Called to handle an error message received from the server.
This method just logs the error message
returned:
NO_RESPONSE_NEEDED
""" |
body = msg['body'].replace(NULL, '')
brief_msg = ""
if 'message' in msg['headers']:
brief_msg = msg['headers']['message']
self.log.error("Received server error - message%s\n\n%s" % (brief_msg, body))
returned = NO_RESPONSE_NEEDED
if self.testing:
returned = 'error'
return returned |
<SYSTEM_TASK:>
Called to handle a receipt message received from the server.
<END_TASK>
<USER_TASK:>
Description:
def receipt(self, msg):
"""Called to handle a receipt message received from the server.
This method just logs the receipt message
returned:
NO_RESPONSE_NEEDED
""" |
body = msg['body'].replace(NULL, '')
brief_msg = ""
if 'receipt-id' in msg['headers']:
brief_msg = msg['headers']['receipt-id']
self.log.info("Received server receipt message - receipt-id:%s\n\n%s" % (brief_msg, body))
returned = NO_RESPONSE_NEEDED
if self.testing:
returned = 'receipt'
return returned |
<SYSTEM_TASK:>
Set up a logger that catches all channels and logs it to stdout.
<END_TASK>
<USER_TASK:>
Description:
def log_init(level):
"""Set up a logger that catches all channels and logs it to stdout.
This is used to set up logging when testing.
""" |
log = logging.getLogger()
hdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(level) |
<SYSTEM_TASK:>
Override this and do some custom message handling.
<END_TASK>
<USER_TASK:>
Description:
def ack(self, msg):
"""Override this and do some customer message handler.
""" |
print("Got a message:\n%s\n" % msg['body'])
# do something with the message...
# Generate the ack or not if you subscribed with ack='auto'
return super(Pong, self).ack(msg) |
<SYSTEM_TASK:>
This is a decorator that will wrap the decorated method in an atomic transaction and
<END_TASK>
<USER_TASK:>
Description:
def transaction_atomic_with_retry(num_retries=5, backoff=0.1):
"""
This is a decorator that will wrap the decorated method in an atomic transaction and
retry the transaction a given number of times
:param num_retries: How many times should we retry before we give up
:param backoff: How long should we wait after each try
""" |
# Create the decorator
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
# Keep track of how many times we have tried
num_tries = 0
exception = None
# Call the main sync entities method and catch any exceptions
while num_tries <= num_retries:
# Try running the transaction
try:
with transaction.atomic():
return wrapped(*args, **kwargs)
# Catch any operation errors
except db.utils.OperationalError as e:
num_tries += 1
exception = e
sleep(backoff * num_tries)
# If we have an exception raise it
raise exception
# Return the decorator
return wrapper |
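A hypothetical application of the decorator (the function name, model and numbers are made up):

@transaction_atomic_with_retry(num_retries=3, backoff=0.2)
def upsert_entities(model_objs):
    # runs inside transaction.atomic(); on db.utils.OperationalError
    # (e.g. a deadlock) the block is retried, sleeping a little longer
    # after each failed attempt (0.2 s, 0.4 s, ...)
    Entity.objects.bulk_create(model_objs)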
<SYSTEM_TASK:>
A decorator that can be used to defer the syncing of entities until after the method has been run
<END_TASK>
<USER_TASK:>
Description:
def defer_entity_syncing(wrapped, instance, args, kwargs):
"""
A decorator that can be used to defer the syncing of entities until after the method has been run
This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand
why they are happening
""" |
# Defer entity syncing while we run our method
sync_entities.defer = True
# Run the method
try:
return wrapped(*args, **kwargs)
# After we run the method disable the deferred syncing
# and sync all the entities that have been buffered to be synced
finally:
# Enable entity syncing again
sync_entities.defer = False
# Get the models that need to be synced
model_objs = list(sync_entities.buffer.values())
# If None is in the buffer, a full sync was requested, so sync all models
if None in sync_entities.buffer:
model_objs = list()
# Sync the entities that were deferred if any
if len(sync_entities.buffer):
sync_entities(*model_objs)
# Clear the buffer
sync_entities.buffer = {} |
<SYSTEM_TASK:>
Given model objects organized by content type and a dictionary of all model IDs that need
<END_TASK>
<USER_TASK:>
Description:
def _get_super_entities_by_ctype(model_objs_by_ctype, model_ids_to_sync, sync_all):
"""
Given model objects organized by content type and a dictionary of all model IDs that need
to be synced, organize all super entity relationships that need to be synced.
Ensure that the model_ids_to_sync dict is updated with any new super entities
that need to be part of the overall entity sync
""" |
super_entities_by_ctype = defaultdict(lambda: defaultdict(list)) # pragma: no cover
for ctype, model_objs_for_ctype in model_objs_by_ctype.items():
entity_config = entity_registry.entity_registry.get(ctype.model_class())
super_entities = entity_config.get_super_entities(model_objs_for_ctype, sync_all)
super_entities_by_ctype[ctype] = {
ContentType.objects.get_for_model(model_class, for_concrete_model=False): relationships
for model_class, relationships in super_entities.items()
}
# Continue adding to the set of entities that need to be synced
for super_entity_ctype, relationships in super_entities_by_ctype[ctype].items():
for sub_entity_id, super_entity_id in relationships:
model_ids_to_sync[ctype].add(sub_entity_id)
model_ids_to_sync[super_entity_ctype].add(super_entity_id)
return super_entities_by_ctype |
<SYSTEM_TASK:>
Syncs entities watching changes of a model instance.
<END_TASK>
<USER_TASK:>
Description:
def sync_entities_watching(instance):
"""
Syncs entities watching changes of a model instance.
""" |
for entity_model, entity_model_getter in entity_registry.entity_watching[instance.__class__]:
model_objs = list(entity_model_getter(instance))
if model_objs:
sync_entities(*model_objs) |
<SYSTEM_TASK:>
Given a list of entity kinds ensure they are synced properly to the database.
<END_TASK>
<USER_TASK:>
Description:
def upsert_entity_kinds(self, entity_kinds):
"""
Given a list of entity kinds ensure they are synced properly to the database.
This will ensure that only changed entity kinds are upserted and will still return all
entity kinds, both changed and unchanged
:param entity_kinds: The list of entity kinds to sync
""" |
# Filter out unchanged entity kinds
unchanged_entity_kinds = {}
if entity_kinds:
unchanged_entity_kinds = {
(entity_kind.name, entity_kind.display_name): entity_kind
for entity_kind in EntityKind.all_objects.extra(
where=['(name, display_name) IN %s'],
params=[tuple(
(entity_kind.name, entity_kind.display_name)
for entity_kind in entity_kinds
)]
)
}
# Filter out the unchanged entity kinds
changed_entity_kinds = [
entity_kind
for entity_kind in entity_kinds
if (entity_kind.name, entity_kind.display_name) not in unchanged_entity_kinds
]
# If any of our kinds have changed upsert them
upserted_entity_kinds = []
if changed_entity_kinds:
# Select all our existing entity kinds for update so we can do proper locking
# We have to select all here for some odd reason, if we only select the ones
# we are syncing we still run into deadlock issues
list(EntityKind.all_objects.all().select_for_update().values_list('id', flat=True))
# Upsert the entity kinds
upserted_entity_kinds = manager_utils.bulk_upsert(
queryset=EntityKind.all_objects.filter(
name__in=[entity_kind.name for entity_kind in changed_entity_kinds]
),
model_objs=changed_entity_kinds,
unique_fields=['name'],
update_fields=['display_name'],
return_upserts=True
)
# Return all the entity kinds
return upserted_entity_kinds + list(unchanged_entity_kinds.values()) |
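
A hedged usage sketch: `syncer` stands for whatever instance exposes upsert_entity_kinds, and the kind names are illustrative. Unsaved EntityKind instances are keyed on name; only those whose display_name differs from the stored row are upserted.

kinds = [
    EntityKind(name='auth.user', display_name='User'),
    EntityKind(name='accounts.team', display_name='Team'),
]
synced = syncer.upsert_entity_kinds(kinds)  # returns changed + unchanged kinds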
<SYSTEM_TASK:>
Returns a tuple for a kind name and kind display name of an entity.
<END_TASK>
<USER_TASK:>
Description:
def get_entity_kind(self, model_obj):
"""
Returns a tuple for a kind name and kind display name of an entity.
By default, uses the app_label and model of the model object's content
type as the kind.
""" |
model_obj_ctype = ContentType.objects.get_for_model(self.queryset.model)
return (u'{0}.{1}'.format(model_obj_ctype.app_label, model_obj_ctype.model), u'{0}'.format(model_obj_ctype)) |
<SYSTEM_TASK:>
Registers an entity config
<END_TASK>
<USER_TASK:>
Description:
def register_entity(self, entity_config):
"""
Registers an entity config
""" |
if not issubclass(entity_config, EntityConfig):
raise ValueError('Must register an entity config class that subclasses EntityConfig')
if entity_config.queryset is None:
raise ValueError('Entity config must define queryset')
model = entity_config.queryset.model
self._entity_registry[model] = entity_config()
# Add watchers to the global look up table
for watching_model, entity_model_getter in entity_config.watching:
self._entity_watching[watching_model].append((model, entity_model_getter)) |
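
A minimal registration sketch; the Account model and the `registry` instance name are assumptions. register_entity receives the config class itself and instantiates it, so watchers declared on the class are wired up at registration time.

class AccountConfig(EntityConfig):
    queryset = Account.objects.all()

registry.register_entity(AccountConfig)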
<SYSTEM_TASK:>
Start twisted event loop and the fun should begin...
<END_TASK>
<USER_TASK:>
Description:
def start(host='localhost', port=61613, username='', password=''):
"""Start twisted event loop and the fun should begin...
""" |
StompClientFactory.username = username
StompClientFactory.password = password
reactor.connectTCP(host, port, StompClientFactory())
reactor.run() |
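
A usage sketch with illustrative credentials: combined with log_init above, this wires up logging and then blocks in the Twisted reactor until interrupted.

if __name__ == '__main__':
    log_init(logging.DEBUG)
    start(host='localhost', port=61613, username='guest', password='guest')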
<SYSTEM_TASK:>
Send out a hello message periodically.
<END_TASK>
<USER_TASK:>
Description:
def send(self):
"""Send out a hello message periodically.
""" |
self.log.info("Saying hello (%d)." % self.counter)
f = stomper.Frame()
f.unpack(stomper.send(DESTINATION, 'hello there (%d)' % self.counter))
self.counter += 1
# ActiveMQ specific headers:
#
#f.headers['persistent'] = 'true'
self.transport.write(f.pack()) |
<SYSTEM_TASK:>
Register with stomp server.
<END_TASK>
<USER_TASK:>
Description:
def connectionMade(self):
"""Register with stomp server.
""" |
cmd = stomper.connect(self.username, self.password)
self.transport.write(cmd) |
<SYSTEM_TASK:>
Use stompbuffer to determine when a complete message has been received.
<END_TASK>
<USER_TASK:>
Description:
def dataReceived(self, data):
"""Use stompbuffer to determine when a complete message has been received.
""" |
self.stompBuffer.appendData(data)
while True:
msg = self.stompBuffer.getOneMessage()
if msg is None:
break
returned = self.react(msg)
if returned:
self.transport.write(returned) |
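
The buffering matters because TCP can deliver a STOMP frame split across reads, or several frames in one read. A hedged sketch of the same loop outside Twisted, assuming stomper's stompbuffer module (which this protocol already relies on):

from stomper import stompbuffer

buf = stompbuffer.StompBuffer()
frame = 'MESSAGE\ndestination:/queue/test\n\nhello\x00\n'
buf.appendData(frame[:10])
print(buf.getOneMessage())   # None - the frame is still incomplete
buf.appendData(frame[10:])
print(buf.getOneMessage())   # parsed frame as a dict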
<SYSTEM_TASK:>
Process the message and determine what to do with it.
<END_TASK>
<USER_TASK:>
Description:
def ack(self, msg):
"""Process the message and determine what to do with it.
""" |
self.log.info("receiverId <%s> Received: <%s> " % (self.receiverId, msg['body']))
#return super(MyStomp, self).ack(msg)
return stomper.NO_REPONSE_NEEDED |
<SYSTEM_TASK:>
Register with the stomp server.
<END_TASK>
<USER_TASK:>
Description:
def connectionMade(self):
"""Register with the stomp server.
""" |
cmd = self.sm.connect()
self.transport.write(cmd) |
<SYSTEM_TASK:>
Data received, react to it and respond if needed.
<END_TASK>
<USER_TASK:>
Description:
def dataReceived(self, data):
"""Data received, react to it and respond if needed.
""" |
# print "receiver dataReceived: <%s>" % data
msg = stomper.unpack_frame(data)
returned = self.sm.react(msg)
# print "receiver returned <%s>" % returned
if returned:
self.transport.write(returned) |
<SYSTEM_TASK:>
Find a folder or a file ID from its name, inside a given folder.
<END_TASK>
<USER_TASK:>
Description:
def find_id_in_folder(self, name, parent_folder_id=0):
"""Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
if name is None or len(name) == 0:
return parent_folder_id
offset = 0
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
total = int(resp['total_count'])
while offset < total:
found = self.__find_name(resp, name)
if found is not None:
return found
offset += int(len(resp['entries']))
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
return None |
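
A usage sketch, assuming `box` is an authenticated client instance exposing this method; the folder and file names are illustrative.

folder_id = box.find_id_in_folder('reports')
if folder_id is not None:
    file_id = box.find_id_in_folder('summary.pdf', parent_folder_id=folder_id)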
<SYSTEM_TASK:>
Create a folder
<END_TASK>
<USER_TASK:>
Description:
def create_folder(self, name, parent_folder_id=0):
"""Create a folder
If the folder exists, a BoxError will be raised.
Args:
name (str): Name of the folder.
parent_folder_id (int): ID of the folder where to create the new one.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
return self.__request("POST", "folders",
data={ "name": name,
"parent": {"id": unicode(parent_folder_id)} }) |
<SYSTEM_TASK:>
Delete an existing folder
<END_TASK>
<USER_TASK:>
Description:
def delete_folder(self, folder_id, recursive=True):
"""Delete an existing folder
Args:
folder_id (int): ID of the folder to delete.
recursive (bool): Delete all subfolder if True.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
return self.__request("DELETE", "folders/%s" % (folder_id, ),
querystring={'recursive': unicode(recursive).lower()}) |
<SYSTEM_TASK:>
Get files and folders inside a given folder
<END_TASK>
<USER_TASK:>
Description:
def get_folder_items(self, folder_id,
limit=100, offset=0, fields_list=None):
"""Get files and folders inside a given folder
Args:
folder_id (int): Where to get files and folders info.
limit (int): The number of items to return.
offset (int): The item at which to begin the response.
fields_list (list): List of attributes to get. All attributes if None.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
qs = { "limit": limit,
"offset": offset }
if fields_list:
qs['fields'] = ','.join(fields_list)
return self.__request("GET", "folders/%s/items" % (folder_id, ),
querystring=qs) |
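
A paging sketch built on the same total_count/offset contract used by find_id_in_folder above; `box` and the requested fields are assumptions.

offset = 0
while True:
    resp = box.get_folder_items(folder_id, limit=100, offset=offset,
                                fields_list=['name'])
    for entry in resp['entries']:
        print(entry['name'])
    offset += len(resp['entries'])
    if offset >= int(resp['total_count']):
        break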
<SYSTEM_TASK:>
Upload a file into a folder.
<END_TASK>
<USER_TASK:>
Description:
def upload_file(self, name, folder_id, file_path):
"""Upload a file into a folder.
Use this function for small files; for big files use the chunk_upload_file() function.
Args:
name (str): Name of the file on your Box storage.
folder_id (int): ID of the folder where to upload the file.
file_path (str): Local path of the file to upload.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
try:
return self.__do_upload_file(name, folder_id, file_path)
except BoxError, ex:
if ex.status != 401:
raise
#tokens had been refreshed, so we start again the upload
return self.__do_upload_file(name, folder_id, file_path) |
<SYSTEM_TASK:>
Upload a new version of a file into a folder.
<END_TASK>
<USER_TASK:>
Description:
def upload_new_file_version(self, name, folder_id, file_id, file_path):
"""Upload a new version of a file into a folder.
Use this function for small files; for big files use the chunk_upload_file() function.
Args:
name (str): Name of the file on your Box storage.
folder_id (int): ID of the folder where to upload the file.
file_id (int): ID of the file to update.
file_path (str): Local path of the file to upload.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
try:
return self.__do_upload_file(name, folder_id, file_path, file_id)
except BoxError, ex:
if ex.status != 401:
raise
#tokens had been refreshed, so we start again the upload
return self.__do_upload_file(name, folder_id, file_path, file_id) |
<SYSTEM_TASK:>
Upload a file chunk by chunk.
<END_TASK>
<USER_TASK:>
Description:
def chunk_upload_file(self, name, folder_id, file_path,
progress_callback=None,
chunk_size=1024*1024*1):
"""Upload a file chunk by chunk.
The whole file is never loaded in memory.
Use this function for big files.
The callback(transferred, total) lets you know the upload progress.
Upload can be cancelled if the callback raises an Exception.
>>> def progress_callback(transferred, total):
... print 'Uploaded %i bytes of %i' % (transferred, total, )
... if user_request_cancel:
... raise MyCustomCancelException()
Args:
name (str): Name of the file on your Box storage.
folder_id (int): ID of the folder where to upload the file.
file_path (str): Local path of the file to upload.
progress_callback (func): Function called each time a chunk is uploaded.
chunk_size (int): Size of chunks.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
try:
return self.__do_chunk_upload_file(name, folder_id, file_path,
progress_callback,
chunk_size)
except BoxError, ex:
if ex.status != 401:
raise
#tokens had been refreshed, so we start again the upload
return self.__do_chunk_upload_file(name, folder_id, file_path,
progress_callback,
chunk_size) |
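
A hedged usage sketch with a progress callback; `box`, the file names, and the paths are illustrative. Raising from the callback cancels the upload, as documented above.

def progress(transferred, total):
    print('Uploaded %i of %i bytes' % (transferred, total))

resp = box.chunk_upload_file('backup.tar.gz', folder_id, '/tmp/backup.tar.gz',
                             progress_callback=progress,
                             chunk_size=4 * 1024 * 1024)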