Dataset columns: text_prompt — string, lengths 157 to 13.1k; code_prompt — string, lengths 7 to 19.8k.
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def i2c_master_read(self, addr, length, flags=I2C_NO_FLAGS): """Make an I2C read access. The given I2C device is addressed and clock cycles for `length` bytes are generated. A short read will occur if the device generates an early NAK. The transaction is finished with an I2C stop condition unless the I2C_NO_STOP flag is set. """
data = array.array('B', (0,) * length) status, rx_len = api.py_aa_i2c_read_ext(self.handle, addr, flags, length, data) _raise_i2c_status_code_error_if_failure(status) del data[rx_len:] return bytes(data)
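A minimal usage sketch of the read call above (hedged: the adapter object `aa` and the 0x50 target address are illustrative assumptions, not part of the original code):

    # Read 16 bytes from a hypothetical device at I2C address 0x50.
    data = aa.i2c_master_read(0x50, 16)
    # A short read is possible if the device NAKs early, so check the length.
    print(len(data), data.hex())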
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def poll(self, timeout=None): """Wait for an event to occur. If `timeout` is given, it specifies the length of time in milliseconds for which the function will wait for events before returning. If `timeout` is omitted, negative or None, the call will block until there is an event. Returns a list of events. In case no event is pending, an empty list is returned. """
if timeout is None: timeout = -1 ret = api.py_aa_async_poll(self.handle, timeout) _raise_error_if_negative(ret) events = list() for event in (POLL_I2C_READ, POLL_I2C_WRITE, POLL_SPI, POLL_I2C_MONITOR): if ret & event: events.append(event) return events
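A hedged sketch of the intended polling pattern; the adapter object `aa` and the combination with `i2c_slave_read` are assumptions based on the docstrings in this section:

    # Wait up to 500 ms for an event, then dispatch on the returned flags.
    events = aa.poll(500)
    if not events:
        print('timeout, nothing pending')
    if POLL_I2C_READ in events:
        addr, data = aa.i2c_slave_read()
        print('slave reception from', hex(addr), 'of', len(data), 'bytes')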
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_i2c_slave(self, slave_address): """Enable I2C slave mode. The device will respond to the specified slave_address if it is addressed. You can wait for the data with :func:`poll` and get it with `i2c_slave_read`. """
ret = api.py_aa_i2c_slave_enable(self.handle, slave_address, self.BUFFER_SIZE, self.BUFFER_SIZE) _raise_error_if_negative(ret)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def i2c_slave_read(self): """Read the bytes from an I2C slave reception. Returns a tuple of the slave address that was addressed and the received bytes. """
data = array.array('B', (0,) * self.BUFFER_SIZE) status, addr, rx_len = api.py_aa_i2c_slave_read_ext(self.handle, self.BUFFER_SIZE, data) _raise_i2c_status_code_error_if_failure(status) # In case of general call, actually return the general call address if addr == 0x80: addr = 0x00 del data[rx_len:] return (addr, bytes(data))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def i2c_slave_last_transmit_size(self): """Returns the number of bytes transmitted by the slave."""
ret = api.py_aa_i2c_slave_write_stats(self.handle) _raise_error_if_negative(ret) return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def i2c_monitor_read(self): """Retrieve any data fetched by the monitor. This function has an integrated timeout mechanism. You should use :func:`poll` to determine if there is any data available. Returns a list of data bytes and special symbols. There are three special symbols: `I2C_MONITOR_NACK`, `I2C_MONITOR_START` and `I2C_MONITOR_STOP`. """
data = array.array('H', (0,) * self.BUFFER_SIZE) ret = api.py_aa_i2c_monitor_read(self.handle, self.BUFFER_SIZE, data) _raise_error_if_negative(ret) del data[ret:] return data.tolist()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spi_bitrate(self): """SPI bitrate in kHz. Not every bitrate is supported by the host adapter. Therefore, the actual bitrate may be less than the value which is set. The slowest bitrate supported is 125kHz. Any smaller value will be rounded up to 125kHz. The power-on default value is 1000 kHz. """
ret = api.py_aa_spi_bitrate(self.handle, 0) _raise_error_if_negative(ret) return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spi_configure(self, polarity, phase, bitorder): """Configure the SPI interface."""
ret = api.py_aa_spi_configure(self.handle, polarity, phase, bitorder) _raise_error_if_negative(ret)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spi_configure_mode(self, spi_mode): """Configure the SPI interface by the well known SPI modes."""
if spi_mode == SPI_MODE_0: self.spi_configure(SPI_POL_RISING_FALLING, SPI_PHASE_SAMPLE_SETUP, SPI_BITORDER_MSB) elif spi_mode == SPI_MODE_3: self.spi_configure(SPI_POL_FALLING_RISING, SPI_PHASE_SETUP_SAMPLE, SPI_BITORDER_MSB) else: raise RuntimeError('SPI Mode not supported')
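For orientation, the two supported modes correspond to the usual CPOL/CPHA pairs; a short sketch (adapter object `aa` assumed):

    # SPI mode 0: clock idles low, data is sampled on the first (rising) edge.
    aa.spi_configure_mode(SPI_MODE_0)
    # SPI mode 3: clock idles high, data is sampled on the second (rising) edge.
    aa.spi_configure_mode(SPI_MODE_3)
    # Any other value raises RuntimeError('SPI Mode not supported').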
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spi_write(self, data): """Write a stream of bytes to a SPI device. Because SPI is full duplex, the bytes read back during the transfer are returned. """
data_out = array.array('B', data) data_in = array.array('B', (0,) * len(data_out)) ret = api.py_aa_spi_write(self.handle, len(data_out), data_out, len(data_in), data_in) _raise_error_if_negative(ret) return bytes(data_in)
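Because SPI is full duplex, the same call both writes and reads; a hedged sketch of using the returned buffer (the command byte and device behaviour are hypothetical):

    # Send one command byte plus three dummy bytes; the slave shifts its
    # response back on MISO while the dummy bytes are clocked out.
    response = aa.spi_write(b'\x9f\x00\x00\x00')
    print(response[1:].hex())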
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spi_ss_polarity(self, polarity): """Change the output polarity on the SS line. Please note that this only affects the master functions. """
ret = api.py_aa_spi_master_ss_polarity(self.handle, polarity) _raise_error_if_negative(ret)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edit_form(self, obj): """Customize edit form."""
form = super(OAISetModelView, self).edit_form(obj) del form.spec return form
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _schema_from_verb(verb, partial=False): """Return an instance of schema for given verb."""
from .verbs import Verbs return getattr(Verbs, verb)(partial=partial)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize(pagination, **kwargs): """Serialize the resumption token for the given pagination."""
if not pagination.has_next: return token_builder = URLSafeTimedSerializer( current_app.config['SECRET_KEY'], salt=kwargs['verb'], ) schema = _schema_from_verb(kwargs['verb'], partial=False) data = dict(seed=random.random(), page=pagination.next_num, kwargs=schema.dump(kwargs).data) scroll_id = getattr(pagination, '_scroll_id', None) if scroll_id: data['scroll_id'] = scroll_id return token_builder.dumps(data)
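For orientation, the token produced above is simply an itsdangerous URL-safe signed payload; a standalone sketch of the same mechanism (the secret and salt values are illustrative only):

    from itsdangerous import URLSafeTimedSerializer

    builder = URLSafeTimedSerializer('CHANGE_ME_SECRET', salt='ListRecords')
    token = builder.dumps({'page': 2, 'kwargs': {'metadataPrefix': 'oai_dc'}})
    # Round-trips as long as max_age (in seconds) has not been exceeded.
    print(builder.loads(token, max_age=60))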
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _deserialize(self, value, attr, data): """Deserialize the resumption token."""
token_builder = URLSafeTimedSerializer( current_app.config['SECRET_KEY'], salt=data['verb'], ) result = token_builder.loads(value, max_age=current_app.config[ 'OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME']) result['token'] = value result['kwargs'] = self.root.load(result['kwargs'], partial=True).data return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, data, many=None, partial=None): """Deserialize a data structure to an object."""
result = super(ResumptionTokenSchema, self).load( data, many=many, partial=partial ) result.data.update( result.data.get('resumptionToken', {}).get('kwargs', {}) ) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_request_validator(request): """Validate arguments in the incoming request."""
verb = request.values.get('verb', '', type=str) resumption_token = request.values.get('resumptionToken', None) schema = Verbs if resumption_token is None else ResumptionVerbs return getattr(schema, verb, OAISchema)(partial=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_iso_permissive(datestring, use_dateutil=True): """Parse an ISO8601-formatted datetime and return a datetime object. Inspired by the marshmallow.utils.from_iso function, but also accepts datestrings that don't contain the time. """
dateutil_available = False try: from dateutil import parser dateutil_available = True except ImportError: dateutil_available = False import datetime # Use dateutil's parser if possible if dateutil_available and use_dateutil: return parser.parse(datestring) else: # Strip off timezone info. return datetime.datetime.strptime(datestring[:19], '%Y-%m-%dT%H:%M:%S')
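A quick illustration of the permissive behaviour, assuming the function above is in scope and python-dateutil is installed:

    print(from_iso_permissive('2016-02-01T13:30:00'))  # 2016-02-01 13:30:00
    print(from_iso_permissive('2016-02-01'))           # 2016-02-01 00:00:00 (date-only accepted)
    # Without dateutil the second call would raise ValueError, because the
    # strict fallback expects the full '%Y-%m-%dT%H:%M:%S' layout.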
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, data): """Check range between dates under keys ``from_`` and ``until``."""
if 'verb' in data and data['verb'] != self.__class__.__name__: raise ValidationError( # FIXME encode data 'This is not a valid OAI-PMH verb:{0}'.format(data['verb']), field_names=['verb'], ) if 'from_' in data and 'until' in data and \ data['from_'] > data['until']: raise ValidationError('Date "from" must be before "until".') extra = set(request.values.keys()) - set([ f.load_from or f.name for f in self.fields.values() ]) if extra: raise ValidationError('You have passed too many arguments.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sets(self): """Get list of sets."""
if self.cache: return self.cache.get( self.app.config['OAISERVER_CACHE_KEY'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sets(self, values): """Set list of sets."""
# if cache server is configured, save sets list if self.cache: self.cache.set(self.app.config['OAISERVER_CACHE_KEY'], values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_signals_oaiset(self): """Register OAISet signals to update records."""
from .models import OAISet from .receivers import after_insert_oai_set, \ after_update_oai_set, after_delete_oai_set listen(OAISet, 'after_insert', after_insert_oai_set) listen(OAISet, 'after_update', after_update_oai_set) listen(OAISet, 'after_delete', after_delete_oai_set)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unregister_signals_oaiset(self): """Unregister OAISet signals."""
from .models import OAISet from .receivers import after_insert_oai_set, \ after_update_oai_set, after_delete_oai_set if contains(OAISet, 'after_insert', after_insert_oai_set): remove(OAISet, 'after_insert', after_insert_oai_set) remove(OAISet, 'after_update', after_update_oai_set) remove(OAISet, 'after_delete', after_delete_oai_set)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_params(params): """ Extracts the values of a set of parameters, recursing into nested dictionaries. """
values = [] if isinstance(params, dict): for key, value in params.items(): values.extend(extract_params(value)) elif isinstance(params, list): for value in params: values.extend(extract_params(value)) else: values.append(params) return values
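A worked example of the recursion above, using only the function as defined (plain Python; order follows dict/list traversal):

    params = {'email': 'user@example.com',
              'vars': {'name': 'Jane', 'items': [{'id': 1}, {'id': 2}]}}
    print(extract_params(params))
    # -> ['user@example.com', 'Jane', 1, 2]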
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_list(self, list_name, options=None): """ Get detailed metadata information about a list. """
options = options or {} data = {'list': list_name} data.update(options) return self.api_get('list', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_contacts(self, email, password, include_name=False): """ Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail. """
data = {'email': email, 'password': password} if include_name: data['names'] = 1 return self.api_post('contacts', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def push_content(self, title, url, images=None, date=None, expire_date=None, description=None, location=None, price=None, tags=None, author=None, site_name=None, spider=None, vars=None): """ Push a new piece of content to Sailthru. Expected names for the `images` argument's map are "full" and "thumb" Expected format for `location` should be [longitude,latitude] @param title: title string for the content @param url: URL string for the content @param images: map of image names @param date: date string @param expire_date: date string for when the content expires @param description: description for the content @param location: location of the content @param price: price for the content @param tags: list or comma separated string values @param author: author for the content @param site_name: site name for the content @param spider: truthy value to force respidering content @param vars: replaceable vars dictionary """
vars = vars or {} data = {'title': title, 'url': url} if images is not None: data['images'] = images if date is not None: data['date'] = date if expire_date is not None: data['expire_date'] = expire_date if location is not None: data['location'] = location if price is not None: data['price'] = price if description is not None: data['description'] = description if site_name is not None: data['site_name'] = site_name if author is not None: data['author'] = author if spider: data['spider'] = 1 if tags is not None: data['tags'] = ",".join(tags) if isinstance(tags, list) else tags if len(vars) > 0: data['vars'] = vars.copy() return self.api_post('content', data)
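A hedged usage sketch of the content push; the `client` object and all field values are illustrative assumptions:

    response = client.push_content(
        title='Example article',
        url='http://example.com/articles/1',
        images={'full': 'http://example.com/img/1.jpg',
                'thumb': 'http://example.com/img/1-thumb.jpg'},
        tags=['news', 'python'],   # serialized to the string 'news,python'
        vars={'author_bio': 'Staff writer'},
    )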
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_alert(self, email, alert_id): """ Delete a user alert. """
data = {'email': email, 'alert_id': alert_id} return self.api_delete('alert', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_purchase(self, purchase_id, purchase_key='sid'): """ Retrieve information about a purchase using the system's unique ID or a client's ID @param purchase_id: a string that represents a unique_id or an extid. @param purchase_key: a string that is either 'sid' or 'extid'. """
data = {'purchase_id': purchase_id, 'purchase_key': purchase_key} return self.api_get('purchase', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def receive_verify_post(self, post_params): """ Returns true if the incoming request is an authenticated verify post. """
if isinstance(post_params, dict): required_params = ['action', 'email', 'send_id', 'sig'] if not self.check_for_valid_postback_actions(required_params, post_params): return False else: return False if post_params['action'] != 'verify': return False sig = post_params['sig'] post_params = post_params.copy() del post_params['sig'] if sig != get_signature_hash(post_params, self.secret): return False send_response = self.get_send(post_params['send_id']) try: send_body = send_response.get_body() send_json = json.loads(send_body) if 'email' not in send_body: return False if send_json['email'] != post_params['email']: return False except ValueError: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def receive_hardbounce_post(self, post_params): """ Hard bounce postbacks """
if isinstance(post_params, dict): required_params = ['action', 'email', 'sig'] if not self.check_for_valid_postback_actions(required_params, post_params): return False else: return False if post_params['action'] != 'hardbounce': return False signature = post_params['sig'] post_params = post_params.copy() del post_params['sig'] if signature != get_signature_hash(post_params, self.secret): return False # for sends if 'send_id' in post_params: send_id = post_params['send_id'] send_response = self.get_send(send_id) if not send_response.is_ok(): return False send_obj = send_response.get_body() if not send_obj or 'email' not in send_obj: return False # for blasts if 'blast_id' in post_params: blast_id = post_params['blast_id'] blast_response = self.get_blast(blast_id) if not blast_response.is_ok(): return False blast_obj = blast_response.get_body() if not blast_obj: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_for_valid_postback_actions(self, required_keys, post_params): """ Check whether post_params contains the required keys. """
for key in required_keys: if key not in post_params: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_get(self, action, data, headers=None): """ Perform an HTTP GET request, using the shared-secret auth hash. @param action: API action call @param data: dictionary values """
return self._api_request(action, data, 'GET', headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_post(self, action, data, binary_data_param=None): """ Perform an HTTP POST request, using the shared-secret auth hash. @param action: API action call @param data: dictionary values """
binary_data_param = binary_data_param or [] if binary_data_param: return self.api_post_multipart(action, data, binary_data_param) else: return self._api_request(action, data, 'POST')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_post_multipart(self, action, data, binary_data_param): """ Perform an HTTP Multipart POST request, using the shared-secret auth hash. @param action: API action call @param data: dictionary values @param binary_data_param: array of multipart keys """
binary_data = {} data = data.copy() try: file_handles = [] for param in binary_data_param: if param in data: binary_data[param] = file_handle = open(data[param], 'rb') file_handles.append(file_handle) del data[param] json_payload = self._prepare_json_payload(data) return self._http_request(action, json_payload, "POST", binary_data) finally: for file_handle in file_handles: file_handle.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _api_request(self, action, data, request_type, headers=None): """ Make Request to Sailthru API with given data and api key, format and signature hash """
if 'file' in data: file_data = {'file': open(data['file'], 'rb')} else: file_data = None return self._http_request(action, self._prepare_json_payload(data), request_type, file_data, headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validation_error(exception): """Return a formatted validation error."""
messages = getattr(exception, 'messages', None) if messages is None: messages = getattr(exception, 'data', {'messages': None})['messages'] def extract_errors(): """Extract errors from exception.""" if isinstance(messages, dict): for field, message in messages.items(): if field == 'verb': yield 'badVerb', '\n'.join(message) else: yield 'badArgument', '\n'.join(message) else: for field in exception.field_names: if field == 'verb': yield 'badVerb', '\n'.join(messages) else: yield 'badArgument', '\n'.join(messages) if not exception.field_names: yield 'badArgument', '\n'.join(messages) return (etree.tostring(xml.error(extract_errors())), 422, {'Content-Type': 'text/xml'})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def response(args): """Response endpoint."""
e_tree = getattr(xml, args['verb'].lower())(**args) response = make_response(etree.tostring( e_tree, pretty_print=True, xml_declaration=True, encoding='UTF-8', )) response.headers['Content-Type'] = 'text/xml' return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_percolator_mapping(index, doc_type): """Update mappings with the percolator field. .. note:: This is only needed from ElasticSearch v5 onwards, because percolators are now just a special type of field inside mappings. """
if ES_VERSION[0] >= 5: current_search_client.indices.put_mapping( index=index, doc_type=doc_type, body=PERCOLATOR_MAPPING, ignore=[400, 404])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _percolate_query(index, doc_type, percolator_doc_type, document): """Get results for a percolate query."""
if ES_VERSION[0] in (2, 5): results = current_search_client.percolate( index=index, doc_type=doc_type, allow_no_indices=True, ignore_unavailable=True, body={'doc': document} ) return results['matches'] elif ES_VERSION[0] == 6: results = current_search_client.search( index=index, doc_type=percolator_doc_type, allow_no_indices=True, ignore_unavailable=True, body={ 'query': { 'percolate': { 'field': 'query', 'document_type': percolator_doc_type, 'document': document, } } } ) return results['hits']['hits']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _new_percolator(spec, search_pattern): """Create new percolator associated with the new set."""
if spec and search_pattern: query = query_string_parser(search_pattern=search_pattern).to_dict() for index in current_search.mappings.keys(): # Create the percolator doc_type in the existing index for >= ES5 # TODO: Consider doing this only once in app initialization percolator_doc_type = _get_percolator_doc_type(index) _create_percolator_mapping(index, percolator_doc_type) current_search_client.index( index=index, doc_type=percolator_doc_type, id='oaiset-{}'.format(spec), body={'query': query} )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _delete_percolator(spec, search_pattern): """Delete the percolator associated with the given OAISet."""
if spec: for index in current_search.mappings.keys(): # Create the percolator doc_type in the existing index for >= ES5 percolator_doc_type = _get_percolator_doc_type(index) _create_percolator_mapping(index, percolator_doc_type) current_search_client.delete( index=index, doc_type=percolator_doc_type, id='oaiset-{}'.format(spec), ignore=[404] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_cache(): """Build sets cache."""
sets = current_oaiserver.sets if sets is None: # build sets cache sets = current_oaiserver.sets = [ oaiset.spec for oaiset in OAISet.query.filter( OAISet.search_pattern.is_(None)).all()] return sets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_record_sets(record): """Find matching sets."""
# get the list of sets whose search_pattern is None but which are already in the # set list inside the record record_sets = set(record.get('_oai', {}).get('sets', [])) for spec in _build_cache(): if spec in record_sets: yield spec # get list of sets that match using percolator index, doc_type = RecordIndexer().record_to_index(record) document = record.dumps() percolator_doc_type = _get_percolator_doc_type(index) _create_percolator_mapping(index, percolator_doc_type) results = _percolate_query(index, doc_type, percolator_doc_type, document) prefix = 'oaiset-' prefix_len = len(prefix) for match in results: set_name = match['_id'] if set_name.startswith(prefix): name = set_name[prefix_len:] yield name return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _records_commit(record_ids): """Commit all records."""
for record_id in record_ids: record = Record.get_record(record_id) record.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_affected_records(spec=None, search_pattern=None): """Update all affected records by OAISet change. :param spec: The record spec. :param search_pattern: The search pattern. """
chunk_size = current_app.config['OAISERVER_CELERY_TASK_CHUNK_SIZE'] record_ids = get_affected_records(spec=spec, search_pattern=search_pattern) group( update_records_sets.s(list(filter(None, chunk))) for chunk in zip_longest(*[iter(record_ids)] * chunk_size) )()
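The zip_longest idiom above chunks a lazy iterator of ids without materializing it; a small standalone illustration:

    from itertools import zip_longest

    record_ids = iter(['a', 'b', 'c', 'd', 'e'])
    chunk_size = 2
    chunks = [list(filter(None, chunk))
              for chunk in zip_longest(*[iter(record_ids)] * chunk_size)]
    print(chunks)  # [['a', 'b'], ['c', 'd'], ['e']]
    # Note: filter(None, ...) also drops falsy ids, which is harmless for UUIDs.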
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def envelope(**kwargs): """Create OAI-PMH envelope for response."""
e_oaipmh = Element(etree.QName(NS_OAIPMH, 'OAI-PMH'), nsmap=NSMAP) e_oaipmh.set(etree.QName(NS_XSI, 'schemaLocation'), '{0} {1}'.format(NS_OAIPMH, NS_OAIPMH_XSD)) e_tree = ElementTree(element=e_oaipmh) if current_app.config['OAISERVER_XSL_URL']: e_oaipmh.addprevious(etree.ProcessingInstruction( 'xml-stylesheet', 'type="text/xsl" href="{0}"' .format(current_app.config['OAISERVER_XSL_URL']))) e_responseDate = SubElement( e_oaipmh, etree.QName( NS_OAIPMH, 'responseDate')) # date should be first possible moment e_responseDate.text = datetime_to_datestamp(datetime.utcnow()) e_request = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, 'request')) for key, value in kwargs.items(): if key == 'from_' or key == 'until': value = datetime_to_datestamp(value) elif key == 'resumptionToken': value = value['token'] e_request.set(key, value) e_request.text = url_for('invenio_oaiserver.response', _external=True) return e_tree, e_oaipmh
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def error(errors): """Create error element."""
e_tree, e_oaipmh = envelope() for code, message in errors: e_error = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, 'error')) e_error.set('code', code) e_error.text = message return e_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verb(**kwargs): """Create OAI-PMH envelope for response with verb."""
e_tree, e_oaipmh = envelope(**kwargs) e_element = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, kwargs['verb'])) return e_tree, e_element
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resumption_token(parent, pagination, **kwargs): """Attach resumption token element to a parent."""
# Do not add resumptionToken if all results fit to the first page. if pagination.page == 1 and not pagination.has_next: return token = serialize(pagination, **kwargs) e_resumptionToken = SubElement(parent, etree.QName(NS_OAIPMH, 'resumptionToken')) if pagination.total: expiration_date = datetime.utcnow() + timedelta( seconds=current_app.config[ 'OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME' ] ) e_resumptionToken.set('expirationDate', datetime_to_datestamp( expiration_date )) e_resumptionToken.set('cursor', str( (pagination.page - 1) * pagination.per_page )) e_resumptionToken.set('completeListSize', str(pagination.total)) if token: e_resumptionToken.text = token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listsets(**kwargs): """Create OAI-PMH response for ListSets verb."""
e_tree, e_listsets = verb(**kwargs) page = kwargs.get('resumptionToken', {}).get('page', 1) size = current_app.config['OAISERVER_PAGE_SIZE'] oai_sets = OAISet.query.paginate(page=page, per_page=size, error_out=False) for oai_set in oai_sets.items: e_set = SubElement(e_listsets, etree.QName(NS_OAIPMH, 'set')) e_setSpec = SubElement(e_set, etree.QName(NS_OAIPMH, 'setSpec')) e_setSpec.text = oai_set.spec e_setName = SubElement(e_set, etree.QName(NS_OAIPMH, 'setName')) e_setName.text = sanitize_unicode(oai_set.name) if oai_set.description: e_setDescription = SubElement(e_set, etree.QName(NS_OAIPMH, 'setDescription')) e_dc = SubElement( e_setDescription, etree.QName(NS_OAIDC, 'dc'), nsmap=NSMAP_DESCRIPTION ) e_dc.set(etree.QName(NS_XSI, 'schemaLocation'), NS_OAIDC) e_description = SubElement(e_dc, etree.QName(NS_DC, 'description')) e_description.text = oai_set.description resumption_token(e_listsets, oai_sets, **kwargs) return e_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listmetadataformats(**kwargs): """Create OAI-PMH response for ListMetadataFormats verb."""
cfg = current_app.config e_tree, e_listmetadataformats = verb(**kwargs) if 'identifier' in kwargs: # test if record exists OAIIDProvider.get(pid_value=kwargs['identifier']) for prefix, metadata in cfg.get('OAISERVER_METADATA_FORMATS', {}).items(): e_metadataformat = SubElement( e_listmetadataformats, etree.QName(NS_OAIPMH, 'metadataFormat') ) e_metadataprefix = SubElement( e_metadataformat, etree.QName(NS_OAIPMH, 'metadataPrefix') ) e_metadataprefix.text = prefix e_schema = SubElement( e_metadataformat, etree.QName(NS_OAIPMH, 'schema') ) e_schema.text = metadata['schema'] e_metadataNamespace = SubElement( e_metadataformat, etree.QName(NS_OAIPMH, 'metadataNamespace') ) e_metadataNamespace.text = metadata['namespace'] return e_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listidentifiers(**kwargs): """Create OAI-PMH response for verb ListIdentifiers."""
e_tree, e_listidentifiers = verb(**kwargs) result = get_records(**kwargs) for record in result.items: pid = oaiid_fetcher(record['id'], record['json']['_source']) header( e_listidentifiers, identifier=pid.pid_value, datestamp=record['updated'], sets=record['json']['_source'].get('_oai', {}).get('sets', []), ) resumption_token(e_listidentifiers, result, **kwargs) return e_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listrecords(**kwargs): """Create OAI-PMH response for verb ListRecords."""
record_dumper = serializer(kwargs['metadataPrefix']) e_tree, e_listrecords = verb(**kwargs) result = get_records(**kwargs) for record in result.items: pid = oaiid_fetcher(record['id'], record['json']['_source']) e_record = SubElement(e_listrecords, etree.QName(NS_OAIPMH, 'record')) header( e_record, identifier=pid.pid_value, datestamp=record['updated'], sets=record['json']['_source'].get('_oai', {}).get('sets', []), ) e_metadata = SubElement(e_record, etree.QName(NS_OAIPMH, 'metadata')) e_metadata.append(record_dumper(pid, record['json'])) resumption_token(e_listrecords, result, **kwargs) return e_tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def oaiid_fetcher(record_uuid, data): """Fetch a record's identifier. :param record_uuid: The record UUID. :param data: The record data. :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` instance. """
pid_value = data.get('_oai', {}).get('id') if pid_value is None: raise PersistentIdentifierError() return FetchedPID( provider=OAIIDProvider, pid_type=OAIIDProvider.pid_type, pid_value=str(pid_value), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_spec(self, key, value): """Forbid updates of the set identifier."""
if self.spec and self.spec != value: raise OAISetSpecUpdateError("Updating spec is not allowed.") return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_record(self, record): """Add a record to the OAISet. :param record: Record to be added. :type record: `invenio_records.api.Record` or derivative. """
record.setdefault('_oai', {}).setdefault('sets', []) assert not self.has_record(record) record['_oai']['sets'].append(self.spec)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_record(self, record): """Remove a record from the OAISet. :param record: Record to be removed. :type record: `invenio_records.api.Record` or derivative. """
assert self.has_record(record) record['_oai']['sets'] = [ s for s in record['_oai']['sets'] if s != self.spec]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def oaiserver(sets, records): """Initialize OAI-PMH server."""
from invenio_db import db from invenio_oaiserver.models import OAISet from invenio_records.api import Record # create a OAI Set with db.session.begin_nested(): for i in range(sets): db.session.add(OAISet( spec='test{0}'.format(i), name='Test{0}'.format(i), description='test desc {0}'.format(i), search_pattern='title_statement.title:Test{0}'.format(i), )) # create a record schema = { 'type': 'object', 'properties': { 'title_statement': { 'type': 'object', 'properties': { 'title': { 'type': 'string', }, }, }, 'field': {'type': 'boolean'}, }, } with app.app_context(): indexer = RecordIndexer() with db.session.begin_nested(): for i in range(records): record_id = uuid.uuid4() data = { 'title_statement': {'title': 'Test{0}'.format(i)}, '$schema': schema, } recid_minter(record_id, data) oaiid_minter(record_id, data) record = Record.create(data, id_=record_id) indexer.index(record) db.session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serializer(metadata_prefix): """Return an etree dumper instance. :param metadata_prefix: One of the metadata identifiers configured in ``OAISERVER_METADATA_FORMATS``. """
metadataFormats = current_app.config['OAISERVER_METADATA_FORMATS'] serializer_ = metadataFormats[metadata_prefix]['serializer'] if isinstance(serializer_, tuple): return partial(import_string(serializer_[0]), **serializer_[1]) return import_string(serializer_)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dumps_etree(pid, record, **kwargs): """Dump MARC21 compatible record. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` instance. :param record: The :class:`invenio_records.api.Record` instance. :returns: A LXML Element instance. """
from dojson.contrib.to_marc21 import to_marc21 from dojson.contrib.to_marc21.utils import dumps_etree return dumps_etree(to_marc21.do(record['_source']), **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eprints_description(metadataPolicy, dataPolicy, submissionPolicy=None, content=None): """Generate the eprints element for the identify response. The eprints container is used by the e-print community to describe the content and policies of repositories. For the full specification and schema definition visit: http://www.openarchives.org/OAI/2.0/guidelines-eprints.htm """
eprints = Element(etree.QName(NS_EPRINTS[None], 'eprints'), nsmap=NS_EPRINTS) eprints.set(etree.QName(ns['xsi'], 'schemaLocation'), '{0} {1}'.format(EPRINTS_SCHEMA_LOCATION, EPRINTS_SCHEMA_LOCATION_XSD)) if content: contentElement = etree.Element('content') for key, value in content.items(): contentElement.append(E(key, value)) eprints.append(contentElement) metadataPolicyElement = etree.Element('metadataPolicy') for key, value in metadataPolicy.items(): metadataPolicyElement.append(E(key, value)) eprints.append(metadataPolicyElement) dataPolicyElement = etree.Element('dataPolicy') for key, value in dataPolicy.items(): dataPolicyElement.append(E(key, value)) eprints.append(dataPolicyElement) if submissionPolicy: submissionPolicyElement = etree.Element('submissionPolicy') for key, value in submissionPolicy.items(): submissionPolicyElement.append(E(key, value)) eprints.append(submissionPolicyElement) return etree.tostring(eprints, pretty_print=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def oai_identifier_description(scheme, repositoryIdentifier, delimiter, sampleIdentifier): """Generate the oai-identifier element for the identify response. The OAI identifier format is intended to provide persistent resource identifiers for items in repositories that implement OAI-PMH. For the full specification and schema definition visit: http://www.openarchives.org/OAI/2.0/guidelines-oai-identifier.htm """
oai_identifier = Element(etree.QName(NS_OAI_IDENTIFIER[None], 'oai_identifier'), nsmap=NS_OAI_IDENTIFIER) oai_identifier.set(etree.QName(ns['xsi'], 'schemaLocation'), '{0} {1}'.format(OAI_IDENTIFIER_SCHEMA_LOCATION, OAI_IDENTIFIER_SCHEMA_LOCATION_XSD)) oai_identifier.append(E('scheme', scheme)) oai_identifier.append(E('repositoryIdentifier', repositoryIdentifier)) oai_identifier.append(E('delimiter', delimiter)) oai_identifier.append(E('sampleIdentifier', sampleIdentifier)) return etree.tostring(oai_identifier, pretty_print=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def friends_description(baseURLs): """Generate the friends element for the identify response. The friends container is recommended for use by repositories to list confederate repositories. For the schema definition visit: http://www.openarchives.org/OAI/2.0/guidelines-friends.htm """
friends = Element(etree.QName(NS_FRIENDS[None], 'friends'), nsmap=NS_FRIENDS) friends.set(etree.QName(ns['xsi'], 'schemaLocation'), '{0} {1}'.format(FRIENDS_SCHEMA_LOCATION, FRIENDS_SCHEMA_LOCATION_XSD)) for baseURL in baseURLs: friends.append(E('baseURL', baseURL)) return etree.tostring(friends, pretty_print=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_insert_oai_set(mapper, connection, target): """Update records on OAISet insertion."""
_new_percolator(spec=target.spec, search_pattern=target.search_pattern) sleep(2) update_affected_records.delay( search_pattern=target.search_pattern )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_update_oai_set(mapper, connection, target): """Update records on OAISet update."""
_delete_percolator(spec=target.spec, search_pattern=target.search_pattern) _new_percolator(spec=target.spec, search_pattern=target.search_pattern) sleep(2) update_affected_records.delay( spec=target.spec, search_pattern=target.search_pattern )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_delete_oai_set(mapper, connection, target): """Update records on OAISet deletion."""
_delete_percolator(spec=target.spec, search_pattern=target.search_pattern) sleep(2) update_affected_records.delay( spec=target.spec )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_string_parser(search_pattern): """Elasticsearch query string parser."""
if not hasattr(current_oaiserver, 'query_parser'): query_parser = current_app.config['OAISERVER_QUERY_PARSER'] if isinstance(query_parser, six.string_types): query_parser = import_string(query_parser) current_oaiserver.query_parser = query_parser return current_oaiserver.query_parser('query_string', query=search_pattern)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_affected_records(spec=None, search_pattern=None): """Get list of affected records. :param spec: The record spec. :param search_pattern: The search pattern. :returns: An iterator to lazily find results. """
# spec pattern query # ---------- ---------- ------- # None None None # None Y Y # X None X # X '' X # X Y X OR Y if spec is None and search_pattern is None: return queries = [] if spec is not None: queries.append(Q('match', **{'_oai.sets': spec})) if search_pattern: queries.append(query_string_parser(search_pattern=search_pattern)) search = OAIServerSearch( index=current_app.config['OAISERVER_RECORD_INDEX'], ).query(Q('bool', should=queries)) for result in search.scan(): yield result.meta.id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_records(**kwargs): """Get records paginated."""
page_ = kwargs.get('resumptionToken', {}).get('page', 1) size_ = current_app.config['OAISERVER_PAGE_SIZE'] scroll = current_app.config['OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME'] scroll_id = kwargs.get('resumptionToken', {}).get('scroll_id') if scroll_id is None: search = OAIServerSearch( index=current_app.config['OAISERVER_RECORD_INDEX'], ).params( scroll='{0}s'.format(scroll), ).extra( version=True, )[(page_-1)*size_:page_*size_] if 'set' in kwargs: search = search.query('match', **{'_oai.sets': kwargs['set']}) time_range = {} if 'from_' in kwargs: time_range['gte'] = kwargs['from_'] if 'until' in kwargs: time_range['lte'] = kwargs['until'] if time_range: search = search.filter('range', **{'_updated': time_range}) response = search.execute().to_dict() else: response = current_search_client.scroll( scroll_id=scroll_id, scroll='{0}s'.format(scroll), ) class Pagination(object): """Dummy pagination class.""" page = page_ per_page = size_ def __init__(self, response): """Initilize pagination.""" self.response = response self.total = response['hits']['total'] self._scroll_id = response.get('_scroll_id') # clean descriptor on last page if not self.has_next: current_search_client.clear_scroll( scroll_id=self._scroll_id ) self._scroll_id = None @cached_property def has_next(self): """Return True if there is next page.""" return self.page * self.per_page <= self.total @cached_property def next_num(self): """Return next page number.""" return self.page + 1 if self.has_next else None @property def items(self): """Return iterator.""" from datetime import datetime for result in self.response['hits']['hits']: if '_oai' in result['_source']: yield { 'id': result['_id'], 'json': result, 'updated': datetime.strptime( result['_source']['_updated'][:19], '%Y-%m-%dT%H:%M:%S' ), } return Pagination(response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_file_path(filename, local=True, relative_to_module=None, my_dir=my_dir): """ Look for an existing path matching filename. Try to resolve relative to the module location if the path cannot be found using "normal" resolution. """
# override my_dir if module is provided if relative_to_module is not None: my_dir = os.path.dirname(relative_to_module.__file__) user_path = result = filename if local: user_path = os.path.expanduser(filename) result = os.path.abspath(user_path) if os.path.exists(result): return result # The file was found normally # otherwise look relative to the module. result = os.path.join(my_dir, filename) assert os.path.exists(result), "no such file " + repr((filename, result, user_path)) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_if_not_loaded(widget, filenames, verbose=False, delay=0.1, force=False, local=True, evaluator=None): """ Load javascript files into the Jupyter notebook context, unless they were already loaded. """
if evaluator is None: evaluator = EVALUATOR # default if not specified. for filename in filenames: loaded = False if force or not filename in LOADED_JAVASCRIPT: js_text = get_text_from_file_name(filename, local) if verbose: print("loading javascript file", filename, "with", evaluator) evaluator(widget, js_text) LOADED_JAVASCRIPT.add(filename) loaded = True else: if verbose: print ("not reloading javascript file", filename) if loaded and delay > 0: if verbose: print ("delaying to allow JS interpreter to sync.") time.sleep(delay)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _set(self, name, value): "Proxy to set a property of the widget element." return self.widget(self.widget_element._set(name, value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def base_url(root): """Determine the base url for a root element."""
for attr, value in root.attrib.items(): if attr.endswith('base') and 'http' in value: return value return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_ns(tag): """Return a tag and its namespace separately."""
if '}' in tag: split = tag.split('}') return split[0].strip('{'), split[-1] return '', tag
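A concrete example of the split: lxml and ElementTree expose namespaced tags in Clark notation, which the helper above takes apart:

    print(clean_ns('{http://www.w3.org/2005/Atom}entry'))  # ('http://www.w3.org/2005/Atom', 'entry')
    print(clean_ns('entry'))                                # ('', 'entry')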
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xpath(node, query, namespaces={}): """A safe xpath that only uses namespaces if available."""
if namespaces and 'None' not in namespaces: return node.xpath(query, namespaces=namespaces) return node.xpath(query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def innertext(node): """Return the inner text of a node. If a node has no sub elements, this is just node.text. Otherwise, it's node.text + sub-element-text + node.tail."""
if not len(node): return node.text return (node.text or '') + ''.join([etree.tostring(c) for c in node]) + (node.tail or '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(document, clean_html=True, unix_timestamp=False, encoding=None): """Parse a document and return a feedparser dictionary with attr key access. If clean_html is False, the html in the feed will not be cleaned. If clean_html is True, a sane version of lxml.html.clean.Cleaner will be used. If it is a Cleaner object, that cleaner will be used. If unix_timestamp is True, the date information will be a numerical unix timestamp rather than a struct_time. If encoding is provided, the encoding of the document will be manually set to that."""
if isinstance(clean_html, bool): cleaner = default_cleaner if clean_html else fake_cleaner else: cleaner = clean_html result = feedparser.FeedParserDict() result['feed'] = feedparser.FeedParserDict() result['entries'] = [] result['bozo'] = 0 try: parser = SpeedParser(document, cleaner, unix_timestamp, encoding) parser.update(result) except Exception as e: if isinstance(e, UnicodeDecodeError) and encoding is True: encoding = chardet.detect(document)['encoding'] document = document.decode(encoding, 'replace').encode('utf-8') return parse(document, clean_html, unix_timestamp, encoding) import traceback result['bozo'] = 1 result['bozo_exception'] = e result['bozo_tb'] = traceback.format_exc() return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def changed_path(self): "Find any changed path and update all changed modification times." result = None # default for path in self.paths_to_modification_times: lastmod = self.paths_to_modification_times[path] mod = os.path.getmtime(path) if mod > lastmod: result = "Watch file has been modified: " + repr(path) self.paths_to_modification_times[path] = mod for folder in self.folder_paths: for filename in os.listdir(folder): subpath = os.path.join(folder, filename) if os.path.isfile(subpath) and subpath not in self.paths_to_modification_times: result = "New file in watched folder: " + repr(subpath) self.add(subpath) if self.check_python_modules: # refresh the modules self.add_all_modules() if self.check_javascript: self.watch_javascript() return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_date_iso8601(dateString): '''Parse a variety of ISO-8601-compatible formats like 20040105''' m = None for _iso8601_match in _iso8601_matches: m = _iso8601_match(dateString) if m: break if not m: return if m.span() == (0, 0): return params = m.groupdict() ordinal = params.get('ordinal', 0) if ordinal: ordinal = int(ordinal) else: ordinal = 0 year = params.get('year', '--') if not year or year == '--': year = time.gmtime()[0] elif len(year) == 2: # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993 year = 100 * int(time.gmtime()[0] / 100) + int(year) else: year = int(year) month = params.get('month', '-') if not month or month == '-': # ordinals are NOT normalized by mktime, we simulate them # by setting month=1, day=ordinal if ordinal: month = 1 else: month = time.gmtime()[1] month = int(month) day = params.get('day', 0) if not day: # see above if ordinal: day = ordinal elif params.get('century', 0) or \ params.get('year', 0) or params.get('month', 0): day = 1 else: day = time.gmtime()[2] else: day = int(day) # special case of the century - is the first year of the 21st century # 2000 or 2001 ? The debate goes on... if 'century' in params: year = (int(params['century']) - 1) * 100 + 1 # in ISO 8601 most fields are optional for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: if not params.get(field, None): params[field] = 0 hour = int(params.get('hour', 0)) minute = int(params.get('minute', 0)) second = int(float(params.get('second', 0))) # weekday is normalized by mktime(), we can ignore it weekday = 0 daylight_savings_flag = -1 tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag] # ISO 8601 time zone adjustments tz = params.get('tz') if tz and tz != 'Z': if tz[0] == '-': tm[3] += int(params.get('tzhour', 0)) tm[4] += int(params.get('tzmin', 0)) elif tz[0] == '+': tm[3] -= int(params.get('tzhour', 0)) tm[4] -= int(params.get('tzmin', 0)) else: return None # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) # which is guaranteed to normalize d/m/y/h/m/s. # Many implementations have bugs, but we'll pretend they don't. return time.localtime(time.mktime(tuple(tm)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_date_onblog(dateString): '''Parse a string according to the OnBlog 8-bit date format''' m = _korean_onblog_date_re.match(dateString) if not m: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} return _parse_date_w3dtf(w3dtfdate)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_date_nate(dateString): '''Parse a string according to the Nate 8-bit date format''' m = _korean_nate_date_re.match(dateString) if not m: return hour = int(m.group(5)) ampm = m.group(4) if (ampm == _korean_pm): hour += 12 hour = str(hour) if len(hour) == 1: hour = '0' + hour w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': '+09:00'} return _parse_date_w3dtf(w3dtfdate)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} return _parse_date_rfc822(rfc822date)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_date_hungarian(dateString): '''Parse a string according to a Hungarian 8-bit date format.''' m = _hungarian_date_format_re.match(dateString) if not m or m.group(2) not in _hungarian_months: return None month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} return _parse_date_w3dtf(w3dtfdate)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_date(dateString): '''Parses a variety of date formats into a 9-tuple in GMT''' if not dateString: return None for handler in _date_handlers: try: date9tuple = handler(dateString) except (KeyError, OverflowError, ValueError): continue if not date9tuple: continue if len(date9tuple) != 9: continue return date9tuple return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_chunk_wrapper(self, status, name, content, file_info): """wrapper to allow output redirects for handle_chunk."""
        out = self.output
        if out is not None:
            with out:
                print("handling chunk " + repr(type(content)))
                self.handle_chunk(status, name, content, file_info)
        else:
            self.handle_chunk(status, name, content, file_info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def handle_chunk(self, status, name, content, file_info):
        "Handle one chunk of the file. Override this method for piecewise delivery or error handling."
        if status == "error":
            msg = repr(file_info.get("message"))
            exc = JavaScriptError(msg)
            exc.file_info = file_info
            self.status = "Javascript sent exception " + msg
            self.chunk_collector = []
            raise exc
        if status == "more":
            self.chunk_collector.append(content)
            self.progress_callback(self.chunk_collector, file_info)
        else:
            assert status == "done", "Unknown status " + repr(status)
            self.save_chunks = self.chunk_collector
            self.chunk_collector.append(content)
            all_content = self.combine_chunks(self.chunk_collector)
            self.chunk_collector = []
            content_callback = self.content_callback
            if content_callback is None:
                content_callback = self.default_content_callback
            self.status = "calling " + repr(content_callback)
            try:
                content_callback(self.widget, name, all_content)
            except Exception as e:
                self.status += "\n" + repr(content_callback) + " raised " + repr(e)
                raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def search(self, query, method="lucene", start=None, rows=None,
               access_token=None):
        """Search the ORCID database.

        Parameters
        :param query: string
            Query in line with the chosen method.
        :param method: string
            One of 'lucene', 'edismax', 'dismax'
        :param start: string
            Index of the first record requested. Use for pagination.
        :param rows: string
            Number of records requested. Use for pagination.
        :param access_token: string
            If obtained before, the access token to use to pass through
            authorization. Note that if this argument is not provided,
            the function will take more time.

        Returns
        -------
        :returns: dict
            Search result with error description available. The results can
            be obtained by accessing key 'result'. To get the number of all
            results, access the key 'num-found'.
        """
        if access_token is None:
            access_token = self.get_search_token_from_orcid()
        headers = {'Accept': 'application/orcid+json',
                   'Authorization': 'Bearer %s' % access_token}
        return self._search(query, method, start, rows, headers,
                            self._endpoint)
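An illustrative call follows; `orcid` is a placeholder for an already-configured client instance that exposes the method above with valid credentials.

    result = orcid.search('family-name:Einstein', start=0, rows=10)
    print(result['num-found'])    # total number of matches, per the docstring above
    for hit in result['result']:  # the requested page of results
        print(hit)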
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def search_generator(self, query, method="lucene", pagination=10,
                         access_token=None):
        """Search the ORCID database with a generator.

        The generator will yield every result.

        Parameters
        :param query: string
            Query in line with the chosen method.
        :param method: string
            One of 'lucene', 'edismax', 'dismax'
        :param pagination: integer
            How many papers should be fetched with the request.
        :param access_token: string
            If obtained before, the access token to use to pass through
            authorization. Note that if this argument is not provided,
            the function will take more time.

        Yields
        -------
        :yields: dict
            Single profile from the search results.
        """
        if access_token is None:
            access_token = self.get_search_token_from_orcid()
        headers = {'Accept': 'application/orcid+json',
                   'Authorization': 'Bearer %s' % access_token}
        index = 0
        while True:
            paginated_result = self._search(query, method, index, pagination,
                                            headers, self._endpoint)
            if not paginated_result['result']:
                return
            for result in paginated_result['result']:
                yield result
            index += pagination
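A sketch of consuming the generator, again with a placeholder client object; the field names inside each yielded profile depend on the ORCID response schema and are not spelled out here.

    for profile in orcid.search_generator('family-name:Einstein', pagination=20):
        print(profile)
        break  # stop after the first result in this sketch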
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def get_search_token_from_orcid(self, scope='/read-public'):
        """Get a token for searching ORCID records.

        Parameters
        :param scope: string
            /read-public or /read-member

        Returns
        -------
        :returns: string
            The token.
        """
        payload = {'client_id': self._key,
                   'client_secret': self._secret,
                   'scope': scope,
                   'grant_type': 'client_credentials'
                   }
        url = "%s/oauth/token" % self._endpoint
        headers = {'Accept': 'application/json'}
        response = requests.post(url, data=payload, headers=headers,
                                 timeout=self._timeout)
        response.raise_for_status()
        if self.do_store_raw_response:
            self.raw_response = response
        return response.json()['access_token']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def get_token_from_authorization_code(self, authorization_code,
                                          redirect_uri):
        """Like `get_token`, but using an OAuth 2 authorization code.

        Use this method if you run a webserver that serves as an endpoint for
        the redirect URI. The webserver can retrieve the authorization code
        from the URL that is requested by ORCID.

        Parameters
        :param redirect_uri: string
            The redirect uri of the institution.
        :param authorization_code: string
            The authorization code.

        Returns
        -------
        :returns: dict
            All data of the access token. The access token itself is in the
            ``"access_token"`` key.
        """
        token_dict = {
            "client_id": self._key,
            "client_secret": self._secret,
            "grant_type": "authorization_code",
            "code": authorization_code,
            "redirect_uri": redirect_uri,
        }
        response = requests.post(self._token_url, data=token_dict,
                                 headers={'Accept': 'application/json'},
                                 timeout=self._timeout)
        response.raise_for_status()
        if self.do_store_raw_response:
            self.raw_response = response
        return json.loads(response.text)
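A sketch of where this method sits in the three-legged flow; the client object, authorization code, and redirect URI below are placeholders.

    # 1. The user authorizes the application on ORCID and is redirected back
    #    to the registered redirect URI with ?code=... appended.
    # 2. The webserver extracts that code and exchanges it for tokens:
    token = api.get_token_from_authorization_code(
        authorization_code='code-from-redirect',            # placeholder
        redirect_uri='https://example.org/orcid/callback',  # placeholder
    )
    access_token = token['access_token']  # per the docstring above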
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def read_record_public(self, orcid_id, request_type, token, put_code=None,
                           accept_type='application/orcid+json'):
        """Get the public info about the researcher.

        Parameters
        :param orcid_id: string
            Id of the queried author.
        :param request_type: string
            For example: 'record'. See
            https://members.orcid.org/api/tutorial/read-orcid-records
            for possible values.
        :param token: string
            Token received from OAuth 2 3-legged authorization.
        :param put_code: string | list of strings
            The id of the queried work. In case of 'works' request_type might
            be a list of strings
        :param accept_type: expected MIME type of received data

        Returns
        -------
        :returns: dict | lxml.etree._Element
            Record(s) in JSON-compatible dictionary representation or in
            XML E-tree, depending on accept_type specified.
        """
return self._get_info(orcid_id, self._get_public_info, request_type, token, put_code, accept_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def get_user_orcid(self, user_id, password, redirect_uri):
        """Get the user orcid from authentication process.

        Parameters
        :param user_id: string
            The id of the user used for authentication.
        :param password: string
            The user password.
        :param redirect_uri: string
            The redirect uri of the institution.

        Returns
        -------
        :returns: string
            The orcid.
        """
        response = self._authenticate(user_id, password, redirect_uri,
                                      '/authenticate')
        return response['orcid']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def read_record_member(self, orcid_id, request_type, token, put_code=None,
                           accept_type='application/orcid+json'):
        """Get the member info about the researcher.

        Parameters
        :param orcid_id: string
            Id of the queried author.
        :param request_type: string
            For example: 'record'. See
            https://members.orcid.org/api/tutorial/read-orcid-records
            for possible values.
        :param response_format: string
            One of json, xml.
        :param token: string
            Token received from OAuth 2 3-legged authorization.
        :param put_code: string | list of strings
            The id of the queried work. In case of 'works' request_type might
            be a list of strings
        :param accept_type: expected MIME type of received data

        Returns
        -------
        :returns: dict | lxml.etree._Element
            Record(s) in JSON-compatible dictionary representation or in
            XML E-tree, depending on accept_type specified.
        """
return self._get_info(orcid_id, self._get_member_info, request_type, token, put_code, accept_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def get_access_tokens(self, authorization_code):
        """From the authorization code, get the "access token" and the "refresh token" from Box.

        Args:
            authorization_code (str). Authorisation code emitted by Box at the
                url provided by the function :func:`get_authorization_url`.

        Returns:
            tuple. (access_token, refresh_token)

        Raises:
            BoxError: An error response is returned from Box (status_code >= 400).
            BoxHttpResponseError: Response from Box is malformed.
            requests.exceptions.*: Any connection related problem.
        """
        response = self.box_request.get_access_token(authorization_code)
        try:
            att = response.json()
        except Exception as ex:
            raise BoxHttpResponseError(ex)
        if response.status_code >= 400:
            raise BoxError(response.status_code, att)
        return att['access_token'], att['refresh_token']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack_frame(message):
    """Called to unpack a STOMP message into a dictionary.

    returned = {
        # STOMP Command:

        # Headers e.g.
        'headers' : {
            'destination' : 'xyz',
            'message-id' : 'some event',
            : etc,
        }

        # Body:
    }
    """
    body = []
    returned = dict(cmd='', headers={}, body='')

    breakdown = message.split('\n')

    # Get the message command:
    returned['cmd'] = breakdown[0]
    breakdown = breakdown[1:]

    def headD(field):
        # find the first ':' everything to the left of this is a
        # header, everything to the right is data:
        index = field.find(':')
        if index:
            header = field[:index].strip()
            data = field[index + 1:].strip()
            # print("header '%s' data '%s'" % (header, data))
            returned['headers'][header.strip()] = data.strip()

    def bodyD(field):
        field = field.strip()
        if field:
            body.append(field)

    # Recover the header fields and body data
    handler = headD
    for field in breakdown:
        # print("field:", field)
        if field.strip() == '':
            # End of headers, its body data next.
            handler = bodyD
            continue
        handler(field)

    # Stitch the body data together:
    # print("1. body: ", body)
    body = "".join(body)
    returned['body'] = body.replace('\x00', '')

    # print("2. body: <%s>" % returned['body'])
    return returned
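A minimal round-trip sketch with a hand-built MESSAGE frame:

    raw = ("MESSAGE\n"
           "destination:/queue/test\n"
           "message-id:1234\n"
           "\n"
           "hello world\x00")
    frame = unpack_frame(raw)
    print(frame['cmd'])                     # MESSAGE
    print(frame['headers']['destination'])  # /queue/test
    print(frame['body'])                    # hello world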
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ack(messageid, transactionid=None):
    """STOMP acknowledge command.

    Acknowledge receipt of a specific message from the server.

    messageid:
        This is the id of the message we are acknowledging, what else could it be? ;)

    transactionid:
        This is the id that all actions in this transaction will have. If this
        is not given then a random UUID will be generated for this.
    """
    header = 'message-id: %s' % messageid
    if transactionid:
        header = 'message-id: %s\ntransaction: %s' % (messageid, transactionid)

    return "ACK\n%s\n\n\x00\n" % header
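For reference, the frame text this helper produces looks like this:

    print(repr(ack('msg-0042')))
    # 'ACK\nmessage-id: msg-0042\n\n\x00\n'
    print(repr(ack('msg-0042', transactionid='tx-1')))
    # 'ACK\nmessage-id: msg-0042\ntransaction: tx-1\n\n\x00\n'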
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send(dest, msg, transactionid=None):
    """STOMP send command.

    dest:
        This is the channel to which the message will be sent.

    msg:
        This is the message body to be sent.

    transactionid:
        This is an optional field and is not needed by default.
    """
    transheader = ''
    if transactionid:
        transheader = 'transaction: %s\n' % transactionid

    return "SEND\ndestination: %s\n%s\n%s\x00\n" % (dest, transheader, msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def setCmd(self, cmd):
        """Check the cmd is valid; FrameError will be raised if it's not."""
        cmd = cmd.upper()
        if cmd not in VALID_COMMANDS:
            raise FrameError(
                "The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
                    cmd, VALID_COMMANDS, STOMP_VERSION)
            )
        else:
            self._cmd = cmd
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def pack(self):
        """Called to create a STOMP message from the internal values."""
        headers = ''.join(
            ['%s:%s\n' % (f, v) for f, v in sorted(self.headers.items())]
        )
        stomp_message = "%s\n%s\n%s%s\n" % (self._cmd, headers, self.body, NULL)

        # import pprint
        # print("stomp_message: ", pprint.pprint(stomp_message))

        return stomp_message
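A usage sketch, assuming the surrounding Frame class initializes `headers` to a dict and `body` to a string (which is what `pack()` relies on) and that 'SEND' appears in VALID_COMMANDS:

    frame = Frame()                               # hypothetical construction
    frame.setCmd('send')                          # normalized to 'SEND' by the setter above
    frame.headers['destination'] = '/queue/test'
    frame.body = 'hello world'
    print(repr(frame.pack()))                     # headers are emitted in sorted order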