text_prompt (string, lengths 157 to 13.1k) | code_prompt (string, lengths 7 to 19.8k, ⌀ = null) |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def up(self, migration_id=None, fake=False):
"""Executes migrations."""
|
if not self.check_directory():
return
for migration in self.get_migrations_to_up(migration_id):
logger.info('Executing migration: %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if not fake:
if hasattr(migration_module, 'up'):
migration_module.up(self.db)
else:
logger.error('No up method on migration %s' % migration.filename)
record = migration.as_dict()
record['date'] = datetime.utcnow()
self.collection.insert(record)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_migrations_to_down(self, migration_id):
"""Find migrations to rollback."""
|
migration_id = MigrationFile.validate_id(migration_id)
if not migration_id:
return []
migrations = self.get_migration_files()
last_migration_id = self.get_last_migrated_id()
if migration_id in (m.id for m in self.get_unregistered_migrations()):
logger.error('Migration is not applied %s' % migration_id)
return []
try:
migration = [m for m in migrations if m.id == migration_id][0]
except IndexError:
logger.error('Migration does not exist %s' % migration_id)
return []
return list(reversed([m for m in migrations
if migration.id <= m.id <= last_migration_id]))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def down(self, migration_id):
"""Rollback to migration."""
|
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
logger.info('Rollback migration %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if hasattr(migration_module, 'down'):
migration_module.down(self.db)
else:
logger.info('No down method on %s' % migration.filename)
self.collection.remove({'filename': migration.filename})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def username(self, username):
"""Set username. .. note:: The username will be converted to lowercase. The display name will contain the original version. """
|
validate_username(username)
self._username = username.lower()
self._displayname = username
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_by_username(cls, username):
"""Get profile by username. :param username: A username to query for (case insensitive). """
|
return cls.query.filter(
UserProfile._username == username.lower()
).one()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nice_classname(obj):
"""Returns a nice name for class object or class instance. """
|
if inspect.isclass(obj):
cls_name = obj.__name__
else:
cls_name = obj.__class__.__name__
mod = inspect.getmodule(obj)
if mod:
name = mod.__name__
# jython
if name.startswith('org.python.core.'):
name = name[len('org.python.core.'):]
return "%s.%s" % (name, cls_name)
else:
return cls_name
|
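A quick standalone sketch of how nice_classname behaves; the copy below only restates the row's logic so it runs on its own, and the printed value is what CPython 3 would report for a builtin exception.

import inspect

def nice_classname(obj):
    # standalone copy of the helper above, for illustration only
    if inspect.isclass(obj):
        cls_name = obj.__name__
    else:
        cls_name = obj.__class__.__name__
    mod = inspect.getmodule(obj)
    if mod:
        name = mod.__name__
        if name.startswith('org.python.core.'):  # strip the jython prefix
            name = name[len('org.python.core.'):]
        return "%s.%s" % (name, cls_name)
    return cls_name

print(nice_classname(ValueError("boom")))  # builtins.ValueError
print(nice_classname(ValueError))          # builtins.ValueError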
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exc_message(exc_info):
"""Return the exception's message."""
|
exc = exc_info[1]
if exc is None:
# str exception
result = exc_info[0]
else:
try:
result = str(exc)
except UnicodeEncodeError:
try:
result = unicode(exc) # flake8: noqa
except UnicodeError:
# Fallback to args as neither str nor
# unicode(Exception(u'\xe6')) work in Python < 2.6
result = exc.args[0]
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def options(self, parser, env):
"""Sets additional command line options."""
|
Plugin.options(self, parser, env)
parser.add_option(
'--html-file', action='store',
dest='html_file', metavar="FILE",
default=env.get('NOSE_HTML_FILE', 'nosetests.html'),
help="Path to html file to store the report in. "
"Default is nosetests.html in the working directory "
"[NOSE_HTML_FILE]")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure(self, options, config):
"""Configures the xunit plugin."""
|
Plugin.configure(self, options, config)
self.config = config
if self.enabled:
self.jinja = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
trim_blocks=True,
lstrip_blocks=True
)
self.stats = {'errors': 0, 'failures': 0, 'passes': 0, 'skipped': 0}
self.report_data = defaultdict(Group)
self.report_file = codecs.open(options.html_file, 'w', self.encoding, 'replace')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report(self, stream):
"""Writes an Xunit-formatted XML file The file includes a report of test errors and failures. """
|
from collections import OrderedDict
self.stats['total'] = sum(self.stats.values())
for group in self.report_data.values():
group.stats['total'] = sum(group.stats.values())
self.report_file.write(self.jinja.get_template('report.html').render(
report=OrderedDict(sorted(self.report_data.items())),
stats=self.stats,
))
self.report_file.close()
if self.config.verbosity > 1:
stream.writeln("-" * 70)
stream.writeln("HTML: %s" % self.report_file.name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addError(self, test, err, capt=None):
"""Add error output to Xunit report. """
|
exc_type, exc_val, tb = err
tb = ''.join(traceback.format_exception(
exc_type,
exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),
tb
))
name = id_split(test.id())
group = self.report_data[name[0]]
if issubclass(err[0], SkipTest):
type = 'skipped'
self.stats['skipped'] += 1
group.stats['skipped'] += 1
else:
type = 'error'
self.stats['errors'] += 1
group.stats['errors'] += 1
group.tests.append({
'name': name[-1],
'failed': True,
'type': type,
'errtype': nice_classname(err[0]),
'message': exc_message(err),
'tb': tb,
})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get(self, *args, **kwargs):
""" Gets a single list of messages from all storage backends. """
|
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _store(self, messages, response, *args, **kwargs):
""" Stores the messages, returning any unstored messages after trying all backends. For each storage backend, any messages not stored are passed on to the next backend. """
|
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _message_queryset(self, include_read=False):
""" Return a queryset of messages for the request user """
|
expire = timezone.now()
qs = PersistentMessage.objects.\
filter(user=self.get_user()).\
filter(Q(expires=None) | Q(expires__gt=expire))
if not include_read:
qs = qs.exclude(read=True)
return qs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_message(self, message, *args, **kwargs):
""" If its level is into persist levels, convert the message to models and save it """
|
if not message.level in PERSISTENT_MESSAGE_LEVELS:
return message
user = kwargs.get("user") or self.get_user()
try:
anonymous = user.is_anonymous()
except TypeError:
anonymous = user.is_anonymous
if anonymous:
raise NotImplementedError('Persistent message levels cannot be used for anonymous users.')
message_persistent = PersistentMessage()
message_persistent.level = message.level
message_persistent.message = message.message
message_persistent.extra_tags = message.extra_tags
message_persistent.user = user
if "expires" in kwargs:
message_persistent.expires = kwargs["expires"]
message_persistent.save()
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, level, message, extra_tags='', *args, **kwargs):
""" Queues a message to be stored. The message is only queued if it contained something and its level is not less than the recording level (``self.level``). """
|
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
message = self.process_message(message, *args, **kwargs)
if message:
self._queued_messages.append(message)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _store(self, messages, response, *args, **kwargs):
""" Delete all messages that are sticky and return the other messages This storage never save objects """
|
return [message for message in messages if not message.level in STICKY_MESSAGE_LEVELS]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_current_userprofile():
"""Get current user profile. .. note:: If the user is anonymous, then a :class:`invenio_userprofiles.models.AnonymousUserProfile` instance is returned. :returns: The :class:`invenio_userprofiles.models.UserProfile` instance. """
|
if current_user.is_anonymous:
return AnonymousUserProfile()
profile = g.get(
'userprofile',
UserProfile.get_by_userid(current_user.get_id()))
if profile is None:
profile = UserProfile(user_id=int(current_user.get_id()))
g.userprofile = profile
return profile
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def silence_ibapi_logging(levels=["DEBUG", "INFO"]):
""" Silences the excessive ibapi logging to the root logger. """
|
levels = levels or ["DEBUG", "INFO"]
for level in levels:
if level not in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"):
raise ValueError("unknown log level: {0}".format(level))
for _, module_name, _ in pkgutil.iter_modules(ibapi.__path__):
module = __import__("ibapi.{0}".format(module_name), fromlist="ibapi")
if not hasattr(module, "logging"):
continue
for level in levels:
setattr(module.logging, level.lower(), noop)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def persistant_debug(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
""" Adds a persistant message with the ``DEBUG`` level. """
|
add_message(request, DEBUG_PERSISTENT, message, extra_tags=extra_tags,
fail_silently=fail_silently, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def persistant_info(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
""" Adds a persistant message with the ``INFO`` level. """
|
add_message(request, INFO_PERSISTENT, message, extra_tags=extra_tags,
fail_silently=fail_silently, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def persistant_success(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
""" Adds a persistant message with the ``SUCCESS`` level. """
|
add_message(request, SUCCESS_PERSISTENT, message, extra_tags=extra_tags,
fail_silently=fail_silently, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def persistant_warning(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
""" Adds a persistant message with the ``WARNING`` level. """
|
add_message(request, WARNING_PERSISTENT, message, extra_tags=extra_tags,
fail_silently=fail_silently, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def persistant_error(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
""" Adds a persistant message with the ``ERROR`` level. """
|
add_message(request, ERROR_PERSISTENT, message, extra_tags=extra_tags,
fail_silently=fail_silently, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chunks(self, items, limit):
""" Yield successive chunks from list \a items with a minimum size \a limit """
|
for i in range(0, len(items), limit):
yield items[i:i + limit]
|
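A hedged usage sketch of the chunking generator above, written as a plain function (no class needed):

def chunks(items, limit):
    # yield successive slices of at most `limit` items each
    for i in range(0, len(items), limit):
        yield items[i:i + limit]

print(list(chunks([1, 2, 3, 4, 5, 6, 7], 3)))  # [[1, 2, 3], [4, 5, 6], [7]]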
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromordinal(cls, ordinal):
"""Return the week corresponding to the proleptic Gregorian ordinal, where January 1 of year 1 starts the week with ordinal 1. """
|
if ordinal < 1:
raise ValueError("ordinal must be >= 1")
return super(Week, cls).__new__(cls, *(date.fromordinal((ordinal-1) * 7 + 1).isocalendar()[:2]))
|
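The ordinal arithmetic above reduces to standard-library calls; a minimal sketch that skips the Week class and just returns the (year, week) pair:

from datetime import date

def week_from_ordinal(ordinal):
    # week ordinal 1 covers day ordinals 1..7, ordinal 2 covers 8..14, and so on
    if ordinal < 1:
        raise ValueError("ordinal must be >= 1")
    iso_year, iso_week, _ = date.fromordinal((ordinal - 1) * 7 + 1).isocalendar()
    return iso_year, iso_week

print(week_from_ordinal(1))  # (1, 1): January 1 of year 1 starts the week with ordinal 1
print(week_from_ordinal(2))  # (1, 2)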
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromstring(cls, isostring):
"""Return a week initialized from an ISO formatted string like "2011W08" or "2011-W08"."""
|
if isinstance(isostring, basestring) and len(isostring) == 7 and isostring[4] == 'W':
return cls(int(isostring[0:4]), int(isostring[5:7]))
elif isinstance(isostring, basestring) and len(isostring) == 8 and isostring[4:6] == '-W':
return cls(int(isostring[0:4]), int(isostring[6:8]))
else:
raise ValueError("Week.tostring argument must be on the form <yyyy>W<ww>; got %r" % (isostring,))
|
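A standalone parsing sketch of the two accepted string shapes ("2011W08" and "2011-W08"); it mirrors the slicing above but returns a plain (year, week) tuple instead of a Week instance:

def parse_iso_week(isostring):
    # accepts "2011W08" or "2011-W08"
    if len(isostring) == 7 and isostring[4] == 'W':
        return int(isostring[0:4]), int(isostring[5:7])
    if len(isostring) == 8 and isostring[4:6] == '-W':
        return int(isostring[0:4]), int(isostring[6:8])
    raise ValueError("argument must be of the form <yyyy>W<ww>; got %r" % (isostring,))

print(parse_iso_week("2011W08"))   # (2011, 8)
print(parse_iso_week("2011-W08"))  # (2011, 8)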
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def weeks_of_year(cls, year):
"""Return an iterator over the weeks of the given year. Years have either 52 or 53 weeks."""
|
w = cls(year, 1)
while w.year == year:
yield w
w += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def last_week_of_year(cls, year):
"""Return the last week of the given year. This week with either have week-number 52 or 53. This will be the same as Week(year+1, 0), but will even work for year 9999 where this expression would overflow. The first week of a given year is simply Week(year, 1), so there is no dedicated classmethod for that. """
|
if year == cls.max.year:
return cls.max
return cls(year+1, 0)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def day(self, num):
"""Return the given day of week as a date object. Day 0 is the Monday."""
|
d = date(self.year, 1, 4) # The Jan 4th must be in week 1 according to ISO
return d + timedelta(weeks=self.week-1, days=-d.weekday() + num)
|
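A worked sketch of the January-4th anchor trick used by day(): January 4th always falls in ISO week 1, so the requested day is that date plus whole weeks, minus the anchor's weekday offset. The Week class itself is not needed for the arithmetic:

from datetime import date, timedelta

def iso_week_day(year, week, num):
    # Jan 4th must be in ISO week 1, so anchor there and offset by whole weeks
    d = date(year, 1, 4)
    return d + timedelta(weeks=week - 1, days=-d.weekday() + num)

print(iso_week_day(2011, 8, 0))  # 2011-02-21, the Monday of ISO week 2011-W08
print(iso_week_day(2011, 8, 6))  # 2011-02-27, the Sunday of the same week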
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace(self, year=None, week=None):
"""Return a Week with either the year or week attribute value replaced"""
|
return self.__class__(self.year if year is None else year,
self.week if week is None else week)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ts_parse(ts):
"""Parse alert timestamp, return UTC datetime object to maintain Python 2 compatibility."""
|
dt = datetime.strptime(ts[:19],"%Y-%m-%dT%H:%M:%S")
if ts[19] == '+':
dt -= timedelta(hours=int(ts[20:22]),minutes=int(ts[23:]))
elif ts[19] == '-':
dt += timedelta(hours=int(ts[20:22]),minutes=int(ts[23:]))
return dt.replace(tzinfo=pytz.UTC)
|
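A runnable sketch of the same offset-folding logic; the original attaches pytz.UTC, while this version uses datetime.timezone.utc purely to stay dependency-free:

from datetime import datetime, timedelta, timezone

def ts_parse(ts):
    # parse the naive part, then fold the numeric offset back into UTC
    dt = datetime.strptime(ts[:19], "%Y-%m-%dT%H:%M:%S")
    if ts[19] == '+':
        dt -= timedelta(hours=int(ts[20:22]), minutes=int(ts[23:]))
    elif ts[19] == '-':
        dt += timedelta(hours=int(ts[20:22]), minutes=int(ts[23:]))
    return dt.replace(tzinfo=timezone.utc)  # the row above uses pytz.UTC here

print(ts_parse("2016-06-06T19:41:43-04:00"))  # 2016-06-06 23:41:43+00:00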
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _serialized(self):
"""Provides a sanitized & serializeable dict of the alert mainly for forward & backwards compatibility"""
|
return {'title': self.title,
'summary': self.summary,
'areadesc': self.areadesc,
'event': self.event,
'samecodes': self.samecodes,
'zonecodes': self.zonecodes,
'expiration': self.expiration,
'updated': self.updated,
'effective': self.effective,
'published': self.published,
'severity': self.severity,
'category': self.category,
'urgency': self.urgency,
'msgtype': self.msgtype,
'link': self.link,
}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_protocol(self, protocol):
""" A lot of measurement types make use of a protocol value, so we handle that here. """
|
if protocol is not None:
try:
return self.PROTOCOL_MAP[protocol]
except KeyError:
self._handle_malformation(
'"{protocol}" is not a recognised protocol'.format(
protocol=protocol
)
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_median(given_list):
""" Returns the median of values in the given list. """
|
median = None
if not given_list:
return median
given_list = sorted(given_list)
list_length = len(given_list)
if list_length % 2:
median = given_list[int(list_length / 2)]
else:
median = (given_list[int(list_length / 2)] + given_list[int(list_length / 2) - 1]) / 2.0
return median
|
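A quick check of the even/odd behaviour of calculate_median; the copy below only restates the row so it can run on its own:

def calculate_median(given_list):
    # standalone copy of the helper above
    if not given_list:
        return None
    given_list = sorted(given_list)
    n = len(given_list)
    if n % 2:
        return given_list[n // 2]
    return (given_list[n // 2] + given_list[n // 2 - 1]) / 2.0

print(calculate_median([9, 1, 5]))     # 5    (odd length: middle element)
print(calculate_median([9, 1, 5, 3]))  # 4.0  (even length: mean of the two middle elements)
print(calculate_median([]))            # None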
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_subject_alternative_names(self, ext):
""" Return a list of Subject Alternative Name values for the given x509 extension object. """
|
values = []
for san in ext.value:
if isinstance(san.value, string):
# Pass on simple string SAN values
values.append(san.value)
elif isinstance(san.value, x509.Name):
# In theory there could be >1 RDN here...
values.extend(
self._name_attribute_to_string(rdn) for rdn in san.value.rdns
)
return values
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure(self):
""" Configure SPI controller with the SPI mode and operating frequency """
|
# Convert standard SPI sheme to USBISS scheme
lookup_table = [0, 2, 1, 3]
mode = lookup_table[self._mode]
# Add signal for SPI switch
iss_mode = self._usbiss.SPI_MODE + mode
# Configure USB-ISS
self._usbiss.mode = [iss_mode, self.sck_divisor]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iss_spi_divisor(self, sck):
""" Calculate a USBISS SPI divisor value from the input SPI clock speed :param sck: SPI clock frequency :type sck: int :returns: ISS SCK divisor :rtype: int """
|
_divisor = (6000000 / sck) - 1
divisor = int(_divisor)
if divisor != _divisor:
raise ValueError('Non-integer SCK divisor.')
if not 1 <= divisor < 256:
error = (
"The value of sck_divisor, {}, "
"is not between 0 and 255".format(divisor)
)
raise ValueError(error)
return divisor
|
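The divisor formula above implies SCK = 6 MHz / (divisor + 1); a hedged standalone sketch (without the class) plus two sample clock speeds:

def spi_divisor(sck_hz):
    # USB-ISS derives SCK from a 6 MHz reference: sck = 6_000_000 / (divisor + 1)
    raw = (6000000 / sck_hz) - 1
    divisor = int(raw)
    if divisor != raw:
        raise ValueError('Non-integer SCK divisor.')
    if not 1 <= divisor < 256:
        raise ValueError('divisor {} is not between 1 and 255'.format(divisor))
    return divisor

print(spi_divisor(1000000))  # 5  -> 6 MHz / (5 + 1)  = 1 MHz
print(spi_divisor(500000))   # 11 -> 6 MHz / (11 + 1) = 500 kHz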
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exchange(self, data):
""" Perform SPI transaction. The first received byte is either ACK or NACK. :TODO: enforce rule that up to 63 bytes of data can be sent. :TODO: enforce rule that there is no gaps in data bytes (what define a gap?) :param data: List of bytes :returns: List of bytes :rtype: List of bytes """
|
self._usbiss.write_data([self._usbiss.SPI_CMD] + data)
response = self._usbiss.read_data(1 + len(data))
if len(response) != 0:
response = self._usbiss.decode(response)
status = response.pop(0)
if status == 0:
raise USBISSError('SPI Transmission Error')
return response
else:
raise USBISSError('SPI Transmission Error: No bytes received!')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_feed_cache(self):
"""If a recent cache exists, return it, else return None"""
|
feed_cache = None
if os.path.exists(self._feed_cache_file):
maxage = datetime.now() - timedelta(minutes=self._cachetime)
file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)
if file_ts > maxage:
try:
with open(self._feed_cache_file, 'rb') as cache:
feed_cache = cache.read()
finally:
pass
return feed_cache
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_medians_and_extremes(self):
""" Sets median values for rtt and the offset of result packets. """
|
rtts = sorted([p.rtt for p in self.packets if p.rtt is not None])
if rtts:
self.rtt_min = rtts[0]
self.rtt_max = rtts[-1]
self.rtt_median = self.calculate_median(rtts)
offsets = sorted(
[p.offset for p in self.packets if p.offset is not None]
)
if offsets:
self.offset_min = offsets[0]
self.offset_max = offsets[-1]
self.offset_median = self.calculate_median(offsets)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
|
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_request(req, collect=False, collector_addr='tcp://127.0.0.2:2345', prefix='my_app'):
""" register a request registers a request in the internal request table, optionally also sends it to the collector :param req: request, can be mostly any hash-able object :param collect: whether to send the request started event to the collector (bool) :param collector_addr: collector address, in zeromq format (string, default tcp://127.0.0.2:2345) :param prefix: label under which to register the request (string, default my_app) """
|
if collect:
collector = get_context().socket(zmq.PUSH)
collector.connect(collector_addr)
collector.send_multipart([prefix, ''])
collector.close()
requests[hash(req)] = time()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def end_request(req, collector_addr='tcp://127.0.0.2:2345', prefix='my_app'):
""" registers the end of a request registers the end of a request, computes elapsed time, sends it to the collector :param req: request, can be mostly any hash-able object :param collector_addr: collector address, in zeromq format (string, default tcp://127.0.0.2:2345) :param prefix: label under which to register the request (string, default my_app) """
|
req_end = time()
hreq = hash(req)
if hreq in requests:
req_time = req_end - requests[hreq]
req_time *= 1000
del requests[hreq]
collector = get_context().socket(zmq.PUSH)
collector.connect(collector_addr)
collector.send_multipart([prefix, str(req_time)])
collector.close()
return req_time
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_target_areas(entry):
"""Cleanup the raw target areas description string"""
|
target_areas = []
areas = str(entry['cap:areaDesc']).split(';')
for area in areas:
target_areas.append(area.strip())
return target_areas
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_alerts(self):
""" Public method that parses """
|
emptyfeed = "There are no active watches, warnings or advisories"
alerts = []
if emptyfeed in str(self._raw_cap):
pass
else:
main_dom = minidom.parseString(self._raw_cap)
xml_entries = main_dom.getElementsByTagName('entry')
# title is currently first so we can detect an empty cap feed
for dom in xml_entries:
# parse the entry to a temp 'entry' dict
entry = self._parse_entry(dom)
# perform some cleanup before creating an object
# entry['locations'] = self.build_locations(entry) # FIXME: remove?
entry['target_areas'] = build_target_areas(entry)
alert = Alert(entry)
alerts.append(alert)
del entry
del alert
return alerts
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_notebook(fullname, path=None):
"""find a notebook, given its fully qualified name and an optional path This turns "foo.bar" into "foo/bar.ipynb" and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar does not exist. """
|
name = fullname.rsplit('.', 1)[-1]
if not path:
path = ['']
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_module(self, fullname):
"""import a notebook as a module"""
|
path = find_notebook(fullname, self.path)
print ("importing Jupyter notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = read(f, 4)
# create the module and add it to sys.modules
# if name in sys.modules:
#     return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb.cells:
if cell.cell_type == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell.source)
# run the code in the module
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def location_lookup(self, req_location):
""" returns full location given samecode or county and state. Returns False if not valid. *currently locations are a dictionary, once other geo data is added, they will move to a location class/obj* """
|
location = False
try:
location = self.samecodes[req_location['code']]
except Exception:
pass
try:
location = self.lookup_samecode(req_location['local'], req_location['state'])
except Exception:
pass
return location
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_samecode(self, local, state):
"""Given County, State return the SAME code for specified location. Return False if not found"""
|
for location in self.samecodes:
if state.lower() == self.samecodes[location]['state'].lower():
if local.lower() == self.samecodes[location]['local'].lower():
return self.samecodes[location]
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getfeedscope(self, geocodes):
"""Given multiple SAME codes, determine if they are all in one state. If so, it returns that state. Otherwise return 'US'. This is used to determine which NWS feed needs to be parsed to get all alerts for the requested SAME codes"""
|
states = self._get_states_from_samecodes(geocodes)
if len(states) >= 2:
return 'US'
else:
return states[0]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_states_from_samecodes(self, geocodes):
"""Returns all states for a given list of SAME codes *Shouldn't be used to determine feed scope, please use getfeedscope()* """
|
states = []
for code in geocodes:
if not isinstance(geocodes, list):
raise Exception("specified geocodes must be list")
try:
state = self.samecodes[code]['state']
except KeyError:
raise Exception("Samecode Not Found")
else:
if state not in states:
states.append(state)
return states
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_same_codes(self, refresh=False):
"""Loads the Same Codes into this object"""
|
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_same_codes(self):
"""get SAME codes, load into a dict and cache"""
|
same = {}
url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
code, local, state = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location
finally:
pass
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cached_same_codes(self):
"""If a cached copy is available, return it"""
|
cache_file = self._same_cache_file
if os.path.exists(cache_file):
maxage = datetime.now() - timedelta(minutes=4320)
file_ts = datetime.fromtimestamp(os.stat(cache_file).st_mtime)
if file_ts > maxage:
try:
cache = open(cache_file, 'rb')
self._samecodes = cPickle.load(cache)
cache.close()
return True
finally:
pass
self.reload()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_destination_ip_responded(self, last_hop):
"""Sets the flag if destination IP responded."""
|
if not self.destination_address:
return
for packet in last_hop.packets:
if packet.origin and \
self.destination_address == packet.origin:
self.destination_ip_responded = True
break
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_last_hop_responded(self, last_hop):
"""Sets the flag if last hop responded."""
|
for packet in last_hop.packets:
if packet.rtt:
self.last_hop_responded = True
break
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_is_success(self, last_hop):
"""Sets the flag if traceroute result is successful or not."""
|
for packet in last_hop.packets:
if packet.rtt and not packet.is_error:
self.is_success = True
break
else:
self.set_last_hop_errors(last_hop)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_last_hop_errors(self, last_hop):
"""Sets the last hop's errors."""
|
if last_hop.is_error:
self.last_hop_errors.append(last_hop.error_message)
return
for packet in last_hop.packets:
if packet.is_error:
self.last_hop_errors.append(packet.error_message)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ip_path(self):
""" Returns just the IPs from the traceroute. """
|
r = []
for hop in self.hops:
r.append([packet.origin for packet in hop.packets])
return r
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_iss_info(self):
""" Get information about the USB-ISS Querying will return three bytes; - the module ID (7), - firmware version (currently 2), - the current operating mode. """
|
self.write_data([self.ISS_CMD, self.ISS_VERSION])
response = self.read_data(3)
if len(response) == 3:
response = self.decode(response)
self.module = response[0]
self.firmware = response[1]
self._mode = response[2]
else:
raise USBISSError("Could not get version details")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_iss_serial_no(self):
""" Get serial number of USB-ISS module """
|
self.write_data([self.ISS_CMD, self.ISS_SER_NUM])
# Return 8 bytes serial number
self.iss_sn = self.read_data(8)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mode(self, set_bytes):
"""Set the operating protocol of the USB-ISS with additional parameters for the protocol """
|
self._mode = set_bytes
data = [self.ISS_CMD, self.ISS_SET_MODE] + set_bytes
self.write_data(data)
response = self.read_data(2)
if response[0] == 0:
error_dict = {
0x05: 'Unknown Command',
0x06: 'Internal Error 1',
0x07: 'Internal Error 2'
}
try:
raise USBISSError(error_dict[response[1]])
except KeyError:
raise USBISSError('Undocumented Error')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keys_to_datetime(obj, *keys):
""" Converts all the keys in an object to DateTime instances. Args: obj (dict):
the JSON-like ``dict`` object to modify inplace. keys (str):
keys of the object being converted into DateTime instances. Returns: dict: ``obj`` inplace. True {} True 'two': '2016-06-06T19:41:43.039284'} True datetime.datetime(2016, 6, 6, 19, 41, 43, 39284) '2016-06-06T19:41:43.039284' """
|
if not keys:
return obj
for k in keys:
if k not in obj:
continue
v = obj[k]
if not isinstance(v, string_types):
continue
obj[k] = parse_datetime(v)
return obj
|
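A minimal sketch of the in-place conversion, assuming ISO-8601 strings; the original uses a string_types check and a parse_datetime helper (not shown), so datetime.fromisoformat stands in for the latter here:

from datetime import datetime

def keys_to_datetime(obj, *keys):
    # convert string values under the named keys in place; skip missing or non-string values
    for k in keys:
        if k in obj and isinstance(obj[k], str):
            obj[k] = datetime.fromisoformat(obj[k])
    return obj

row = {'two': '2016-06-06T19:41:43.039284', 'skip': 42}
keys_to_datetime(row, 'two', 'missing', 'skip')
print(row['two'])   # 2016-06-06 19:41:43.039284
print(row['skip'])  # 42 (left untouched)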
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse(self, path=None):
""" Returns a list of directories matching the path given. Args: path (str):
glob pattern. Returns: List[str] """
|
params = None
if path:
assert isinstance(path, string_types)
params = {'current': path}
return self.get('browse', params=params)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_insync(self):
""" Returns whether the config is in sync, i.e. whether the running configuration is the same as that on disk. Returns: bool """
|
status = self.get('config/insync').get('configInSync', False)
if status is None:
status = False
return status
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def errors(self):
""" Returns the list of recent errors. Returns: list: of :obj:`.ErrorEvent` tuples. """
|
ret_errs = list()
errors = self.get('error').get('errors', None) or list()
assert isinstance(errors, list)
for err in errors:
when = parse_datetime(err.get('when', None))
msg = err.get('message', '')
e = ErrorEvent(when, msg)
ret_errs.append(e)
return ret_errs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show_error(self, message):
""" Send an error message to the active client. The new error will be displayed on any active GUI clients. Args: message (str):
Plain-text message to display. Returns: None """
|
assert isinstance(message, string_types)
self.post('error', data=message)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pause(self, device):
""" Pause the given device. Args: device (str):
Device ID. Returns: dict: with keys ``success`` and ``error``. """
|
resp = self.post('pause', params={'device': device},
return_response=True)
error = resp.text
if not error:
error = None
return {'success': resp.status_code == requests.codes.ok,
'error': error}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset_folder(self, folder):
""" Erase the database index from a given folder and restart Syncthing. Args: folder (str):
Folder ID. Returns: None """
|
warnings.warn('This is a destructive action that cannot be undone.')
self.post('reset', data={}, params={'folder': folder})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse(self, folder, levels=None, prefix=None):
""" Returns the directory tree of the global model. Directories are always JSON objects (map/dictionary), and files are always arrays of modification time and size. The first integer is the files modification time, and the second integer is the file size. Args: folder (str):
The root folder to traverse. levels (int):
How deep within the tree we want to dwell down. (0 based, defaults to unlimited depth) prefix (str):
Defines a prefix within the tree where to start building the structure. Returns: dict """
|
assert isinstance(levels, int) or levels is None
assert isinstance(prefix, string_types) or prefix is None
return self.get('browse', params={'folder': folder,
'levels': levels,
'prefix': prefix})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_ignores(self, folder, *patterns):
""" Applies ``patterns`` to ``folder``'s ``.stignore`` file. Args: folder (str):
patterns (str):
Returns: dict """
|
if not patterns:
return {}
data = {'ignore': list(patterns)}
return self.post('ignores', params={'folder': folder}, data=data)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def need(self, folder, page=None, perpage=None):
""" Returns lists of files which are needed by this device in order for it to become in sync. Args: folder (str):
page (int):
If defined applies pagination across the collection of results. perpage (int):
If defined applies pagination across the collection of results. Returns: dict """
|
assert isinstance(page, int) or page is None
assert isinstance(perpage, int) or perpage is None
return self.get('need', params={'folder': folder,
'page': page,
'perpage': perpage})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan(self, folder, sub=None, next_=None):
""" Request immediate rescan of a folder, or a specific path within a folder. Args: folder (str):
Folder ID. sub (str):
Path relative to the folder root. If sub is omitted the entire folder is scanned for changes, otherwise only the given path children are scanned. next_ (int):
Delays Syncthing's automated rescan interval for a given amount of seconds. Returns: str """
|
if not sub:
sub = ''
assert isinstance(sub, string_types)
assert isinstance(next_, int) or next_ is None
return self.post('scan', params={'folder': folder,
'sub': sub,
'next': next_})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _events(self, using_url, filters=None, limit=None):
""" A long-polling method that queries Syncthing for events.. Args: using_url (str):
REST HTTP endpoint filters (List[str]):
Creates an "event group" in Syncthing to only receive events that have been subscribed to. limit (int):
The number of events to query in the history to catch up to the current state. Returns: generator[dict] """
|
# coerce
if not isinstance(limit, (int, NoneType)):
limit = None
# coerce
if filters is None:
filters = []
# format our list into the correct expectation of string with commas
if isinstance(filters, string_types):
filters = filters.split(',')
# reset the state if the loop was broken with `stop`
if not self.blocking:
self.blocking = True
# block/long-poll for updates to the events api
while self.blocking:
params = {
'since': self._last_seen_id,
'limit': limit,
}
if filters:
params['events'] = ','.join(map(str, filters))
try:
data = self.get(using_url, params=params, raw_exceptions=True)
except (ConnectTimeout, ConnectionError) as e:
# swallow timeout errors for long polling
data = None
except Exception as e:
reraise('', e)
if data:
# update our last_seen_id to move our event counter forward
self._last_seen_id = data[-1]['id']
for event in data:
# handle potentially multiple events returned in a list
self._count += 1
yield event
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subtree_leaf_positions(subtree):
"""Return tree positions of all leaves of a subtree."""
|
relative_leaf_positions = subtree.treepositions('leaves')
subtree_root_pos = subtree.treeposition()
absolute_leaf_positions = []
for rel_leaf_pos in relative_leaf_positions:
absolute_leaf_positions.append( subtree_root_pos + rel_leaf_pos)
return absolute_leaf_positions
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dis_format(self):
"""Return a string representation of the tree in .dis format."""
|
dis_raw_str = self.disfiletree.pformat()
return re.sub('_!(.*?)_!', join_lines, dis_raw_str, flags=re.DOTALL)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_graphml(docgraph, output_file):
""" takes a document graph, converts it into GraphML format and writes it to a file. """
|
dg_copy = deepcopy(docgraph)
layerset2str(dg_copy)
attriblist2str(dg_copy)
remove_root_metadata(dg_copy)
nx_write_graphml(dg_copy, output_file)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def parse_child_elements(self, element):
'''parses all children of an etree element'''
for child in element.iterchildren():
self.parsers[child.tag](child)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def parse_descedant_elements(self, element):
'''parses all descendants of an etree element'''
for descendant in element.iterdescendants():
self.parsers[descendant.tag](descendant)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_discrel(self, discrel):
""" Add a discourse relation to the document graph. Parameters add_discrel : etree.Element etree representation of a <discRel> element which describes the relation between two EDUs. The ID of the other EDU is given in the arg2 attribute. Note, that arg2 can either reference an EDU (e.g. edu_9_3_2 or an EDU range, e.g. edus9_3_1-5_0). Example ------- <edu xml:id="edu_9_3_0"> <discRel relation="Explanation-Speechact" marking="-" arg2="edus9_3_1-5_0"/> <node xml:id="s128_504" cat="SIMPX" func="--"> </node> <word xml:id="s128_3" form=":" pos="$." lemma=":" func="--" deprel="ROOT"/> </edu> <edu xml:id="edu_9_3_1"> <discRel relation="Continuation" marking="-" arg2="edu_9_3_2"/> <node xml:id="s128_506" cat="VF" func="-" parent="s128_525"> </node> </edu> """
|
if self.ignore_relations is False:
arg1_id = self.get_element_id(discrel)
arg2_id = discrel.attrib['arg2']
reltype = discrel.attrib['relation']
discrel_attribs = self.element_attribs_to_dict(discrel)
self.node[arg1_id].update(discrel_attribs)
self.add_layer(arg1_id, self.ns+':discourse')
self.add_layer(arg1_id, self.ns+':relation')
self.add_edge(arg1_id, arg2_id,
layers={self.ns, self.ns+':discourse', self.ns+':relation'},
edge_type=dg.EdgeTypes.pointing_relation,
relation=reltype,
label='discourse:'+reltype)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parent_id(element):
"""returns the ID of the parent of the given element"""
|
if 'parent' in element.attrib:
return element.attrib['parent']
else:
return element.getparent().attrib[add_ns('id')]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sentence_id(self, element):
"""returns the ID of the sentence the given element belongs to."""
|
try:
sentence_elem = element.iterancestors('sentence').next()
except StopIteration as e:
warnings.warn("<{}> element is not a descendant of a <sentence> "
"We'll try to extract the sentence ID from the "
"prefix of the element ID".format(element.tag))
return self.get_element_id(element).split('_')[0]
return self.get_element_id(sentence_elem)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_node_type(dgtree):
"""Returns the type of the root node of a DGParentedTree."""
|
if is_leaf(dgtree):
return TreeNodeTypes.leaf_node
root_label = dgtree.label()
if root_label == '':
assert dgtree == DGParentedTree('', []), \
"The tree has no root label, but isn't empty: {}".format(dgtree)
return TreeNodeTypes.empty_tree
elif root_label in NUCLEARITY_LABELS:
return TreeNodeTypes.nuclearity_node
else:
assert isinstance(dgtree, (RSTTree, DGParentedTree)), type(dgtree)
return TreeNodeTypes.relation_node
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_children_treepos(self, treepos):
"""Given a treeposition, return the treepositions of its children."""
|
children_treepos = []
for i, child in enumerate(self.dgtree[treepos]):
if isinstance(child, nltk.Tree):
children_treepos.append(child.treeposition())
elif is_leaf(child):
# we can't call .treeposition() on a leaf node
treepos_list = list(treepos)
treepos_list.append(i)
leaf_treepos = tuple(treepos_list)
children_treepos.append(leaf_treepos)
return children_treepos
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_siblings_treepos(self, treepos):
"""Given a treeposition, return the treepositions of its siblings."""
|
parent_pos = self.get_parent_treepos(treepos)
siblings_treepos = []
if parent_pos is not None:
for child_treepos in self.get_children_treepos(parent_pos):
if child_treepos != treepos:
siblings_treepos.append(child_treepos)
return siblings_treepos
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cousins_treepos(self, treepos):
"""Given a treeposition, return the treeposition of its siblings."""
|
cousins_pos = []
mother_pos = self.get_parent_treepos(treepos)
if mother_pos is not None:
aunts_pos = self.get_siblings_treepos(mother_pos)
for aunt_pos in aunts_pos:
cousins_pos.extend( self.get_children_treepos(aunt_pos) )
return cousins_pos
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parent_label(self, treepos):
"""Given the treeposition of a node, return the label of its parent. Returns None, if the tree has no parent. """
|
parent_pos = self.get_parent_treepos(treepos)
if parent_pos is not None:
parent = self.dgtree[parent_pos]
return parent.label()
else:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_children_labels(self, treepos):
"""Given the treeposition of a node, return the labels of its children."""
|
children_labels = []
node = self.dgtree[treepos]
for child in node:
if is_leaf(child):
# we can't call .label() on a leaf node
children_labels.append(child)
else:
children_labels.append(child.label())
return children_labels
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def natural_sort_key(s):
""" returns a key that can be used in sort functions. Example: The normal sort function will ignore the natural order of the integers in the string: ['A99', 'a1', 'a10', 'a100', 'a12', 'a2', 'a24'] When we use this function as a key to the sort function, the natural order of the integer is considered. ['A99', 'a1', 'a2', 'a10', 'a12', 'a24', 'a100'] """
|
return [int(text) if text.isdigit() else text
for text in re.split(INTEGER_RE, str(s))]
|
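A runnable sketch of the natural sort key; INTEGER_RE is not shown in the row above, so a plain digit-run pattern is assumed here:

import re

INTEGER_RE = r'(\d+)'  # assumed pattern: split on runs of digits, keeping them

def natural_sort_key(s):
    return [int(text) if text.isdigit() else text
            for text in re.split(INTEGER_RE, str(s))]

items = ['A99', 'a1', 'a10', 'a100', 'a12', 'a2', 'a24']
print(sorted(items))                        # ['A99', 'a1', 'a10', 'a100', 'a12', 'a2', 'a24']
print(sorted(items, key=natural_sort_key))  # ['A99', 'a1', 'a2', 'a10', 'a12', 'a24', 'a100']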
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_unicode(str_or_unicode):
""" tests, if the input is ``str`` or ``unicode``. if it is ``str``, it will be decoded from ``UTF-8`` to unicode. """
|
if isinstance(str_or_unicode, str):
return str_or_unicode.decode('utf-8')
elif isinstance(str_or_unicode, unicode):
return str_or_unicode
else:
raise ValueError("Input '{0}' should be a string or unicode, "
"but its of type {1}".format(str_or_unicode,
type(str_or_unicode)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_utf8(str_or_unicode):
""" tests, if the input is ``str`` or ``unicode``. if it is ``unicode``, it will be encoded from ``unicode`` to ``utf-8``. otherwise, the input string is returned. """
|
if isinstance(str_or_unicode, str):
return str_or_unicode
elif isinstance(str_or_unicode, unicode):
return str_or_unicode.encode('utf-8')
else:
raise ValueError(
"Input '{0}' should be a string or unicode, but it is of "
"type {1}".format(str_or_unicode, type(str_or_unicode)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_dir(path):
""" Creates a directory. Warns, if the directory can't be accessed. Passes, if the directory already exists. modified from http://stackoverflow.com/a/600612 Parameters path : str path to the directory to be created """
|
import sys
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
if os.path.isdir(path):
pass
else: # if something exists at the path, but it's not a dir
raise
elif exc.errno == errno.EACCES:
sys.stderr.write("Cannot create [{0}]! Check Permissions".format(path))
raise
else:
raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_labels_explicit(docgraph):
""" Appends the node ID to each node label and appends the edge type to each edge label in the given document graph. This can be used to debug a graph visually with ``write_dot``. Parameters docgraph : DiscourseDocumentGraph document graph from which the nodes will be extracted Returns ------- explicit_docgraph : DiscourseDocumentGraph document graph with explicit node and edge labels """
|
def make_nodelabels_explicit(docgraph):
for node_id, node_attribs in docgraph.nodes(data=True):
if 'label' in docgraph.node[node_id]:
docgraph.node[node_id]['label'] = \
u"{0}_{1}".format(node_attribs['label'], node_id)
return docgraph
def make_edgelabels_explicit(docgraph):
for from_id, to_id, edge_attribs in docgraph.edges(data=True):
for edge_num in docgraph.edge[from_id][to_id]:
if 'label' in docgraph.edge[from_id][to_id][edge_num]:
docgraph.edge[from_id][to_id][edge_num]['label'] = \
u"{0}_{1}".format(edge_attribs['label'],
edge_attribs['edge_type'])
else:
docgraph.edge[from_id][to_id][edge_num]['label'] = \
edge_attribs['edge_type']
return docgraph
return make_edgelabels_explicit(make_nodelabels_explicit(docgraph))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_multiple_replace_func(*args, **kwds):
""" You can call this function and pass it a dictionary, or any other combination of arguments you could pass to built-in dict in order to construct a dictionary. The function will return a xlat closure that takes as its only argument text the string on which the substitutions are desired and returns a copy of text with all the substitutions performed. Source: Python Cookbook 2nd ed, Chapter 1.18. Replacing Multiple Patterns in a Single Pass. https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html """
|
adict = dict(*args, **kwds)
rx = re.compile('|'.join(map(re.escape, adict)))
def one_xlat(match):
return adict[match.group(0)]
def xlat(text):
return rx.sub(one_xlat, text)
return xlat
|
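A short usage sketch of the single-pass replacement closure; because all patterns are matched in one pass, swapping 'cat' and 'dog' does not re-replace the substituted text:

import re

def create_multiple_replace_func(*args, **kwds):
    # standalone copy of the closure factory above
    adict = dict(*args, **kwds)
    rx = re.compile('|'.join(map(re.escape, adict)))
    def one_xlat(match):
        return adict[match.group(0)]
    def xlat(text):
        return rx.sub(one_xlat, text)
    return xlat

swap = create_multiple_replace_func({'cat': 'dog', 'dog': 'cat'})
print(swap('cat chases dog'))  # dog chases cat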
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_connective(docgraph, unit_id):
""" returns the lowercased string of the connective used in the given Conano unit. """
|
unit_index, _unit_type = unit_id.split(':')
connective_id = unit_index+':connective'
return ' '.join(docgraph.get_token(tok_id).lower()
for tok_id in get_span(docgraph, connective_id))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_token(self, token, parent_node='root'):
"""add a token to this docgraph"""
|
if parent_node == 'root':
parent_node = self.root
token_node_id = 'token:{}'.format(self.token_count)
self.add_node(token_node_id, layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': token})
self.add_edge(parent_node, token_node_id,
layers={self.ns},
edge_type=EdgeTypes.spanning_relation)
self.tokens.append(token_node_id)
self.token_count += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_token_to_document(self, token, token_id, connected):
""" adds a token to the document graph as a node with the given ID. Parameters token : str the token to be added to the document graph token_id : int the node ID of the token to be added, which must not yet exist in the document graph connected : bool Make the graph connected, i.e. add an edge from root this token. """
|
regex_match = ANNOTATED_ANAPHORA_REGEX.search(token)
if regex_match: # token is annotated
unannotated_token = regex_match.group('token')
unicode_token = ensure_unicode(unannotated_token)
annotation = regex_match.group('annotation')
anno_type = ANNOTATION_TYPES[annotation]
certainty = "1.0" if not regex_match.group('uncertain') else "0.5"
self.add_node(
token_id,
layers={self.ns, self.ns+':token', self.ns+':annotated'},
attr_dict={
self.ns+':annotation': anno_type,
self.ns+':certainty': certainty,
self.ns+':token': unicode_token,
'label': u"{0}_{1}".format(unicode_token, anno_type)})
else: # token is not annotated
self.add_node(
token_id,
layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': ensure_unicode(token),
'label': ensure_unicode(token)})
if connected:
self.add_edge(self.root, token_id,
layers={self.ns, self.ns+':token'})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_nodes_for_pydot(nodes_with_data):
"""throw away all node attributes, except for 'label'"""
|
for (node_id, attrs) in nodes_with_data:
if 'label' in attrs:
yield (quote_for_pydot(node_id),
{'label': quote_for_pydot(attrs['label'])})
else:
yield (quote_for_pydot(node_id), {})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_edges_for_pydot(edges_with_data):
"""throw away all edge attributes, except for 'label'"""
|
for (source, target, attrs) in edges_with_data:
if 'label' in attrs:
yield (quote_for_pydot(source), quote_for_pydot(target),
{'label': quote_for_pydot(attrs['label'])})
else:
yield (quote_for_pydot(source), quote_for_pydot(target), {})
|