<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, data, labels, **kwargs):
"""\ Training the SOM on the the data and calibrate itself. After the training, `self.quant_error` and `self.topog_error` are respectively set. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param labels: the labels associated with data :type labels: iterable :param \**kwargs: optional parameters for :meth:`train` """
|
# train the network
self._som.train(data, **kwargs)
# retrieve first and second bmus and distances
bmus, q_error, t_error = self.bmus_with_errors(data)
# set errors measures of training data
self.quant_error = q_error
self.topog_error = t_error
# store training bmus
self._bmus = bmus
# calibrate
self._calibrate(data, labels)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calibrate(self, data, labels):
"""\ Calibrate the network using `self._bmus`. """
|
# network calibration
classifier = defaultdict(Counter)
for (i,j), label in zip(self._bmus, labels):
classifier[i,j][label] += 1
self.classifier = {}
for ij, cnt in classifier.items():
maxi = max(cnt.items(), key=itemgetter(1))
nb = sum(cnt.values())
self.classifier[ij] = maxi[0], maxi[1] / nb
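A standalone sketch of the majority-vote step above, outside the class (illustrative values; the float() guards against Python 2 integer division):

from collections import Counter
from operator import itemgetter

votes = Counter({'cat': 3, 'dog': 1})                 # label counts for one unit
label, count = max(votes.items(), key=itemgetter(1))  # most frequent label
confidence = count / float(sum(votes.values()))       # -> ('cat', 0.75)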
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, data, unkown=None):
"""\ Classify data according to previous calibration. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param unkown: the label to attribute if no label is known :returns: the labels guessed for data :rtype: `numpy.array` """
|
assert self.classifier is not None, 'not calibrated'
bmus = self._som.bmus(data)
return self._predict_from_bmus(bmus, unkown)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_predict(self, data, labels, unkown=None):
"""\ Fit and classify data efficiently. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param labels: the labels associated with data :type labels: iterable :param unkown: the label to attribute if no label is known :returns: the labels guessed for data :rtype: `numpy.array` """
|
self.fit(data, labels)
return self._predict_from_bmus(self._bmus, unkown)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def histogram(self, bmus=None):
"""\ Return a 2D histogram of bmus. :param bmus: the best-match units indexes for underlying data. :type bmus: :class:`numpy.ndarray` :returns: the computed 2D histogram of bmus. :rtype: :class:`numpy.ndarray` """
|
if bmus is None:
assert self._bmus is not None, 'not trained'
bmus = self._bmus
arr = np.zeros((self._som.nrows, self._som.ncols))
for i,j in bmus:
arr[i,j] += 1
return arr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_protocol_version(protocol=None, target=None):
""" Return a suitable pickle protocol version for a given target. Arguments: target: The internals description of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. protocol(None or int):
The requested protocol version (or None for the default of the target python version). Returns: int: A suitable pickle protocol version. """
|
target = get_py_internals(target)
if protocol is None:
protocol = target['pickle_default_protocol']
if protocol > cPickle.HIGHEST_PROTOCOL:
warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)
protocol = cPickle.HIGHEST_PROTOCOL
target_highest_protocol = target['pickle_highest_protocol']
if protocol > target_highest_protocol:
warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)
protocol = target_highest_protocol
return protocol
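A self-contained sketch of the same clamping logic without the target lookup (standard library only; `target_highest` stands in for the target's `pickle_highest_protocol` entry):

import pickle
import warnings

def clamp_protocol(protocol, target_highest):
    # cap at what the running interpreter supports
    if protocol > pickle.HIGHEST_PROTOCOL:
        warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % pickle.HIGHEST_PROTOCOL)
        protocol = pickle.HIGHEST_PROTOCOL
    # cap at what the target interpreter supports
    if protocol > target_highest:
        warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest)
        protocol = target_highest
    return protocol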
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def translate_opcodes(code_obj, target):
""" Very crude inter-python version opcode translator. Raises SyntaxError when the opcode doesn't exist in the destination opmap. Used to transcribe python code objects between python versions. Arguments: code_obj(pwnypack.bytecode.CodeObject):
The code object representation to translate. target(dict):
The py_internals structure for the target python version. """
|
target = get_py_internals(target)
src_ops = code_obj.disassemble()
dst_opmap = target['opmap']
dst_ops = []
op_iter = enumerate(src_ops)
for i, op in op_iter:
if isinstance(op, pwnypack.bytecode.Label):
dst_ops.append(op)
continue
if op.name not in dst_opmap:
if op.name == 'POP_JUMP_IF_FALSE' and 'JUMP_IF_TRUE' in dst_opmap:
lbl = pwnypack.bytecode.Label()
dst_ops.extend([
pwnypack.bytecode.Op('JUMP_IF_TRUE', lbl),
pwnypack.bytecode.Op('POP_TOP', None),
pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
lbl,
pwnypack.bytecode.Op('POP_TOP', None),
])
elif op.name == 'POP_JUMP_IF_TRUE' and 'JUMP_IF_FALSE' in dst_opmap:
lbl = pwnypack.bytecode.Label()
dst_ops.extend([
pwnypack.bytecode.Op('JUMP_IF_FALSE', lbl),
pwnypack.bytecode.Op('POP_TOP', None),
pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
lbl,
pwnypack.bytecode.Op('POP_TOP', None),
])
elif op.name == 'JUMP_IF_FALSE' and 'JUMP_IF_FALSE_OR_POP' in dst_opmap and \
src_ops[i + 1].name == 'POP_TOP':
next(op_iter)
dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_FALSE_OR_POP', op.arg))
elif op.name == 'JUMP_IF_TRUE' and 'JUMP_IF_TRUE_OR_POP' in dst_opmap and \
src_ops[i + 1].name == 'POP_TOP':
next(op_iter)
dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_TRUE_OR_POP', op.arg))
else:
raise SyntaxError('Opcode %s not supported on target.' % op.name)
else:
dst_ops.append(op)
code_obj.assemble(dst_ops, target)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stops(self):
"""Return stops served by this route."""
|
serves = set()
for trip in self.trips():
for stop_time in trip.stop_times():
serves |= stop_time.stops()
return serves
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def P(value, bits=None, endian=None, target=None):
""" Pack an unsigned pointer for a given target. Args: value(int):
The value to pack. bits(:class:`~pwnypack.target.Target.Bits`):
Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`):
Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`):
Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. """
|
return globals()['P%d' % _get_bits(bits, target)](value, endian=endian, target=target)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def p(value, bits=None, endian=None, target=None):
""" Pack a signed pointer for a given target. Args: value(int):
The value to pack. bits(:class:`pwnypack.target.Target.Bits`):
Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`):
Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`):
Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. """
|
return globals()['p%d' % _get_bits(bits, target)](value, endian=endian, target=target)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def U(data, bits=None, endian=None, target=None):
""" Unpack an unsigned pointer for a given target. Args: data(bytes):
The data to unpack. bits(:class:`pwnypack.target.Target.Bits`):
Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`):
Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`):
Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. Returns: int: The pointer value. """
|
return globals()['U%d' % _get_bits(bits, target)](data, endian=endian, target=target)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def u(data, bits=None, endian=None, target=None):
""" Unpack a signed pointer for a given target. Args: data(bytes):
The data to unpack. bits(:class:`pwnypack.target.Target.Bits`):
Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`):
Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`):
Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. Returns: int: The pointer value. """
|
return globals()['u%d' % _get_bits(bits, target)](data, endian=endian, target=target)
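The width-specific helpers these four wrappers dispatch to (P32, u64, ...) are not shown here; a minimal sketch of what such a pair could look like using struct (an assumption for illustration, not the library's actual code):

import struct

def P32(value, endian='<', target=None):
    # pack an unsigned 32-bit integer; endian is a struct byte-order prefix in this sketch
    return struct.pack(endian + 'I', value)

def U32(data, endian='<', target=None):
    # unpack an unsigned 32-bit integer
    return struct.unpack(endian + 'I', data)[0]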
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trips(self):
"""Return all trips for this agency."""
|
trips = set()
for route in self.routes():
trips |= route.trips()
return trips
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stops(self):
"""Return all stops visited by trips for this agency."""
|
stops = set()
for stop_time in self.stop_times():
stops |= stop_time.stops()
return stops
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop_times(self):
"""Return all stop_times for this agency."""
|
stop_times = set()
for trip in self.trips():
stop_times |= trip.stop_times()
return stop_times
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def localize_fields(cls, localized_fields):
""" For each field name in localized_fields, for each language in settings.LANGUAGES, add fields to cls, and remove the original field, instead replace it with a DefaultFieldDescriptor, which always returns the field in the current language. """
|
# never do this twice
if hasattr(cls, 'localized_fields'):
return cls
# MSGID_LANGUAGE is the language that is used for the gettext message id's.
# If it is not available, because the site isn't using subsites, the
# LANGUAGE_CODE is good too. MSGID_LANGUAGE gives the opportunity to
# specify a language not available in the site but which is still used for
# the message id's.
msgid_language = getattr(settings,
'MSGID_LANGUAGE', settings.LANGUAGE_CODE)
# set the localized fields property
cls.localized_fields = localized_fields
for field in localized_fields:
original_attr = get_field_from_model_by_name(cls, field)
for cnt, language_code in enumerate(get_all_language_codes()):
i18n_attr = copy.copy(original_attr)
# add support for south introspection.
i18n_attr._south_introspects = True
i18n_attr.original_fieldname = field
i18n_attr.include_in_xml = False
lang_attr_name = get_real_fieldname(field, language_code)
i18n_attr.name = lang_attr_name
i18n_attr.creation_counter = i18n_attr.creation_counter + .01 * cnt
# null must be allowed for the message id language because this
# language might not be available at all in the backend
if not i18n_attr.null and i18n_attr.default is NOT_PROVIDED:
i18n_attr.null = True
if language_code != msgid_language:
# no validation for the fields that are language specific
if not i18n_attr.blank:
i18n_attr.blank = True
if i18n_attr.verbose_name:
i18n_attr.verbose_name = translation.string_concat(
i18n_attr.verbose_name, u' (%s)' % language_code)
cls.add_to_class(lang_attr_name, i18n_attr)
# delete original field
del cls._meta.local_fields[cls._meta.local_fields.index(original_attr)]
# copy some values and functions from the original_attr
# so the field can emulate the original_attr as well as possible
kwargs = {
'serialize': getattr(original_attr, 'serialize', True),
'extra_attrs': getattr(original_attr, 'extra_attrs', None),
'max_length': getattr(original_attr, 'max_length', None),
'min_length': getattr(original_attr, 'min_length', None),
'form_field': original_attr.formfield(
**FORMFIELD_FOR_DBFIELD_DEFAULTS.get(
original_attr.__class__, {})),
'get_internal_type': original_attr.get_internal_type,
'unique': getattr(original_attr, 'unique', False),
'to_python': original_attr.to_python,
}
# copy __serialize__ if it was defined on the original attr
if hasattr(original_attr, '__serialize__'):
kwargs['__serialize__'] = original_attr.__serialize__
# add the DefaultFieldDescriptor where the original_attr was.
cls.add_to_class(field, DefaultFieldDescriptor(field, **kwargs))
# update fields cache
try:
cls._meta._fill_fields_cache()
except AttributeError:
# Django 1.8 removed _fill_fields_cache
cls._meta._expire_cache()
cls._meta._get_fields(reverse=False)
# return the finished product
return cls
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_labels(self):
""" Read the label file of the documents and extract all the labels Returns: An array of labels.Label objects """
|
labels = []
try:
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE),
'r') as file_desc:
for line in file_desc.readlines():
line = line.strip()
(label_name, label_color) = line.split(",", 1)
labels.append(Label(name=label_name,
color=label_color))
except IOError:
pass
return labels
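The on-disk format implied by the parser is one 'name,color' pair per line; a quick standalone check (made-up values):

line = "invoice,#ff0000"
(label_name, label_color) = line.strip().split(",", 1)
assert (label_name, label_color) == ('invoice', '#ff0000')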
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __set_labels(self, labels):
""" Add a label on the document. """
|
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \
as file_desc:
for label in labels:
file_desc.write("%s,%s\n" % (label.name,
label.get_color_str()))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_label(self, old_label, new_label):
""" Update a label Replace 'old_label' by 'new_label' """
|
logger.info("%s : Updating label ([%s] -> [%s])"
% (str(self), old_label.name, new_label.name))
labels = self.labels
try:
labels.remove(old_label)
except ValueError:
# this document doesn't have this label
return
logger.info("%s : Updating label ([%s] -> [%s])"
% (str(self), old_label.name, new_label.name))
labels.append(new_label)
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \
as file_desc:
for label in labels:
file_desc.write("%s,%s\n" % (label.name,
label.get_color_str()))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __doc_cmp(self, other):
""" Comparison function. Can be used to sort docs alphabetically. """
|
if other is None:
return -1
if self.is_new and other.is_new:
return 0
if self.__docid < other.__docid:
return -1
elif self.__docid == other.__docid:
return 0
else:
return 1
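These cmp-style comparators return -1/0/1; on Python 3, where cmp-based sorting was removed, they plug into sorting via functools.cmp_to_key. A generic usage sketch:

from functools import cmp_to_key

def compare(a, b):
    # any -1/0/1 comparator, like __doc_cmp above
    return (a > b) - (a < b)

items = [3, 1, 2]
items.sort(key=cmp_to_key(compare))  # -> [1, 2, 3]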
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def arff_to_orange_table(arff):
'''
Convert a string in arff format to an Orange table.
:param arff: string in arff format
:return: Orange data table object constructed from the arff string
:rtype: orange.ExampleTable
'''
with tempfile.NamedTemporaryFile(suffix='.arff', delete=True) as f:
f.write(arff)
f.flush()
table = orange.ExampleTable(f.name)
return table
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value_to_string(self, obj):
"""This descriptor acts as a Field, as far as the serializer is concerned."""
|
try:
return force_unicode(self.__get__(obj))
except TypeError:
return str(self.__get__(obj))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poify(self, model):
"""turn a django model into a po file."""
|
if not hasattr(model, 'localized_fields'):
return None
# create po stream with header
po_stream = polibext.PoStream(StringIO.StringIO(self.po_header)).parse()
for (name, field) in easymode.tree.introspection.get_default_field_descriptors(model):
occurrence = u"%s.%s.%s" % (model._meta.app_label, model.__class__.__name__, name)
value = field.value_to_string(model)
# only add empty strings
if value != "":
entry = polib.POEntry(msgid=value, occurrences=[(occurrence, model.pk)])
# make sure no duplicate entries in the po_stream
existing_entry = po_stream.find(entry.msgid)
if existing_entry is None:
po_stream.append(entry)
else:
# no really, existing_entry.merge does not merge the occurrences.
existing_entry.occurrences += entry.occurrences
return po_stream
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xgettext(self, template):
"""Extracts to be translated strings from template and turns it into po format."""
|
cmd = 'xgettext -d django -L Python --keyword=gettext_noop \
--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 --from-code=UTF-8 \
--output=- -'
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(msg, err) = p.communicate(input=templatize(template))
if err:
# don't raise an exception, some of what ends up in stderr is just warnings
logging.warning(err)
if XGETTEXT_REENCODES_UTF8:
return msg.decode('utf-8').encode('iso-8859-1')
return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def msgmerge(self, locale_file, po_string):
""" Runs msgmerge on a locale_file and po_string """
|
cmd = "msgmerge -q %s -" % locale_file
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(msg, err) = p.communicate(input=po_string)
if err:
# don't raise an exception, some of what ends up in stderr is just warnings
logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string))
return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def msguniq(self, locale_file):
""" run msgunique on the locale_file """
|
# group related language strings together.
# except if no real entries were written, or the header will be removed.
p = subprocess.Popen('msguniq --to-code=utf-8 %s' % (locale_file,),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
(msg, err) = p.communicate()
if err:
# raise an exception: none of the stuff in stderr is just a warning here
logging.error(err)
try:
err = unicodedata.normalize('NFKD', err.decode('utf-8')).encode('ascii','ignore')
except UnicodeError:
err = "can not decode error message"
raise CommandError(
u"error happened while running msguniq on: %s %s" % \
(locale_file, err)
)
return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_utf8(obj):
"""Walks a simple data structure, converting unicode to byte string. Supports lists, tuples, and dictionaries. """
|
if isinstance(obj, unicode_type):
return _utf8(obj)
elif isinstance(obj, dict):
return dict((to_utf8(k), to_utf8(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
return list(to_utf8(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(to_utf8(i) for i in obj)
return obj
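The helpers unicode_type and _utf8 are referenced but not shown; a plausible minimal definition (an assumption, covering both Python 2 and 3):

import sys

if sys.version_info[0] >= 3:
    unicode_type = str
else:
    unicode_type = unicode  # noqa: F821

def _utf8(s):
    # encode a text string to UTF-8 bytes
    return s.encode('utf-8')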
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setPostScript(self, goal, script):
""" After learning call the given script using 'goal'. :param goal: goal name :param script: prolog script to call """
|
self.postGoal = goal
self.postScript = script
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def induce(self, mode, pos, neg, b, filestem='default', printOutput=False):
""" Induce a theory or features in 'mode'. :param filestem: The base name of this experiment. :param mode: In which mode to induce rules/features. :param pos: String of positive examples. :param neg: String of negative examples. :param b: String of background knowledge. :return: The theory as a string or an arff dataset in induce_features mode. :rtype: str """
|
# Write the inputs to appropriate files.
self.__prepare(filestem, pos, neg, b)
# Make a script to run aleph (with appropriate settings).
self.__script(mode, filestem)
logger.info("Running aleph...")
dumpFile = None
if not printOutput:
dumpFile = tempfile.TemporaryFile()
# Run the aleph script.
p = SafePopen(['yap', '-s50000', '-h200000', '-L', Aleph.SCRIPT],
cwd=self.tmpdir,
stdout=dumpFile,
stderr=dumpFile
).safe_run()
stdout_str, stderr_str = p.communicate()
logger.info("Done.")
result = None
if mode != 'induce_features':
# Return the rules written in the output file.
rules_fn = filestem + Aleph.RULES_SUFFIX
result = open('%s/%s' % (self.tmpdir, rules_fn)).read()
features = None
else:
features_fn = filestem + Aleph.FEATURES_SUFFIX
features = open('%s/%s' % (self.tmpdir, features_fn)).read()
dataset_fn = filestem + Aleph.PROP_DATASET_SUFFIX
pl_dataset = open('%s/%s' % (self.tmpdir, dataset_fn)).read()
result = self.__to_arff(features, pl_dataset, filestem)
# Cleanup.
self.__cleanup()
return (result, features)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __script(self, mode, filestem):
""" Makes the script file to be run by yap. """
|
scriptPath = '%s/%s' % (self.tmpdir, Aleph.SCRIPT)
script = open(scriptPath, 'w')
# Permit the owner to execute and read this script
os.chmod(scriptPath, S_IREAD | S_IEXEC)
cat = lambda x: script.write(x + '\n')
cat(":- initialization(run_aleph).")
cat("run_aleph :- ")
cat("consult(aleph),")
cat("read_all('%s')," % filestem)
# Cat all the non-default settings
for setting, value in self.settings.items():
cat("set(%s, %s)," % (setting, str(value)))
cat("%s," % mode)
eof = ',' if self.postScript else '.'
if mode == 'induce_features':
cat("consult(features),")
features_fn = filestem + Aleph.FEATURES_SUFFIX
dataset_fn = filestem + Aleph.PROP_DATASET_SUFFIX
cat('save_features(%s),' % features_fn)
cat('save_dataset(%s)%s' % (dataset_fn, eof))
else:
rules_fn = filestem + Aleph.RULES_SUFFIX
cat("write_rules('%s')%s" % (rules_fn, eof))
if self.postScript:
cat(self.postGoal + ".")
cat(self.postScript)
script.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raw(request):
"""shows untransformed hierarchical xml output"""
|
foos = foobar_models.Foo.objects.all()
return HttpResponse(tree.xml(foos), mimetype='text/xml')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chain(request):
"""shows how the XmlQuerySetChain can be used instead of @toxml decorator"""
|
bars = foobar_models.Bar.objects.all()
bazs = foobar_models.Baz.objects.all()
qsc = XmlQuerySetChain(bars, bazs)
return HttpResponse(tree.xml(qsc), mimetype='text/xml')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xslt(request):
"""Shows xml output transformed with standard xslt"""
|
foos = foobar_models.Foo.objects.all()
return render_xslt_to_response('xslt/model-to-xml.xsl', foos, mimetype='text/xml')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def label_contours(self, intervals, window=150, hop=30):
""" In a very flowy contour, it is not trivial to say which pitch value corresponds to what interval. This function labels pitch contours with intervals by guessing from the characteristics of the contour and its melodic context. :param window: the size of window over which the context is gauged, in milliseconds. :param hop: hop size in milliseconds. """
|
window /= 1000.0
hop /= 1000.0
exposure = int(window / hop)
boundary = window - hop
final_index = utils.find_nearest_index(self.pitch_obj.timestamps,
self.pitch_obj.timestamps[-1] - boundary)
interval = np.median(np.diff(self.pitch_obj.timestamps))
#interval = 0.00290254832393
window_step = window / interval
hop_step = hop / interval
start_index = 0
end_index = window_step
contour_labels = {}
means = []
while end_index < final_index:
temp = self.pitch_obj.pitch[start_index:end_index][self.pitch_obj.pitch[start_index:end_index] > -10000]
means.append(np.mean(temp))
start_index = start_index + hop_step
end_index = start_index + window_step
for i in xrange(exposure, len(means) - exposure + 1):
_median = np.median(means[i - exposure:i])
if _median < -5000:
continue
ind = utils.find_nearest_index(_median, intervals)
contour_end = (i - exposure) * hop_step + window_step
contour_start = contour_end - hop_step
#print sliceBegin, sliceEnd, JICents[ind]
#newPitch[sliceBegin:sliceEnd] = JICents[ind]
if intervals[ind] in contour_labels.keys():
contour_labels[intervals[ind]].append([contour_start, contour_end])
else:
contour_labels[intervals[ind]] = [[contour_start, contour_end]]
self.contour_labels = contour_labels
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_contour_labels(self, new_fig=True):
""" Plots the labelled contours! """
|
timestamps = []
pitch = []
if new_fig:
p.figure()
for interval, contours in self.contour_labels.items():
for contour in contours:
x = self.pitch_obj.timestamps[contour[0]:contour[1]]
y = [interval]*len(x)
timestamps.extend(x)
pitch.extend(y)
data = np.array([timestamps, pitch]).T
data = np.array(sorted(data, key=lambda xx: xx[0]))
p.plot(data[:, 0], data[:, 1], 'g-')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wordify_example(name_to_table, connecting_tables, context, cached_sentences, index_by_value, target_table_name, word_att_length, data_name, ex, searched_connections):
""" Recursively constructs the 'wordification' document for the given example. :param data: The given examples ExampleTable :param ex: Example for which the document is constructed """
|
debug = False
data_name = str(data_name)
if debug:
print("======================================")
print("example:", ex)
print("table name:", data_name)
print("searched_connections:", len(searched_connections), searched_connections)
print("connecting_tables:", len(connecting_tables[data_name]), connecting_tables[data_name])
ex_pkey_value = data_name in context.pkeys and ex[str(context.pkeys[data_name])]
if not data_name in cached_sentences or not str(ex_pkey_value) in cached_sentences[data_name]:
words = [] # word list for every example
if debug:
print("words:", len(words))
# Construct words (tableName_attributeName_attributeValue) from the given table
for att in name_to_table[data_name].domain.attributes:
if not str(att.name) in context.pkeys[data_name] and not str(att.name) in context.fkeys[data_name]:
words.append(att_to_s(data_name) + "_" + att_to_s(att.name) + "_" + att_to_s(ex[att]))
# Words from pairs of attributes
single_words = words[:]
for comb_length in range(word_att_length + 1):
if comb_length > 1:
words.extend(["__".join(sorted(b)) for b in itertools.combinations(single_words, comb_length)])
# Apply the wordification methodology recursively on all connecting tables
for sec_t_name, sec_fkey, prim_fkey in connecting_tables[data_name]:
sec_t = name_to_table[sec_t_name]
if debug:
print("------------------")
print("(sec_t,sec_fkey,prim):", (sec_t_name, sec_fkey, prim_fkey))
print("search this table:", not (sec_t_name,
sec_fkey) in searched_connections and sec_t_name != target_table_name)
print("search this table:", not prim_fkey or not (data_name,
sec_fkey) in searched_connections) # and sec_t!=self.target_table
if not (sec_t_name, sec_fkey) in searched_connections and sec_t_name != target_table_name and (
not prim_fkey or not (data_name, sec_fkey) in searched_connections):
example_indexes = index_by_value[sec_t_name][str(sec_fkey)][str(ex_pkey_value)] if not prim_fkey else \
index_by_value[sec_t_name][str(prim_fkey)][str(ex[str(sec_fkey)])]
for sec_ex_idx in example_indexes:
words += wordify_example(name_to_table, connecting_tables, context, cached_sentences,
index_by_value, target_table_name, word_att_length, sec_t_name,
sec_t[sec_ex_idx], searched_connections | set(
[(sec_t_name, sec_fkey), prim_fkey and (data_name, prim_fkey)]))
cached_sentences[data_name][str(ex_pkey_value)] = words
return cached_sentences[data_name][str(ex_pkey_value)]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, num_of_processes=multiprocessing.cpu_count()):
""" Applies the wordification methodology on the target table :param num_of_processes: number of processes """
|
# class + wordification on every example of the main table
p = multiprocessing.Pool(num_of_processes)
indices = chunks(list(range(len(self.target_table))), num_of_processes)
for ex_idxs in indices:
self.resulting_documents.extend(wordify_examples(self.name_to_table, self.connecting_tables, self.context,
self.index_by_value, self.target_table.name,
self.word_att_length, ex_idxs))
p.close()
p.join()
for i, ex in enumerate(self.target_table):
self.resulting_classes.append(ex.get_class())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_weights(self, measure='tfidf'):
""" Counts word frequency and calculates tf-idf values for words in every document. :param measure: example weights approach (can be one of ``tfidf, binary, tf``). """
|
from math import log
# TODO replace with scipy matrices (and calculate with scikit)
if measure == 'tfidf':
self.calculate_idf()
for doc_idx, document in enumerate(self.resulting_documents):
train_word_count = defaultdict(int)
self.tf_idfs[doc_idx] = {}
for word in document:
train_word_count[word] += 1
for word in document:
if measure == "binary":
tf = 1
idf = 1
else:
tf = train_word_count[word]
idf = 1 if measure == "tf" else (self.idf[word] if word in self.idf else None)
if idf is not None:
self.tf_idfs[doc_idx][word] = tf * idf
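calculate_idf is referenced but not shown; a plausible minimal version, assuming the usual idf(w) = log(N / df(w)) over the resulting documents:

from collections import defaultdict
from math import log

def calculate_idf(self):
    # document frequency of each word
    df = defaultdict(int)
    for document in self.resulting_documents:
        for word in set(document):
            df[word] += 1
    n_docs = len(self.resulting_documents)
    self.idf = dict((word, log(n_docs / float(count))) for word, count in df.items())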
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def to_arff(self):
'''
Returns the "wordified" representation in ARFF.
:rtype: str
'''
arff_string = "@RELATION " + self.target_table.name + "\n\n"
words = set()
for document in self.resulting_documents:
for word in document:
words.add(word)
words = sorted(words)
for i, word in enumerate(words):
arff_string += "@ATTRIBUTE '" + word.replace("'", "") + "' REAL\n"
arff_string += "@ATTRIBUTE class {" + ','.join(set([str(a) for a in self.resulting_classes])) + "}\n\n@DATA\n"
self.word_features = []
for doc_idx in range(len(self.resulting_documents)):
features = []
for word in words:
if word not in self.word_features:
self.word_features.append(word)
if word in self.tf_idfs[doc_idx]:
features.append(str(self.tf_idfs[doc_idx][word]))
else:
features.append("0")
features.append(str(self.resulting_classes[doc_idx]))
arff_string += ','.join(features)
arff_string += "\n"
return arff_string
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prune(self, minimum_word_frequency_percentage=1):
""" Filter out words that occur less than minimum_word_frequency times. :param minimum_word_frequency_percentage: minimum frequency of words to keep """
|
pruned_resulting_documents = []
for document in self.resulting_documents:
new_document = []
for word in document:
if self.word_in_how_many_documents[word] >= minimum_word_frequency_percentage / 100. * len(
self.resulting_documents):
new_document.append(word)
pruned_resulting_documents.append(new_document)
self.resulting_documents = pruned_resulting_documents
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wordify(self):
""" Constructs string of all documents. :return: document representation of the dataset, one line per document :rtype: str """
|
string_documents = []
for klass, document in zip(self.resulting_classes, self.resulting_documents):
string_documents.append("!" + str(klass) + " " + ' '.join(document))
return '\n'.join(string_documents)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def binary_value_or_stdin(value):
""" Return fsencoded value or read raw data from stdin if value is None. """
|
if value is None:
reader = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
return reader.read()
elif six.PY3:
return os.fsencode(value)
else:
return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __label_cmp(self, other):
""" Comparaison function. Can be used to sort labels alphabetically. """
|
if other is None:
return -1
label_name = strip_accents(self.name).lower()
other_name = strip_accents(other.name).lower()
if label_name < other_name:
return -1
elif label_name > other_name:
return 1
# names are equal: compare colors to break the tie
if self.get_color_str() < other.get_color_str():
return -1
elif self.get_color_str() == other.get_color_str():
return 0
else:
return 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_html_color(self):
""" get a string representing the color, using HTML notation """
|
color = self.color
return ("#%02x%02x%02x" % (
int(color.red), int(color.green), int(color.blue)
))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forget(self, label_name):
""" Forget training for label 'label_name' """
|
self._bayes.pop(label_name)
baye_dir = self._get_baye_dir(label_name)
logger.info("Deleting label training {} : {}".format(
label_name, baye_dir
))
rm_rf(baye_dir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename(self, old_label_name, new_label_name):
""" Take into account that a label has been renamed """
|
assert(old_label_name != new_label_name)
self._bayes.pop(old_label_name)
old_baye_dir = self._get_baye_dir(old_label_name)
new_baye_dir = self._get_baye_dir(new_label_name)
logger.info("Renaming label training {} -> {} : {} -> {}".format(
old_label_name, new_label_name, old_baye_dir, new_baye_dir
))
os.rename(old_baye_dir, new_baye_dir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_filepath(self, ext):
""" Returns a file path relative to this page """
|
filename = ("%s%d.%s" % (self.FILE_PREFIX, self.page_nb + 1, ext))
return self.fs.join(self.doc.path, filename)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __make_thumbnail(self, width, height):
""" Create the page's thumbnail """
|
(w, h) = self.size
factor = max(
(float(w) / width),
(float(h) / height)
)
w /= factor
h /= factor
return self.get_image((round(w), round(h)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_thumbnail(self, width, height):
""" thumbnail with a memory cache """
|
# get from the file
thumb_path = self._get_thumb_path()
try:
doc_file_path = self.get_doc_file_path()
if (self.fs.exists(thumb_path) and
self.fs.getmtime(doc_file_path) <
self.fs.getmtime(thumb_path)):
with self.fs.open(thumb_path, 'rb') as fd:
thumbnail = PIL.Image.open(fd)
thumbnail.load()
if thumbnail.size[0] == width or thumbnail.size[1] == height:
# fills the specified area
return thumbnail
logger.warning(
"[%s] Unexpected thumbnail size: %s instead of %s ;"
" Updating thumbnail ...",
str(self.doc.docid), str(thumbnail.size),
str((width, height))
)
except Exception as exc:
logger.warning(
"[%s] Failed to check doc and thumbnail mdate. Forcing update"
" of the thumbnail", str(self.doc.docid), exc_info=exc
)
logger.info("[%s] Updating thumbnail ...", str(self.doc.docid))
thumbnail = self.__make_thumbnail(width, height)
with self.fs.open(thumb_path, 'wb') as fd:
thumbnail.save(fd, format="JPEG")
return thumbnail
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_keywords(self):
""" Get all the keywords related of this page Returns: An array of strings """
|
txt = self.text
for line in txt:
for word in split_words(line):
yield(word)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_accents(string):
""" Strip all the accents from the string """
|
return u''.join(
(character for character in unicodedata.normalize('NFD', string)
if unicodedata.category(character) != 'Mn'))
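A quick usage check: NFD decomposition splits base characters from combining marks (category 'Mn'), which are then dropped:

assert strip_accents(u'déjà vu') == u'deja vu'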
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rm_rf(path):
""" Act as 'rm -rf' in the shell """
|
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=False):
for filename in files:
filepath = os.path.join(root, filename)
logger.info("Deleting file %s" % filepath)
os.unlink(filepath)
for dirname in dirs:
dirpath = os.path.join(root, dirname)
if os.path.islink(dirpath):
logger.info("Deleting link %s" % dirpath)
os.unlink(dirpath)
else:
logger.info("Deleting dir %s" % dirpath)
os.rmdir(dirpath)
logger.info("Deleting dir %s", path)
os.rmdir(path)
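As a design note: when the per-file logging is not needed, the standard library offers an equivalent in one call (not part of the original code):

import shutil

shutil.rmtree(path)  # removes a directory tree; use os.unlink(path) for a single file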
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def surface2image(surface):
""" Convert a cairo surface into a PIL image """
|
# TODO(Jflesch): Python 3 problem
# cairo.ImageSurface.get_data() raises NotImplementedYet ...
# import PIL.ImageDraw
#
# if surface is None:
# return None
# dimension = (surface.get_width(), surface.get_height())
# img = PIL.Image.frombuffer("RGBA", dimension,
# surface.get_data(), "raw", "BGRA", 0, 1)
#
# background = PIL.Image.new("RGB", img.size, (255, 255, 255))
# background.paste(img, mask=img.split()[3]) # 3 is the alpha channel
# return background
global g_lock
with g_lock:
img_io = io.BytesIO()
surface.write_to_png(img_io)
img_io.seek(0)
img = PIL.Image.open(img_io)
img.load()
if "A" not in img.getbands():
return img
img_no_alpha = PIL.Image.new("RGB", img.size, (255, 255, 255))
img_no_alpha.paste(img, mask=img.split()[3]) # 3 is the alpha channel
return img_no_alpha
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image2surface(img):
""" Convert a PIL image into a Cairo surface """
|
if not CAIRO_AVAILABLE:
raise Exception("Cairo not available(). image2surface() cannot work.")
# TODO(Jflesch): Python 3 problem
# cairo.ImageSurface.create_for_data() raises NotImplementedYet ...
# img.putalpha(256)
# (width, height) = img.size
# imgd = img.tobytes('raw', 'BGRA')
# imga = array.array('B', imgd)
# stride = width * 4
# return cairo.ImageSurface.create_for_data(
# imga, cairo.FORMAT_ARGB32, width, height, stride)
# So we fall back to this method:
global g_lock
with g_lock:
img_io = io.BytesIO()
img.save(img_io, format="PNG")
img_io.seek(0)
return cairo.ImageSurface.create_from_png(img_io)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def domain_map(features, feature_format, train_context, test_context,
intervals={},
format='arff',
positive_class=None):
'''
Use the features returned by a propositionalization method to map
unseen test examples into the new feature space.
:param features: string of features as returned by rsd, aleph or treeliker
:param feature_format: 'rsd', 'aleph', 'treeliker'
:param train_context: DBContext with training examples
:param test_context: DBContext with test examples
:param intervals: discretization intervals (optional)
:param format: output format (only arff is used atm)
:param positive_class: required for aleph
:return: returns the test examples in propositional form
:rtype: str
:Example:
>>> test_arff = mapper.domain_map(features, 'rsd', train_context, test_context)
'''
dataset = None
if feature_format in ['rsd', 'aleph']:
train_rsd = RSDConverter(train_context)
test_rsd = RSDConverter(test_context, discr_intervals=intervals)
mapper_target_name = train_context.target_table + '_mapper'
train_examples = train_rsd.all_examples(pred_name=mapper_target_name)
test_examples = test_rsd.all_examples(pred_name=mapper_target_name)
if feature_format == 'aleph':
features = aleph_to_rsd_features(features)
prolog_bk = '\n'.join([
_example_ids('testExampleIDs', test_examples),
'%% test examples',
test_examples,
'%% train examples',
train_examples,
'%% train background knowledge',
train_rsd.background_knowledge(),
'%% test background knowledge',
test_rsd.background_knowledge(),
_feature_numbers(features),
'%% features',
features,
])
THIS_DIR = os.path.dirname(__file__) if os.path.dirname(__file__) else '.'
f = tempfile.NamedTemporaryFile(delete=False, mode='w')
f.write(prolog_bk)
f.close()
cmd_args = ['yap', '-L', '--', '%s/mapper.pl' % THIS_DIR, f.name, mapper_target_name]
evaluations = subprocess.check_output(cmd_args).decode()
dataset = dump_dataset(features, feature_format, evaluations,
train_context,
format=format,
positive_class=positive_class)
# Cleanup
os.remove(f.name)
elif feature_format == 'treeliker':
# We provide treeliker with the test dataset
# since it has a built-in ability to evaluate features
treeliker_test = TreeLikerConverter(test_context,
discr_intervals=intervals)
treeliker = features
treeliker.test_dataset = treeliker_test.dataset()
_, test_dataset = treeliker.run()
if format == 'arff':
dataset = test_dataset
else:
return 'unsupported format'
return dataset
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_text(self):
""" Get the text corresponding to this page """
|
boxes = self.boxes
txt = []
for line in boxes:
txt_line = u""
for box in line.word_boxes:
txt_line += u" " + box.content
txt.append(txt_line)
return txt
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_img(self):
""" Returns an image object corresponding to the page """
|
with self.fs.open(self.__img_path, 'rb') as fd:
img = PIL.Image.open(fd)
img.load()
return img
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def change_index(self, offset=0):
""" Move the page number by a given offset. Beware to not let any hole in the page numbers when doing this. Make sure also that the wanted number is available. Will also change the page number of the current object. """
|
src = {}
src["box"] = self.__get_box_path()
src["img"] = self.__get_img_path()
src["thumb"] = self._get_thumb_path()
page_nb = self.page_nb
page_nb += offset
logger.info("--> Moving page %d (+%d) to index %d"
% (self.page_nb, offset, page_nb))
self.page_nb = page_nb
dst = {}
dst["box"] = self.__get_box_path()
dst["img"] = self.__get_img_path()
dst["thumb"] = self._get_thumb_path()
for key in src.keys():
if self.fs.exists(src[key]):
if self.fs.exists(dst[key]):
logger.error("Error: file already exists: %s" % dst[key])
assert(0)
self.fs.rename(src[key], dst[key])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy(self):
""" Delete the page. May delete the whole document if it's actually the last page. """
|
logger.info("Destroying page: %s" % self)
if self.doc.nb_pages <= 1:
self.doc.destroy()
return
doc_pages = self.doc.pages[:]
current_doc_nb_pages = self.doc.nb_pages
paths = [
self.__get_box_path(),
self.__get_img_path(),
self._get_thumb_path(),
]
for path in paths:
if self.fs.exists(path):
self.fs.unlink(path)
for page_nb in range(self.page_nb + 1, current_doc_nb_pages):
page = doc_pages[page_nb]
page.change_index(offset=-1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pclink(self, parent, child):
"""Create a parent-child relationship."""
|
if parent._children is None:
parent._children = set()
if child._parents is None:
child._parents = set()
parent._children.add(child)
child._parents.add(parent)
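A runnable miniature of the relationship logic (Entity is hypothetical; pclink is restated as a free function for the sketch):

class Entity(object):
    _children = None
    _parents = None

def pclink(parent, child):
    # same logic as the method above, as a free function
    if parent._children is None:
        parent._children = set()
    if child._parents is None:
        child._parents = set()
    parent._children.add(child)
    child._parents.add(parent)

route, trip = Entity(), Entity()
pclink(route, trip)
assert route._children == {trip} and trip._parents == {route}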
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discretize(self, intervals, slope_thresh=1500, cents_thresh=50):
""" This function takes the pitch data and returns it quantized to given set of intervals. All transactions must happen in cent scale. slope_thresh is the bound beyond which the pitch contour is said to transit from one svara to another. It is specified in cents/sec. cents_thresh is a limit within which two pitch values are considered the same. This is what pushes the quantization limit. The function returns quantized pitch data. """
|
#eps = np.finfo(float).eps
#pitch = median_filter(pitch, 7)+eps
self.pitch = median_filter(self.pitch, 7)
pitch_quantized = np.zeros(len(self.pitch))
pitch_quantized[0] = utils.find_nearest_index(intervals, self.pitch[0])
pitch_quantized[-1] = utils.find_nearest_index(intervals, self.pitch[-1])
for i in xrange(1, len(self.pitch)-1):
if self.pitch[i] == -10000:
pitch_quantized[i] = -10000
continue
slope_back = abs((self.pitch[i] - self.pitch[i-1])/(self.timestamps[i] - self.timestamps[i-1]))
slope_front = abs((self.pitch[i+1] - self.pitch[i])/(self.timestamps[i+1] - self.timestamps[i]))
if slope_front < slope_thresh or slope_back < slope_thresh:
ind = utils.find_nearest_index(intervals, self.pitch[i])
cents_diff = abs(self.pitch[i] - intervals[ind])
if cents_diff <= cents_thresh:
pitch_quantized[i] = intervals[ind]
else:
pitch_quantized[i] = -10000
else:
pitch_quantized[i] = -10000
self.pitch = pitch_quantized
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assume(self, other):
""" Assume the identity of another target. This can be useful to make the global target assume the identity of an ELF executable. Arguments: other(:class:`Target`):
The target whose identity to assume. Example: """
|
self._arch = other._arch
self._bits = other._bits
self._endian = other._endian
self._mode = other._mode
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_read_permission(self, request, path):
""" Just return True if the user is an authenticated staff member. Extensions could base the permissions on the path too. """
|
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user.is_staff:
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def rows(self, table, cols):
'''
Fetches rows from the local cache or from the db if there's no cache.
:param table: table name to select
:cols: list of columns to select
:return: list of rows
:rtype: list
'''
if self.orng_tables:
data = []
for ex in self.orng_tables[table]:
data.append([ex[str(col)] for col in cols])
return data
else:
return self.fetch(table, cols)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def select_where(self, table, cols, pk_att, pk):
'''
SELECT with WHERE clause.
:param table: target table
:param cols: list of columns to select
:param pk_att: attribute for the where clause
:param pk: the id that the pk_att should match
:return: rows from the given table and cols, with the condition pk_att==pk
:rtype: list
'''
if self.orng_tables:
data = []
for ex in self.orng_tables[table]:
if str(ex[str(pk_att)]) == str(pk):
data.append([ex[str(col)] for col in cols])
return data
else:
return self.src.select_where(table, cols, pk_att, pk)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode(string):
""" Encode the given string as an OID. '5.104.101.108.108.111' """
|
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self,oid):
"""Return snmp value for the given OID."""
|
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next(self,oid):
"""Return snmp value for the next OID."""
|
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2]
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_first(self):
"""Return snmp value for the first OID."""
|
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut_oid(self,full_oid):
""" Remove the base OID from the given string. '28.12' """
|
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
|
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
|
self.add_oid_entry(oid,'OBJECTID',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
|
self.add_oid_entry(oid,'INTEGER',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
|
self.add_oid_entry(oid,'OCTET',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
|
self.add_oid_entry(oid,'STRING',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
|
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
|
# Truncate integer to 32 bits max (modulo 2**32)
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
|
# Truncate integer to 64 bits max (modulo 2**64)
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551616,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
|
self.add_oid_entry(oid,'GAUGE',value,label=label)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
|
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
|
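Taken together, the typed helpers above stage entries until the next commit. A minimal sketch, assuming an instance named pp (hypothetical):

pp.add_str('1.0', 'my-appliance', label='sysName')   # OCTET STRING
pp.add_int('2.0', 42)                                # INTEGER
pp.add_gau('3.0', 17)                                # GAUGE
pp.add_cnt_32bit('4.0', 5000000000)                  # stored modulo 2**32
pp.add_tt('5.0', 12345)                              # TIMETICKS
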
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main_passpersist(self):
""" Main function that handle SNMP's pass_persist protocol, called by the start method. Direct call is unnecessary. """
|
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debugging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
|
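For reference, a hedged sketch of the exchange this loop implements (snmpd writes requests on stdin, the helper answers on stdout; the exact response formatting lives in get()/get_next(), and the OIDs below are hypothetical):

    PING                       ->  PONG
    get                        ->  output of self.get('28.12')
    .1.3.6.1.3.53.8.28.12
    getnext                    ->  output of self.get_first() (empty suffix after cut_oid)
    .1.3.6.1.3.53.8
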
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main_update(self):
""" Main function called by the updater thread. Direct call is unnecessary. """
|
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError:
pass # os.nice is not available on Windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in background threads,
# and alarm() would mess up readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit changes exactly every 'refresh' seconds, however long update() takes.
# Committed values are a bit stale, but for RRD, regularly spaced values
# are better than fresh-but-irregularly-timed ones.
self.commit()
except Exception as e:
self.error=e
raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_setter(self, oid):
""" Retrieve the nearest parent setter function for an OID """
|
if oid in self.setter:
    return self.setter[oid]
# Fall back to the setter registered for the longest (i.e. nearest parent) OID prefix.
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
    return self.setter[max(parents, key=len)]
return self.default_setter
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, oid, typevalue):
""" Call the default or user setter function if available """
|
success = False
type_ = typevalue.split()[0]
# lstrip() strips a *set* of characters, not a prefix; slice off the type token instead.
value = typevalue[len(type_):].strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
|
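A hedged sketch of wiring a write handler into the setter mapping consulted by get_setter() above (the registration style is an assumption; only the dict lookup appears in this code):

def allow_write(oid, type_, value):
    # Accept the write; returning True makes set() report 'DONE' to snmpd.
    return True

pp.setter['28.'] = allow_write   # handles every OID under the hypothetical '28.' branch
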
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self, user_func, refresh):
""" Start the SNMP's protocol handler and the updater thread user_func is a reference to an update function, ran every 'refresh' seconds. """
|
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
# Do not serve data if the Updater thread has died. The updater is a daemon
# thread, so it exits with the process if main_passpersist() raises.
# (isAlive() and Thread._Thread__stop were removed in Python 3.)
while up.is_alive():
    self.main_passpersist()
|
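A minimal end-to-end sketch, assuming an instance pp of the surrounding class (its name and constructor are not shown here and are assumptions):

import random

def update():
    # Called by the updater thread every 'refresh' seconds.
    pp.add_int('0.1', random.randint(0, 100), label='demoValue')

pp.start(update, 30)   # serve the subtree, refreshing every 30 seconds
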
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_members_of_type(obj, member_type):
""" Finds members of a certain type in obj. :param obj: A model instance or class. :param member_type: The type of the menber we are trying to find. :rtype: A :class:`list` of ``member_type`` found in ``obj`` """
|
if not issubclass(type(obj), ModelBase):
obj = obj.__class__
key_hash = []
for key in dir(obj):
try:
attr = getattr(obj, key)
except AttributeError as e:
try:
attr = obj.__dict__[key]
except KeyError:
raise AttributeError(INTROSPECTION_ERROR % (e, obj, member_type))
if type(attr) is member_type:
key_hash.append((key, attr))
return key_hash
|
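The function targets Django models, but a hedged sketch with plain types shows the lookup behavior (Config is hypothetical):

class Config(object):
    timeout = 30
    retries = 3
    name = 'demo'

# -> [('retries', 3), ('timeout', 30)]  (order follows dir())
int_members = _get_members_of_type(Config(), int)
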
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next(self):
""" Provide the next element of the list. """
|
if self.idx >= len(self.page_list):
raise StopIteration()
page = self.page_list[self.idx]
self.idx += 1
return page
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_nb_pages(self):
""" Compute the number of pages in the document. It basically counts how many JPG files there are in the document. """
|
try:
filelist = self.fs.listdir(self.path)
count = 0
for filepath in filelist:
filename = self.fs.basename(filepath)
if (filename[-4:].lower() != "." + ImgPage.EXT_IMG or
(filename[-10:].lower() == "." + ImgPage.EXT_THUMB) or
(filename[:len(ImgPage.FILE_PREFIX)].lower() !=
ImgPage.FILE_PREFIX)):
continue
count += 1
return count
except IOError as exc:
logger.debug("Exception while trying to get the number of"
" pages of '%s': %s", self.docid, exc)
return 0
except OSError as exc:
if exc.errno != errno.ENOENT:
logger.error("Exception while trying to get the number of"
" pages of '%s': %s", self.docid, exc)
raise
return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def steal_page(self, page):
""" Steal a page from another document """
|
if page.doc == self:
return
self.fs.mkdir_p(self.path)
new_page = ImgPage(self, self.nb_pages)
logger.info("%s --> %s" % (str(page), str(new_page)))
new_page._steal_content(page)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recursion_depth(key):
""" A context manager used to guard recursion depth for some function. Multiple functions can be kept separately because it will be counted per key. Any exceptions raise in the recursive function will reset the counter, because the stack will be unwinded. usage:: with recursion_depth('some_function_name') as recursion_level: if recursion_level > getattr(settings, 'RECURSION_LIMIT', sys.getrecursionlimit() / 10):
raise Exception("Too deep") # do some recursive dangerous things. :param key: The key under which the recursion depth is kept. """
|
try:
    # Use the 'key' argument so each caller gets its own counter (the
    # original hard-coded the attribute name 'key', collapsing all callers
    # onto a single shared counter).
    level = getattr(RECURSION_LEVEL_DICT, key, 0) + 1
    setattr(RECURSION_LEVEL_DICT, key, level)
    yield level
    setattr(RECURSION_LEVEL_DICT, key, level - 1)
except Exception:
    setattr(RECURSION_LEVEL_DICT, key, 0)
    raise
|
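A hedged usage sketch with a hypothetical recursive walker and an arbitrary depth cap:

def walk(node, visit):
    with recursion_depth('walk') as level:
        if level > 100:   # arbitrary limit for the sketch
            raise RuntimeError('walk(): recursion too deep')
        visit(node)
        for child in getattr(node, 'children', []):
            walk(child, visit)
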
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first_match(predicate, lst):
""" returns the first value of predicate applied to list, which does not return None 4 :param predicate: a function that returns None or a value. :param list: A list of items that can serve as input to ``predicate``. :rtype: whatever ``predicate`` returns instead of None. (or None). """
|
for item in lst:
val = predicate(item)
if val is not None:
return val
return None
|
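For example:

# Returns 3: the first item for which the predicate yields a non-None value.
first_match(lambda x: x if x > 2 else None, [1, 2, 3, 4])
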
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bases_walker(cls):
""" Loop through all bases of cls True True :param cls: The class in which we want to loop through the base classes. """
|
for base in cls.__bases__:
yield base
for more in bases_walker(base):
yield more
|
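A small check of the generator above:

class A(object): pass
class B(A): pass

# Walks the full inheritance chain of B.
[b.__name__ for b in bases_walker(B)]   # -> ['A', 'object']
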
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_add_params(url, **kwargs):
""" Add parameters to an url 'http://example.com/?a=1&b=3' 'http://example.com/?c=8&a=1&b=3' 'http://example.com/?a=1&b=3#/irock' 'http://example.com/?id=10&a=1&b=3#/irock' """
|
parsed_url = urlparse.urlsplit(url)
params = urlparse.parse_qsl(parsed_url.query)
parsed_url = list(parsed_url)
for pair in kwargs.items():  # items() works on both Python 2 and 3
params.append(pair)
parsed_url[3] = urllib.urlencode(params)
return urlparse.urlunsplit(parsed_url)
|
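Reconstructing the docstring's example results as calls (a sketch; the original call lines were lost, and keyword-argument ordering is only guaranteed on recent Pythons):

url_add_params('http://example.com/', a=1, b=3)
# -> 'http://example.com/?a=1&b=3'
url_add_params('http://example.com/?c=8', a=1, b=3)
# -> 'http://example.com/?c=8&a=1&b=3'
url_add_params('http://example.com/?id=10#/irock', a=1, b=3)
# -> 'http://example.com/?id=10&a=1&b=3#/irock'
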
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def run(self, cleanup=True, printOutput=False):
'''
Runs TreeLiker with the given settings.
:param cleanup: deletes temporary files after completion
:param printOutput: print algorithm output to the terminal
'''
self._copy_data()
self._batch()
dumpFile = None
if not printOutput:
dumpFile = tempfile.TemporaryFile()
p = Popen(['java', '-Xmx3G', '-cp', 'bin/TreeLiker.jar',
'ida.ilp.treeLiker.TreeLikerMain', '-batch', self.batch],
cwd=self.tmpdir,
stdout=dumpFile,
stderr=dumpFile)
stdout_str, stderr_str = p.communicate()
if not self.test_dataset:
arff = open('%s/%s.arff' % (self.tmpdir, self.basename)).read()
arff_test = None
else:
arff = open('%s/conversion/train.arff' % self.tmpdir).read()
arff_test = open('%s/conversion/test.arff' % self.tmpdir).read()
if cleanup:
self._cleanup()
return (arff, arff_test)
|
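A hedged driver for the method above, assuming the wrapper class is named TreeLiker and takes the dataset and template at construction (neither is shown here):

treeliker = TreeLiker('muta.txt', 'tree_template')   # names are assumptions
arff_train, arff_test = treeliker.run(cleanup=True, printOutput=False)
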
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _batch(self):
'''
Creates the batch file to run the experiment.
'''
self.batch = '%s/%s.treeliker' % (self.tmpdir, self.basename)
commands = []
if not self.test_dataset:
commands.append('set(output_type, single)')
commands.append("set(examples, '%s.txt')" % self.basename)
else:
commands.append('set(output_type, train_test)')
commands.append("set(train_set, '%s.txt')" % self.basename)
commands.append("set(test_set, '%s_test.txt')" % self.basename)
commands.append('set(template, %s)' % self.template)
if not self.test_dataset:
commands.append('set(output, %s.arff)' % self.basename)
else:
commands.append('set(output, conversion)')
# Optional settings
for key, val in self.settings.items():
if val not in [None, '']:
commands.append('set(%s, %s)' % (key, str(val)))
commands.append('work(yes)')
with open(self.batch, 'w') as f:
f.write('\n'.join(commands))
|
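Derived from the commands appended above, the generated batch file for a train/test split would look roughly like this (the basename 'muta' and the template value are placeholders):

set(output_type, train_test)
set(train_set, 'muta.txt')
set(test_set, 'muta_test.txt')
set(template, [atm(-a), bond(+a, -a, -b)])
set(output, conversion)
work(yes)
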
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterread(self, table):
"""Iteratively read data from a GTFS table. Returns namedtuples."""
|
self.log('Reading: %s'%table)
# Entity class
cls = self.FACTORIES[table]
f = self._open(table)
# csv reader
if unicodecsv:
data = unicodecsv.reader(f, encoding='utf-8-sig')
else:
data = csv.reader(f)
header = next(data)  # works with both csv and unicodecsv readers
headerlen = len(header)
ent = collections.namedtuple(
'EntityNamedTuple',
map(str, header)
)
for row in data:
if len(row) == 0:
continue
# Get rid of extra spaces.
row = [i.strip() for i in row]
# pad to length if necessary... :(
if len(row) < headerlen:
row += ['']*(headerlen-len(row))
yield cls.from_row(ent._make(row), self)
f.close()
|
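A hedged usage sketch, streaming one table without loading the whole feed ('feed' and 'process' are hypothetical names):

for stop in feed.iterread('stops'):
    process(stop)   # one entity per non-empty CSV row
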
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, filename, entities, sortkey=None, columns=None):
"""Write entities out to filename in csv format. Note: this doesn't write directly into a Zip archive, because this behavior is difficult to achieve with Zip archives. Use make_zip() to create a new GTFS Zip archive. """
|
if os.path.exists(filename):
raise IOError('File exists: %s'%filename)
# Make sure we have all the entities loaded.
if sortkey:
entities = sorted(entities, key=lambda x:x[sortkey])
if not columns:
columns = set()
for entity in entities:
columns |= set(entity.keys())
columns = sorted(columns)
# Write the csv file
with open(filename, 'wb') as f:
writer = unicodecsv.writer(f) # , encoding='utf-8-sig'
writer.writerow(columns)
for entity in entities:
writer.writerow([entity.get(column) for column in columns])
|
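A hedged round-trip sketch, re-exporting one table sorted by its key ('feed' is a hypothetical reader instance; read() appears elsewhere in this class):

feed.write('stops.txt', feed.read('stops'), sortkey='stop_id')
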
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_zip(self, filename, files=None, path=None, clone=None, compress=True):
"""Create a Zip archive. Provide any of the following: files - A list of files path - A directory of .txt files clone - Copy any files from a zip archive not specified above Duplicate files will be ignored. The 'files' argument will be used first, then files found in the specified 'path', then in the specified 'clone' archive. """
|
if filename and os.path.exists(filename):
raise IOError('File exists: %s'%filename)
files = files or []
arcnames = []
if path and os.path.isdir(path):
files += glob.glob(os.path.join(path, '*.txt'))
if compress:
compress_level = zipfile.ZIP_DEFLATED
else:
compress_level = zipfile.ZIP_STORED
# Write files.
self.log("Creating zip archive: %s"%filename)
zf = zipfile.ZipFile(filename, 'a', compression=compress_level)
for f in files:
base = os.path.basename(f)
if base in arcnames:
self.log('... skipping: %s'%f)
else:
self.log('... adding: %s'%f)
arcnames.append(base)
zf.write(f, base)
# Clone from existing zip archive.
if clone and os.path.exists(clone):
zc = zipfile.ZipFile(clone)
for f in zc.namelist():
base = os.path.basename(f)
if os.path.splitext(base)[-1] != '.txt':
pass
# self.log('... skipping from clone: %s'%f)
elif base in arcnames:
self.log('... skipping from clone: %s'%f)
else:
self.log('... adding from clone: %s'%f)
arcnames.append(base)
with zc.open(f) as i:
data = i.read()
zf.writestr(base, data)
zf.close()
|
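A hedged sketch of rebuilding a feed archive: bundle the edited tables from a directory and clone the untouched ones from the source zip (all paths are placeholders):

feed.make_zip('gtfs-new.zip', path='exported/', clone='gtfs-orig.zip')
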
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shapes(self):
"""Return the route shapes as a dictionary."""
|
# Todo: Cache?
if self._shapes:
return self._shapes
# Group together by shape_id
self.log("Generating shapes...")
ret = collections.defaultdict(entities.ShapeLine)
for point in self.read('shapes'):
ret[point['shape_id']].add_child(point)
self._shapes = ret
return self._shapes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, validator=None, skip_relations=False):
"""Validate a GTFS :param validator: a ValidationReport :param (bool) skip_relations: skip validation of relations between entities (e.g. stop_times to stops) :return: """
|
validator = validation.make_validator(validator)
self.log('Loading...')
self.preload()
# required
required = [
'agency',
'stops',
'routes',
'trips',
'stop_times',
'calendar'
]
for f in required:
self.log("Validating required file: %s"%f)
data = self.read(f)
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
# optional
optional = [
'calendar_dates',
'fare_attributes',
'fare_rules',
'shapes',
'frequencies',
'transfers',
'feed_info'
]
for f in optional:
self.log("Validating optional file: %s"%f)
try:
data = self.read(f)
except KeyError:
data = []
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
return validator
|
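A hedged closing sketch of the method above ('feed' is a hypothetical instance):

report = feed.validate()                      # full check, returns the report
report = feed.validate(skip_relations=True)   # per-file checks only
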