<SYSTEM_TASK:>
Create a summary string for the accessible fields in a model. Unlike
<END_TASK>
<USER_TASK:>
Description:
def _summarize_accessible_fields(field_descriptions, width=40,
section_title='Accessible fields'):
"""
Create a summary string for the accessible fields in a model. Unlike
`_toolkit_repr_print`, this function does not look up the values of the
    fields; it just formats the names and descriptions.
Parameters
----------
field_descriptions : dict{str: str}
Name of each field and its description, in a dictionary. Keys and
values should be strings.
width : int, optional
Width of the names. This is usually determined and passed by the
calling `__repr__` method.
section_title : str, optional
Name of the accessible fields section in the summary string.
Returns
-------
out : str
""" |
key_str = "{:<{}}: {}"
items = []
items.append(section_title)
items.append("-" * len(section_title))
for field_name, field_desc in field_descriptions.items():
items.append(key_str.format(field_name, width, field_desc))
return "\n".join(items) |
<SYSTEM_TASK:>
Returns true if datatype_instance is a valid datatype object and false otherwise.
<END_TASK>
<USER_TASK:>
Description:
def _is_valid_datatype(datatype_instance):
"""
Returns true if datatype_instance is a valid datatype object and false otherwise.
""" |
# Remap so we can still use the python types for the simple cases
global _simple_type_remap
if datatype_instance in _simple_type_remap:
return True
# Now set the protobuf from this interface.
if isinstance(datatype_instance, (Int64, Double, String, Array)):
return True
elif isinstance(datatype_instance, Dictionary):
kt = datatype_instance.key_type
if isinstance(kt, (Int64, String)):
return True
return False |
<SYSTEM_TASK:>
Translates a user-specified datatype to an instance of the ones defined above.
<END_TASK>
<USER_TASK:>
Description:
def _normalize_datatype(datatype_instance):
"""
    Translates a user-specified datatype to an instance of the ones defined above.
Valid data types are passed through, and the following type specifications
are translated to the proper instances:
str, "String" -> String()
int, "Int64" -> Int64()
float, "Double" -> Double()
If a data type is not recognized, then an error is raised.
""" |
global _simple_type_remap
if datatype_instance in _simple_type_remap:
return _simple_type_remap[datatype_instance]
# Now set the protobuf from this interface.
if isinstance(datatype_instance, (Int64, Double, String, Array)):
return datatype_instance
elif isinstance(datatype_instance, Dictionary):
kt = datatype_instance.key_type
if isinstance(kt, (Int64, String)):
return datatype_instance
raise ValueError("Datatype instance not recognized.") |
<SYSTEM_TASK:>
Given a list of objects, reorder them so that the constraints specified
<END_TASK>
<USER_TASK:>
Description:
def order (self, objects):
""" Given a list of objects, reorder them so that the constains specified
by 'add_pair' are satisfied.
The algorithm was adopted from an awk script by Nikita Youshchenko
(yoush at cs dot msu dot su)
""" |
# The algorithm used is the same is standard transitive closure,
# except that we're not keeping in-degree for all vertices, but
# rather removing edges.
result = []
if not objects:
return result
constraints = self.__eliminate_unused_constraits (objects)
# Find some library that nobody depends upon and add it to
# the 'result' array.
obj = None
while objects:
new_objects = []
while objects:
obj = objects [0]
if self.__has_no_dependents (obj, constraints):
# Emulate break ;
new_objects.extend (objects [1:])
objects = []
else:
new_objects.append (obj)
obj = None
objects = objects [1:]
if not obj:
raise BaseException ("Circular order dependencies")
# No problem with placing first.
result.append (obj)
            # Remove all constraints where 'obj' comes first,
# since they are already satisfied.
constraints = self.__remove_satisfied (constraints, obj)
# Add the remaining objects for further processing
# on the next iteration
objects = new_objects
return result |
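A hedged usage sketch, assuming the class exposes an `add_pair(first, second)` method that appends a (first, second) constraint to `constraints_` (that method is not shown in this entry):

    o = Order()                  # hypothetical class name
    o.add_pair('libA', 'libB')   # libA must precede libB
    o.add_pair('libB', 'libC')
    print o.order(['libC', 'libA', 'libB'])
    # -> ['libA', 'libB', 'libC']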
<SYSTEM_TASK:>
Eliminate constraints which mention objects not in 'objects'.
<END_TASK>
<USER_TASK:>
Description:
def __eliminate_unused_constraits (self, objects):
""" Eliminate constraints which mention objects not in 'objects'.
In graph-theory terms, this is finding subgraph induced by
ordered vertices.
""" |
result = []
for c in self.constraints_:
if c [0] in objects and c [1] in objects:
result.append (c)
return result |
<SYSTEM_TASK:>
Returns true if there's no constraint in 'constraints' where
<END_TASK>
<USER_TASK:>
Description:
def __has_no_dependents (self, obj, constraints):
""" Returns true if there's no constraint in 'constraints' where
'obj' comes second.
""" |
failed = False
while constraints and not failed:
c = constraints [0]
if c [1] == obj:
failed = True
constraints = constraints [1:]
return not failed |
<SYSTEM_TASK:>
Helper for as_path, below. Orders properties with the implicit ones
<END_TASK>
<USER_TASK:>
Description:
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
""" |
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0 |
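Since `path_order` is a cmp-style comparator, it plugs into sorting via `functools.cmp_to_key`. A sketch (the property strings are illustrative, and the ungristed branch relies on the surrounding module's `feature` registry):

    import functools

    # Ungristed (implicit) values sort ahead of gristed ones,
    # alphabetically within each group.
    props = ['<variant>release', 'static', '<toolset>gcc', 'debug']
    print sorted(props, key=functools.cmp_to_key(path_order))
    # -> ['debug', 'static', '<toolset>gcc', '<variant>release']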
<SYSTEM_TASK:>
Refines 'properties' by overriding any non-free properties
<END_TASK>
<USER_TASK:>
Description:
def refine (properties, requirements):
""" Refines 'properties' by overriding any non-free properties
for which a different value is specified in 'requirements'.
Conditional requirements are just added without modification.
Returns the resulting list of properties.
""" |
assert is_iterable_typed(properties, Property)
assert is_iterable_typed(requirements, Property)
# The result has no duplicates, so we store it in a set
result = set()
# Records all requirements.
required = {}
# All the elements of requirements should be present in the result
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition:
required[r.feature] = r
for p in properties:
# Skip conditional properties
if p.condition:
result.add(p)
# No processing for free properties
elif p.feature.free:
result.add(p)
else:
if p.feature in required:
result.add(required[p.feature])
else:
result.add(p)
return sequence.unique(list(result) + requirements) |
<SYSTEM_TASK:>
Interpret all path properties in 'properties' as relative to 'path'
<END_TASK>
<USER_TASK:>
Description:
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
""" |
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if p.feature.path:
values = __re_two_ampersands.split(p.value)
new_value = "&&".join(os.path.normpath(os.path.join(path, v)) for v in values)
if new_value != p.value:
result.append(Property(p.feature, new_value, p.condition))
else:
result.append(p)
else:
result.append (p)
return result |
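For illustration, the string handling alone: a value holding two '&&'-separated paths is rebased against 'path' like this (standalone sketch; `__re_two_ampersands` is assumed to be a module-level `re.compile('&&')`):

    import os, re

    __re_two_ampersands = re.compile('&&')   # assumed module-level pattern
    value = 'a/b&&../c'
    rebased = "&&".join(os.path.normpath(os.path.join('/proj', v))
                        for v in __re_two_ampersands.split(value))
    # rebased == '/proj/a/b&&/c'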
<SYSTEM_TASK:>
Assumes that all feature values that start with '@' are
<END_TASK>
<USER_TASK:>
Description:
def translate_indirect(properties, context_module):
"""Assumes that all feature values that start with '@' are
names of rules, used in 'context-module'. Such rules can be
either local to the module or global. Qualified local rules
with the name of the module.""" |
assert is_iterable_typed(properties, Property)
assert isinstance(context_module, basestring)
result = []
for p in properties:
if p.value[0] == '@':
q = qualify_jam_action(p.value[1:], context_module)
get_manager().engine().register_bjam_action(q)
result.append(Property(p.feature, '@' + q, p.condition))
else:
result.append(p)
return result |
<SYSTEM_TASK:>
Exit with error if any of the properties is not valid.
<END_TASK>
<USER_TASK:>
Description:
def validate (properties):
""" Exit with error if any of the properties is not valid.
properties may be a single property or a sequence of properties.
""" |
if isinstance(properties, Property):
properties = [properties]
assert is_iterable_typed(properties, Property)
for p in properties:
__validate1(p) |
<SYSTEM_TASK:>
Selects properties which correspond to any of the given features.
<END_TASK>
<USER_TASK:>
Description:
def select (features, properties):
""" Selects properties which correspond to any of the given features.
""" |
assert is_iterable_typed(properties, basestring)
# add any missing angle brackets
features = add_grist (features)
return [p for p in properties if get_grist(p) in features] |
<SYSTEM_TASK:>
Removes all conditional properties whose conditions are not met
<END_TASK>
<USER_TASK:>
Description:
def evaluate_conditionals_in_context (properties, context):
""" Removes all conditional properties which conditions are not met
For those with met conditions, removes the condition. Properies
in conditions are looked up in 'context'
""" |
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(properties, Property)
assert isinstance(context, PropertySet)
base = []
conditional = []
for p in properties:
if p.condition:
conditional.append (p)
else:
base.append (p)
result = base[:]
for p in conditional:
# Evaluate condition
# FIXME: probably inefficient
if all(x in context for x in p.condition):
result.append(Property(p.feature, p.value))
return result |
<SYSTEM_TASK:>
Returns a modified version of properties with all values of the
<END_TASK>
<USER_TASK:>
Description:
def change (properties, feature, value = None):
""" Returns a modified version of properties with all values of the
given feature replaced by the given value.
If 'value' is None the feature will be removed.
""" |
assert is_iterable_typed(properties, basestring)
assert isinstance(feature, basestring)
assert isinstance(value, (basestring, type(None)))
result = []
feature = add_grist (feature)
for p in properties:
if get_grist (p) == feature:
if value:
result.append (replace_grist (value, feature))
else:
result.append (p)
return result |
<SYSTEM_TASK:>
Exit with error if property is not valid.
<END_TASK>
<USER_TASK:>
Description:
def __validate1 (property):
""" Exit with error if property is not valid.
""" |
assert isinstance(property, Property)
if not property.feature.free:
feature.validate_value_string (property.feature, property.value) |
<SYSTEM_TASK:>
Returns a property set which includes all the elements
<END_TASK>
<USER_TASK:>
Description:
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'.""" |
if isinstance(attributes, basestring):
attributes = [attributes]
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
            result.append(e)
return result |
<SYSTEM_TASK:>
Returns a property set which includes all
<END_TASK>
<USER_TASK:>
Description:
def take(attributes, properties):
"""Returns a property set which include all
properties in 'properties' that have any of 'attributes'.""" |
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result |
<SYSTEM_TASK:>
Associate value with properties.
<END_TASK>
<USER_TASK:>
Description:
def insert (self, properties, value):
""" Associate value with properties.
""" |
assert is_iterable_typed(properties, basestring)
assert isinstance(value, basestring)
self.__properties.append(properties)
self.__values.append(value) |
<SYSTEM_TASK:>
Benchmark one command execution
<END_TASK>
<USER_TASK:>
Description:
def benchmark_command(cmd, progress):
"""Benchmark one command execution""" |
full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd)
print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd)
(_, err) = subprocess.Popen(
['/bin/sh', '-c', full_cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
values = err.strip().split(' ')
if len(values) == 2:
try:
return (float(values[0]), float(values[1]))
except: # pylint:disable=I0011,W0702
pass # Handled by the code after the "if"
print err
raise Exception('Error during benchmarking') |
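With GNU time's --format="%U %M", stderr carries the user time in seconds and the peak resident memory in KiB, which is what the split-and-float above parses. A hypothetical call (file name and numbers are illustrative):

    time_s, mem_kib = benchmark_command('g++ -c empty.cpp', 0.5)
    # e.g. time_s == 0.42, mem_kib == 15360.0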
<SYSTEM_TASK:>
Benchmark one file
<END_TASK>
<USER_TASK:>
Description:
def benchmark_file(
filename, compiler, include_dirs, (progress_from, progress_to),
iter_count, extra_flags = ''):
"""Benchmark one file""" |
time_sum = 0
mem_sum = 0
for nth_run in xrange(0, iter_count):
(time_spent, mem_used) = benchmark_command(
'{0} -std=c++11 {1} -c {2} {3}'.format(
compiler,
' '.join('-I{0}'.format(i) for i in include_dirs),
filename,
extra_flags
),
(
progress_to * nth_run + progress_from * (iter_count - nth_run)
) / iter_count
)
os.remove(os.path.splitext(os.path.basename(filename))[0] + '.o')
time_sum = time_sum + time_spent
mem_sum = mem_sum + mem_used
return {
"time": time_sum / iter_count,
"memory": mem_sum / (iter_count * 1024)
} |
<SYSTEM_TASK:>
Determine the name + version of the compiler
<END_TASK>
<USER_TASK:>
Description:
def compiler_info(compiler):
"""Determine the name + version of the compiler""" |
(out, err) = subprocess.Popen(
['/bin/sh', '-c', '{0} -v'.format(compiler)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
for line in (out + err).split('\n'):
mtch = gcc_clang.search(line)
if mtch:
return mtch.group(1) + ' ' + mtch.group(2)
return compiler |
<SYSTEM_TASK:>
Enumerates the files in path with the given extension
<END_TASK>
<USER_TASK:>
Description:
def files_in_dir(path, extension):
"""Enumartes the files in path with the given extension""" |
ends = '.{0}'.format(extension)
return (f for f in os.listdir(path) if f.endswith(ends)) |
<SYSTEM_TASK:>
Do the benchmarking
<END_TASK>
<USER_TASK:>
Description:
def benchmark(src_dir, compiler, include_dirs, iter_count):
"""Do the benchmarking""" |
files = list(files_in_dir(src_dir, 'cpp'))
random.shuffle(files)
has_string_templates = True
string_template_file_cnt = sum(1 for file in files if 'bmp' in file)
file_count = len(files) + string_template_file_cnt
started_at = time.time()
result = {}
for filename in files:
progress = len(result)
result[filename] = benchmark_file(
os.path.join(src_dir, filename),
compiler,
include_dirs,
(float(progress) / file_count, float(progress + 1) / file_count),
iter_count
)
if 'bmp' in filename and has_string_templates:
try:
temp_result = benchmark_file(
os.path.join(src_dir, filename),
compiler,
include_dirs,
(float(progress + 1) / file_count, float(progress + 2) / file_count),
iter_count,
'-Xclang -fstring-literal-templates'
)
result[filename.replace('bmp', 'slt')] = temp_result
except:
has_string_templates = False
file_count -= string_template_file_cnt
print 'Stopping the benchmarking of string literal templates'
elapsed = time.time() - started_at
total = float(file_count * elapsed) / len(result)
print 'Elapsed time: {0}, Remaining time: {1}'.format(
format_time(elapsed),
format_time(total - elapsed)
)
return result |
<SYSTEM_TASK:>
Enumerate all configs in src_dir
<END_TASK>
<USER_TASK:>
Description:
def configs_in(src_dir):
"""Enumerate all configs in src_dir""" |
for filename in files_in_dir(src_dir, 'json'):
with open(os.path.join(src_dir, filename), 'rb') as in_f:
yield json.load(in_f) |
<SYSTEM_TASK:>
Join the list of images into the out file
<END_TASK>
<USER_TASK:>
Description:
def join_images(img_files, out_file):
"""Join the list of images into the out file""" |
images = [PIL.Image.open(f) for f in img_files]
joined = PIL.Image.new(
'RGB',
(sum(i.size[0] for i in images), max(i.size[1] for i in images))
)
left = 0
for img in images:
joined.paste(im=img, box=(left, 0))
left = left + img.size[0]
joined.save(out_file) |
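Usage sketch (file names are hypothetical): the charts are pasted left to right at a running x-offset, so the output width is the sum of the input widths.

    join_images(['time.png', 'memory.png'], 'combined.png')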
<SYSTEM_TASK:>
Load any Turi Create model that was previously saved.
<END_TASK>
<USER_TASK:>
Description:
def load_model(location):
"""
Load any Turi Create model that was previously saved.
This function assumes the model (can be any model) was previously saved in
Turi Create model format with model.save(filename).
Parameters
----------
location : string
Location of the model to load. Can be a local path or a remote URL.
Because models are saved as directories, there is no file extension.
Examples
----------
>>> model.save('my_model_file')
>>> loaded_model = tc.load_model('my_model_file')
""" |
# Check if the location is a dir_archive, if not, use glunpickler to load
# as pure python model
# If the location is a http location, skip the check, and directly proceed
# to load model as dir_archive. This is because
# 1) exists() does not work with http protocol, and
# 2) GLUnpickler does not support http
protocol = file_util.get_protocol(location)
dir_archive_exists = False
if protocol == '':
model_path = file_util.expand_full_path(location)
dir_archive_exists = file_util.exists(os.path.join(model_path, 'dir_archive.ini'))
else:
model_path = location
if protocol in ['http', 'https']:
dir_archive_exists = True
else:
import posixpath
dir_archive_exists = file_util.exists(posixpath.join(model_path, 'dir_archive.ini'))
if not dir_archive_exists:
raise IOError("Directory %s does not exist" % location)
_internal_url = _make_internal_url(location)
saved_state = glconnect.get_unity().load_model(_internal_url)
saved_state = _wrap_function_return(saved_state)
# The archive version could be both bytes/unicode
key = u'archive_version'
archive_version = saved_state[key] if key in saved_state else saved_state[key.encode()]
if archive_version < 0:
raise ToolkitError("File does not appear to be a Turi Create model.")
elif archive_version > 1:
raise ToolkitError("Unable to load model.\n\n"
"This model looks to have been saved with a future version of Turi Create.\n"
"Please upgrade Turi Create before attempting to load this model file.")
elif archive_version == 1:
        name = saved_state['model_name']
if name in MODEL_NAME_MAP:
cls = MODEL_NAME_MAP[name]
if 'model' in saved_state:
# this is a native model
return cls(saved_state['model'])
else:
# this is a CustomModel
model_data = saved_state['side_data']
model_version = model_data['model_version']
del model_data['model_version']
return cls._load_version(model_data, model_version)
elif hasattr(_extensions, name):
return saved_state["model"]
else:
raise ToolkitError("Unable to load model of name '%s'; model name not registered." % name)
else:
# very legacy model format. Attempt pickle loading
import sys
sys.stderr.write("This model was saved in a legacy model format. Compatibility cannot be guaranteed in future versions.\n")
if _six.PY3:
raise ToolkitError("Unable to load legacy model in Python 3.\n\n"
"To migrate a model, try loading it using Turi Create 4.0 or\n"
"later in Python 2 and then re-save it. The re-saved model should\n"
"work in Python 3.")
if 'graphlab' not in sys.modules:
sys.modules['graphlab'] = sys.modules['turicreate']
# backward compatibility. Otherwise old pickles will not load
sys.modules["turicreate_util"] = sys.modules['turicreate.util']
sys.modules["graphlab_util"] = sys.modules['turicreate.util']
# More backwards compatibility with the turicreate namespace code.
for k, v in list(sys.modules.items()):
if 'turicreate' in k:
sys.modules[k.replace('turicreate', 'graphlab')] = v
#legacy loader
import pickle
model_wrapper = pickle.loads(saved_state[b'model_wrapper'])
return model_wrapper(saved_state[b'model_base']) |
<SYSTEM_TASK:>
Internal function to return a get_default_options function.
<END_TASK>
<USER_TASK:>
Description:
def _get_default_options_wrapper(unity_server_model_name,
module_name='',
python_class_name='',
sdk_model = False):
"""
Internal function to return a get_default_options function.
Parameters
----------
unity_server_model_name: str
Name of the class/toolkit as registered with the unity server
module_name: str, optional
Name of the module.
python_class_name: str, optional
Name of the Python class.
sdk_model : bool, optional (default False)
True if the SDK interface was used for the model. False otherwise.
Examples
----------
get_default_options = _get_default_options_wrapper('classifier_svm',
'svm', 'SVMClassifier')
""" |
def get_default_options_for_model(output_type = 'sframe'):
"""
Get the default options for the toolkit
:class:`~turicreate.{module_name}.{python_class_name}`.
Parameters
----------
output_type : str, optional
The output can be of the following types.
        - `sframe`: A table describing each option used in the model.
- `json`: A list of option dictionaries suitable for JSON serialization.
| Each dictionary/row in the dictionary/SFrame object describes the
following parameters of the given model.
+------------------+-------------------------------------------------------+
| Name | Description |
+==================+=======================================================+
| name | Name of the option used in the model. |
        +------------------+-------------------------------------------------------+
| description | A detailed description of the option used. |
+------------------+-------------------------------------------------------+
| type | Option type (REAL, BOOL, INTEGER or CATEGORICAL) |
+------------------+-------------------------------------------------------+
| default_value | The default value for the option. |
+------------------+-------------------------------------------------------+
| possible_values | List of acceptable values (CATEGORICAL only) |
+------------------+-------------------------------------------------------+
| lower_bound | Smallest acceptable value for this option (REAL only) |
+------------------+-------------------------------------------------------+
| upper_bound | Largest acceptable value for this option (REAL only) |
+------------------+-------------------------------------------------------+
Returns
-------
out : dict/SFrame
See Also
--------
turicreate.{module_name}.{python_class_name}.get_current_options
Examples
--------
.. sourcecode:: python
>>> import turicreate
# SFrame formatted output.
>>> out_sframe = turicreate.{module_name}.get_default_options()
# dict formatted output suitable for JSON serialization.
>>> out_json = turicreate.{module_name}.get_default_options('json')
"""
if sdk_model:
response = _tc.extensions._toolkits_sdk_get_default_options(
unity_server_model_name)
else:
response = _tc.extensions._toolkits_get_default_options(
unity_server_model_name)
if output_type == 'json':
return response
else:
json_list = [{'name': k, '': v} for k,v in response.items()]
return _SFrame(json_list).unpack('X1', column_name_prefix='')\
.unpack('X1', column_name_prefix='')
# Change the doc string before returning.
get_default_options_for_model.__doc__ = get_default_options_for_model.\
__doc__.format(python_class_name = python_class_name,
module_name = module_name)
return get_default_options_for_model |
<SYSTEM_TASK:>
A helper rule to get the command to invoke some tool. If
<END_TASK>
<USER_TASK:>
Description:
def get_invocation_command_nodefault(
toolset, tool, user_provided_command=[], additional_paths=[], path_last=False):
"""
A helper rule to get the command to invoke some tool. If
    'user-provided-command' is not given, tries to find a binary named 'tool' in
    PATH and in the passed 'additional-paths'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'.
""" |
assert isinstance(toolset, basestring)
assert isinstance(tool, basestring)
assert is_iterable_typed(user_provided_command, basestring)
assert is_iterable_typed(additional_paths, basestring) or additional_paths is None
assert isinstance(path_last, (int, bool))
if not user_provided_command:
command = find_tool(tool, additional_paths, path_last)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization: can't find tool, tool"
#FIXME
#print "warning: initialized from" [ errors.nearest-user-location ] ;
else:
command = check_tool(user_provided_command)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization:"
print "warning: can't find user-provided command", user_provided_command
#FIXME
#ECHO "warning: initialized from" [ errors.nearest-user-location ]
command = []
if command:
command = ' '.join(command)
return command |
<SYSTEM_TASK:>
Same as get_invocation_command_nodefault, except that if no tool is found,
<END_TASK>
<USER_TASK:>
Description:
def get_invocation_command(toolset, tool, user_provided_command = [],
additional_paths = [], path_last = False):
""" Same as get_invocation_command_nodefault, except that if no tool is found,
returns either the user-provided-command, if present, or the 'tool' parameter.
""" |
assert isinstance(toolset, basestring)
assert isinstance(tool, basestring)
assert is_iterable_typed(user_provided_command, basestring)
assert is_iterable_typed(additional_paths, basestring) or additional_paths is None
assert isinstance(path_last, (int, bool))
result = get_invocation_command_nodefault(toolset, tool,
user_provided_command,
additional_paths,
path_last)
if not result:
if user_provided_command:
result = user_provided_command[0]
else:
result = tool
assert(isinstance(result, str))
return result |
<SYSTEM_TASK:>
Given an invocation command,
<END_TASK>
<USER_TASK:>
Description:
def get_absolute_tool_path(command):
"""
Given an invocation command,
    return the absolute path to the command. This works even if command
    has no path element and is present in PATH.
""" |
assert isinstance(command, basestring)
if os.path.dirname(command):
return os.path.dirname(command)
else:
programs = path.programs_path()
m = path.glob(programs, [command, command + '.exe' ])
if not len(m):
if __debug_configuration:
print "Could not find:", command, "in", programs
return None
return os.path.dirname(m[0]) |
<SYSTEM_TASK:>
Checks if 'command' can be found either in path
<END_TASK>
<USER_TASK:>
Description:
def check_tool_aux(command):
""" Checks if 'command' can be found either in path
or is a full name to an existing file.
""" |
assert isinstance(command, basestring)
dirname = os.path.dirname(command)
if dirname:
if os.path.exists(command):
return command
# Both NT and Cygwin will run .exe files by their unqualified names.
elif on_windows() and os.path.exists(command + '.exe'):
return command
# Only NT will run .bat files by their unqualified names.
elif os_name() == 'NT' and os.path.exists(command + '.bat'):
return command
else:
paths = path.programs_path()
if path.glob(paths, [command]):
return command |
<SYSTEM_TASK:>
Checks that a tool can be invoked by 'command'.
<END_TASK>
<USER_TASK:>
Description:
def check_tool(command):
""" Checks that a tool can be invoked by 'command'.
If command is not an absolute path, checks if it can be found in 'path'.
        If command is an absolute path, checks that it exists. Returns 'command'
        if ok and None otherwise.
""" |
assert is_iterable_typed(command, basestring)
#FIXME: why do we check the first and last elements????
if check_tool_aux(command[0]) or check_tool_aux(command[-1]):
return command |
<SYSTEM_TASK:>
returns the location of the "program files" directory on a windows
<END_TASK>
<USER_TASK:>
Description:
def get_program_files_dir():
""" returns the location of the "program files" directory on a windows
platform
""" |
ProgramFiles = bjam.variable("ProgramFiles")
if ProgramFiles:
ProgramFiles = ' '.join(ProgramFiles)
else:
ProgramFiles = "c:\\Program Files"
return ProgramFiles |
<SYSTEM_TASK:>
Returns the command needed to set an environment variable on the current
<END_TASK>
<USER_TASK:>
Description:
def variable_setting_command(variable, value):
"""
Returns the command needed to set an environment variable on the current
platform. The variable setting persists through all following commands and is
visible in the environment seen by subsequently executed commands. In other
words, on Unix systems, the variable is exported, which is consistent with the
only possible behavior on Windows systems.
""" |
assert isinstance(variable, basestring)
assert isinstance(value, basestring)
if os_name() == 'NT':
return "set " + variable + "=" + value + os.linesep
else:
# (todo)
# The following does not work on CYGWIN and needs to be fixed. On
# CYGWIN the $(nl) variable holds a Windows new-line \r\n sequence that
# messes up the executed export command which then reports that the
# passed variable name is incorrect. This is most likely due to the
# extra \r character getting interpreted as a part of the variable name.
#
# Several ideas pop to mind on how to fix this:
# * One way would be to separate the commands using the ; shell
# command separator. This seems like the quickest possible
# solution but I do not know whether this would break code on any
        #       platforms I have no access to.
# * Another would be to not use the terminating $(nl) but that would
# require updating all the using code so it does not simply
# prepend this variable to its own commands.
# * I guess the cleanest solution would be to update Boost Jam to
# allow explicitly specifying \n & \r characters in its scripts
# instead of always relying only on the 'current OS native newline
# sequence'.
#
# Some code found to depend on this behaviour:
# * This Boost Build module.
# * __test__ rule.
# * path-variable-setting-command rule.
# * python.jam toolset.
# * xsltproc.jam toolset.
# * fop.jam toolset.
# (todo) (07.07.2008.) (Jurko)
#
# I think that this works correctly in python -- Steven Watanabe
return variable + "=" + value + os.linesep + "export " + variable + os.linesep |
<SYSTEM_TASK:>
Returns a command to set a named shell path variable to the given NATIVE
<END_TASK>
<USER_TASK:>
Description:
def path_variable_setting_command(variable, paths):
"""
    Returns a command to set a named shell path variable to the given NATIVE
paths on the current platform.
""" |
assert isinstance(variable, basestring)
assert is_iterable_typed(paths, basestring)
sep = os.path.pathsep
return variable_setting_command(variable, sep.join(paths)) |
<SYSTEM_TASK:>
Returns a command that prepends the given paths to the named path variable on
<END_TASK>
<USER_TASK:>
Description:
def prepend_path_variable_command(variable, paths):
"""
Returns a command that prepends the given paths to the named path variable on
the current platform.
""" |
assert isinstance(variable, basestring)
assert is_iterable_typed(paths, basestring)
return path_variable_setting_command(
variable, paths + [expand_variable(variable)]) |
<SYSTEM_TASK:>
Given a target, as given to a custom tag rule, returns a string formatted
<END_TASK>
<USER_TASK:>
Description:
def format_name(format, name, target_type, prop_set):
""" Given a target, as given to a custom tag rule, returns a string formatted
according to the passed format. Format is a list of properties that is
represented in the result. For each element of format the corresponding target
    information is obtained and added to the result string. For all but the
    literal, the format value is taken as the string to prepend to the output
    to join the item to the rest of the result. If not given, "-" is used as a
joiner.
The format options can be:
<base>[joiner]
:: The basename of the target name.
<toolset>[joiner]
:: The abbreviated toolset tag being used to build the target.
<threading>[joiner]
:: Indication of a multi-threaded build.
<runtime>[joiner]
:: Collective tag of the build runtime.
<version:/version-feature | X.Y[.Z]/>[joiner]
:: Short version tag taken from the given "version-feature"
in the build properties. Or if not present, the literal
value as the version number.
<property:/property-name/>[joiner]
:: Direct lookup of the given property-name value in the
build properties. /property-name/ is a regular expression.
e.g. <property:toolset-.*:flavor> will match every toolset.
/otherwise/
:: The literal value of the format argument.
For example this format:
boost_ <base> <toolset> <threading> <runtime> <version:boost-version>
Might return:
boost_thread-vc80-mt-gd-1_33.dll, or
boost_regex-vc80-gd-1_33.dll
The returned name also has the target type specific prefix and suffix which
puts it in a ready form to use as the value from a custom tag rule.
""" |
if __debug__:
from ..build.property_set import PropertySet
assert is_iterable_typed(format, basestring)
assert isinstance(name, basestring)
assert isinstance(target_type, basestring)
assert isinstance(prop_set, PropertySet)
# assert(isinstance(prop_set, property_set.PropertySet))
if type.is_derived(target_type, 'LIB'):
result = "" ;
for f in format:
grist = get_grist(f)
if grist == '<base>':
result += os.path.basename(name)
elif grist == '<toolset>':
result += join_tag(get_value(f),
toolset_tag(name, target_type, prop_set))
elif grist == '<threading>':
result += join_tag(get_value(f),
threading_tag(name, target_type, prop_set))
elif grist == '<runtime>':
result += join_tag(get_value(f),
runtime_tag(name, target_type, prop_set))
elif grist.startswith('<version:'):
key = grist[len('<version:'):-1]
                version = prop_set.get('<' + key + '>')
                # prop_set.get() returns a list of values; fall back to the
                # literal key when the property is absent.
                version = version[0] if version else key
                match = __re_version.match(version)
                result += join_tag(get_value(f), match.group(1) + '_' + match.group(2))
elif grist.startswith('<property:'):
key = grist[len('<property:'):-1]
property_re = re.compile('<(' + key + ')>')
p0 = None
for prop in prop_set.raw():
match = property_re.match(prop)
if match:
                        p0 = match.group(1)
break
if p0:
p = prop_set.get('<' + p0 + '>')
if p:
assert(len(p) == 1)
                        result += join_tag(ungrist(f), p[0])
else:
result += f
result = b2.build.virtual_target.add_prefix_and_suffix(
''.join(result), target_type, prop_set)
return result |
<SYSTEM_TASK:>
Registers a configuration.
<END_TASK>
<USER_TASK:>
Description:
def register(self, id):
"""
Registers a configuration.
Returns True if the configuration has been added and False if
it already exists. Reports an error if the configuration is 'used'.
""" |
assert isinstance(id, basestring)
if id in self.used_:
#FIXME
errors.error("common: the configuration '$(id)' is in use")
if id not in self.all_:
self.all_.add(id)
# Indicate that a new configuration has been added.
return True
else:
return False |
<SYSTEM_TASK:>
Returns the value of a configuration parameter.
<END_TASK>
<USER_TASK:>
Description:
def get(self, id, param):
""" Returns the value of a configuration parameter. """ |
assert isinstance(id, basestring)
assert isinstance(param, basestring)
return self.params_.get(param, {}).get(id) |
<SYSTEM_TASK:>
Sets the value of a configuration parameter.
<END_TASK>
<USER_TASK:>
Description:
def set (self, id, param, value):
""" Sets the value of a configuration parameter. """ |
assert isinstance(id, basestring)
assert isinstance(param, basestring)
assert is_iterable_typed(value, basestring)
self.params_.setdefault(param, {})[id] = value |
<SYSTEM_TASK:>
Like get_num_gpus_in_use, but returns a list of dictionaries with just
<END_TASK>
<USER_TASK:>
Description:
def get_gpus_in_use(max_devices=None):
"""
    Like get_num_gpus_in_use, but returns a list of dictionaries with just
    the queried GPU information.
""" |
from turicreate.util import _get_cuda_gpus
gpu_indices = get_gpu_ids_in_use(max_devices=max_devices)
gpus = _get_cuda_gpus()
return [gpus[index] for index in gpu_indices] |
<SYSTEM_TASK:>
Returns the environment for unity_server.
<END_TASK>
<USER_TASK:>
Description:
def make_unity_server_env():
"""
Returns the environment for unity_server.
The environment is necessary to start the unity_server
    by setting the proper environment variables for shared libraries,
hadoop classpath, and module search paths for python lambda workers.
    The environment has 4 components:
1. CLASSPATH, contains hadoop class path
2. __GL_PYTHON_EXECUTABLE__, path to the python executable
3. __GL_PYLAMBDA_SCRIPT__, path to the lambda worker executable
4. __GL_SYS_PATH__: contains the python sys.path of the interpreter
""" |
env = os.environ.copy()
# Add hadoop class path
classpath = get_hadoop_class_path()
if ("CLASSPATH" in env):
env["CLASSPATH"] = env['CLASSPATH'] + (os.path.pathsep + classpath if classpath != '' else '')
else:
env["CLASSPATH"] = classpath
# Add python syspath
env['__GL_SYS_PATH__'] = (os.path.pathsep).join(sys.path + [os.getcwd()])
# Add the python executable to the runtime config
env['__GL_PYTHON_EXECUTABLE__'] = os.path.abspath(sys.executable)
# Add the pylambda execution script to the runtime config
env['__GL_PYLAMBDA_SCRIPT__'] = os.path.abspath(_pylambda_worker.__file__)
#### Remove PYTHONEXECUTABLE ####
# Anaconda overwrites this environment variable
# which forces all python sub-processes to use the same binary.
# When using virtualenv with ipython (which is outside virtualenv),
# all subprocess launched under unity_server will use the
# conda binary outside of virtualenv, which lacks the access
# to all packages installed inside virtualenv.
if 'PYTHONEXECUTABLE' in env:
del env['PYTHONEXECUTABLE']
# Set mxnet envvars
if 'MXNET_CPU_WORKER_NTHREADS' not in env:
from multiprocessing import cpu_count
num_cpus = int(env.get('OMP_NUM_THREADS', cpu_count()))
if sys.platform == 'darwin':
num_workers = num_cpus
else:
# On Linux, BLAS doesn't seem to tolerate larger numbers of workers.
num_workers = min(2, num_cpus)
env['MXNET_CPU_WORKER_NTHREADS'] = str(num_workers)
## set local to be c standard so that unity_server will run ##
env['LC_ALL']='C'
# add certificate file
if 'TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE' not in env and \
'TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR' not in env:
try:
import certifi
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE'] = certifi.where()
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR'] = ""
except:
pass
return env |
<SYSTEM_TASK:>
Sets the dll load path so that things are resolved correctly.
<END_TASK>
<USER_TASK:>
Description:
def set_windows_dll_path():
"""
Sets the dll load path so that things are resolved correctly.
""" |
lib_path = os.path.dirname(os.path.abspath(_pylambda_worker.__file__))
lib_path = os.path.abspath(os.path.join(lib_path, os.pardir))
def errcheck_bool(result, func, args):
if not result:
last_error = ctypes.get_last_error()
if last_error != 0:
raise ctypes.WinError(last_error)
else:
raise OSError
return args
# Also need to set the dll loading directory to the main
# folder so windows attempts to load all DLLs from this
# directory.
import ctypes.wintypes as wintypes
try:
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32.SetDllDirectoryW.errcheck = errcheck_bool
kernel32.SetDllDirectoryW.argtypes = (wintypes.LPCWSTR,)
kernel32.SetDllDirectoryW(lib_path)
except Exception as e:
logging.getLogger(__name__).warning(
"Error setting DLL load orders: %s (things should still work)." % str(e)) |
<SYSTEM_TASK:>
Returns either sframe or turicreate depending on which library
<END_TASK>
<USER_TASK:>
Description:
def get_library_name():
"""
Returns either sframe or turicreate depending on which library
this file is bundled with.
""" |
from os.path import split, abspath
__lib_name = split(split(abspath(sys.modules[__name__].__file__))[0])[1]
assert __lib_name in ["sframe", "turicreate"]
return __lib_name |
<SYSTEM_TASK:>
Returns the file name of the config file from which the environment
<END_TASK>
<USER_TASK:>
Description:
def get_config_file():
"""
    Returns the file name of the config file in which the environment
    variables are stored.
""" |
import os
from os.path import abspath, expanduser, join, exists
__lib_name = get_library_name()
assert __lib_name in ["sframe", "turicreate"]
__default_config_path = join(expanduser("~"), ".%s" % __lib_name, "config")
if "TURI_CONFIG_FILE" in os.environ:
__default_config_path = abspath(expanduser(os.environ["TURI_CONFIG_FILE"]))
if not exists(__default_config_path):
print(("WARNING: Config file specified in environment variable "
"'TURI_CONFIG_FILE' as "
"'%s', but this path does not exist.") % __default_config_path)
return __default_config_path |
<SYSTEM_TASK:>
Imports the environmental configuration settings from the
<END_TASK>
<USER_TASK:>
Description:
def setup_environment_from_config_file():
"""
    Imports the environmental configuration settings from the
    config file, if present, and sets the corresponding environment
    variables.
""" |
from os.path import exists
config_file = get_config_file()
if not exists(config_file):
return
try:
config = _ConfigParser.SafeConfigParser()
config.read(config_file)
__section = "Environment"
if config.has_section(__section):
items = config.items(__section)
for k, v in items:
try:
os.environ[k.upper()] = v
except Exception as e:
print(("WARNING: Error setting environment variable "
"'%s = %s' from config file '%s': %s.")
% (k, str(v), config_file, str(e)) )
except Exception as e:
print("WARNING: Error reading config file '%s': %s." % (config_file, str(e))) |
<SYSTEM_TASK:>
Writes an environment variable configuration to the current
<END_TASK>
<USER_TASK:>
Description:
def write_config_file_value(key, value):
"""
Writes an environment variable configuration to the current
config file. This will be read in on the next restart.
The config file is created if not present.
Note: The variables will not take effect until after restart.
""" |
filename = get_config_file()
config = _ConfigParser.SafeConfigParser()
config.read(filename)
__section = "Environment"
if not(config.has_section(__section)):
config.add_section(__section)
config.set(__section, key, value)
with open(filename, 'w') as config_file:
config.write(config_file) |
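A round-trip sketch (the variable name is hypothetical): the value written here is picked up by setup_environment_from_config_file() on the next start.

    write_config_file_value('TURI_NUM_THREADS', '4')
    # ... after restart ...
    setup_environment_from_config_file()
    # os.environ['TURI_NUM_THREADS'] == '4'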
<SYSTEM_TASK:>
Constructs the service class.
<END_TASK>
<USER_TASK:>
Description:
def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
""" |
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method)) |
<SYSTEM_TASK:>
Calls the method described by a given method descriptor.
<END_TASK>
<USER_TASK:>
Description:
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
""" |
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback) |
<SYSTEM_TASK:>
Returns the class of the request protocol message.
<END_TASK>
<USER_TASK:>
Description:
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
""" |
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class |
<SYSTEM_TASK:>
Returns the class of the response protocol message.
<END_TASK>
<USER_TASK:>
Description:
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
""" |
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class |
<SYSTEM_TASK:>
Generates and returns a method that can be set for a service method.
<END_TASK>
<USER_TASK:>
Description:
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
""" |
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback)) |
<SYSTEM_TASK:>
Constructs the stub class.
<END_TASK>
<USER_TASK:>
Description:
def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
""" |
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method)) |
<SYSTEM_TASK:>
The body of all service methods in the generated stub class.
<END_TASK>
<USER_TASK:>
Description:
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
""" |
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback) |
<SYSTEM_TASK:>
Returns a protobuf message instance.
<END_TASK>
<USER_TASK:>
Description:
def _BuildMessageFromTypeName(type_name, descriptor_pool):
"""Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
    A Message instance of type matching type_name, or None if a Descriptor
    wasn't found matching type_name.
""" |
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
database = symbol_database.Default()
try:
message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
except KeyError:
return None
message_type = database.GetPrototype(message_descriptor)
return message_type() |
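Usage sketch with the default descriptor pool; importing a generated module (here the well-known Duration type) registers it in the pool:

    from google.protobuf import descriptor_pool
    from google.protobuf import duration_pb2  # registers google.protobuf.Duration

    msg = _BuildMessageFromTypeName('google.protobuf.Duration',
                                    descriptor_pool.Default())
    # msg is an empty Duration instance; an unknown type name yields None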
<SYSTEM_TASK:>
Skips over a field message.
<END_TASK>
<USER_TASK:>
Description:
def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
""" |
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter) |
<SYSTEM_TASK:>
Skips over a field value.
<END_TASK>
<USER_TASK:>
Description:
def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
""" |
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) |
<SYSTEM_TASK:>
Consumes an integer number from tokenizer.
<END_TASK>
<USER_TASK:>
Description:
def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
"""Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed.
""" |
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result |
<SYSTEM_TASK:>
Parses an integer.
<END_TASK>
<USER_TASK:>
Description:
def ParseInteger(text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
    ValueError: Thrown iff the text is not a valid integer.
""" |
# Do the actual parsing. Exception handling is propagated to caller.
result = _ParseAbstractInteger(text, is_long=is_long)
# Check if the integer is sane. Exceptions handled by callers.
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result |
<SYSTEM_TASK:>
Parse a floating point number.
<END_TASK>
<USER_TASK:>
Description:
def ParseFloat(text):
"""Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed.
""" |
try:
# Assume Python compatible syntax.
return float(text)
except ValueError:
# Check alternative spellings.
if _FLOAT_INFINITY.match(text):
if text[0] == '-':
return float('-inf')
else:
return float('inf')
elif _FLOAT_NAN.match(text):
return float('nan')
else:
# assume '1.0f' format
try:
return float(text.rstrip('f'))
except ValueError:
raise ValueError('Couldn\'t parse float: %s' % text) |
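The accepted spellings, illustrated (plain Python syntax first, then the fallbacks handled above):

    ParseFloat('1.5')    # -> 1.5
    ParseFloat('1.0f')   # -> 1.0  (C-style suffix stripped)
    ParseFloat('-inf')   # -> float('-inf')
    ParseFloat('nan')    # -> float('nan')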
<SYSTEM_TASK:>
Parse an enum value.
<END_TASK>
<USER_TASK:>
Description:
def ParseEnum(field, value):
"""Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed.
""" |
enum_descriptor = field.enum_type
try:
number = int(value, 0)
except ValueError:
# Identifier.
enum_value = enum_descriptor.values_by_name.get(value, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value named %s.' %
(enum_descriptor.full_name, value))
else:
# Numeric value.
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value with number %d.' %
(enum_descriptor.full_name, number))
return enum_value.number |
<SYSTEM_TASK:>
Serializes if message is a google.protobuf.Any field.
<END_TASK>
<USER_TASK:>
Description:
def _TryPrintAsAnyMessage(self, message):
"""Serializes if message is a google.protobuf.Any field.""" |
packed_message = _BuildMessageFromTypeName(message.TypeName(),
self.descriptor_pool)
if packed_message:
packed_message.MergeFromString(message.value)
self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
self._PrintMessageFieldValue(packed_message)
self.out.write(' ' if self.as_one_line else '\n')
return True
else:
return False |
<SYSTEM_TASK:>
Merges a text representation of a protocol message into a message.
<END_TASK>
<USER_TASK:>
Description:
def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message.""" |
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message |
<SYSTEM_TASK:>
Converts a text representation of a protocol message into a message.
<END_TASK>
<USER_TASK:>
Description:
def _ParseOrMerge(self, lines, message):
"""Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems.
""" |
tokenizer = Tokenizer(lines)
while not tokenizer.AtEnd():
self._MergeField(tokenizer, message) |
<SYSTEM_TASK:>
Tries to consume a given piece of text.
<END_TASK>
<USER_TASK:>
Description:
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
""" |
if self.token == token:
self.NextToken()
return True
return False |
<SYSTEM_TASK:>
Consumes an integer number.
<END_TASK>
<USER_TASK:>
Description:
def ConsumeInteger(self, is_long=False):
"""Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed.
""" |
try:
result = _ParseAbstractInteger(self.token, is_long=is_long)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result |
<SYSTEM_TASK:>
Consumes a string value.
<END_TASK>
<USER_TASK:>
Description:
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
""" |
the_bytes = self.ConsumeByteString()
try:
return six.text_type(the_bytes, 'utf-8')
except UnicodeDecodeError as e:
raise self._StringParseError(e) |
<SYSTEM_TASK:>
Consumes a byte array value.
<END_TASK>
<USER_TASK:>
Description:
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
""" |
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in _QUOTES:
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list) |
<SYSTEM_TASK:>
Reads the next meaningful token.
<END_TASK>
<USER_TASK:>
Description:
def NextToken(self):
"""Reads the next meaningful token.""" |
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._more_lines:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if not match and not self._skip_comments:
match = self._COMMENT.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column] |
<SYSTEM_TASK:>
Compute the value of a composite distance function on two dictionaries,
<END_TASK>
<USER_TASK:>
Description:
def compute_composite_distance(distance, x, y):
"""
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
""" |
## Validate inputs
_validate_composite_distance(distance)
distance = _convert_distance_names_to_functions(distance)
if not isinstance(x, dict) or not isinstance(y, dict):
raise TypeError("Inputs 'x' and 'y' must be in dictionary form. " +
"Selecting individual rows of an SFrame yields the " +
"correct format.")
ans = 0.
for d in distance:
ftrs, dist, weight = d
## Special check for multiple columns with levenshtein distance.
if dist == _tc.distances.levenshtein and len(ftrs) > 1:
raise ValueError("levenshtein distance cannot be used with multiple" +
"columns. Please concatenate strings into a single " +
"column before computing the distance.")
## Extract values for specified features.
a = {}
b = {}
for ftr in ftrs:
if type(x[ftr]) != type(y[ftr]):
if not isinstance(x[ftr], (int, float)) or not isinstance(y[ftr], (int, float)):
raise ValueError("Input data has different types.")
if isinstance(x[ftr], (int, float, str)):
a[ftr] = x[ftr]
b[ftr] = y[ftr]
elif isinstance(x[ftr], dict):
for key, val in _six.iteritems(x[ftr]):
a['{}.{}'.format(ftr, key)] = val
for key, val in _six.iteritems(y[ftr]):
b['{}.{}'.format(ftr, key)] = val
elif isinstance(x[ftr], (list, _array.array)):
for i, val in enumerate(x[ftr]):
a[i] = val
for i, val in enumerate(y[ftr]):
b[i] = val
else:
raise TypeError("Type of feature '{}' not understood.".format(ftr))
## Pull out the raw values for levenshtein
if dist == _tc.distances.levenshtein:
a = list(a.values())[0]
b = list(b.values())[0]
## Compute component distance and add to the total distance.
ans += weight * dist(a, b)
return ans |
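The weighted sum above can be verified by hand. A minimal sketch, assuming
turicreate is importable, that reproduces the docstring's 1.95286 result:

import turicreate as tc

x = {'X1': 0.98, 'X2': 0.69, 'species': 'cat'}
y = {'X1': 0.62, 'X2': 0.58, 'species': 'dog'}
dist_spec = [[('X1', 'X2'), 'euclidean', 2],
             [('species',), 'levenshtein', 0.4]]

d = tc.distances.compute_composite_distance(dist_spec, x, y)

# hand computation: 2 * euclidean((0.98, 0.69), (0.62, 0.58))
#                 + 0.4 * levenshtein('cat', 'dog')  (= 3 edits)
manual = 2 * ((0.98 - 0.62) ** 2 + (0.69 - 0.58) ** 2) ** 0.5 + 0.4 * 3
assert abs(d - manual) < 1e-6  # both are ~1.95286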
<SYSTEM_TASK:>
Check that composite distance function is in valid form. Don't modify the
<END_TASK>
<USER_TASK:>
Description:
def _validate_composite_distance(distance):
"""
Check that composite distance function is in valid form. Don't modify the
composite distance in any way.
""" |
if not isinstance(distance, list):
raise TypeError("Input 'distance' must be a composite distance.")
if len(distance) < 1:
raise ValueError("Composite distances must have a least one distance "
"component, consisting of a list of feature names, "
"a distance function (string or function handle), "
"and a weight.")
for d in distance:
## Extract individual pieces of the distance component
try:
ftrs, dist, weight = d
except:
raise TypeError("Elements of a composite distance function must " +
"have three items: a set of feature names (tuple or list), " +
"a distance function (string or function handle), " +
"and a weight.")
## Validate feature names
        if not isinstance(ftrs, (list, tuple)):
            raise TypeError("Feature names must be specified in a list or tuple.")
        if len(ftrs) == 0:
            raise ValueError("An empty list of features cannot be passed " +\
                            "as part of a composite distance function.")
        if not all([isinstance(x, str) for x in ftrs]):
            raise TypeError("Feature lists must contain only strings.")
## Validate standard distance function
if not isinstance(dist, str) and not hasattr(dist, '__call__'):
raise ValueError("Standard distances must be the name of a distance " +
"function (string) or a distance function handle")
if isinstance(dist, str):
try:
_tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized".format(dist))
## Validate weight
if not isinstance(weight, (int, float)):
raise ValueError(
"The weight of each distance component must be a single " +\
"integer or a float value.")
if weight < 0:
raise ValueError("The weight on each distance component must be " +
"greater than or equal to zero.") |
<SYSTEM_TASK:>
Remove feature names from the feature lists in a composite distance
<END_TASK>
<USER_TASK:>
Description:
def _scrub_composite_distance_features(distance, feature_blacklist):
"""
Remove feature names from the feature lists in a composite distance
function.
""" |
dist_out = []
for i, d in enumerate(distance):
ftrs, dist, weight = d
new_ftrs = [x for x in ftrs if x not in feature_blacklist]
if len(new_ftrs) > 0:
dist_out.append([new_ftrs, dist, weight])
return dist_out |
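Behavior sketch (inputs are illustrative): a blacklisted feature is dropped
from each component, and components whose feature lists become empty are
removed entirely.

spec = [[['X1', 'X2'], 'euclidean', 1.0],
        [['X2'], 'manhattan', 0.5]]
print(_scrub_composite_distance_features(spec, ['X2']))
# -> [[['X1'], 'euclidean', 1.0]]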
<SYSTEM_TASK:>
Convert function names in a composite distance function into function
<END_TASK>
<USER_TASK:>
Description:
def _convert_distance_names_to_functions(distance):
"""
Convert function names in a composite distance function into function
handles.
""" |
dist_out = _copy.deepcopy(distance)
for i, d in enumerate(distance):
_, dist, _ = d
if isinstance(dist, str):
try:
dist_out[i][1] = _tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized.".format(dist))
return dist_out |
<SYSTEM_TASK:>
Builds a dictionary of all the messages available in a set of files.
<END_TASK>
<USER_TASK:>
Description:
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
""" |
for file_proto in file_protos:
_FACTORY.pool.Add(file_proto)
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) |
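A hedged usage sketch for the protobuf version this code targets: build a
FileDescriptorProto by hand, then pull the generated class out of the
returned mapping. The 'example.proto' and 'Point' names are illustrative.

from google.protobuf import descriptor_pb2, message_factory

file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = 'example.proto'
file_proto.package = 'example'
msg = file_proto.message_type.add()
msg.name = 'Point'
field = msg.field.add()
field.name = 'x'
field.number = 1
field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32

classes = message_factory.GetMessages([file_proto])
Point = classes['example.Point']
p = Point(x=3)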
<SYSTEM_TASK:>
Builds a proto2 message class based on the passed in descriptor.
<END_TASK>
<USER_TASK:>
Description:
def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
""" |
if descriptor.full_name not in self._classes:
descriptor_name = descriptor.name
if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(
descriptor_name,
(message.Message,),
{'DESCRIPTOR': descriptor, '__module__': None})
# If module not set, it wrongly points to the reflection.py module.
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return self._classes[descriptor.full_name] |
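A sketch of resolving a descriptor through the default pool and handing it
to a factory; importing timestamp_pb2 is what registers the well-known type
in the default pool.

from google.protobuf import descriptor_pool, message_factory
from google.protobuf import timestamp_pb2  # registers google.protobuf.Timestamp

pool = descriptor_pool.Default()
desc = pool.FindMessageTypeByName('google.protobuf.Timestamp')
factory = message_factory.MessageFactory(pool)
Timestamp = factory.GetPrototype(desc)
ts = Timestamp(seconds=120, nanos=500)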
<SYSTEM_TASK:>
Gets all the messages from the specified files.
<END_TASK>
<USER_TASK:>
Description:
def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
""" |
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return result |
<SYSTEM_TASK:>
Initializes an MpsGraphAPI for object detection.
<END_TASK>
<USER_TASK:>
Description:
def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
""" |
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network |
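Dimension sketch for a typical YOLO-style configuration (all numbers are
illustrative, not taken from the code above): the network downsamples by a
factor of 32, and the output channel count is commonly
num_anchors * (5 + num_classes).

input_image_shape = (3, 416, 416)               # (c_in, h_in, w_in)
num_anchors, num_classes = 15, 20
output_size = num_anchors * (5 + num_classes)   # 375 output channels
h_out, w_out = 416 // 32, 416 // 32             # 13 x 13 prediction grid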
<SYSTEM_TASK:>
Predict object instances in an sframe of images.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
        suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
        describes an object instance that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
""" |
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset))) |
<SYSTEM_TASK:>
Evaluate the model by making predictions and comparing these to ground
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
        suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
    >>> print('mAP: {:.1%}'.format(results['mean_average_precision_50']))
mAP: 43.2%
""" |
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret |
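With output_type='sframe' the class-averaged metrics are a one-liner away
(a sketch, reusing the model and data from the examples above):

results = model.evaluate(data, metric='all', output_type='sframe')
map50 = results['average_precision_50'].mean()
mean_ap = results['average_precision'].mean()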
<SYSTEM_TASK:>
Intermediate callback to wrap the locator
<END_TASK>
<USER_TASK:>
Description:
def _xmlTextReaderErrorFunc(cb_args, msg, severity, locator):
    """Intermediate callback to wrap the locator""" |
    (f, arg) = cb_args
    return f(arg, msg, severity, xmlTextReaderLocator(locator)) |
<SYSTEM_TASK:>
parse an HTML from a file descriptor and build a tree.
<END_TASK>
<USER_TASK:>
Description:
def htmlReadFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. """ |
ret = libxml2mod.htmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('htmlReadFd() failed')
return xmlDoc(_obj=ret) |
<SYSTEM_TASK:>
Load the catalog and build the associated data structures.
<END_TASK>
<USER_TASK:>
Description:
def loadACatalog(filename):
"""Load the catalog and build the associated data structures.
    This can be either an XML Catalog or an SGML Catalog. It
    will recurse in SGML CATALOG entries. On the other hand, XML
    Catalogs are not handled recursively. """ |
ret = libxml2mod.xmlLoadACatalog(filename)
if ret is None:raise treeError('xmlLoadACatalog() failed')
return catalog(_obj=ret) |
<SYSTEM_TASK:>
Load an SGML super catalog. It won't expand CATALOG or
<END_TASK>
<USER_TASK:>
Description:
def loadSGMLSuperCatalog(filename):
"""Load an SGML super catalog. It won't expand CATALOG or
DELEGATE references. This is only needed for manipulating
SGML Super Catalogs like adding and removing CATALOG or
DELEGATE entries. """ |
ret = libxml2mod.xmlLoadSGMLSuperCatalog(filename)
if ret is None:raise treeError('xmlLoadSGMLSuperCatalog() failed')
return catalog(_obj=ret) |
<SYSTEM_TASK:>
Dumps information about the string, shortening it if necessary
<END_TASK>
<USER_TASK:>
Description:
def debugDumpString(output, str):
"""Dumps informations about the string, shorten it if necessary """ |
if output is not None: output.flush()
libxml2mod.xmlDebugDumpString(output, str) |
<SYSTEM_TASK:>
Check whether this name is a predefined entity.
<END_TASK>
<USER_TASK:>
Description:
def predefinedEntity(name):
"""Check whether this name is an predefined entity. """ |
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret) |
<SYSTEM_TASK:>
Set up the FTP proxy information. This can also be done by
<END_TASK>
<USER_TASK:>
Description:
def nanoFTPProxy(host, port, user, passwd, type):
"""Setup the FTP proxy informations. This can also be done by
using ftp_proxy ftp_proxy_user and ftp_proxy_password
environment variables. """ |
libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type) |
<SYSTEM_TASK:>
Load and parse an external subset.
<END_TASK>
<USER_TASK:>
Description:
def parseDTD(ExternalID, SystemID):
"""Load and parse an external subset. """ |
ret = libxml2mod.xmlParseDTD(ExternalID, SystemID)
if ret is None:raise parserError('xmlParseDTD() failed')
return xmlDtd(_obj=ret) |
<SYSTEM_TASK:>
parse an XML in-memory block and build a tree.
<END_TASK>
<USER_TASK:>
Description:
def parseMemory(buffer, size):
"""parse an XML in-memory block and build a tree. """ |
ret = libxml2mod.xmlParseMemory(buffer, size)
if ret is None:raise parserError('xmlParseMemory() failed')
return xmlDoc(_obj=ret) |
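Minimal usage sketch, assuming the libxml2 Python bindings are installed:

import libxml2

xml = "<root><child/></root>"
doc = libxml2.parseMemory(xml, len(xml))
print(doc.getRootElement().name)   # 'root'
doc.freeDoc()                      # libxml2 documents must be freed manually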
<SYSTEM_TASK:>
parse an XML from a file descriptor and build a tree. NOTE
<END_TASK>
<USER_TASK:>
Description:
def readFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. NOTE
that the file descriptor will not be closed when the reader
is closed or reset. """ |
ret = libxml2mod.xmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('xmlReadFd() failed')
return xmlDoc(_obj=ret) |
<SYSTEM_TASK:>
parse an XML in-memory document and build a tree. In the
<END_TASK>
<USER_TASK:>
Description:
def recoverDoc(cur):
"""parse an XML in-memory document and build a tree. In the
    case the document is not Well Formed, an attempt to build a
    tree is made anyway """ |
ret = libxml2mod.xmlRecoverDoc(cur)
if ret is None:raise treeError('xmlRecoverDoc() failed')
return xmlDoc(_obj=ret) |
<SYSTEM_TASK:>
parse an XML in-memory block and build a tree. In the case
<END_TASK>
<USER_TASK:>
Description:
def recoverMemory(buffer, size):
"""parse an XML in-memory block and build a tree. In the case
the document is not Well Formed, an attempt to build a tree
is tried anyway """ |
ret = libxml2mod.xmlRecoverMemory(buffer, size)
if ret is None:raise treeError('xmlRecoverMemory() failed')
return xmlDoc(_obj=ret) |
<SYSTEM_TASK:>
append the char value in the array
<END_TASK>
<USER_TASK:>
Description:
def copyChar(len, out, val):
"""append the char value in the array """ |
ret = libxml2mod.xmlCopyChar(len, out, val)
return ret |
<SYSTEM_TASK:>
Pops the top element name from the name stack
<END_TASK>
<USER_TASK:>
Description:
def namePop(ctxt):
"""Pops the top element name from the name stack """ |
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.namePop(ctxt__o)
return ret |
<SYSTEM_TASK:>
Pushes a new element name on top of the name stack
<END_TASK>
<USER_TASK:>
Description:
def namePush(ctxt, value):
"""Pushes a new element name on top of the name stack """ |
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.namePush(ctxt__o, value)
return ret |
<SYSTEM_TASK:>
Pops the top element node from the node stack
<END_TASK>
<USER_TASK:>
Description:
def nodePop(ctxt):
"""Pops the top element node from the node stack """ |
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.nodePop(ctxt__o)
if ret is None:raise treeError('nodePop() failed')
return xmlNode(_obj=ret) |
<SYSTEM_TASK:>
Create a libxml2 input buffer from a Python file
<END_TASK>
<USER_TASK:>
Description:
def createInputBuffer(file, encoding):
"""Create a libxml2 input buffer from a Python file """ |
ret = libxml2mod.xmlCreateInputBuffer(file, encoding)
if ret is None:raise treeError('xmlCreateInputBuffer() failed')
return inputBuffer(_obj=ret) |
<SYSTEM_TASK:>
Create a progressive XML parser context to build either an
<END_TASK>
<USER_TASK:>
Description:
def createPushParser(SAX, chunk, size, URI):
"""Create a progressive XML parser context to build either an
event flow if the SAX object is not None, or a DOM tree
otherwise. """ |
ret = libxml2mod.xmlCreatePushParser(SAX, chunk, size, URI)
if ret is None:raise parserError('xmlCreatePushParser() failed')
return parserCtxt(_obj=ret) |
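Sketch of incremental parsing with the push interface; passing SAX=None
builds a DOM that can be retrieved from the context afterwards:

import libxml2

ctxt = libxml2.createPushParser(None, "<root>", 6, "chunked.xml")
ctxt.parseChunk("<child/></root>", 15, 1)   # terminate=1 on the final chunk
doc = ctxt.doc()
print(doc.getRootElement().name)            # 'root'
doc.freeDoc()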
<SYSTEM_TASK:>
Create a progressive HTML parser context to build either an
<END_TASK>
<USER_TASK:>
Description:
def htmlCreatePushParser(SAX, chunk, size, URI):
"""Create a progressive HTML parser context to build either an
event flow if the SAX object is not None, or a DOM tree
otherwise. """ |
ret = libxml2mod.htmlCreatePushParser(SAX, chunk, size, URI)
if ret is None:raise parserError('htmlCreatePushParser() failed')
return parserCtxt(_obj=ret) |
<SYSTEM_TASK:>
Create an XML RelaxNGs parse context for that memory buffer
<END_TASK>
<USER_TASK:>
Description:
def relaxNGNewMemParserCtxt(buffer, size):
"""Create an XML RelaxNGs parse context for that memory buffer
expected to contain an XML RelaxNGs file. """ |
ret = libxml2mod.xmlRelaxNGNewMemParserCtxt(buffer, size)
if ret is None:raise parserError('xmlRelaxNGNewMemParserCtxt() failed')
return relaxNgParserCtxt(_obj=ret) |
<SYSTEM_TASK:>
Builds the QName @prefix:@ncname in @memory if there is
<END_TASK>
<USER_TASK:>
Description:
def buildQName(ncname, prefix, memory, len):
"""Builds the QName @prefix:@ncname in @memory if there is
enough space and prefix is not None nor empty, otherwise
allocate a new string. If prefix is None or empty it
returns ncname. """ |
ret = libxml2mod.xmlBuildQName(ncname, prefix, memory, len)
return ret |