Dataset schema:
  _id              string (lengths 2 to 7)
  title            string (lengths 1 to 88)
  partition        string (3 classes)
  text             string (lengths 75 to 19.8k)
  language         string (1 class)
  meta_information dict
q277700
Converter.register_to_openmath
test
def register_to_openmath(self, py_class, converter):
    """Register a conversion from Python to OpenMath

    :param py_class: A Python class the conversion is attached to, or None
    :type py_class: None, type
    :param converter: A conversion function or an OpenMath object
    :type converter: Callable, OMAny
    :rtype: None

    ``converter`` will be used to convert any object of type ``py_class``,
    or any object if ``py_class`` is ``None``. If ``converter`` is an
    OpenMath object, it is returned immediately. If it is a callable, it is
    called with the Python object as parameter; in this case, it must
    either return an OpenMath object, or raise an exception. The special
    exception ``CannotConvertError`` can be used to signify that
    ``converter`` does not know how to convert the current object, and
    that ``to_openmath`` shall continue with the other converters. Any
    other exception stops conversion immediately.

    Converters registered by this function are called in order from the
    most recent to the oldest.
    """
    if py_class is not None and not isclass(py_class):
        raise TypeError('Expected class, found %r' % py_class)
    if not callable(converter) and not isinstance(converter, om.OMAny):
        raise TypeError('Expected callable or openmath.OMAny object, found %r' % converter)
    self._conv_to_om.append((py_class, converter))
python
{ "resource": "" }
q277701
Converter._deprecated_register_to_python
test
def _deprecated_register_to_python(self, cd, name, converter=None):
    """Register a conversion from OpenMath to Python

    This function has two forms. A three-argument one:

    :param cd: A content dictionary name
    :type cd: str
    :param name: A symbol name
    :type name: str
    :param converter: A conversion function, or a Python object
    :type: Callable, Any

    Any object of type ``openmath.OMSymbol``, with content dictionary
    equal to ``cd`` and name equal to ``name`` will be converted using
    ``converter``. Also, any object of type ``openmath.OMApplication``
    whose first child is an ``openmath.OMSymbol`` as above will be
    converted using ``converter``.

    If ``converter`` is a callable, it will be called with the OpenMath
    object as parameter; otherwise ``converter`` will be returned.

    In the two-argument form:

    :param cd: A subclass of ``OMAny``
    :type cd: type
    :param name: A conversion function
    :type name: Callable

    Any object of type ``cd`` will be passed to ``name()``, and the
    result will be returned. This form is mainly to override default
    conversions for basic OpenMath tags (OMInteger, OMString, etc.). It
    is discouraged to use it for ``OMSymbol`` and ``OMApplication``.
    """
    if converter is None:
        if isclass(cd) and issubclass(cd, om.OMAny):
            self._conv_to_py[cd] = name
        else:
            raise TypeError('Two-argument form expects a subclass of openmath.OMAny, found %r' % cd)
    else:
        if isinstance(cd, str) and isinstance(name, str):
            self._conv_sym_to_py[(cd, name)] = converter
        else:
            raise TypeError('Three-argument form expects strings, found %r' % cd.__class__)
python
{ "resource": "" }
q277702
Redis.init_app
test
def init_app(self, app):
    """ Used to initialize redis with app object """
    app.config.setdefault('REDIS_URLS', {
        'main': 'redis://localhost:6379/0',
        'admin': 'redis://localhost:6379/1',
    })
    app.before_request(self.before_request)
    self.app = app
python
{ "resource": "" }
q277703
valid_choices
test
def valid_choices(choices):
    """ Yield the keys of a Django-style choices list, flattening option groups """
    for key, value in choices:
        if isinstance(value, (list, tuple)):
            for group_key, _ in value:
                yield group_key
        else:
            yield key
python
{ "resource": "" }
q277704
split_model_kwargs
test
def split_model_kwargs(kw):
    """ Split kwargs into plain model fields and `field__subfield` arguments """
    from collections import defaultdict
    model_fields = {}
    fields_args = defaultdict(dict)
    for key in kw.keys():
        if '__' in key:
            field, _, subfield = key.partition('__')
            fields_args[field][subfield] = kw[key]
        else:
            model_fields[key] = kw[key]
    return model_fields, fields_args
python
{ "resource": "" }
q277705
ExtensionMethod.register
test
def register(self, field_type, impl=None):
    """ Register form field data function. Can be used as a decorator """
    def _wrapper(func):
        self.registry[field_type] = func
        return func
    if impl:
        return _wrapper(impl)
    return _wrapper
python
{ "resource": "" }
q277706
ExtensionMethod._create_value
test
def _create_value(self, *args, **kwargs):
    """
    Lowest value generator. Separated from __call__, because it seems
    that Python caches the __call__ reference on module import.
    """
    if not len(args):
        raise TypeError('Object instance is not provided')
    if self.by_instance:
        field_type = args[0]
    else:
        field_type = args[0].__class__
    function = self.registry.get(field_type, self.default)
    if function is None:
        raise TypeError("no match %s" % field_type)
    return function(*args, **kwargs)
python
{ "resource": "" }
q277707
any_form_default
test
def any_form_default(form_cls, **kwargs):
    """ Returns tuple with form data and files """
    form_data = {}
    form_files = {}
    form_fields, fields_args = split_model_kwargs(kwargs)
    for name, field in form_cls.base_fields.iteritems():
        if name in form_fields:
            form_data[name] = kwargs[name]
        else:
            form_data[name] = any_form_field(field, **fields_args[name])
    return form_data, form_files
python
{ "resource": "" }
q277708
field_required_attribute
test
def field_required_attribute(function):
    """
    Sometimes return None if field is not required

    >>> result = any_form_field(forms.BooleanField(required=False))
    >>> result in ['', 'True', 'False']
    True
    """
    def _wrapper(field, **kwargs):
        if not field.required and random.random() < 0.1:
            return None
        return function(field, **kwargs)
    return _wrapper
python
{ "resource": "" }
q277709
field_choices_attibute
test
def field_choices_attibute(function):
    """ Selection from field.choices """
    def _wrapper(field, **kwargs):
        if hasattr(field.widget, 'choices'):
            return random.choice(list(valid_choices(field.widget.choices)))
        return function(field, **kwargs)
    return _wrapper
python
{ "resource": "" }
q277710
decimal_field_data
test
def decimal_field_data(field, **kwargs):
    """
    Return random value for DecimalField

    >>> result = any_form_field(forms.DecimalField(max_value=100, min_value=11, max_digits=4, decimal_places=2))
    >>> type(result)
    <type 'str'>
    >>> from decimal import Decimal
    >>> Decimal(result) >= 11, Decimal(result) <= Decimal('99.99')
    (True, True)
    """
    min_value = 0
    max_value = 10
    from django.core.validators import MinValueValidator, MaxValueValidator
    for elem in field.validators:
        if isinstance(elem, MinValueValidator):
            min_value = elem.limit_value
        if isinstance(elem, MaxValueValidator):
            max_value = elem.limit_value
    if field.max_digits and field.decimal_places:
        from decimal import Decimal
        max_value = min(max_value,
                        Decimal('%s.%s' % ('9' * (field.max_digits - field.decimal_places),
                                           '9' * field.decimal_places)))
    min_value = kwargs.get('min_value') or min_value
    max_value = kwargs.get('max_value') or max_value
    return str(xunit.any_decimal(min_value=min_value, max_value=max_value,
                                 decimal_places=field.decimal_places or 2))
python
{ "resource": "" }
q277711
email_field_data
test
def email_field_data(field, **kwargs):
    """
    Return random value for EmailField

    >>> result = any_form_field(forms.EmailField(min_length=10, max_length=30))
    >>> type(result)
    <type 'str'>
    >>> len(result) <= 30, len(result) >= 10
    (True, True)
    """
    max_length = 10
    if field.max_length:
        max_length = (field.max_length - 5) / 2
    min_length = 10
    if field.min_length:
        min_length = (field.min_length - 4) / 2
    return "%s@%s.%s" % (
        xunit.any_string(min_length=min_length, max_length=max_length),
        xunit.any_string(min_length=min_length, max_length=max_length),
        xunit.any_string(min_length=2, max_length=3))
python
{ "resource": "" }
q277712
date_field_data
test
def date_field_data(field, **kwargs):
    """
    Return random value for DateField

    >>> result = any_form_field(forms.DateField())
    >>> type(result)
    <type 'str'>
    """
    from_date = kwargs.get('from_date', date(1990, 1, 1))
    to_date = kwargs.get('to_date', date.today())
    date_format = random.choice(field.input_formats or
                                formats.get_format('DATE_INPUT_FORMATS'))
    return xunit.any_date(from_date=from_date, to_date=to_date).strftime(date_format)
python
{ "resource": "" }
q277713
datetime_field_data
test
def datetime_field_data(field, **kwargs):
    """
    Return random value for DateTimeField

    >>> result = any_form_field(forms.DateTimeField())
    >>> type(result)
    <type 'str'>
    """
    from_date = kwargs.get('from_date', datetime(1990, 1, 1))
    to_date = kwargs.get('to_date', datetime.today())
    date_format = random.choice(field.input_formats or
                                formats.get_format('DATETIME_INPUT_FORMATS'))
    return xunit.any_datetime(from_date=from_date, to_date=to_date).strftime(date_format)
python
{ "resource": "" }
q277714
float_field_data
test
def float_field_data(field, **kwargs):
    """
    Return random value for FloatField

    >>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
    >>> type(result)
    <type 'str'>
    >>> float(result) >= 100, float(result) <= 200
    (True, True)
    """
    min_value = 0
    max_value = 100
    from django.core.validators import MinValueValidator, MaxValueValidator
    for elem in field.validators:
        if isinstance(elem, MinValueValidator):
            min_value = elem.limit_value
        if isinstance(elem, MaxValueValidator):
            max_value = elem.limit_value
    min_value = kwargs.get('min_value', min_value)
    max_value = kwargs.get('max_value', max_value)
    precision = kwargs.get('precision', 3)
    return str(xunit.any_float(min_value=min_value, max_value=max_value, precision=precision))
python
{ "resource": "" }
q277715
integer_field_data
test
def integer_field_data(field, **kwargs):
    """
    Return random value for IntegerField

    >>> result = any_form_field(forms.IntegerField(max_value=200, min_value=100))
    >>> type(result)
    <type 'str'>
    >>> int(result) >= 100, int(result) <= 200
    (True, True)
    """
    min_value = 0
    max_value = 100
    from django.core.validators import MinValueValidator, MaxValueValidator
    for elem in field.validators:
        if isinstance(elem, MinValueValidator):
            min_value = elem.limit_value
        if isinstance(elem, MaxValueValidator):
            max_value = elem.limit_value
    min_value = kwargs.get('min_value', min_value)
    max_value = kwargs.get('max_value', max_value)
    return str(xunit.any_int(min_value=min_value, max_value=max_value))
python
{ "resource": "" }
q277716
time_field_data
test
def time_field_data(field, **kwargs):
    """
    Return random value for TimeField

    >>> result = any_form_field(forms.TimeField())
    >>> type(result)
    <type 'str'>
    """
    time_format = random.choice(field.input_formats or
                                formats.get_format('TIME_INPUT_FORMATS'))
    return time(xunit.any_int(min_value=0, max_value=23),
                xunit.any_int(min_value=0, max_value=59),
                xunit.any_int(min_value=0, max_value=59)).strftime(time_format)
python
{ "resource": "" }
q277717
choice_field_data
test
def choice_field_data(field, **kwargs):
    """
    Return random value for ChoiceField

    >>> CHOICES = [('YNG', 'Child'), ('OLD', 'Parent')]
    >>> result = any_form_field(forms.ChoiceField(choices=CHOICES))
    >>> type(result)
    <type 'str'>
    >>> result in ['YNG', 'OLD']
    True
    >>> typed_result = any_form_field(forms.TypedChoiceField(choices=CHOICES))
    >>> typed_result in ['YNG', 'OLD']
    True
    """
    if field.choices:
        return str(random.choice(list(valid_choices(field.choices))))
    return 'None'
python
{ "resource": "" }
q277718
multiple_choice_field_data
test
def multiple_choice_field_data(field, **kwargs):
    """
    Return random value for MultipleChoiceField

    >>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent'), ('OLD', 'GrandParent')]
    >>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES))
    >>> type(result)
    <type 'str'>
    """
    if field.choices:
        from django_any.functions import valid_choices
        l = list(valid_choices(field.choices))
        random.shuffle(l)
        choices = []
        count = xunit.any_int(min_value=1, max_value=len(field.choices))
        for i in xrange(0, count):
            choices.append(l[i])
        return ' '.join(choices)
    return 'None'
python
{ "resource": "" }
q277719
model_choice_field_data
test
def model_choice_field_data(field, **kwargs):
    """ Return one of the first ten items of the field queryset """
    data = list(field.queryset[:10])
    if data:
        return random.choice(data)
    else:
        raise TypeError('No %s available in queryset' % field.queryset.model)
python
{ "resource": "" }
q277720
encode_bytes
test
def encode_bytes(obj, nsprefix=None):
    """
    Encodes an OpenMath element into a string.

    :param obj: Object to encode as string.
    :type obj: OMAny
    :rtype: bytes
    """
    node = encode_xml(obj, nsprefix)
    return etree.tostring(node)
python
{ "resource": "" }
q277721
publish
test
def publish(msg="checkpoint: publish package"):
    """Deploy the app to PYPI.

    Args:
        msg (str, optional): Description
    """
    test = check()
    if test.succeeded:
        # clean()
        # push(msg)
        sdist = local("python setup.py sdist")
        if sdist.succeeded:
            build = local('python setup.py build && python setup.py bdist_egg')
            if build.succeeded:
                upload = local("twine upload dist/*")
                if upload.succeeded:
                    tag()
python
{ "resource": "" }
q277722
tag
test
def tag(version=__version__):
    """Deploy a version tag."""
    build = local("git tag {0}".format(version))
    if build.succeeded:
        local("git push --tags")
python
{ "resource": "" }
q277723
any_field_blank
test
def any_field_blank(function):
    """ Sometimes return None if the field could be blank """
    def wrapper(field, **kwargs):
        if kwargs.get('isnull', False):
            return None
        if field.blank and random.random() < 0.1:
            return None
        return function(field, **kwargs)
    return wrapper
python
{ "resource": "" }
q277724
load_python_global
test
def load_python_global(module, name):
    """
    Evaluate an OpenMath symbol describing a global Python object

    EXAMPLES::

        >>> from openmath.convert_pickle import to_python
        >>> from openmath.convert_pickle import load_python_global
        >>> load_python_global('math', 'sin')
        <built-in function sin>

        >>> from openmath import openmath as om
        >>> o = om.OMSymbol(cdbase="http://python.org/", cd='math', name='sin')
        >>> to_python(o)
        <built-in function sin>
    """
    # The builtin module has been renamed in python3
    if module == '__builtin__' and six.PY3:
        module = 'builtins'
    module = importlib.import_module(module)
    return getattr(module, name)
python
{ "resource": "" }
q277725
cls_build
test
def cls_build(inst, state):
    """
    Apply the setstate protocol to initialize `inst` from `state`.

    INPUT:

    - ``inst`` -- a raw instance of a class
    - ``state`` -- the state to restore; typically a dictionary mapping
      attribute names to their values

    EXAMPLES::

        >>> from openmath.convert_pickle import cls_build
        >>> class A(object): pass
        >>> inst = A.__new__(A)
        >>> state = {"foo": 1, "bar": 4}
        >>> inst2 = cls_build(inst, state)
        >>> inst is inst2
        True
        >>> inst.foo
        1
        >>> inst.bar
        4
    """
    # Copied from Pickler.load_build
    setstate = getattr(inst, "__setstate__", None)
    if setstate:
        setstate(state)
        return inst
    slotstate = None
    if isinstance(state, tuple) and len(state) == 2:
        state, slotstate = state
    if state:
        try:
            d = inst.__dict__
            try:
                for k, v in six.iteritems(state):
                    d[six.moves.intern(k)] = v
            # keys in state don't have to be strings
            # don't blow up, but don't go out of our way
            except TypeError:
                d.update(state)
        except RuntimeError:
            # XXX In restricted execution, the instance's __dict__
            # is not accessible. Use the old way of unpickling
            # the instance variables. This is a semantic
            # difference when unpickling in restricted
            # vs. unrestricted modes.
            # Note, however, that cPickle has never tried to do the
            # .update() business, and always uses
            # PyObject_SetItem(inst.__dict__, key, value) in a
            # loop over state.items().
            for k, v in state.items():
                setattr(inst, k, v)
    if slotstate:
        for k, v in slotstate.items():
            setattr(inst, k, v)
    return inst
python
{ "resource": "" }
q277726
PickleConverter.OMList
test
def OMList(self, l):
    """
    Convert a list of OM objects into an OM object

    EXAMPLES::

        >>> from openmath import openmath as om
        >>> from openmath.convert_pickle import PickleConverter
        >>> converter = PickleConverter()
        >>> o = converter.OMList([om.OMInteger(2), om.OMInteger(2)]); o
        OMApplication(elem=OMSymbol(name='list', cd='Python', id=None, cdbase='http://python.org/'), arguments=[OMInteger(integer=2, id=None), OMInteger(integer=2, id=None)], id=None, cdbase=None)
        >>> converter.to_python(o)
        [2, 2]
    """
    # Except for the conversion of operands, this duplicates the default
    # implementation of python's list conversion to openmath in py_openmath
    return om.OMApplication(elem=om.OMSymbol(cdbase=self._cdbase, cd='Python', name='list'),
                            arguments=l)
python
{ "resource": "" }
q277727
PickleConverter.OMTuple
test
def OMTuple(self, l):
    """
    Convert a tuple of OM objects into an OM object

    EXAMPLES::

        >>> from openmath import openmath as om
        >>> from openmath.convert_pickle import PickleConverter
        >>> converter = PickleConverter()
        >>> o = converter.OMTuple([om.OMInteger(2), om.OMInteger(3)]); o
        OMApplication(elem=OMSymbol(name='tuple', cd='Python', id=None, cdbase='http://python.org/'), arguments=[OMInteger(integer=2, id=None), OMInteger(integer=3, id=None)], id=None, cdbase=None)
        >>> converter.to_python(o)
        (2, 3)
    """
    return om.OMApplication(elem=self.OMSymbol(module='Python', name='tuple'),
                            arguments=l)
python
{ "resource": "" }
q277728
decode
test
def decode(data):
    """
    Decodes a PackBit encoded data.
    """
    data = bytearray(data)  # <- python 2/3 compatibility fix
    result = bytearray()
    pos = 0
    while pos < len(data):
        header_byte = data[pos]
        if header_byte > 127:
            header_byte -= 256  # interpret the header as a signed byte
        pos += 1
        if 0 <= header_byte <= 127:
            # literal run: copy the next header_byte + 1 bytes verbatim
            result.extend(data[pos:pos + header_byte + 1])
            pos += header_byte + 1
        elif header_byte == -128:
            # no-op header: skip
            pass
        else:
            # repeat run: output the next byte 1 - header_byte times
            result.extend([data[pos]] * (1 - header_byte))
            pos += 1
    return bytes(result)
python
{ "resource": "" }
q277729
encode
test
def encode(data):
    """
    Encodes data using PackBits encoding.
    """
    if len(data) == 0:
        return data
    if len(data) == 1:
        return b'\x00' + data
    data = bytearray(data)
    result = bytearray()
    buf = bytearray()
    pos = 0
    repeat_count = 0
    MAX_LENGTH = 127

    # we can safely start with RAW as empty RAW sequences
    # are handled by finish_raw()
    state = 'RAW'

    def finish_raw():
        if len(buf) == 0:
            return
        result.append(len(buf) - 1)
        result.extend(buf)
        buf[:] = bytearray()

    def finish_rle():
        result.append(256 - (repeat_count - 1))
        result.append(data[pos])

    while pos < len(data) - 1:
        current_byte = data[pos]
        if data[pos] == data[pos + 1]:
            if state == 'RAW':
                # end of RAW data
                finish_raw()
                state = 'RLE'
                repeat_count = 1
            elif state == 'RLE':
                if repeat_count == MAX_LENGTH:
                    # restart the encoding
                    finish_rle()
                    repeat_count = 0
                # move to next byte
                repeat_count += 1
        else:
            if state == 'RLE':
                repeat_count += 1
                finish_rle()
                state = 'RAW'
                repeat_count = 0
            elif state == 'RAW':
                if len(buf) == MAX_LENGTH:
                    # restart the encoding
                    finish_raw()
                buf.append(current_byte)
        pos += 1

    if state == 'RAW':
        buf.append(data[pos])
        finish_raw()
    else:
        repeat_count += 1
        finish_rle()
    return bytes(result)
python
{ "resource": "" }
q277730
Accounting.to_fixed
test
def to_fixed(self, value, precision):
    """Implementation that treats floats more like decimals.

    Fixes binary rounding issues (eg. (0.615).toFixed(2) === "0.61") that
    present problems for accounting and finance-related software.
    """
    precision = self._change_precision(
        precision, self.settings['number']['precision'])
    power = pow(10, precision)
    # Multiply up by precision, round accurately, then divide and format
    rounded = round(self.parse(value) * power) / power
    return '{0:.{1}f}'.format(rounded, precision)
python
{ "resource": "" }
q277731
Accounting.format
test
def format(self, number, **kwargs):
    """Format a given number.

    Format a number, with comma-separated thousands and
    custom precision/decimal places

    Localise by overriding the precision and thousand / decimal separators
    2nd parameter `precision` can be an object matching `settings.number`

    Args:
        number (TYPE): Description
        precision (TYPE): Description
        thousand (TYPE): Description
        decimal (TYPE): Description

    Returns:
        name (TYPE): Description
    """
    # Recursively format lists
    if check_type(number, 'list'):
        return map(lambda val: self.format(val, **kwargs), number)

    # Clean up number
    number = self.parse(number)

    # Build options object from the keyword params, extending defaults
    options = dict(self.settings['number'], **kwargs)

    # Clean up precision
    precision = self._change_precision(options['precision'])
    negative = (lambda num: "-" if num < 0 else "")(number)
    base = str(int(float(self.to_fixed(abs(number) or 0, precision))))
    mod = (lambda num: len(num) % 3 if len(num) > 3 else 0)(base)

    # Format the number:
    num = negative + (lambda num: base[0:num] if num else '')(mod)
    num += re.sub(r'(\d{3})(?=\d)', r'\1' + options['thousand'], base[mod:])
    num += (lambda val: options['decimal'] +
            self.to_fixed(abs(number), precision).split('.')[1]
            if val else '')(precision)
    return num
python
{ "resource": "" }
q277732
Accounting.as_money
test
def as_money(self, number, **options):
    """Format a number into currency.

    Usage: accounting.formatMoney(number, symbol, precision, thousandsSep,
           decimalSep, format)
    defaults: (0, "$", 2, ",", ".", "%s%v")

    Localise by overriding the symbol, precision, thousand / decimal
    separators and format. Second param can be an object matching
    `settings.currency` which is the easiest way.

    Args:
        number (TYPE): Description
        precision (TYPE): Description
        thousand (TYPE): Description
        decimal (TYPE): Description

    Returns:
        name (TYPE): Description
    """
    # Recursively format lists
    if isinstance(number, list):
        return map(lambda val: self.as_money(val, **options), number)

    # Clean up number
    decimal = options.get('decimal')
    number = self.parse(number, decimal)

    # Build options object from the keyword params, extending defaults
    options = dict(self.settings['currency'], **options)

    # Check format (returns object with pos, neg and zero)
    formats = self._check_currency_format(options['format'])

    # Choose which format to use for this value
    use_format = (lambda num: formats['pos'] if num > 0 else
                  formats['neg'] if num < 0 else formats['zero'])(number)

    precision = self._change_precision(options['precision'])
    thousands = options['thousand']
    decimal = options['decimal']
    formater = self.format(abs(number), precision=precision,
                           thousand=thousands, decimal=decimal)

    # Return with currency symbol added
    amount = use_format.replace('%s', options['symbol']).replace('%v', formater)
    return amount
python
{ "resource": "" }
q277733
to_array
test
def to_array(data):
    """
    Import a blosc array into a numpy array.

    Arguments:
        data: A blosc packed numpy array

    Returns:
        A numpy array with data from a blosc compressed array
    """
    try:
        numpy_data = blosc.unpack_array(data)
    except Exception as e:
        raise ValueError("Could not load numpy data. {}".format(e))
    return numpy_data
python
{ "resource": "" }
q277734
from_array
test
def from_array(array):
    """
    Export a numpy array to a blosc array.

    Arguments:
        array: The numpy array to compress to blosc array

    Returns:
        Bytes/String. A blosc compressed array
    """
    try:
        raw_data = blosc.pack_array(array)
    except Exception as e:
        raise ValueError("Could not compress data from array. {}".format(e))
    return raw_data
python
{ "resource": "" }
q277735
Workspace.add
test
def add(self, name, path):
    """Add a workspace entry in user config file."""
    if not os.path.exists(path):
        raise ValueError("Workspace path `%s` doesn't exist." % path)
    if self.exists(name):
        raise ValueError("Workspace `%s` already exists." % name)
    self.config["workspaces"][name] = {"path": path, "repositories": {}}
    self.config.write()
python
{ "resource": "" }
q277736
Workspace.remove
test
def remove(self, name):
    """Remove workspace from config file."""
    if not self.exists(name):
        raise ValueError("Workspace `%s` doesn't exist." % name)
    self.config["workspaces"].pop(name, 0)
    self.config.write()
python
{ "resource": "" }
q277737
Workspace.list
test
def list(self):
    """List all available workspaces."""
    ws_list = {}
    for key, value in self.config["workspaces"].items():
        ws_list[key] = dict({"name": key}, **value)
    return ws_list
python
{ "resource": "" }
q277738
Workspace.get
test
def get(self, name):
    """
    Get workspace infos from name.

    Return None if the workspace doesn't exist.
    """
    ws_list = self.list()
    return ws_list[name] if name in ws_list else None
python
{ "resource": "" }
q277739
Workspace.repository_exists
test
def repository_exists(self, workspace, repo):
    """Return True if workspace contains repository name."""
    if not self.exists(workspace):
        return False
    workspaces = self.list()
    return repo in workspaces[workspace]["repositories"]
python
{ "resource": "" }
q277740
Workspace.sync
test
def sync(self, ws_name):
    """Synchronise workspace's repositories."""
    path = self.config["workspaces"][ws_name]["path"]
    repositories = self.config["workspaces"][ws_name]["repositories"]
    logger = logging.getLogger(__name__)
    color = Color()
    for r in os.listdir(path):
        try:
            repo = Repository(os.path.join(path, r))
        except RepositoryError:
            continue
        else:
            repositories[r] = repo.path
    for repo_name, repo_path in repositories.items():
        logger.info(color.colored(" - %s" % repo_name, "blue"))
    self.config.write()
python
{ "resource": "" }
q277741
clone
test
def clone(url, path):
    """Clone a repository."""
    adapter = None
    if url[:4] == "git@" or url[-4:] == ".git":
        adapter = Git(path)
    if url[:6] == "svn://":
        adapter = Svn(path)
    if url[:6] == "bzr://":
        adapter = Bzr(path)
    if url[:9] == "ssh://hg@":
        adapter = Hg(path)
    if adapter is None:
        raise RepositoryAdapterNotFound(
            "Can't find adapter for `%s` repository url" % url)
    return adapter.clone(url)
python
{ "resource": "" }
q277742
check_version
test
def check_version():
    """
    Tells you if you have an old version of ndio.
    """
    import requests
    r = requests.get('https://pypi.python.org/pypi/ndio/json').json()
    r = r['info']['version']
    if r != version:
        print("A newer version of ndio is available. " +
              "'pip install -U ndio' to update.")
    return r
python
{ "resource": "" }
q277743
to_voxels
test
def to_voxels(array):
    """
    Converts an array to its voxel list.

    Arguments:
        array (numpy.ndarray): A numpy nd array. This must be boolean!

    Returns:
        A list of n-tuples
    """
    if type(array) is not numpy.ndarray:
        raise ValueError("array argument must be of type numpy.ndarray")
    return numpy.argwhere(array)
python
{ "resource": "" }
q277744
from_voxels
test
def from_voxels(voxels):
    """
    Converts a voxel list to an ndarray.

    Arguments:
        voxels (tuple[]): A list of coordinates indicating coordinates of
            populated voxels in an ndarray.

    Returns:
        numpy.ndarray The result of the transformation.
    """
    dimensions = len(voxels[0])
    size = []
    for d in range(dimensions):
        size.append(max(i[d] for i in voxels) + 1)
    result = numpy.zeros(size)
    for v in voxels:
        result[tuple(v)] = 1
    return result
python
{ "resource": "" }
q277745
Update.execute
test
def execute(self, args):
    """Execute update subcommand."""
    if args.name is not None:
        self.print_workspace(args.name)
    elif args.all is not None:
        self.print_all()
python
{ "resource": "" }
q277746
Update.print_update
test
def print_update(self, repo_name, repo_path):
    """Print repository update."""
    color = Color()
    self.logger.info(color.colored(
        "=> [%s] %s" % (repo_name, repo_path), "green"))
    try:
        repo = Repository(repo_path)
        repo.update()
    except RepositoryError as e:
        self.logger.error(e)
    print("\n")
python
{ "resource": "" }
q277747
Logger.set_console_handler
test
def set_console_handler(self, debug=False):
    """Set Console handler."""
    console = logging.StreamHandler()
    console.setFormatter(Formatter(LFORMAT))
    if not debug:
        console.setLevel(logging.INFO)
    self.addHandler(console)
python
{ "resource": "" }
q277748
Abstract.execute
test
def execute(self, command, path=None):
    """Execute command with subprocess.Popen and return the process."""
    logger = logging.getLogger(__name__)
    self.check_executable()
    logger.debug("Executing command `%s` (cwd: %s)" % (command, path))
    process = subprocess.Popen(
        command,
        shell=True,
        cwd=path,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, stderr = process.communicate()
    exit_code = process.wait()
    if stdout:
        logger.info(stdout.decode("utf-8"))
    if stderr:
        if exit_code != 0:
            logger.error(stderr.decode("utf-8"))
        else:
            logger.info(stderr.decode("utf-8"))
    return process
python
{ "resource": "" }
q277749
load
test
def load(png_filename):
    """
    Import a png file into a numpy array.

    Arguments:
        png_filename (str): A string filename of a png datafile

    Returns:
        A numpy array with data from the png file
    """
    # Expand filename to be absolute
    png_filename = os.path.expanduser(png_filename)
    try:
        img = Image.open(png_filename)
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion. {1}"
                         .format(png_filename, e))
    return numpy.array(img)
python
{ "resource": "" }
q277750
save
test
def save(filename, numpy_data):
    """
    Export a numpy array to a png file.

    Arguments:
        filename (str): A filename to which to save the png data
        numpy_data (numpy.ndarray OR str): The numpy array to save to png.
            OR a string: If a string is provided, it should be a binary png str

    Returns:
        str. The expanded filename that now holds the png data

    Raises:
        ValueError: If the save fails; for instance if the binary string
            data cannot be coerced into a png, or perhaps your
            numpy.ndarray is ill-formed?
    """
    # Expand filename to be absolute
    png_filename = os.path.expanduser(filename)
    if type(numpy_data) is str:
        fp = open(png_filename, "wb")
        fp.write(numpy_data)
        fp.close()
        return png_filename
    try:
        if numpy_data.dtype.name != 'uint8':
            m = 'I'
            img = Image.fromarray(numpy_data, mode=m)
        else:
            img = Image.fromarray(numpy_data)
        img.save(png_filename)
    except Exception as e:
        raise ValueError("Could not save png file {0}. {1}"
                         .format(png_filename, e))
    return png_filename
python
{ "resource": "" }
q277751
save_collection
test
def save_collection(png_filename_base, numpy_data, start_layers_at=1):
    """
    Export a numpy array to a set of png files, with each Z-index 2D
    array as its own 2D file.

    Arguments:
        png_filename_base: A filename template, such as "my-image-*.png"
            which will lead to a collection of files named
            "my-image-0.png", "my-image-1.png", etc.
        numpy_data: The numpy array data to save to png.

    Returns:
        Array. A list of expanded filenames that hold png data.
    """
    file_ext = png_filename_base.split('.')[-1]
    if file_ext in ['png']:
        # Filename is "name*.ext", set file_base to "name*".
        file_base = '.'.join(png_filename_base.split('.')[:-1])
        file_ext = ".png"
    else:
        # Filename is "name*", set file_base to "name*".
        # That is, extension wasn't included.
        file_base = png_filename_base
        file_ext = ".png"
    file_base_array = file_base.split('*')
    # The array of filenames to return
    output_files = []
    # Filename 0-padding
    i = start_layers_at
    for layer in numpy_data:
        layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext
        output_files.append(save(layer_filename, layer))
        i += 1
    return output_files
python
{ "resource": "" }
q277752
Status.print_workspace
test
def print_workspace(self, name):
    """Print workspace status."""
    path_list = find_path(name, self.config)
    if len(path_list) == 0:
        self.logger.error("No matches for `%s`" % name)
        return False
    for name, path in path_list.items():
        self.print_status(name, path)
python
{ "resource": "" }
q277753
Status.print_status
test
def print_status(self, repo_name, repo_path):
    """Print repository status."""
    color = Color()
    self.logger.info(color.colored(
        "=> [%s] %s" % (repo_name, repo_path), "green"))
    try:
        repo = Repository(repo_path)
        repo.status()
    except RepositoryError as e:
        self.logger.error(e)
    print("\n")
python
{ "resource": "" }
q277754
data.get_block_size
test
def get_block_size(self, token, resolution=None):
    """
    Gets the block-size for a given token at a given resolution.

    Arguments:
        token (str): The token to inspect
        resolution (int : None): The resolution at which to inspect data.
            If none is specified, uses the minimum available.

    Returns:
        int[3]: The xyz blocksize.
    """
    cdims = self.get_metadata(token)['dataset']['cube_dimension']
    if resolution is None:
        resolution = min(cdims.keys())
    return cdims[str(resolution)]
python
{ "resource": "" }
q277755
data._post_cutout_no_chunking_blosc
test
def _post_cutout_no_chunking_blosc(self, token, channel, x_start, y_start,
                                   z_start, data, resolution):
    """
    Accepts data in zyx. !!!
    """
    data = numpy.expand_dims(data, axis=0)
    blosc_data = blosc.pack_array(data)
    url = self.url("{}/{}/blosc/{}/{},{}/{},{}/{},{}/0,0/".format(
        token, channel, resolution,
        x_start, x_start + data.shape[3],
        y_start, y_start + data.shape[2],
        z_start, z_start + data.shape[1]
    ))
    req = self.remote_utils.post_url(url, data=blosc_data, headers={
        'Content-Type': 'application/octet-stream'
    })
    if req.status_code != 200:
        raise RemoteDataUploadError(req.text)
    else:
        return True
python
{ "resource": "" }
q277756
load
test
def load(tiff_filename):
    """
    Import a TIFF file into a numpy array.

    Arguments:
        tiff_filename: A string filename of a TIFF datafile

    Returns:
        A numpy array with data from the TIFF file
    """
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    try:
        img = tiff.imread(tiff_filename)
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion. {1}"
                         .format(tiff_filename, e))
    return numpy.array(img)
python
{ "resource": "" }
q277757
save
test
def save(tiff_filename, numpy_data):
    """
    Export a numpy array to a TIFF file.

    Arguments:
        tiff_filename: A filename to which to save the TIFF data
        numpy_data: The numpy array to save to TIFF

    Returns:
        String. The expanded filename that now holds the TIFF data
    """
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    if type(numpy_data) is str:
        fp = open(tiff_filename, "wb")
        fp.write(numpy_data)
        fp.close()
        return tiff_filename
    try:
        tiff.imsave(tiff_filename, numpy_data)
    except Exception as e:
        raise ValueError("Could not save TIFF file {0}. {1}"
                         .format(tiff_filename, e))
    return tiff_filename
python
{ "resource": "" }
q277758
load_tiff_multipage
test
def load_tiff_multipage(tiff_filename, dtype='float32'):
    """
    Load a multipage tiff into a single variable in x,y,z format.

    Arguments:
        tiff_filename: Filename of source data
        dtype: data type to use for the returned tensor

    Returns:
        Array containing contents from input tiff file in xyz order
    """
    if not os.path.isfile(tiff_filename):
        raise RuntimeError('could not find file "%s"' % tiff_filename)
    # load the data from multi-layer TIF files
    data = tiff.imread(tiff_filename)
    im = []
    while True:
        Xi = numpy.array(data, dtype=dtype)
        if Xi.ndim == 2:
            Xi = Xi[numpy.newaxis, ...]  # add slice dimension
        im.append(Xi)
        try:
            data.seek(data.tell() + 1)
        except EOFError:
            break  # this just means hit end of file (not really an error)
    im = numpy.concatenate(im, axis=0)  # list of 2d -> tensor
    im = numpy.rollaxis(im, 1)
    im = numpy.rollaxis(im, 2)
    return im
python
{ "resource": "" }
q277759
Config.write
test
def write(self):
    """
    Write config in configuration file.

    Data must be a dict.
    """
    file = open(self.config_file, "w+")
    file.write(yaml.dump(dict(self), default_flow_style=False))
    file.close()
python
{ "resource": "" }
q277760
Bzr.clone
test
def clone(self, url):
    """Clone repository from url."""
    return self.execute("%s branch %s %s" % (self.executable, url, self.path))
python
{ "resource": "" }
q277761
get_version
test
def get_version():
    """Get version from package resources."""
    requirement = pkg_resources.Requirement.parse("yoda")
    provider = pkg_resources.get_provider(requirement)
    return provider.version
python
{ "resource": "" }
q277762
mix_and_match
test
def mix_and_match(name, greeting='Hello', yell=False):
    '''Mixing and matching positional args and keyword options.'''
    say = '%s, %s' % (greeting, name)
    if yell:
        print('%s!' % say.upper())
    else:
        print('%s.' % say)
python
{ "resource": "" }
q277763
option_decorator
test
def option_decorator(name, greeting, yell):
    '''Same as mix_and_match, but using the @option decorator.'''
    # Use the @option decorator when you need more control over the
    # command line options.
    say = '%s, %s' % (greeting, name)
    if yell:
        print('%s!' % say.upper())
    else:
        print('%s.' % say)
python
{ "resource": "" }
q277764
neuroRemote.reserve_ids
test
def reserve_ids(self, token, channel, quantity):
    """
    Requests a list of next-available-IDs from the server.

    Arguments:
        quantity (int): The number of IDs to reserve

    Returns:
        int[quantity]: List of IDs you've been granted
    """
    quantity = str(quantity)
    url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Invalid req: {}'.format(req.status_code))
    out = req.json()
    return [out[0] + i for i in range(out[1])]
python
{ "resource": "" }
q277765
neuroRemote.merge_ids
test
def merge_ids(self, token, channel, ids, delete=False):
    """
    Call the restful endpoint to merge two RAMON objects into one.

    Arguments:
        token (str): The token to inspect
        channel (str): The channel to inspect
        ids (int[]): the list of the IDs to merge
        delete (bool : False): Whether to delete after merging.

    Returns:
        json: The ID as returned by ndstore
    """
    url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Could not merge ids {}'.format(
            ','.join([str(i) for i in ids])))
    if delete:
        self.delete_ramon(token, channel, ids[1:])
    return True
python
{ "resource": "" }
q277766
neuroRemote.propagate
test
def propagate(self, token, channel):
    """
    Kick off the propagate function on the remote server.

    Arguments:
        token (str): The token to propagate
        channel (str): The channel to propagate

    Returns:
        boolean: Success
    """
    if self.get_propagate_status(token, channel) != u'0':
        return
    url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))
    return True
python
{ "resource": "" }
q277767
resources.list_projects
test
def list_projects(self, dataset_name):
    """
    Lists a set of projects related to a dataset.

    Arguments:
        dataset_name (str): Dataset name to search projects for

    Returns:
        dict: Projects found based on dataset query
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/"
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
python
{ "resource": "" }
q277768
resources.get_dataset
test
def get_dataset(self, name):
    """
    Returns info regarding a particular dataset.

    Arguments:
        name (str): Dataset name

    Returns:
        dict: Dataset information
    """
    url = self.url() + "/resource/dataset/{}".format(name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
python
{ "resource": "" }
q277769
resources.list_datasets
test
def list_datasets(self, get_global_public):
    """
    Lists datasets in resources. Setting 'get_global_public' to 'True'
    will retrieve all public datasets in cloud. 'False' will get user's
    public datasets.

    Arguments:
        get_global_public (bool): True if user wants all public datasets
            in cloud. False if user wants only their public datasets.

    Returns:
        dict: Returns datasets in JSON format
    """
    appending = ""
    if get_global_public:
        appending = "public"
    url = self.url() + "/resource/{}dataset/".format(appending)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
python
{ "resource": "" }
q277770
Show.parse
test
def parse(self):
    """Parse show subcommand."""
    parser = self.subparser.add_parser(
        "show",
        help="Show workspace details",
        description="Show workspace details.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true', help="All workspaces")
    group.add_argument('name', type=str, help="Workspace name", nargs='?')
python
{ "resource": "" }
q277771
Show.execute
test
def execute(self, args):
    """Execute show subcommand."""
    if args.name is not None:
        self.show_workspace(slashes2dash(args.name))
    elif args.all is not None:
        self.show_all()
python
{ "resource": "" }
q277772
Show.show_workspace
test
def show_workspace(self, name):
    """Show specific workspace."""
    if not self.workspace.exists(name):
        raise ValueError("Workspace `%s` doesn't exist." % name)
    color = Color()
    workspaces = self.workspace.list()
    self.logger.info("<== %s workspace ==>" % color.colored(name, "green"))
    self.logger.info("\tPath: %s" % workspaces[name]["path"])
    self.logger.info("\tNumber of repositories: %s" % color.colored(
        len(workspaces[name]["repositories"]), "yellow"))
    repo_colored = color.colored("Repositories", "blue")
    path_colored = color.colored("Path", "blue")
    trepositories = PrettyTable(
        [repo_colored, path_colored, color.colored("+", "blue")])
    trepositories.align[repo_colored] = "l"
    trepositories.align[path_colored] = "l"
    for repo_name in workspaces[name]["repositories"]:
        fullname = "%s/%s" % (name, repo_name)
        fullpath = find_path(fullname, self.config)[fullname]
        try:
            repo = Repository(fullpath)
            repo_scm = repo.get_scm()
        except RepositoryAdapterNotFound:
            repo_scm = None
        trepositories.add_row(
            [color.colored(repo_name, "cyan"), fullpath, repo_scm])
    self.logger.info(trepositories)
python
{ "resource": "" }
q277773
Show.show_all
test
def show_all(self):
    """Show details for all workspaces."""
    for ws in self.workspace.list().keys():
        self.show_workspace(ws)
        print("\n\n")
python
{ "resource": "" }
q277774
Remote.url
test
def url(self, endpoint=''):
    """
    Get the base URL of the Remote.

    Arguments:
        endpoint (str: ''): An optional endpoint path to append

    Returns:
        `str` base URL
    """
    if not endpoint.startswith('/'):
        endpoint = "/" + endpoint
    return self.protocol + "://" + self.hostname + endpoint
python
{ "resource": "" }
q277775
_guess_format_from_extension
test
def _guess_format_from_extension(ext):
    """
    Guess the appropriate data type from file extension.

    Arguments:
        ext: The file extension (period optional)

    Returns:
        String. The format (without leading period), or False if none was
        found or couldn't be guessed
    """
    ext = ext.strip('.')
    # We look through FILE_FORMATS for this extension.
    # - If it appears zero times, return False. We can't guess.
    # - If it appears once, we can simply return that format.
    # - If it appears more than once, we can't guess (it's ambiguous,
    #   e.g .m = RAMON or MATLAB)
    formats = []
    for fmt in FILE_FORMATS:
        if ext in FILE_FORMATS[fmt]:
            formats.append(fmt)
    if formats == [] or len(formats) > 1:
        return False
    return formats[0]
python
{ "resource": "" }
q277776
open
test
def open(in_file, in_fmt=None):
    """
    Reads in a file from disk.

    Arguments:
        in_file: The name of the file to read in
        in_fmt: The format of in_file, if you want to be explicit

    Returns:
        numpy.ndarray
    """
    fmt = in_file.split('.')[-1]
    if in_fmt:
        fmt = in_fmt
    fmt = fmt.lower()
    if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:
        return Image.open(in_file)
    else:
        raise NotImplementedError("Cannot open file of type {fmt}".format(fmt=fmt))
python
{ "resource": "" }
q277777
convert
test
def convert(in_file, out_file, in_fmt="", out_fmt=""):
    """
    Converts in_file to out_file, guessing datatype in the absence of
    in_fmt and out_fmt.

    Arguments:
        in_file: The name of the (existing) datafile to read
        out_file: The name of the file to create with converted data
        in_fmt: Optional. The format of incoming data, if not guessable
        out_fmt: Optional. The format of outgoing data, if not guessable

    Returns:
        String. Output filename
    """
    # First verify that in_file exists and out_file doesn't.
    in_file = os.path.expanduser(in_file)
    out_file = os.path.expanduser(out_file)
    if not os.path.exists(in_file):
        raise IOError("Input file {0} does not exist, stopping..."
                      .format(in_file))
    # Get formats, either by explicitly naming them or by guessing.
    # TODO: It'd be neat to check here if an explicit fmt matches the guess.
    in_fmt = in_fmt.lower() or _guess_format_from_extension(
        in_file.split('.')[-1].lower())
    out_fmt = out_fmt.lower() or _guess_format_from_extension(
        out_file.split('.')[-1].lower())
    if not in_fmt or not out_fmt:
        raise ValueError("Cannot determine conversion formats.")
    if in_fmt == out_fmt:
        # This is the case when this module (intended for LONI) is used
        # indiscriminately to 'funnel' data into one format.
        shutil.copyfile(in_file, out_file)
        return out_file
    # Import
    if in_fmt == 'hdf5':
        from . import hdf5
        data = hdf5.load(in_file)
    elif in_fmt == 'tiff':
        from . import tiff
        data = tiff.load(in_file)
    elif in_fmt == 'png':
        from . import png
        data = png.load(in_file)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
    # Export
    if out_fmt == 'hdf5':
        from . import hdf5
        return hdf5.save(out_file, data)
    elif out_fmt == 'tiff':
        from . import tiff
        return tiff.save(out_file, data)
    elif out_fmt == 'png':
        from . import png
        return png.save(out_file, data)
    return _fail_pair_conversion(in_fmt, out_fmt)
python
{ "resource": "" }
q277778
grute.build_graph
test
def build_graph(self, project, site, subject, session, scan, size,
                email=None, invariants=Invariants.ALL,
                fiber_file=DEFAULT_FIBER_FILE, atlas_file=None,
                use_threads=False, callback=None):
    """
    Builds a graph using the graph-services endpoint.

    Arguments:
        project (str): The project to use
        site (str): The site in question
        subject (str): The subject's identifier
        session (str): The session (per subject)
        scan (str): The scan identifier
        size (str): Whether to return a big (grute.BIG) or small
            (grute.SMALL) graph. For a better explanation, see m2g.io.
        email (str : self.email)*: An email to notify
        invariants (str[]: Invariants.ALL)*: An array of invariants to
            compute. You can use the grute.Invariants class to construct
            a list, or simply pass grute.Invariants.ALL to compute them all.
        fiber_file (str: DEFAULT_FIBER_FILE)*: A local filename of an MRI
            Studio .dat file
        atlas_file (str: None)*: A local atlas file, in NIFTI .nii format.
            If none is specified, the Desikan atlas is used by default.
        use_threads (bool: False)*: Whether to run the download in a
            Python thread. If set to True, the call to `build_graph` will
            end quickly, and the `callback` will be called with the
            returned status-code of the restful call as its only argument.
        callback (function: None)*: The function to run upon completion
            of the call, if using threads. (Will not be called if
            use_threads is set to False.)

    Returns:
        HTTP Response if use_threads is False. Otherwise, None

    Raises:
        ValueError: When the supplied values are invalid (contain invalid
            characters, bad email address supplied, etc.)
        RemoteDataNotFoundError: When the data cannot be processed due to
            a server error.
    """
    if email is None:
        email = self.email
    if not set(invariants) <= set(Invariants.ALL):
        raise ValueError("Invariants must be a subset of Invariants.ALL.")
    if use_threads and callback is not None:
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")
        # Once we get here, we know the callback is valid.
    if size not in [self.BIG, self.SMALL]:
        raise ValueError("size must be either grute.BIG or grute.SMALL.")
    url = "buildgraph/{}/{}/{}/{}/{}/{}/{}/{}/".format(
        project, site, subject, session, scan, size, email,
        "/".join(invariants)
    )
    if " " in url:
        raise ValueError("Arguments must not contain spaces.")
    if use_threads:
        # Run in the background.
        download_thread = threading.Thread(
            target=self._run_build_graph,
            args=[url, fiber_file, atlas_file, callback]
        )
        download_thread.start()
    else:
        # Run in the foreground.
        return self._run_build_graph(url, fiber_file, atlas_file)
    return
python
{ "resource": "" }
q277779
grute.compute_invariants
test
def compute_invariants(self, graph_file, input_format,
                       invariants=Invariants.ALL, email=None,
                       use_threads=False, callback=None):
    """
    Compute invariants from an existing GraphML file using the remote
    grute graph services.

    Arguments:
        graph_file (str): The filename of the graphml file
        input_format (str): One of grute.GraphFormats
        invariants (str[]: Invariants.ALL)*: An array of grute.Invariants
            to compute on the graph
        email (str: self.email)*: The email to notify upon completion
        use_threads (bool: False)*: Whether to use Python threads to run
            computation in the background when waiting for the server to
            return the invariants
        callback (function: None)*: The function to run upon completion
            of the call, if using threads. (Will not be called if
            use_threads is set to False.)

    Returns:
        HTTP Response if use_threads is False. Otherwise, None

    Raises:
        ValueError: If the graph file does not exist, or if there are
            issues with the passed arguments
        RemoteDataUploadError: If there is an issue packing the file
        RemoteError: If the server experiences difficulty computing invs
    """
    if email is None:
        email = self.email
    if input_format not in GraphFormats._any:
        raise ValueError("Invalid input format, {}.".format(input_format))
    if not set(invariants) <= set(Invariants.ALL):
        raise ValueError("Invariants must be a subset of Invariants.ALL.")
    if use_threads and callback is not None:
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")
    url = "graphupload/{}/{}/{}/".format(email, input_format,
                                         "/".join(invariants))
    if " " in url:
        raise ValueError("Arguments cannot have spaces in them.")
    if not os.path.exists(graph_file):
        raise ValueError("File {} does not exist.".format(graph_file))
    if use_threads:
        # Run in the background.
        upload_thread = threading.Thread(
            target=self._run_compute_invariants,
            args=[url, graph_file, callback]
        )
        upload_thread.start()
    else:
        # Run in the foreground.
        return self._run_compute_invariants(url, graph_file)
    return
python
{ "resource": "" }
q277780
grute.convert_graph
test
def convert_graph(self, graph_file, input_format, output_formats,
                  email=None, use_threads=False, callback=None):
    """
    Convert a graph from one GraphFormat to another.

    Arguments:
        graph_file (str): Filename of the file to convert
        input_format (str): A grute.GraphFormats
        output_formats (str[]): A grute.GraphFormats
        email (str: self.email)*: The email to notify
        use_threads (bool: False)*: Whether to use Python threads to run
            computation in the background when waiting for the server
        callback (function: None)*: The function to run upon completion
            of the call, if using threads. (Will not be called if
            use_threads is set to False.)

    Returns:
        HTTP Response if use_threads=False. Else, no return value.

    Raises:
        RemoteDataUploadError: If there's an issue uploading the data
        RemoteError: If there's a server-side issue
        ValueError: If there's a problem with the supplied arguments
    """
    if email is None:
        email = self.email
    if input_format not in GraphFormats._any:
        raise ValueError("Invalid input format {}.".format(input_format))
    if not set(output_formats) <= set(GraphFormats._any):
        raise ValueError("Output formats must be a GraphFormats.")
    if use_threads and callback is not None:
        if not hasattr(callback, '__call__'):
            raise ValueError("callback must be a function.")
        if len(inspect.getargspec(callback).args) != 1:
            raise ValueError("callback must take exactly 1 argument.")
    if not os.path.exists(graph_file):
        raise ValueError("No such file, {}!".format(graph_file))
    url = "convert/{}/{}/{}/l".format(email, input_format,
                                      ','.join(output_formats))
    if " " in url:
        raise ValueError("Spaces are not permitted in arguments.")
    if use_threads:
        # Run in the background.
        convert_thread = threading.Thread(
            target=self._run_convert_graph,
            args=[url, graph_file, callback]
        )
        convert_thread.start()
    else:
        # Run in the foreground.
        return self._run_convert_graph(url, graph_file)
    return
python
{ "resource": "" }
q277781
to_dict
test
def to_dict(ramons, flatten=False):
    """
    Converts a RAMON object list to a JSON-style dictionary. Useful for
    going from an array of RAMONs to a dictionary, indexed by ID.

    Arguments:
        ramons (RAMON[]): A list of RAMON objects
        flatten (boolean: False): Not implemented

    Returns:
        dict: A python dictionary of RAMON objects.
    """
    if type(ramons) is not list:
        ramons = [ramons]
    out_ramons = {}
    for r in ramons:
        out_ramons[r.id] = {
            "id": r.id,
            "type": _reverse_ramon_types[type(r)],
            "metadata": vars(r)
        }
    return out_ramons
python
{ "resource": "" }
q277782
AnnotationType.RAMON
test
def RAMON(typ):
    """
    Takes str or int, returns class type
    """
    if six.PY2:
        lookup = [str, unicode]
    elif six.PY3:
        lookup = [str]
    if type(typ) is int:
        return _ramon_types[typ]
    elif type(typ) in lookup:
        return _ramon_types[_types[typ]]
python
{ "resource": "" }
q277783
neurodata.delete_channel
test
def delete_channel(self, channel_name, project_name, dataset_name):
    """
    Deletes a channel given its name, name of its project, and name of
    its dataset.

    Arguments:
        channel_name (str): Channel name
        project_name (str): Project name
        dataset_name (str): Dataset name

    Returns:
        bool: True if channel deleted, False if not
    """
    return self.resources.delete_channel(channel_name, project_name,
                                         dataset_name)
python
{ "resource": "" }
q277784
NDIngest.add_dataset
test
def add_dataset(self, dataset_name, imagesize, voxelres, offset=None,
                timerange=None, scalinglevels=None, scaling=None):
    """
    Add a new dataset to the ingest.

    Arguments:
        dataset_name (str): Dataset Name is the overarching name of the
            research effort. Standard naming convention is to do
            LabNamePublicationYear or LeadResearcherCurrentYear.
        imagesize (int, int, int): Image size is the pixel count
            dimensions of the data. For example, if the data is stored as
            a series of 100 slices each 2100x2000 pixel TIFF images, the
            X,Y,Z dimensions are (2100, 2000, 100).
        voxelres (float, float, float): Voxel Resolution is the number of
            voxels per unit pixel. We store X,Y,Z voxel resolution
            separately.
        offset (int, int, int): If your data is not well aligned and
            there is "excess" image data you do not wish to examine, but
            which is present in your images, offset is how you specify
            where your actual image starts. Offset is provided as a pixel
            coordinate offset from origin which specifies the "actual"
            origin of the image. The offset is for X,Y,Z dimensions.
        timerange (int, int): Time Range is a parameter to support
            storage of Time Series data, so the value of the tuple is a 0
            to X range of how many images over time were taken. It takes
            2 inputs: timeStepStart and timeStepStop.
        scalinglevels (int): Scaling levels is the number of levels the
            data is scalable to (how many zoom levels are present in the
            data). The highest resolution of the data is at scaling level
            0, and for each level up the data is down sampled by 2x2 (per
            slice). To learn more about the sampling service used, visit
            the propagation service page.
        scaling (int): Scaling is the scaling method of the data being
            stored. 0 corresponds to a Z-slice orientation (as in a
            collection of tiff images in which each tiff is a slice on
            the z plane) where data will be scaled only on the xy plane,
            not the z plane. 1 corresponds to an isotropic orientation
            (in which each tiff is a slice on the y plane) where data is
            scaled along all axes.

    Returns:
        None
    """
    self.dataset = (dataset_name.strip().replace(" ", ""), imagesize,
                    voxelres, offset, timerange, scalinglevels, scaling)
python
{ "resource": "" }
q277785
NDIngest.nd_json
test
def nd_json(self, dataset, project, channel_list, metadata):
    """
    Generate ND json object.
    """
    nd_dict = {}
    nd_dict['dataset'] = self.dataset_dict(*dataset)
    nd_dict['project'] = self.project_dict(*project)
    nd_dict['metadata'] = metadata
    nd_dict['channels'] = {}
    for channel_name, value in channel_list.items():
        nd_dict['channels'][channel_name] = self.channel_dict(*value)
    return json.dumps(nd_dict, sort_keys=True, indent=4)
python
{ "resource": "" }
q277786
NDIngest.dataset_dict
test
def dataset_dict(self, dataset_name, imagesize, voxelres, offset,
                 timerange, scalinglevels, scaling):
    """Generate the dataset dictionary."""
    dataset_dict = {}
    dataset_dict['dataset_name'] = dataset_name
    dataset_dict['imagesize'] = imagesize
    dataset_dict['voxelres'] = voxelres
    if offset is not None:
        dataset_dict['offset'] = offset
    if timerange is not None:
        dataset_dict['timerange'] = timerange
    if scalinglevels is not None:
        dataset_dict['scalinglevels'] = scalinglevels
    if scaling is not None:
        dataset_dict['scaling'] = scaling
    return dataset_dict
python
{ "resource": "" }
q277787
NDIngest.channel_dict
test
def channel_dict(self, channel_name, datatype, channel_type, data_url,
                 file_format, file_type, exceptions, resolution,
                 windowrange, readonly):
    """
    Generate the channel dictionary.
    """
    channel_dict = {}
    channel_dict['channel_name'] = channel_name
    channel_dict['datatype'] = datatype
    channel_dict['channel_type'] = channel_type
    if exceptions is not None:
        channel_dict['exceptions'] = exceptions
    if resolution is not None:
        channel_dict['resolution'] = resolution
    if windowrange is not None:
        channel_dict['windowrange'] = windowrange
    if readonly is not None:
        channel_dict['readonly'] = readonly
    channel_dict['data_url'] = data_url
    channel_dict['file_format'] = file_format
    channel_dict['file_type'] = file_type
    return channel_dict
python
{ "resource": "" }
q277788
NDIngest.project_dict
test
def project_dict(self, project_name, token_name, public):
    """
    Generate the project dictionary.
    """
    project_dict = {}
    project_dict['project_name'] = project_name
    if token_name is not None:
        if token_name == '':
            project_dict['token_name'] = project_name
        else:
            project_dict['token_name'] = token_name
    else:
        project_dict['token_name'] = project_name
    if public is not None:
        project_dict['public'] = public
    return project_dict
python
{ "resource": "" }
q277789
NDIngest.identify_imagesize
test
def identify_imagesize(self, image_type, image_path='/tmp/img.'):
    """
    Identify the image size using the data location and other parameters.
    """
    dims = ()
    try:
        if image_type.lower() == 'png':
            dims = np.shape(ndpng.load('{}{}'.format(image_path, image_type)))
        elif image_type.lower() in ('tif', 'tiff'):
            dims = np.shape(ndtiff.load('{}{}'.format(image_path, image_type)))
        else:
            raise ValueError("Unsupported image type.")
    except Exception:
        raise OSError('The file was not accessible at {}{}'.format(
            image_path, image_type))
    return dims[::-1]
python
{ "resource": "" }
q277790
NDIngest.put_data
test
def put_data(self, data):
    """
    Try to post data to the server.
    """
    URLPath = self.oo.url("autoIngest/")
    # URLPath = 'https://{}/ca/autoIngest/'.format(self.oo.site_host)
    response = requests.post(URLPath, data=json.dumps(data), verify=False)
    if response.status_code != 200:
        raise OSError("Error in posting JSON file: {}".format(
            response.status_code))
    print("From ndio: {}".format(response.content))
python
{ "resource": "" }
q277791
find_path
test
def find_path(name, config, wsonly=False):
    """Find path for given workspace and|or repository."""
    workspace = Workspace(config)
    config = config["workspaces"]
    path_list = {}
    if name.find('/') != -1:
        wsonly = False
        try:
            ws, repo = name.split('/')
        except ValueError:
            raise ValueError("There are too many / characters in the `name` "
                             "argument. Argument syntax: "
                             "`workspace/repository`.")
        if workspace.exists(ws):
            if repo in config[ws]["repositories"]:
                path_name = "%s/%s" % (ws, repo)
                path_list[path_name] = config[ws]["repositories"][repo]
    for ws_name, ws in sorted(config.items()):
        if name == ws_name:
            if wsonly is True:
                return {ws_name: ws["path"]}
            repositories = sorted(config[ws_name]["repositories"].items())
            for repo_name, repo_path in repositories:
                path_list["%s/%s" % (ws_name, repo_name)] = repo_path
            break
        for repo_name, repo_path in sorted(ws["repositories"].items()):
            if repo_name == name:
                path_list["%s/%s" % (ws_name, repo_name)] = repo_path
    return path_list
python
{ "resource": "" }
q277792
metadata.get_public_tokens
test
def get_public_tokens(self):
    """
    Get a list of public tokens available on this server.

    Arguments:
        None

    Returns:
        str[]: list of public tokens
    """
    r = self.remote_utils.get_url(self.url() + "public_tokens/")
    return r.json()
python
{ "resource": "" }
q277793
metadata.get_proj_info
test
def get_proj_info(self, token):
    """
    Return the project info for a given token.

    Arguments:
        token (str): Token to return information for

    Returns:
        JSON: representation of proj_info
    """
    r = self.remote_utils.get_url(self.url() + "{}/info/".format(token))
    return r.json()
python
{ "resource": "" }
q277794
metadata.set_metadata
test
def set_metadata(self, token, data):
    """
    Insert new metadata into the OCP metadata database.

    Arguments:
        token (str): Token of the datum to set
        data (str): A dictionary to insert as metadata. Include `secret`.

    Returns:
        json: Info of the inserted ID (convenience) or an error message.

    Throws:
        RemoteDataUploadError: If the token is already populated, or if
            there is an issue with your specified `secret` key.
    """
    req = requests.post(self.meta_url("metadata/ocp/set/" + token),
                        json=data, verify=False)
    if req.status_code != 200:
        raise RemoteDataUploadError(
            "Could not upload metadata: " + req.json()['message']
        )
    return req.json()
python
{ "resource": "" }
q277795
remote_utils.get_url
test
def get_url(self, url):
    """
    Get a response object for a given url.

    Arguments:
        url (str): The url to make a get request to

    Returns:
        obj: The response object
    """
    try:
        req = requests.get(url, headers={
            'Authorization': 'Token {}'.format(self._user_token)
        }, verify=False)
        if req.status_code == 403:
            raise ValueError("Access Denied")
        else:
            return req
    except requests.exceptions.ConnectionError as e:
        if str(e) == '403 Client Error: Forbidden':
            raise ValueError('Access Denied')
        else:
            raise e
python
{ "resource": "" }
q277796
remote_utils.post_url
test
def post_url(self, url, token='', json=None, data=None, headers=None):
    """
    Returns a post request object taking in a url, user token, and
    possible json information.

    Arguments:
        url (str): The url to make post to
        token (str): The authentication token
        json (dict): json info to send

    Returns:
        obj: Post request object
    """
    if token == '':
        token = self._user_token
    if headers:
        headers.update({'Authorization': 'Token {}'.format(token)})
    else:
        headers = {'Authorization': 'Token {}'.format(token)}
    if json:
        return requests.post(url, headers=headers, json=json, verify=False)
    if data:
        return requests.post(url, headers=headers, data=data, verify=False)
    return requests.post(url, headers=headers, verify=False)
python
{ "resource": "" }
q277797
remote_utils.delete_url
test
def delete_url(self, url, token=''):
    """
    Returns a delete request object taking in a url and user token.

    Arguments:
        url (str): The url to make post to
        token (str): The authentication token

    Returns:
        obj: Delete request object
    """
    if token == '':
        token = self._user_token
    return requests.delete(url, headers={
        'Authorization': 'Token {}'.format(token)}, verify=False)
python
{ "resource": "" }
q277798
load
test
def load(hdf5_filename):
    """
    Import a HDF5 file into a numpy array.

    Arguments:
        hdf5_filename: A string filename of a HDF5 datafile

    Returns:
        A numpy array with data from the HDF5 file
    """
    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)
    try:
        f = h5py.File(hdf5_filename, "r")
        # neurodata stores data inside the 'cutout' h5 dataset
        data_layers = f.get('image').get('CUTOUT')
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion. {1}".format(
            hdf5_filename, e))
    return numpy.array(data_layers)
python
{ "resource": "" }
q277799
save
test
def save(hdf5_filename, array):
    """
    Export a numpy array to a HDF5 file.

    Arguments:
        hdf5_filename (str): A filename to which to save the HDF5 data
        array (numpy.ndarray): The numpy array to save to HDF5

    Returns:
        String. The expanded filename that now holds the HDF5 data
    """
    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)
    try:
        h = h5py.File(hdf5_filename, "w")
        h.create_dataset('CUTOUT', data=array)
        h.close()
    except Exception as e:
        raise ValueError("Could not save HDF5 file {0}. {1}".format(
            hdf5_filename, e))
    return hdf5_filename
python
{ "resource": "" }