Column schema (name, type, range):

    id                int32     0 to 252k
    repo              string    length 7 to 55
    path              string    length 4 to 127
    func_name         string    length 1 to 88
    original_string   string    length 75 to 19.8k
    language          string    1 distinct value
    code              string    length 75 to 19.8k
    code_tokens       sequence
    docstring         string    length 3 to 17.3k
    docstring_tokens  sequence
    sha               string    length 40 to 40
    url               string    length 87 to 242
249,100
jmgilman/Neolib
neolib/pyamf/amf3.py
Decoder.readObject
def readObject(self):
    """
    Reads an object from the stream.
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        obj = self.context.getObject(ref >> 1)

        if obj is None:
            raise pyamf.ReferenceError('Unknown reference %d' % (ref >> 1,))

        if self.use_proxies is True:
            obj = self.readProxy(obj)

        return obj

    ref >>= 1

    class_def = self._getClassDefinition(ref)
    alias = class_def.alias

    obj = alias.createInstance(codec=self)
    obj_attrs = dict()

    self.context.addObject(obj)

    if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
        obj.__readamf__(DataInput(self))

        if self.use_proxies is True:
            obj = self.readProxy(obj)

        return obj
    elif class_def.encoding == ObjectEncoding.DYNAMIC:
        self._readStatic(class_def, obj_attrs)
        self._readDynamic(class_def, obj_attrs)
    elif class_def.encoding == ObjectEncoding.STATIC:
        self._readStatic(class_def, obj_attrs)
    else:
        raise pyamf.DecodeError("Unknown object encoding")

    alias.applyAttributes(obj, obj_attrs, codec=self)

    if self.use_proxies is True:
        obj = self.readProxy(obj)

    return obj
python
def readObject(self):
    """
    Reads an object from the stream.
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        obj = self.context.getObject(ref >> 1)

        if obj is None:
            raise pyamf.ReferenceError('Unknown reference %d' % (ref >> 1,))

        if self.use_proxies is True:
            obj = self.readProxy(obj)

        return obj

    ref >>= 1

    class_def = self._getClassDefinition(ref)
    alias = class_def.alias

    obj = alias.createInstance(codec=self)
    obj_attrs = dict()

    self.context.addObject(obj)

    if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
        obj.__readamf__(DataInput(self))

        if self.use_proxies is True:
            obj = self.readProxy(obj)

        return obj
    elif class_def.encoding == ObjectEncoding.DYNAMIC:
        self._readStatic(class_def, obj_attrs)
        self._readDynamic(class_def, obj_attrs)
    elif class_def.encoding == ObjectEncoding.STATIC:
        self._readStatic(class_def, obj_attrs)
    else:
        raise pyamf.DecodeError("Unknown object encoding")

    alias.applyAttributes(obj, obj_attrs, codec=self)

    if self.use_proxies is True:
        obj = self.readProxy(obj)

    return obj
[ "def", "readObject", "(", "self", ")", ":", "ref", "=", "self", ".", "readInteger", "(", "False", ")", "if", "ref", "&", "REFERENCE_BIT", "==", "0", ":", "obj", "=", "self", ".", "context", ".", "getObject", "(", "ref", ">>", "1", ")", "if", "obj", "is", "None", ":", "raise", "pyamf", ".", "ReferenceError", "(", "'Unknown reference %d'", "%", "(", "ref", ">>", "1", ",", ")", ")", "if", "self", ".", "use_proxies", "is", "True", ":", "obj", "=", "self", ".", "readProxy", "(", "obj", ")", "return", "obj", "ref", ">>=", "1", "class_def", "=", "self", ".", "_getClassDefinition", "(", "ref", ")", "alias", "=", "class_def", ".", "alias", "obj", "=", "alias", ".", "createInstance", "(", "codec", "=", "self", ")", "obj_attrs", "=", "dict", "(", ")", "self", ".", "context", ".", "addObject", "(", "obj", ")", "if", "class_def", ".", "encoding", "in", "(", "ObjectEncoding", ".", "EXTERNAL", ",", "ObjectEncoding", ".", "PROXY", ")", ":", "obj", ".", "__readamf__", "(", "DataInput", "(", "self", ")", ")", "if", "self", ".", "use_proxies", "is", "True", ":", "obj", "=", "self", ".", "readProxy", "(", "obj", ")", "return", "obj", "elif", "class_def", ".", "encoding", "==", "ObjectEncoding", ".", "DYNAMIC", ":", "self", ".", "_readStatic", "(", "class_def", ",", "obj_attrs", ")", "self", ".", "_readDynamic", "(", "class_def", ",", "obj_attrs", ")", "elif", "class_def", ".", "encoding", "==", "ObjectEncoding", ".", "STATIC", ":", "self", ".", "_readStatic", "(", "class_def", ",", "obj_attrs", ")", "else", ":", "raise", "pyamf", ".", "DecodeError", "(", "\"Unknown object encoding\"", ")", "alias", ".", "applyAttributes", "(", "obj", ",", "obj_attrs", ",", "codec", "=", "self", ")", "if", "self", ".", "use_proxies", "is", "True", ":", "obj", "=", "self", ".", "readProxy", "(", "obj", ")", "return", "obj" ]
Reads an object from the stream.
[ "Reads", "an", "object", "from", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L997-L1044
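The decode path above hinges on AMF3's reference bit: the low bit of the U29 header distinguishes a back-reference into the object table (bit clear, remaining bits are an index) from an inline definition (bit set, remaining bits carry the class-definition reference). A minimal standalone sketch of that convention, with a plain list standing in for Decoder.context:

REFERENCE_BIT = 0x01

def resolve(header, object_table):
    """Split a decoded U29 header the way readObject does."""
    if header & REFERENCE_BIT == 0:
        # Back-reference: remaining bits index a previously decoded object.
        return object_table[header >> 1]
    # Inline definition: remaining bits are the class-definition reference.
    return header >> 1

assert resolve(0b10, ['a', 'b']) == 'b'   # reference to object #1
assert resolve(0b11, []) == 1             # inline, class-def ref 1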
249,101
jmgilman/Neolib
neolib/pyamf/amf3.py
Decoder.readXML
def readXML(self):
    """
    Reads an xml object from the stream.

    @return: An etree interface compatible object
    @see: L{xml.set_default_interface}
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        return self.context.getObject(ref >> 1)

    xmlstring = self.stream.read(ref >> 1)

    x = xml.fromstring(xmlstring)
    self.context.addObject(x)

    return x
python
def readXML(self):
    """
    Reads an xml object from the stream.

    @return: An etree interface compatible object
    @see: L{xml.set_default_interface}
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        return self.context.getObject(ref >> 1)

    xmlstring = self.stream.read(ref >> 1)

    x = xml.fromstring(xmlstring)
    self.context.addObject(x)

    return x
[ "def", "readXML", "(", "self", ")", ":", "ref", "=", "self", ".", "readInteger", "(", "False", ")", "if", "ref", "&", "REFERENCE_BIT", "==", "0", ":", "return", "self", ".", "context", ".", "getObject", "(", "ref", ">>", "1", ")", "xmlstring", "=", "self", ".", "stream", ".", "read", "(", "ref", ">>", "1", ")", "x", "=", "xml", ".", "fromstring", "(", "xmlstring", ")", "self", ".", "context", ".", "addObject", "(", "x", ")", "return", "x" ]
Reads an xml object from the stream.

@return: An etree interface compatible object
@see: L{xml.set_default_interface}
[ "Reads", "an", "xml", "object", "from", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1046-L1063
249,102
jmgilman/Neolib
neolib/pyamf/amf3.py
Decoder.readByteArray
def readByteArray(self):
    """
    Reads a string of data from the stream.

    Detects if the L{ByteArray} was compressed using C{zlib}.

    @see: L{ByteArray}
    @note: This is not supported in ActionScript 1.0 and 2.0.
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        return self.context.getObject(ref >> 1)

    buffer = self.stream.read(ref >> 1)

    try:
        buffer = zlib.decompress(buffer)
        compressed = True
    except zlib.error:
        compressed = False

    obj = ByteArray(buffer)
    obj.compressed = compressed

    self.context.addObject(obj)

    return obj
python
def readByteArray(self):
    """
    Reads a string of data from the stream.

    Detects if the L{ByteArray} was compressed using C{zlib}.

    @see: L{ByteArray}
    @note: This is not supported in ActionScript 1.0 and 2.0.
    """
    ref = self.readInteger(False)

    if ref & REFERENCE_BIT == 0:
        return self.context.getObject(ref >> 1)

    buffer = self.stream.read(ref >> 1)

    try:
        buffer = zlib.decompress(buffer)
        compressed = True
    except zlib.error:
        compressed = False

    obj = ByteArray(buffer)
    obj.compressed = compressed

    self.context.addObject(obj)

    return obj
[ "def", "readByteArray", "(", "self", ")", ":", "ref", "=", "self", ".", "readInteger", "(", "False", ")", "if", "ref", "&", "REFERENCE_BIT", "==", "0", ":", "return", "self", ".", "context", ".", "getObject", "(", "ref", ">>", "1", ")", "buffer", "=", "self", ".", "stream", ".", "read", "(", "ref", ">>", "1", ")", "try", ":", "buffer", "=", "zlib", ".", "decompress", "(", "buffer", ")", "compressed", "=", "True", "except", "zlib", ".", "error", ":", "compressed", "=", "False", "obj", "=", "ByteArray", "(", "buffer", ")", "obj", ".", "compressed", "=", "compressed", "self", ".", "context", ".", "addObject", "(", "obj", ")", "return", "obj" ]
Reads a string of data from the stream.

Detects if the L{ByteArray} was compressed using C{zlib}.

@see: L{ByteArray}
@note: This is not supported in ActionScript 1.0 and 2.0.
[ "Reads", "a", "string", "of", "data", "from", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1074-L1101
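readByteArray's compression handling is duck-typed: it simply attempts zlib.decompress and falls back to the raw bytes when that raises. The same trick in isolation, runnable as-is:

import zlib

def inflate_if_compressed(buf):
    """Return (payload, was_compressed) for a ByteArray body."""
    try:
        return zlib.decompress(buf), True
    except zlib.error:
        # Not a zlib stream; treat the bytes as uncompressed.
        return buf, False

assert inflate_if_compressed(zlib.compress(b'payload')) == (b'payload', True)
assert inflate_if_compressed(b'payload') == (b'payload', False)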
249,103
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeBoolean
def writeBoolean(self, n):
    """
    Writes a Boolean to the stream.
    """
    t = TYPE_BOOL_TRUE

    if n is False:
        t = TYPE_BOOL_FALSE

    self.stream.write(t)
python
def writeBoolean(self, n):
    """
    Writes a Boolean to the stream.
    """
    t = TYPE_BOOL_TRUE

    if n is False:
        t = TYPE_BOOL_FALSE

    self.stream.write(t)
[ "def", "writeBoolean", "(", "self", ",", "n", ")", ":", "t", "=", "TYPE_BOOL_TRUE", "if", "n", "is", "False", ":", "t", "=", "TYPE_BOOL_FALSE", "self", ".", "stream", ".", "write", "(", "t", ")" ]
Writes a Boolean to the stream.
[ "Writes", "a", "Boolean", "to", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1145-L1154
249,104
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeInteger
def writeInteger(self, n):
    """
    Writes an integer to the stream.

    @type n: integer data
    @param n: The integer data to be encoded to the AMF3 data stream.
    """
    if n < MIN_29B_INT or n > MAX_29B_INT:
        self.writeNumber(float(n))

        return

    self.stream.write(TYPE_INTEGER)
    self.stream.write(encode_int(n))
python
def writeInteger(self, n):
    """
    Writes an integer to the stream.

    @type n: integer data
    @param n: The integer data to be encoded to the AMF3 data stream.
    """
    if n < MIN_29B_INT or n > MAX_29B_INT:
        self.writeNumber(float(n))

        return

    self.stream.write(TYPE_INTEGER)
    self.stream.write(encode_int(n))
[ "def", "writeInteger", "(", "self", ",", "n", ")", ":", "if", "n", "<", "MIN_29B_INT", "or", "n", ">", "MAX_29B_INT", ":", "self", ".", "writeNumber", "(", "float", "(", "n", ")", ")", "return", "self", ".", "stream", ".", "write", "(", "TYPE_INTEGER", ")", "self", ".", "stream", ".", "write", "(", "encode_int", "(", "n", ")", ")" ]
Writes an integer to the stream.

@type n: integer data
@param n: The integer data to be encoded to the AMF3 data stream.
[ "Writes", "an", "integer", "to", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1169-L1182
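writeInteger falls back to an IEEE double for anything outside the signed 29-bit window (MIN_29B_INT/MAX_29B_INT); in-range values go through encode_int, which packs them into AMF3's U29 variable-length form. A sketch of that packing, assuming the standard U29 layout (7 payload bits per byte with a continuation flag, except the fourth byte, which carries a full 8); pyamf's real encode_int additionally handles negative values and caching:

def encode_u29(n):
    """Pack an unsigned value < 2**29 into 1 to 4 bytes."""
    if n < 0x80:
        return bytes([n])
    if n < 0x4000:
        return bytes([(n >> 7) | 0x80, n & 0x7f])
    if n < 0x200000:
        return bytes([(n >> 14) | 0x80, ((n >> 7) & 0x7f) | 0x80, n & 0x7f])
    # Four-byte form: the last byte holds 8 bits instead of 7.
    return bytes([((n >> 22) & 0x7f) | 0x80, ((n >> 15) & 0x7f) | 0x80,
                  ((n >> 8) & 0x7f) | 0x80, n & 0xff])

assert encode_u29(0x80) == b'\x81\x00'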
249,105
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeNumber
def writeNumber(self, n):
    """
    Writes a float to the stream.

    @type n: C{float}
    """
    self.stream.write(TYPE_NUMBER)
    self.stream.write_double(n)
python
def writeNumber(self, n):
    """
    Writes a float to the stream.

    @type n: C{float}
    """
    self.stream.write(TYPE_NUMBER)
    self.stream.write_double(n)
[ "def", "writeNumber", "(", "self", ",", "n", ")", ":", "self", ".", "stream", ".", "write", "(", "TYPE_NUMBER", ")", "self", ".", "stream", ".", "write_double", "(", "n", ")" ]
Writes a float to the stream.

@type n: C{float}
[ "Writes", "a", "float", "to", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1184-L1191
249,106
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeProxy
def writeProxy(self, obj):
    """
    Encodes a proxied object to the stream.

    @since: 0.6
    """
    proxy = self.context.getProxyForObject(obj)

    self.writeObject(proxy, is_proxy=True)
python
def writeProxy(self, obj):
    """
    Encodes a proxied object to the stream.

    @since: 0.6
    """
    proxy = self.context.getProxyForObject(obj)

    self.writeObject(proxy, is_proxy=True)
[ "def", "writeProxy", "(", "self", ",", "obj", ")", ":", "proxy", "=", "self", ".", "context", ".", "getProxyForObject", "(", "obj", ")", "self", ".", "writeObject", "(", "proxy", ",", "is_proxy", "=", "True", ")" ]
Encodes a proxied object to the stream.

@since: 0.6
[ "Encodes", "a", "proxied", "object", "to", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1371-L1379
249,107
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeObject
def writeObject(self, obj, is_proxy=False):
    """
    Writes an object to the stream.
    """
    if self.use_proxies and not is_proxy:
        self.writeProxy(obj)

        return

    self.stream.write(TYPE_OBJECT)

    ref = self.context.getObjectReference(obj)

    if ref != -1:
        self._writeInteger(ref << 1)

        return

    self.context.addObject(obj)

    # object is not referenced, serialise it
    kls = obj.__class__
    definition = self.context.getClass(kls)
    alias = None
    class_ref = False  # if the class definition is a reference

    if definition:
        class_ref = True
        alias = definition.alias
    else:
        alias = self.context.getClassAlias(kls)
        definition = ClassDefinition(alias)

        self.context.addClass(definition, alias.klass)

    if class_ref:
        self.stream.write(definition.reference)
    else:
        ref = 0

        if definition.encoding != ObjectEncoding.EXTERNAL:
            ref += definition.attr_len << 4

        final_reference = encode_int(ref | definition.encoding << 2 |
            REFERENCE_BIT << 1 | REFERENCE_BIT)

        self.stream.write(final_reference)

        definition.reference = encode_int(
            definition.reference << 2 | REFERENCE_BIT)

        if alias.anonymous:
            self.stream.write('\x01')
        else:
            self.serialiseString(alias.alias)

        # work out what the final reference for the class will be.
        # this is okay because the next time an object of the same
        # class is encoded, class_ref will be True and never get here
        # again.

    if alias.external:
        obj.__writeamf__(DataOutput(self))

        return

    attrs = alias.getEncodableAttributes(obj, codec=self)

    if alias.static_attrs:
        if not class_ref:
            [self.serialiseString(attr) for attr in alias.static_attrs]

        for attr in alias.static_attrs:
            value = attrs.pop(attr)

            self.writeElement(value)

    if definition.encoding == ObjectEncoding.STATIC:
        return

    if definition.encoding == ObjectEncoding.DYNAMIC:
        if attrs:
            for attr, value in attrs.iteritems():
                if type(attr) in python.int_types:
                    attr = str(attr)

                self.serialiseString(attr)
                self.writeElement(value)

        self.stream.write('\x01')
python
def writeObject(self, obj, is_proxy=False):
    """
    Writes an object to the stream.
    """
    if self.use_proxies and not is_proxy:
        self.writeProxy(obj)

        return

    self.stream.write(TYPE_OBJECT)

    ref = self.context.getObjectReference(obj)

    if ref != -1:
        self._writeInteger(ref << 1)

        return

    self.context.addObject(obj)

    # object is not referenced, serialise it
    kls = obj.__class__
    definition = self.context.getClass(kls)
    alias = None
    class_ref = False  # if the class definition is a reference

    if definition:
        class_ref = True
        alias = definition.alias
    else:
        alias = self.context.getClassAlias(kls)
        definition = ClassDefinition(alias)

        self.context.addClass(definition, alias.klass)

    if class_ref:
        self.stream.write(definition.reference)
    else:
        ref = 0

        if definition.encoding != ObjectEncoding.EXTERNAL:
            ref += definition.attr_len << 4

        final_reference = encode_int(ref | definition.encoding << 2 |
            REFERENCE_BIT << 1 | REFERENCE_BIT)

        self.stream.write(final_reference)

        definition.reference = encode_int(
            definition.reference << 2 | REFERENCE_BIT)

        if alias.anonymous:
            self.stream.write('\x01')
        else:
            self.serialiseString(alias.alias)

        # work out what the final reference for the class will be.
        # this is okay because the next time an object of the same
        # class is encoded, class_ref will be True and never get here
        # again.

    if alias.external:
        obj.__writeamf__(DataOutput(self))

        return

    attrs = alias.getEncodableAttributes(obj, codec=self)

    if alias.static_attrs:
        if not class_ref:
            [self.serialiseString(attr) for attr in alias.static_attrs]

        for attr in alias.static_attrs:
            value = attrs.pop(attr)

            self.writeElement(value)

    if definition.encoding == ObjectEncoding.STATIC:
        return

    if definition.encoding == ObjectEncoding.DYNAMIC:
        if attrs:
            for attr, value in attrs.iteritems():
                if type(attr) in python.int_types:
                    attr = str(attr)

                self.serialiseString(attr)
                self.writeElement(value)

        self.stream.write('\x01')
[ "def", "writeObject", "(", "self", ",", "obj", ",", "is_proxy", "=", "False", ")", ":", "if", "self", ".", "use_proxies", "and", "not", "is_proxy", ":", "self", ".", "writeProxy", "(", "obj", ")", "return", "self", ".", "stream", ".", "write", "(", "TYPE_OBJECT", ")", "ref", "=", "self", ".", "context", ".", "getObjectReference", "(", "obj", ")", "if", "ref", "!=", "-", "1", ":", "self", ".", "_writeInteger", "(", "ref", "<<", "1", ")", "return", "self", ".", "context", ".", "addObject", "(", "obj", ")", "# object is not referenced, serialise it", "kls", "=", "obj", ".", "__class__", "definition", "=", "self", ".", "context", ".", "getClass", "(", "kls", ")", "alias", "=", "None", "class_ref", "=", "False", "# if the class definition is a reference", "if", "definition", ":", "class_ref", "=", "True", "alias", "=", "definition", ".", "alias", "else", ":", "alias", "=", "self", ".", "context", ".", "getClassAlias", "(", "kls", ")", "definition", "=", "ClassDefinition", "(", "alias", ")", "self", ".", "context", ".", "addClass", "(", "definition", ",", "alias", ".", "klass", ")", "if", "class_ref", ":", "self", ".", "stream", ".", "write", "(", "definition", ".", "reference", ")", "else", ":", "ref", "=", "0", "if", "definition", ".", "encoding", "!=", "ObjectEncoding", ".", "EXTERNAL", ":", "ref", "+=", "definition", ".", "attr_len", "<<", "4", "final_reference", "=", "encode_int", "(", "ref", "|", "definition", ".", "encoding", "<<", "2", "|", "REFERENCE_BIT", "<<", "1", "|", "REFERENCE_BIT", ")", "self", ".", "stream", ".", "write", "(", "final_reference", ")", "definition", ".", "reference", "=", "encode_int", "(", "definition", ".", "reference", "<<", "2", "|", "REFERENCE_BIT", ")", "if", "alias", ".", "anonymous", ":", "self", ".", "stream", ".", "write", "(", "'\\x01'", ")", "else", ":", "self", ".", "serialiseString", "(", "alias", ".", "alias", ")", "# work out what the final reference for the class will be.", "# this is okay because the next time an object of the same", "# class is encoded, class_ref will be True and never get here", "# again.", "if", "alias", ".", "external", ":", "obj", ".", "__writeamf__", "(", "DataOutput", "(", "self", ")", ")", "return", "attrs", "=", "alias", ".", "getEncodableAttributes", "(", "obj", ",", "codec", "=", "self", ")", "if", "alias", ".", "static_attrs", ":", "if", "not", "class_ref", ":", "[", "self", ".", "serialiseString", "(", "attr", ")", "for", "attr", "in", "alias", ".", "static_attrs", "]", "for", "attr", "in", "alias", ".", "static_attrs", ":", "value", "=", "attrs", ".", "pop", "(", "attr", ")", "self", ".", "writeElement", "(", "value", ")", "if", "definition", ".", "encoding", "==", "ObjectEncoding", ".", "STATIC", ":", "return", "if", "definition", ".", "encoding", "==", "ObjectEncoding", ".", "DYNAMIC", ":", "if", "attrs", ":", "for", "attr", ",", "value", "in", "attrs", ".", "iteritems", "(", ")", ":", "if", "type", "(", "attr", ")", "in", "python", ".", "int_types", ":", "attr", "=", "str", "(", "attr", ")", "self", ".", "serialiseString", "(", "attr", ")", "self", ".", "writeElement", "(", "value", ")", "self", ".", "stream", ".", "write", "(", "'\\x01'", ")" ]
Writes an object to the stream.
[ "Writes", "an", "object", "to", "the", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1381-L1470
249,108
jmgilman/Neolib
neolib/pyamf/amf3.py
Encoder.writeXML
def writeXML(self, n):
    """
    Writes a XML string to the data stream.

    @type n: L{ET<xml.ET>}
    @param n: The XML Document to be encoded to the AMF3 data stream.
    """
    self.stream.write(TYPE_XMLSTRING)

    ref = self.context.getObjectReference(n)

    if ref != -1:
        self._writeInteger(ref << 1)

        return

    self.context.addObject(n)

    self.serialiseString(xml.tostring(n).encode('utf-8'))
python
def writeXML(self, n):
    """
    Writes a XML string to the data stream.

    @type n: L{ET<xml.ET>}
    @param n: The XML Document to be encoded to the AMF3 data stream.
    """
    self.stream.write(TYPE_XMLSTRING)

    ref = self.context.getObjectReference(n)

    if ref != -1:
        self._writeInteger(ref << 1)

        return

    self.context.addObject(n)

    self.serialiseString(xml.tostring(n).encode('utf-8'))
[ "def", "writeXML", "(", "self", ",", "n", ")", ":", "self", ".", "stream", ".", "write", "(", "TYPE_XMLSTRING", ")", "ref", "=", "self", ".", "context", ".", "getObjectReference", "(", "n", ")", "if", "ref", "!=", "-", "1", ":", "self", ".", "_writeInteger", "(", "ref", "<<", "1", ")", "return", "self", ".", "context", ".", "addObject", "(", "n", ")", "self", ".", "serialiseString", "(", "xml", ".", "tostring", "(", "n", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
Writes a XML string to the data stream.

@type n: L{ET<xml.ET>}
@param n: The XML Document to be encoded to the AMF3 data stream.
[ "Writes", "a", "XML", "string", "to", "the", "data", "stream", "." ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1495-L1512
249,109
dossier/dossier.web
dossier/web/tags.py
v1_tag_associate
def v1_tag_associate(request, tags, tag):
    '''Associate an HTML element with a tag.

    The association should be a JSON serialized object on the request
    body. Here is an example association that should make the object's
    structure clear:

    .. code-block:: python

        {
            "url": "http://example.com/abc/xyz?foo=bar",
            "text": "The text the user highlighted.",
            "stream_id": "{unix timestamp}-{md5 of url}",
            "hash": "{nilsimsa hash of the HTML}",
            "timestamp": {unix timestamp},
            "xpath": {
                "start_node": "/html/body/p[1]/text()[2]",
                "start_idx": 3,
                "end_node": "/html/body/p[1]/text()[3]",
                "end_idx": 9
            }
        }

    All fields are required and cannot be empty or ``null``.

    The tag of the association should be specified in the URL and is
    delimited by ``//``.
    '''
    tag = tag.decode('utf-8').strip()
    assoc = dict(json.loads(request.body.read()), **{'tag': tag})
    tags.add(assoc)
python
def v1_tag_associate(request, tags, tag):
    '''Associate an HTML element with a tag.

    The association should be a JSON serialized object on the request
    body. Here is an example association that should make the object's
    structure clear:

    .. code-block:: python

        {
            "url": "http://example.com/abc/xyz?foo=bar",
            "text": "The text the user highlighted.",
            "stream_id": "{unix timestamp}-{md5 of url}",
            "hash": "{nilsimsa hash of the HTML}",
            "timestamp": {unix timestamp},
            "xpath": {
                "start_node": "/html/body/p[1]/text()[2]",
                "start_idx": 3,
                "end_node": "/html/body/p[1]/text()[3]",
                "end_idx": 9
            }
        }

    All fields are required and cannot be empty or ``null``.

    The tag of the association should be specified in the URL and is
    delimited by ``//``.
    '''
    tag = tag.decode('utf-8').strip()
    assoc = dict(json.loads(request.body.read()), **{'tag': tag})
    tags.add(assoc)
[ "def", "v1_tag_associate", "(", "request", ",", "tags", ",", "tag", ")", ":", "tag", "=", "tag", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "assoc", "=", "dict", "(", "json", ".", "loads", "(", "request", ".", "body", ".", "read", "(", ")", ")", ",", "*", "*", "{", "'tag'", ":", "tag", "}", ")", "tags", ".", "add", "(", "assoc", ")" ]
Associate an HTML element with a tag.

The association should be a JSON serialized object on the request
body. Here is an example association that should make the object's
structure clear:

.. code-block:: python

    {
        "url": "http://example.com/abc/xyz?foo=bar",
        "text": "The text the user highlighted.",
        "stream_id": "{unix timestamp}-{md5 of url}",
        "hash": "{nilsimsa hash of the HTML}",
        "timestamp": {unix timestamp},
        "xpath": {
            "start_node": "/html/body/p[1]/text()[2]",
            "start_idx": 3,
            "end_node": "/html/body/p[1]/text()[3]",
            "end_idx": 9
        }
    }

All fields are required and cannot be empty or ``null``.

The tag of the association should be specified in the URL and is
delimited by ``//``.
[ "Associate", "an", "HTML", "element", "with", "a", "tag", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L38-L68
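A client-side call matching the documented body might look as follows; the host, HTTP method, and exact route template are assumptions here, while the payload schema and the //-delimited tag come from the docstring:

import json
import requests  # assumed available; any HTTP client works

assoc = {
    'url': 'http://example.com/abc/xyz?foo=bar',
    'text': 'The text the user highlighted.',
    'stream_id': '1414183868-<md5 of url>',      # placeholder values
    'hash': '<nilsimsa hash of the HTML>',
    'timestamp': 1414183868,
    'xpath': {
        'start_node': '/html/body/p[1]/text()[2]',
        'start_idx': 3,
        'end_node': '/html/body/p[1]/text()[3]',
        'end_idx': 9,
    },
}
# Hypothetical endpoint; the tag 'foo/bar' rides in the URL.
requests.put('http://localhost:8080/dossier/v1/tags/associations/foo/bar',
             data=json.dumps(assoc))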
249,110
dossier/dossier.web
dossier/web/tags.py
v1_tag_list
def v1_tag_list(tags, tag=''):
    '''List all direct children tags of the given parent.

    If no parent is specified, then list all top-level tags.

    The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look
    like this:

    .. code-block:: python

        {
            'children': [
                {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'},
            ]
        }
    '''
    tag = tag.decode('utf-8').strip()
    return {'children': tags.list(tag)}
python
def v1_tag_list(tags, tag=''):
    '''List all direct children tags of the given parent.

    If no parent is specified, then list all top-level tags.

    The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look
    like this:

    .. code-block:: python

        {
            'children': [
                {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'},
            ]
        }
    '''
    tag = tag.decode('utf-8').strip()
    return {'children': tags.list(tag)}
[ "def", "v1_tag_list", "(", "tags", ",", "tag", "=", "''", ")", ":", "tag", "=", "tag", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "return", "{", "'children'", ":", "tags", ".", "list", "(", "tag", ")", "}" ]
List all direct children tags of the given parent.

If no parent is specified, then list all top-level tags.

The JSON returned for ``/dossier/v1/tags/list/foo/bar`` might look
like this:

.. code-block:: python

    {
        'children': [
            {'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'},
        ]
    }
[ "List", "all", "direct", "children", "tags", "of", "the", "given", "parent", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L73-L90
249,111
dossier/dossier.web
dossier/web/tags.py
v1_tag_suggest
def v1_tag_suggest(request, tags, prefix, parent=''):
    '''Provide fast suggestions for tag components.

    This yields suggestions for *components* of a tag and a given
    prefix. For example, given the tags ``foo/bar/baz`` and ``fob/bob``,
    here are some example completions (ordering may be different):

    .. code-block:: text

        /dossier/v1/tags/suggest/prefix/f => ['foo', 'fob']
        /dossier/v1/tags/suggest/prefix/foo => ['foo']
        /dossier/v1/tags/suggest/prefix/b/parent/foo => ['bar']
        /dossier/v1/tags/suggest/prefix/b/parent/fob => ['bob']
        /dossier/v1/tags/suggest/prefix/b/parent/foo/bar => ['baz']

    N.B. Each of the lists above are wrapped in the following JSON
    envelope for the response:

    .. code-block:: text

        {'suggestions': ['foo', 'fob']}

    An optional query parameter, ``limit``, may be passed to control
    the number of suggestions returned.
    '''
    prefix = prefix.decode('utf-8').strip()
    parent = parent.decode('utf-8').strip()
    limit = min(10000, int(request.params.get('limit', 100)))
    return {'suggestions': tags.suggest(parent, prefix, limit=limit)}
python
def v1_tag_suggest(request, tags, prefix, parent=''):
    '''Provide fast suggestions for tag components.

    This yields suggestions for *components* of a tag and a given
    prefix. For example, given the tags ``foo/bar/baz`` and ``fob/bob``,
    here are some example completions (ordering may be different):

    .. code-block:: text

        /dossier/v1/tags/suggest/prefix/f => ['foo', 'fob']
        /dossier/v1/tags/suggest/prefix/foo => ['foo']
        /dossier/v1/tags/suggest/prefix/b/parent/foo => ['bar']
        /dossier/v1/tags/suggest/prefix/b/parent/fob => ['bob']
        /dossier/v1/tags/suggest/prefix/b/parent/foo/bar => ['baz']

    N.B. Each of the lists above are wrapped in the following JSON
    envelope for the response:

    .. code-block:: text

        {'suggestions': ['foo', 'fob']}

    An optional query parameter, ``limit``, may be passed to control
    the number of suggestions returned.
    '''
    prefix = prefix.decode('utf-8').strip()
    parent = parent.decode('utf-8').strip()
    limit = min(10000, int(request.params.get('limit', 100)))
    return {'suggestions': tags.suggest(parent, prefix, limit=limit)}
[ "def", "v1_tag_suggest", "(", "request", ",", "tags", ",", "prefix", ",", "parent", "=", "''", ")", ":", "prefix", "=", "prefix", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "parent", "=", "parent", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "limit", "=", "min", "(", "10000", ",", "int", "(", "request", ".", "params", ".", "get", "(", "'limit'", ",", "100", ")", ")", ")", "return", "{", "'suggestions'", ":", "tags", ".", "suggest", "(", "parent", ",", "prefix", ",", "limit", "=", "limit", ")", "}" ]
Provide fast suggestions for tag components.

This yields suggestions for *components* of a tag and a given
prefix. For example, given the tags ``foo/bar/baz`` and ``fob/bob``,
here are some example completions (ordering may be different):

.. code-block:: text

    /dossier/v1/tags/suggest/prefix/f => ['foo', 'fob']
    /dossier/v1/tags/suggest/prefix/foo => ['foo']
    /dossier/v1/tags/suggest/prefix/b/parent/foo => ['bar']
    /dossier/v1/tags/suggest/prefix/b/parent/fob => ['bob']
    /dossier/v1/tags/suggest/prefix/b/parent/foo/bar => ['baz']

N.B. Each of the lists above are wrapped in the following JSON
envelope for the response:

.. code-block:: text

    {'suggestions': ['foo', 'fob']}

An optional query parameter, ``limit``, may be passed to control
the number of suggestions returned.
[ "Provide", "fast", "suggestions", "for", "tag", "components", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L95-L124
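The completions table in the docstring translates directly into requests; for instance (hypothetical host, route shape taken from the examples above):

import requests

resp = requests.get(
    'http://localhost:8080/dossier/v1/tags/suggest/prefix/b/parent/foo',
    params={'limit': 5})  # optional; the handler caps it at 10000
print(resp.json())        # e.g. {'suggestions': ['bar']}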
249,112
dossier/dossier.web
dossier/web/tags.py
v1_stream_id_associations
def v1_stream_id_associations(tags, stream_id):
    '''Retrieve associations for a given stream_id.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    stream_id = stream_id.decode('utf-8').strip()
    return {'associations': tags.assocs_by_stream_id(stream_id)}
python
def v1_stream_id_associations(tags, stream_id):
    '''Retrieve associations for a given stream_id.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    stream_id = stream_id.decode('utf-8').strip()
    return {'associations': tags.assocs_by_stream_id(stream_id)}
[ "def", "v1_stream_id_associations", "(", "tags", ",", "stream_id", ")", ":", "stream_id", "=", "stream_id", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "return", "{", "'associations'", ":", "tags", ".", "assocs_by_stream_id", "(", "stream_id", ")", "}" ]
Retrieve associations for a given stream_id. The associations returned have the exact same structure as defined in the ``v1_tag_associate`` route with one addition: a ``tag`` field contains the full tag name for the association.
[ "Retrieve", "associations", "for", "a", "given", "stream_id", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L128-L136
249,113
dossier/dossier.web
dossier/web/tags.py
v1_url_associations
def v1_url_associations(tags, url):
    '''Retrieve associations for a given URL.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    url = urllib.unquote_plus(url.decode('utf-8')).strip()
    return {'associations': tags.assocs_by_url(url)}
python
def v1_url_associations(tags, url):
    '''Retrieve associations for a given URL.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    url = urllib.unquote_plus(url.decode('utf-8')).strip()
    return {'associations': tags.assocs_by_url(url)}
[ "def", "v1_url_associations", "(", "tags", ",", "url", ")", ":", "url", "=", "urllib", ".", "unquote_plus", "(", "url", ".", "decode", "(", "'utf-8'", ")", ")", ".", "strip", "(", ")", "return", "{", "'associations'", ":", "tags", ".", "assocs_by_url", "(", "url", ")", "}" ]
Retrieve associations for a given URL. The associations returned have the exact same structure as defined in the ``v1_tag_associate`` route with one addition: a ``tag`` field contains the full tag name for the association.
[ "Retrieve", "associations", "for", "a", "given", "URL", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L140-L148
249,114
dossier/dossier.web
dossier/web/tags.py
v1_tag_associations
def v1_tag_associations(tags, tag):
    '''Retrieve associations for a given tag.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    tag = tag.decode('utf-8').strip()
    return {'associations': tags.assocs_by_tag(tag)}
python
def v1_tag_associations(tags, tag):
    '''Retrieve associations for a given tag.

    The associations returned have the exact same structure as defined
    in the ``v1_tag_associate`` route with one addition: a ``tag``
    field contains the full tag name for the association.
    '''
    tag = tag.decode('utf-8').strip()
    return {'associations': tags.assocs_by_tag(tag)}
[ "def", "v1_tag_associations", "(", "tags", ",", "tag", ")", ":", "tag", "=", "tag", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "return", "{", "'associations'", ":", "tags", ".", "assocs_by_tag", "(", "tag", ")", "}" ]
Retrieve associations for a given tag. The associations returned have the exact same structure as defined in the ``v1_tag_associate`` route with one addition: a ``tag`` field contains the full tag name for the association.
[ "Retrieve", "associations", "for", "a", "given", "tag", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L152-L160
249,115
dossier/dossier.web
dossier/web/tags.py
Tags.delete_all
def delete_all(self):
    '''Deletes all tag data.

    This does not destroy the ES index, but instead only deletes all
    tags with the configured doc types.
    '''
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type_tag)
    except TransportError:
        logger.warn('type %r in index %r already deleted',
                    self.index, self.type_tag, exc_info=True)
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type_assoc)
    except TransportError:
        logger.warn('type %r in index %r already deleted',
                    self.index, self.type_assoc, exc_info=True)
python
def delete_all(self):
    '''Deletes all tag data.

    This does not destroy the ES index, but instead only deletes all
    tags with the configured doc types.
    '''
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type_tag)
    except TransportError:
        logger.warn('type %r in index %r already deleted',
                    self.index, self.type_tag, exc_info=True)
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type_assoc)
    except TransportError:
        logger.warn('type %r in index %r already deleted',
                    self.index, self.type_assoc, exc_info=True)
[ "def", "delete_all", "(", "self", ")", ":", "try", ":", "self", ".", "conn", ".", "indices", ".", "delete_mapping", "(", "index", "=", "self", ".", "index", ",", "doc_type", "=", "self", ".", "type_tag", ")", "except", "TransportError", ":", "logger", ".", "warn", "(", "'type %r in index %r already deleted'", ",", "self", ".", "index", ",", "self", ".", "type_tag", ",", "exc_info", "=", "True", ")", "try", ":", "self", ".", "conn", ".", "indices", ".", "delete_mapping", "(", "index", "=", "self", ".", "index", ",", "doc_type", "=", "self", ".", "type_assoc", ")", "except", "TransportError", ":", "logger", ".", "warn", "(", "'type %r in index %r already deleted'", ",", "self", ".", "index", ",", "self", ".", "type_assoc", ",", "exc_info", "=", "True", ")" ]
Deletes all tag data. This does not destroy the ES index, but instead only deletes all tags with the configured doc types.
[ "Deletes", "all", "tag", "data", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L272-L289
249,116
dossier/dossier.web
dossier/web/tags.py
Tags._create_mappings
def _create_mappings(self):
    'Create the field type mapping.'
    created1 = self._create_tag_mapping()
    created2 = self._create_assoc_mapping()
    return created1 or created2
python
def _create_mappings(self):
    'Create the field type mapping.'
    created1 = self._create_tag_mapping()
    created2 = self._create_assoc_mapping()
    return created1 or created2
[ "def", "_create_mappings", "(", "self", ")", ":", "created1", "=", "self", ".", "_create_tag_mapping", "(", ")", "created2", "=", "self", ".", "_create_assoc_mapping", "(", ")", "return", "created1", "or", "created2" ]
Create the field type mapping.
[ "Create", "the", "field", "type", "mapping", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/tags.py#L311-L315
249,117
cogniteev/docido-python-sdk
docido_sdk/toolbox/__init__.py
get_last_traceback
def get_last_traceback():
    """Retrieve the last traceback as an `unicode` string."""
    import traceback
    from StringIO import StringIO
    tb = StringIO()
    traceback.print_exc(file=tb)
    return to_unicode(tb.getvalue())
python
def get_last_traceback():
    """Retrieve the last traceback as an `unicode` string."""
    import traceback
    from StringIO import StringIO
    tb = StringIO()
    traceback.print_exc(file=tb)
    return to_unicode(tb.getvalue())
[ "def", "get_last_traceback", "(", ")", ":", "import", "traceback", "from", "StringIO", "import", "StringIO", "tb", "=", "StringIO", "(", ")", "traceback", ".", "print_exc", "(", "file", "=", "tb", ")", "return", "to_unicode", "(", "tb", ".", "getvalue", "(", ")", ")" ]
Retrieve the last traceback as an `unicode` string.
[ "Retrieve", "the", "last", "traceback", "as", "an", "unicode", "string", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/__init__.py#L5-L11
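The helper is Python 2 code (StringIO as a top-level module); on Python 3 the buffer is unnecessary because the stdlib already returns the formatted traceback as text:

import traceback

def get_last_traceback():
    """Python 3 equivalent: format_exc() returns the text directly."""
    return traceback.format_exc()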
249,118
Sean1708/HipPy
hippy/__init__.py
write
def write(file_name, data):
    """Encode and write a Hip file."""
    with open(file_name, 'w') as f:
        f.write(encode(data))
python
def write(file_name, data):
    """Encode and write a Hip file."""
    with open(file_name, 'w') as f:
        f.write(encode(data))
[ "def", "write", "(", "file_name", ",", "data", ")", ":", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "encode", "(", "data", ")", ")" ]
Encode and write a Hip file.
[ "Encode", "and", "write", "a", "Hip", "file", "." ]
d0ea8fb1e417f1fedaa8e215e3d420b90c4de691
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/__init__.py#L21-L24
249,119
fred49/linshare-api
linshareapi/user/threadmembers.py
ThreadsMembers2.get
def get(self, thread_uuid, uuid):
    """ Get one thread member."""
    members = (v for v in self.list(thread_uuid)
               if v.get('userUuid') == uuid)
    for i in members:
        self.log.debug(i)
        return i
    return None
python
def get(self, thread_uuid, uuid):
    """ Get one thread member."""
    members = (v for v in self.list(thread_uuid)
               if v.get('userUuid') == uuid)
    for i in members:
        self.log.debug(i)
        return i
    return None
[ "def", "get", "(", "self", ",", "thread_uuid", ",", "uuid", ")", ":", "members", "=", "(", "v", "for", "v", "in", "self", ".", "list", "(", "thread_uuid", ")", "if", "v", ".", "get", "(", "'userUuid'", ")", "==", "uuid", ")", "for", "i", "in", "members", ":", "self", ".", "log", ".", "debug", "(", "i", ")", "return", "i", "return", "None" ]
Get one thread member.
[ "Get", "one", "thread", "member", "." ]
be646c25aa8ba3718abb6869c620b157d53d6e41
https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/threadmembers.py#L110-L116
249,120
fred49/linshare-api
linshareapi/user/threadmembers.py
WorkgroupMembers.delete
def delete(self, wg_uuid, uuid):
    """ Delete one thread member."""
    url = "%(base)s/%(wg_uuid)s/members/%(uuid)s" % {
        'base': self.local_base_url,
        'wg_uuid': wg_uuid,
        'uuid': uuid
    }
    return self.core.delete(url)
python
def delete(self, wg_uuid, uuid):
    """ Delete one thread member."""
    url = "%(base)s/%(wg_uuid)s/members/%(uuid)s" % {
        'base': self.local_base_url,
        'wg_uuid': wg_uuid,
        'uuid': uuid
    }
    return self.core.delete(url)
[ "def", "delete", "(", "self", ",", "wg_uuid", ",", "uuid", ")", ":", "url", "=", "\"%(base)s/%(wg_uuid)s/members/%(uuid)s\"", "%", "{", "'base'", ":", "self", ".", "local_base_url", ",", "'wg_uuid'", ":", "wg_uuid", ",", "'uuid'", ":", "uuid", "}", "return", "self", ".", "core", ".", "delete", "(", "url", ")" ]
Delete one thread member.
[ "Delete", "one", "thread", "member", "." ]
be646c25aa8ba3718abb6869c620b157d53d6e41
https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/threadmembers.py#L170-L177
249,121
cariad/py-wpconfigr
wpconfigr/__main__.py
run_from_cli
def run_from_cli():
    """
    Perform an update instigated from a CLI.
    """
    arg_parser = argparse.ArgumentParser(
        description='Read and write properties in a wp-config.php file. '
                    'Include a --value argument to set the value, omit it to '
                    'read the value of the specified key.',
        prog='python -m wpconfigr')

    arg_parser.add_argument('--filename',
                            help='wp-config.php filename',
                            required=True)

    arg_parser.add_argument('--key',
                            help='Property key',
                            required=True)

    arg_parser.add_argument('--value',
                            help='New property value',
                            required=False)

    arg_parser.add_argument('--log-level',
                            default='CRITICAL',
                            help='Log level',
                            required=False)

    arg_parser.add_argument('--set-true',
                            action='store_true',
                            help='Set the value as boolean true')

    arg_parser.add_argument('--set-false',
                            action='store_true',
                            help='Set the value as boolean false')

    args = arg_parser.parse_args()

    if args.set_true and args.set_false:
        arg_parser.error('Cannot set --set-true and --set-false.')

    if args.value and args.set_true:
        arg_parser.error('Cannot set --value and --set-true.')

    if args.value and args.set_false:
        arg_parser.error('Cannot set --value and --set-false.')

    basicConfig(level=str(args.log_level).upper())

    updater = WpConfigFile(filename=args.filename)

    if args.set_true:
        value = True
    elif args.set_false:
        value = False
    else:
        value = args.value

    if value is not None:
        updater.set(key=args.key, value=value)
    else:
        got = updater.get(key=args.key)

        if got:
            print(got)
python
def run_from_cli():
    """
    Perform an update instigated from a CLI.
    """
    arg_parser = argparse.ArgumentParser(
        description='Read and write properties in a wp-config.php file. '
                    'Include a --value argument to set the value, omit it to '
                    'read the value of the specified key.',
        prog='python -m wpconfigr')

    arg_parser.add_argument('--filename',
                            help='wp-config.php filename',
                            required=True)

    arg_parser.add_argument('--key',
                            help='Property key',
                            required=True)

    arg_parser.add_argument('--value',
                            help='New property value',
                            required=False)

    arg_parser.add_argument('--log-level',
                            default='CRITICAL',
                            help='Log level',
                            required=False)

    arg_parser.add_argument('--set-true',
                            action='store_true',
                            help='Set the value as boolean true')

    arg_parser.add_argument('--set-false',
                            action='store_true',
                            help='Set the value as boolean false')

    args = arg_parser.parse_args()

    if args.set_true and args.set_false:
        arg_parser.error('Cannot set --set-true and --set-false.')

    if args.value and args.set_true:
        arg_parser.error('Cannot set --value and --set-true.')

    if args.value and args.set_false:
        arg_parser.error('Cannot set --value and --set-false.')

    basicConfig(level=str(args.log_level).upper())

    updater = WpConfigFile(filename=args.filename)

    if args.set_true:
        value = True
    elif args.set_false:
        value = False
    else:
        value = args.value

    if value is not None:
        updater.set(key=args.key, value=value)
    else:
        got = updater.get(key=args.key)

        if got:
            print(got)
[ "def", "run_from_cli", "(", ")", ":", "arg_parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Read and write properties in a wp-config.php file. '", "'Include a --value argument to set the value, omit it to '", "'read the value of the specified key.'", ",", "prog", "=", "'python -m wpconfigr'", ")", "arg_parser", ".", "add_argument", "(", "'--filename'", ",", "help", "=", "'wp-config.php filename'", ",", "required", "=", "True", ")", "arg_parser", ".", "add_argument", "(", "'--key'", ",", "help", "=", "'Property key'", ",", "required", "=", "True", ")", "arg_parser", ".", "add_argument", "(", "'--value'", ",", "help", "=", "'New property value'", ",", "required", "=", "False", ")", "arg_parser", ".", "add_argument", "(", "'--log-level'", ",", "default", "=", "'CRITICAL'", ",", "help", "=", "'Log level'", ",", "required", "=", "False", ")", "arg_parser", ".", "add_argument", "(", "'--set-true'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Set the value as boolean true'", ")", "arg_parser", ".", "add_argument", "(", "'--set-false'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Set the value as boolean false'", ")", "args", "=", "arg_parser", ".", "parse_args", "(", ")", "if", "args", ".", "set_true", "and", "args", ".", "set_false", ":", "arg_parser", ".", "error", "(", "'Cannot set --set-true and --set-false.'", ")", "if", "args", ".", "value", "and", "args", ".", "set_true", ":", "arg_parser", ".", "error", "(", "'Cannot set --value and --set-true.'", ")", "if", "args", ".", "value", "and", "args", ".", "set_false", ":", "arg_parser", ".", "error", "(", "'Cannot set --value and --set-false.'", ")", "basicConfig", "(", "level", "=", "str", "(", "args", ".", "log_level", ")", ".", "upper", "(", ")", ")", "updater", "=", "WpConfigFile", "(", "filename", "=", "args", ".", "filename", ")", "if", "args", ".", "set_true", ":", "value", "=", "True", "elif", "args", ".", "set_false", ":", "value", "=", "False", "else", ":", "value", "=", "args", ".", "value", "if", "value", "is", "not", "None", ":", "updater", ".", "set", "(", "key", "=", "args", ".", "key", ",", "value", "=", "value", ")", "else", ":", "got", "=", "updater", ".", "get", "(", "key", "=", "args", ".", "key", ")", "if", "got", ":", "print", "(", "got", ")" ]
Perform an update instigated from a CLI.
[ "Perform", "an", "update", "instigated", "from", "a", "CLI", "." ]
8f25bb849b72ce95957566544a2be8445316c818
https://github.com/cariad/py-wpconfigr/blob/8f25bb849b72ce95957566544a2be8445316c818/wpconfigr/__main__.py#L12-L76
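Typical invocations of the parser defined above, driven through subprocess for illustration (the wp-config.php path is hypothetical; the flags are the documented ones):

import subprocess

# Write: set WP_DEBUG to boolean true.
subprocess.run(['python', '-m', 'wpconfigr',
                '--filename', '/var/www/wp-config.php',
                '--key', 'WP_DEBUG', '--set-true'], check=True)

# Read: omitting --value prints the current value of the key.
out = subprocess.run(['python', '-m', 'wpconfigr',
                      '--filename', '/var/www/wp-config.php',
                      '--key', 'DB_NAME'],
                     capture_output=True, text=True, check=True)
print(out.stdout.strip())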
249,122
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
manage_file_analysis
def manage_file_analysis(args: argparse.Namespace, filename: str, data: object) -> None:
    """ Take care of the analysis of a datafile """
    key = DataStore.hashfile(filename)
    print('Analyzing {} --> {}'.format(filename, key))
    if data.check_key(key):  # if exists in database, prepopulate
        fit = LineFit(filename, data=data.get_data(key))
    else:
        fit = LineFit(filename)
    if args.time:
        noise, curvature, rnge, domn = fit.analyze(time=args.time)
        newrow = [args.time, noise, curvature, rnge, domn, fit.accepts[args.time]]
        data.update1(key, newrow, len(fit.noises))
    else:
        fit.analyze_full()
        newrows = np.array([range(len(fit.noises)),
                            fit.noises, fit.curves,
                            fit.ranges, fit.domains,
                            fit.accepts])
        data.update(key, newrows)
    data.save()
python
def manage_file_analysis(args: argparse.Namespace, filename: str, data: object) -> None:
    """ Take care of the analysis of a datafile """
    key = DataStore.hashfile(filename)
    print('Analyzing {} --> {}'.format(filename, key))
    if data.check_key(key):  # if exists in database, prepopulate
        fit = LineFit(filename, data=data.get_data(key))
    else:
        fit = LineFit(filename)
    if args.time:
        noise, curvature, rnge, domn = fit.analyze(time=args.time)
        newrow = [args.time, noise, curvature, rnge, domn, fit.accepts[args.time]]
        data.update1(key, newrow, len(fit.noises))
    else:
        fit.analyze_full()
        newrows = np.array([range(len(fit.noises)),
                            fit.noises, fit.curves,
                            fit.ranges, fit.domains,
                            fit.accepts])
        data.update(key, newrows)
    data.save()
[ "def", "manage_file_analysis", "(", "args", ":", "argparse", ".", "Namespace", ",", "filename", ":", "str", ",", "data", ":", "object", ")", "->", "None", ":", "key", "=", "DataStore", ".", "hashfile", "(", "filename", ")", "print", "(", "'Analyzing {} --> {}'", ".", "format", "(", "filename", ",", "key", ")", ")", "if", "data", ".", "check_key", "(", "key", ")", ":", "# if exists in database, prepopulate", "fit", "=", "LineFit", "(", "filename", ",", "data", "=", "data", ".", "get_data", "(", "key", ")", ")", "else", ":", "fit", "=", "LineFit", "(", "filename", ")", "if", "args", ".", "time", ":", "noise", ",", "curvature", ",", "rnge", ",", "domn", "=", "fit", ".", "analyze", "(", "time", "=", "args", ".", "time", ")", "newrow", "=", "[", "args", ".", "time", ",", "noise", ",", "curvature", ",", "rnge", ",", "domn", ",", "fit", ".", "accepts", "[", "args", ".", "time", "]", "]", "data", ".", "update1", "(", "key", ",", "newrow", ",", "len", "(", "fit", ".", "noises", ")", ")", "else", ":", "fit", ".", "analyze_full", "(", ")", "newrows", "=", "np", ".", "array", "(", "[", "range", "(", "len", "(", "fit", ".", "noises", ")", ")", ",", "fit", ".", "noises", ",", "fit", ".", "curves", ",", "fit", ".", "ranges", ",", "fit", ".", "domains", ",", "fit", ".", "accepts", "]", ")", "data", ".", "update", "(", "key", ",", "newrows", ")", "data", ".", "save", "(", ")" ]
Take care of the analysis of a datafile
[ "Take", "care", "of", "the", "analysis", "of", "a", "datafile" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L79-L99
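DataStore.hashfile itself is not part of this excerpt; given that get_keys below documents the store's keys as SHA512 hashes, a plausible reconstruction is a chunked file digest along these lines (hypothetical; the real helper may differ):

import hashlib

def hashfile(filename: str, blocksize: int = 65536) -> str:
    """Digest a file in blocks to bound memory use; would live on
    DataStore as a staticmethod."""
    digest = hashlib.sha512()
    with open(filename, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            digest.update(block)
    return digest.hexdigest()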
249,123
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
get_args
def get_args() -> argparse.Namespace:
    """
    Get program arguments.

    Just use --help....
    """
    parser = argparse.ArgumentParser(prog='python3 linefit.py',
                                     description=('Parameterize and analyze '
                                                  'usability of conduit edge data'))
    parser.add_argument('files', metavar='F', type=str, nargs='*',
                        help=('File(s) for processing. '
                              'Each file has a specific format: '
                              'See README (or header) for specification.'))
    parser.add_argument('-p', '--plot', action='store_true', default=False,
                        help=('Create Plot of file(s)? Note, unless --time flag used, '
                              'will plot middle time.'))
    parser.add_argument('-pd', '--plotdata', action='store_true', default=False,
                        help='Create plot of current datastore.')
    parser.add_argument('-a', '--analyze', action='store_true', default=False,
                        help=('Analyze the file and determine Curvature/Noise parameters. '
                              'If --time not specified, will examine entire file. '
                              'This will add results to datastore with false flags '
                              'in accept field if not provided.'))
    parser.add_argument('-mt', '--machinetest', action='store_true', default=False,
                        help=('Determine if the times from the file are usable based on '
                              'supervised learning model. If --time not specified, '
                              'will examine entire file.'))
    parser.add_argument('-m', '--model', type=str, default='nn',
                        help=('Learning Model to use. Options are ["nn", "svm", "forest", "sgd"]'))
    parser.add_argument('-nnk', '--nnk', type=int, default=10,
                        help=('k-Parameter for k nearest neighbors. Google it.'))
    parser.add_argument('-t', '--time', type=int, default=None,
                        help=('Time (column) of data to use for analysis OR plotting. '
                              'Zero-Indexed'))
    parser.add_argument('-d', '--datastore', type=str, default=DATASTORE,
                        help=("Datastore filename override. "
                              "Don't do this unless you know what you're doing"))
    parser.add_argument('-pds', '--printdata', action='store_true', default=False,
                        help=("Print data"))
    parser.add_argument('-pdss', '--printdatashort', action='store_true', default=False,
                        help=("Print data short"))
    args = parser.parse_args()
    return args
python
def get_args() -> argparse.Namespace:
    """
    Get program arguments.

    Just use --help....
    """
    parser = argparse.ArgumentParser(prog='python3 linefit.py',
                                     description=('Parameterize and analyze '
                                                  'usability of conduit edge data'))
    parser.add_argument('files', metavar='F', type=str, nargs='*',
                        help=('File(s) for processing. '
                              'Each file has a specific format: '
                              'See README (or header) for specification.'))
    parser.add_argument('-p', '--plot', action='store_true', default=False,
                        help=('Create Plot of file(s)? Note, unless --time flag used, '
                              'will plot middle time.'))
    parser.add_argument('-pd', '--plotdata', action='store_true', default=False,
                        help='Create plot of current datastore.')
    parser.add_argument('-a', '--analyze', action='store_true', default=False,
                        help=('Analyze the file and determine Curvature/Noise parameters. '
                              'If --time not specified, will examine entire file. '
                              'This will add results to datastore with false flags '
                              'in accept field if not provided.'))
    parser.add_argument('-mt', '--machinetest', action='store_true', default=False,
                        help=('Determine if the times from the file are usable based on '
                              'supervised learning model. If --time not specified, '
                              'will examine entire file.'))
    parser.add_argument('-m', '--model', type=str, default='nn',
                        help=('Learning Model to use. Options are ["nn", "svm", "forest", "sgd"]'))
    parser.add_argument('-nnk', '--nnk', type=int, default=10,
                        help=('k-Parameter for k nearest neighbors. Google it.'))
    parser.add_argument('-t', '--time', type=int, default=None,
                        help=('Time (column) of data to use for analysis OR plotting. '
                              'Zero-Indexed'))
    parser.add_argument('-d', '--datastore', type=str, default=DATASTORE,
                        help=("Datastore filename override. "
                              "Don't do this unless you know what you're doing"))
    parser.add_argument('-pds', '--printdata', action='store_true', default=False,
                        help=("Print data"))
    parser.add_argument('-pdss', '--printdatashort', action='store_true', default=False,
                        help=("Print data short"))
    args = parser.parse_args()
    return args
[ "def", "get_args", "(", ")", "->", "argparse", ".", "Namespace", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'python3 linefit.py'", ",", "description", "=", "(", "'Parameterize and analyze '", "'usability of conduit edge data'", ")", ")", "parser", ".", "add_argument", "(", "'files'", ",", "metavar", "=", "'F'", ",", "type", "=", "str", ",", "nargs", "=", "'*'", ",", "help", "=", "(", "'File(s) for processing. '", "'Each file has a specific format: '", "'See README (or header) for specification.'", ")", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--plot'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "(", "'Create Plot of file(s)? Note, unless --time flag used, '", "'will plot middle time.'", ")", ")", "parser", ".", "add_argument", "(", "'-pd'", ",", "'--plotdata'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create plot of current datastore.'", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--analyze'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "(", "'Analyze the file and determine Curvature/Noise parameters. '", "'If --time not specified, will examine entire file. '", "'This will add results to datastore with false flags '", "'in accept field if not provided.'", ")", ")", "parser", ".", "add_argument", "(", "'-mt'", ",", "'--machinetest'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "(", "'Determine if the times from the file are usable based on '", "'supervised learning model. If --time not specified, '", "'will examine entire file.'", ")", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "'--model'", ",", "type", "=", "str", ",", "default", "=", "'nn'", ",", "help", "=", "(", "'Learning Model to use. Options are [\"nn\", \"svm\", \"forest\", \"sgd\"]'", ")", ")", "parser", ".", "add_argument", "(", "'-nnk'", ",", "'--nnk'", ",", "type", "=", "int", ",", "default", "=", "10", ",", "help", "=", "(", "'k-Parameter for k nearest neighbors. Google it.'", ")", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--time'", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "(", "'Time (column) of data to use for analysis OR plotting. '", "'Zero-Indexed'", ")", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--datastore'", ",", "type", "=", "str", ",", "default", "=", "DATASTORE", ",", "help", "=", "(", "\"Datastore filename override. \"", "\"Don't do this unless you know what you're doing\"", ")", ")", "parser", ".", "add_argument", "(", "'-pds'", ",", "'--printdata'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "(", "\"Print data\"", ")", ")", "parser", ".", "add_argument", "(", "'-pdss'", ",", "'--printdatashort'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "(", "\"Print data short\"", ")", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Get program arguments. Just use --help....
[ "Get", "program", "arguments", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L517-L559
249,124
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.get_keys
def get_keys(self) -> typing.List[str]: """ Return list of SHA512 hash keys that exist in datafile :return: list of keys """ keys = [] for key in self.data.keys(): if key not in ['__header__', '__version__', '__globals__']: keys.append(key) return keys
python
def get_keys(self) -> typing.List[str]: """ Return list of SHA512 hash keys that exist in datafile :return: list of keys """ keys = [] for key in self.data.keys(): if key not in ['__header__', '__version__', '__globals__']: keys.append(key) return keys
[ "def", "get_keys", "(", "self", ")", "->", "typing", ".", "List", "[", "str", "]", ":", "keys", "=", "[", "]", "for", "key", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "[", "'__header__'", ",", "'__version__'", ",", "'__globals__'", "]", ":", "keys", ".", "append", "(", "key", ")", "return", "keys" ]
Return list of SHA512 hash keys that exist in datafile :return: list of keys
[ "Return", "list", "of", "SHA512", "hash", "keys", "that", "exist", "in", "datafile" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L151-L161
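The three excluded names are the metadata entries that scipy.io.loadmat adds to every loaded .mat dict, which is why get_keys() filters them. A toy sketch of the same filter (the hash keys are shortened stand-ins):

```python
META = ('__header__', '__version__', '__globals__')

# Shape of a scipy.io.loadmat result: stored variables plus meta-entries.
data = {
    '__header__': b'MATLAB 5.0 MAT-file',
    '__version__': '1.0',
    '__globals__': [],
    '3a7f...': [[1.0, 2.0]],   # SHA512 hash keys, truncated here
    '9c2e...': [[3.0, 4.0]],
}

keys = [k for k in data if k not in META]
print(keys)  # ['3a7f...', '9c2e...']
```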
249,125
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.check_key
def check_key(self, key: str) -> bool: """ Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or not the key exists in datastore """ keys = self.get_keys() return key in keys
python
def check_key(self, key: str) -> bool: """ Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or not the key exists in datastore """ keys = self.get_keys() return key in keys
[ "def", "check_key", "(", "self", ",", "key", ":", "str", ")", "->", "bool", ":", "keys", "=", "self", ".", "get_keys", "(", ")", "return", "key", "in", "keys" ]
Checks if key exists in datastore. True if yes, False if no. :param: SHA512 hash key :return: whether or not the key exists in datastore
[ "Checks", "if", "key", "exists", "in", "datastore", ".", "True", "if", "yes", "False", "if", "no", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L164-L173
249,126
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.get_traindata
def get_traindata(self) -> np.ndarray: """ Pulls all available data and concatenates for model training :return: 2d array of points """ traindata = None for key, value in self.data.items(): if key not in ['__header__', '__version__', '__globals__']: if traindata is None: traindata = value[np.where(value[:, 4] != 0)] else: traindata = np.concatenate((traindata, value[np.where(value[:, 4] != 0)])) return traindata
python
def get_traindata(self) -> np.ndarray: """ Pulls all available data and concatenates for model training :return: 2d array of points """ traindata = None for key, value in self.data.items(): if key not in ['__header__', '__version__', '__globals__']: if traindata is None: traindata = value[np.where(value[:, 4] != 0)] else: traindata = np.concatenate((traindata, value[np.where(value[:, 4] != 0)])) return traindata
[ "def", "get_traindata", "(", "self", ")", "->", "np", ".", "ndarray", ":", "traindata", "=", "None", "for", "key", ",", "value", "in", "self", ".", "data", ".", "items", "(", ")", ":", "if", "key", "not", "in", "[", "'__header__'", ",", "'__version__'", ",", "'__globals__'", "]", ":", "if", "traindata", "is", "None", ":", "traindata", "=", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", "else", ":", "traindata", "=", "np", ".", "concatenate", "(", "(", "traindata", ",", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", ")", ")", "return", "traindata" ]
Pulls all available data and concatenates for model training :return: 2d array of points
[ "Pulls", "all", "available", "data", "and", "concatenates", "for", "model", "training" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L175-L188
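An equivalent way to build the training matrix is to collect the masked slices in a list and stack once at the end. A sketch on toy arrays; the `value[:, 4] != 0` mask appears to keep only rows that have been filled in (analyze() uses a zero in that column as its "not yet computed" sentinel):

```python
import numpy as np

# Toy stand-in for self.data: two records plus a loadmat meta-entry.
data = {
    '__header__': b'',
    'hash1': np.arange(18, dtype=float).reshape(3, 6),
    'hash2': np.arange(18, 36, dtype=float).reshape(3, 6),
}
data['hash1'][0, 4] = 0.0  # row with an unset column 4 -> excluded

# Equivalent to the loop above: gather the kept rows, then stack once.
kept = [v[v[:, 4] != 0] for k, v in data.items()
        if k not in ('__header__', '__version__', '__globals__')]
traindata = np.vstack(kept) if kept else None
print(traindata.shape)  # (5, 6): one of the six rows was filtered out
```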
249,127
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.printdata
def printdata(self) -> None: """ Prints data to stdout """ np.set_printoptions(threshold=np.inf) print(self.data) np.set_printoptions(threshold=1000)
python
def printdata(self) -> None: """ Prints data to stdout """ np.set_printoptions(threshold=np.inf) print(self.data) np.set_printoptions(threshold=1000)
[ "def", "printdata", "(", "self", ")", "->", "None", ":", "np", ".", "set_printoptions", "(", "threshold", "=", "np", ".", "nan", ")", "print", "(", "self", ".", "data", ")", "np", ".", "set_printoptions", "(", "threshold", "=", "1000", ")" ]
Prints data to stdout
[ "Prints", "data", "to", "stdout" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L205-L209
249,128
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.update
def update(self, key: str, data: np.ndarray) -> None: """ Update entry in datastore """ self.data[key] = data
python
def update(self, key: str, data: np.ndarray) -> None: """ Update entry in datastore """ self.data[key] = data
[ "def", "update", "(", "self", ",", "key", ":", "str", ",", "data", ":", "np", ".", "ndarray", ")", "->", "None", ":", "self", ".", "data", "[", "key", "]", "=", "data" ]
Update entry in datastore
[ "Update", "entry", "in", "datastore" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L216-L218
249,129
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.update1
def update1(self, key: str, data: np.ndarray, size: int) -> None: """ Update one entry in specific record in datastore """ print(data) if key in self.get_keys(): self.data[key][data[0]] = data else: newdata = np.zeros((size, 6)) newdata[data[0]] = data self.data[key] = newdata
python
def update1(self, key: str, data: np.ndarray, size: int) -> None: """ Update one entry in specific record in datastore """ print(data) if key in self.get_keys(): self.data[key][data[0]] = data else: newdata = np.zeros((size, 6)) newdata[data[0]] = data self.data[key] = newdata
[ "def", "update1", "(", "self", ",", "key", ":", "str", ",", "data", ":", "np", ".", "ndarray", ",", "size", ":", "int", ")", "->", "None", ":", "print", "(", "data", ")", "if", "key", "in", "self", ".", "get_keys", "(", ")", ":", "self", ".", "data", "[", "key", "]", "[", "data", "[", "0", "]", "]", "=", "data", "else", ":", "newdata", "=", "np", ".", "zeros", "(", "(", "size", ",", "6", ")", ")", "newdata", "[", "data", "[", "0", "]", "]", "=", "data", "self", ".", "data", "[", "key", "]", "=", "newdata" ]
Update one entry in specific record in datastore
[ "Update", "one", "entry", "in", "specific", "record", "in", "datastore" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L220-L228
249,130
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
DataStore.hashfile
def hashfile(name: str) -> str: """ Gets a hash of a file using block parsing http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file Using SHA512 for long-term support (hehehehe) """ hasher = hashlib.sha512() with open(name, 'rb') as openfile: for chunk in iter(lambda: openfile.read(4096), b''): hasher.update(chunk) return hasher.hexdigest()
python
def hashfile(name: str) -> str: """ Gets a hash of a file using block parsing http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file Using SHA512 for long-term support (hehehehe) """ hasher = hashlib.sha512() with open(name, 'rb') as openfile: for chunk in iter(lambda: openfile.read(4096), b''): hasher.update(chunk) return hasher.hexdigest()
[ "def", "hashfile", "(", "name", ":", "str", ")", "->", "str", ":", "hasher", "=", "hashlib", ".", "sha512", "(", ")", "with", "open", "(", "name", ",", "'rb'", ")", "as", "openfile", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "openfile", ".", "read", "(", "4096", ")", ",", "b''", ")", ":", "hasher", ".", "update", "(", "chunk", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
Gets a hash of a file using block parsing http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file Using SHA512 for long-term support (hehehehe)
[ "Gets", "a", "hash", "of", "a", "file", "using", "block", "parsing" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L232-L243
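The 4096-byte chunking keeps memory flat for arbitrarily large files (note the signature has no self, so inside the class this is presumably a staticmethod). A quick self-contained check that the chunked digest matches hashing the file in one shot:

```python
import hashlib
import tempfile

# Write a file larger than one 4096-byte chunk.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'x' * 10_000)
    path = tmp.name

hasher = hashlib.sha512()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(4096), b''):
        hasher.update(chunk)

with open(path, 'rb') as f:
    assert hasher.hexdigest() == hashlib.sha512(f.read()).hexdigest()
```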
249,131
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit._loadedges
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]: """ Attempts to intelligently load the .mat file and take average of left and right edges :return: left and right averages :return: times for each column :return: accept/reject for each column :return: pixel-inch ratio """ data = sco.loadmat(self.filename) datakeys = [k for k in data.keys() if ('right' in k) or ('left' in k) or ('edge' in k)] averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2) try: times = (data['times'] - data['times'].min())[0] except KeyError: times = np.arange(len(data[datakeys[0]][0])) try: accept = data['accept'] except KeyError: accept = np.zeros(len(times)) try: ratio = data['ratio'] except KeyError: ratio = 1 try: viscosity = data['viscosity'] except KeyError: viscosity = np.ones(len(times)) return averagedata, times, accept, ratio, viscosity
python
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]: """ Attempts to intelligently load the .mat file and take average of left and right edges :return: left and right averages :return: times for each column :return: accept/reject for each column :return: pixel-inch ratio """ data = sco.loadmat(self.filename) datakeys = [k for k in data.keys() if ('right' in k) or ('left' in k) or ('edge' in k)] averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2) try: times = (data['times'] - data['times'].min())[0] except KeyError: times = np.arange(len(data[datakeys[0]][0])) try: accept = data['accept'] except KeyError: accept = np.zeros(len(times)) try: ratio = data['ratio'] except KeyError: ratio = 1 try: viscosity = data['viscosity'] except KeyError: viscosity = np.ones(len(times)) return averagedata, times, accept, ratio, viscosity
[ "def", "_loadedges", "(", "self", ")", "->", "typing", ".", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "float", ",", "np", ".", "ndarray", "]", ":", "data", "=", "sco", ".", "loadmat", "(", "self", ".", "filename", ")", "datakeys", "=", "[", "k", "for", "k", "in", "data", ".", "keys", "(", ")", "if", "(", "'right'", "in", "k", ")", "or", "(", "'left'", "in", "k", ")", "or", "(", "'edge'", "in", "k", ")", "]", "averagedata", "=", "(", "(", "data", "[", "datakeys", "[", "0", "]", "]", "+", "data", "[", "datakeys", "[", "1", "]", "]", ")", "/", "2", ")", "try", ":", "times", "=", "(", "data", "[", "'times'", "]", "-", "data", "[", "'times'", "]", ".", "min", "(", ")", ")", "[", "0", "]", "except", "KeyError", ":", "times", "=", "np", ".", "arange", "(", "len", "(", "data", "[", "datakeys", "[", "0", "]", "]", "[", "0", "]", ")", ")", "try", ":", "accept", "=", "data", "[", "'accept'", "]", "except", "KeyError", ":", "accept", "=", "np", ".", "zeros", "(", "len", "(", "times", ")", ")", "try", ":", "ratio", "=", "data", "[", "'ratio'", "]", "except", "KeyError", ":", "ratio", "=", "1", "try", ":", "viscosity", "=", "data", "[", "'viscosity'", "]", "except", "KeyError", ":", "viscosity", "=", "np", ".", "ones", "(", "len", "(", "times", ")", ")", "return", "averagedata", ",", "times", ",", "accept", ",", "ratio", ",", "viscosity" ]
Attempts to intelligently load the .mat file and take average of left and right edges :return: left and right averages :return: times for each column :return: accept/reject for each column :return: pixel-inch ratio :return: viscosity for each column
[ "Attempts", "to", "intelligently", "load", "the", ".", "mat", "file", "and", "take", "average", "of", "left", "and", "right", "edges" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L275-L308
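A round-trip sketch of the defensive loading: scipy.io.savemat writes a file holding only edge arrays, so every optional key ('times', 'accept', 'ratio', 'viscosity') takes its KeyError fallback on the way back in:

```python
import os
import tempfile

import numpy as np
import scipy.io as sco

left, right = np.random.rand(50, 4), np.random.rand(50, 4)
path = os.path.join(tempfile.mkdtemp(), 'edges_demo.mat')
sco.savemat(path, {'left_edges': left, 'right_edges': right})

data = sco.loadmat(path)
datakeys = [k for k in data
            if ('right' in k) or ('left' in k) or ('edge' in k)]
average = (data[datakeys[0]] + data[datakeys[1]]) / 2
times = np.arange(average.shape[1])  # the fallback branch above
print(average.shape, times)          # (50, 4) [0 1 2 3]
```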
249,132
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit.plot_file
def plot_file(self, name: str=None, time: int=None) -> None: """ Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column """ if time is None: time = int(len(self.times) / 2) if not name: name = './img/' + self.filename + '.png' yhat, residuals, residual_mean, noise = self._get_fit(time) plt.figure() plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2) plt.plot(yhat) plt.savefig(name)
python
def plot_file(self, name: str=None, time: int=None) -> None: """ Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column """ if time is None: time = int(len(self.times) / 2) if not name: name = './img/' + self.filename + '.png' yhat, residuals, residual_mean, noise = self._get_fit(time) plt.figure() plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2) plt.plot(yhat) plt.savefig(name)
[ "def", "plot_file", "(", "self", ",", "name", ":", "str", "=", "None", ",", "time", ":", "int", "=", "None", ")", "->", "None", ":", "if", "not", "time", ":", "time", "=", "int", "(", "len", "(", "self", ".", "times", ")", "/", "2", ")", "if", "not", "name", ":", "name", "=", "'./img/'", "+", "self", ".", "filename", "+", "'.png'", "yhat", ",", "residuals", ",", "residual_mean", ",", "noise", "=", "self", ".", "_get_fit", "(", "time", ")", "plt", ".", "figure", "(", ")", "plt", ".", "scatter", "(", "self", ".", "domain", ",", "self", ".", "averagedata", "[", ":", ",", "time", "]", ",", "alpha", "=", "0.2", ")", "plt", ".", "plot", "(", "yhat", ")", "plt", ".", "savefig", "(", "name", ")" ]
Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column
[ "Plot", "specific", "time", "for", "provided", "datafile", ".", "If", "no", "time", "provided", "will", "plot", "middle", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L310-L326
249,133
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit._gaussian_function
def _gaussian_function(self, datalength: int, values: np.ndarray, height: int, index: int) -> np.ndarray: """ i'th Regression Model Gaussian :param: len(x) :param: x values :param: height of gaussian :param: position of gaussian :return: gaussian bumps over domain """ return height * np.exp(-(1 / (self.spread_number * datalength)) * (values - ((datalength / self.function_number) * index)) ** 2)
python
def _gaussian_function(self, datalength: int, values: np.ndarray, height: int, index: int) -> np.ndarray: """ i'th Regression Model Gaussian :param: len(x) :param: x values :param: height of gaussian :param: position of gaussian :return: gaussian bumps over domain """ return height * np.exp(-(1 / (self.spread_number * datalength)) * (values - ((datalength / self.function_number) * index)) ** 2)
[ "def", "_gaussian_function", "(", "self", ",", "datalength", ":", "int", ",", "values", ":", "np", ".", "ndarray", ",", "height", ":", "int", ",", "index", ":", "int", ")", "->", "np", ".", "ndarray", ":", "return", "height", "*", "np", ".", "exp", "(", "-", "(", "1", "/", "(", "self", ".", "spread_number", "*", "datalength", ")", ")", "*", "(", "values", "-", "(", "(", "datalength", "/", "self", ".", "function_number", ")", "*", "index", ")", ")", "**", "2", ")" ]
i'th Regression Model Gaussian :param: len(x) :param: x values :param: height of gaussian :param: position of gaussian :return: gaussian bumps over domain
[ "i", "th", "Regression", "Model", "Gaussian" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L339-L352
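Written out, the i-th basis function is a Gaussian bump g_i(x) = h * exp(-(x - (n/m) * i)^2 / (s * n)), where n is the data length, m is self.function_number and s is self.spread_number, so the bumps sit at evenly spaced centres (n/m) * i. A standalone numeric check (the values of m and s below are illustrative guesses, not taken from the source):

```python
import numpy as np

def gaussian_basis(n: int, x: np.ndarray, height: float, i: int,
                   m: int = 12, s: float = 0.05) -> np.ndarray:
    """Free-function mirror of _gaussian_function; m (function_number)
    and s (spread_number) are assumed values for illustration."""
    return height * np.exp(-(1 / (s * n)) * (x - (n / m) * i) ** 2)

x = np.arange(100)
bump = gaussian_basis(100, x, 1.0, 3)
print(bump.argmax())  # 25 == (100 / 12) * 3, the bump's centre
```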
249,134
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit._get_fit
def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]: """ Fit regression model to data :param: time (column of data) :return: predicted points :return: residuals :return: mean residual :return: error """ rawdata = self.averagedata[:, time] domain = np.arange(len(rawdata)) datalength = len(domain) coefficients = np.zeros((datalength, self.function_number + 2)) coefficients[:, 0] = 1 coefficients[:, 1] = domain for i in range(self.function_number): coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i) betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata)) predicted_values = coefficients.dot(betas) residuals = rawdata - predicted_values error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2))) return predicted_values, residuals, residuals.mean(), error
python
def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]: """ Fit regression model to data :param: time (column of data) :return: predicted points :return: residuals :return: mean residual :return: error """ rawdata = self.averagedata[:, time] domain = np.arange(len(rawdata)) datalength = len(domain) coefficients = np.zeros((datalength, self.function_number + 2)) coefficients[:, 0] = 1 coefficients[:, 1] = domain for i in range(self.function_number): coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i) betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata)) predicted_values = coefficients.dot(betas) residuals = rawdata - predicted_values error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2))) return predicted_values, residuals, residuals.mean(), error
[ "def", "_get_fit", "(", "self", ",", "time", ":", "int", ")", "->", "typing", ".", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "float", ",", "float", "]", ":", "rawdata", "=", "self", ".", "averagedata", "[", ":", ",", "time", "]", "domain", "=", "np", ".", "arange", "(", "len", "(", "rawdata", ")", ")", "datalength", "=", "len", "(", "domain", ")", "coefficients", "=", "np", ".", "zeros", "(", "(", "datalength", ",", "self", ".", "function_number", "+", "2", ")", ")", "coefficients", "[", ":", ",", "0", "]", "=", "1", "coefficients", "[", ":", ",", "1", "]", "=", "domain", "for", "i", "in", "range", "(", "self", ".", "function_number", ")", ":", "coefficients", "[", ":", ",", "2", "+", "i", "]", "=", "self", ".", "_gaussian_function", "(", "datalength", ",", "domain", ",", "1", ",", "i", ")", "betas", "=", "linalg", ".", "inv", "(", "coefficients", ".", "transpose", "(", ")", ".", "dot", "(", "coefficients", ")", ")", ".", "dot", "(", "coefficients", ".", "transpose", "(", ")", ".", "dot", "(", "rawdata", ")", ")", "predicted_values", "=", "coefficients", ".", "dot", "(", "betas", ")", "residuals", "=", "rawdata", "-", "predicted_values", "error", "=", "np", ".", "sqrt", "(", "residuals", ".", "transpose", "(", ")", ".", "dot", "(", "residuals", ")", "/", "(", "datalength", "-", "(", "self", ".", "function_number", "+", "2", ")", ")", ")", "return", "predicted_values", ",", "residuals", ",", "residuals", ".", "mean", "(", ")", ",", "error" ]
Fit regression model to data :param: time (column of data) :return: predicted points :return: residuals :return: mean residual :return: error
[ "Fit", "regression", "model", "to", "data" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L355-L378
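The betas above come from the normal equations with an explicit matrix inverse; numerically, np.linalg.lstsq solves the same least-squares problem more stably and is usually preferred. A sketch showing both agree on a well-conditioned toy design matrix, with the same degrees-of-freedom error estimator:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 200
X = np.column_stack([np.ones(n), np.arange(n), rng.normal(size=(n, 3))])
y = X @ np.array([2.0, 0.5, 1.0, -1.0, 0.25]) + rng.normal(scale=0.1, size=n)

betas_normal = np.linalg.inv(X.T @ X) @ (X.T @ y)    # as in _get_fit
betas_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)  # preferred form
assert np.allclose(betas_normal, betas_lstsq, atol=1e-8)

residuals = y - X @ betas_lstsq
dof = n - X.shape[1]
error = np.sqrt(residuals @ residuals / dof)  # same estimator as above
print(round(error, 2))  # ~0.1, the injected noise scale
```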
249,135
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit._get_noise
def _get_noise(self, residuals: np.ndarray) -> float: """ Determine Noise of Residuals. :param: residuals :return: noise """ return np.mean(np.abs(residuals))
python
def _get_noise(self, residuals: np.ndarray) -> float: """ Determine Noise of Residuals. :param: residuals :return: noise """ return np.mean(np.abs(residuals))
[ "def", "_get_noise", "(", "self", ",", "residuals", ":", "np", ".", "ndarray", ")", "->", "float", ":", "return", "np", ".", "mean", "(", "np", ".", "abs", "(", "residuals", ")", ")" ]
Determine Noise of Residuals. :param: residuals :return: noise
[ "Determine", "Noise", "of", "Residuals", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L381-L389
249,136
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit.analyze
def analyze(self, time: int=None) -> typing.Tuple[float, float, int, int]: """ Determine noise, curvature, range, and domain of specified array. :param: time (column) to use. :return: noise :return: curvature :return: range :return: domain """ if time is None: time = int(len(self.times) / 2) if self.domains[time] == 0: yhat, residuals, mean_residual, error = self._get_fit(time) yhat_p = self.ddiff(yhat) yhat_pp = self.ddiff(yhat_p) noise = self._get_noise(residuals) curvature = (1 / self.ratio) * (1 / len(yhat_pp)) * np.sqrt(si.simps(yhat_pp ** 2)) rng = (self.ratio * (np.max(self.averagedata[:, time]) - np.min(self.averagedata[:, time]))) dmn = self.ratio * len(self.averagedata[:, time]) self.noises[time] = np.log10(noise) self.curves[time] = np.log10(curvature) self.ranges[time] = np.log10(rng) self.domains[time] = np.log10(dmn) return self.noises[time], self.curves[time], self.ranges[time], self.domains[time]
python
def analyze(self, time: int=None) -> typing.Tuple[float, float, int, int]: """ Determine noise, curvature, range, and domain of specified array. :param: time (column) to use. :return: noise :return: curvature :return: range :return: domain """ if time is None: time = int(len(self.times) / 2) if self.domains[time] == 0: yhat, residuals, mean_residual, error = self._get_fit(time) yhat_p = self.ddiff(yhat) yhat_pp = self.ddiff(yhat_p) noise = self._get_noise(residuals) curvature = (1 / self.ratio) * (1 / len(yhat_pp)) * np.sqrt(si.simps(yhat_pp ** 2)) rng = (self.ratio * (np.max(self.averagedata[:, time]) - np.min(self.averagedata[:, time]))) dmn = self.ratio * len(self.averagedata[:, time]) self.noises[time] = np.log10(noise) self.curves[time] = np.log10(curvature) self.ranges[time] = np.log10(rng) self.domains[time] = np.log10(dmn) return self.noises[time], self.curves[time], self.ranges[time], self.domains[time]
[ "def", "analyze", "(", "self", ",", "time", ":", "int", "=", "None", ")", "->", "typing", ".", "Tuple", "[", "float", ",", "float", ",", "int", ",", "int", "]", ":", "if", "not", "time", ":", "time", "=", "int", "(", "len", "(", "self", ".", "times", ")", "/", "2", ")", "if", "self", ".", "domains", "[", "time", "]", "==", "0", ":", "yhat", ",", "residuals", ",", "mean_residual", ",", "error", "=", "self", ".", "_get_fit", "(", "time", ")", "yhat_p", "=", "self", ".", "ddiff", "(", "yhat", ")", "yhat_pp", "=", "self", ".", "ddiff", "(", "yhat_p", ")", "noise", "=", "self", ".", "_get_noise", "(", "residuals", ")", "curvature", "=", "(", "1", "/", "self", ".", "ratio", ")", "*", "(", "1", "/", "len", "(", "yhat_pp", ")", ")", "*", "np", ".", "sqrt", "(", "si", ".", "simps", "(", "yhat_pp", "**", "2", ")", ")", "rng", "=", "(", "self", ".", "ratio", "*", "(", "np", ".", "max", "(", "self", ".", "averagedata", "[", ":", ",", "time", "]", ")", "-", "np", ".", "min", "(", "self", ".", "averagedata", "[", ":", ",", "time", "]", ")", ")", ")", "dmn", "=", "self", ".", "ratio", "*", "len", "(", "self", ".", "averagedata", "[", ":", ",", "time", "]", ")", "self", ".", "noises", "[", "time", "]", "=", "np", ".", "log10", "(", "noise", ")", "self", ".", "curves", "[", "time", "]", "=", "np", ".", "log10", "(", "curvature", ")", "self", ".", "ranges", "[", "time", "]", "=", "np", ".", "log10", "(", "rng", ")", "self", ".", "domains", "[", "time", "]", "=", "np", ".", "log10", "(", "dmn", ")", "return", "self", ".", "noises", "[", "time", "]", ",", "self", ".", "curves", "[", "time", "]", ",", "self", ".", "ranges", "[", "time", "]", ",", "self", ".", "domains", "[", "time", "]" ]
Determine noise, curvature, range, and domain of specified array. :param: time (column) to use. :return: noise :return: curvature :return: range :return: domain
[ "Determine", "noise", "curvature", "range", "and", "domain", "of", "specified", "array", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L392-L420
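The curvature parameter is (1/ratio) * (1/len(y'')) * sqrt of the Simpson-rule integral of (y'')^2. A toy sketch on a known curve, with ratio taken as 1, np.gradient standing in for the module's ddiff helper (an assumption; ddiff is not shown here), and scipy.integrate.simpson as the modern name of si.simps:

```python
import numpy as np
from scipy.integrate import simpson  # si.simps is the legacy alias

x = np.linspace(0, 2 * np.pi, 500)
y = np.sin(x)
y_pp = np.gradient(np.gradient(y, x), x)  # numerical second derivative
curvature = (1 / len(y_pp)) * np.sqrt(simpson(y_pp ** 2, x=x))
print(np.log10(curvature))  # analyze() stores the log10 of this value
```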
249,137
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
LineFit.analyze_full
def analyze_full(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ Determine noise, curvature, range, and domain of specified data. Like analyze, except examines the entire file. :return: array->noises :return: array->curvatures :return: array->ranges :return: array->domains """ if self.noises[0] == 0: timelength = len(self.times) for i in tqdm(range(timelength)): self.analyze(time=i) return self.noises, self.curves, self.ranges, self.domains
python
def analyze_full(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ Determine noise, curvature, range, and domain of specified data. Like analyze, except examines the entire file. :return: array->noises :return: array->curvatures :return: array->ranges :return: array->domains """ if self.noises[0] == 0: timelength = len(self.times) for i in tqdm(range(timelength)): self.analyze(time=i) return self.noises, self.curves, self.ranges, self.domains
[ "def", "analyze_full", "(", "self", ")", "->", "typing", ".", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "np", ".", "ndarray", ",", "np", ".", "ndarray", "]", ":", "if", "self", ".", "noises", "[", "0", "]", "==", "0", ":", "timelength", "=", "len", "(", "self", ".", "times", ")", "for", "i", "in", "tqdm", "(", "range", "(", "timelength", ")", ")", ":", "self", ".", "analyze", "(", "time", "=", "i", ")", "return", "self", ".", "noises", ",", "self", ".", "curves", ",", "self", ".", "ranges", ",", "self", ".", "domains" ]
Determine noise, curvature, range, and domain of specified data. Like analyze, except examines the entire file. :return: array->noises :return: array->curvatures :return: array->ranges :return: array->domains
[ "Determine", "noise", "curvature", "range", "and", "domain", "of", "specified", "data", ".", "Like", "analyze", "except", "examines", "the", "entire", "file", "." ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L423-L439
249,138
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
ML.get_algo
def get_algo(self, args: argparse.Namespace, algo: str) -> object: """ Returns machine learning algorithm based on arguments """ if algo == 'nn': return NearestNeighbor(args.nnk)
python
def get_algo(self, args: argparse.Namespace, algo: str) -> object: """ Returns machine learning algorithm based on arguments """ if algo == 'nn': return NearestNeighbor(args.nnk)
[ "def", "get_algo", "(", "self", ",", "args", ":", "argparse", ".", "Namespace", ",", "algo", ":", "str", ")", "->", "object", ":", "if", "algo", "==", "'nn'", ":", "return", "NearestNeighbor", "(", "args", ".", "nnk", ")" ]
Returns machine learning algorithm based on arguments
[ "Returns", "machine", "learning", "algorithm", "based", "on", "arguments" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L449-L452
249,139
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
ML.plot_fitspace
def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None: """ Plot 2D plane of fitspace """ cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. h = 0.01 # Mesh step size x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xlabel(r'$\log_{10}$ Noise') plt.ylabel(r'$\log_{10}$ Curvature') plt.savefig(name)
python
def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None: """ Plot 2D plane of fitspace """ cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. h = 0.01 # Mesh step size x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xlabel(r'$\log_{10}$ Noise') plt.ylabel(r'$\log_{10}$ Curvature') plt.savefig(name)
[ "def", "plot_fitspace", "(", "self", ",", "name", ":", "str", ",", "X", ":", "np", ".", "ndarray", ",", "y", ":", "np", ".", "ndarray", ",", "clf", ":", "object", ")", "->", "None", ":", "cmap_light", "=", "ListedColormap", "(", "[", "'#FFAAAA'", ",", "'#AAFFAA'", ",", "'#AAAAFF'", "]", ")", "cmap_bold", "=", "ListedColormap", "(", "[", "'#FF0000'", ",", "'#00FF00'", ",", "'#0000FF'", "]", ")", "# Plot the decision boundary. For that, we will assign a color to each", "# point in the mesh [x_min, m_max]x[y_min, y_max].", "h", "=", "0.01", "# Mesh step size", "x_min", ",", "x_max", "=", "X", "[", ":", ",", "0", "]", ".", "min", "(", ")", "-", "1", ",", "X", "[", ":", ",", "0", "]", ".", "max", "(", ")", "+", "1", "y_min", ",", "y_max", "=", "X", "[", ":", ",", "1", "]", ".", "min", "(", ")", "-", "1", ",", "X", "[", ":", ",", "1", "]", ".", "max", "(", ")", "+", "1", "xx", ",", "yy", "=", "np", ".", "meshgrid", "(", "np", ".", "arange", "(", "x_min", ",", "x_max", ",", "h", ")", ",", "np", ".", "arange", "(", "y_min", ",", "y_max", ",", "h", ")", ")", "Z", "=", "clf", ".", "predict", "(", "np", ".", "c_", "[", "xx", ".", "ravel", "(", ")", ",", "yy", ".", "ravel", "(", ")", "]", ")", "# Put the result into a color plot", "Z", "=", "Z", ".", "reshape", "(", "xx", ".", "shape", ")", "plt", ".", "figure", "(", ")", "plt", ".", "pcolormesh", "(", "xx", ",", "yy", ",", "Z", ",", "cmap", "=", "cmap_light", ")", "# Plot also the training points", "plt", ".", "scatter", "(", "X", "[", ":", ",", "0", "]", ",", "X", "[", ":", ",", "1", "]", ",", "c", "=", "y", ",", "cmap", "=", "cmap_bold", ")", "plt", ".", "xlim", "(", "xx", ".", "min", "(", ")", ",", "xx", ".", "max", "(", ")", ")", "plt", ".", "ylim", "(", "yy", ".", "min", "(", ")", ",", "yy", ".", "max", "(", ")", ")", "plt", ".", "xlabel", "(", "r'$\\log_{10}$ Noise'", ")", "plt", ".", "ylabel", "(", "r'$\\log_{10}$ Curvature'", ")", "plt", ".", "savefig", "(", "name", ")" ]
Plot 2D plane of fitspace
[ "Plot", "2dplane", "of", "fitspace" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L469-L494
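A self-contained usage sketch of the mesh/predict/pcolormesh pattern above on toy two-feature data. The KNeighborsClassifier here is an assumption standing in for whatever clf the caller passes, and the Agg backend is selected so savefig works without a display:

```python
import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend for savefig
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(-1, 0.5, (30, 2)), rng.normal(1, 0.5, (30, 2))])
y = np.array([0] * 30 + [1] * 30)
clf = KNeighborsClassifier(n_neighbors=5).fit(X, y)

h = 0.05
xx, yy = np.meshgrid(np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, h),
                     np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.pcolormesh(xx, yy, Z, cmap=ListedColormap(['#FFAAAA', '#AAAAFF']))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=ListedColormap(['#FF0000', '#0000FF']))
plt.savefig('fitspace_demo.png')
```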
249,140
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
NearestNeighbor.train
def train(self, traindata: np.ndarray) -> None: """ Trains on dataset """ self.clf.fit(traindata[:, 1:5], traindata[:, 5])
python
def train(self, traindata: np.ndarray) -> None: """ Trains on dataset """ self.clf.fit(traindata[:, 1:5], traindata[:, 5])
[ "def", "train", "(", "self", ",", "traindata", ":", "np", ".", "ndarray", ")", "->", "None", ":", "self", ".", "clf", ".", "fit", "(", "traindata", "[", ":", ",", "1", ":", "5", "]", ",", "traindata", "[", ":", ",", "5", "]", ")" ]
Trains on dataset
[ "Trains", "on", "dataset" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L508-L510
249,141
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
NearestNeighbor.predict
def predict(self, predictdata: np.ndarray) -> np.ndarray: """ predict given points """ return self.clf.predict(predictdata)
python
def predict(self, predictdata: np.ndarray) -> np.ndarray: """ predict given points """ return self.clf.predict(predictdata)
[ "def", "predict", "(", "self", ",", "predictdata", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "return", "self", ".", "clf", ".", "predict", "(", "predictdata", ")" ]
predict given points
[ "predict", "given", "points" ]
4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L512-L514
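Taken together, train() and predict() suggest self.clf is a k-nearest-neighbours classifier built from the --nnk argument. A hedged reconstruction of the wrapper plus a toy run; the column layout (column 0 an identifier, columns 1-4 the features, column 5 the accept label) is inferred from the slicing in train(), not stated in the source:

```python
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

class NearestNeighborSketch:
    """Assumed shape of the NearestNeighbor wrapper above."""

    def __init__(self, k: int) -> None:
        self.clf = KNeighborsClassifier(n_neighbors=k)

    def train(self, traindata: np.ndarray) -> None:
        # Columns 1:5 as features, column 5 as the label, matching train().
        self.clf.fit(traindata[:, 1:5], traindata[:, 5])

    def predict(self, predictdata: np.ndarray) -> np.ndarray:
        return self.clf.predict(predictdata)

rng = np.random.default_rng(2)
rows = np.column_stack([np.arange(40), rng.normal(size=(40, 4)),
                        rng.integers(1, 3, 40)])
model = NearestNeighborSketch(k=3)
model.train(rows)
print(model.predict(rows[:5, 1:5]))
```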
249,142
henrysher/kotocore
kotocore/session.py
Session.connect_to
def connect_to(self, service_name, **kwargs): """ Shortcut method to make instantiating the ``Connection`` classes easier. Forwards ``**kwargs`` like region, keys, etc. on to the constructor. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :rtype: <kotocore.connection.Connection> instance """ service_class = self.get_connection(service_name) return service_class.connect_to(**kwargs)
python
def connect_to(self, service_name, **kwargs): """ Shortcut method to make instantiating the ``Connection`` classes easier. Forwards ``**kwargs`` like region, keys, etc. on to the constructor. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :rtype: <kotocore.connection.Connection> instance """ service_class = self.get_connection(service_name) return service_class.connect_to(**kwargs)
[ "def", "connect_to", "(", "self", ",", "service_name", ",", "*", "*", "kwargs", ")", ":", "service_class", "=", "self", ".", "get_connection", "(", "service_name", ")", "return", "service_class", ".", "connect_to", "(", "*", "*", "kwargs", ")" ]
Shortcut method to make instantiating the ``Connection`` classes easier. Forwards ``**kwargs`` like region, keys, etc. on to the constructor. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :rtype: <kotocore.connection.Connection> instance
[ "Shortcut", "method", "to", "make", "instantiating", "the", "Connection", "classes", "easier", "." ]
c52d2f3878b924ceabca07f61c91abcb1b230ecc
https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/session.py#L170-L184
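The shortcut is a small factory pattern: resolve the Connection class by service name, then forward **kwargs untouched to that class's own connect_to. A pure-Python sketch with hypothetical stand-in classes (the real get_connection builds botocore-backed Connection subclasses):

```python
class SQSConnection:
    """Hypothetical stand-in for a generated Connection class."""

    @classmethod
    def connect_to(cls, **kwargs):
        return cls(**kwargs)

    def __init__(self, region_name='us-east-1'):
        self.region_name = region_name

class SessionSketch:
    _registry = {'sqs': SQSConnection}

    def get_connection(self, service_name):
        return self._registry[service_name]

    def connect_to(self, service_name, **kwargs):
        # Resolve the class once, then forward region/keys/etc. unchanged.
        return self.get_connection(service_name).connect_to(**kwargs)

conn = SessionSketch().connect_to('sqs', region_name='eu-west-1')
print(type(conn).__name__, conn.region_name)  # SQSConnection eu-west-1
```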
249,143
minhhoit/yacms
yacms/forms/forms.py
FormForForm.save
def save(self, **kwargs): """ Create a ``FormEntry`` instance and related ``FieldEntry`` instances for each form field. """ entry = super(FormForForm, self).save(commit=False) entry.form = self.form entry.entry_time = now() entry.save() entry_fields = entry.fields.values_list("field_id", flat=True) new_entry_fields = [] for field in self.form_fields: field_key = "field_%s" % field.id value = self.cleaned_data[field_key] if value and self.fields[field_key].widget.needs_multipart_form: value = fs.save(join("forms", str(uuid4()), value.name), value) if isinstance(value, list): value = ", ".join([v.strip() for v in value]) if field.id in entry_fields: field_entry = entry.fields.get(field_id=field.id) field_entry.value = value field_entry.save() else: new = {"entry": entry, "field_id": field.id, "value": value} new_entry_fields.append(FieldEntry(**new)) if new_entry_fields: FieldEntry.objects.bulk_create(new_entry_fields) return entry
python
def save(self, **kwargs): """ Create a ``FormEntry`` instance and related ``FieldEntry`` instances for each form field. """ entry = super(FormForForm, self).save(commit=False) entry.form = self.form entry.entry_time = now() entry.save() entry_fields = entry.fields.values_list("field_id", flat=True) new_entry_fields = [] for field in self.form_fields: field_key = "field_%s" % field.id value = self.cleaned_data[field_key] if value and self.fields[field_key].widget.needs_multipart_form: value = fs.save(join("forms", str(uuid4()), value.name), value) if isinstance(value, list): value = ", ".join([v.strip() for v in value]) if field.id in entry_fields: field_entry = entry.fields.get(field_id=field.id) field_entry.value = value field_entry.save() else: new = {"entry": entry, "field_id": field.id, "value": value} new_entry_fields.append(FieldEntry(**new)) if new_entry_fields: FieldEntry.objects.bulk_create(new_entry_fields) return entry
[ "def", "save", "(", "self", ",", "*", "*", "kwargs", ")", ":", "entry", "=", "super", "(", "FormForForm", ",", "self", ")", ".", "save", "(", "commit", "=", "False", ")", "entry", ".", "form", "=", "self", ".", "form", "entry", ".", "entry_time", "=", "now", "(", ")", "entry", ".", "save", "(", ")", "entry_fields", "=", "entry", ".", "fields", ".", "values_list", "(", "\"field_id\"", ",", "flat", "=", "True", ")", "new_entry_fields", "=", "[", "]", "for", "field", "in", "self", ".", "form_fields", ":", "field_key", "=", "\"field_%s\"", "%", "field", ".", "id", "value", "=", "self", ".", "cleaned_data", "[", "field_key", "]", "if", "value", "and", "self", ".", "fields", "[", "field_key", "]", ".", "widget", ".", "needs_multipart_form", ":", "value", "=", "fs", ".", "save", "(", "join", "(", "\"forms\"", ",", "str", "(", "uuid4", "(", ")", ")", ",", "value", ".", "name", ")", ",", "value", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "\", \"", ".", "join", "(", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", "]", ")", "if", "field", ".", "id", "in", "entry_fields", ":", "field_entry", "=", "entry", ".", "fields", ".", "get", "(", "field_id", "=", "field", ".", "id", ")", "field_entry", ".", "value", "=", "value", "field_entry", ".", "save", "(", ")", "else", ":", "new", "=", "{", "\"entry\"", ":", "entry", ",", "\"field_id\"", ":", "field", ".", "id", ",", "\"value\"", ":", "value", "}", "new_entry_fields", ".", "append", "(", "FieldEntry", "(", "*", "*", "new", ")", ")", "if", "new_entry_fields", ":", "FieldEntry", ".", "objects", ".", "bulk_create", "(", "new_entry_fields", ")", "return", "entry" ]
Create a ``FormEntry`` instance and related ``FieldEntry`` instances for each form field.
[ "Create", "a", "FormEntry", "instance", "and", "related", "FieldEntry", "instances", "for", "each", "form", "field", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/forms/forms.py#L202-L229
249,144
minhhoit/yacms
yacms/forms/forms.py
FormForForm.email_to
def email_to(self): """ Return the value entered for the first field of type ``forms.EmailField``. """ for field in self.form_fields: if issubclass(fields.CLASSES[field.field_type], forms.EmailField): return self.cleaned_data["field_%s" % field.id] return None
python
def email_to(self): """ Return the value entered for the first field of type ``forms.EmailField``. """ for field in self.form_fields: if issubclass(fields.CLASSES[field.field_type], forms.EmailField): return self.cleaned_data["field_%s" % field.id] return None
[ "def", "email_to", "(", "self", ")", ":", "for", "field", "in", "self", ".", "form_fields", ":", "if", "issubclass", "(", "fields", ".", "CLASSES", "[", "field", ".", "field_type", "]", ",", "forms", ".", "EmailField", ")", ":", "return", "self", ".", "cleaned_data", "[", "\"field_%s\"", "%", "field", ".", "id", "]", "return", "None" ]
Return the value entered for the first field of type ``forms.EmailField``.
[ "Return", "the", "value", "entered", "for", "the", "first", "field", "of", "type", "forms", ".", "EmailField", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/forms/forms.py#L231-L239
249,145
minhhoit/yacms
yacms/forms/forms.py
EntriesForm.columns
def columns(self): """ Returns the list of selected column names. """ fields = [f.label for f in self.form_fields if self.cleaned_data["field_%s_export" % f.id]] if self.cleaned_data["field_0_export"]: fields.append(self.entry_time_name) return fields
python
def columns(self): """ Returns the list of selected column names. """ fields = [f.label for f in self.form_fields if self.cleaned_data["field_%s_export" % f.id]] if self.cleaned_data["field_0_export"]: fields.append(self.entry_time_name) return fields
[ "def", "columns", "(", "self", ")", ":", "fields", "=", "[", "f", ".", "label", "for", "f", "in", "self", ".", "form_fields", "if", "self", ".", "cleaned_data", "[", "\"field_%s_export\"", "%", "f", ".", "id", "]", "]", "if", "self", ".", "cleaned_data", "[", "\"field_0_export\"", "]", ":", "fields", ".", "append", "(", "self", ".", "entry_time_name", ")", "return", "fields" ]
Returns the list of selected column names.
[ "Returns", "the", "list", "of", "selected", "column", "names", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/forms/forms.py#L321-L329
249,146
WhereSoftwareGoesToDie/pymarquise
marquise/marquise.py
Marquise.close
def close(self): """Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there are no guarantees that it will happen when the instance is deleted. """ if self.marquise_ctx is None: self.__debug("Marquise handle is already closed, will do nothing.") # Multiple close() calls are okay. return self.__debug("Shutting down Marquise handle spooling to %s and %s" % (self.spool_path_points, self.spool_path_contents)) # At the time of writing this always succeeds (returns 0). MARQUISE_SHUTDOWN(self.marquise_ctx) # Signal that our context is no longer valid. self.marquise_ctx = None
python
def close(self): """Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there are no guarantees that it will happen when the instance is deleted. """ if self.marquise_ctx is None: self.__debug("Marquise handle is already closed, will do nothing.") # Multiple close() calls are okay. return self.__debug("Shutting down Marquise handle spooling to %s and %s" % (self.spool_path_points, self.spool_path_contents)) # At the time of writing this always succeeds (returns 0). MARQUISE_SHUTDOWN(self.marquise_ctx) # Signal that our context is no longer valid. self.marquise_ctx = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "marquise_ctx", "is", "None", ":", "self", ".", "__debug", "(", "\"Marquise handle is already closed, will do nothing.\"", ")", "# Multiple close() calls are okay.", "return", "self", ".", "__debug", "(", "\"Shutting down Marquise handle spooling to %s and %s\"", "%", "(", "self", ".", "spool_path_points", ",", "self", ".", "spool_path_contents", ")", ")", "# At the time of writing this always succeeds (returns 0).", "MARQUISE_SHUTDOWN", "(", "self", ".", "marquise_ctx", ")", "# Signal that our context is no longer valid.", "self", ".", "marquise_ctx", "=", "None" ]
Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there are no guarantees that it will happen when the instance is deleted.
[ "Close", "the", "Marquise", "context", "ensuring", "data", "is", "flushed", "and", "spool", "files", "are", "closed", "." ]
67e52df70c50ed53ad315a64fea430a9567e2b1b
https://github.com/WhereSoftwareGoesToDie/pymarquise/blob/67e52df70c50ed53ad315a64fea430a9567e2b1b/marquise/marquise.py#L64-L82
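Because close() must be called explicitly and tolerates repeat calls, a thin context-manager wrapper is a natural way to guarantee the flush. A sketch with a stand-in class (FakeMarquise only mimics the idempotent-close behaviour above; it is not the real binding):

```python
from contextlib import contextmanager

class FakeMarquise:
    """Stand-in mimicking the idempotent close() above."""

    def __init__(self):
        self.marquise_ctx = object()

    def close(self):
        if self.marquise_ctx is None:
            return  # multiple close() calls are okay
        self.marquise_ctx = None

@contextmanager
def marquise_handle():
    handle = FakeMarquise()
    try:
        yield handle
    finally:
        handle.close()  # runs even if the body raises

with marquise_handle() as m:
    assert m.marquise_ctx is not None
assert m.marquise_ctx is None  # closed on exit
```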
249,147
WhereSoftwareGoesToDie/pymarquise
marquise/marquise.py
Marquise.update_source
def update_source(self, address, metadata_dict): """Pack the `metadata_dict` for an `address` into a data structure and ship it to the spool file. Arguments: address -- the address for which this metadata_dict applies. metadata_dict -- a Python dict of arbitrary string key-value pairs. """ if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) # Sanity check the input, everything must be UTF8 strings (not # yet confirmed), no Nonetypes or anything stupid like that. # # The keys of the key-value pairs are unique, by virtue of # taking a dict as input. if any([ x is None for x in metadata_dict.keys() ]): raise TypeError("One of your metadata_dict keys is a Nonetype") # Values are allowed to be None, coerce to empty strings. metadata_dict = dict([ (x[0],"" if x[1] is None else x[1]) for x in metadata_dict.items() ]) # Cast each string to a C-string. This may have unusual results if your # keys/vals aren't particularly stringy, such as Python classes, # Exceptions, etc. They will get str()'d, and they may look stupid. # pylint: disable=multiple-statements try: c_fields = [ cstring(str(x)) for x in metadata_dict.keys() ] except Exception as exc: raise TypeError("One of your metadata_dict keys couldn't be cast to a Cstring, %s" % exc) try: c_values = [ cstring(str(x)) for x in metadata_dict.values() ] except Exception as exc: raise TypeError("One of your metadata_dict values couldn't be cast to a Cstring, %s" % exc) # pylint: enable=multiple-statements # Get our source_dict data structure source_dict = MARQUISE_NEW_SOURCE(c_fields, c_values, len(metadata_dict)) if is_cnull(source_dict): raise ValueError("errno is set to EINVAL on invalid input, our errno is %d" % FFI.errno) # If you do something stupid, like passing a string where an # int (address) is meant to go, CFFI will explode. Which is # fine, but that causes memory leaks. The explosion still # occurs, but we cleanup after (before?) ourselves. try: success = MARQUISE_UPDATE_SOURCE(self.marquise_ctx, address, source_dict) except TypeError as exc: MARQUISE_FREE_SOURCE(source_dict) raise self.__debug("marquise_update_source returned %d" % success) if success != 0: MARQUISE_FREE_SOURCE(source_dict) raise RuntimeError("marquise_update_source was unsuccessful, errno is %d" % FFI.errno) MARQUISE_FREE_SOURCE(source_dict) return True
python
def update_source(self, address, metadata_dict): """Pack the `metadata_dict` for an `address` into a data structure and ship it to the spool file. Arguments: address -- the address for which this metadata_dict applies. metadata_dict -- a Python dict of arbitrary string key-value pairs. """ if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) # Sanity check the input, everything must be UTF8 strings (not # yet confirmed), no Nonetypes or anything stupid like that. # # The keys of the key-value pairs are unique, by virtue of # taking a dict as input. if any([ x is None for x in metadata_dict.keys() ]): raise TypeError("One of your metadata_dict keys is a Nonetype") # Values are allowed to be None, coerce to empty strings. metadata_dict = dict([ (x[0],"" if x[1] is None else x[1]) for x in metadata_dict.items() ]) # Cast each string to a C-string. This may have unusual results if your # keys/vals aren't particularly stringy, such as Python classes, # Exceptions, etc. They will get str()'d, and they may look stupid. # pylint: disable=multiple-statements try: c_fields = [ cstring(str(x)) for x in metadata_dict.keys() ] except Exception as exc: raise TypeError("One of your metadata_dict keys couldn't be cast to a Cstring, %s" % exc) try: c_values = [ cstring(str(x)) for x in metadata_dict.values() ] except Exception as exc: raise TypeError("One of your metadata_dict values couldn't be cast to a Cstring, %s" % exc) # pylint: enable=multiple-statements # Get our source_dict data structure source_dict = MARQUISE_NEW_SOURCE(c_fields, c_values, len(metadata_dict)) if is_cnull(source_dict): raise ValueError("errno is set to EINVAL on invalid input, our errno is %d" % FFI.errno) # If you do something stupid, like passing a string where an # int (address) is meant to go, CFFI will explode. Which is # fine, but that causes memory leaks. The explosion still # occurs, but we cleanup after (before?) ourselves. try: success = MARQUISE_UPDATE_SOURCE(self.marquise_ctx, address, source_dict) except TypeError as exc: MARQUISE_FREE_SOURCE(source_dict) raise self.__debug("marquise_update_source returned %d" % success) if success != 0: MARQUISE_FREE_SOURCE(source_dict) raise RuntimeError("marquise_update_source was unsuccessful, errno is %d" % FFI.errno) MARQUISE_FREE_SOURCE(source_dict) return True
[ "def", "update_source", "(", "self", ",", "address", ",", "metadata_dict", ")", ":", "if", "self", ".", "marquise_ctx", "is", "None", ":", "raise", "ValueError", "(", "\"Attempted to write to a closed Marquise handle.\"", ")", "self", ".", "__debug", "(", "\"Supplied address: %s\"", "%", "address", ")", "# Sanity check the input, everything must be UTF8 strings (not", "# yet confirmed), no Nonetypes or anything stupid like that.", "#", "# The keys of the key-value pairs are unique, by virtue of", "# taking a dict as input.", "if", "any", "(", "[", "x", "is", "None", "for", "x", "in", "metadata_dict", ".", "keys", "(", ")", "]", ")", ":", "raise", "TypeError", "(", "\"One of your metadata_dict keys is a Nonetype\"", ")", "# Values are allowed to be None, coerce to empty strings.", "metadata_dict", "=", "dict", "(", "[", "(", "x", "[", "0", "]", ",", "\"\"", "if", "x", "[", "1", "]", "is", "None", "else", "x", "[", "1", "]", ")", "for", "x", "in", "metadata_dict", ".", "items", "(", ")", "]", ")", "# Cast each string to a C-string. This may have unusual results if your", "# keys/vals aren't particularly stringy, such as Python classes,", "# Exceptions, etc. They will get str()'d, and they may look stupid.", "# pylint: disable=multiple-statements", "try", ":", "c_fields", "=", "[", "cstring", "(", "str", "(", "x", ")", ")", "for", "x", "in", "metadata_dict", ".", "keys", "(", ")", "]", "except", "Exception", "as", "exc", ":", "raise", "TypeError", "(", "\"One of your metadata_dict keys couldn't be cast to a Cstring, %s\"", "%", "exc", ")", "try", ":", "c_values", "=", "[", "cstring", "(", "str", "(", "x", ")", ")", "for", "x", "in", "metadata_dict", ".", "values", "(", ")", "]", "except", "Exception", "as", "exc", ":", "raise", "TypeError", "(", "\"One of your metadata_dict values couldn't be cast to a Cstring, %s\"", "%", "exc", ")", "# pylint: enable=multiple-statements", "# Get our source_dict data structure", "source_dict", "=", "MARQUISE_NEW_SOURCE", "(", "c_fields", ",", "c_values", ",", "len", "(", "metadata_dict", ")", ")", "if", "is_cnull", "(", "source_dict", ")", ":", "raise", "ValueError", "(", "\"errno is set to EINVAL on invalid input, our errno is %d\"", "%", "FFI", ".", "errno", ")", "# If you do something stupid, like passing a string where an", "# int (address) is meant to go, CFFI will explode. Which is", "# fine, but that causes memory leaks. The explosion still", "# occurs, but we cleanup after (before?) ourselves.", "try", ":", "success", "=", "MARQUISE_UPDATE_SOURCE", "(", "self", ".", "marquise_ctx", ",", "address", ",", "source_dict", ")", "except", "TypeError", "as", "exc", ":", "MARQUISE_FREE_SOURCE", "(", "source_dict", ")", "raise", "self", ".", "__debug", "(", "\"marquise_update_source returned %d\"", "%", "success", ")", "if", "success", "!=", "0", ":", "MARQUISE_FREE_SOURCE", "(", "source_dict", ")", "raise", "RuntimeError", "(", "\"marquise_update_source was unsuccessful, errno is %d\"", "%", "FFI", ".", "errno", ")", "MARQUISE_FREE_SOURCE", "(", "source_dict", ")", "return", "True" ]
Pack the `metadata_dict` for an `address` into a data structure and ship it to the spool file. Arguments: address -- the address for which this metadata_dict applies. metadata_dict -- a Python dict of arbitrary string key-value pairs.
[ "Pack", "the", "metadata_dict", "for", "an", "address", "into", "a", "data", "structure", "and", "ship", "it", "to", "the", "spool", "file", "." ]
67e52df70c50ed53ad315a64fea430a9567e2b1b
https://github.com/WhereSoftwareGoesToDie/pymarquise/blob/67e52df70c50ed53ad315a64fea430a9567e2b1b/marquise/marquise.py#L182-L237
249,148
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
StorageHandler._get_db_fields
def _get_db_fields(self, obj): """ Return (field name, database index) pairs, which are used as indexes for each of the object's indexed attributes. Args: obj (obj): Object with an ``indexes`` property. Yields: tuple: (field name, OOBTree index) for each item in ``obj.indexes``. """ for field in obj.indexes: yield field, self._zeo_key(field)
python
def _get_db_fields(self, obj): """ Return (field name, database index) pairs, which are used as indexes for each of the object's indexed attributes. Args: obj (obj): Object with an ``indexes`` property. Yields: tuple: (field name, OOBTree index) for each item in ``obj.indexes``. """ for field in obj.indexes: yield field, self._zeo_key(field)
[ "def", "_get_db_fields", "(", "self", ",", "obj", ")", ":", "for", "field", "in", "obj", ".", "indexes", ":", "yield", "field", ",", "self", ".", "_zeo_key", "(", "field", ")" ]
Return (field name, database index) pairs, which are used as indexes for each of the object's indexed attributes. Args: obj (obj): Object with an ``indexes`` property. Yields: tuple: (field name, OOBTree index) for each item in ``obj.indexes``.
[ "Return", "list", "of", "database", "dictionaries", "which", "are", "used", "as", "indexes", "for", "each", "attributes", "." ]
fb6bd326249847de04b17b64e856c878665cea92
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/storage_handler.py#L99-L111
249,149
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
StorageHandler._check_obj_properties
def _check_obj_properties(self, pub, name="pub"): """ Make sure that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When `pub` doesn't have the required properties. """ if not hasattr(pub, "indexes"): raise InvalidType("`%s` doesn't have .indexes property!" % name) if not pub.indexes: raise InvalidType("`%s.indexes` is not set!" % name) if not hasattr(pub, "project_key"): raise InvalidType( "`%s` doesn't have .project_key property!" % name ) if not pub.project_key: raise InvalidType("`%s.project_key` is not set!" % name)
python
def _check_obj_properties(self, pub, name="pub"): """ Make sure that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When `pub` doesn't have the required properties. """ if not hasattr(pub, "indexes"): raise InvalidType("`%s` doesn't have .indexes property!" % name) if not pub.indexes: raise InvalidType("`%s.indexes` is not set!" % name) if not hasattr(pub, "project_key"): raise InvalidType( "`%s` doesn't have .project_key property!" % name ) if not pub.project_key: raise InvalidType("`%s.project_key` is not set!" % name)
[ "def", "_check_obj_properties", "(", "self", ",", "pub", ",", "name", "=", "\"pub\"", ")", ":", "if", "not", "hasattr", "(", "pub", ",", "\"indexes\"", ")", ":", "raise", "InvalidType", "(", "\"`%s` doesn't have .indexes property!\"", "%", "name", ")", "if", "not", "pub", ".", "indexes", ":", "raise", "InvalidType", "(", "\"`%s.indexes` is not set!\"", "%", "name", ")", "if", "not", "hasattr", "(", "pub", ",", "\"project_key\"", ")", ":", "raise", "InvalidType", "(", "\"`%s` doesn't have .project_key property!\"", "%", "name", ")", "if", "not", "pub", ".", "project_key", ":", "raise", "InvalidType", "(", "\"`%s.project_key` is not set!\"", "%", "name", ")" ]
Make sure that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When `pub` doesn't have the `indexes` or `project_key` property, or when either of them is not set.
[ "Make", "sure", "that", "pub", "has", "the", "right", "interface", "." ]
fb6bd326249847de04b17b64e856c878665cea92
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/storage_handler.py#L113-L136
249,150
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
StorageHandler._put_into_indexes
def _put_into_indexes(self, obj): """ Put publication into all indexes. Args: obj (obj): Indexable object. Raises: UnindexableObject: When there is no index (property) which can be used to index `obj` in the database. """ no_of_used_indexes = 0 for field_name, db_index in list(self._get_db_fields(obj)): attr_value = getattr(obj, field_name) if attr_value is None: # index only by set attributes continue container = db_index.get(attr_value, None) if container is None: container = OOTreeSet() db_index[attr_value] = container container.insert(obj) no_of_used_indexes += 1 # make sure that atleast one `attr_value` was used if no_of_used_indexes <= 0: raise UnindexableObject( "You have to use atleast one of the identificators!" )
python
def _put_into_indexes(self, obj): """ Put publication into all indexes. Args: obj (obj): Indexable object. Raises: UnindexableObject: When there is no index (property) which can be used to index `obj` in the database. """ no_of_used_indexes = 0 for field_name, db_index in list(self._get_db_fields(obj)): attr_value = getattr(obj, field_name) if attr_value is None: # index only by set attributes continue container = db_index.get(attr_value, None) if container is None: container = OOTreeSet() db_index[attr_value] = container container.insert(obj) no_of_used_indexes += 1 # make sure that atleast one `attr_value` was used if no_of_used_indexes <= 0: raise UnindexableObject( "You have to use atleast one of the identificators!" )
[ "def", "_put_into_indexes", "(", "self", ",", "obj", ")", ":", "no_of_used_indexes", "=", "0", "for", "field_name", ",", "db_index", "in", "list", "(", "self", ".", "_get_db_fields", "(", "obj", ")", ")", ":", "attr_value", "=", "getattr", "(", "obj", ",", "field_name", ")", "if", "attr_value", "is", "None", ":", "# index only by set attributes", "continue", "container", "=", "db_index", ".", "get", "(", "attr_value", ",", "None", ")", "if", "container", "is", "None", ":", "container", "=", "OOTreeSet", "(", ")", "db_index", "[", "attr_value", "]", "=", "container", "container", ".", "insert", "(", "obj", ")", "no_of_used_indexes", "+=", "1", "# make sure that atleast one `attr_value` was used", "if", "no_of_used_indexes", "<=", "0", ":", "raise", "UnindexableObject", "(", "\"You have to use atleast one of the identificators!\"", ")" ]
Put publication into all indexes. Args: obj (obj): Indexable object. Raises: UnindexableObject: When there is no index (property) which can be used to index `obj` in the database.
[ "Put", "publication", "into", "all", "indexes", "." ]
fb6bd326249847de04b17b64e856c878665cea92
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/storage_handler.py#L138-L169
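A sketch of the resulting index layout, reusing the hypothetical `Publication` and `handler` from above: each set attribute value maps to one `OOTreeSet`, so objects sharing a value share the set.

pub_a = Publication("80-85892-15-4", "First edition")
pub_b = Publication("80-85892-15-4", "Second edition")

handler._put_into_indexes(pub_a)  # creates the OOTreeSet under "isbn" and "title"
handler._put_into_indexes(pub_b)  # reuses the existing "isbn" set, adds a new "title" set

# an object whose index attributes are all None would raise UnindexableObject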
249,151
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
StorageHandler.store_object
def store_object(self, obj): """ Save `obj` into database and into proper indexes. Args: obj (obj): Indexable object. Raises: InvalidType: When the `obj` doesn't have right properties. UnindexableObject: When there are no indexes defined. """ self._check_obj_properties(obj) with transaction.manager: self._put_into_indexes(obj)
python
def store_object(self, obj): """ Save `obj` into database and into proper indexes. Args: obj (obj): Indexable object. Raises: InvalidType: When the `obj` doesn't have right properties. UnindexableObject: When there are no indexes defined. """ self._check_obj_properties(obj) with transaction.manager: self._put_into_indexes(obj)
[ "def", "store_object", "(", "self", ",", "obj", ")", ":", "self", ".", "_check_obj_properties", "(", "obj", ")", "with", "transaction", ".", "manager", ":", "self", ".", "_put_into_indexes", "(", "obj", ")" ]
Save `obj` into database and into proper indexes. Args: obj (obj): Indexable object. Raises: InvalidType: When the `obj` doesn't have right properties. UnindexableObject: When there are no indexes defined.
[ "Save", "obj", "into", "database", "and", "into", "proper", "indexes", "." ]
fb6bd326249847de04b17b64e856c878665cea92
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/storage_handler.py#L171-L185
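A minimal sketch of the public entry point; the constructor arguments below are assumptions, since only `store_object()` itself is shown in this record:

handler = StorageHandler("pub", "zeo_client.conf")  # hypothetical constructor signature
handler.store_object(Publication("80-85892-15-4", "X"))
# interface check first, then indexing inside a single ZODB transaction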
249,152
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
StorageHandler._get_subset_matches
def _get_subset_matches(self, query): """ Yield publications at indexes defined by `query` property values. Args: query (obj): Object implementing proper interface. Yields: OOTreeSet: Set of matching publications for each set attribute of `query`. """ for field_name, db_index in self._get_db_fields(query): attr = getattr(query, field_name) if attr is None: # don't use unset attributes continue results = db_index.get(attr, OOTreeSet()) if results: yield results
python
def _get_subset_matches(self, query): """ Yield publications at indexes defined by `query` property values. Args: query (obj): Object implementing proper interface. Yields: OOTreeSet: Set of matching publications for each set attribute of `query`. """ for field_name, db_index in self._get_db_fields(query): attr = getattr(query, field_name) if attr is None: # don't use unset attributes continue results = db_index.get(attr, OOTreeSet()) if results: yield results
[ "def", "_get_subset_matches", "(", "self", ",", "query", ")", ":", "for", "field_name", ",", "db_index", "in", "self", ".", "_get_db_fields", "(", "query", ")", ":", "attr", "=", "getattr", "(", "query", ",", "field_name", ")", "if", "attr", "is", "None", ":", "# don't use unset attributes", "continue", "results", "=", "db_index", ".", "get", "(", "attr", ",", "OOTreeSet", "(", ")", ")", "if", "results", ":", "yield", "results" ]
Yield publications at indexes defined by `query` property values. Args: query (obj): Object implementing proper interface. Yields: OOTreeSet: Set of matching publications for each set attribute of `query`.
[ "Yield", "publications", "at", "indexes", "defined", "by", "query", "property", "values", "." ]
fb6bd326249847de04b17b64e856c878665cea92
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/storage_handler.py#L187-L206
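The per-index sets yielded here are typically intersected to answer multi-attribute queries; a sketch under that assumption (`BTrees` ships an `intersection` helper for its set types):

from functools import reduce
from BTrees.OOBTree import intersection

subsets = list(handler._get_subset_matches(query))  # one OOTreeSet per set attribute
matches = reduce(intersection, subsets) if subsets else []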
249,153
pjanis/funtool
funtool/group_measure.py
_wrap_measure
def _wrap_measure(individual_group_measure_process, group_measure, loaded_processes): """ Creates a function on a state_collection, which creates analysis_collections for each group in the collection. """ def wrapped_measure(state_collection,overriding_parameters=None,loggers=None): if loggers == None: loggers = funtool.logger.set_default_loggers() if loaded_processes != None : if group_measure.grouping_selectors != None: for grouping_selector_name in group_measure.grouping_selectors: state_collection= funtool.state_collection.add_grouping(state_collection, grouping_selector_name, loaded_processes) for group in funtool.state_collection.groups_in_grouping(state_collection, grouping_selector_name): analysis_collection = funtool.analysis.AnalysisCollection(None,group,{},{}) if group_measure.analysis_selectors != None: for analysis_selector in group_measure.analysis_selectors: analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection,state_collection) if analysis_collection != None: individual_group_measure_process(analysis_collection,state_collection) return state_collection return wrapped_measure
python
def _wrap_measure(individual_group_measure_process, group_measure, loaded_processes): """ Creates a function on a state_collection, which creates analysis_collections for each group in the collection. """ def wrapped_measure(state_collection,overriding_parameters=None,loggers=None): if loggers == None: loggers = funtool.logger.set_default_loggers() if loaded_processes != None : if group_measure.grouping_selectors != None: for grouping_selector_name in group_measure.grouping_selectors: state_collection= funtool.state_collection.add_grouping(state_collection, grouping_selector_name, loaded_processes) for group in funtool.state_collection.groups_in_grouping(state_collection, grouping_selector_name): analysis_collection = funtool.analysis.AnalysisCollection(None,group,{},{}) if group_measure.analysis_selectors != None: for analysis_selector in group_measure.analysis_selectors: analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection,state_collection) if analysis_collection != None: individual_group_measure_process(analysis_collection,state_collection) return state_collection return wrapped_measure
[ "def", "_wrap_measure", "(", "individual_group_measure_process", ",", "group_measure", ",", "loaded_processes", ")", ":", "def", "wrapped_measure", "(", "state_collection", ",", "overriding_parameters", "=", "None", ",", "loggers", "=", "None", ")", ":", "if", "loggers", "==", "None", ":", "loggers", "=", "funtool", ".", "logger", ".", "set_default_loggers", "(", ")", "if", "loaded_processes", "!=", "None", ":", "if", "group_measure", ".", "grouping_selectors", "!=", "None", ":", "for", "grouping_selector_name", "in", "group_measure", ".", "grouping_selectors", ":", "state_collection", "=", "funtool", ".", "state_collection", ".", "add_grouping", "(", "state_collection", ",", "grouping_selector_name", ",", "loaded_processes", ")", "for", "group", "in", "funtool", ".", "state_collection", ".", "groups_in_grouping", "(", "state_collection", ",", "grouping_selector_name", ")", ":", "analysis_collection", "=", "funtool", ".", "analysis", ".", "AnalysisCollection", "(", "None", ",", "group", ",", "{", "}", ",", "{", "}", ")", "if", "group_measure", ".", "analysis_selectors", "!=", "None", ":", "for", "analysis_selector", "in", "group_measure", ".", "analysis_selectors", ":", "analysis_collection", "=", "loaded_processes", "[", "\"analysis_selector\"", "]", "[", "analysis_selector", "]", ".", "process_function", "(", "analysis_collection", ",", "state_collection", ")", "if", "analysis_collection", "!=", "None", ":", "individual_group_measure_process", "(", "analysis_collection", ",", "state_collection", ")", "return", "state_collection", "return", "wrapped_measure" ]
Creates a function on a state_collection, which creates analysis_collections for each group in the collection.
[ "Creates", "a", "function", "on", "a", "state_collection", "which", "creates", "analysis_collections", "for", "each", "group", "in", "the", "collection", "." ]
231851238f0a62bc3682d8fa937db9053378c53d
https://github.com/pjanis/funtool/blob/231851238f0a62bc3682d8fa937db9053378c53d/funtool/group_measure.py#L38-L57
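The closure-over-configuration pattern used here can be reduced to a short illustrative sketch (the names below are made up, not funtool's API):

def wrap_process(process, selectors):
    """Bind `process` and its `selectors` into a single collection-level function."""
    def wrapped(collection, overriding_parameters=None, loggers=None):
        for select in (selectors or []):
            collection = select(collection)  # narrow/annotate the collection first
        process(collection)                  # then run the measure on what is left
        return collection
    return wrapped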
249,154
unixorn/haze
haze/ec2.py
getAWSAccountID
def getAWSAccountID(): ''' Return an instance's AWS account ID, or '0' when not in EC2 ''' link = "http://169.254.169.254/latest/dynamic/instance-identity/document" try: conn = urllib2.urlopen(url=link, timeout=5) except urllib2.URLError: return '0' jsonData = json.loads(conn.read()) return jsonData['accountId']
python
def getAWSAccountID(): ''' Return an instance's AWS account ID, or '0' when not in EC2 ''' link = "http://169.254.169.254/latest/dynamic/instance-identity/document" try: conn = urllib2.urlopen(url=link, timeout=5) except urllib2.URLError: return '0' jsonData = json.loads(conn.read()) return jsonData['accountId']
[ "def", "getAWSAccountID", "(", ")", ":", "link", "=", "\"http://169.254.169.254/latest/dynamic/instance-identity/document\"", "try", ":", "conn", "=", "urllib2", ".", "urlopen", "(", "url", "=", "link", ",", "timeout", "=", "5", ")", "except", "urllib2", ".", "URLError", ":", "return", "'0'", "jsonData", "=", "json", ".", "loads", "(", "conn", ".", "read", "(", ")", ")", "return", "jsonData", "[", "'accountId'", "]" ]
Return an instance's AWS account ID, or '0' when not in EC2
[ "Print", "an", "instance", "s", "AWS", "account", "number", "or", "0", "when", "not", "in", "EC2" ]
77692b18e6574ac356e3e16659b96505c733afff
https://github.com/unixorn/haze/blob/77692b18e6574ac356e3e16659b96505c733afff/haze/ec2.py#L26-L36
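A usage sketch: the function only queries the instance metadata endpoint, so off EC2 the `urlopen` call fails and the string '0' comes back after the 5-second timeout.

account_id = getAWSAccountID()
if account_id == '0':
    print("not running inside EC2")
else:
    print("AWS account: " + account_id)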
249,155
unixorn/haze
haze/ec2.py
readInstanceTag
def readInstanceTag(instanceID, tagName="Name", connection=None): """ Load a tag from EC2 :param str instanceID: Instance ID to read the tag on :param str tagName: Name of tag to load :param connection: optional boto connection to use :returns: the tag's value :rtype: str """ assert isinstance(instanceID, basestring), ("instanceID must be a string but is %r" % instanceID) assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName) if not connection: # Assume AWS credentials are in the environment or the instance is using an IAM role connection = boto.ec2.connect_to_region(myRegion()) # Filter the tag values for our instance_id # http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeTags.html tagData = connection.get_all_tags(filters={"resource-id": instanceID, "key": tagName}) if tagData: tagValue = tagData[0].value else: raise RuntimeError, "%s: No such tag on %s" % (tagName, instanceID) return tagValue
python
def readInstanceTag(instanceID, tagName="Name", connection=None): """ Load a tag from EC2 :param str instanceID: Instance ID to read the tag on :param str tagName: Name of tag to load :param connection: optional boto connection to use :returns: the tag's value :rtype: str """ assert isinstance(instanceID, basestring), ("instanceID must be a string but is %r" % instanceID) assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName) if not connection: # Assume AWS credentials are in the environment or the instance is using an IAM role connection = boto.ec2.connect_to_region(myRegion()) # Filter the tag values for our instance_id # http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeTags.html tagData = connection.get_all_tags(filters={"resource-id": instanceID, "key": tagName}) if tagData: tagValue = tagData[0].value else: raise RuntimeError, "%s: No such tag on %s" % (tagName, instanceID) return tagValue
[ "def", "readInstanceTag", "(", "instanceID", ",", "tagName", "=", "\"Name\"", ",", "connection", "=", "None", ")", ":", "assert", "isinstance", "(", "instanceID", ",", "basestring", ")", ",", "(", "\"instanceID must be a string but is %r\"", "%", "instanceID", ")", "assert", "isinstance", "(", "tagName", ",", "basestring", ")", ",", "(", "\"tagName must be a string but is %r\"", "%", "tagName", ")", "if", "not", "connection", ":", "# Assume AWS credentials are in the environment or the instance is using an IAM role", "connection", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "myRegion", "(", ")", ")", "# Filter the tag values for our instance_id", "# http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeTags.html", "tagData", "=", "connection", ".", "get_all_tags", "(", "filters", "=", "{", "\"resource-id\"", ":", "instanceID", ",", "\"key\"", ":", "tagName", "}", ")", "if", "tagData", ":", "tagValue", "=", "tagData", "[", "0", "]", ".", "value", "else", ":", "raise", "RuntimeError", ",", "\"%s: No such tag on %s\"", "%", "(", "tagName", ",", "instanceID", ")", "return", "tagValue" ]
Load a tag from EC2 :param str instanceID: Instance ID to read the tag on :param str tagName: Name of tag to load :param connection: optional boto connection to use :returns: the tag's value :rtype: str
[ "Load", "a", "tag", "from", "EC2" ]
77692b18e6574ac356e3e16659b96505c733afff
https://github.com/unixorn/haze/blob/77692b18e6574ac356e3e16659b96505c733afff/haze/ec2.py#L105-L130
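A usage sketch, assuming the caller runs on an instance with an IAM role so no explicit connection is needed (the instance ID is made up):

name = readInstanceTag("i-0123456789abcdef0")                        # tagName defaults to "Name"
env = readInstanceTag("i-0123456789abcdef0", tagName="environment")
# a missing tag raises RuntimeError instead of returning None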
249,156
unixorn/haze
haze/ec2.py
readMyEC2Tag
def readMyEC2Tag(tagName, connection=None): """ Load an EC2 tag for the running instance & return its value. :param str tagName: Name of the tag to read :param connection: Optional boto connection """ assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName) # Load metadata. if == {} we are on localhost # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html if not connection: # Assume AWS credentials are in the environment or the instance is using an IAM role connection = boto.ec2.connect_to_region(myRegion()) return readInstanceTag(connection=connection, instanceID=myInstanceID(), tagName=tagName)
python
def readMyEC2Tag(tagName, connection=None): """ Load an EC2 tag for the running instance & return its value. :param str tagName: Name of the tag to read :param connection: Optional boto connection """ assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName) # Load metadata. if == {} we are on localhost # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html if not connection: # Assume AWS credentials are in the environment or the instance is using an IAM role connection = boto.ec2.connect_to_region(myRegion()) return readInstanceTag(connection=connection, instanceID=myInstanceID(), tagName=tagName)
[ "def", "readMyEC2Tag", "(", "tagName", ",", "connection", "=", "None", ")", ":", "assert", "isinstance", "(", "tagName", ",", "basestring", ")", ",", "(", "\"tagName must be a string but is %r\"", "%", "tagName", ")", "# Load metadata. if == {} we are on localhost", "# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html", "if", "not", "connection", ":", "# Assume AWS credentials are in the environment or the instance is using an IAM role", "connection", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "myRegion", "(", ")", ")", "return", "readInstanceTag", "(", "connection", "=", "connection", ",", "instanceID", "=", "myInstanceID", "(", ")", ",", "tagName", "=", "tagName", ")" ]
Load an EC2 tag for the running instance & return its value. :param str tagName: Name of the tag to read :param connection: Optional boto connection
[ "Load", "an", "EC2", "tag", "for", "the", "running", "instance", "&", "print", "it", "." ]
77692b18e6574ac356e3e16659b96505c733afff
https://github.com/unixorn/haze/blob/77692b18e6574ac356e3e16659b96505c733afff/haze/ec2.py#L133-L151
249,157
vuolter/autoupgrade
autoupgrade/utils.py
ver_to_tuple
def ver_to_tuple(value): """ Convert a version-like string to a tuple of integers. """ return tuple(int(_f) for _f in re.split(r'\D+', value) if _f)
python
def ver_to_tuple(value): """ Convert a version-like string to a tuple of integers. """ return tuple(int(_f) for _f in re.split(r'\D+', value) if _f)
[ "def", "ver_to_tuple", "(", "value", ")", ":", "return", "tuple", "(", "int", "(", "_f", ")", "for", "_f", "in", "re", ".", "split", "(", "r'\\D+'", ",", "value", ")", "if", "_f", ")" ]
Convert a version-like string to a tuple of integers.
[ "Convert", "version", "like", "string", "to", "a", "tuple", "of", "integers", "." ]
e34aca9eacd6a6f5c7a7634a67c2ee911d48ac68
https://github.com/vuolter/autoupgrade/blob/e34aca9eacd6a6f5c7a7634a67c2ee911d48ac68/autoupgrade/utils.py#L28-L32
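Tuples compare element-wise, which is the point of the conversion; a quick sketch of inputs and outputs:

ver_to_tuple("1.2.10")           # (1, 2, 10)
ver_to_tuple("v1.2.10-beta3")    # (1, 2, 10, 3) -- every non-digit run is a separator
ver_to_tuple("1.10") > ver_to_tuple("1.9")  # True, unlike the plain string comparison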
249,158
rinocloud/rinocloud-python
rinocloud/object.py
Object.increment_name
def increment_name(self, name, i): """ takes something like test.txt and returns test1.txt """ if i == 0: return name if '.' in name: split = name.split('.') split[-2] = split[-2] + str(i) return '.'.join(split) else: return name + str(i)
python
def increment_name(self, name, i): """ takes something like test.txt and returns test1.txt """ if i == 0: return name if '.' in name: split = name.split('.') split[-2] = split[-2] + str(i) return '.'.join(split) else: return name + str(i)
[ "def", "increment_name", "(", "self", ",", "name", ",", "i", ")", ":", "if", "i", "==", "0", ":", "return", "name", "if", "'.'", "in", "name", ":", "split", "=", "name", ".", "split", "(", "'.'", ")", "split", "[", "-", "2", "]", "=", "split", "[", "-", "2", "]", "+", "str", "(", "i", ")", "return", "'.'", ".", "join", "(", "split", ")", "else", ":", "return", "name", "+", "str", "(", "i", ")" ]
takes something like test.txt and returns test1.txt
[ "takes", "something", "like", "test", ".", "txt", "and", "returns", "test1", ".", "txt" ]
7c4bf994a518f961cffedb7260fc1e4fa1838b38
https://github.com/rinocloud/rinocloud-python/blob/7c4bf994a518f961cffedb7260fc1e4fa1838b38/rinocloud/object.py#L71-L85
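A sketch of the expected outputs (`obj` stands for any instance, since `self` is unused):

obj.increment_name("test.txt", 0)        # "test.txt"        -- i == 0 keeps the name
obj.increment_name("test.txt", 1)        # "test1.txt"       -- suffix goes before the extension
obj.increment_name("archive.tar.gz", 2)  # "archive.tar2.gz" -- only the second-to-last part grows
obj.increment_name("README", 3)          # "README3"         -- no dot, plain append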
249,159
rinocloud/rinocloud-python
rinocloud/object.py
Object.save_local_metadata
def save_local_metadata(self): """ save all the exposed variables to a json file """ # save to the set local path and add .json with open(self.filepath + '.json', 'w+') as outfile: json.dump(self._prep_metadata(), outfile, indent=4)
python
def save_local_metadata(self): """ save all the exposed variables to a json file """ # save to the set local path and add .json with open(self.filepath + '.json', 'w+') as outfile: json.dump(self._prep_metadata(), outfile, indent=4)
[ "def", "save_local_metadata", "(", "self", ")", ":", "# save to the set local path and add .json", "with", "open", "(", "self", ".", "filepath", "+", "'.json'", ",", "'w+'", ")", "as", "outfile", ":", "json", ".", "dump", "(", "self", ".", "_prep_metadata", "(", ")", ",", "outfile", ",", "indent", "=", "4", ")" ]
save all the exposed variables to a json file
[ "save", "all", "the", "exposed", "variables", "to", "a", "json", "file" ]
7c4bf994a518f961cffedb7260fc1e4fa1838b38
https://github.com/rinocloud/rinocloud-python/blob/7c4bf994a518f961cffedb7260fc1e4fa1838b38/rinocloud/object.py#L155-L161
249,160
jrheard/madison_axi
madison_axi/axi.py
move_forward
def move_forward(num_steps): """Moves the pen forward a few steps in the direction that its "turtle" is facing. Arguments: num_steps - a number like 20. A bigger number makes the pen move farther. """ assert int(num_steps) == num_steps, "move_forward() only accepts integers, but you gave it " + str(num_steps) _make_cnc_request("move.forward./" + str(num_steps)) state['turtle'].forward(num_steps)
python
def move_forward(num_steps): """Moves the pen forward a few steps in the direction that its "turtle" is facing. Arguments: num_steps - a number like 20. A bigger number makes the pen move farther. """ assert int(num_steps) == num_steps, "move_forward() only accepts integers, but you gave it " + str(num_steps) _make_cnc_request("move.forward./" + str(num_steps)) state['turtle'].forward(num_steps)
[ "def", "move_forward", "(", "num_steps", ")", ":", "assert", "int", "(", "num_steps", ")", "==", "num_steps", ",", "\"move_forward() only accepts integers, but you gave it \"", "+", "str", "(", "num_steps", ")", "_make_cnc_request", "(", "\"move.forward./\"", "+", "str", "(", "num_steps", ")", ")", "state", "[", "'turtle'", "]", ".", "forward", "(", "num_steps", ")" ]
Moves the pen forward a few steps in the direction that its "turtle" is facing. Arguments: num_steps - a number like 20. A bigger number makes the pen move farther.
[ "Moves", "the", "pen", "forward", "a", "few", "steps", "in", "the", "direction", "that", "its", "turtle", "is", "facing", "." ]
240cd002aec134cefb69294a9e9b65630a60cddc
https://github.com/jrheard/madison_axi/blob/240cd002aec134cefb69294a9e9b65630a60cddc/madison_axi/axi.py#L97-L107
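A usage sketch, assuming the module's pen state has already been initialised:

move_forward(20)    # moves 20 steps along the current heading
move_forward(20.0)  # also accepted: int(20.0) == 20.0 passes the assert
move_forward(20.5)  # AssertionError -- only whole numbers are allowed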
249,161
pjuren/pyokit
src/pyokit/statistics/online.py
RollingMean.add
def add(self, v): """Add a new value.""" self._vals_added += 1 if self._mean is None: self._mean = v self._mean = self._mean + ((v - self._mean) / float(self._vals_added))
python
def add(self, v): """Add a new value.""" self._vals_added += 1 if self._mean is None: self._mean = v self._mean = self._mean + ((v - self._mean) / float(self._vals_added))
[ "def", "add", "(", "self", ",", "v", ")", ":", "self", ".", "_vals_added", "+=", "1", "if", "self", ".", "_mean", "is", "None", ":", "self", ".", "_mean", "=", "v", "self", ".", "_mean", "=", "self", ".", "_mean", "+", "(", "(", "v", "-", "self", ".", "_mean", ")", "/", "float", "(", "self", ".", "_vals_added", ")", ")" ]
Add a new value.
[ "Add", "a", "new", "value", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/statistics/online.py#L45-L50
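The update is the standard online mean, mean_n = mean_{n-1} + (v - mean_{n-1}) / n, so no running sum is kept; a worked sketch:

rm = RollingMean()
rm.add(2.0)  # mean = 2.0
rm.add(4.0)  # mean = 2.0 + (4.0 - 2.0) / 2 = 3.0
rm.add(9.0)  # mean = 3.0 + (9.0 - 3.0) / 3 = 5.0, i.e. (2 + 4 + 9) / 3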
249,162
pjuren/pyokit
src/pyokit/datastruct/genomeAlignment.py
_build_trees_by_chrom
def _build_trees_by_chrom(blocks, verbose=False): """ Construct set of interval trees from an iterable of genome alignment blocks. :return: a dictionary indexed by chromosome name where each entry is an interval tree for that chromosome. """ if verbose: sys.stderr.write("separating blocks by chromosome... ") by_chrom = {} for b in blocks: if b.chrom not in by_chrom: by_chrom[b.chrom] = [] by_chrom[b.chrom].append(b) if verbose: sys.stderr.write("done\n") if verbose: sys.stderr.write("building interval trees by chromosome... ") res = {} for c in by_chrom: res[c] = IntervalTree(by_chrom[c], openEnded=True) if verbose: sys.stderr.write("done\n") return res
python
def _build_trees_by_chrom(blocks, verbose=False): """ Construct set of interval trees from an iterable of genome alignment blocks. :return: a dictionary indexed by chromosome name where each entry is an interval tree for that chromosome. """ if verbose: sys.stderr.write("separating blocks by chromosome... ") by_chrom = {} for b in blocks: if b.chrom not in by_chrom: by_chrom[b.chrom] = [] by_chrom[b.chrom].append(b) if verbose: sys.stderr.write("done\n") if verbose: sys.stderr.write("building interval trees by chromosome... ") res = {} for c in by_chrom: res[c] = IntervalTree(by_chrom[c], openEnded=True) if verbose: sys.stderr.write("done\n") return res
[ "def", "_build_trees_by_chrom", "(", "blocks", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "\"separating blocks by chromosome... \"", ")", "by_chrom", "=", "{", "}", "for", "b", "in", "blocks", ":", "if", "b", ".", "chrom", "not", "in", "by_chrom", ":", "by_chrom", "[", "b", ".", "chrom", "]", "=", "[", "]", "by_chrom", "[", "b", ".", "chrom", "]", ".", "append", "(", "b", ")", "if", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "\"done\\n\"", ")", "if", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "\"building interval trees by chromosome... \"", ")", "res", "=", "{", "}", "for", "c", "in", "by_chrom", ":", "res", "[", "c", "]", "=", "IntervalTree", "(", "by_chrom", "[", "c", "]", ",", "openEnded", "=", "True", ")", "if", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "\"done\\n\"", ")", "return", "res" ]
Construct set of interval trees from an iterable of genome alignment blocks. :return: a dictionary indexed by chromosome name where each entry is an interval tree for that chromosome.
[ "Construct", "set", "of", "interval", "trees", "from", "an", "iterable", "of", "genome", "alignment", "blocks", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomeAlignment.py#L73-L98
249,163
pjuren/pyokit
src/pyokit/datastruct/genomeAlignment.py
GenomeAlignmentBlock.get_column_absolute
def get_column_absolute(self, position, miss_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None): """ return a column from the block as a dictionary indexed by seq. name. :param position: the index to extract from the block; must be absolute coordinates (i.e. between self.start and self.end, not inclusive of the end). :param miss_seqs: how to treat sequences with no actual sequence data for the column. :return: dictionary where keys are sequence names and values are nucleotides (raw strings). """ if position < self.start or position >= self.end: raise ValueError("getting column at genomic locus " + self._chrom + " " + str(position) + " failed; locus is outside of genome " + "alignment block") rel_coord = self.sequence_to_alignment_coords(self.reference_sequence_name, position, position + 1) assert(len(rel_coord) == 1) rel_start, rel_end = rel_coord[0] assert(rel_end == rel_start + 1) raw_col = self.get_column(rel_start, miss_seqs) if species is None: return raw_col res = {} for k in raw_col: name_parts = k.split(".") if name_parts[0] in species: res[k] = raw_col[k] return res
python
def get_column_absolute(self, position, miss_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None): """ return a column from the block as a dictionary indexed by seq. name. :param position: the index to extract from the block; must be absolute coordinates (i.e. between self.start and self.end, not inclusive of the end). :param miss_seqs: how to treat sequences with no actual sequence data for the column. :return: dictionary where keys are sequence names and values are nucleotides (raw strings). """ if position < self.start or position >= self.end: raise ValueError("getting column at genomic locus " + self._chrom + " " + str(position) + " failed; locus is outside of genome " + "alignment block") rel_coord = self.sequence_to_alignment_coords(self.reference_sequence_name, position, position + 1) assert(len(rel_coord) == 1) rel_start, rel_end = rel_coord[0] assert(rel_end == rel_start + 1) raw_col = self.get_column(rel_start, miss_seqs) if species is None: return raw_col res = {} for k in raw_col: name_parts = k.split(".") if name_parts[0] in species: res[k] = raw_col[k] return res
[ "def", "get_column_absolute", "(", "self", ",", "position", ",", "miss_seqs", "=", "MissingSequenceHandler", ".", "TREAT_AS_ALL_GAPS", ",", "species", "=", "None", ")", ":", "if", "position", "<", "self", ".", "start", "or", "position", ">=", "self", ".", "end", ":", "raise", "ValueError", "(", "\"getting column at genomic locus \"", "+", "self", ".", "_chrom", "+", "\" \"", "+", "str", "(", "position", ")", "+", "\" failed; locus is outside of genome \"", "+", "\"alignment block\"", ")", "rel_coord", "=", "self", ".", "sequence_to_alignment_coords", "(", "self", ".", "reference_sequence_name", ",", "position", ",", "position", "+", "1", ")", "assert", "(", "len", "(", "rel_coord", ")", "==", "1", ")", "rel_start", ",", "rel_end", "=", "rel_coord", "[", "0", "]", "assert", "(", "rel_end", "==", "rel_start", "+", "1", ")", "raw_col", "=", "self", ".", "get_column", "(", "rel_start", ",", "miss_seqs", ")", "if", "species", "is", "None", ":", "return", "raw_col", "res", "=", "{", "}", "for", "k", "in", "raw_col", ":", "name_parts", "=", "k", ".", "split", "(", "\".\"", ")", "if", "name_parts", "[", "0", "]", "in", "species", ":", "res", "[", "k", "]", "=", "raw_col", "[", "k", "]", "return", "res" ]
return a column from the block as a dictionary indexed by seq. name. :param position: the index to extract from the block; must be absolute coordinates (i.e. between self.start and self.end, not inclusive of the end). :param miss_seqs: how to treat sequences with no actual sequence data for the column. :return: dictionary where keys are sequence names and values are nucleotides (raw strings).
[ "return", "a", "column", "from", "the", "block", "as", "dictionary", "indexed", "by", "seq", ".", "name", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomeAlignment.py#L158-L191
249,164
pjuren/pyokit
src/pyokit/datastruct/genomeAlignment.py
GenomeAlignment.get_column
def get_column(self, chrom, position, missing_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None): """Get the alignment column at the specified chromosome and position.""" blocks = self.get_blocks(chrom, position, position + 1) if len(blocks) == 0: raise NoSuchAlignmentColumnError("Request for column on chrom " + chrom + " at position " + str(position) + " not possible; " + "genome alignment not defined at " + "that locus.") if len(blocks) > 1: raise NoUniqueColumnError("Request for column on chrom " + chrom + " at position " + str(position) + "not " + "possible; ambiguous alignment of that locus.") return blocks[0].get_column_absolute(position, missing_seqs, species)
python
def get_column(self, chrom, position, missing_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None): """Get the alignment column at the specified chromosome and position.""" blocks = self.get_blocks(chrom, position, position + 1) if len(blocks) == 0: raise NoSuchAlignmentColumnError("Request for column on chrom " + chrom + " at position " + str(position) + " not possible; " + "genome alignment not defined at " + "that locus.") if len(blocks) > 1: raise NoUniqueColumnError("Request for column on chrom " + chrom + " at position " + str(position) + "not " + "possible; ambiguous alignment of that locus.") return blocks[0].get_column_absolute(position, missing_seqs, species)
[ "def", "get_column", "(", "self", ",", "chrom", ",", "position", ",", "missing_seqs", "=", "MissingSequenceHandler", ".", "TREAT_AS_ALL_GAPS", ",", "species", "=", "None", ")", ":", "blocks", "=", "self", ".", "get_blocks", "(", "chrom", ",", "position", ",", "position", "+", "1", ")", "if", "len", "(", "blocks", ")", "==", "0", ":", "raise", "NoSuchAlignmentColumnError", "(", "\"Request for column on chrom \"", "+", "chrom", "+", "\" at position \"", "+", "str", "(", "position", ")", "+", "\" not possible; \"", "+", "\"genome alignment not defined at \"", "+", "\"that locus.\"", ")", "if", "len", "(", "blocks", ")", ">", "1", ":", "raise", "NoUniqueColumnError", "(", "\"Request for column on chrom \"", "+", "chrom", "+", "\" at position \"", "+", "str", "(", "position", ")", "+", "\"not \"", "+", "\"possible; ambiguous alignment of that locus.\"", ")", "return", "blocks", "[", "0", "]", ".", "get_column_absolute", "(", "position", ",", "missing_seqs", ",", "species", ")" ]
Get the alignment column at the specified chromosome and position.
[ "Get", "the", "alignment", "column", "at", "the", "specified", "chromosome", "and", "position", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomeAlignment.py#L312-L328
249,165
cogniteev/docido-python-sdk
docido_sdk/toolbox/decorators.py
decorate_instance_methods
def decorate_instance_methods(obj, decorator, includes=None, excludes=None): """ Decorate instance methods of an object. :param obj: Python object whose instance methods have to be decorated :param decorator: instance method decorator. >>> def decorate(name, f): >>> def __wrap(*args, **kwargs): >>> print '--> entering instance method {}'.format(name) >>> eax = f(*args, **kwargs) >>> print '<-- leaving instance method {}'.format(name) >>> return eax >>> return __wrap :param string list includes: restrict wrapped instance methods. Default is `None` meaning that all instance methods are wrapped. :param string list excludes: used to prevent some instance methods from being wrapped. Default is `None` :return: a new object wrapping `obj` whose instance methods are decorated. """ class InstanceMethodDecorator(object): def __getattribute__(self, name): value = obj.__getattribute__(name) if excludes and name in excludes: return value if includes and name not in includes: return value if inspect.ismethod(value): value = decorator(name, value) return value return InstanceMethodDecorator()
python
def decorate_instance_methods(obj, decorator, includes=None, excludes=None): """ Decorate instance methods of an object. :param obj: Python object whose instance methods have to be decorated :param decorator: instance method decorator. >>> def decorate(name, f): >>> def __wrap(*args, **kwargs): >>> print '--> entering instance method {}'.format(name) >>> eax = f(*args, **kwargs) >>> print '<-- leaving instance method {}'.format(name) >>> return eax >>> return __wrap :param string list includes: restrict wrapped instance methods. Default is `None` meaning that all instance methods are wrapped. :param string list excludes: used to prevent some instance methods from being wrapped. Default is `None` :return: a new object wrapping `obj` whose instance methods are decorated. """ class InstanceMethodDecorator(object): def __getattribute__(self, name): value = obj.__getattribute__(name) if excludes and name in excludes: return value if includes and name not in includes: return value if inspect.ismethod(value): value = decorator(name, value) return value return InstanceMethodDecorator()
[ "def", "decorate_instance_methods", "(", "obj", ",", "decorator", ",", "includes", "=", "None", ",", "excludes", "=", "None", ")", ":", "class", "InstanceMethodDecorator", "(", "object", ")", ":", "def", "__getattribute__", "(", "self", ",", "name", ")", ":", "value", "=", "obj", ".", "__getattribute__", "(", "name", ")", "if", "excludes", "and", "name", "in", "excludes", ":", "return", "value", "if", "includes", "and", "name", "not", "in", "includes", ":", "return", "value", "if", "inspect", ".", "ismethod", "(", "value", ")", ":", "value", "=", "decorator", "(", "name", ",", "value", ")", "return", "value", "return", "InstanceMethodDecorator", "(", ")" ]
Decorate instance methods of an object. :param obj: Python object whose instance methods have to be decorated :param decorator: instance method decorator. >>> def decorate(name, f): >>> def __wrap(*args, **kwargs): >>> print '--> entering instance method {}'.format(name) >>> eax = f(*args, **kwargs) >>> print '<-- leaving instance method {}'.format(name) >>> return eax >>> return __wrap :param string list includes: restrict wrapped instance methods. Default is `None` meaning that all instance methods are wrapped. :param string list excludes: used to prevent some instance methods from being wrapped. Default is `None` :return: a new object wrapping `obj` whose instance methods are decorated.
[ "Decorator", "instance", "methods", "of", "an", "object", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/decorators.py#L33-L63
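A usage sketch completing the docstring's example (the `Greeter` class and `trace` decorator are illustrative):

class Greeter(object):
    def hello(self, who):
        return "hello " + who

def trace(name, f):
    def __wrap(*args, **kwargs):
        print("--> entering instance method {}".format(name))
        result = f(*args, **kwargs)
        print("<-- leaving instance method {}".format(name))
        return result
    return __wrap

traced = decorate_instance_methods(Greeter(), trace)
traced.hello("world")  # prints the enter/leave lines around the real call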
249,166
cogniteev/docido-python-sdk
docido_sdk/toolbox/decorators.py
reraise
def reraise(clazz): """ Decorator catching every exception that might be raised by the wrapped function and raising another exception instead. The exception initially raised is passed as the first argument of the new exception. :param clazz: Python exception class to raise """ def _decorator(f): @functools.wraps(f) def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: raise clazz(e), None, sys.exc_info()[2] return _wrap return _decorator
python
def reraise(clazz): """ Decorator catching every exception that might be raised by the wrapped function and raising another exception instead. The exception initially raised is passed as the first argument of the new exception. :param clazz: Python exception class to raise """ def _decorator(f): @functools.wraps(f) def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: raise clazz(e), None, sys.exc_info()[2] return _wrap return _decorator
[ "def", "reraise", "(", "clazz", ")", ":", "def", "_decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "_wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "raise", "clazz", "(", "e", ")", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "return", "_wrap", "return", "_decorator" ]
Decorator catching every exception that might be raised by the wrapped function and raising another exception instead. The exception initially raised is passed as the first argument of the new exception. :param clazz: Python exception class to raise
[ "Decorator", "catching", "every", "exception", "that", "might", "be", "raised", "by", "wrapped", "function", "and", "raise", "another", "exception", "instead", ".", "Exception", "initially", "raised", "is", "passed", "in", "first", "argument", "of", "the", "raised", "exception", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/decorators.py#L66-L83
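A usage sketch (`ParseError` is made up); the three-expression `raise` in the decorator is Python 2 syntax that re-raises with the original traceback:

class ParseError(Exception):
    pass

@reraise(ParseError)
def to_int(s):
    return int(s)

to_int("42")   # 42
to_int("abc")  # raises ParseError(ValueError(...)), traceback points at int()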
249,167
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/aleph.py
getListOfBases
def getListOfBases(): """ This function is here mainly for unittest purposes Returns: list of str: Valid bases as they are used as URL parameters in links at the Aleph main page. """ downer = Downloader() data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list") dom = dhtmlparser.parseString(data.lower()) # from default aleph page filter links containing local_base in their href base_links = filter( lambda x: "href" in x.params and "local_base" in x.params["href"], dom.find("a") ) # split links by & - we will need only XXX from link.tld/..&local_base=XXX base_links = map( lambda x: x.params["href"].replace("?", "&", 1).split("&"), base_links ) # filter only sections containing bases bases = map( lambda link: filter(lambda base: "local_base=" in base, link)[0], base_links ) # filter bases from base sections bases = map(lambda x: x.split("=")[1].strip(), bases) return list(set(bases))
python
def getListOfBases(): """ This function is here mainly for unittest purposes Returns: list of str: Valid bases as they are used as URL parameters in links at the Aleph main page. """ downer = Downloader() data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list") dom = dhtmlparser.parseString(data.lower()) # from default aleph page filter links containing local_base in their href base_links = filter( lambda x: "href" in x.params and "local_base" in x.params["href"], dom.find("a") ) # split links by & - we will need only XXX from link.tld/..&local_base=XXX base_links = map( lambda x: x.params["href"].replace("?", "&", 1).split("&"), base_links ) # filter only sections containing bases bases = map( lambda link: filter(lambda base: "local_base=" in base, link)[0], base_links ) # filter bases from base sections bases = map(lambda x: x.split("=")[1].strip(), bases) return list(set(bases))
[ "def", "getListOfBases", "(", ")", ":", "downer", "=", "Downloader", "(", ")", "data", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "\"/F/?func=file&file_name=base-list\"", ")", "dom", "=", "dhtmlparser", ".", "parseString", "(", "data", ".", "lower", "(", ")", ")", "# from default aleph page filter links containing local_base in their href", "base_links", "=", "filter", "(", "lambda", "x", ":", "\"href\"", "in", "x", ".", "params", "and", "\"local_base\"", "in", "x", ".", "params", "[", "\"href\"", "]", ",", "dom", ".", "find", "(", "\"a\"", ")", ")", "# split links by & - we will need only XXX from link.tld/..&local_base=XXX", "base_links", "=", "map", "(", "lambda", "x", ":", "x", ".", "params", "[", "\"href\"", "]", ".", "replace", "(", "\"?\"", ",", "\"&\"", ",", "1", ")", ".", "split", "(", "\"&\"", ")", ",", "base_links", ")", "# filter only sections containing bases", "bases", "=", "map", "(", "lambda", "link", ":", "filter", "(", "lambda", "base", ":", "\"local_base=\"", "in", "base", ",", "link", ")", "[", "0", "]", ",", "base_links", ")", "# filter bases from base sections", "bases", "=", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "\"=\"", ")", "[", "1", "]", ".", "strip", "(", ")", ",", "bases", ")", "return", "list", "(", "set", "(", "bases", ")", ")" ]
This function is here mainly for unittest purposes Returns: list of str: Valid bases as they are used as URL parameters in links at the Aleph main page.
[ "This", "function", "is", "here", "mainly", "for", "purposes", "of", "unittest" ]
360342c0504d5daa2344e864762cdf938d4149c7
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L235-L268
249,168
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/aleph.py
searchInAleph
def searchInAleph(base, phrase, considerSimilar, field): """ Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar (bool): fuzzy search, which is not working at all, so don't use it field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting of the following fields: | error (optional): present if there was some form of error | no_entries (int): number of entries that can be fetched from aleph | no_records (int): exact meaning unclear, but it is always >= `no_entries` | set_number (int): important - something like ID of your request | session-id (str): used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid """ downer = Downloader() if field.lower() not in VALID_ALEPH_FIELDS: raise InvalidAlephFieldException("Unknown field '" + field + "'!") param_url = Template(SEARCH_URL_TEMPLATE).substitute( PHRASE=quote_plus(phrase), # urlencode phrase BASE=base, FIELD=field, SIMILAR="Y" if considerSimilar else "N" ) result = downer.download(ALEPH_URL + param_url) dom = dhtmlparser.parseString(result) find = dom.find("find") # find <find> element :) if len(find) <= 0: raise AlephException("Aleph didn't returned any information.") find = find[0] # convert aleph result into dictionary result = _alephResultToDict(find) # add informations about base into result result["base"] = base if "error" not in result: return result # handle errors if result["error"] == "empty set": result["no_entries"] = 0 # empty set have 0 entries return result else: raise AlephException(result["error"])
python
def searchInAleph(base, phrase, considerSimilar, field): """ Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar (bool): fuzzy search, which is not working at all, so don't use it field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting of the following fields: | error (optional): present if there was some form of error | no_entries (int): number of entries that can be fetched from aleph | no_records (int): exact meaning unclear, but it is always >= `no_entries` | set_number (int): important - something like ID of your request | session-id (str): used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid """ downer = Downloader() if field.lower() not in VALID_ALEPH_FIELDS: raise InvalidAlephFieldException("Unknown field '" + field + "'!") param_url = Template(SEARCH_URL_TEMPLATE).substitute( PHRASE=quote_plus(phrase), # urlencode phrase BASE=base, FIELD=field, SIMILAR="Y" if considerSimilar else "N" ) result = downer.download(ALEPH_URL + param_url) dom = dhtmlparser.parseString(result) find = dom.find("find") # find <find> element :) if len(find) <= 0: raise AlephException("Aleph didn't returned any information.") find = find[0] # convert aleph result into dictionary result = _alephResultToDict(find) # add informations about base into result result["base"] = base if "error" not in result: return result # handle errors if result["error"] == "empty set": result["no_entries"] = 0 # empty set have 0 entries return result else: raise AlephException(result["error"])
[ "def", "searchInAleph", "(", "base", ",", "phrase", ",", "considerSimilar", ",", "field", ")", ":", "downer", "=", "Downloader", "(", ")", "if", "field", ".", "lower", "(", ")", "not", "in", "VALID_ALEPH_FIELDS", ":", "raise", "InvalidAlephFieldException", "(", "\"Unknown field '\"", "+", "field", "+", "\"'!\"", ")", "param_url", "=", "Template", "(", "SEARCH_URL_TEMPLATE", ")", ".", "substitute", "(", "PHRASE", "=", "quote_plus", "(", "phrase", ")", ",", "# urlencode phrase", "BASE", "=", "base", ",", "FIELD", "=", "field", ",", "SIMILAR", "=", "\"Y\"", "if", "considerSimilar", "else", "\"N\"", ")", "result", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "param_url", ")", "dom", "=", "dhtmlparser", ".", "parseString", "(", "result", ")", "find", "=", "dom", ".", "find", "(", "\"find\"", ")", "# find <find> element :)", "if", "len", "(", "find", ")", "<=", "0", ":", "raise", "AlephException", "(", "\"Aleph didn't returned any information.\"", ")", "find", "=", "find", "[", "0", "]", "# convert aleph result into dictionary", "result", "=", "_alephResultToDict", "(", "find", ")", "# add informations about base into result", "result", "[", "\"base\"", "]", "=", "base", "if", "\"error\"", "not", "in", "result", ":", "return", "result", "# handle errors", "if", "result", "[", "\"error\"", "]", "==", "\"empty set\"", ":", "result", "[", "\"no_entries\"", "]", "=", "0", "# empty set have 0 entries", "return", "result", "else", ":", "raise", "AlephException", "(", "result", "[", "\"error\"", "]", ")" ]
Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar (bool): fuzzy search, which is not working at all, so don't use it field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting of the following fields: | error (optional): present if there was some form of error | no_entries (int): number of entries that can be fetched from aleph | no_records (int): exact meaning unclear, but it is always >= `no_entries` | set_number (int): important - something like ID of your request | session-id (str): used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid
[ "Send", "request", "to", "the", "aleph", "search", "engine", "." ]
360342c0504d5daa2344e864762cdf938d4149c7
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L316-L388
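A usage sketch of a single search (the ISBN is illustrative); note that an empty result comes back with `no_entries == 0` rather than raising:

result = searchInAleph("nkc", "80-251-0225-4", False, "isbn")
if result["no_entries"] > 0:
    print("found %s record(s) in set %s" % (result["no_entries"], result["set_number"]))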
249,169
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/aleph.py
downloadRecords
def downloadRecords(search_result, from_doc=1): """ Download `MAX_RECORDS` documents from `search_result` starting from `from_doc`. Args: search_result (dict): returned from :func:`searchInAleph`. from_doc (int, default 1): Start from document number `from_doc`. Returns: list: List of XML strings with documents in MARC OAI. """ downer = Downloader() if "set_number" not in search_result: return [] # set numbers should be probably aligned to some length set_number = str(search_result["set_number"]) if len(set_number) < 6: set_number = (6 - len(set_number)) * "0" + set_number # download all no_records records = [] for cnt in range(search_result["no_records"]): doc_number = from_doc + cnt if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]: break set_data = downer.download( ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute( SET_NUM=set_number, RECORD_NUM=doc_number, ) ) records.append(set_data) return records
python
def downloadRecords(search_result, from_doc=1): """ Download `MAX_RECORDS` documents from `search_result` starting from `from_doc`. Args: search_result (dict): returned from :func:`searchInAleph`. from_doc (int, default 1): Start from document number `from_doc`. Returns: list: List of XML strings with documents in MARC OAI. """ downer = Downloader() if "set_number" not in search_result: return [] # set numbers should be probably aligned to some length set_number = str(search_result["set_number"]) if len(set_number) < 6: set_number = (6 - len(set_number)) * "0" + set_number # download all no_records records = [] for cnt in range(search_result["no_records"]): doc_number = from_doc + cnt if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]: break set_data = downer.download( ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute( SET_NUM=set_number, RECORD_NUM=doc_number, ) ) records.append(set_data) return records
[ "def", "downloadRecords", "(", "search_result", ",", "from_doc", "=", "1", ")", ":", "downer", "=", "Downloader", "(", ")", "if", "\"set_number\"", "not", "in", "search_result", ":", "return", "[", "]", "# set numbers should be probably aligned to some length", "set_number", "=", "str", "(", "search_result", "[", "\"set_number\"", "]", ")", "if", "len", "(", "set_number", ")", "<", "6", ":", "set_number", "=", "(", "6", "-", "len", "(", "set_number", ")", ")", "*", "\"0\"", "+", "set_number", "# download all no_records", "records", "=", "[", "]", "for", "cnt", "in", "range", "(", "search_result", "[", "\"no_records\"", "]", ")", ":", "doc_number", "=", "from_doc", "+", "cnt", "if", "cnt", ">=", "MAX_RECORDS", "or", "doc_number", ">", "search_result", "[", "\"no_records\"", "]", ":", "break", "set_data", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "Template", "(", "RECORD_URL_TEMPLATE", ")", ".", "substitute", "(", "SET_NUM", "=", "set_number", ",", "RECORD_NUM", "=", "doc_number", ",", ")", ")", "records", ".", "append", "(", "set_data", ")", "return", "records" ]
Download `MAX_RECORDS` documents from `search_result` starting from `from_doc`. Args: search_result (dict): returned from :func:`searchInAleph`. from_doc (int, default 1): Start from document number `from_doc`. Returns: list: List of XML strings with documents in MARC OAI.
[ "Download", "MAX_RECORDS", "documents", "from", "search_result", "starting", "from", "from_doc", "." ]
360342c0504d5daa2344e864762cdf938d4149c7
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L391-L430
249,170
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/aleph.py
getDocumentIDs
def getDocumentIDs(aleph_search_result, number_of_docs=-1): """ Get IDs, which can be used as parameters for other functions. Args: aleph_search_result (dict): returned from :func:`searchInAleph` number_of_docs (int, optional): how many :class:`DocumentID` from set given by `aleph_search_result` should be returned. Default -1 for all of them. Returns: list: :class:`DocumentID` named tuples for the given `aleph_search_result`. Raises: AlephException: If Aleph returns data in an unknown format. Note: Returned :class:`DocumentID` can be used as parameters to :func:`downloadMARCXML`. """ downer = Downloader() if "set_number" not in aleph_search_result: return [] # set numbers should be probably aligned to some length set_number = str(aleph_search_result["set_number"]) if len(set_number) < 6: set_number = (6 - len(set_number)) * "0" + set_number # limit number of fetched documents, if -1, download all if number_of_docs <= 0: number_of_docs = aleph_search_result["no_entries"] # download data about given set set_data = downer.download( ALEPH_URL + Template(SET_URL_TEMPLATE).substitute( SET_NUMBER=set_number, NUMBER_OF_DOCS=number_of_docs, ) ) # parse data dom = dhtmlparser.parseString(set_data) set_data = dom.find("ill-get-set") # there should be at least one <ill-get-set> field if len(set_data) <= 0: raise AlephException("Aleph didn't returned set data.") ids = [] for library in set_data: documents = _alephResultToDict(library) if "error" in documents: raise AlephException("getDocumentIDs: " + documents["error"]) # convert all document records to DocumentID named tuple and extend # them to 'ids' array if isinstance(documents["doc-number"], list): ids.extend( map( lambda x: DocumentID( x, documents["set-library"], aleph_search_result["base"] ), set(documents["doc-number"]) ) ) else: ids.append( DocumentID( documents["doc-number"], documents["set-library"], aleph_search_result["base"] ) ) return ids
python
def getDocumentIDs(aleph_search_result, number_of_docs=-1): """ Get IDs, which can be used as parameters for other functions. Args: aleph_search_result (dict): returned from :func:`searchInAleph` number_of_docs (int, optional): how many :class:`DocumentID` from set given by `aleph_search_result` should be returned. Default -1 for all of them. Returns: list: :class:`DocumentID` named tuples for the given `aleph_search_result`. Raises: AlephException: If Aleph returns data in an unknown format. Note: Returned :class:`DocumentID` can be used as parameters to :func:`downloadMARCXML`. """ downer = Downloader() if "set_number" not in aleph_search_result: return [] # set numbers should be probably aligned to some length set_number = str(aleph_search_result["set_number"]) if len(set_number) < 6: set_number = (6 - len(set_number)) * "0" + set_number # limit number of fetched documents, if -1, download all if number_of_docs <= 0: number_of_docs = aleph_search_result["no_entries"] # download data about given set set_data = downer.download( ALEPH_URL + Template(SET_URL_TEMPLATE).substitute( SET_NUMBER=set_number, NUMBER_OF_DOCS=number_of_docs, ) ) # parse data dom = dhtmlparser.parseString(set_data) set_data = dom.find("ill-get-set") # there should be at least one <ill-get-set> field if len(set_data) <= 0: raise AlephException("Aleph didn't returned set data.") ids = [] for library in set_data: documents = _alephResultToDict(library) if "error" in documents: raise AlephException("getDocumentIDs: " + documents["error"]) # convert all document records to DocumentID named tuple and extend # them to 'ids' array if isinstance(documents["doc-number"], list): ids.extend( map( lambda x: DocumentID( x, documents["set-library"], aleph_search_result["base"] ), set(documents["doc-number"]) ) ) else: ids.append( DocumentID( documents["doc-number"], documents["set-library"], aleph_search_result["base"] ) ) return ids
[ "def", "getDocumentIDs", "(", "aleph_search_result", ",", "number_of_docs", "=", "-", "1", ")", ":", "downer", "=", "Downloader", "(", ")", "if", "\"set_number\"", "not", "in", "aleph_search_result", ":", "return", "[", "]", "# set numbers should be probably aligned to some length", "set_number", "=", "str", "(", "aleph_search_result", "[", "\"set_number\"", "]", ")", "if", "len", "(", "set_number", ")", "<", "6", ":", "set_number", "=", "(", "6", "-", "len", "(", "set_number", ")", ")", "*", "\"0\"", "+", "set_number", "# limit number of fetched documents, if -1, download all", "if", "number_of_docs", "<=", "0", ":", "number_of_docs", "=", "aleph_search_result", "[", "\"no_entries\"", "]", "# download data about given set", "set_data", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "Template", "(", "SET_URL_TEMPLATE", ")", ".", "substitute", "(", "SET_NUMBER", "=", "set_number", ",", "NUMBER_OF_DOCS", "=", "number_of_docs", ",", ")", ")", "# parse data", "dom", "=", "dhtmlparser", ".", "parseString", "(", "set_data", ")", "set_data", "=", "dom", ".", "find", "(", "\"ill-get-set\"", ")", "# there should be at least one <ill-get-set> field", "if", "len", "(", "set_data", ")", "<=", "0", ":", "raise", "AlephException", "(", "\"Aleph didn't returned set data.\"", ")", "ids", "=", "[", "]", "for", "library", "in", "set_data", ":", "documents", "=", "_alephResultToDict", "(", "library", ")", "if", "\"error\"", "in", "documents", ":", "raise", "AlephException", "(", "\"getDocumentIDs: \"", "+", "documents", "[", "\"error\"", "]", ")", "# convert all document records to DocumentID named tuple and extend", "# them to 'ids' array", "if", "isinstance", "(", "documents", "[", "\"doc-number\"", "]", ",", "list", ")", ":", "ids", ".", "extend", "(", "map", "(", "lambda", "x", ":", "DocumentID", "(", "x", ",", "documents", "[", "\"set-library\"", "]", ",", "aleph_search_result", "[", "\"base\"", "]", ")", ",", "set", "(", "documents", "[", "\"doc-number\"", "]", ")", ")", ")", "else", ":", "ids", ".", "append", "(", "DocumentID", "(", "documents", "[", "\"doc-number\"", "]", ",", "documents", "[", "\"set-library\"", "]", ",", "aleph_search_result", "[", "\"base\"", "]", ")", ")", "return", "ids" ]
Get IDs, which can be used as parameters for other functions.

Args:
    aleph_search_result (dict): returned from :func:`searchInAleph`
    number_of_docs (int, optional): how many :class:`DocumentID` from set
            given by `aleph_search_result` should be returned. Default -1
            for all of them.

Returns:
    list: :class:`DocumentID` named tuples for the given `aleph_search_result`.

Raises:
    AlephException: If Aleph returns unknown format of data.

Note:
    Returned :class:`DocumentID` can be used as parameters to
    :func:`downloadMARCXML`.
[ "Get", "IDs", "which", "can", "be", "used", "as", "parameters", "for", "other", "functions", "." ]
360342c0504d5daa2344e864762cdf938d4149c7
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L433-L512
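Usage note (not part of the original record): a minimal sketch showing how `getDocumentIDs` consumes the dict produced by :func:`searchInAleph`, which the docstring references. The import path is inferred from the record's `path` field, and the `searchInAleph` arguments are hypothetical:

from edeposit.amqp.aleph.aleph import searchInAleph, getDocumentIDs

# hypothetical query -- searchInAleph's exact signature is an assumption here
search_result = searchInAleph("nkc", "umberto eco")

# take at most ten DocumentID named tuples from the result set
doc_ids = getDocumentIDs(search_result, number_of_docs=10)
for doc_id in doc_ids:
    print(doc_id)   # (doc number, set library, base), per the constructor calls above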
249,171
edeposit/edeposit.amqp.aleph
src/edeposit/amqp/aleph/aleph.py
downloadMARCXML
def downloadMARCXML(doc_id, library, base="nkc"): """ Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException """ downer = Downloader() data = downer.download( ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute( DOC_ID=doc_id, LIBRARY=library ) ) dom = dhtmlparser.parseString(data) # check if there are any errors # bad library error error = dom.find("login") if error: error_msg = error[0].find("error") if error_msg: raise LibraryNotFoundException( "Can't download document doc_id: '" + str(doc_id) + "' " + "(probably bad library: '" + library + "')!\nMessage: " + "\n".join(map(lambda x: x.getContent(), error_msg)) ) # another error - document not found error = dom.find("ill-get-doc") if error: error_msg = error[0].find("error") if error_msg: raise DocumentNotFoundException( "\n".join(map(lambda x: x.getContent(), error_msg)) ) return data
python
def downloadMARCXML(doc_id, library, base="nkc"): """ Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException """ downer = Downloader() data = downer.download( ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute( DOC_ID=doc_id, LIBRARY=library ) ) dom = dhtmlparser.parseString(data) # check if there are any errors # bad library error error = dom.find("login") if error: error_msg = error[0].find("error") if error_msg: raise LibraryNotFoundException( "Can't download document doc_id: '" + str(doc_id) + "' " + "(probably bad library: '" + library + "')!\nMessage: " + "\n".join(map(lambda x: x.getContent(), error_msg)) ) # another error - document not found error = dom.find("ill-get-doc") if error: error_msg = error[0].find("error") if error_msg: raise DocumentNotFoundException( "\n".join(map(lambda x: x.getContent(), error_msg)) ) return data
[ "def", "downloadMARCXML", "(", "doc_id", ",", "library", ",", "base", "=", "\"nkc\"", ")", ":", "downer", "=", "Downloader", "(", ")", "data", "=", "downer", ".", "download", "(", "ALEPH_URL", "+", "Template", "(", "DOC_URL_TEMPLATE", ")", ".", "substitute", "(", "DOC_ID", "=", "doc_id", ",", "LIBRARY", "=", "library", ")", ")", "dom", "=", "dhtmlparser", ".", "parseString", "(", "data", ")", "# check if there are any errors", "# bad library error", "error", "=", "dom", ".", "find", "(", "\"login\"", ")", "if", "error", ":", "error_msg", "=", "error", "[", "0", "]", ".", "find", "(", "\"error\"", ")", "if", "error_msg", ":", "raise", "LibraryNotFoundException", "(", "\"Can't download document doc_id: '\"", "+", "str", "(", "doc_id", ")", "+", "\"' \"", "+", "\"(probably bad library: '\"", "+", "library", "+", "\"')!\\nMessage: \"", "+", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", ".", "getContent", "(", ")", ",", "error_msg", ")", ")", ")", "# another error - document not found", "error", "=", "dom", ".", "find", "(", "\"ill-get-doc\"", ")", "if", "error", ":", "error_msg", "=", "error", "[", "0", "]", ".", "find", "(", "\"error\"", ")", "if", "error_msg", ":", "raise", "DocumentNotFoundException", "(", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", ".", "getContent", "(", ")", ",", "error_msg", ")", ")", ")", "return", "data" ]
Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException
[ "Download", "MARC", "XML", "document", "with", "given", "doc_id", "from", "given", "library", "." ]
360342c0504d5daa2344e864762cdf938d4149c7
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L515-L566
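Usage note (not part of the original record): chaining the `DocumentID` tuples from `getDocumentIDs` into `downloadMARCXML`, handling the two exceptions the docstring names. The exception import location is an assumption based on the record's `path` field:

from edeposit.amqp.aleph.aleph import (downloadMARCXML,
                                       LibraryNotFoundException,
                                       DocumentNotFoundException)

for doc_id, library, base in doc_ids:   # DocumentID is a 3-field named tuple
    try:
        marc_xml = downloadMARCXML(doc_id, library)
    except (LibraryNotFoundException, DocumentNotFoundException) as err:
        print("skipping %s: %s" % (doc_id, err))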
249,172
KnowledgeLinks/rdfframework
rdfframework/search/esloaders_temp.py
EsRdfBulkLoader._index_item
def _index_item(self, uri, num, batch_num):
    """ queries the triplestore for an item and sends it to elasticsearch """
    data = RdfDataset(get_all_item_data(uri, self.namespace),
                      uri).base_class.es_json()
    self.batch_data[batch_num].append(data)
    self.count += 1
python
def _index_item(self, uri, num, batch_num):
    """ queries the triplestore for an item and sends it to elasticsearch """
    data = RdfDataset(get_all_item_data(uri, self.namespace),
                      uri).base_class.es_json()
    self.batch_data[batch_num].append(data)
    self.count += 1
[ "def", "_index_item", "(", "self", ",", "uri", ",", "num", ",", "batch_num", ")", ":", "data", "=", "RdfDataset", "(", "get_all_item_data", "(", "uri", ",", "self", ".", "namespace", ")", ",", "uri", ")", ".", "base_class", ".", "es_json", "(", ")", "self", ".", "batch_data", "[", "batch_num", "]", ".", "append", "(", "data", ")", "self", ".", "count", "+=", "1" ]
queries the triplestore for an item and sends it to elasticsearch
[ "queries", "the", "triplestore", "for", "an", "item", "sends", "it", "to", "elasticsearch" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/esloaders_temp.py#L59-L65
249,173
StanfordBioinformatics/scgpm_seqresults_dnanexus
scgpm_seqresults_dnanexus/dnanexus_utils.py
share_with_org
def share_with_org(project_ids, org, access_level, suppress_email_notification=False):
    """
    Shares one or more DNAnexus projects with an organization.

    Args:
        project_ids: `list`. One or more DNAnexus project identifiers, where each
            project ID is in the form "project-FXq6B809p5jKzp2vJkjkKvg3".
        org: `str`. The name of the DNAnexus org with which to share the projects.
        access_level: The permission level to give to members of the org - one of
            ["VIEW","UPLOAD","CONTRIBUTE","ADMINISTER"].
        suppress_email_notification: `bool`. True means to suppress the email
            notification that the DNAnexus platform sends for each shared project.
    """
    for p in project_ids:
        dxpy.api.project_invite(object_id=p,input_params={"invitee": org,"level": access_level,"suppressEmailNotification": suppress_email_notification})
python
def share_with_org(project_ids, org, access_level, suppress_email_notification=False):
    """
    Shares one or more DNAnexus projects with an organization.

    Args:
        project_ids: `list`. One or more DNAnexus project identifiers, where each
            project ID is in the form "project-FXq6B809p5jKzp2vJkjkKvg3".
        org: `str`. The name of the DNAnexus org with which to share the projects.
        access_level: The permission level to give to members of the org - one of
            ["VIEW","UPLOAD","CONTRIBUTE","ADMINISTER"].
        suppress_email_notification: `bool`. True means to suppress the email
            notification that the DNAnexus platform sends for each shared project.
    """
    for p in project_ids:
        dxpy.api.project_invite(object_id=p,input_params={"invitee": org,"level": access_level,"suppressEmailNotification": suppress_email_notification})
[ "def", "share_with_org", "(", "project_ids", ",", "org", ",", "access_level", ",", "suppress_email_notification", "=", "False", ")", ":", "for", "p", "in", "project_ids", ":", "dxpy", ".", "api", ".", "project_invite", "(", "object_id", "=", "p", ",", "input_params", "=", "{", "\"invitee\"", ":", "org", ",", "\"level\"", ":", "access_level", ",", "\"suppressEmailNotification\"", ":", "suppress_email_notification", "}", ")" ]
Shares one or more DNAnexus projects with an organization.

Args:
    project_ids: `list`. One or more DNAnexus project identifiers, where each project ID is in the form "project-FXq6B809p5jKzp2vJkjkKvg3".
    org: `str`. The name of the DNAnexus org with which to share the projects.
    access_level: The permission level to give to members of the org - one of ["VIEW","UPLOAD","CONTRIBUTE","ADMINISTER"].
    suppress_email_notification: `bool`. True means to suppress the email notification that the DNAnexus platform sends for each shared project.
[ "Shares", "one", "or", "more", "DNAnexus", "projects", "with", "an", "organization", "." ]
2bdaae5ec5d38a07fec99e0c5379074a591d77b6
https://github.com/StanfordBioinformatics/scgpm_seqresults_dnanexus/blob/2bdaae5ec5d38a07fec99e0c5379074a591d77b6/scgpm_seqresults_dnanexus/dnanexus_utils.py#L133-L147
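Usage note (not part of the original record): sharing a project read-only with an org while suppressing the notification email. The org name is hypothetical; the import path follows the record's `path` field:

from scgpm_seqresults_dnanexus.dnanexus_utils import share_with_org

share_with_org(
    project_ids=["project-FXq6B809p5jKzp2vJkjkKvg3"],  # ID form from the docstring
    org="org-scgpm",                                   # hypothetical org name
    access_level="VIEW",
    suppress_email_notification=True,
)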
249,174
amcfague/webunit2
webunit2/utils.py
parse_url
def parse_url(url): """ Takes a URL string and returns its protocol and server """ # Verify that the protocol makes sense. We shouldn't guess! if not RE_PROTOCOL_SERVER.match(url): raise Exception("URL should begin with `protocol://domain`") protocol, server, path, _, _, _ = urlparse.urlparse(url) return protocol, server
python
def parse_url(url): """ Takes a URL string and returns its protocol and server """ # Verify that the protocol makes sense. We shouldn't guess! if not RE_PROTOCOL_SERVER.match(url): raise Exception("URL should begin with `protocol://domain`") protocol, server, path, _, _, _ = urlparse.urlparse(url) return protocol, server
[ "def", "parse_url", "(", "url", ")", ":", "# Verify that the protocol makes sense. We shouldn't guess!", "if", "not", "RE_PROTOCOL_SERVER", ".", "match", "(", "url", ")", ":", "raise", "Exception", "(", "\"URL should begin with `protocol://domain`\"", ")", "protocol", ",", "server", ",", "path", ",", "_", ",", "_", ",", "_", "=", "urlparse", ".", "urlparse", "(", "url", ")", "return", "protocol", ",", "server" ]
Takes a URL string and returns its protocol and server
[ "Takes", "a", "URL", "string", "and", "returns", "its", "protocol", "and", "server" ]
3157e5837aad0810800628c1383f1fe11ee3e513
https://github.com/amcfague/webunit2/blob/3157e5837aad0810800628c1383f1fe11ee3e513/webunit2/utils.py#L45-L53
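Usage note (not part of the original record): `parse_url` keeps only the first two components of the `urlparse` result, so path and query are discarded. Note that webunit2 targets Python 2 (it uses the `urlparse` module):

from webunit2.utils import parse_url

protocol, server = parse_url("http://example.com/some/path?q=1")
assert (protocol, server) == ("http", "example.com")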
249,175
msuozzo/Aduro
aduro/store.py
EventStore.record_event
def record_event(self, event): """Records the ``KindleEvent`` `event` in the store """ with open(self._path, 'a') as file_: file_.write(str(event) + '\n')
python
def record_event(self, event): """Records the ``KindleEvent`` `event` in the store """ with open(self._path, 'a') as file_: file_.write(str(event) + '\n')
[ "def", "record_event", "(", "self", ",", "event", ")", ":", "with", "open", "(", "self", ".", "_path", ",", "'a'", ")", "as", "file_", ":", "file_", ".", "write", "(", "str", "(", "event", ")", "+", "'\\n'", ")" ]
Records the ``KindleEvent`` `event` in the store
[ "Records", "the", "KindleEvent", "event", "in", "the", "store" ]
338eeb1deeff30c198e721b660ae4daca3660911
https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/store.py#L14-L18
249,176
msuozzo/Aduro
aduro/store.py
EventStore.get_events
def get_events(self): """Returns a list of all ``KindleEvent``s held in the store """ with open(self._path, 'r') as file_: file_lines = file_.read().splitlines() event_lines = [line for line in file_lines if line] events = [] for event_line in event_lines: for event_cls in (AddEvent, SetReadingEvent, ReadEvent, SetFinishedEvent): try: event = event_cls.from_str(event_line) except EventParseError: pass else: events.append(event) return events
python
def get_events(self): """Returns a list of all ``KindleEvent``s held in the store """ with open(self._path, 'r') as file_: file_lines = file_.read().splitlines() event_lines = [line for line in file_lines if line] events = [] for event_line in event_lines: for event_cls in (AddEvent, SetReadingEvent, ReadEvent, SetFinishedEvent): try: event = event_cls.from_str(event_line) except EventParseError: pass else: events.append(event) return events
[ "def", "get_events", "(", "self", ")", ":", "with", "open", "(", "self", ".", "_path", ",", "'r'", ")", "as", "file_", ":", "file_lines", "=", "file_", ".", "read", "(", ")", ".", "splitlines", "(", ")", "event_lines", "=", "[", "line", "for", "line", "in", "file_lines", "if", "line", "]", "events", "=", "[", "]", "for", "event_line", "in", "event_lines", ":", "for", "event_cls", "in", "(", "AddEvent", ",", "SetReadingEvent", ",", "ReadEvent", ",", "SetFinishedEvent", ")", ":", "try", ":", "event", "=", "event_cls", ".", "from_str", "(", "event_line", ")", "except", "EventParseError", ":", "pass", "else", ":", "events", ".", "append", "(", "event", ")", "return", "events" ]
Returns a list of all ``KindleEvent``s held in the store
[ "Returns", "a", "list", "of", "all", "KindleEvent", "s", "held", "in", "the", "store" ]
338eeb1deeff30c198e721b660ae4daca3660911
https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/store.py#L20-L36
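Usage note (not part of the original records): a round trip through `EventStore.record_event` and `get_events`. Only `self._path` is visible in the records, so the constructor argument and the `AddEvent` import and signature are assumptions:

from aduro.store import EventStore
from aduro.events import AddEvent   # hypothetical import location

store = EventStore("events.log")    # assuming the constructor takes the file path
store.record_event(AddEvent("B000EXAMPLE"))   # hypothetical event constructor
for event in store.get_events():
    print(event)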
249,177
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.name
def name(self, value): """Set parameter name. :param str value: name value. """ if isinstance(value, string_types): match = Parameter._PARAM_NAME_COMPILER_MATCHER(value) if match is None or match.group() != value: value = re_compile(value) self._name = value
python
def name(self, value): """Set parameter name. :param str value: name value. """ if isinstance(value, string_types): match = Parameter._PARAM_NAME_COMPILER_MATCHER(value) if match is None or match.group() != value: value = re_compile(value) self._name = value
[ "def", "name", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "match", "=", "Parameter", ".", "_PARAM_NAME_COMPILER_MATCHER", "(", "value", ")", "if", "match", "is", "None", "or", "match", ".", "group", "(", ")", "!=", "value", ":", "value", "=", "re_compile", "(", "value", ")", "self", ".", "_name", "=", "value" ]
Set parameter name. :param str value: name value.
[ "Set", "parameter", "name", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L354-L367
249,178
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.svalue
def svalue(self): """Get serialized value. :rtype: str """ result = self._svalue if result is None: # try to get svalue from value if svalue is None try: value = self.value except Parameter.Error: pass else: result = self._svalue = self.serializer(value) return result
python
def svalue(self): """Get serialized value. :rtype: str """ result = self._svalue if result is None: # try to get svalue from value if svalue is None try: value = self.value except Parameter.Error: pass else: result = self._svalue = self.serializer(value) return result
[ "def", "svalue", "(", "self", ")", ":", "result", "=", "self", ".", "_svalue", "if", "result", "is", "None", ":", "# try to get svalue from value if svalue is None", "try", ":", "value", "=", "self", ".", "value", "except", "Parameter", ".", "Error", ":", "pass", "else", ":", "result", "=", "self", ".", "_svalue", "=", "self", ".", "serializer", "(", "value", ")", "return", "result" ]
Get serialized value. :rtype: str
[ "Get", "serialized", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L380-L399
249,179
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.svalue
def svalue(self, value): """Change of serialized value. Nonify this value as well. :param str value: serialized value to use. """ if value is not None: # if value is not None self._value = None self._error = None self._svalue = value
python
def svalue(self, value): """Change of serialized value. Nonify this value as well. :param str value: serialized value to use. """ if value is not None: # if value is not None self._value = None self._error = None self._svalue = value
[ "def", "svalue", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "# if value is not None", "self", ".", "_value", "=", "None", "self", ".", "_error", "=", "None", "self", ".", "_svalue", "=", "value" ]
Change of serialized value. Nonify this value as well. :param str value: serialized value to use.
[ "Change", "of", "serialized", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L402-L414
249,180
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.resolve
def resolve(
        self, configurable=None, conf=None, scope=None, ptype=None,
        parser=None, error=True, svalue=None, safe=None, besteffort=None
):
    """Resolve this parameter value related to a configurable and a
    configuration.

    Save error in this attribute `error` in case of failure.

    :param str svalue: serialized value to resolve. Default is this svalue.
    :param Configurable configurable: configurable to use for foreign
        parameter resolution.
    :param Configuration conf: configuration to use for cross-value
        resolution.
    :param dict scope: variables to use for local expression evaluation.
    :param type ptype: return type. Default is this ptype.
    :param parser: specific parser to use. Default is this parser.
    :param bool error: raise an error if True (True by default).
    :param bool safe: if True (default) resolve without builtins functions.
    :param bool besteffort: best effort flag. Default is this besteffort.
    :return: newly resolved value.
    :raises: Parameter.Error for any raised exception.
    """
    result = self._value

    # if cached value is None and serialized value exists
    if self._value is None and self._svalue is not None:

        self._error = None  # nonify error.

        if ptype is None:
            ptype = self.ptype

        if parser is None:  # init parser
            parser = self.parser

        if svalue is None:
            svalue = self._svalue

        if conf is None:  # init conf
            conf = self.conf

        if configurable is None:  # init configurable
            configurable = self.configurable

        if scope is None:
            scope = self.scope

        else:
            scope, selfscope = self.scope.copy(), scope
            scope.update(selfscope)

        if safe is None:
            safe = self.safe

        if besteffort is None:
            besteffort = self.besteffort

        # parse value if str and if parser exists
        try:
            result = self._value = parser(
                svalue=svalue, conf=conf, configurable=configurable,
                ptype=ptype, scope=scope, safe=safe, besteffort=besteffort
            )

        except Exception as ex:
            self._error = ex

            if error:
                msg = 'Impossible to parse value ({0}) with {1}.'
                msg = msg.format(self._svalue, self.parser)
                reraise(Parameter.Error, Parameter.Error(msg))

    return result
python
def resolve(
        self, configurable=None, conf=None, scope=None, ptype=None,
        parser=None, error=True, svalue=None, safe=None, besteffort=None
):
    """Resolve this parameter value related to a configurable and a
    configuration.

    Save error in this attribute `error` in case of failure.

    :param str svalue: serialized value to resolve. Default is this svalue.
    :param Configurable configurable: configurable to use for foreign
        parameter resolution.
    :param Configuration conf: configuration to use for cross-value
        resolution.
    :param dict scope: variables to use for local expression evaluation.
    :param type ptype: return type. Default is this ptype.
    :param parser: specific parser to use. Default is this parser.
    :param bool error: raise an error if True (True by default).
    :param bool safe: if True (default) resolve without builtins functions.
    :param bool besteffort: best effort flag. Default is this besteffort.
    :return: newly resolved value.
    :raises: Parameter.Error for any raised exception.
    """
    result = self._value

    # if cached value is None and serialized value exists
    if self._value is None and self._svalue is not None:

        self._error = None  # nonify error.

        if ptype is None:
            ptype = self.ptype

        if parser is None:  # init parser
            parser = self.parser

        if svalue is None:
            svalue = self._svalue

        if conf is None:  # init conf
            conf = self.conf

        if configurable is None:  # init configurable
            configurable = self.configurable

        if scope is None:
            scope = self.scope

        else:
            scope, selfscope = self.scope.copy(), scope
            scope.update(selfscope)

        if safe is None:
            safe = self.safe

        if besteffort is None:
            besteffort = self.besteffort

        # parse value if str and if parser exists
        try:
            result = self._value = parser(
                svalue=svalue, conf=conf, configurable=configurable,
                ptype=ptype, scope=scope, safe=safe, besteffort=besteffort
            )

        except Exception as ex:
            self._error = ex

            if error:
                msg = 'Impossible to parse value ({0}) with {1}.'
                msg = msg.format(self._svalue, self.parser)
                reraise(Parameter.Error, Parameter.Error(msg))

    return result
[ "def", "resolve", "(", "self", ",", "configurable", "=", "None", ",", "conf", "=", "None", ",", "scope", "=", "None", ",", "ptype", "=", "None", ",", "parser", "=", "None", ",", "error", "=", "True", ",", "svalue", "=", "None", ",", "safe", "=", "None", ",", "besteffort", "=", "None", ")", ":", "result", "=", "self", ".", "_value", "# if cached value is None and serialiazed value exists", "if", "self", ".", "_value", "is", "None", "and", "self", ".", "_svalue", "is", "not", "None", ":", "self", ".", "_error", "=", "None", "# nonify error.", "if", "ptype", "is", "None", ":", "ptype", "=", "self", ".", "ptype", "if", "parser", "is", "None", ":", "# init parser", "parser", "=", "self", ".", "parser", "if", "svalue", "is", "None", ":", "svalue", "=", "self", ".", "_svalue", "if", "conf", "is", "None", ":", "# init conf", "conf", "=", "self", ".", "conf", "if", "configurable", "is", "None", ":", "# init configurable", "configurable", "=", "self", ".", "configurable", "if", "scope", "is", "None", ":", "scope", "=", "self", ".", "scope", "else", ":", "scope", ",", "selfscope", "=", "self", ".", "scope", ".", "copy", "(", ")", ",", "scope", "scope", ".", "update", "(", "selfscope", ")", "if", "safe", "is", "None", ":", "safe", "=", "self", ".", "safe", "if", "besteffort", "is", "None", ":", "besteffort", "=", "self", ".", "besteffort", "# parse value if str and if parser exists", "try", ":", "result", "=", "self", ".", "_value", "=", "parser", "(", "svalue", "=", "svalue", ",", "conf", "=", "conf", ",", "configurable", "=", "configurable", ",", "ptype", "=", "ptype", ",", "scope", "=", "scope", ",", "safe", "=", "safe", ",", "besteffort", "=", "besteffort", ")", "except", "Exception", "as", "ex", ":", "self", ".", "_error", "=", "ex", "if", "error", ":", "msg", "=", "'Impossible to parse value ({0}) with {1}.'", "msg", "=", "msg", ".", "format", "(", "self", ".", "_svalue", ",", "self", ".", "parser", ")", "reraise", "(", "Parameter", ".", "Error", ",", "Parameter", ".", "Error", "(", "msg", ")", ")", "return", "result" ]
Resolve this parameter value related to a configurable and a configuration.

Save error in this attribute `error` in case of failure.

:param str svalue: serialized value to resolve. Default is this svalue.
:param Configurable configurable: configurable to use for foreign parameter
    resolution.
:param Configuration conf: configuration to use for cross-value resolution.
:param dict scope: variables to use for local expression evaluation.
:param type ptype: return type. Default is this ptype.
:param parser: specific parser to use. Default is this parser.
:param bool error: raise an error if True (True by default).
:param bool safe: if True (default) resolve without builtins functions.
:param bool besteffort: best effort flag. Default is this besteffort.
:return: newly resolved value.
:raises: Parameter.Error for any raised exception.
[ "Resolve", "this", "parameter", "value", "related", "to", "a", "configurable", "and", "a", "configuration", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L416-L493
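Usage note (not part of the original record): the `svalue` setter above nonifies any cached value, so a later `resolve` call re-parses the serialized form. A minimal sketch; the `Parameter` constructor arguments are assumptions:

from b3j0f.conf.model.param import Parameter

param = Parameter('timeout', ptype=int)   # constructor signature assumed
param.svalue = '30'                       # clears the cached value and error
value = param.resolve(error=False)        # with error=False a failure returns None
print(value)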
249,181
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.value
def value(self):
    """Get parameter value.

    If this cached value is None and this serialized value is not None,
    calculate the new value from the serialized one.

    :return: parameter value.
    :raises: TypeError if serialized value is not an instance of self.ptype.
        ParserError if parsing step raised an error.
    """
    result = self._value

    if result is None and self._svalue is not None:
        try:
            result = self._value = self.resolve()

        except Exception:
            reraise(
                Parameter.Error,
                Parameter.Error('Call the method "resolve" first.')
            )

    return result
python
def value(self):
    """Get parameter value.

    If this cached value is None and this serialized value is not None,
    calculate the new value from the serialized one.

    :return: parameter value.
    :raises: TypeError if serialized value is not an instance of self.ptype.
        ParserError if parsing step raised an error.
    """
    result = self._value

    if result is None and self._svalue is not None:
        try:
            result = self._value = self.resolve()

        except Exception:
            reraise(
                Parameter.Error,
                Parameter.Error('Call the method "resolve" first.')
            )

    return result
[ "def", "value", "(", "self", ")", ":", "result", "=", "self", ".", "_value", "if", "result", "is", "None", "and", "self", ".", "_svalue", "is", "not", "None", ":", "try", ":", "result", "=", "self", ".", "_value", "=", "self", ".", "resolve", "(", ")", "except", "Exception", "as", "e", ":", "reraise", "(", "Parameter", ".", "Error", ",", "Parameter", ".", "Error", "(", "'Call the method \"resolve\" first.'", ")", ")", "return", "result" ]
Get parameter value.

If this cached value is None and this serialized value is not None,
calculate the new value from the serialized one.

:return: parameter value.
:raises: TypeError if serialized value is not an instance of self.ptype.
    ParserError if parsing step raised an error.
[ "Get", "parameter", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L496-L520
249,182
b3j0f/conf
b3j0f/conf/model/param.py
Parameter.value
def value(self, value):
    """Change of parameter value.

    If an error occurred, it is stored in this error attribute.

    :param value: new value to use. If the input value is not an instance of
        self.ptype, this error attribute is set and a TypeError is raised.
    :raises: TypeError if input value is not an instance of self.ptype.
    """
    if value is None or (
            self.ptype is None or isinstance(value, self.ptype)
    ):
        self._value = value

    else:
        # raise wrong type error
        error = TypeError(
            'Wrong value type of {0} ({1}). {2} expected.'.format(
                self.name, value, self.ptype
            )
        )
        self._error = error

        raise error
python
def value(self, value):
    """Change of parameter value.

    If an error occurred, it is stored in this error attribute.

    :param value: new value to use. If the input value is not an instance of
        self.ptype, this error attribute is set and a TypeError is raised.
    :raises: TypeError if input value is not an instance of self.ptype.
    """
    if value is None or (
            self.ptype is None or isinstance(value, self.ptype)
    ):
        self._value = value

    else:
        # raise wrong type error
        error = TypeError(
            'Wrong value type of {0} ({1}). {2} expected.'.format(
                self.name, value, self.ptype
            )
        )
        self._error = error

        raise error
[ "def", "value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "or", "(", "self", ".", "ptype", "is", "None", "or", "isinstance", "(", "value", ",", "self", ".", "ptype", ")", ")", ":", "self", ".", "_value", "=", "value", "else", ":", "# raise wrong type error", "error", "=", "TypeError", "(", "'Wrong value type of {0} ({1}). {2} expected.'", ".", "format", "(", "self", ".", "name", ",", "value", ",", "self", ".", "ptype", ")", ")", "self", ".", "_error", "=", "error", "raise", "error" ]
Change of parameter value.

If an error occurred, it is stored in this error attribute.

:param value: new value to use. If the input value is not an instance of
    self.ptype, this error attribute is set and a TypeError is raised.
:raises: TypeError if input value is not an instance of self.ptype.
[ "Change", "of", "parameter", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/param.py#L523-L546
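Usage note (not part of the original record): exercising the type check in the `value` setter above; the `Parameter` constructor arguments are assumptions:

from b3j0f.conf.model.param import Parameter

param = Parameter('timeout', ptype=int)   # constructor signature assumed
param.value = 30            # accepted: isinstance(30, int)
try:
    param.value = 'thirty'  # rejected: the error is stored on the parameter and raised
except TypeError as err:
    print(err)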
249,183
ericmjl/polcart
polcart/polcart.py
to_cartesian
def to_cartesian(r, theta, theta_units="radians"):
    """
    Converts polar r, theta to cartesian x, y.
    """
    assert theta_units in ['radians', 'degrees'],\
        "kwarg theta_units must be specified in radians or degrees"

    # Convert to radians
    if theta_units == "degrees":
        theta = to_radians(theta)

    theta = to_proper_radians(theta)
    x = r * cos(theta)
    y = r * sin(theta)

    return x, y
python
def to_cartesian(r, theta, theta_units="radians"):
    """
    Converts polar r, theta to cartesian x, y.
    """
    assert theta_units in ['radians', 'degrees'],\
        "kwarg theta_units must be specified in radians or degrees"

    # Convert to radians
    if theta_units == "degrees":
        theta = to_radians(theta)

    theta = to_proper_radians(theta)
    x = r * cos(theta)
    y = r * sin(theta)

    return x, y
[ "def", "to_cartesian", "(", "r", ",", "theta", ",", "theta_units", "=", "\"radians\"", ")", ":", "assert", "theta_units", "in", "[", "'radians'", ",", "'degrees'", "]", ",", "\"kwarg theta_units must specified in radians or degrees\"", "# Convert to radians", "if", "theta_units", "==", "\"degrees\"", ":", "theta", "=", "to_radians", "(", "theta", ")", "theta", "=", "to_proper_radians", "(", "theta", ")", "x", "=", "r", "*", "cos", "(", "theta", ")", "y", "=", "r", "*", "sin", "(", "theta", ")", "return", "x", ",", "y" ]
Converts polar r, theta to cartesian x, y.
[ "Converts", "polar", "r", "theta", "to", "cartesian", "x", "y", "." ]
1d003987f269c14884726205f871dd91de8610ce
https://github.com/ericmjl/polcart/blob/1d003987f269c14884726205f871dd91de8610ce/polcart/polcart.py#L5-L20
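Usage note (not part of the original record): converting a unit vector at 90 degrees; `math.isclose` absorbs the floating-point residue of cos(pi/2):

from math import isclose
from polcart.polcart import to_cartesian

x, y = to_cartesian(1.0, 90, theta_units="degrees")
assert isclose(x, 0.0, abs_tol=1e-9)
assert isclose(y, 1.0)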
249,184
rraadd88/meld
meld/dfs.py
set_index
def set_index(data,col_index): """ Sets the index if the index is not present :param data: pandas table :param col_index: column name which will be assigned as a index """ if col_index in data: data=data.reset_index().set_index(col_index) if 'index' in data: del data['index'] return data elif data.index.name==col_index: return data else: logging.error("something's wrong with the df") df2info(data)
python
def set_index(data,col_index): """ Sets the index if the index is not present :param data: pandas table :param col_index: column name which will be assigned as a index """ if col_index in data: data=data.reset_index().set_index(col_index) if 'index' in data: del data['index'] return data elif data.index.name==col_index: return data else: logging.error("something's wrong with the df") df2info(data)
[ "def", "set_index", "(", "data", ",", "col_index", ")", ":", "if", "col_index", "in", "data", ":", "data", "=", "data", ".", "reset_index", "(", ")", ".", "set_index", "(", "col_index", ")", "if", "'index'", "in", "data", ":", "del", "data", "[", "'index'", "]", "return", "data", "elif", "data", ".", "index", ".", "name", "==", "col_index", ":", "return", "data", "else", ":", "logging", ".", "error", "(", "\"something's wrong with the df\"", ")", "df2info", "(", "data", ")" ]
Sets the index if the index is not present :param data: pandas table :param col_index: column name which will be assigned as a index
[ "Sets", "the", "index", "if", "the", "index", "is", "not", "present" ]
e25aba1c07b2c775031224a8b55bf006ccb24dfd
https://github.com/rraadd88/meld/blob/e25aba1c07b2c775031224a8b55bf006ccb24dfd/meld/dfs.py#L18-L34
249,185
rraadd88/meld
meld/dfs.py
fhs2data_combo
def fhs2data_combo(fhs,cols,index,labels=None,col_sep=': '):
    """
    Collates data from multiple csv files

    :param fhs: list of paths to csv files
    :param cols: list of column names to concatenate
    :param index: name of the column to be used as the common index of the
        output pandas table
    """
    if labels is None:
        labels=[basename(fh) for fh in fhs]
    if len(fhs)>0:
        for fhi,fh in enumerate(fhs):
            label=labels[fhi]
            data=pd.read_csv(fh).set_index(index)
            if fhi==0:
                data_combo=pd.DataFrame(index=data.index)
                for col in cols:
                    data_combo.loc[:,'%s%s%s' % (label,col_sep,col)]=data.loc[:,col]
            else:
                for col in cols:
                    data_combo.loc[:,'%s%s%s' % (label,col_sep,col)]=data.loc[:,col]
        return data_combo
    else:
        logging.error('no fhs found: len(fhs)=0')
python
def fhs2data_combo(fhs,cols,index,labels=None,col_sep=': '):
    """
    Collates data from multiple csv files

    :param fhs: list of paths to csv files
    :param cols: list of column names to concatenate
    :param index: name of the column to be used as the common index of the
        output pandas table
    """
    if labels is None:
        labels=[basename(fh) for fh in fhs]
    if len(fhs)>0:
        for fhi,fh in enumerate(fhs):
            label=labels[fhi]
            data=pd.read_csv(fh).set_index(index)
            if fhi==0:
                data_combo=pd.DataFrame(index=data.index)
                for col in cols:
                    data_combo.loc[:,'%s%s%s' % (label,col_sep,col)]=data.loc[:,col]
            else:
                for col in cols:
                    data_combo.loc[:,'%s%s%s' % (label,col_sep,col)]=data.loc[:,col]
        return data_combo
    else:
        logging.error('no fhs found: len(fhs)=0')
[ "def", "fhs2data_combo", "(", "fhs", ",", "cols", ",", "index", ",", "labels", "=", "None", ",", "col_sep", "=", "': '", ")", ":", "if", "labels", "is", "None", ":", "labels", "=", "[", "basename", "(", "fh", ")", "for", "fh", "in", "fhs", "]", "if", "len", "(", "fhs", ")", ">", "0", ":", "for", "fhi", ",", "fh", "in", "enumerate", "(", "fhs", ")", ":", "label", "=", "labels", "[", "fhi", "]", "data", "=", "pd", ".", "read_csv", "(", "fh", ")", ".", "set_index", "(", "index", ")", "if", "fhi", "==", "0", ":", "data_combo", "=", "pd", ".", "DataFrame", "(", "index", "=", "data", ".", "index", ")", "for", "col", "in", "cols", ":", "data_combo", ".", "loc", "[", ":", ",", "'%s%s%s'", "%", "(", "label", ",", "col_sep", ",", "col", ")", "]", "=", "data", ".", "loc", "[", ":", ",", "col", "]", "else", ":", "for", "col", "in", "cols", ":", "data_combo", ".", "loc", "[", ":", ",", "'%s%s%s'", "%", "(", "label", ",", "col_sep", ",", "col", ")", "]", "=", "data", ".", "loc", "[", ":", ",", "col", "]", "return", "data_combo", "else", ":", "logging", ".", "error", "(", "'no fhs found: len(fhs)=0'", ")" ]
Collates data from multiple csv files

:param fhs: list of paths to csv files
:param cols: list of column names to concatenate
:param index: name of the column to be used as the common index of the output pandas table
[ "Collates", "data", "from", "multiple", "csv", "files" ]
e25aba1c07b2c775031224a8b55bf006ccb24dfd
https://github.com/rraadd88/meld/blob/e25aba1c07b2c775031224a8b55bf006ccb24dfd/meld/dfs.py#L93-L117
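Usage note (not part of the original record): collating one column from two hypothetical csv files; with the default `col_sep`, the output columns are prefixed with each label:

from meld.dfs import fhs2data_combo

combo = fhs2data_combo(
    fhs=["run1.csv", "run2.csv"],   # hypothetical csv paths
    cols=["score"],                 # columns to collate from each file
    index="gene",                   # index column shared by all files
)
print(list(combo.columns))          # ['run1.csv: score', 'run2.csv: score']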
249,186
DoWhileGeek/authentise-services
authentise_services/session.py
Session.__create_session
def __create_session(username=None, password=None): """grabs the configuration, and makes the call to Authentise to create the session""" config = Config() if not username or not password: username = config.username password = config.password payload = { "username": username, "password": password, } session_resp = requests.post("https://users.{}/sessions/".format(config.host), json=payload) if session_resp.status_code == 403: raise errors.ResourceError("bad user credentials") return session_resp.cookies["session"]
python
def __create_session(username=None, password=None): """grabs the configuration, and makes the call to Authentise to create the session""" config = Config() if not username or not password: username = config.username password = config.password payload = { "username": username, "password": password, } session_resp = requests.post("https://users.{}/sessions/".format(config.host), json=payload) if session_resp.status_code == 403: raise errors.ResourceError("bad user credentials") return session_resp.cookies["session"]
[ "def", "__create_session", "(", "username", "=", "None", ",", "password", "=", "None", ")", ":", "config", "=", "Config", "(", ")", "if", "not", "username", "or", "not", "password", ":", "username", "=", "config", ".", "username", "password", "=", "config", ".", "password", "payload", "=", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "}", "session_resp", "=", "requests", ".", "post", "(", "\"https://users.{}/sessions/\"", ".", "format", "(", "config", ".", "host", ")", ",", "json", "=", "payload", ")", "if", "session_resp", ".", "status_code", "==", "403", ":", "raise", "errors", ".", "ResourceError", "(", "\"bad user credentials\"", ")", "return", "session_resp", ".", "cookies", "[", "\"session\"", "]" ]
grabs the configuration, and makes the call to Authentise to create the session
[ "grabs", "the", "configuration", "and", "makes", "the", "call", "to", "Authentise", "to", "create", "the", "session" ]
ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/session.py#L18-L35
249,187
DoWhileGeek/authentise-services
authentise_services/session.py
Session.create_user
def create_user(cls, username, password, name, email):
    """utility class method to create a user"""
    config = Config()

    payload = {"username": username,
               "email": email,
               "name": name,
               "password": password, }

    user_creation_resp = requests.post("https://users.{}/users/".format(config.host), json=payload)

    if user_creation_resp.status_code != 201:
        raise errors.ResourceError("couldn't create user")
python
def create_user(cls, username, password, name, email):
    """utility class method to create a user"""
    config = Config()

    payload = {"username": username,
               "email": email,
               "name": name,
               "password": password, }

    user_creation_resp = requests.post("https://users.{}/users/".format(config.host), json=payload)

    if user_creation_resp.status_code != 201:
        raise errors.ResourceError("couldn't create user")
[ "def", "create_user", "(", "cls", ",", "username", ",", "password", ",", "name", ",", "email", ")", ":", "config", "=", "Config", "(", ")", "payload", "=", "{", "\"username\"", ":", "username", ",", "\"email\"", ":", "email", ",", "\"name\"", ":", "name", ",", "\"password\"", ":", "password", ",", "}", "user_creation_resp", "=", "requests", ".", "post", "(", "\"https://users.{}/users/\"", ".", "format", "(", "config", ".", "host", ")", ",", "json", "=", "payload", ")", "if", "user_creation_resp", ".", "status_code", "!=", "201", ":", "raise", "errors", ".", "ResourceError", "(", "\"couldnt create user\"", ")" ]
utility class method to create a user
[ "utility", "class", "method", "to", "create", "a", "user" ]
ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/session.py#L38-L48
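Usage note (not part of the original record): `create_user` is defined with `cls`, so it is presumably exposed as a classmethod on `Session`; the credentials below are hypothetical:

from authentise_services.session import Session

Session.create_user(username="newuser",
                    password="hunter2",
                    name="New User",
                    email="newuser@example.com")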
249,188
mush42/mezzanine-live-tile
mezzanine_live_tile/templatetags/tile_tags.py
wrap_text
def wrap_text(paragraph, line_count, min_char_per_line=0): """Wraps the given text to the specified number of lines.""" one_string = strip_all_white_space(paragraph) if min_char_per_line: lines = wrap(one_string, width=min_char_per_line) try: return lines[:line_count] except IndexError: return lines else: return wrap(one_string, len(one_string)/line_count)
python
def wrap_text(paragraph, line_count, min_char_per_line=0): """Wraps the given text to the specified number of lines.""" one_string = strip_all_white_space(paragraph) if min_char_per_line: lines = wrap(one_string, width=min_char_per_line) try: return lines[:line_count] except IndexError: return lines else: return wrap(one_string, len(one_string)/line_count)
[ "def", "wrap_text", "(", "paragraph", ",", "line_count", ",", "min_char_per_line", "=", "0", ")", ":", "one_string", "=", "strip_all_white_space", "(", "paragraph", ")", "if", "min_char_per_line", ":", "lines", "=", "wrap", "(", "one_string", ",", "width", "=", "min_char_per_line", ")", "try", ":", "return", "lines", "[", ":", "line_count", "]", "except", "IndexError", ":", "return", "lines", "else", ":", "return", "wrap", "(", "one_string", ",", "len", "(", "one_string", ")", "/", "line_count", ")" ]
Wraps the given text to the specified number of lines.
[ "Wraps", "the", "given", "text", "to", "the", "specified", "number", "of", "lines", "." ]
28dd6cb1af43f25c50e724f141b5dd00f4f166e7
https://github.com/mush42/mezzanine-live-tile/blob/28dd6cb1af43f25c50e724f141b5dd00f4f166e7/mezzanine_live_tile/templatetags/tile_tags.py#L15-L25
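Usage note (not part of the original record): when `min_char_per_line` is set, the text is wrapped at that width and truncated to `line_count` lines. Importing a Mezzanine template tag module may require configured Django settings:

from mezzanine_live_tile.templatetags.tile_tags import wrap_text

lines = wrap_text("a fairly long headline for a live tile", 2,
                  min_char_per_line=20)
print(lines)    # at most two lines, each about 20 characters wide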
249,189
neuroticnerd/armory
armory/serialize.py
jsonify
def jsonify(data, pretty=False, **kwargs): """Serialize Python objects to JSON with optional 'pretty' formatting Raises: TypeError: from :mod:`json` lib ValueError: from :mod:`json` lib JSONDecodeError: from :mod:`json` lib """ isod = isinstance(data, OrderedDict) params = { 'for_json': True, 'default': _complex_encode, } if pretty: params['indent'] = 2 params['sort_keys'] = False if isod else True params.update(kwargs) try: return json.dumps(data, ensure_ascii=False, **params) except UnicodeDecodeError: return json.dumps(data, **params)
python
def jsonify(data, pretty=False, **kwargs): """Serialize Python objects to JSON with optional 'pretty' formatting Raises: TypeError: from :mod:`json` lib ValueError: from :mod:`json` lib JSONDecodeError: from :mod:`json` lib """ isod = isinstance(data, OrderedDict) params = { 'for_json': True, 'default': _complex_encode, } if pretty: params['indent'] = 2 params['sort_keys'] = False if isod else True params.update(kwargs) try: return json.dumps(data, ensure_ascii=False, **params) except UnicodeDecodeError: return json.dumps(data, **params)
[ "def", "jsonify", "(", "data", ",", "pretty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "isod", "=", "isinstance", "(", "data", ",", "OrderedDict", ")", "params", "=", "{", "'for_json'", ":", "True", ",", "'default'", ":", "_complex_encode", ",", "}", "if", "pretty", ":", "params", "[", "'indent'", "]", "=", "2", "params", "[", "'sort_keys'", "]", "=", "False", "if", "isod", "else", "True", "params", ".", "update", "(", "kwargs", ")", "try", ":", "return", "json", ".", "dumps", "(", "data", ",", "ensure_ascii", "=", "False", ",", "*", "*", "params", ")", "except", "UnicodeDecodeError", ":", "return", "json", ".", "dumps", "(", "data", ",", "*", "*", "params", ")" ]
Serialize Python objects to JSON with optional 'pretty' formatting Raises: TypeError: from :mod:`json` lib ValueError: from :mod:`json` lib JSONDecodeError: from :mod:`json` lib
[ "Serialize", "Python", "objects", "to", "JSON", "with", "optional", "pretty", "formatting" ]
d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1
https://github.com/neuroticnerd/armory/blob/d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1/armory/serialize.py#L15-L35
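Usage note (not part of the original record): because of the `isod` check above, an `OrderedDict` keeps its insertion order under `pretty=True`, while a plain dict would be key-sorted:

from collections import OrderedDict
from armory.serialize import jsonify

data = OrderedDict([("b", 2), ("a", 1)])
print(jsonify(data, pretty=True))   # keys stay in b, a order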
249,190
soasme/rio-client
rio_client/transports/base.py
Transport.get_emit_api
def get_emit_api(self, action): """Build emit api.""" args = {'action': action} args.update(self.context) return ( '%(scheme)s://%(sender)s:%(token)s@%(domain)s:%(port)d' '/event/%(project)s/emit/%(action)s' % args )
python
def get_emit_api(self, action): """Build emit api.""" args = {'action': action} args.update(self.context) return ( '%(scheme)s://%(sender)s:%(token)s@%(domain)s:%(port)d' '/event/%(project)s/emit/%(action)s' % args )
[ "def", "get_emit_api", "(", "self", ",", "action", ")", ":", "args", "=", "{", "'action'", ":", "action", "}", "args", ".", "update", "(", "self", ".", "context", ")", "return", "(", "'%(scheme)s://%(sender)s:%(token)s@%(domain)s:%(port)d'", "'/event/%(project)s/emit/%(action)s'", "%", "args", ")" ]
Build emit api.
[ "Build", "emit", "api", "." ]
c6d684c6f9deea5b43f2b05bcaf40714c48b5619
https://github.com/soasme/rio-client/blob/c6d684c6f9deea5b43f2b05bcaf40714c48b5619/rio_client/transports/base.py#L17-L24
249,191
tmacwill/stellata
stellata/model.py
serialize
def serialize(data, format: str = 'json', pretty: bool = False): """Serialize a stellata object to a string format.""" def encode(obj): if isinstance(obj, stellata.model.Model): return obj.to_dict() elif isinstance(obj, datetime.datetime): return int(obj.timestamp()) elif isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj) elif hasattr(obj, 'serialize'): return obj.serialize() return obj if format == 'msgpack': return msgpack.packb(data, default=encode) if format == 'json': if pretty: return json.dumps(data, default=encode, indent=4) return json.dumps(data, default=encode) return data
python
def serialize(data, format: str = 'json', pretty: bool = False): """Serialize a stellata object to a string format.""" def encode(obj): if isinstance(obj, stellata.model.Model): return obj.to_dict() elif isinstance(obj, datetime.datetime): return int(obj.timestamp()) elif isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj) elif hasattr(obj, 'serialize'): return obj.serialize() return obj if format == 'msgpack': return msgpack.packb(data, default=encode) if format == 'json': if pretty: return json.dumps(data, default=encode, indent=4) return json.dumps(data, default=encode) return data
[ "def", "serialize", "(", "data", ",", "format", ":", "str", "=", "'json'", ",", "pretty", ":", "bool", "=", "False", ")", ":", "def", "encode", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "stellata", ".", "model", ".", "Model", ")", ":", "return", "obj", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "int", "(", "obj", ".", "timestamp", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "datetime", ".", "date", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "elif", "isinstance", "(", "obj", ",", "decimal", ".", "Decimal", ")", ":", "return", "float", "(", "obj", ")", "elif", "hasattr", "(", "obj", ",", "'serialize'", ")", ":", "return", "obj", ".", "serialize", "(", ")", "return", "obj", "if", "format", "==", "'msgpack'", ":", "return", "msgpack", ".", "packb", "(", "data", ",", "default", "=", "encode", ")", "if", "format", "==", "'json'", ":", "if", "pretty", ":", "return", "json", ".", "dumps", "(", "data", ",", "default", "=", "encode", ",", "indent", "=", "4", ")", "return", "json", ".", "dumps", "(", "data", ",", "default", "=", "encode", ")", "return", "data" ]
Serialize a stellata object to a string format.
[ "Serialize", "a", "stellata", "object", "to", "a", "string", "format", "." ]
9519c170397740eb6faf5d8a96b9a77f0d909b92
https://github.com/tmacwill/stellata/blob/9519c170397740eb6faf5d8a96b9a77f0d909b92/stellata/model.py#L191-L215
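Usage note (not part of the original record): the `encode` fallback above covers dates and decimals, so a plain dict with those types serializes cleanly:

import datetime
import decimal
from stellata.model import serialize

payload = {
    "when": datetime.date(2020, 1, 1),   # encoded via isoformat()
    "amount": decimal.Decimal("9.99"),   # encoded as a float
}
print(serialize(payload, format="json", pretty=True))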
249,192
dabercro/customdocs
customdocs/__init__.py
pretty_exe_doc
def pretty_exe_doc(program, parser, stack=1, under='-'):
    """
    Takes the name of a script and a parser that will give the help message for it.
    The module that called this function will then add a header to the docstring
    of the script, followed immediately by the help message generated by the
    OptionParser

    :param str program: Name of the program that we want to make the header
    :param optparser.Option parser: Either a parser or a callable with no arguments
                                    that will give the desired parser
    :param int stack: How far up the stack to get the docstring to change
    :param str under: The character you want for the program underline
    """

    if os.path.basename(sys.argv[0]) == 'sphinx-build':
        # Get the calling module
        mod = inspect.getmodule(inspect.stack()[stack][0])
        # Get parser
        _parser = parser() if '__call__' in dir(parser) else parser
        # Make the parser use the correct program
        _parser.set_usage(mod.__usage__.replace('%prog', program))
        # Modify docs by adding a header and usage
        mod.__doc__ = '\n'.join(['', program, under * len(program), '::', ''] +
                                [' %s' % l for l in _parser.format_help().split('\n')]) + \
            mod.__doc__
python
def pretty_exe_doc(program, parser, stack=1, under='-'):
    """
    Takes the name of a script and a parser that will give the help message for it.
    The module that called this function will then add a header to the docstring
    of the script, followed immediately by the help message generated by the
    OptionParser

    :param str program: Name of the program that we want to make the header
    :param optparser.Option parser: Either a parser or a callable with no arguments
                                    that will give the desired parser
    :param int stack: How far up the stack to get the docstring to change
    :param str under: The character you want for the program underline
    """

    if os.path.basename(sys.argv[0]) == 'sphinx-build':
        # Get the calling module
        mod = inspect.getmodule(inspect.stack()[stack][0])
        # Get parser
        _parser = parser() if '__call__' in dir(parser) else parser
        # Make the parser use the correct program
        _parser.set_usage(mod.__usage__.replace('%prog', program))
        # Modify docs by adding a header and usage
        mod.__doc__ = '\n'.join(['', program, under * len(program), '::', ''] +
                                [' %s' % l for l in _parser.format_help().split('\n')]) + \
            mod.__doc__
[ "def", "pretty_exe_doc", "(", "program", ",", "parser", ",", "stack", "=", "1", ",", "under", "=", "'-'", ")", ":", "if", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", "==", "'sphinx-build'", ":", "# Get the calling module", "mod", "=", "inspect", ".", "getmodule", "(", "inspect", ".", "stack", "(", ")", "[", "stack", "]", "[", "0", "]", ")", "# Get parser", "_parser", "=", "parser", "(", ")", "if", "'__call__'", "in", "dir", "(", "parser", ")", "else", "parser", "# Make the parser use the correct program", "_parser", ".", "set_usage", "(", "mod", ".", "__usage__", ".", "replace", "(", "'%prog'", ",", "program", ")", ")", "# Modify docs by adding a header and usate", "mod", ".", "__doc__", "=", "'\\n'", ".", "join", "(", "[", "''", ",", "program", ",", "under", "*", "len", "(", "program", ")", ",", "'::'", ",", "''", "]", "+", "[", "' %s'", "%", "l", "for", "l", "in", "_parser", ".", "format_help", "(", ")", ".", "split", "(", "'\\n'", ")", "]", ")", "+", "mod", ".", "__doc__" ]
Takes the name of a script and a parser that will give the help message for it. The module that called this function will then add a header to the docstring of the script, followed immediately by the help message generated by the OptionParser :param str program: Name of the program that we want to make the header :param optparser.Option parser: Either a parser or a callable with no arguments that will give the desired parser :param int stack: How far up the stack to get the docstring to change :param str under: The character you want for the program underline
[ "Takes", "the", "name", "of", "a", "script", "and", "a", "parser", "that", "will", "give", "the", "help", "message", "for", "it", ".", "The", "module", "that", "called", "this", "function", "will", "then", "add", "a", "header", "to", "the", "docstring", "of", "the", "script", "followed", "immediately", "by", "the", "help", "message", "generated", "by", "the", "OptionParser" ]
e8c46349ce40d9ac9dc6b5d93924c974c4ade21e
https://github.com/dabercro/customdocs/blob/e8c46349ce40d9ac9dc6b5d93924c974c4ade21e/customdocs/__init__.py#L90-L117
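Usage note (not part of the original record): `pretty_exe_doc` only acts under `sphinx-build` and reads `__usage__` from the calling module, so a script would wire it up roughly like this (names below are hypothetical):

from optparse import OptionParser

from customdocs import pretty_exe_doc

__usage__ = 'Usage: %prog [options]'   # read through mod.__usage__ above

def make_parser():
    parser = OptionParser(usage=__usage__)
    parser.add_option('--verbose', action='store_true')
    return parser

# no-op at runtime; rewrites this module's docstring under sphinx-build
pretty_exe_doc('my-script', make_parser)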
249,193
uw-it-aca/uw-restclients-kws
uw_kws/__init__.py
KWS.get_key
def get_key(self, key_id): """ Returns a restclients.Key object for the given key ID. If the key ID isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown. """ url = ENCRYPTION_KEY_URL.format(key_id) return self._key_from_json(self._get_resource(url))
python
def get_key(self, key_id): """ Returns a restclients.Key object for the given key ID. If the key ID isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown. """ url = ENCRYPTION_KEY_URL.format(key_id) return self._key_from_json(self._get_resource(url))
[ "def", "get_key", "(", "self", ",", "key_id", ")", ":", "url", "=", "ENCRYPTION_KEY_URL", ".", "format", "(", "key_id", ")", "return", "self", ".", "_key_from_json", "(", "self", ".", "_get_resource", "(", "url", ")", ")" ]
Returns a restclients.Key object for the given key ID. If the key ID isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Key", "object", "for", "the", "given", "key", "ID", ".", "If", "the", "key", "ID", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "KWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
072e5fed31e2b62a1b21eb6c19b975e760a39c7e
https://github.com/uw-it-aca/uw-restclients-kws/blob/072e5fed31e2b62a1b21eb6c19b975e760a39c7e/uw_kws/__init__.py#L29-L36
249,194
uw-it-aca/uw-restclients-kws
uw_kws/__init__.py
KWS.get_current_key
def get_current_key(self, resource_name): """ Returns a restclients.Key object for the given resource. If the resource isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown. """ url = ENCRYPTION_CURRENT_KEY_URL.format(resource_name) return self._key_from_json(self._get_resource(url))
python
def get_current_key(self, resource_name): """ Returns a restclients.Key object for the given resource. If the resource isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown. """ url = ENCRYPTION_CURRENT_KEY_URL.format(resource_name) return self._key_from_json(self._get_resource(url))
[ "def", "get_current_key", "(", "self", ",", "resource_name", ")", ":", "url", "=", "ENCRYPTION_CURRENT_KEY_URL", ".", "format", "(", "resource_name", ")", "return", "self", ".", "_key_from_json", "(", "self", ".", "_get_resource", "(", "url", ")", ")" ]
Returns a restclients.Key object for the given resource. If the resource isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Key", "object", "for", "the", "given", "resource", ".", "If", "the", "resource", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "KWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
072e5fed31e2b62a1b21eb6c19b975e760a39c7e
https://github.com/uw-it-aca/uw-restclients-kws/blob/072e5fed31e2b62a1b21eb6c19b975e760a39c7e/uw_kws/__init__.py#L38-L45
249,195
uw-it-aca/uw-restclients-kws
uw_kws/__init__.py
KWS._key_from_json
def _key_from_json(self, data): """ Internal method, for creating the Key object. """ key = Key() key.algorithm = data["Algorithm"] key.cipher_mode = data["CipherMode"] key.expiration = datetime.strptime(data["Expiration"].split(".")[0], "%Y-%m-%dT%H:%M:%S") key.key_id = data["ID"] key.key = data["Key"] key.size = data["KeySize"] key.url = data["KeyUrl"] return key
python
def _key_from_json(self, data):
    """
    Internal method, for creating the Key object.
    """
    key = Key()
    key.algorithm = data["Algorithm"]
    key.cipher_mode = data["CipherMode"]
    key.expiration = datetime.strptime(data["Expiration"].split(".")[0],
                                       "%Y-%m-%dT%H:%M:%S")
    key.key_id = data["ID"]
    key.key = data["Key"]
    key.size = data["KeySize"]
    key.url = data["KeyUrl"]
    return key
[ "def", "_key_from_json", "(", "self", ",", "data", ")", ":", "key", "=", "Key", "(", ")", "key", ".", "algorithm", "=", "data", "[", "\"Algorithm\"", "]", "key", ".", "cipher_mode", "=", "data", "[", "\"CipherMode\"", "]", "key", ".", "expiration", "=", "datetime", ".", "strptime", "(", "data", "[", "\"Expiration\"", "]", ".", "split", "(", "\".\"", ")", "[", "0", "]", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")", "key", ".", "key_id", "=", "data", "[", "\"ID\"", "]", "key", ".", "key", "=", "data", "[", "\"Key\"", "]", "key", ".", "size", "=", "data", "[", "\"KeySize\"", "]", "key", ".", "url", "=", "data", "[", "\"KeyUrl\"", "]", "return", "key" ]
Internal method, for creating the Key object.
[ "Internal", "method", "for", "creating", "the", "Key", "object", "." ]
072e5fed31e2b62a1b21eb6c19b975e760a39c7e
https://github.com/uw-it-aca/uw-restclients-kws/blob/072e5fed31e2b62a1b21eb6c19b975e760a39c7e/uw_kws/__init__.py#L47-L60
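The split(".") in _key_from_json exists because KWS expiration timestamps may carry fractional seconds that the "%Y-%m-%dT%H:%M:%S" format cannot parse. A self-contained sketch of that step, with hypothetical payload values:

# All payload values here are assumptions for illustration.
from datetime import datetime

data = {
    "Algorithm": "AES",
    "CipherMode": "CBC",
    "Expiration": "2025-01-01T12:00:00.1234567",
    "ID": "abc123",
    "Key": "base64-key-material",
    "KeySize": 256,
    "KeyUrl": "https://example.edu/key/v1/encryption/key/abc123",
}

# Truncate fractional seconds, then parse, mirroring the method above.
expiration = datetime.strptime(data["Expiration"].split(".")[0],
                               "%Y-%m-%dT%H:%M:%S")
print(expiration)  # 2025-01-01 12:00:00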
249,196
jmgilman/Neolib
neolib/http/Page.py
Page.newSession
def newSession():
    """ Returns a new Requests session with pre-loaded default HTTP Headers

    Generates a new Requests session and consults with the Configuration
    class to determine if a Configuration exists and attempts to use the
    configured HTTP Request headers first. If this fails, it attempts to
    create a new default configuration and use those values. Finally, if a
    configuration cannot be initialized, it uses the hard-coded Mozilla
    headers.

    Returns
       request-client - The configured Requests session

    Raises
       HTTPException
    """
    from neolib.config.Configuration import Configuration

    s = requests.session()
    if not Configuration.loaded():
        if not Configuration.initialize():
            s.headers.update(Page._defaultVars)
        else:
            s.headers.update(
                Configuration.getConfig().core.HTTPHeaders.toDict())
    else:
        s.headers.update(
            Configuration.getConfig().core.HTTPHeaders.toDict())

    # Return the configured session; the original returned a fresh
    # requests.session(), silently discarding the headers set above.
    return s
python
def newSession():
    """ Returns a new Requests session with pre-loaded default HTTP Headers

    Generates a new Requests session and consults with the Configuration
    class to determine if a Configuration exists and attempts to use the
    configured HTTP Request headers first. If this fails, it attempts to
    create a new default configuration and use those values. Finally, if a
    configuration cannot be initialized, it uses the hard-coded Mozilla
    headers.

    Returns
       request-client - The configured Requests session

    Raises
       HTTPException
    """
    from neolib.config.Configuration import Configuration

    s = requests.session()
    if not Configuration.loaded():
        if not Configuration.initialize():
            s.headers.update(Page._defaultVars)
        else:
            s.headers.update(
                Configuration.getConfig().core.HTTPHeaders.toDict())
    else:
        s.headers.update(
            Configuration.getConfig().core.HTTPHeaders.toDict())

    # Return the configured session; the original returned a fresh
    # requests.session(), silently discarding the headers set above.
    return s
[ "def", "newSession", "(", ")", ":", "from", "neolib", ".", "config", ".", "Configuration", "import", "Configuration", "s", "=", "requests", ".", "session", "(", ")", "if", "not", "Configuration", ".", "loaded", "(", ")", ":", "if", "not", "Configuration", ".", "initialize", "(", ")", ":", "s", ".", "headers", ".", "update", "(", "Page", ".", "_defaultVars", ")", "else", ":", "s", ".", "headers", ".", "update", "(", "Configuration", ".", "getConfig", "(", ")", ".", "core", ".", "HTTPHeaders", ".", "toDict", "(", ")", ")", "else", ":", "s", ".", "headers", ".", "update", "(", "Configuration", ".", "getConfig", "(", ")", ".", "core", ".", "HTTPHeaders", ".", "toDict", "(", ")", ")", "return", "requests", ".", "session", "(", ")" ]
Returns a new Requests session with pre-loaded default HTTP Headers

Generates a new Requests session and consults with the Configuration class
to determine if a Configuration exists and attempts to use the configured
HTTP Request headers first. If this fails, it attempts to create a new
default configuration and use those values. Finally, if a configuration
cannot be initialized, it uses the hard-coded Mozilla headers.

Returns
   request-client - The configured Requests session

Raises
   HTTPException
[ "Returns", "a", "new", "Requests", "session", "with", "pre", "-", "loaded", "default", "HTTP", "Headers", "Generates", "a", "new", "Requests", "session", "and", "consults", "with", "the", "Configuration", "class", "to", "determine", "if", "a", "Configuration", "exists", "and", "attempts", "to", "use", "the", "configured", "HTTP", "Request", "headers", "first", ".", "If", "this", "fails", "it", "attempts", "to", "create", "a", "new", "default", "configuration", "and", "use", "those", "values", ".", "Finally", "if", "a", "configuration", "cannot", "be", "initiaized", "it", "uses", "the", "hard", "-", "coded", "Mozilla", "headers", ".", "Returns", "request", "-", "client", "-", "The", "configured", "Requests", "session", "Raises", "HTTPException" ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/http/Page.py#L124-L150
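A minimal usage sketch for the corrected newSession above (which now returns the configured session s rather than a fresh one). The target URL is an assumption for illustration.

# Hypothetical usage of Page.newSession; the URL is illustrative only.
from neolib.http.Page import Page

s = Page.newSession()          # session with configured or default headers
resp = s.get("http://www.neopets.com/")
print(resp.status_code, s.headers.get("User-Agent"))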
249,197
treycucco/bidon
bidon/db/access/model_access.py
get_model_id_constraints
def get_model_id_constraints(model):
    """Returns constraints to target a specific model."""
    pkname = model.primary_key_name
    pkey = model.primary_key
    return get_id_constraints(pkname, pkey)
python
def get_model_id_constraints(model):
    """Returns constraints to target a specific model."""
    pkname = model.primary_key_name
    pkey = model.primary_key
    return get_id_constraints(pkname, pkey)
[ "def", "get_model_id_constraints", "(", "model", ")", ":", "pkname", "=", "model", ".", "primary_key_name", "pkey", "=", "model", ".", "primary_key", "return", "get_id_constraints", "(", "pkname", ",", "pkey", ")" ]
Returns constraints to target a specific model.
[ "Returns", "constraints", "to", "target", "a", "specific", "model", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L208-L212
249,198
treycucco/bidon
bidon/db/access/model_access.py
get_id_constraints
def get_id_constraints(pkname, pkey):
    """Returns primary key constraints.

    :pkname: if a string, returns a dict with pkname=pkey. pkname and pkey
    must be enumerables of matching length.
    """
    if isinstance(pkname, str):
        return {pkname: pkey}
    else:
        return dict(zip(pkname, pkey))
python
def get_id_constraints(pkname, pkey):
    """Returns primary key constraints.

    :pkname: if a string, returns a dict with pkname=pkey. pkname and pkey
    must be enumerables of matching length.
    """
    if isinstance(pkname, str):
        return {pkname: pkey}
    else:
        return dict(zip(pkname, pkey))
[ "def", "get_id_constraints", "(", "pkname", ",", "pkey", ")", ":", "if", "isinstance", "(", "pkname", ",", "str", ")", ":", "return", "{", "pkname", ":", "pkey", "}", "else", ":", "return", "dict", "(", "zip", "(", "pkname", ",", "pkey", ")", ")" ]
Returns primary key constraints.

:pkname: if a string, returns a dict with pkname=pkey. pkname and pkey
must be enumerables of matching length.
[ "Returns", "primary", "key", "consraints", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L215-L224
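A quick demonstration of the two constraint shapes produced by get_id_constraints (and, by delegation, get_model_id_constraints): a one-entry dict for a single-column key, and a positional zip for composite keys. The column names and values are hypothetical.

from bidon.db.access.model_access import get_id_constraints

# Single-column primary key: a one-entry dict.
assert get_id_constraints("id", 42) == {"id": 42}

# Composite primary key: names and values are zipped positionally, so the
# two sequences must be the same length.
assert get_id_constraints(("org_id", "user_id"), (7, 42)) == {
    "org_id": 7, "user_id": 42}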
249,199
treycucco/bidon
bidon/db/access/model_access.py
ModelAccess._find_model
def _find_model(self, constructor, table_name, constraints=None, *,
                columns=None, order_by=None):
    """Calls DataAccess.find and passes the results to the given constructor."""
    data = self.find(table_name, constraints, columns=columns,
                     order_by=order_by)
    return constructor(data) if data else None
python
def _find_model(self, constructor, table_name, constraints=None, *,
                columns=None, order_by=None):
    """Calls DataAccess.find and passes the results to the given constructor."""
    data = self.find(table_name, constraints, columns=columns,
                     order_by=order_by)
    return constructor(data) if data else None
[ "def", "_find_model", "(", "self", ",", "constructor", ",", "table_name", ",", "constraints", "=", "None", ",", "*", ",", "columns", "=", "None", ",", "order_by", "=", "None", ")", ":", "data", "=", "self", ".", "find", "(", "table_name", ",", "constraints", ",", "columns", "=", "columns", ",", "order_by", "=", "order_by", ")", "return", "constructor", "(", "data", ")", "if", "data", "else", "None" ]
Calls DataAccess.find and passes the results to the given constructor.
[ "Calls", "DataAccess", ".", "find", "and", "passes", "the", "results", "to", "the", "given", "constructor", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L11-L14
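A hedged sketch of how a public accessor might wrap the protected _find_model above. The User tuple, the table name, and the assumption that DataAccess.find returns a plain row tuple are all illustrative, not taken from bidon.

from collections import namedtuple

User = namedtuple("User", ("id", "name"))  # hypothetical model

def find_user(model_access, user_id):
    # Returns a User built from the matching row, or None when
    # DataAccess.find yields no data.
    return model_access._find_model(
        lambda row: User(*row),   # constructor applied to the row data
        "users",                  # hypothetical table name
        {"id": user_id},          # id constraints, as a dict
        columns=("id", "name"))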