text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Get a key from server, returning the value and its CAS key.
<END_TASK>
<USER_TASK:>
Description:
def gets(self, key):
    """
    Get a key from server, returning the value and its CAS key.

    This method is for API compatibility with other implementations.

    :param key: Key's name
    :type key: six.string_types
    :return: (value, cas key) pair, or (None, None) if the value is not in cache.
    :rtype: object
    """
    # Ask each backend server in turn; the first server holding the key wins.
    for backend in self.servers:
        data, cas_key = backend.get(key)
        if data is not None:
            return data, cas_key
    return None, None
<SYSTEM_TASK:>
Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it
<END_TASK>
<USER_TASK:>
Description:
def set_result(self, rval: bool) -> None:
    """ Set the result of the evaluation.  A true result prunes all children whose
    own evaluation failed.

    :param rval: Result of evaluation
    """
    self.result = rval
    if not rval:
        return
    # Keep only the children that evaluated successfully.
    survivors = [child for child in self.nodes if child.result]
    self.nodes = survivors
<SYSTEM_TASK:>
Return the shape expression in the schema referenced by selector, if any
<END_TASK>
<USER_TASK:>
Description:
def reference_of(selector: shapeLabel, cntxt: Union[Context, ShExJ.Schema] ) -> Optional[ShExJ.shapeExpr]:
    """ Return the shape expression in the schema referenced by selector, if any

    :param cntxt: Context node or ShEx Schema
    :param selector: identifier of element to select within the schema
    :return: matching shape expression, or None when nothing in the schema carries that id
    """
    # Accept either a Context wrapper or a raw ShExJ schema.
    schema = cntxt.schema if isinstance(cntxt, Context) else cntxt
    if selector is START:
        return schema.start
    for expr in schema.shapes:
        # ShapeExternal entries are skipped -- presumably they carry no locally resolvable id (TODO confirm)
        if not isinstance(expr, ShExJ.ShapeExternal) and expr.id == selector:
            return expr
    # Fall back to the start shape when its own id matches the selector.
    return schema.start if schema.start is not None and schema.start.id == selector else None
<SYSTEM_TASK:>
Search for the label in a Schema
<END_TASK>
<USER_TASK:>
Description:
def triple_reference_of(label: ShExJ.tripleExprLabel, cntxt: Context) -> Optional[ShExJ.tripleExpr]:
    """ Search for the label in a Schema

    :param label: triple expression label to locate
    :param cntxt: context carrying the schema to search
    :return: matching triple expression, or None if not found
    """
    # Search the start shape first, then scan the remaining shapes until a match appears.
    found: Optional[ShExJ.tripleExpr] = None
    if cntxt.schema.start is not None:
        found = triple_in_shape(cntxt.schema.start, label, cntxt)
    if found is None:
        for candidate in cntxt.schema.shapes:
            found = triple_in_shape(candidate, label, cntxt)
            if found:
                break
    return found
<SYSTEM_TASK:>
Search for the label in a shape expression
<END_TASK>
<USER_TASK:>
Description:
def triple_in_shape(expr: ShExJ.shapeExpr, label: ShExJ.tripleExprLabel, cntxt: Context) \
        -> Optional[ShExJ.tripleExpr]:
    """ Search for the label in a shape expression

    :param expr: shape expression to search
    :param label: triple expression label being sought
    :param cntxt: evaluation context, used to resolve shape references
    :return: matching triple expression, or None if not found
    """
    te = None
    if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
        # Composite shape: search each operand, stopping at the first match.
        for expr2 in expr.shapeExprs:
            te = triple_in_shape(expr2, label, cntxt)
            if te is not None:
                break
    elif isinstance(expr, ShExJ.ShapeNot):
        te = triple_in_shape(expr.shapeExpr, label, cntxt)
    elif isinstance(expr, ShExJ.shapeExprLabel):
        # Reference to another shape: resolve it and continue the search there.
        se = reference_of(expr, cntxt)
        if se is not None:
            te = triple_in_shape(se, label, cntxt)
    # NOTE(review): plain ShExJ.Shape instances fall through and return None -- confirm this is intended.
    return te
<SYSTEM_TASK:>
Partition a list of integers into a list of partitions
<END_TASK>
<USER_TASK:>
Description:
def integer_partition(size: int, nparts: int) -> Iterator[List[List[int]]]:
    """ Partition a list of integers into a list of partitions

    :param size: number of integers (0..size-1) to partition
    :param nparts: number of partitions
    :return: iterator over partitions
    """
    # Delegate directly to the algorithm-U implementation.
    yield from algorithm_u(range(size), nparts)
<SYSTEM_TASK:>
simple literal denotes a plain literal with no language tag.
<END_TASK>
<USER_TASK:>
Description:
def is_simple_literal(n: Node) -> bool:
    """ simple literal denotes a plain literal with no language tag. """
    # NOTE(review): despite the name, this first requires is_typed_literal() to hold while both the
    # datatype and language are absent -- presumably is_typed_literal merely checks "is a Literal";
    # confirm against its definition.
    return is_typed_literal(n) and cast(Literal, n).datatype is None and cast(Literal, n).language is None
<SYSTEM_TASK:>
Conditionally print txt
<END_TASK>
<USER_TASK:>
Description:
def print(self, txt: str, hold: bool=False) -> None:
    """ Conditionally print txt

    :param txt: text to print
    :param hold: If true, hang on to the text until another print comes through at this depth.
        If no later print intervenes, both the held text and this text are dropped.
    :return: None
    """
    if hold:
        # Defer output -- it is only emitted if a later print arrives at this trace depth.
        self.held_prints[self.trace_depth] = txt
    elif self.held_prints[self.trace_depth]:
        # presumably held_prints is a defaultdict -- a plain dict would raise KeyError here; TODO confirm
        if self.max_print_depth > self.trace_depth:
            # A deeper print already happened, so flush the held line before this one.
            print(self.held_prints[self.trace_depth])
            print(txt)
            self.max_print_depth = self.trace_depth
        del self.held_prints[self.trace_depth]
    else:
        print(txt)
        self.max_print_depth = self.trace_depth
<SYSTEM_TASK:>
Generate the schema_id_map
<END_TASK>
<USER_TASK:>
Description:
def _gen_schema_xref(self, expr: Optional[Union[ShExJ.shapeExprLabel, ShExJ.shapeExpr]]) -> None:
    """
    Generate the schema_id_map

    :param expr: root shape expression
    """
    # Register this expression under its absolute id (bare labels carry no definition to register).
    if expr is not None and not isinstance_(expr, ShExJ.shapeExprLabel) and 'id' in expr and expr.id is not None:
        abs_id = self._resolve_relative_uri(expr.id)
        if abs_id not in self.schema_id_map:
            self.schema_id_map[abs_id] = expr
    # Recurse into composite shape expressions.
    if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
        for expr2 in expr.shapeExprs:
            self._gen_schema_xref(expr2)
    elif isinstance(expr, ShExJ.ShapeNot):
        self._gen_schema_xref(expr.shapeExpr)
    elif isinstance(expr, ShExJ.Shape):
        # Plain shapes hand off to the triple-expression cross-reference generator.
        if expr.expression is not None:
            self._gen_te_xref(expr.expression)
<SYSTEM_TASK:>
Return the triple expression that corresponds to id
<END_TASK>
<USER_TASK:>
Description:
def tripleExprFor(self, id_: ShExJ.tripleExprLabel) -> Optional[ShExJ.tripleExpr]:
    """ Return the triple expression that corresponds to id

    :param id_: label of the triple expression to look up
    :return: the matching triple expression, or None when the label is unknown
    """
    # dict.get returns None for unknown labels, hence the Optional return annotation.
    return self.te_id_map.get(id_)
<SYSTEM_TASK:>
Return the shape expression that corresponds to id
<END_TASK>
<USER_TASK:>
Description:
def shapeExprFor(self, id_: Union[ShExJ.shapeExprLabel, START]) -> Optional[ShExJ.shapeExpr]:
    """ Return the shape expression that corresponds to id

    :param id_: shape expression label, or the START sentinel for the schema's start shape
    :return: matching shape expression, or None when the id is unknown
    """
    # START selects the schema's start shape; anything else is resolved through the id map.
    if id_ is START:
        return self.schema.start
    return self.schema_id_map.get(str(id_))
<SYSTEM_TASK:>
Visit expr and all of its "descendant" shapes.
<END_TASK>
<USER_TASK:>
Description:
def visit_shapes(self, expr: ShExJ.shapeExpr, f: Callable[[Any, ShExJ.shapeExpr, "Context"], None], arg_cntxt: Any,
                 visit_center: _VisitorCenter = None, follow_inner_shapes: bool=True) -> None:
    """
    Visit expr and all of its "descendant" shapes.

    :param expr: root shape expression
    :param f: visitor function
    :param arg_cntxt: accompanying context for the visitor function
    :param visit_center: Recursive visit context.  (Not normally supplied on an external call)
    :param follow_inner_shapes: Follow nested shapes or just visit on outer level
    """
    if visit_center is None:
        visit_center = _VisitorCenter(f, arg_cntxt)
    has_id = getattr(expr, 'id', None) is not None
    # Cycle guard: skip shapes already visited or currently on the active visit stack.
    if not has_id or not (visit_center.already_seen_shape(expr.id)
                          or visit_center.actively_visiting_shape(expr.id)):

        # Visit the root expression
        if has_id:
            visit_center.start_visiting_shape(expr.id)
        f(arg_cntxt, expr, self)

        # Traverse the expression and visit its components
        if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
            for expr2 in expr.shapeExprs:
                self.visit_shapes(expr2, f, arg_cntxt, visit_center, follow_inner_shapes=follow_inner_shapes)
        elif isinstance(expr, ShExJ.ShapeNot):
            self.visit_shapes(expr.shapeExpr, f, arg_cntxt, visit_center, follow_inner_shapes=follow_inner_shapes)
        elif isinstance(expr, ShExJ.Shape):
            if expr.expression is not None and follow_inner_shapes:
                # Triple expressions may reference further shapes; _visit_shape_te bridges back here.
                self.visit_triple_expressions(expr.expression,
                                              lambda ac, te, cntxt: self._visit_shape_te(te, visit_center),
                                              arg_cntxt,
                                              visit_center)
        elif isinstance_(expr, ShExJ.shapeExprLabel):
            # A bare label: resolve it and visit the referenced shape, again guarding against recursion.
            if not visit_center.actively_visiting_shape(str(expr)) and follow_inner_shapes:
                visit_center.start_visiting_shape(str(expr))
                self.visit_shapes(self.shapeExprFor(expr), f, arg_cntxt, visit_center)
                visit_center.done_visiting_shape(str(expr))
        if has_id:
            visit_center.done_visiting_shape(expr.id)
<SYSTEM_TASK:>
Visit a shape expression that was reached through a triple expression. This, in turn, is used to visit
<END_TASK>
<USER_TASK:>
Description:
def _visit_te_shape(self, shape: ShExJ.shapeExpr, visit_center: _VisitorCenter) -> None:
    """
    Visit a shape expression that was reached through a triple expression.  This, in turn, is used
    to visit additional triple expressions that are referenced by the Shape.

    :param shape: Shape reached through triple expression traverse
    :param visit_center: context used in shape visitor
    """
    # Only concrete Shapes with an expression have anything further to visit.
    if not isinstance(shape, ShExJ.Shape):
        return
    if shape.expression is None:
        return
    visit_center.f(visit_center.arg_cntxt, shape.expression, self)
<SYSTEM_TASK:>
Move the type identifiers to the end of the object for print purposes
<END_TASK>
<USER_TASK:>
Description:
def type_last(self, obj: JsonObj) -> JsonObj:
    """ Move the type identifiers to the end of the object for print purposes

    :param obj: JSON object to reorder
    :return: a new JsonObj with 'type' emitted last; '_context' and None values are dropped
    """
    def _tl_list(v: List) -> List:
        # Recursively process nested objects and lists, dropping None entries.
        return [self.type_last(e) if isinstance(e, JsonObj)
                else _tl_list(e) if isinstance(e, list) else e for e in v if e is not None]

    rval = JsonObj()
    for k in as_dict(obj).keys():
        v = obj[k]
        # 'type' is re-added at the end; '_context' is dropped entirely.
        if v is not None and k not in ('type', '_context'):
            rval[k] = _tl_list(v) if isinstance(v, list) else self.type_last(v) if isinstance(v, JsonObj) else v
    if 'type' in obj and obj.type:
        rval.type = obj.type
    return rval
<SYSTEM_TASK:>
Se is a ShapeExternal and implementation-specific mechanisms not defined in this specification indicate
<END_TASK>
<USER_TASK:>
Description:
def satisfiesExternal(cntxt: Context, n: Node, se: ShExJ.ShapeExternal, c: DebugContext) -> bool:
    """ Se is a ShapeExternal and implementation-specific mechanisms not defined in this specification
    indicate success.

    :param cntxt: evaluation context
    :param n: node being evaluated
    :param se: the external shape
    :param c: debug context
    :return: True if the externally supplied shape satisfies n, False otherwise
    """
    if c.debug:
        print(f"id: {se.id}")
    # Resolve the shape through the context's implementation-specific external-shape hook.
    extern_shape = cntxt.external_shape_for(se.id)
    if extern_shape:
        return satisfies(cntxt, n, extern_shape)
    cntxt.fail_reason = f"{se.id}: Shape is not in Schema"
    return False
<SYSTEM_TASK:>
Set the RDF DataSet to be evaluated. If ``rdf`` is a string, the presence of a return is the
<END_TASK>
<USER_TASK:>
Description:
def rdf(self, rdf: Optional[Union[str, Graph]]) -> None:
    """ Set the RDF DataSet to be evaluated.  If ``rdf`` is a string, the presence of a return is the
    indicator that it is text instead of a location.

    :param rdf: File name, URL, or representation of rdflib Graph
    """
    if isinstance(rdf, Graph):
        self.g = rdf
    else:
        # None leaves an empty graph; strings are classified below.
        self.g = Graph()
        if isinstance(rdf, str):
            if '\n' in rdf or '\r' in rdf:
                # Embedded newline: treat the string as serialized RDF text.
                self.g.parse(data=rdf, format=self.rdf_format)
            elif ':' in rdf:
                # Scheme separator present: treat as a URL / IRI location.
                self.g.parse(location=rdf, format=self.rdf_format)
            else:
                # Otherwise assume a plain file path.
                self.g.parse(source=rdf, format=self.rdf_format)
<SYSTEM_TASK:>
Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema.
<END_TASK>
<USER_TASK:>
Description:
def schema(self, shex: Optional[Union[str, ShExJ.Schema]]) -> None:
    """ Set the schema to be used.  Schema can either be a ShExC or ShExJ string or a pre-parsed schema.

    :param shex: schema text, file name / URL, or pre-parsed ShExJ Schema
    :raises ValueError: if the schema text is empty or cannot be parsed
    """
    self.pfx = None
    if shex is not None:
        if isinstance(shex, ShExJ.Schema):
            # Pre-parsed schema -- use it directly (no prefix library can be derived).
            self._schema = shex
        else:
            shext = shex.strip()
            # Fix: an empty/whitespace-only string previously raised IndexError at shext[0].
            if not shext:
                raise ValueError("Empty schema")
            loader = SchemaLoader()
            # Multi-line text, or text starting with a ShExC/ShExJ lead-in character, is inline
            # schema source; anything else is assumed to be a file name or URL.
            if '\n' in shex or '\r' in shex or shext[0] in '#<_: ':
                self._schema = loader.loads(shex)
            else:
                # shex is known to be a str here -- the former isinstance ternary was dead code.
                self._schema = loader.load(shex)
            if self._schema is None:
                raise ValueError("Unable to parse shex file")
            self.pfx = PrefixLibrary(loader.schema_text)
<SYSTEM_TASK:>
Convert path, which can be a URL or a file path into a base URI
<END_TASK>
<USER_TASK:>
Description:
def generate_base(path: str) -> str:
    """ Convert path, which can be a URL or a file path, into a base URI

    :param path: file location or url
    :return: file location or url sans actual name, always ending in '/'
    """
    if ':' not in path:
        # Plain file path: drop the final component, if any.
        head = os.path.split(path)[0] if '/' in path else ''
        return head + '/'
    # Scheme-qualified path (URL): rebuild it with the last path segment removed.
    parts = urlparse(path)
    trimmed = os.path.split(parts.path)[0] if '/' in parts.path else ''
    return urlunparse(parts._replace(path=trimmed)) + '/'
<SYSTEM_TASK:>
Reads data from socket.
<END_TASK>
<USER_TASK:>
Description:
def _read_socket(self, size):
"""
Reads data from socket.
:param size: Size in bytes to be read.
:return: Data from socket
""" |
value = b''
while len(value) < size:
data = self.connection.recv(size - len(value))
if not data:
break
value += data
# If we got less data than we requested, the server disconnected.
if len(value) < size:
raise socket.error()
return value |
<SYSTEM_TASK:>
Get memcached response from socket.
<END_TASK>
<USER_TASK:>
Description:
def _get_response(self):
    """
    Get memcached response from socket.

    :return: A tuple with binary values from memcached:
        (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque, cas, extra_content).
        On connection failure a synthetic 'server_disconnected' tuple is returned with the
        error message in the extra_content slot.
    :rtype: tuple
    """
    try:
        self._open_connection()
        if self.connection is None:
            # The connection wasn't opened, which means we're deferring a reconnection attempt.
            # Raise a socket.error, so we'll return the same server_disconnected message as we
            # do below.
            raise socket.error('Delaying reconnection attempt')
        header = self._read_socket(self.HEADER_SIZE)
        (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
            cas) = struct.unpack(self.HEADER_STRUCT, header)
        # Sanity check: the packet must be a response, not a request.
        assert magic == self.MAGIC['response']
        extra_content = None
        if bodylen:
            extra_content = self._read_socket(bodylen)
        return (magic, opcode, keylen, extlen, datatype, status, bodylen,
                opaque, cas, extra_content)
    except socket.error as e:
        self._connection_error(e)
        # Synthesize a disconnect reply in the same tuple shape:
        # (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque, cas, extra_content)
        message = str(e)
        return (self.MAGIC['response'], -1, 0, 0, 0, self.STATUS['server_disconnected'], 0, 0, 0, message)
<SYSTEM_TASK:>
Authenticate user on server.
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, username, password):
    """
    Authenticate user on server.

    :param username: Username used to be authenticated.
    :type username: six.string_types
    :param password: Password used to be authenticated.
    :type password: six.string_types
    :return: True if successful.
    :raises: InvalidCredentials, AuthenticationNotSupported, MemcachedException
    :rtype: bool
    """
    # Remember the credentials, then force a reconnect so they take effect.
    self._username, self._password = username, password
    self.disconnect()
    self._open_connection()
    return self.authenticated
<SYSTEM_TASK:>
Serializes a value based on its type.
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, value, compress_level=-1):
    """
    Serializes a value based on its type.

    :param value: Something to be serialized
    :type value: six.string_types, int, long, object
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: (flags, serialized bytes) pair
    :rtype: tuple
    """
    flags = 0
    if isinstance(value, binary_type):
        # Raw bytes pass through untouched.
        flags |= self.FLAGS['binary']
    elif isinstance(value, text_type):
        value = value.encode('utf8')
    elif isinstance(value, int) and isinstance(value, bool) is False:
        # bool is a subclass of int, so it is deliberately excluded here and pickled below.
        flags |= self.FLAGS['integer']
        value = str(value)
    elif isinstance(value, long) and isinstance(value, bool) is False:
        # NOTE(review): `long` only exists on Python 2 -- presumably aliased for Python 3 elsewhere; confirm.
        flags |= self.FLAGS['long']
        value = str(value)
    else:
        # Everything else is pickled.
        flags |= self.FLAGS['object']
        buf = BytesIO()
        pickler = self.pickler(buf, self.pickle_protocol)
        pickler.dump(value)
        value = buf.getvalue()
    # Compress only when enabled and the payload exceeds the threshold.
    if compress_level != 0 and len(value) > self.COMPRESSION_THRESHOLD:
        if compress_level is not None and compress_level > 0:
            # Use the specified compression level.
            compressed_value = self.compression.compress(value, compress_level)
        else:
            # Use the default compression level.
            compressed_value = self.compression.compress(value)
        # Use the compressed value only if it is actually smaller.
        if compressed_value and len(compressed_value) < len(value):
            value = compressed_value
            flags |= self.FLAGS['compressed']
    return flags, value
<SYSTEM_TASK:>
Deserialized values based on flags or just return it if it is not serialized.
<END_TASK>
<USER_TASK:>
Description:
def deserialize(self, value, flags):
    """
    Deserialized values based on flags or just return it if it is not serialized.

    :param value: Serialized or not value.
    :type value: six.string_types, int
    :param flags: Value flags
    :type flags: int
    :return: Deserialized value
    :rtype: six.string_types|int
    """
    FLAGS = self.FLAGS
    # Decompress first -- compression is applied on top of any other encoding.
    if flags & FLAGS['compressed']:  # pragma: no branch
        value = self.compression.decompress(value)
    if flags & FLAGS['binary']:
        return value
    if flags & FLAGS['integer']:
        return int(value)
    elif flags & FLAGS['long']:
        # NOTE(review): `long` only exists on Python 2 -- presumably aliased for Python 3 elsewhere; confirm.
        return long(value)
    elif flags & FLAGS['object']:
        buf = BytesIO(value)
        unpickler = self.unpickler(buf)
        return unpickler.load()
    if six.PY3:
        return value.decode('utf8')
    # In Python 2, mimic the behavior of the json library: return a str
    # unless the value contains unicode characters.
    # in Python 2, if value is a binary (e.g struct.pack("<Q") then decode will fail
    try:
        value.decode('ascii')
    except UnicodeDecodeError:
        try:
            return value.decode('utf8')
        except UnicodeDecodeError:
            return value
    else:
        return value
<SYSTEM_TASK:>
Set multiple keys with its values on server.
<END_TASK>
<USER_TASK:>
Description:
def set_multi(self, mappings, time=100, compress_level=-1):
    """
    Set multiple keys with its values on server.

    If a key is a (key, cas) tuple, insert as if cas(key, value, cas) had
    been called.

    :param mappings: A dict with keys/values
    :type mappings: dict
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True if every store succeeded, False otherwise
    :rtype: bool
    """
    mappings = mappings.items()
    msg = []

    for key, value in mappings:
        if isinstance(key, tuple):
            # (key, cas) form: unpack the CAS token.
            key, cas = key
        else:
            cas = None

        if cas == 0:
            # Like cas(), if the cas value is 0, treat it as compare-and-set against not
            # existing.
            command = 'addq'
        else:
            command = 'setq'

        flags, value = self.serialize(value, compress_level=compress_level)
        # Extras are 8 bytes (flags + expiration); the quiet commands suppress per-key replies.
        m = struct.pack(self.HEADER_STRUCT +
                        self.COMMANDS[command]['struct'] % (len(key), len(value)),
                        self.MAGIC['request'],
                        self.COMMANDS[command]['command'],
                        len(key),
                        8, 0, 0, len(key) + len(value) + 8, 0, cas or 0,
                        flags, time, str_to_bytes(key), value)
        msg.append(m)

    # Terminate the pipelined batch with a no-op so the server flushes any error replies.
    m = struct.pack(self.HEADER_STRUCT +
                    self.COMMANDS['noop']['struct'],
                    self.MAGIC['request'],
                    self.COMMANDS['noop']['command'],
                    0, 0, 0, 0, 0, 0, 0)
    msg.append(m)

    if six.PY2:
        msg = ''.join(msg)
    else:
        msg = b''.join(msg)

    self._send(msg)

    opcode = -1
    retval = True
    # Drain responses until the no-op reply arrives; any failure flips the return value.
    while opcode != self.COMMANDS['noop']['command']:
        (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
            cas, extra_content) = self._get_response()
        if status != self.STATUS['success']:
            retval = False
        if status == self.STATUS['server_disconnected']:
            break

    return retval
<SYSTEM_TASK:>
Function which increments and decrements.
<END_TASK>
<USER_TASK:>
Description:
def _incr_decr(self, command, key, value, default, time):
    """
    Function which increments and decrements.

    :param command: Protocol command name, either 'incr' or 'decr'.
    :param key: Key's name
    :type key: six.string_types
    :param value: Number to be (de|in)cremented
    :type value: int
    :param default: Default value if key does not exist.
    :type default: int
    :param time: Time in seconds to expire key.
    :type time: int
    :return: Actual value of the key on server, or 0 if the server disconnected
    :rtype: int
    :raises MemcachedException: on any non-success, non-disconnect status
    """
    # Negative expirations are clamped to the protocol maximum.
    time = time if time >= 0 else self.MAXIMUM_EXPIRE_TIME
    # Extras total 20 bytes: delta value (8), default (8), expiration (4).
    self._send(struct.pack(self.HEADER_STRUCT +
                           self.COMMANDS[command]['struct'] % len(key),
                           self.MAGIC['request'],
                           self.COMMANDS[command]['command'],
                           len(key),
                           20, 0, 0, len(key) + 20, 0, 0, value,
                           default, time, str_to_bytes(key)))
    (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
        cas, extra_content) = self._get_response()
    if status not in (self.STATUS['success'], self.STATUS['server_disconnected']):
        raise MemcachedException('Code: %d Message: %s' % (status, extra_content), status)
    if status == self.STATUS['server_disconnected']:
        return 0
    # The updated counter comes back as a network-order unsigned 64-bit integer.
    return struct.unpack('!Q', extra_content)[0]
<SYSTEM_TASK:>
Increment a key, if it exists, returns its actual value, if it doesn't, return 0.
<END_TASK>
<USER_TASK:>
Description:
def incr(self, key, value, default=0, time=1000000):
    """
    Increment a key, if it exists, returns its actual value, if it doesn't, return 0.

    :param key: Key's name
    :type key: six.string_types
    :param value: Number to be incremented
    :type value: int
    :param default: Default value if key does not exist.
    :type default: int
    :param time: Time in seconds to expire key.
    :type time: int
    :return: Actual value of the key on server
    :rtype: int
    """
    # Thin wrapper over the shared increment/decrement implementation.
    return self._incr_decr('incr', key, value, default, time)
<SYSTEM_TASK:>
Decrement a key, if it exists, returns its actual value, if it doesn't, return 0.
<END_TASK>
<USER_TASK:>
Description:
def decr(self, key, value, default=0, time=100):
    """
    Decrement a key, if it exists, returns its actual value, if it doesn't, return 0.
    Minimum value of decrement return is 0.

    :param key: Key's name
    :type key: six.string_types
    :param value: Number to be decremented
    :type value: int
    :param default: Default value if key does not exist.
    :type default: int
    :param time: Time in seconds to expire key.
    :type time: int
    :return: Actual value of the key on server
    :rtype: int
    """
    # Thin wrapper over the shared increment/decrement implementation.
    return self._incr_decr('decr', key, value, default, time)
<SYSTEM_TASK:>
Delete multiple keys from server in one command.
<END_TASK>
<USER_TASK:>
Description:
def delete_multi(self, keys):
    """
    Delete multiple keys from server in one command.

    :param keys: A list of keys to be deleted
    :type keys: list
    :return: True in case of success and False in case of failure.
    :rtype: bool
    """
    logger.debug('Deleting keys %r', keys)
    if six.PY2:
        msg = ''
    else:
        msg = b''
    # Pipeline one delete request per key...
    for key in keys:
        msg += struct.pack(
            self.HEADER_STRUCT +
            self.COMMANDS['delete']['struct'] % len(key),
            self.MAGIC['request'],
            self.COMMANDS['delete']['command'],
            len(key), 0, 0, 0, len(key), 0, 0, str_to_bytes(key))
    # ...then a trailing no-op so the server flushes any error replies.
    msg += struct.pack(
        self.HEADER_STRUCT +
        self.COMMANDS['noop']['struct'],
        self.MAGIC['request'],
        self.COMMANDS['noop']['command'],
        0, 0, 0, 0, 0, 0, 0)
    self._send(msg)
    opcode = -1
    retval = True
    # Read until the no-op reply; any non-success status marks the batch as failed.
    while opcode != self.COMMANDS['noop']['command']:
        (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
            cas, extra_content) = self._get_response()
        if status != self.STATUS['success']:
            retval = False
        if status == self.STATUS['server_disconnected']:
            break
    return retval
<SYSTEM_TASK:>
Add a ShExC schema to the library
<END_TASK>
<USER_TASK:>
Description:
def add_shex(self, schema: str) -> "PrefixLibrary":
    """ Add a ShExC schema to the library

    :param schema: ShExC schema text, URL or file name
    :return: prefix library object
    """
    # Whitespace anywhere in the string means inline schema text; otherwise treat it as a location.
    if '\n' in schema or '\r' in schema or ' ' in schema:
        shex = schema
    else:
        shex = load_shex_file(schema)

    for line in shex.split('\n'):
        line = line.strip()
        # Try the ShExC PREFIX form first, then the Turtle @prefix form.
        m = re.match(r'PREFIX\s+(\S+):\s+<(\S+)>', line)
        if not m:
            m = re.match(r"@prefix\s+(\S+):\s+<(\S+)>\s+\.", line)
        if m:
            # Each prefix is exposed as an upper-case attribute on the library.
            setattr(self, m.group(1).upper(), Namespace(m.group(2)))
    return self
<SYSTEM_TASK:>
Add bindings in the library to the graph
<END_TASK>
<USER_TASK:>
Description:
def add_bindings(self, g: Graph) -> "PrefixLibrary":
    """ Add bindings in the library to the graph

    :param g: graph to add prefixes to
    :return: PrefixLibrary object
    """
    # Iterating the library yields (prefix, namespace) pairs; graph prefixes are lower case.
    for pfx, ns in self:
        g.bind(pfx.lower(), ns)
    return self
<SYSTEM_TASK:>
Load a ShEx Schema from schema_location
<END_TASK>
<USER_TASK:>
Description:
def load(self, schema_file: Union[str, TextIO], schema_location: Optional[str]=None) -> ShExJ.Schema:
    """ Load a ShEx Schema from schema_location

    :param schema_file: name or file-like object to deserialize
    :param schema_location: URL or file name of schema.  Used to create the base_location
    :return: ShEx Schema represented by schema_location
    """
    if isinstance(schema_file, str):
        # Apply any configured location rewrites before fetching the text.
        schema_file = self.location_rewrite(schema_file)
        self.schema_text = load_shex_file(schema_file)
    else:
        self.schema_text = schema_file.read()

    # Root location priority: explicit base_location, then the directory of schema_location.
    if self.base_location:
        self.root_location = self.base_location
    elif schema_location:
        self.root_location = os.path.dirname(schema_location) + '/'
    else:
        self.root_location = None
    return self.loads(self.schema_text)
<SYSTEM_TASK:>
Parse and return schema as a ShExJ Schema
<END_TASK>
<USER_TASK:>
Description:
def loads(self, schema_txt: str) -> ShExJ.Schema:
    """ Parse and return schema as a ShExJ Schema

    :param schema_txt: ShExC or ShExJ representation of a ShEx Schema
    :return: ShEx Schema representation of schema
    """
    self.schema_text = schema_txt
    # A leading '{' marks ShExJ (JSON); anything else is parsed as ShExC.
    # NOTE(review): an empty/whitespace-only schema_txt raises IndexError here -- confirm callers prevent it.
    if schema_txt.strip()[0] == '{':
        # TODO: figure out how to propagate self.base_location into this parse
        return cast(ShExJ.Schema, loads(schema_txt, ShExJ))
    else:
        return generate_shexj.parse(schema_txt, self.base_location)
<SYSTEM_TASK:>
Replace the value in the JSON string.
<END_TASK>
<USER_TASK:>
Description:
def update_json(self, json_string, expr, value, index=0):
    """
    Replace the value in the JSON string.

    *Args:*\n
    _json_string_ - JSON string;\n
    _expr_ - JSONPath expression for determining the value to be replaced;\n
    _value_ - the value to be replaced with;\n
    _index_ - index for selecting item within a match list, default value is 0;\n

    *Returns:*\n
    Changed JSON in dictionary format.

    *Example:*\n
    | *Settings* | *Value* |
    | Library | JsonValidator |
    | Library | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_update}= | Update_json | ${json_example} | $..color | changed |
    """
    load_input_json = self.string_to_json(json_string)
    matches = self._json_path_search(load_input_json, expr)

    # Fix: an empty match list (or out-of-range index) previously raised a bare IndexError
    # before the intended JsonValidatorError could be reported.
    try:
        datum_object = matches[int(index)]
    except IndexError:
        raise JsonValidatorError("Nothing found by the given json-path")

    if not isinstance(datum_object, DatumInContext):
        raise JsonValidatorError("Nothing found by the given json-path")

    path = datum_object.path

    # Edit the structure in place using the matched datum's context.
    # If the match addresses a list element...
    if isinstance(path, Index):
        datum_object.context.value[datum_object.path.index] = value
    # ...or a field of an object (string, bool, integer or complex value)
    elif isinstance(path, Fields):
        datum_object.context.value[datum_object.path.fields[0]] = value

    return load_input_json
<SYSTEM_TASK:>
Template tag to sanitize string values. It accepts lists of
<END_TASK>
<USER_TASK:>
Description:
def escape_html(value, allowed_tags=[], allowed_attributes=[],
                allowed_styles=[]):
    """
    Template tag to sanitize string values.  Accepts lists of allowed tags,
    attributes or styles in comma separated string or list format, e.g.::

        {% load sanitizer %}
        {% escape_html '<a href="">bar</a> <script>alert("baz")</script>' 'a,img' 'href,src' %}

    Disallowed markup is escaped rather than removed (``strip=False``).
    On django 1.4 keyword arguments may also be used:
    {% escape_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}
    """
    # Non-string values pass through untouched.
    if not isinstance(value, basestring):
        return value
    return bleach.clean(value, tags=allowed_tags,
                        attributes=allowed_attributes,
                        styles=allowed_styles, strip=False)
<SYSTEM_TASK:>
Template tag to strip html from string values. It accepts lists of
<END_TASK>
<USER_TASK:>
Description:
def strip_html(value, allowed_tags=[], allowed_attributes=[],
               allowed_styles=[]):
    """
    Template tag to strip html from string values.  Accepts lists of allowed
    tags, attributes or styles in comma separated string or list format, e.g.::

        {% load sanitizer %}
        {% strip_html '<a href="">bar</a> <script>alert("baz")</script>' 'a,img' 'href,src' %}

    Disallowed markup is removed entirely (``strip=True``).
    On django 1.4 keyword arguments may also be used:
    {% strip_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}
    """
    # Non-string values pass through untouched.
    if not isinstance(value, basestring):
        return value
    return bleach.clean(value, tags=allowed_tags,
                        attributes=allowed_attributes,
                        styles=allowed_styles, strip=True)
<SYSTEM_TASK:>
Returns the input string with profanity replaced with a random string
<END_TASK>
<USER_TASK:>
Description:
def censor(input_text):
    """ Returns the input string with profanity replaced with a random string
    of characters plucked from the censor_characters pool.
    """
    censored = input_text
    for word in get_words():
        # Replace every case-insensitive occurrence with a same-length censor string.
        pattern = re.compile(re.escape(word), re.IGNORECASE)
        replacement = "".join(get_censor_char() for _ in word)
        censored = pattern.sub(replacement, censored)
    return censored
<SYSTEM_TASK:>
Add new sentence to generate dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def update_dicts(self, sentence):
    """Add new sentence to generate dictionaries.

    :param sentence: A list of strings representing the sentence.
    """
    # Feed the sentence into the counting closure, then invalidate the cached dictionaries
    # so they are regenerated on the next get_dicts() call.
    self.dict_generator(sentence=sentence)
    self.word_dict = None
    self.char_dict = None
<SYSTEM_TASK:>
Set with custom dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def set_dicts(self, word_dict, char_dict):
    """Set with custom dictionaries.

    :param word_dict: The word dictionary.
    :param char_dict: The character dictionary.
    """
    # Overrides anything produced by the internal dictionary generator.
    self.word_dict, self.char_dict = word_dict, char_dict
<SYSTEM_TASK:>
Get word and character dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def get_dicts(self):
    """Get word and character dictionaries.

    :return word_dict, char_dict: the cached or freshly generated dictionaries.
    """
    # Generate lazily on first access; update_dicts()/set_dicts() reset or replace the cache.
    if self.word_dict is None:
        word_dict, char_dict, max_len = self.dict_generator(return_dict=True)
        self.word_dict = word_dict
        self.char_dict = char_dict
        self.max_word_len = max_len
    return self.word_dict, self.char_dict
<SYSTEM_TASK:>
Get word and character dictionaries from sentences.
<END_TASK>
<USER_TASK:>
Description:
def get_dicts_generator(word_min_freq=4,
                        char_min_freq=2,
                        word_ignore_case=False,
                        char_ignore_case=False):
    """Get word and character dictionaries from sentences.

    :param word_min_freq: The minimum frequency of a word.
    :param char_min_freq: The minimum frequency of a character.
    :param word_ignore_case: Word will be transformed to lower case before saving to dictionary.
    :param char_ignore_case: Character will be transformed to lower case before saving to dictionary.
    :return gen: A closure that accepts sentences and returns the dictionaries.
    """
    word_count, char_count = {}, {}

    def get_dicts(sentence=None,
                  return_dict=False):
        """Update counts with `sentence`; build and return the dictionaries when `return_dict` is True.

        :param sentence: A list of strings representing the sentence.
        :param return_dict: Returns the dictionaries if it is True.
        :return word_dict, char_dict, max_word_len:
        """
        if sentence is not None:
            for word in sentence:
                if not word:
                    continue
                w_key = word.lower() if word_ignore_case else word
                word_count[w_key] = word_count.get(w_key, 0) + 1
                # Characters are counted from the original word (case folding applied per char).
                for ch in word:
                    c_key = ch.lower() if char_ignore_case else ch
                    char_count[c_key] = char_count.get(c_key, 0) + 1
        if not return_dict:
            return None
        # Index 0 is reserved for padding (''); index 1 for out-of-vocabulary tokens.
        word_dict, char_dict = {'': 0, '<UNK>': 1}, {'': 0, '<UNK>': 1}
        max_word_len = 0
        for word, count in word_count.items():
            if count >= word_min_freq:
                word_dict[word] = len(word_dict)
                max_word_len = max(max_word_len, len(word))
        for ch, count in char_count.items():
            if count >= char_min_freq:
                char_dict[ch] = len(char_dict)
        return word_dict, char_dict, max_word_len

    return get_dicts
<SYSTEM_TASK:>
A naive function that extracts English words from raw texts.
<END_TASK>
<USER_TASK:>
Description:
def get_word_list_eng(text):
    """A naive function that extracts English words from raw texts.

    Runs of ASCII letters become words; runs of anything else become a single
    token with the spaces removed (dropped entirely when only spaces remain).

    :param text: The raw text.
    :return words: A list of strings.
    """
    def _is_eng(ch):
        # ASCII letters only -- non-ASCII alphabetics are deliberately excluded.
        return 'a' <= ch <= 'z' or 'A' <= ch <= 'Z'

    words = []
    current = ''
    pos, length = 0, len(text)
    while pos < length:
        # Consume a run of ASCII letters as one word.
        while pos < length and _is_eng(text[pos]):
            current += text[pos]
            pos += 1
        if current:
            words.append(current)
            current = ''
        # Consume a run of non-letters, keeping everything except spaces.
        while pos < length and not _is_eng(text[pos]):
            if text[pos] != ' ':
                current += text[pos]
            pos += 1
        if current:
            words.append(current)
            current = ''
    return words
<SYSTEM_TASK:>
Load pre-trained embeddings from a text file.
<END_TASK>
<USER_TASK:>
Description:
def get_embedding_weights_from_file(word_dict, file_path, ignore_case=False):
    """Load pre-trained embeddings from a text file.

    Each line in the file should look like this::

        word feature_dim_1 feature_dim_2 ... feature_dim_n

    The `feature_dim_i` should be a floating point number.

    :param word_dict: A dict that maps words to indices.
    :param file_path: The location of the text file containing the pre-trained embeddings.
    :param ignore_case: Whether ignoring the case of the words.
    :return weights: A numpy array.
    """
    pre_trained = {}
    with codecs.open(file_path, 'r', 'utf8') as reader:
        for raw_line in reader:
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            tokens = raw_line.split()
            key = tokens[0].lower() if ignore_case else tokens[0]
            pre_trained[key] = [float(t) for t in tokens[1:]]
    # Dimensionality is taken from an arbitrary entry; all lines are assumed consistent.
    embd_dim = len(next(iter(pre_trained.values())))
    # One row per index; unknown words get random vectors, the padding word ('') stays all-zero.
    weights = [[0.0] * embd_dim for _ in range(max(word_dict.values()) + 1)]
    for word, index in word_dict.items():
        if not word:
            continue
        if ignore_case:
            word = word.lower()
        if word in pre_trained:
            weights[index] = pre_trained[word]
        else:
            weights[index] = numpy.random.random((embd_dim,)).tolist()
    return numpy.asarray(weights)
<SYSTEM_TASK:>
Returns the backend by name or raises KeyError
<END_TASK>
<USER_TASK:>
Description:
def get_backend(name):
    """Returns the backend by name or raises KeyError"""
    match = next((b for b in _BACKENDS if b.NAME == name), None)
    if match is None:
        raise KeyError("Backend %r not available" % name)
    return match
<SYSTEM_TASK:>
Prints debug information for various public objects like methods,
<END_TASK>
<USER_TASK:>
Description:
def pprint(obj, file_=None):
    """Prints debug information for various public objects like methods,
    functions, constructors etc.

    :param obj: the object to dump (a generated function/method with a
        ``_code`` attribute, or a class with cached ``_constructors``)
    :param file_: a writable file-like object; defaults to sys.stdout
    :raises TypeError: if the object is of an unsupported type
    """
    if file_ is None:
        file_ = sys.stdout

    # functions, methods: dump their generated code
    if callable(obj) and hasattr(obj, "_code"):
        obj._code.pprint(file_)
        return

    # classes: dump every cached constructor
    if isinstance(obj, type) and hasattr(obj, "_constructors"):
        constructors = obj._constructors
        for names, func in sorted(constructors.items()):
            func._code.pprint(file_)
        return

    # fixed typo in the error message: was "unkown type"
    raise TypeError("unknown type")
<SYSTEM_TASK:>
Returns a list of child property values for the given names.
<END_TASK>
<USER_TASK:>
Description:
def child_get(self, child, *prop_names):
    """Returns a list of child property values for the given names."""
    values = []
    for prop_name in prop_names:
        values.append(self.child_get_property(child, prop_name))
    return values
<SYSTEM_TASK:>
Creates a tag and adds it to the tag table of the TextBuffer.
<END_TASK>
<USER_TASK:>
Description:
def create_tag(self, tag_name=None, **properties):
    """Create a Gtk.TextTag and register it with this buffer's tag table.

    :param str tag_name:
        Name of the new tag, or None for an anonymous tag
    :param **properties:
        Keyword list of properties and their values
    :returns:
        A new tag, owned by the buffer's tag table.

    This is equivalent to creating a Gtk.TextTag and then adding the tag to
    the buffer's tag table. If ``tag_name`` is not None, a tag called
    ``tag_name`` must not already exist in the tag table for this buffer.

    Properties are passed as a keyword list of names and values (e.g.
    foreground='DodgerBlue', weight=Pango.Weight.BOLD)
    """
    new_tag = Gtk.TextTag(name=tag_name, **properties)
    self._get_or_create_tag_table().add(new_tag)
    return new_tag
<SYSTEM_TASK:>
Set the value of the child model
<END_TASK>
<USER_TASK:>
Description:
def set_value(self, iter, column, value):
    """Set the value of the child model"""
    # translate the iter into the child model's coordinate space first
    child_iter = self.convert_iter_to_child_iter(iter)
    self.get_model().set_value(child_iter, column, value)
<SYSTEM_TASK:>
Returns the module or raises ForeignError
<END_TASK>
<USER_TASK:>
Description:
def get_foreign_module(namespace):
    """Returns the module or raises ForeignError"""
    try:
        module = _MODULES[namespace]
    except KeyError:
        # first request for this namespace: try to import it and cache
        # the outcome (None is cached for failed imports)
        try:
            module = importlib.import_module("." + namespace, __package__)
        except ImportError:
            module = None
        _MODULES[namespace] = module
    if module is None:
        raise ForeignError("Foreign %r structs not supported" % namespace)
    return module
<SYSTEM_TASK:>
Returns a ForeignStruct implementation or raises ForeignError
<END_TASK>
<USER_TASK:>
Description:
def get_foreign_struct(namespace, name):
    """Returns a ForeignStruct implementation or raises ForeignError"""
    # make sure the backing module is imported (raises if unsupported)
    get_foreign_module(namespace)
    try:
        struct = ForeignStruct.get(namespace, name)
    except KeyError:
        raise ForeignError("Foreign %s.%s not supported" % (namespace, name))
    return struct
<SYSTEM_TASK:>
Raises ImportError if the specified foreign module isn't supported or
<END_TASK>
<USER_TASK:>
Description:
def require_foreign(namespace, symbol=None):
    """Raises ImportError if the specified foreign module isn't supported or
    the needed dependencies aren't installed.

    e.g.: check_foreign('cairo', 'Context')
    """
    try:
        if symbol is not None:
            get_foreign_struct(namespace, symbol)
        else:
            get_foreign_module(namespace)
    except ForeignError as e:
        # surface the failure as an ImportError, as documented
        raise ImportError(e)
<SYSTEM_TASK:>
Create a GVariant object from given format and argument list.
<END_TASK>
<USER_TASK:>
Description:
def _create(self, format, args):
    """Create a GVariant object from given format and argument list.

    This method recursively calls itself for complex structures (arrays,
    dictionaries, boxed).

    Returns a tuple (variant, rest_format, rest_args) with the generated
    GVariant, the remainder of the format string, and the remainder of the
    arguments.

    If args is None, then this won't actually consume any arguments, and
    just parse the format string and generate empty GVariant structures.
    This is required for creating empty dictionaries or arrays.
    """
    # leaves (simple types): dispatch on the first format character
    constructor = self._LEAF_CONSTRUCTORS.get(format[0])
    if constructor:
        if args is not None:
            if not args:
                raise TypeError('not enough arguments for GVariant format string')
            v = constructor(args[0])
            # consume one format character and one argument
            return (v, format[1:], args[1:])
        else:
            # parse-only mode: consume the format character, build nothing
            return (None, format[1:], None)

    if format[0] == '(':
        return self._create_tuple(format, args)

    # dict must be checked before plain array: 'a{' is a prefix of both
    if format.startswith('a{'):
        return self._create_dict(format, args)

    if format[0] == 'a':
        return self._create_array(format, args)

    raise NotImplementedError('cannot handle GVariant type ' + format)
<SYSTEM_TASK:>
Handle the case where the outermost type of format is a tuple.
<END_TASK>
<USER_TASK:>
Description:
def _create_tuple(self, format, args):
    """Handle the case where the outermost type of format is a tuple.

    Returns (variant, rest_format, rest_args); see _create() for the
    parse-only behaviour when args is None.
    """
    format = format[1:]  # eat the '('
    if args is None:
        # empty value: we need to call _create() to parse the subtype
        rest_format = format
        while rest_format:
            if rest_format.startswith(')'):
                break
            rest_format = self._create(rest_format, None)[1]
        else:
            # while/else: only reached when the format string ran out
            # without the break, i.e. without a closing parenthesis
            raise TypeError('tuple type string not closed with )')

        rest_format = rest_format[1:]  # eat the )
        return (None, rest_format, None)
    else:
        if not args or not isinstance(args[0], tuple):
            raise TypeError('expected tuple argument')

        builder = GLib.VariantBuilder.new(variant_type_from_string('r'))
        for i in range(len(args[0])):
            if format.startswith(')'):
                raise TypeError('too many arguments for tuple signature')

            # each call consumes part of the format string
            (v, format, _) = self._create(format, args[0][i:])
            builder.add_value(v)
        # the whole tuple consumed exactly one top-level argument
        args = args[1:]
        if not format.startswith(')'):
            raise TypeError('tuple type string not closed with )')

        rest_format = format[1:]  # eat the )
        return (builder.end(), rest_format, args)
<SYSTEM_TASK:>
Handle the case where the outermost type of format is a dict.
<END_TASK>
<USER_TASK:>
Description:
def _create_dict(self, format, args):
    """Handle the case where the outermost type of format is a dict.

    Returns (variant, rest_format, rest_args); see _create() for the
    parse-only behaviour when args is None.
    """
    builder = None
    if args is None or not args[0]:
        # empty value: we need to call _create() to parse the subtype,
        # and specify the element type precisely
        rest_format = self._create(format[2:], None)[1]   # key type
        rest_format = self._create(rest_format, None)[1]  # value type
        if not rest_format.startswith('}'):
            raise TypeError('dictionary type string not closed with }')
        rest_format = rest_format[1:]  # eat the }
        element_type = format[:len(format) - len(rest_format)]
        builder = GLib.VariantBuilder.new(variant_type_from_string(element_type))
    else:
        # non-empty dict: let GLib infer the entry type from the values
        builder = GLib.VariantBuilder.new(variant_type_from_string('a{?*}'))
        for k, v in args[0].items():
            (key_v, rest_format, _) = self._create(format[2:], [k])
            (val_v, rest_format, _) = self._create(rest_format, [v])

            if not rest_format.startswith('}'):
                raise TypeError('dictionary type string not closed with }')
            rest_format = rest_format[1:]  # eat the }

            entry = GLib.VariantBuilder.new(variant_type_from_string('{?*}'))
            entry.add_value(key_v)
            entry.add_value(val_v)
            builder.add_value(entry.end())
    # the whole dict consumed exactly one top-level argument
    if args is not None:
        args = args[1:]
    return (builder.end(), rest_format, args)
<SYSTEM_TASK:>
Handle the case where the outermost type of format is an array.
<END_TASK>
<USER_TASK:>
Description:
def _create_array(self, format, args):
    """Handle the case where the outermost type of format is an array.

    Returns (variant, rest_format, rest_args); see _create() for the
    parse-only behaviour when args is None.
    """
    builder = None
    if args is None or not args[0]:
        # empty value: we need to call _create() to parse the subtype,
        # and specify the element type precisely
        rest_format = self._create(format[1:], None)[1]
        element_type = format[:len(format) - len(rest_format)]
        builder = GLib.VariantBuilder.new(variant_type_from_string(element_type))
    else:
        # non-empty array: let GLib infer the element type from the values
        builder = GLib.VariantBuilder.new(variant_type_from_string('a*'))
        for i in range(len(args[0])):
            # every element is parsed against the same subtype format
            (v, rest_format, _) = self._create(format[1:], args[0][i:])
            builder.add_value(v)
    # the whole array consumed exactly one top-level argument
    if args is not None:
        args = args[1:]
    return (builder.end(), rest_format, args)
<SYSTEM_TASK:>
Decompose a GVariant into a native Python object.
<END_TASK>
<USER_TASK:>
Description:
def unpack(self):
    """Decompose a GVariant into a native Python object.

    Recurses through containers (tuples, dicts, arrays, variants,
    maybes) and uses the leaf accessors for simple types.

    :raises NotImplementedError: for type strings with no mapping
    """
    LEAF_ACCESSORS = {
        'b': self.get_boolean,
        'y': self.get_byte,
        'n': self.get_int16,
        'q': self.get_uint16,
        'i': self.get_int32,
        'u': self.get_uint32,
        'x': self.get_int64,
        't': self.get_uint64,
        'h': self.get_handle,
        'd': self.get_double,
        's': self.get_string,
        'o': self.get_string,  # object path
        'g': self.get_string,  # signature
    }

    # simple values
    la = LEAF_ACCESSORS.get(self.get_type_string())
    if la:
        return la()

    # tuple
    if self.get_type_string().startswith('('):
        res = [self.get_child_value(i).unpack()
               for i in range(self.n_children())]
        return tuple(res)

    # dictionary: each child is a {key, value} entry
    if self.get_type_string().startswith('a{'):
        res = {}
        for i in range(self.n_children()):
            v = self.get_child_value(i)
            res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack()
        return res

    # array
    if self.get_type_string().startswith('a'):
        return [self.get_child_value(i).unpack()
                for i in range(self.n_children())]

    # variant (just unbox transparently)
    if self.get_type_string().startswith('v'):
        return self.get_variant().unpack()

    # maybe: None when the value is absent
    if self.get_type_string().startswith('m'):
        m = self.get_maybe()
        return m.unpack() if m else None

    raise NotImplementedError('unsupported GVariant type ' + self.get_type_string())
<SYSTEM_TASK:>
Return a list of the element signatures of the topmost signature tuple.
<END_TASK>
<USER_TASK:>
Description:
def split_signature(klass, signature):
    """Return a list of the element signatures of the topmost signature tuple.

    If the signature is not a tuple, it returns one element with the entire
    signature. If the signature is an empty tuple, the result is [].

    This is useful for e. g. iterating over method parameters which are
    passed as a single Variant.
    """
    if signature == '()':
        return []
    if not signature.startswith('('):
        return [signature]

    elements = []
    inner = signature[1:-1]  # strip the outer parentheses
    pos = 0
    length = len(inner)
    while pos < length:
        start = pos
        # skip maybe/array prefixes, which attach to the next element
        while inner[pos] in 'ma':
            pos += 1
        opener = inner[pos]
        if opener in '({':
            # consume the whole nested container up to its matching close
            closer = ')' if opener == '(' else '}'
            depth = 1
            pos += 1
            while depth:
                ch = inner[pos]
                pos += 1
                if ch == opener:
                    depth += 1
                elif ch == closer:
                    depth -= 1
        else:
            # simple one-character type
            pos += 1
        elements.append(inner[start:pos])
    return elements
<SYSTEM_TASK:>
Takes bytes and returns a GITypelib, or raises GIError
<END_TASK>
<USER_TASK:>
Description:
def new_from_memory(cls, data):
    """Takes bytes and returns a GITypelib, or raises GIError

    :param data: the raw typelib as bytes
    :return: a GITypelib that owns a copy of the data
    :raises GIError: if the typelib could not be created
    """
    size = len(data)
    # copy the buffer so the typelib owns its own memory
    copy = g_memdup(data, size)
    ptr = cast(copy, POINTER(guint8))
    try:
        with gerror(GIError) as error:
            return GITypelib._new_from_memory(ptr, size, error)
    except GIError:
        # creation failed: release the copied buffer to avoid a leak
        free(copy)
        raise
<SYSTEM_TASK:>
Get the subtype class for a pointer
<END_TASK>
<USER_TASK:>
Description:
def _get_type(cls, ptr):
    """Get the subtype class for a pointer"""
    info_type = lib.g_base_info_get_type(ptr)
    # fall back to the base class if unknown
    return cls.__types.get(info_type, cls)
<SYSTEM_TASK:>
Add a method to the target class
<END_TASK>
<USER_TASK:>
Description:
def add_method(info, target_cls, virtual=False, dont_replace=False):
    """Add a method to the target class

    :param info: the introspection info describing the method
    :param target_cls: the class the descriptor gets attached to
    :param virtual: attach as a ``do_``-prefixed virtual method
    :param dont_replace: keep an existing attribute of the same name
    """
    # escape before prefixing, like pygobject
    method_name = escape_identifier(info.name)
    if virtual:
        method_name = "do_" + method_name
        descriptor = VirtualMethodAttribute(info, target_cls, method_name)
    else:
        descriptor = MethodAttribute(info, target_cls, method_name)

    if dont_replace and hasattr(target_cls, method_name):
        return

    setattr(target_cls, method_name, descriptor)
<SYSTEM_TASK:>
Create a new class for a gtype not in the gir.
<END_TASK>
<USER_TASK:>
Description:
def new_class_from_gtype(gtype):
    """Create a new class for a gtype not in the gir.

    The caller is responsible for caching etc.
    """
    if gtype.is_a(PGType.from_name("GObject")):
        parent = gtype.parent.pytype
        # no usable parent type: nothing we can build on
        if parent is None or parent == PGType.from_name("void"):
            return None
        bases = tuple([parent] + [i.pytype for i in gtype.interfaces])
        new_cls = type(gtype.name, bases, dict())
        new_cls.__gtype__ = gtype
        return new_cls
    elif gtype.is_a(PGType.from_name("GEnum")):
        from pgi.enum import GEnumBase
        return GEnumBase
<SYSTEM_TASK:>
Creates a GObject class.
<END_TASK>
<USER_TASK:>
Description:
def ObjectAttribute(obj_info):
    """Creates a GObject class.

    It inherits from the base class and all interfaces it implements.

    :param obj_info: the introspection object info to build a class from
    :return: the newly assembled Python class
    """
    if obj_info.name == "Object" and obj_info.namespace == "GObject":
        # the GObject base itself is implemented by hand
        cls = Object
    else:
        # Get the parent class
        parent_obj = obj_info.get_parent()
        if parent_obj:
            attr = import_attribute(parent_obj.namespace, parent_obj.name)
            bases = (attr,)
        else:
            bases = (object,)

        # Get all object interfaces
        ifaces = []
        for interface in obj_info.get_interfaces():
            attr = import_attribute(interface.namespace, interface.name)
            # only add interfaces if the base classes don't have it
            for base in bases:
                if attr in base.__mro__:
                    break
            else:
                ifaces.append(attr)

        # Combine them to a base class list
        if ifaces:
            bases = tuple(list(bases) + ifaces)

        # Create a new class
        cls = type(obj_info.name, bases, dict())
        cls.__module__ = obj_info.namespace

        # Set root to unowned=False and InitiallyUnowned=True
        if obj_info.namespace == "GObject":
            if obj_info.name == "InitiallyUnowned":
                cls._unowned = True
            elif obj_info.name == "Object":
                cls._unowned = False

    # GType
    cls.__gtype__ = PGType(obj_info.g_type)

    if not obj_info.fundamental:
        # Constructor cache
        cls._constructors = {}

    # Properties
    setattr(cls, PROPS_NAME, PropertyAttribute(obj_info))

    # Signals descriptor
    cls.signals = SignalsAttribute(obj_info)

    # Signal info lookup by name
    cls.__sigs__ = {}
    for sig_info in obj_info.get_signals():
        signal_name = sig_info.name
        cls.__sigs__[signal_name] = sig_info

    # Add constants
    for constant in obj_info.get_constants():
        constant_name = constant.name
        attr = ConstantAttribute(constant)
        setattr(cls, constant_name, attr)

    # Fields
    for field in obj_info.get_fields():
        field_name = escape_identifier(field.name)
        attr = FieldAttribute(field_name, field)
        setattr(cls, field_name, attr)

    # Add methods
    for method_info in obj_info.get_methods():
        # we implement most of the base object ourself
        add_method(method_info, cls, dont_replace=cls is Object)

    # VFuncs
    for vfunc_info in obj_info.get_vfuncs():
        add_method(vfunc_info, cls, virtual=True)

    cs_info = obj_info.get_class_struct()
    if cs_info:
        class_struct = import_attribute(cs_info.namespace, cs_info.name)
    else:
        class_struct = None

    # NOTE(review): this helper closes over ``class_struct`` above, so
    # each generated class gets its own default struct type
    def get_class_struct(cls, type_=None):
        """Returns the class struct casted to the passed type"""
        if type_ is None:
            type_ = class_struct
        if type_ is None:
            return None
        ptr = cls.__gtype__._type.class_ref()
        return type_._from_pointer(ptr)

    setattr(cls, "_get_class_struct", classmethod(get_class_struct))

    return cls
<SYSTEM_TASK:>
Get a hopefully cache constructor
<END_TASK>
<USER_TASK:>
Description:
def _generate_constructor(cls, names):
"""Get a hopefully cache constructor""" |
cache = cls._constructors
if names in cache:
return cache[names]
elif len(cache) > 3:
cache.clear()
func = generate_constructor(cls, names)
cache[names] = func
return func |
<SYSTEM_TASK:>
Make the Python instance take ownership of the GIBaseInfo. i.e.
<END_TASK>
<USER_TASK:>
Description:
def _take_ownership(self):
    """Make the Python instance take ownership of the GIBaseInfo. i.e.
    unref if the python instance gets gc'ed.
    """
    if self:
        ptr = cast(self.value, GIBaseInfo)
        # register a finalizer so the pointer gets unref'ed on gc
        _UnrefFinalizer.track(self, ptr)
        self.__owns = True
<SYSTEM_TASK:>
Casts a GIBaseInfo instance to the right sub type.
<END_TASK>
<USER_TASK:>
Description:
def _cast(cls, base_info, take_ownership=True):
    """Casts a GIBaseInfo instance to the right sub type.

    The original GIBaseInfo can't have ownership.
    Will take ownership.
    """
    type_value = base_info.type.value
    try:
        # look up the concrete info subclass for this type value
        new_obj = cast(base_info, cls.__types[type_value])
    except KeyError:
        # unknown type: keep the generic base info
        new_obj = base_info

    if take_ownership:
        # the source must not already own the pointer
        assert not base_info.__owns
        new_obj._take_ownership()
    return new_obj
<SYSTEM_TASK:>
Takes a library name and calls find_library in case loading fails,
<END_TASK>
<USER_TASK:>
Description:
def load_ctypes_library(name):
    """Takes a library name and calls find_library in case loading fails,
    since some girs don't include the real .so name.

    Raises OSError like LoadLibrary if loading fails.

    e.g. javascriptcoregtk-3.0 should be libjavascriptcoregtk-3.0.so on unix
    """
    try:
        return cdll.LoadLibrary(name)
    except OSError:
        # resolve the soname and retry; re-raise if nothing was found
        resolved = find_library(name)
        if resolved is None:
            raise
        return cdll.LoadLibrary(resolved)
<SYSTEM_TASK:>
Cache the return value of a function without arguments
<END_TASK>
<USER_TASK:>
Description:
def cache_return(func):
    """Cache the return value of a function without arguments"""
    missing = object()          # sentinel: func not called yet
    state = [missing]

    def wrap():
        if state[0] is missing:
            state[0] = func()
        return state[0]
    return wrap
<SYSTEM_TASK:>
Creates a new class similar to namedtuple.
<END_TASK>
<USER_TASK:>
Description:
def _new_type(cls, args):
"""Creates a new class similar to namedtuple.
Pass a list of field names or None for no field name.
>>> x = ResultTuple._new_type([None, "bar"])
>>> x((1, 3))
ResultTuple(1, bar=3)
""" |
fformat = ["%r" if f is None else "%s=%%r" % f for f in args]
fformat = "(%s)" % ", ".join(fformat)
class _ResultTuple(cls):
__slots__ = ()
_fformat = fformat
if args:
for i, a in enumerate(args):
if a is not None:
vars()[a] = property(itemgetter(i))
del i, a
return _ResultTuple |
<SYSTEM_TASK:>
Returns a list of signal names for the given type
<END_TASK>
<USER_TASK:>
Description:
def signal_list_names(type_):
    """Returns the signal names for the given type

    :param type\\_:
    :type type\\_: :obj:`GObject.GType`
    :returns: A tuple of signal names
    :rtype: :obj:`tuple`
    """
    ids = signal_list_ids(type_)
    # resolve each numeric signal id to its name
    return tuple(GObjectModule.signal_name(i) for i in ids)
<SYSTEM_TASK:>
Track an object which needs destruction when it is garbage collected.
<END_TASK>
<USER_TASK:>
Description:
def track(cls, obj, ptr):
    """
    Track an object which needs destruction when it is garbage collected.

    :param obj: the Python object to watch
    :param ptr: the native pointer to release once obj is collected
    """
    # keep the finalizer itself alive by storing it in a class-level set
    cls._objects.add(cls(obj, ptr))
<SYSTEM_TASK:>
Loads overrides for an introspection module.
<END_TASK>
<USER_TASK:>
Description:
def load_overrides(introspection_module):
    """Loads overrides for an introspection module.

    Either returns the same module again in case there are no overrides or a
    proxy module including overrides. Doesn't cache the result.
    """
    namespace = introspection_module.__name__.rsplit(".", 1)[-1]
    module_keys = [prefix + "." + namespace for prefix in const.PREFIX]

    # We use sys.modules so overrides can import from gi.repository
    # but restore everything at the end so this doesn't have any side effects
    # NOTE(review): has_old/old_module end up referring to the last key of
    # this loop only — confirm whether all prefixes can differ in practice
    for module_key in module_keys:
        has_old = module_key in sys.modules
        old_module = sys.modules.get(module_key)

    # Create a new sub type, so we can separate descriptors like
    # _DeprecatedAttribute for each namespace.
    proxy_type = type(namespace + "ProxyModule", (OverridesProxyModule, ), {})

    proxy = proxy_type(introspection_module)
    for module_key in module_keys:
        sys.modules[module_key] = proxy

    try:
        override_package_name = 'pgi.overrides.' + namespace

        # http://bugs.python.org/issue14710
        try:
            override_loader = get_loader(override_package_name)
        except AttributeError:
            override_loader = None

        # Avoid checking for an ImportError, an override might
        # depend on a missing module thus causing an ImportError
        if override_loader is None:
            return introspection_module

        override_mod = importlib.import_module(override_package_name)
    finally:
        # restore sys.modules to its previous state in every case
        for module_key in module_keys:
            del sys.modules[module_key]
            if has_old:
                sys.modules[module_key] = old_module

    override_all = []
    if hasattr(override_mod, "__all__"):
        override_all = override_mod.__all__

    for var in override_all:
        try:
            item = getattr(override_mod, var)
        except (AttributeError, TypeError):
            # Gedit puts a non-string in __all__, so catch TypeError here
            continue

        # make sure new classes have a proper __module__
        try:
            if item.__module__.split(".")[-1] == namespace:
                item.__module__ = namespace
        except AttributeError:
            pass

        setattr(proxy, var, item)

    # Replace deprecated module level attributes with a descriptor
    # which emits a warning when accessed.
    for attr, replacement in _deprecated_attrs.pop(namespace, []):
        try:
            value = getattr(proxy, attr)
        except AttributeError:
            raise AssertionError(
                "%s was set deprecated but wasn't added to __all__" % attr)
        delattr(proxy, attr)
        deprecated_attr = _DeprecatedAttribute(
            namespace, attr, value, replacement)
        # attach to the type so it acts as a descriptor on the proxy
        setattr(proxy_type, attr, deprecated_attr)

    return proxy
<SYSTEM_TASK:>
Takes a override class or function and assigns it dunder arguments
<END_TASK>
<USER_TASK:>
Description:
def override(klass):
    """Takes an override class or function and assigns it the dunder
    attributes of the overridden one.
    """
    namespace = klass.__module__.rsplit(".", 1)[-1]
    module = sys.modules[const.PREFIX[-1] + "." + namespace]

    if isinstance(klass, types.FunctionType):
        # used as a decorator factory for function overrides
        def wrap(wrapped):
            setattr(module, klass.__name__, wrapped)
            return wrapped
        return wrap

    # class override: inherit identity from the class being overridden
    original = klass.__mro__[1]
    klass.__name__ = original.__name__
    klass.__module__ = original.__module__
    setattr(module, original.__name__, klass)
    return klass
<SYSTEM_TASK:>
Mark a function deprecated so calling it issues a warning
<END_TASK>
<USER_TASK:>
Description:
def deprecated(function, instead):
    """Mark a function deprecated so calling it issues a warning"""
    # classes pass through untouched; wrapping them breaks doc generation
    if not isinstance(function, types.FunctionType):
        return function

    message = "Deprecated, use %s instead" % instead

    @wraps(function)
    def emit_and_call(*args, **kwargs):
        warnings.warn(message, PyGIDeprecationWarning)
        return function(*args, **kwargs)

    return emit_and_call
<SYSTEM_TASK:>
Marks a module level attribute as deprecated. Accessing it will emit
<END_TASK>
<USER_TASK:>
Description:
def deprecated_attr(namespace, attr, replacement):
    """Marks a module level attribute as deprecated. Accessing it will emit
    a PyGIDeprecationWarning warning.

    e.g. for ``deprecated_attr("GObject", "STATUS_FOO", "GLib.Status.FOO")``
    accessing GObject.STATUS_FOO will emit:

    "GObject.STATUS_FOO is deprecated; use GLib.Status.FOO instead"

    :param str namespace:
        The namespace of the override this is called in.
    :param str attr:
        The attribute name (which gets added to __all__).
    :param str replacement:
        The replacement text which will be included in the warning.
    """
    # queued here; load_overrides() consumes the entries per namespace
    _deprecated_attrs.setdefault(namespace, []).append((attr, replacement))
<SYSTEM_TASK:>
Translate method's return value for stripping off success flag.
<END_TASK>
<USER_TASK:>
Description:
def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
    """Translate method's return value for stripping off success flag.

    There are a lot of methods which return a "success" boolean and have
    several out arguments. Translate such a method to return the out
    arguments on success and None (or ``fail_ret`` / raise ``exc_type``)
    on failure.
    """
    @wraps(method)
    def wrapped(*args, **kwargs):
        result = method(*args, **kwargs)
        success, rest = result[0], result[1:]
        if not success:
            if exc_type:
                raise exc_type(exc_str or 'call failed')
            return fail_ret
        # unwrap a single out argument; keep a tuple otherwise
        return rest[0] if len(rest) == 1 else rest
    return wrapped
<SYSTEM_TASK:>
Request a name, might return the name or a similar one if already
<END_TASK>
<USER_TASK:>
Description:
def request_name(self, name):
    """Request a name, might return the name or a similar one if already
    used or reserved
    """
    candidate = name
    # append underscores until the name no longer collides
    while candidate in self._blacklist:
        candidate += "_"
    self._blacklist.add(candidate)
    return candidate
<SYSTEM_TASK:>
Add a code dependency so it gets inserted into globals
<END_TASK>
<USER_TASK:>
Description:
def add_dependency(self, name, obj):
    """Add a code dependency so it gets inserted into globals"""
    if name not in self._deps:
        self._deps[name] = obj
        return
    # re-registering the identical object is a no-op; anything else clashes
    if self._deps[name] is not obj:
        raise ValueError(
            "There exists a different dep with the same name : %r" % name)
<SYSTEM_TASK:>
Append this block to another one, passing all dependencies
<END_TASK>
<USER_TASK:>
Description:
def write_into(self, block, level=0):
    """Append this block to another one, passing all dependencies"""
    for text, indent in self._lines:
        block.write_line(text, level + indent)

    for dep_name, dep in _compat.iteritems(self._deps):
        block.add_dependency(dep_name, dep)
<SYSTEM_TASK:>
Append multiple new lines
<END_TASK>
<USER_TASK:>
Description:
def write_lines(self, lines, level=0):
    """Append multiple new lines"""
    for text in lines:
        self.write_line(text, level)
<SYSTEM_TASK:>
Execute the python code and returns the global dict.
<END_TASK>
<USER_TASK:>
Description:
def compile(self, **kwargs):
    """Execute the python code and returns the global dict.

    kwargs can contain extra dependencies that get only used
    at compile time.
    """
    # NOTE: the method name shadows the builtin; the call below still
    # resolves to the builtin compile()
    code_obj = compile(str(self), "<string>", "exec")
    namespace = dict(self._deps)
    namespace.update(kwargs)
    _compat.exec_(code_obj, namespace)
    return namespace
<SYSTEM_TASK:>
Print the code block to stdout.
<END_TASK>
<USER_TASK:>
Description:
def pprint(self, file_=sys.stdout):
    """Print the code block to stdout.

    Does syntax highlighting if possible.

    :param file_: a writable file-like object; highlighting is only
        attempted when it is a tty. NOTE: the default binds sys.stdout
        at definition time.
    """
    code = []
    if self._deps:
        code.append("# dependencies:")
        for k, v in _compat.iteritems(self._deps):
            code.append("# %s: %r" % (k, v))
    code.append(str(self))
    code = "\n".join(code)

    if file_.isatty():
        try:
            from pygments import highlight
            from pygments.lexers import PythonLexer
            from pygments.formatters import TerminalFormatter
        except ImportError:
            # pygments is optional; fall through to plain output
            pass
        else:
            formatter = TerminalFormatter(bg="dark")
            lexer = PythonLexer()
            file_.write(highlight(code, lexer, formatter))
            return

    file_.write(code + "\n")
<SYSTEM_TASK:>
If may_be_null returns nullable or if NULL can be passed in.
<END_TASK>
<USER_TASK:>
Description:
def may_be_null_is_nullable():
    """If may_be_null returns nullable or if NULL can be passed in.

    This can still be wrong if the specific typelib is older than the linked
    libgirepository.

    https://bugzilla.gnome.org/show_bug.cgi?id=660879#c47

    :return: True if may_be_null has (nullable) semantics
    """
    repo = GIRepository()
    repo.require("GLib", "2.0", 0)
    info = repo.find_by_name("GLib", "spawn_sync")
    # this argument is (allow-none) and can never be (nullable)
    return not info.get_arg(8).may_be_null
<SYSTEM_TASK:>
Gives a name for a type that is suitable for a docstring.
<END_TASK>
<USER_TASK:>
Description:
def get_type_name(type_):
    """Gives a name for a type that is suitable for a docstring.

    int -> "int"
    Gtk.Window -> "Gtk.Window"
    [int] -> "[int]"
    {int: Gtk.Button} -> "{int: Gtk.Button}"
    """
    if type_ is None:
        return ""

    if isinstance(type_, string_types):
        return type_
    if isinstance(type_, list):
        # single-element list means "list of that type"
        assert len(type_) == 1
        return "[%s]" % get_type_name(type_[0])
    if isinstance(type_, dict):
        # single-entry dict means "mapping of key type to value type"
        assert len(type_) == 1
        key, value = next(iter(type_.items()))
        return "{%s: %s}" % (get_type_name(key), get_type_name(value))
    if type_.__module__ in ("__builtin__", "builtins"):
        return type_.__name__
    return "%s.%s" % (type_.__module__, type_.__name__)
<SYSTEM_TASK:>
Returns a new shiny class for the given enum type
<END_TASK>
<USER_TASK:>
Description:
def _create_enum_class(ffi, type_name, prefix, flags=False):
    """Returns a new shiny class for the given enum type

    :param ffi: the cffi FFI holding the enum's typedef
    :param type_name: the C enum type name to reflect
    :param prefix: common prefix stripped from every member name
    :param flags: build a bit-flag class (str joins the set flag names)
    """
    class _template(int):
        _map = {}

        @property
        def value(self):
            return int(self)

        def __str__(self):
            return self._map.get(self, "Unknown")

        def __repr__(self):
            return "%s.%s" % (type(self).__name__, str(self))

    class _template_flags(int):
        _map = {}

        @property
        def value(self):
            return int(self)

        def __str__(self):
            names = []
            val = int(self)
            for flag, name in self._map.items():
                if val & flag:
                    names.append(name)
                    val &= ~flag
            # any leftover bits with no known flag show up numerically
            if val:
                names.append(str(val))
            return " | ".join(sorted(names or ["Unknown"]))

        def __repr__(self):
            return "%s(%s)" % (type(self).__name__, str(self))

    if flags:
        template = _template_flags
    else:
        template = _template

    # clone the template; note dict() copies attributes shallowly, so
    # cls._map is the same dict object as the (per-call) template's _map
    cls = type(type_name, template.__bases__, dict(template.__dict__))

    prefix_len = len(prefix)
    for value, name in ffi.typeof(type_name).elements.items():
        assert name[:prefix_len] == prefix
        name = name[prefix_len:]
        setattr(cls, name, cls(value))
        cls._map[value] = name

    return cls
<SYSTEM_TASK:>
Converts some common enum expressions to constants
<END_TASK>
<USER_TASK:>
Description:
def _fixup_cdef_enums(string, reg=re.compile(r"=\s*(\d+)\s*<<\s*(\d+)")):
"""Converts some common enum expressions to constants""" |
def repl_shift(match):
shift_by = int(match.group(2))
value = int(match.group(1))
int_value = ctypes.c_int(value << shift_by).value
return "= %s" % str(int_value)
return reg.sub(repl_shift, string) |
<SYSTEM_TASK:>
Takes a glist, copies the values casted to type_ in to a list
<END_TASK>
<USER_TASK:>
Description:
def unpack_glist(g, type_, transfer_full=True):
    """Takes a glist, copies the values casted to type_ in to a list
    and frees all items and the list.

    :param g: the GList wrapper to unpack
    :param type_: ctypes pointer type each node's data is cast to
    :param transfer_full: if True, each node's data and the list itself
        are freed (full ownership transfer)
    """
    values = []
    item = g
    while item:
        # data pointer of the current list node
        ptr = item.contents.data
        value = cast(ptr, type_).value
        values.append(value)
        if transfer_full:
            free(ptr)
        # NOTE(review): ``next`` is invoked as a method here — presumably
        # a helper on the GList wrapper rather than the raw struct field;
        # confirm against the binding definition
        item = item.next()
    if transfer_full:
        g.free()
    return values
<SYSTEM_TASK:>
Takes a null terminated array, copies the values into a list
<END_TASK>
<USER_TASK:>
Description:
def unpack_nullterm_array(array):
    """Takes a null terminated array, copies the values into a list
    and frees each value and the list.
    """
    # view the array as raw pointers so each element can be freed
    addrs = cast(array, POINTER(ctypes.c_void_p))
    l = []
    i = 0
    value = array[i]
    while value:
        l.append(value)
        free(addrs[i])
        i += 1
        value = array[i]
    # finally free the array itself
    free(addrs)
    return l
<SYSTEM_TASK:>
Set a version for the namespace to be loaded.
<END_TASK>
<USER_TASK:>
Description:
def require_version(namespace, version):
    """Set a version for the namespace to be loaded.

    This needs to be called before importing the namespace or any
    namespace that depends on it.

    :raises ValueError: if the namespace is unavailable, already loaded
        with another version, or already pinned to another version
    """
    global _versions

    repo = GIRepository()

    if namespace in repo.get_loaded_namespaces():
        loaded = repo.get_version(namespace)
        if loaded != version:
            raise ValueError('Namespace %s is already loaded with version %s' %
                             (namespace, loaded))

    pinned = _versions.get(namespace)
    if pinned is not None and pinned != version:
        raise ValueError('Namespace %s already requires version %s' %
                         (namespace, pinned))

    available = repo.enumerate_versions(namespace)
    if not available:
        raise ValueError('Namespace %s not available' % namespace)
    if version not in available:
        raise ValueError('Namespace %s not available for version %s' %
                         (namespace, version))

    _versions[namespace] = version
<SYSTEM_TASK:>
Takes a glist ptr, copies the values casted to type_ in to a list
<END_TASK>
<USER_TASK:>
Description:
def unpack_glist(glist_ptr, cffi_type, transfer_full=True):
    """Takes a glist ptr, copies the values casted to type_ in to a list
    and frees all items and the list.

    If an item is returned all yielded before are invalid.
    """
    current = glist_ptr
    while current:
        # yield before freeing; once the next item is requested,
        # this node's data has already been released
        yield ffi.cast(cffi_type, current.data)
        if transfer_full:
            free(current.data)
        current = current.next
    if transfer_full:
        lib.g_list_free(glist_ptr)
<SYSTEM_TASK:>
Converts a zero terminated array to a list and frees each element
<END_TASK>
<USER_TASK:>
Description:
def unpack_zeroterm_array(ptr):
    """Converts a zero terminated array to a list and frees each element
    and the list itself.

    If an item is returned all yielded before are invalid.
    """
    assert ptr

    index = 0
    current = ptr[index]
    while current:
        yield current
        # free the element that was just consumed
        free(ffi.cast("gpointer", current))
        index += 1
        current = ptr[index]
    # finally free the array itself
    free(ffi.cast("gpointer", ptr))
<SYSTEM_TASK:>
Creates a GError exception and takes ownership if own is True
<END_TASK>
<USER_TASK:>
Description:
def _from_gerror(cls, error, own=True):
"""Creates a GError exception and takes ownership if own is True""" |
if not own:
error = error.copy()
self = cls()
self._error = error
return self |
<SYSTEM_TASK:>
Takes a version string or tuple and raises ValueError in case
<END_TASK>
<USER_TASK:>
Description:
def check_version(version):
    """Raise ValueError if *version* is newer than the running pgi.

    *version* may be a dotted string (e.g. ``"1.2"``) or a tuple of
    ints.  Keep in mind that the pgi version is different from the
    pygobject one.
    """
    if isinstance(version, string_types):
        version = tuple(int(part) for part in version.split("."))
    if version > version_info:
        wanted = ".".join(str(part) for part in version)
        raise ValueError("pgi version '%s' requested, '%s' available" %
                         (wanted, __version__))
<SYSTEM_TASK:>
Call before the first gi import to redirect gi imports to pgi
<END_TASK>
<USER_TASK:>
Description:
def install_as_gi():
    """Call before the first gi import to redirect gi imports to pgi"""

    import sys

    # Nothing to do if the redirection has already been installed.
    if "gi.repository" in const.PREFIX:
        return

    # Refuse to run once any real gi module has been imported.
    for name in iterkeys(sys.modules):
        if name == "gi" or name.startswith("gi."):
            raise AssertionError("pgi has to be imported before gi")

    # Alias the pgi packages under the gi names and tell the import hook.
    import pgi
    import pgi.repository
    sys.modules["gi"] = pgi
    sys.modules["gi.repository"] = pgi.repository
    const.PREFIX.append("gi.repository")
<SYSTEM_TASK:>
Adds specified panel class to model class.
<END_TASK>
<USER_TASK:>
Description:
def add_panel_to_edit_handler(model, panel_cls, heading, index=None):
    """
    Adds specified panel class to model class.

    :param model: the model class.
    :param panel_cls: the panel class.
    :param heading: the panel heading.
    :param index: the index position to insert at (``None`` appends).
    """
    from wagtail.wagtailadmin.views.pages import get_page_edit_handler

    edit_handler = get_page_edit_handler(model)
    panel_instance = ObjectList(
        [panel_cls()],
        heading=heading
    ).bind_to_model(model)

    # ``index`` may legitimately be 0; the previous truthiness test
    # (``if index:``) silently appended instead of inserting at the
    # front, so compare against None explicitly.
    if index is not None:
        edit_handler.children.insert(index, panel_instance)
    else:
        edit_handler.children.append(panel_instance)
<SYSTEM_TASK:>
Returns ordering value for list.
<END_TASK>
<USER_TASK:>
Description:
def get_ordering(self):
    """
    Returns ordering value for list.

    Only ``title`` and ``-created_at`` are accepted from the query
    string; anything else falls back to ``-created_at``.

    :rtype: str.
    """
    #noinspection PyUnresolvedReferences
    requested = self.request.GET.get('ordering', None)
    if requested in ('title', '-created_at'):
        return requested
    return '-created_at'
<SYSTEM_TASK:>
Returns a list of template names for the view.
<END_TASK>
<USER_TASK:>
Description:
def get_template_names(self):
    """
    Returns a list of template names for the view.

    AJAX requests receive the partial results template; all other
    requests receive the full index template.

    :rtype: list.
    """
    #noinspection PyUnresolvedReferences
    suffix = '/results.html' if self.request.is_ajax() else '/index.html'
    return ['{0}{1}'.format(self.template_dir, suffix)]
<SYSTEM_TASK:>
Returns tuple containing paginator instance, page instance,
<END_TASK>
<USER_TASK:>
Description:
def paginate_queryset(self, queryset, page_size):
    """
    Returns tuple containing paginator instance, page instance,
    object list, and whether there are other pages.

    Non-integer page numbers fall back to the first page; out-of-range
    numbers fall back to the last page.

    :param queryset: the queryset instance to paginate.
    :param page_size: the number of instances per page.
    :rtype: tuple.
    """
    paginator = self.get_paginator(
        queryset,
        page_size,
        orphans=self.get_paginate_orphans(),
        allow_empty_first_page=self.get_allow_empty()
    )
    #noinspection PyUnresolvedReferences
    requested = (self.kwargs.get(self.page_kwarg)
                 or self.request.GET.get(self.page_kwarg)
                 or 1)
    # Default to a valid page.
    try:
        page = paginator.page(requested)
    except PageNotAnInteger:
        # Garbage input: show the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Past the end: clamp to the last page.
        page = paginator.page(paginator.num_pages)
    return (paginator, page, page.object_list, page.has_other_pages())
<SYSTEM_TASK:>
Processes deletion of the specified instance.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, request, *args, **kwargs):
    """
    Processes deletion of the specified instance.

    :param request: the request instance.
    :rtype: django.http.HttpResponse.
    """
    #noinspection PyAttributeOutsideInit
    self.object = self.get_object()
    # Resolve the redirect target before the instance is removed.
    success_url = self.get_success_url()
    verbose_name = getattr(self.object, '_meta').verbose_name
    self.object.delete()
    message = _(u'{0} "{1}" deleted.').format(
        verbose_name.lower(),
        str(self.object)
    )
    messages.success(request, message)
    return redirect(success_url)
<SYSTEM_TASK:>
Returns chunks of n length of iterable
<END_TASK>
<USER_TASK:>
Description:
def chunked(iterable, n):
    """Yields chunks of n length of iterable

    If len(iterable) % n != 0, then the last chunk will have length
    less than n.

    Example:

    >>> list(chunked([1, 2, 3, 4, 5], 2))
    [(1, 2), (3, 4), (5,)]
    """
    # The original docstring example displayed a bare list even though
    # this is a generator; the doctest now wraps the call in list().
    iterator = iter(iterable)
    while True:
        chunk = tuple(islice(iterator, n))
        if not chunk:
            return
        yield chunk
<SYSTEM_TASK:>
Return explanation in an easier to read format
<END_TASK>
<USER_TASK:>
Description:
def format_explanation(explanation, indent=' ', indent_level=0):
    """Return explanation in an easier to read format

    Renders one line per node ("description value"), indenting nested
    ``details`` one level deeper per recursion.
    """
    if not explanation:
        return ''

    # Note: This is probably a crap implementation, but it's an
    # interesting starting point for a better formatter.
    prefix = indent * indent_level
    line = '%s%s %2.4f' % (prefix,
                           explanation['description'],
                           explanation['value'])
    if 'details' not in explanation:
        return line
    children = [format_explanation(subtree, indent, indent_level + 1)
                for subtree in explanation['details']]
    return line + '\n' + '\n'.join(children)
<SYSTEM_TASK:>
Return a elasticsearch Elasticsearch object using settings
<END_TASK>
<USER_TASK:>
Description:
def get_es(**overrides):
    """Return a elasticsearch Elasticsearch object using settings
    from ``settings.py``.

    :arg overrides: Allows you to override defaults to create the
        ElasticSearch object.  You can override any of the arguments
        listed in :py:func:`elasticutils.get_es`.

    For example, if you wanted to create an ElasticSearch with a
    longer timeout to a different cluster, you'd do:

    >>> from elasticutils.contrib.django import get_es
    >>> es = get_es(urls=['http://some_other_cluster:9200'], timeout=30)
    """
    args = {
        'urls': settings.ES_URLS,
        'timeout': getattr(settings, 'ES_TIMEOUT', 5),
    }
    args.update(overrides)
    return base_get_es(**args)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.