Dataset schema (each record below repeats these fields in order):

column            dtype     stats
id                int32     0 - 252k
repo              string    lengths 7 - 55
path              string    lengths 4 - 127
func_name         string    lengths 1 - 88
original_string   string    lengths 75 - 19.8k
language          string    1 class
code              string    lengths 75 - 19.8k
code_tokens       sequence
docstring         string    lengths 3 - 17.3k
docstring_tokens  sequence
sha               string    lengths 40 - 40
url               string    lengths 87 - 242
248,000
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/subscribers.py
participation_policy_changed
def participation_policy_changed(ob, event):
    """ Move all the existing users to a new group """
    workspace = IWorkspace(ob)
    old_group_name = workspace.group_for_policy(event.old_policy)
    old_group = api.group.get(old_group_name)
    for member in old_group.getAllGroupMembers():
        groups = workspace.get(member.getId()).groups
        groups -= set([event.old_policy.title()])
        groups.add(event.new_policy.title())
python
def participation_policy_changed(ob, event):
    """ Move all the existing users to a new group """
    workspace = IWorkspace(ob)
    old_group_name = workspace.group_for_policy(event.old_policy)
    old_group = api.group.get(old_group_name)
    for member in old_group.getAllGroupMembers():
        groups = workspace.get(member.getId()).groups
        groups -= set([event.old_policy.title()])
        groups.add(event.new_policy.title())
[ "def", "participation_policy_changed", "(", "ob", ",", "event", ")", ":", "workspace", "=", "IWorkspace", "(", "ob", ")", "old_group_name", "=", "workspace", ".", "group_for_policy", "(", "event", ".", "old_policy", ")", "old_group", "=", "api", ".", "group", ".", "get", "(", "old_group_name", ")", "for", "member", "in", "old_group", ".", "getAllGroupMembers", "(", ")", ":", "groups", "=", "workspace", ".", "get", "(", "member", ".", "getId", "(", ")", ")", ".", "groups", "groups", "-=", "set", "(", "[", "event", ".", "old_policy", ".", "title", "(", ")", "]", ")", "groups", ".", "add", "(", "event", ".", "new_policy", ".", "title", "(", ")", ")" ]
Move all the existing users to a new group
[ "Move", "all", "the", "existing", "users", "to", "a", "new", "group" ]
a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/subscribers.py#L62-L70
248,001
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/subscribers.py
invitation_accepted
def invitation_accepted(event):
    """ When an invitation is accepted, add the user to the team """
    request = getRequest()
    storage = get_storage()
    if event.token_id not in storage:
        return

    ws_uid, username = storage[event.token_id]
    storage[event.token_id]

    acl_users = api.portal.get_tool('acl_users')
    acl_users.updateCredentials(
        request,
        request.response,
        username,
        None
    )
    catalog = api.portal.get_tool(name="portal_catalog")
    brain = catalog.unrestrictedSearchResults(UID=ws_uid)[0]
    with api.env.adopt_roles(["Manager"]):
        ws = IWorkspace(brain.getObject())
        for name in ws.members:
            member = api.user.get(username=name)
            if member is not None:
                if member.getUserName() == username:
                    api.portal.show_message(
                        _('Oh boy, oh boy, you are already a member'),
                        request,
                    )
                    break
        else:
            ws.add_to_team(user=username)
            api.portal.show_message(
                _('Welcome to our family, Stranger'),
                request,
            )
python
def invitation_accepted(event):
    """ When an invitation is accepted, add the user to the team """
    request = getRequest()
    storage = get_storage()
    if event.token_id not in storage:
        return

    ws_uid, username = storage[event.token_id]
    storage[event.token_id]

    acl_users = api.portal.get_tool('acl_users')
    acl_users.updateCredentials(
        request,
        request.response,
        username,
        None
    )
    catalog = api.portal.get_tool(name="portal_catalog")
    brain = catalog.unrestrictedSearchResults(UID=ws_uid)[0]
    with api.env.adopt_roles(["Manager"]):
        ws = IWorkspace(brain.getObject())
        for name in ws.members:
            member = api.user.get(username=name)
            if member is not None:
                if member.getUserName() == username:
                    api.portal.show_message(
                        _('Oh boy, oh boy, you are already a member'),
                        request,
                    )
                    break
        else:
            ws.add_to_team(user=username)
            api.portal.show_message(
                _('Welcome to our family, Stranger'),
                request,
            )
[ "def", "invitation_accepted", "(", "event", ")", ":", "request", "=", "getRequest", "(", ")", "storage", "=", "get_storage", "(", ")", "if", "event", ".", "token_id", "not", "in", "storage", ":", "return", "ws_uid", ",", "username", "=", "storage", "[", "event", ".", "token_id", "]", "storage", "[", "event", ".", "token_id", "]", "acl_users", "=", "api", ".", "portal", ".", "get_tool", "(", "'acl_users'", ")", "acl_users", ".", "updateCredentials", "(", "request", ",", "request", ".", "response", ",", "username", ",", "None", ")", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "name", "=", "\"portal_catalog\"", ")", "brain", "=", "catalog", ".", "unrestrictedSearchResults", "(", "UID", "=", "ws_uid", ")", "[", "0", "]", "with", "api", ".", "env", ".", "adopt_roles", "(", "[", "\"Manager\"", "]", ")", ":", "ws", "=", "IWorkspace", "(", "brain", ".", "getObject", "(", ")", ")", "for", "name", "in", "ws", ".", "members", ":", "member", "=", "api", ".", "user", ".", "get", "(", "username", "=", "name", ")", "if", "member", "is", "not", "None", ":", "if", "member", ".", "getUserName", "(", ")", "==", "username", ":", "api", ".", "portal", ".", "show_message", "(", "_", "(", "'Oh boy, oh boy, you are already a member'", ")", ",", "request", ",", ")", "break", "else", ":", "ws", ".", "add_to_team", "(", "user", "=", "username", ")", "api", ".", "portal", ".", "show_message", "(", "_", "(", "'Welcome to our family, Stranger'", ")", ",", "request", ",", ")" ]
When an invitation is accepted, add the user to the team
[ "When", "an", "invitation", "is", "accepted", "add", "the", "user", "to", "the", "team" ]
a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/subscribers.py#L73-L109
248,002
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/subscribers.py
user_deleted_from_site_event
def user_deleted_from_site_event(event):
    """ Remove deleted user from all the workspaces where he is a member """
    userid = event.principal
    catalog = api.portal.get_tool('portal_catalog')
    query = {'object_provides': WORKSPACE_INTERFACE}
    query['workspace_members'] = userid
    workspaces = [
        IWorkspace(b._unrestrictedGetObject())
        for b in catalog.unrestrictedSearchResults(query)
    ]
    for workspace in workspaces:
        workspace.remove_from_team(userid)
python
def user_deleted_from_site_event(event):
    """ Remove deleted user from all the workspaces where he is a member """
    userid = event.principal
    catalog = api.portal.get_tool('portal_catalog')
    query = {'object_provides': WORKSPACE_INTERFACE}
    query['workspace_members'] = userid
    workspaces = [
        IWorkspace(b._unrestrictedGetObject())
        for b in catalog.unrestrictedSearchResults(query)
    ]
    for workspace in workspaces:
        workspace.remove_from_team(userid)
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Remove deleted user from all the workspaces where he is a member
[ "Remove", "deleted", "user", "from", "all", "the", "workspaces", "where", "he", "is", "a", "member" ]
a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/subscribers.py#L112-L126
248,003
Bystroushaak/zeo_connector_defaults
src/zeo_connector_defaults/environment_generator.py
data_context_name
def data_context_name(fn):
    """
    Return the `fn` in absolute path in `template_data` directory.
    """
    return os.path.join(os.path.dirname(__file__), "template_data", fn)
python
def data_context_name(fn):
    """
    Return the `fn` in absolute path in `template_data` directory.
    """
    return os.path.join(os.path.dirname(__file__), "template_data", fn)
[ "def", "data_context_name", "(", "fn", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"template_data\"", ",", "fn", ")" ]
Return the `fn` in absolute path in `template_data` directory.
[ "Return", "the", "fn", "in", "absolute", "path", "in", "template_data", "directory", "." ]
b54ecb99ddb4665db00fba183ef1d7252b0ca62b
https://github.com/Bystroushaak/zeo_connector_defaults/blob/b54ecb99ddb4665db00fba183ef1d7252b0ca62b/src/zeo_connector_defaults/environment_generator.py#L24-L28
248,004
Bystroushaak/zeo_connector_defaults
src/zeo_connector_defaults/environment_generator.py
data_context
def data_context(fn, mode="r"):
    """
    Return content fo the `fn` from the `template_data` directory.
    """
    with open(data_context_name(fn), mode) as f:
        return f.read()
python
def data_context(fn, mode="r"):
    """
    Return content fo the `fn` from the `template_data` directory.
    """
    with open(data_context_name(fn), mode) as f:
        return f.read()
[ "def", "data_context", "(", "fn", ",", "mode", "=", "\"r\"", ")", ":", "with", "open", "(", "data_context_name", "(", "fn", ")", ",", "mode", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Return content fo the `fn` from the `template_data` directory.
[ "Return", "content", "fo", "the", "fn", "from", "the", "template_data", "directory", "." ]
b54ecb99ddb4665db00fba183ef1d7252b0ca62b
https://github.com/Bystroushaak/zeo_connector_defaults/blob/b54ecb99ddb4665db00fba183ef1d7252b0ca62b/src/zeo_connector_defaults/environment_generator.py#L31-L36
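A short usage sketch covering this helper and `data_context_name` from the previous record. The file name `zeo.conf` is hypothetical, and the import path is inferred from the `path` column above:

from zeo_connector_defaults.environment_generator import (
    data_context, data_context_name,
)

# Resolve a (hypothetical) file name inside the packaged template_data dir.
path = data_context_name("zeo.conf")

# Read that same file's content as a string.
content = data_context("zeo.conf")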
248,005
Bystroushaak/zeo_connector_defaults
src/zeo_connector_defaults/environment_generator.py
tmp_context
def tmp_context(fn, mode="r"):
    """
    Return content fo the `fn` from the temporary directory.
    """
    with open(tmp_context_name(fn), mode) as f:
        return f.read()
python
def tmp_context(fn, mode="r"):
    """
    Return content fo the `fn` from the temporary directory.
    """
    with open(tmp_context_name(fn), mode) as f:
        return f.read()
[ "def", "tmp_context", "(", "fn", ",", "mode", "=", "\"r\"", ")", ":", "with", "open", "(", "tmp_context_name", "(", "fn", ")", ",", "mode", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Return content fo the `fn` from the temporary directory.
[ "Return", "content", "fo", "the", "fn", "from", "the", "temporary", "directory", "." ]
b54ecb99ddb4665db00fba183ef1d7252b0ca62b
https://github.com/Bystroushaak/zeo_connector_defaults/blob/b54ecb99ddb4665db00fba183ef1d7252b0ca62b/src/zeo_connector_defaults/environment_generator.py#L49-L54
248,006
Bystroushaak/zeo_connector_defaults
src/zeo_connector_defaults/environment_generator.py
cleanup_environment
def cleanup_environment():
    """
    Shutdown the ZEO server process running in another thread and cleanup the
    temporary directory.
    """
    SERV.terminate()

    shutil.rmtree(TMP_PATH)
    if os.path.exists(TMP_PATH):
        os.rmdir(TMP_PATH)

    global TMP_PATH
    TMP_PATH = None
python
def cleanup_environment():
    """
    Shutdown the ZEO server process running in another thread and cleanup the
    temporary directory.
    """
    SERV.terminate()

    shutil.rmtree(TMP_PATH)
    if os.path.exists(TMP_PATH):
        os.rmdir(TMP_PATH)

    global TMP_PATH
    TMP_PATH = None
[ "def", "cleanup_environment", "(", ")", ":", "SERV", ".", "terminate", "(", ")", "shutil", ".", "rmtree", "(", "TMP_PATH", ")", "if", "os", ".", "path", ".", "exists", "(", "TMP_PATH", ")", ":", "os", ".", "rmdir", "(", "TMP_PATH", ")", "global", "TMP_PATH", "TMP_PATH", "=", "None" ]
Shutdown the ZEO server process running in another thread and cleanup the temporary directory.
[ "Shutdown", "the", "ZEO", "server", "process", "running", "in", "another", "thread", "and", "cleanup", "the", "temporary", "directory", "." ]
b54ecb99ddb4665db00fba183ef1d7252b0ca62b
https://github.com/Bystroushaak/zeo_connector_defaults/blob/b54ecb99ddb4665db00fba183ef1d7252b0ca62b/src/zeo_connector_defaults/environment_generator.py#L97-L108
248,007
dave-shawley/coercion
coercion.py
stringify
def stringify(obj):
    """
    Return the string representation of an object.

    :param obj: object to get the representation of
    :returns: unicode string representation of `obj` or `obj` unchanged

    This function returns a string representation for many of the types
    from the standard library. It does not convert numeric or Boolean
    values to strings -- it only converts non-primitive instances such
    as :class:`datetime.datetime`. The following table describes the
    types that are handled and describes how they are represented.

    +----------------------------+--------------------------------------------+
    | Class                      | Behavior                                   |
    +============================+============================================+
    | :class:`uuid.UUID`         | ``str(obj)``                               |
    +----------------------------+--------------------------------------------+
    | :class:`datetime.datetime` | ``obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')`` |
    +----------------------------+--------------------------------------------+
    | :class:`memoryview`        | ``obj.tobytes().decode('utf-8')``          |
    +----------------------------+--------------------------------------------+
    | :class:`bytearray`         | ``bytes(obj).decode('utf-8')``             |
    +----------------------------+--------------------------------------------+
    | :class:`buffer`            | ``bytes(obj).decode('utf-8')``             |
    +----------------------------+--------------------------------------------+
    | :class:`bytes`             | ``obj.decode('utf-8')``                    |
    +----------------------------+--------------------------------------------+

    Other types are returned unharmed.
    """
    out = obj
    if isinstance(obj, uuid.UUID):
        out = str(obj)
    elif hasattr(obj, 'strftime'):
        out = obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
    elif isinstance(obj, memoryview):
        out = obj.tobytes()
    elif isinstance(obj, bytearray):
        out = bytes(obj)
    elif sys.version_info[0] < 3 and isinstance(obj, buffer):
        out = bytes(obj)

    if isinstance(out, bytes):
        out = out.decode('utf-8')

    return out
python
def stringify(obj):
    """
    Return the string representation of an object.

    :param obj: object to get the representation of
    :returns: unicode string representation of `obj` or `obj` unchanged

    This function returns a string representation for many of the types
    from the standard library. It does not convert numeric or Boolean
    values to strings -- it only converts non-primitive instances such
    as :class:`datetime.datetime`. The following table describes the
    types that are handled and describes how they are represented.

    +----------------------------+--------------------------------------------+
    | Class                      | Behavior                                   |
    +============================+============================================+
    | :class:`uuid.UUID`         | ``str(obj)``                               |
    +----------------------------+--------------------------------------------+
    | :class:`datetime.datetime` | ``obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')`` |
    +----------------------------+--------------------------------------------+
    | :class:`memoryview`        | ``obj.tobytes().decode('utf-8')``          |
    +----------------------------+--------------------------------------------+
    | :class:`bytearray`         | ``bytes(obj).decode('utf-8')``             |
    +----------------------------+--------------------------------------------+
    | :class:`buffer`            | ``bytes(obj).decode('utf-8')``             |
    +----------------------------+--------------------------------------------+
    | :class:`bytes`             | ``obj.decode('utf-8')``                    |
    +----------------------------+--------------------------------------------+

    Other types are returned unharmed.
    """
    out = obj
    if isinstance(obj, uuid.UUID):
        out = str(obj)
    elif hasattr(obj, 'strftime'):
        out = obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
    elif isinstance(obj, memoryview):
        out = obj.tobytes()
    elif isinstance(obj, bytearray):
        out = bytes(obj)
    elif sys.version_info[0] < 3 and isinstance(obj, buffer):
        out = bytes(obj)

    if isinstance(out, bytes):
        out = out.decode('utf-8')

    return out
[ "def", "stringify", "(", "obj", ")", ":", "out", "=", "obj", "if", "isinstance", "(", "obj", ",", "uuid", ".", "UUID", ")", ":", "out", "=", "str", "(", "obj", ")", "elif", "hasattr", "(", "obj", ",", "'strftime'", ")", ":", "out", "=", "obj", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S.%f%z'", ")", "elif", "isinstance", "(", "obj", ",", "memoryview", ")", ":", "out", "=", "obj", ".", "tobytes", "(", ")", "elif", "isinstance", "(", "obj", ",", "bytearray", ")", ":", "out", "=", "bytes", "(", "obj", ")", "elif", "sys", ".", "version_info", "[", "0", "]", "<", "3", "and", "isinstance", "(", "obj", ",", "buffer", ")", ":", "out", "=", "bytes", "(", "obj", ")", "if", "isinstance", "(", "out", ",", "bytes", ")", ":", "out", "=", "out", ".", "decode", "(", "'utf-8'", ")", "return", "out" ]
Return the string representation of an object.

:param obj: object to get the representation of
:returns: unicode string representation of `obj` or `obj` unchanged

This function returns a string representation for many of the types
from the standard library. It does not convert numeric or Boolean
values to strings -- it only converts non-primitive instances such
as :class:`datetime.datetime`. The following table describes the
types that are handled and describes how they are represented.

+----------------------------+--------------------------------------------+
| Class                      | Behavior                                   |
+============================+============================================+
| :class:`uuid.UUID`         | ``str(obj)``                               |
+----------------------------+--------------------------------------------+
| :class:`datetime.datetime` | ``obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')`` |
+----------------------------+--------------------------------------------+
| :class:`memoryview`        | ``obj.tobytes().decode('utf-8')``          |
+----------------------------+--------------------------------------------+
| :class:`bytearray`         | ``bytes(obj).decode('utf-8')``             |
+----------------------------+--------------------------------------------+
| :class:`buffer`            | ``bytes(obj).decode('utf-8')``             |
+----------------------------+--------------------------------------------+
| :class:`bytes`             | ``obj.decode('utf-8')``                    |
+----------------------------+--------------------------------------------+

Other types are returned unharmed.
[ "Return", "the", "string", "representation", "of", "an", "object", "." ]
152c91b99310364a7198020090d7d2197ebea94d
https://github.com/dave-shawley/coercion/blob/152c91b99310364a7198020090d7d2197ebea94d/coercion.py#L15-L62
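A brief usage sketch of the conversion table above, assuming `stringify` is importable from the `coercion` module named in the record's path; the sample values are illustrative only:

import datetime
import uuid

from coercion import stringify

stringify(uuid.UUID(int=1))               # '00000000-0000-0000-0000-000000000001'
stringify(datetime.datetime(2020, 1, 2))  # '2020-01-02T00:00:00.000000'
stringify(bytearray(b'abc'))              # 'abc'
stringify(b'abc')                         # 'abc'
stringify(42)                             # 42 -- numerics pass through unchanged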
248,008
dave-shawley/coercion
coercion.py
normalize_collection
def normalize_collection(coll):
    """
    Normalize all elements in a collection.

    :param coll: the collection to normalize. This is required to
        implement one of the following protocols:
        :class:`collections.Mapping`, :class:`collections.Sequence`,
        or :class:`collections.Set`.
    :returns: a new instance of the input class with the keys and
        values normalized via :func:`.stringify`
    :raises: :exc:`RuntimeError` if `coll` is not a collection

    This function transforms the collection by recursively transforming
    each key and value contained in it. The action is recursive but
    the implementation is unrolled and iterative. If you are interested
    in the algorithm used, it is described as comments in the code.
    """
    #
    # The recursive version of this algorithm is something like:
    #
    #     if isinstance(coll, dict):
    #         return dict((stringify(k), normalize_collection(v))
    #                     for k, v in coll.items())
    #     if isinstance(obj, (list, tuple)):
    #         return [normalize_collection(item) for item in obj]
    #     raise RuntimeError('non-container root')
    #
    # Since this is NOT simply a tail-recursive function, unrolling
    # the recursion requires that we store intermediate "frame info"
    # somewhere while processing. I chose to use two stacks for
    # this:
    #
    # value_stack: contains the produced values. The while loop
    #     appends a new container to this stack when it encounters a
    #     container on the work stack. When the algorithm terminates,
    #     we return the first (oldest) value on the stack.
    # work_stack: contains the items that need to be processed and
    #     a function to call when the value is completed. Initially,
    #     we place the input collection onto work stack without a
    #     processing function.
    #
    # The algorithm starts with the input collection on the work
    # stack. Each iteration pops the top of the stack which contains
    # a value and a completion function (inserter). If the value is
    # a collection, then we push a new container onto the value stack,
    # iterate over the container, and push each item onto the work
    # stack with a function that will insert it into the new container.
    #
    value_stack = []
    work_stack = [(coll, None)]

    def create_container(container_type, inserter):
        clone = container_type()
        if inserter:
            inserter(clone)
        value_stack.append(clone)
        return clone

    while work_stack:
        value, inserter = work_stack.pop()
        if isinstance(value, (frozenset, list, set, tuple)):
            target = create_container(list, inserter)
            inserter = functools.partial(target.insert, 0)
            for item in value:
                work_stack.append((item, inserter))
        elif isinstance(value, dict):
            target = create_container(dict, inserter)
            for key, item in value.items():
                inserter = functools.partial(target.__setitem__,
                                             stringify(key))
                work_stack.append((item, inserter))
        else:
            if inserter is None:
                raise RuntimeError(
                    'non-container root - type %r' % value.__class__)
            inserter(stringify(value))

    return value_stack[0]
python
def normalize_collection(coll):
    """
    Normalize all elements in a collection.

    :param coll: the collection to normalize. This is required to
        implement one of the following protocols:
        :class:`collections.Mapping`, :class:`collections.Sequence`,
        or :class:`collections.Set`.
    :returns: a new instance of the input class with the keys and
        values normalized via :func:`.stringify`
    :raises: :exc:`RuntimeError` if `coll` is not a collection

    This function transforms the collection by recursively transforming
    each key and value contained in it. The action is recursive but
    the implementation is unrolled and iterative. If you are interested
    in the algorithm used, it is described as comments in the code.
    """
    #
    # The recursive version of this algorithm is something like:
    #
    #     if isinstance(coll, dict):
    #         return dict((stringify(k), normalize_collection(v))
    #                     for k, v in coll.items())
    #     if isinstance(obj, (list, tuple)):
    #         return [normalize_collection(item) for item in obj]
    #     raise RuntimeError('non-container root')
    #
    # Since this is NOT simply a tail-recursive function, unrolling
    # the recursion requires that we store intermediate "frame info"
    # somewhere while processing. I chose to use two stacks for
    # this:
    #
    # value_stack: contains the produced values. The while loop
    #     appends a new container to this stack when it encounters a
    #     container on the work stack. When the algorithm terminates,
    #     we return the first (oldest) value on the stack.
    # work_stack: contains the items that need to be processed and
    #     a function to call when the value is completed. Initially,
    #     we place the input collection onto work stack without a
    #     processing function.
    #
    # The algorithm starts with the input collection on the work
    # stack. Each iteration pops the top of the stack which contains
    # a value and a completion function (inserter). If the value is
    # a collection, then we push a new container onto the value stack,
    # iterate over the container, and push each item onto the work
    # stack with a function that will insert it into the new container.
    #
    value_stack = []
    work_stack = [(coll, None)]

    def create_container(container_type, inserter):
        clone = container_type()
        if inserter:
            inserter(clone)
        value_stack.append(clone)
        return clone

    while work_stack:
        value, inserter = work_stack.pop()
        if isinstance(value, (frozenset, list, set, tuple)):
            target = create_container(list, inserter)
            inserter = functools.partial(target.insert, 0)
            for item in value:
                work_stack.append((item, inserter))
        elif isinstance(value, dict):
            target = create_container(dict, inserter)
            for key, item in value.items():
                inserter = functools.partial(target.__setitem__,
                                             stringify(key))
                work_stack.append((item, inserter))
        else:
            if inserter is None:
                raise RuntimeError(
                    'non-container root - type %r' % value.__class__)
            inserter(stringify(value))

    return value_stack[0]
[ "def", "normalize_collection", "(", "coll", ")", ":", "#", "# The recursive version of this algorithm is something like:", "#", "# if isinstance(coll, dict):", "# return dict((stringify(k), normalize_collection(v))", "# for k, v in coll.items())", "# if isinstance(obj, (list, tuple)):", "# return [normalize_collection(item) for item in obj]", "# raise RuntimeError('non-container root')", "#", "# Since this is NOT simply a tail-recursive function, unrolling", "# the recursion requires that we store intermediate \"frame info\"", "# somewhere while processing. I chose to use two stacks for", "# this:", "#", "# value_stack: contains the produced values. The while loop", "# appends a new container to this stack when it encounters a", "# container on the work stack. When the algorithm terminates,", "# we return the first (oldest) value on the stack.", "# work_stack: contains the items that need to be processed and", "# a function to call when the value is completed. Initially,", "# we place the input collection onto work stack without a", "# processing function.", "#", "# The algorithm starts with the input collection on the work", "# stack. Each iteration pops the top of the stack which contains", "# a value and a completion function (inserter). If the value is", "# a collection, then we push a new container onto the value stack,", "# iterate over the container, and push each item onto the work", "# stack with a function that will insert it into the new container.", "#", "value_stack", "=", "[", "]", "work_stack", "=", "[", "(", "coll", ",", "None", ")", "]", "def", "create_container", "(", "container_type", ",", "inserter", ")", ":", "clone", "=", "container_type", "(", ")", "if", "inserter", ":", "inserter", "(", "clone", ")", "value_stack", ".", "append", "(", "clone", ")", "return", "clone", "while", "work_stack", ":", "value", ",", "inserter", "=", "work_stack", ".", "pop", "(", ")", "if", "isinstance", "(", "value", ",", "(", "frozenset", ",", "list", ",", "set", ",", "tuple", ")", ")", ":", "target", "=", "create_container", "(", "list", ",", "inserter", ")", "inserter", "=", "functools", ".", "partial", "(", "target", ".", "insert", ",", "0", ")", "for", "item", "in", "value", ":", "work_stack", ".", "append", "(", "(", "item", ",", "inserter", ")", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "target", "=", "create_container", "(", "dict", ",", "inserter", ")", "for", "key", ",", "item", "in", "value", ".", "items", "(", ")", ":", "inserter", "=", "functools", ".", "partial", "(", "target", ".", "__setitem__", ",", "stringify", "(", "key", ")", ")", "work_stack", ".", "append", "(", "(", "item", ",", "inserter", ")", ")", "else", ":", "if", "inserter", "is", "None", ":", "raise", "RuntimeError", "(", "'non-container root - type %r'", "%", "value", ".", "__class__", ")", "inserter", "(", "stringify", "(", "value", ")", ")", "return", "value_stack", "[", "0", "]" ]
Normalize all elements in a collection.

:param coll: the collection to normalize. This is required to implement
    one of the following protocols: :class:`collections.Mapping`,
    :class:`collections.Sequence`, or :class:`collections.Set`.
:returns: a new instance of the input class with the keys and values
    normalized via :func:`.stringify`
:raises: :exc:`RuntimeError` if `coll` is not a collection

This function transforms the collection by recursively transforming each
key and value contained in it. The action is recursive but the
implementation is unrolled and iterative. If you are interested in the
algorithm used, it is described as comments in the code.
[ "Normalize", "all", "elements", "in", "a", "collection", "." ]
152c91b99310364a7198020090d7d2197ebea94d
https://github.com/dave-shawley/coercion/blob/152c91b99310364a7198020090d7d2197ebea94d/coercion.py#L65-L146
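A small illustration of the unrolled traversal, again assuming the `coercion` module is importable; note that per the code above, sets and tuples come back as lists:

import uuid

from coercion import normalize_collection

key = uuid.UUID(int=7)
normalize_collection({key: ('a', b'b')})
# -> {'00000000-0000-0000-0000-000000000007': ['a', 'b']}

# Non-container roots are rejected:
normalize_collection(42)  # raises RuntimeError: non-container root - type <class 'int'>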
248,009
klmitch/vobj
vobj/version.py
SmartVersion.available
def available(self):
    """
    Returns a set of the available versions.

    :returns: A set of integers giving the available versions.
    """
    # Short-circuit
    if not self._schema:
        return set()

    # Build up the set of available versions
    avail = set(self._schema.__vers_downgraders__.keys())
    avail.add(self._schema.__version__)

    return avail
python
def available(self):
    """
    Returns a set of the available versions.

    :returns: A set of integers giving the available versions.
    """
    # Short-circuit
    if not self._schema:
        return set()

    # Build up the set of available versions
    avail = set(self._schema.__vers_downgraders__.keys())
    avail.add(self._schema.__version__)

    return avail
[ "def", "available", "(", "self", ")", ":", "# Short-circuit", "if", "not", "self", ".", "_schema", ":", "return", "set", "(", ")", "# Build up the set of available versions", "avail", "=", "set", "(", "self", ".", "_schema", ".", "__vers_downgraders__", ".", "keys", "(", ")", ")", "avail", ".", "add", "(", "self", ".", "_schema", ".", "__version__", ")", "return", "avail" ]
Returns a set of the available versions.

:returns: A set of integers giving the available versions.
[ "Returns", "a", "set", "of", "the", "available", "versions", "." ]
4952658dc0914fe92afb2ef6e5ccca2829de6cb2
https://github.com/klmitch/vobj/blob/4952658dc0914fe92afb2ef6e5ccca2829de6cb2/vobj/version.py#L101-L116
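A sketch of what `available` computes, using a hypothetical stand-in for a vobj schema (the attribute names are taken from the code above, the values are invented):

class FakeSchema:  # hypothetical stand-in for a vobj schema object
    __version__ = 3
    __vers_downgraders__ = {1: None, 2: None}

# With self._schema = FakeSchema, available() returns the downgrader
# versions plus the current one:
#   set(FakeSchema.__vers_downgraders__.keys()) | {FakeSchema.__version__}
#   == {1, 2, 3}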
248,010
rackerlabs/rackspace-python-neutronclient
neutronclient/neutron/v2_0/__init__.py
parse_args_to_dict
def parse_args_to_dict(values_specs):
    """It is used to analyze the extra command options to command.

    Besides known options and arguments, our commands also support user to
    put more options to the end of command line. For example,
    list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1'
    is extra options to our list_nets. This feature can support V2.0 API's
    fields selection and filters. For example, to list networks which has
    name 'test4', we can have list_nets -- --name=test4.

    value spec is: --key type=int|bool|... value. Type is one of Python
    built-in types. By default, type is string. The key without value is
    a bool option. Key with two values will be a list option.
    """
    # values_specs for example: '-- --tag x y --key1 type=int value1'
    # -- is a pseudo argument
    values_specs_copy = values_specs[:]
    if values_specs_copy and values_specs_copy[0] == '--':
        del values_specs_copy[0]
    # converted ArgumentParser arguments for each of the options
    _options = {}
    # the argument part for current option in _options
    current_arg = None
    # the string after remove meta info in values_specs
    # for example, '--tag x y --key1 value1'
    _values_specs = []
    # record the count of values for an option
    # for example: for '--tag x y', it is 2, while for '--key1 value1', it is 1
    _value_number = 0
    # list=true
    _list_flag = False
    # action=clear
    _clear_flag = False
    # the current item in values_specs
    current_item = None
    # the str after 'type='
    current_type_str = None
    for _item in values_specs_copy:
        if _item.startswith('--'):
            # Deal with previous argument if any
            _process_previous_argument(
                current_arg, _value_number, current_type_str,
                _list_flag, _values_specs, _clear_flag, values_specs)

            # Init variables for current argument
            current_item = _item
            _list_flag = False
            _clear_flag = False
            current_type_str = None
            if "=" in _item:
                _value_number = 1
                _item = _item.split('=')[0]
            else:
                _value_number = 0
            if _item in _options:
                raise exceptions.CommandError(
                    _("Duplicated options %s") % ' '.join(values_specs))
            else:
                _options.update({_item: {}})
            current_arg = _options[_item]
            _item = current_item
        elif _item.startswith('type='):
            if current_arg is None:
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            if 'type' not in current_arg:
                current_type_str = _item.split('=', 2)[1]
                current_arg.update({'type': eval(current_type_str)})
                if current_type_str == 'bool':
                    current_arg.update({'type': utils.str2bool})
                elif current_type_str == 'dict':
                    current_arg.update({'type': utils.str2dict})
            continue
        elif _item == 'list=true':
            _list_flag = True
            continue
        elif _item == 'action=clear':
            _clear_flag = True
            continue

        if not _item.startswith('--'):
            # All others are value items
            # Make sure '--' occurs first and allow minus value
            if (not current_item or '=' in current_item or
                    _item.startswith('-') and not is_number(_item)):
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            _value_number += 1

        if _item.startswith('---'):
            raise exceptions.CommandError(
                _("Invalid values_specs %s") % ' '.join(values_specs))

        _values_specs.append(_item)

    # Deal with last one argument
    _process_previous_argument(
        current_arg, _value_number, current_type_str,
        _list_flag, _values_specs, _clear_flag, values_specs)

    # Populate the parser with arguments
    _parser = argparse.ArgumentParser(add_help=False)
    for opt, optspec in six.iteritems(_options):
        _parser.add_argument(opt, **optspec)
    _args = _parser.parse_args(_values_specs)

    result_dict = {}
    for opt in six.iterkeys(_options):
        _opt = opt.split('--', 2)[1]
        _opt = _opt.replace('-', '_')
        _value = getattr(_args, _opt)
        result_dict.update({_opt: _value})
    return result_dict
python
def parse_args_to_dict(values_specs):
    """It is used to analyze the extra command options to command.

    Besides known options and arguments, our commands also support user to
    put more options to the end of command line. For example,
    list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1'
    is extra options to our list_nets. This feature can support V2.0 API's
    fields selection and filters. For example, to list networks which has
    name 'test4', we can have list_nets -- --name=test4.

    value spec is: --key type=int|bool|... value. Type is one of Python
    built-in types. By default, type is string. The key without value is
    a bool option. Key with two values will be a list option.
    """
    # values_specs for example: '-- --tag x y --key1 type=int value1'
    # -- is a pseudo argument
    values_specs_copy = values_specs[:]
    if values_specs_copy and values_specs_copy[0] == '--':
        del values_specs_copy[0]
    # converted ArgumentParser arguments for each of the options
    _options = {}
    # the argument part for current option in _options
    current_arg = None
    # the string after remove meta info in values_specs
    # for example, '--tag x y --key1 value1'
    _values_specs = []
    # record the count of values for an option
    # for example: for '--tag x y', it is 2, while for '--key1 value1', it is 1
    _value_number = 0
    # list=true
    _list_flag = False
    # action=clear
    _clear_flag = False
    # the current item in values_specs
    current_item = None
    # the str after 'type='
    current_type_str = None
    for _item in values_specs_copy:
        if _item.startswith('--'):
            # Deal with previous argument if any
            _process_previous_argument(
                current_arg, _value_number, current_type_str,
                _list_flag, _values_specs, _clear_flag, values_specs)

            # Init variables for current argument
            current_item = _item
            _list_flag = False
            _clear_flag = False
            current_type_str = None
            if "=" in _item:
                _value_number = 1
                _item = _item.split('=')[0]
            else:
                _value_number = 0
            if _item in _options:
                raise exceptions.CommandError(
                    _("Duplicated options %s") % ' '.join(values_specs))
            else:
                _options.update({_item: {}})
            current_arg = _options[_item]
            _item = current_item
        elif _item.startswith('type='):
            if current_arg is None:
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            if 'type' not in current_arg:
                current_type_str = _item.split('=', 2)[1]
                current_arg.update({'type': eval(current_type_str)})
                if current_type_str == 'bool':
                    current_arg.update({'type': utils.str2bool})
                elif current_type_str == 'dict':
                    current_arg.update({'type': utils.str2dict})
            continue
        elif _item == 'list=true':
            _list_flag = True
            continue
        elif _item == 'action=clear':
            _clear_flag = True
            continue

        if not _item.startswith('--'):
            # All others are value items
            # Make sure '--' occurs first and allow minus value
            if (not current_item or '=' in current_item or
                    _item.startswith('-') and not is_number(_item)):
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            _value_number += 1

        if _item.startswith('---'):
            raise exceptions.CommandError(
                _("Invalid values_specs %s") % ' '.join(values_specs))

        _values_specs.append(_item)

    # Deal with last one argument
    _process_previous_argument(
        current_arg, _value_number, current_type_str,
        _list_flag, _values_specs, _clear_flag, values_specs)

    # Populate the parser with arguments
    _parser = argparse.ArgumentParser(add_help=False)
    for opt, optspec in six.iteritems(_options):
        _parser.add_argument(opt, **optspec)
    _args = _parser.parse_args(_values_specs)

    result_dict = {}
    for opt in six.iterkeys(_options):
        _opt = opt.split('--', 2)[1]
        _opt = _opt.replace('-', '_')
        _value = getattr(_args, _opt)
        result_dict.update({_opt: _value})
    return result_dict
[ "def", "parse_args_to_dict", "(", "values_specs", ")", ":", "# values_specs for example: '-- --tag x y --key1 type=int value1'", "# -- is a pseudo argument", "values_specs_copy", "=", "values_specs", "[", ":", "]", "if", "values_specs_copy", "and", "values_specs_copy", "[", "0", "]", "==", "'--'", ":", "del", "values_specs_copy", "[", "0", "]", "# converted ArgumentParser arguments for each of the options", "_options", "=", "{", "}", "# the argument part for current option in _options", "current_arg", "=", "None", "# the string after remove meta info in values_specs", "# for example, '--tag x y --key1 value1'", "_values_specs", "=", "[", "]", "# record the count of values for an option", "# for example: for '--tag x y', it is 2, while for '--key1 value1', it is 1", "_value_number", "=", "0", "# list=true", "_list_flag", "=", "False", "# action=clear", "_clear_flag", "=", "False", "# the current item in values_specs", "current_item", "=", "None", "# the str after 'type='", "current_type_str", "=", "None", "for", "_item", "in", "values_specs_copy", ":", "if", "_item", ".", "startswith", "(", "'--'", ")", ":", "# Deal with previous argument if any", "_process_previous_argument", "(", "current_arg", ",", "_value_number", ",", "current_type_str", ",", "_list_flag", ",", "_values_specs", ",", "_clear_flag", ",", "values_specs", ")", "# Init variables for current argument", "current_item", "=", "_item", "_list_flag", "=", "False", "_clear_flag", "=", "False", "current_type_str", "=", "None", "if", "\"=\"", "in", "_item", ":", "_value_number", "=", "1", "_item", "=", "_item", ".", "split", "(", "'='", ")", "[", "0", "]", "else", ":", "_value_number", "=", "0", "if", "_item", "in", "_options", ":", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "\"Duplicated options %s\"", ")", "%", "' '", ".", "join", "(", "values_specs", ")", ")", "else", ":", "_options", ".", "update", "(", "{", "_item", ":", "{", "}", "}", ")", "current_arg", "=", "_options", "[", "_item", "]", "_item", "=", "current_item", "elif", "_item", ".", "startswith", "(", "'type='", ")", ":", "if", "current_arg", "is", "None", ":", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "\"Invalid values_specs %s\"", ")", "%", "' '", ".", "join", "(", "values_specs", ")", ")", "if", "'type'", "not", "in", "current_arg", ":", "current_type_str", "=", "_item", ".", "split", "(", "'='", ",", "2", ")", "[", "1", "]", "current_arg", ".", "update", "(", "{", "'type'", ":", "eval", "(", "current_type_str", ")", "}", ")", "if", "current_type_str", "==", "'bool'", ":", "current_arg", ".", "update", "(", "{", "'type'", ":", "utils", ".", "str2bool", "}", ")", "elif", "current_type_str", "==", "'dict'", ":", "current_arg", ".", "update", "(", "{", "'type'", ":", "utils", ".", "str2dict", "}", ")", "continue", "elif", "_item", "==", "'list=true'", ":", "_list_flag", "=", "True", "continue", "elif", "_item", "==", "'action=clear'", ":", "_clear_flag", "=", "True", "continue", "if", "not", "_item", ".", "startswith", "(", "'--'", ")", ":", "# All others are value items", "# Make sure '--' occurs first and allow minus value", "if", "(", "not", "current_item", "or", "'='", "in", "current_item", "or", "_item", ".", "startswith", "(", "'-'", ")", "and", "not", "is_number", "(", "_item", ")", ")", ":", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "\"Invalid values_specs %s\"", ")", "%", "' '", ".", "join", "(", "values_specs", ")", ")", "_value_number", "+=", "1", "if", "_item", ".", "startswith", "(", "'---'", ")", ":", 
"raise", "exceptions", ".", "CommandError", "(", "_", "(", "\"Invalid values_specs %s\"", ")", "%", "' '", ".", "join", "(", "values_specs", ")", ")", "_values_specs", ".", "append", "(", "_item", ")", "# Deal with last one argument", "_process_previous_argument", "(", "current_arg", ",", "_value_number", ",", "current_type_str", ",", "_list_flag", ",", "_values_specs", ",", "_clear_flag", ",", "values_specs", ")", "# Populate the parser with arguments", "_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "for", "opt", ",", "optspec", "in", "six", ".", "iteritems", "(", "_options", ")", ":", "_parser", ".", "add_argument", "(", "opt", ",", "*", "*", "optspec", ")", "_args", "=", "_parser", ".", "parse_args", "(", "_values_specs", ")", "result_dict", "=", "{", "}", "for", "opt", "in", "six", ".", "iterkeys", "(", "_options", ")", ":", "_opt", "=", "opt", ".", "split", "(", "'--'", ",", "2", ")", "[", "1", "]", "_opt", "=", "_opt", ".", "replace", "(", "'-'", ",", "'_'", ")", "_value", "=", "getattr", "(", "_args", ",", "_opt", ")", "result_dict", ".", "update", "(", "{", "_opt", ":", "_value", "}", ")", "return", "result_dict" ]
It is used to analyze the extra command options to command.

Besides known options and arguments, our commands also support user to
put more options to the end of command line. For example,
list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1'
is extra options to our list_nets. This feature can support V2.0 API's
fields selection and filters. For example, to list networks which has
name 'test4', we can have list_nets -- --name=test4.

value spec is: --key type=int|bool|... value. Type is one of Python
built-in types. By default, type is string. The key without value is
a bool option. Key with two values will be a list option.
[ "It", "is", "used", "to", "analyze", "the", "extra", "command", "options", "to", "command", "." ]
5a5009a8fe078e3aa1d582176669f1b28ab26bef
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/__init__.py#L229-L342
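A hedged sketch of the mapping this function produces, with the expected outputs inferred from its docstring rather than run against the real module:

# '--name=test4'  -> a single value supplied via '='
# '--tag x y'     -> two values become a list option (per the docstring)
#
# parse_args_to_dict(['--', '--name=test4'])
#     == {'name': 'test4'}
# parse_args_to_dict(['--', '--tag', 'x', 'y'])
#     == {'tag': ['x', 'y']}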
248,011
rackerlabs/rackspace-python-neutronclient
neutronclient/neutron/v2_0/__init__.py
_merge_args
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
    """Merge arguments from _extra_values into parsed_args.

    If an argument value are provided in both and it is a list,
    the values in _extra_values will be merged into parsed_args.

    @param parsed_args: the parsed args from known options
    @param _extra_values: the other parsed arguments in unknown parts
    @param values_specs: the unparsed unknown parts
    """
    temp_values = _extra_values.copy()
    for key, value in six.iteritems(temp_values):
        if hasattr(parsed_args, key):
            arg_value = getattr(parsed_args, key)
            if arg_value is not None and value is not None:
                if isinstance(arg_value, list):
                    if value and isinstance(value, list):
                        if (not arg_value or
                                isinstance(arg_value[0], type(value[0]))):
                            arg_value.extend(value)
                            _extra_values.pop(key)
python
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
    """Merge arguments from _extra_values into parsed_args.

    If an argument value are provided in both and it is a list,
    the values in _extra_values will be merged into parsed_args.

    @param parsed_args: the parsed args from known options
    @param _extra_values: the other parsed arguments in unknown parts
    @param values_specs: the unparsed unknown parts
    """
    temp_values = _extra_values.copy()
    for key, value in six.iteritems(temp_values):
        if hasattr(parsed_args, key):
            arg_value = getattr(parsed_args, key)
            if arg_value is not None and value is not None:
                if isinstance(arg_value, list):
                    if value and isinstance(value, list):
                        if (not arg_value or
                                isinstance(arg_value[0], type(value[0]))):
                            arg_value.extend(value)
                            _extra_values.pop(key)
[ "def", "_merge_args", "(", "qCmd", ",", "parsed_args", ",", "_extra_values", ",", "value_specs", ")", ":", "temp_values", "=", "_extra_values", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "temp_values", ")", ":", "if", "hasattr", "(", "parsed_args", ",", "key", ")", ":", "arg_value", "=", "getattr", "(", "parsed_args", ",", "key", ")", "if", "arg_value", "is", "not", "None", "and", "value", "is", "not", "None", ":", "if", "isinstance", "(", "arg_value", ",", "list", ")", ":", "if", "value", "and", "isinstance", "(", "value", ",", "list", ")", ":", "if", "(", "not", "arg_value", "or", "isinstance", "(", "arg_value", "[", "0", "]", ",", "type", "(", "value", "[", "0", "]", ")", ")", ")", ":", "arg_value", ".", "extend", "(", "value", ")", "_extra_values", ".", "pop", "(", "key", ")" ]
Merge arguments from _extra_values into parsed_args.

If an argument value are provided in both and it is a list,
the values in _extra_values will be merged into parsed_args.

@param parsed_args: the parsed args from known options
@param _extra_values: the other parsed arguments in unknown parts
@param values_specs: the unparsed unknown parts
[ "Merge", "arguments", "from", "_extra_values", "into", "parsed_args", "." ]
5a5009a8fe078e3aa1d582176669f1b28ab26bef
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/__init__.py#L345-L365
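A sketch of the list-merge behavior above, using a hypothetical argparse namespace in place of a real command's parsed args:

import argparse

parsed = argparse.Namespace(tag=['a'])
extra = {'tag': ['x', 'y']}

# _merge_args(None, parsed, extra, []) extends the known option's list with
# the extra values and drops the key from the extras, so afterwards:
#   parsed.tag == ['a', 'x', 'y'] and extra == {}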
248,012
rackerlabs/rackspace-python-neutronclient
neutronclient/neutron/v2_0/__init__.py
update_dict
def update_dict(obj, dict, attributes):
    """Update dict with fields from obj.attributes.

    :param obj: the object updated into dict
    :param dict: the result dictionary
    :param attributes: a list of attributes belonging to obj
    """
    for attribute in attributes:
        if hasattr(obj, attribute) and getattr(obj, attribute) is not None:
            dict[attribute] = getattr(obj, attribute)
python
def update_dict(obj, dict, attributes):
    """Update dict with fields from obj.attributes.

    :param obj: the object updated into dict
    :param dict: the result dictionary
    :param attributes: a list of attributes belonging to obj
    """
    for attribute in attributes:
        if hasattr(obj, attribute) and getattr(obj, attribute) is not None:
            dict[attribute] = getattr(obj, attribute)
[ "def", "update_dict", "(", "obj", ",", "dict", ",", "attributes", ")", ":", "for", "attribute", "in", "attributes", ":", "if", "hasattr", "(", "obj", ",", "attribute", ")", "and", "getattr", "(", "obj", ",", "attribute", ")", "is", "not", "None", ":", "dict", "[", "attribute", "]", "=", "getattr", "(", "obj", ",", "attribute", ")" ]
Update dict with fields from obj.attributes.

:param obj: the object updated into dict
:param dict: the result dictionary
:param attributes: a list of attributes belonging to obj
[ "Update", "dict", "with", "fields", "from", "obj", ".", "attributes", "." ]
5a5009a8fe078e3aa1d582176669f1b28ab26bef
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/__init__.py#L368-L377
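A minimal sketch: attributes that are absent or None are skipped, everything else is copied into the target dict (the attribute names here are hypothetical):

import argparse

args = argparse.Namespace(name='net1', shared=None)
body = {}
update_dict(args, body, ['name', 'shared', 'tenant_id'])
# body == {'name': 'net1'} -- 'shared' is None and 'tenant_id' is absent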
248,013
rackerlabs/rackspace-python-neutronclient
neutronclient/neutron/v2_0/__init__.py
ListCommand.retrieve_list
def retrieve_list(self, parsed_args):
    """Retrieve a list of resources from Neutron server."""
    neutron_client = self.get_client()
    _extra_values = parse_args_to_dict(self.values_specs)
    _merge_args(self, parsed_args, _extra_values, self.values_specs)
    search_opts = self.args2search_opts(parsed_args)
    search_opts.update(_extra_values)
    if self.pagination_support:
        page_size = parsed_args.page_size
        if page_size:
            search_opts.update({'limit': page_size})
    if self.sorting_support:
        keys = parsed_args.sort_key
        if keys:
            search_opts.update({'sort_key': keys})
        dirs = parsed_args.sort_dir
        len_diff = len(keys) - len(dirs)
        if len_diff > 0:
            dirs += ['asc'] * len_diff
        elif len_diff < 0:
            dirs = dirs[:len(keys)]
        if dirs:
            search_opts.update({'sort_dir': dirs})
    data = self.call_server(neutron_client, search_opts, parsed_args)
    collection = _get_resource_plural(self.resource, neutron_client)
    return data.get(collection, [])
python
def retrieve_list(self, parsed_args):
    """Retrieve a list of resources from Neutron server."""
    neutron_client = self.get_client()
    _extra_values = parse_args_to_dict(self.values_specs)
    _merge_args(self, parsed_args, _extra_values, self.values_specs)
    search_opts = self.args2search_opts(parsed_args)
    search_opts.update(_extra_values)
    if self.pagination_support:
        page_size = parsed_args.page_size
        if page_size:
            search_opts.update({'limit': page_size})
    if self.sorting_support:
        keys = parsed_args.sort_key
        if keys:
            search_opts.update({'sort_key': keys})
        dirs = parsed_args.sort_dir
        len_diff = len(keys) - len(dirs)
        if len_diff > 0:
            dirs += ['asc'] * len_diff
        elif len_diff < 0:
            dirs = dirs[:len(keys)]
        if dirs:
            search_opts.update({'sort_dir': dirs})
    data = self.call_server(neutron_client, search_opts, parsed_args)
    collection = _get_resource_plural(self.resource, neutron_client)
    return data.get(collection, [])
[ "def", "retrieve_list", "(", "self", ",", "parsed_args", ")", ":", "neutron_client", "=", "self", ".", "get_client", "(", ")", "_extra_values", "=", "parse_args_to_dict", "(", "self", ".", "values_specs", ")", "_merge_args", "(", "self", ",", "parsed_args", ",", "_extra_values", ",", "self", ".", "values_specs", ")", "search_opts", "=", "self", ".", "args2search_opts", "(", "parsed_args", ")", "search_opts", ".", "update", "(", "_extra_values", ")", "if", "self", ".", "pagination_support", ":", "page_size", "=", "parsed_args", ".", "page_size", "if", "page_size", ":", "search_opts", ".", "update", "(", "{", "'limit'", ":", "page_size", "}", ")", "if", "self", ".", "sorting_support", ":", "keys", "=", "parsed_args", ".", "sort_key", "if", "keys", ":", "search_opts", ".", "update", "(", "{", "'sort_key'", ":", "keys", "}", ")", "dirs", "=", "parsed_args", ".", "sort_dir", "len_diff", "=", "len", "(", "keys", ")", "-", "len", "(", "dirs", ")", "if", "len_diff", ">", "0", ":", "dirs", "+=", "[", "'asc'", "]", "*", "len_diff", "elif", "len_diff", "<", "0", ":", "dirs", "=", "dirs", "[", ":", "len", "(", "keys", ")", "]", "if", "dirs", ":", "search_opts", ".", "update", "(", "{", "'sort_dir'", ":", "dirs", "}", ")", "data", "=", "self", ".", "call_server", "(", "neutron_client", ",", "search_opts", ",", "parsed_args", ")", "collection", "=", "_get_resource_plural", "(", "self", ".", "resource", ",", "neutron_client", ")", "return", "data", ".", "get", "(", "collection", ",", "[", "]", ")" ]
Retrieve a list of resources from Neutron server.
[ "Retrieve", "a", "list", "of", "resources", "from", "Neutron", "server", "." ]
5a5009a8fe078e3aa1d582176669f1b28ab26bef
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/__init__.py#L707-L733
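The sort handling above pads or trims `sort_dir` to match `sort_key`; a quick sketch of that reconciliation with illustrative values:

# keys = ['name', 'id'], dirs = ['desc']   -> len_diff = 1
#   dirs becomes ['desc', 'asc']           (padded with 'asc')
# keys = ['name'], dirs = ['desc', 'asc']  -> len_diff = -1
#   dirs becomes ['desc']                  (trimmed to len(keys))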
248,014
treycucco/bidon
bidon/db/access/pg_advisory_lock.py
lock_key
def lock_key(group_id, item_id, group_width=8):
    """Creates a lock ID where the lower bits are the group ID and the upper
    bits are the item ID. This allows the use of a bigint namespace for items,
    with a limited space for grouping.

    :group_id: an integer identifying the group. Must be less than
        2 ^ :group_width:
    :item_id: item_id an integer. must be less than 2 ^ (63 - :group_width:) - 1
    :gropu_width: the number of bits to reserve for the group ID.
    """
    if group_id >= (1 << group_width):
        raise Exception("Group ID is too big")
    if item_id >= (1 << (63 - group_width)) - 1:
        raise Exception("Item ID is too big")

    return (item_id << group_width) | group_id
python
def lock_key(group_id, item_id, group_width=8):
    """Creates a lock ID where the lower bits are the group ID and the upper
    bits are the item ID. This allows the use of a bigint namespace for items,
    with a limited space for grouping.

    :group_id: an integer identifying the group. Must be less than
        2 ^ :group_width:
    :item_id: item_id an integer. must be less than 2 ^ (63 - :group_width:) - 1
    :gropu_width: the number of bits to reserve for the group ID.
    """
    if group_id >= (1 << group_width):
        raise Exception("Group ID is too big")
    if item_id >= (1 << (63 - group_width)) - 1:
        raise Exception("Item ID is too big")

    return (item_id << group_width) | group_id
[ "def", "lock_key", "(", "group_id", ",", "item_id", ",", "group_width", "=", "8", ")", ":", "if", "group_id", ">=", "(", "1", "<<", "group_width", ")", ":", "raise", "Exception", "(", "\"Group ID is too big\"", ")", "if", "item_id", ">=", "(", "1", "<<", "(", "63", "-", "group_width", ")", ")", "-", "1", ":", "raise", "Exception", "(", "\"Item ID is too big\"", ")", "return", "(", "item_id", "<<", "group_width", ")", "|", "group_id" ]
Creates a lock ID where the lower bits are the group ID and the upper bits
are the item ID. This allows the use of a bigint namespace for items, with
a limited space for grouping.

:group_id: an integer identifying the group. Must be less than
    2 ^ :group_width:
:item_id: item_id an integer. must be less than 2 ^ (63 - :group_width:) - 1
:gropu_width: the number of bits to reserve for the group ID.
[ "Creates", "a", "lock", "ID", "where", "the", "lower", "bits", "are", "the", "group", "ID", "and", "the", "upper", "bits", "are", "the", "item", "ID", ".", "This", "allows", "the", "use", "of", "a", "bigint", "namespace", "for", "items", "with", "a", "limited", "space", "for", "grouping", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/pg_advisory_lock.py#L16-L30
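A worked example of the bit packing with the default group_width of 8 (the group and item IDs are arbitrary sample values):

key = lock_key(group_id=5, item_id=1000)
# (1000 << 8) | 5 == 256005

# Unpacking recovers both parts:
assert key >> 8 == 1000   # item ID lives in the upper bits
assert key & 0xFF == 5    # group ID lives in the low 8 bits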
248,015
treycucco/bidon
bidon/db/access/pg_advisory_lock.py
release_lock
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    lock_fxn = _lock_fxn("unlock", lock_mode, False)
    return dax.get_scalar(
        dax.callproc(lock_fxn, key if isinstance(key, (list, tuple)) else [key])[0])
python
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    lock_fxn = _lock_fxn("unlock", lock_mode, False)
    return dax.get_scalar(
        dax.callproc(lock_fxn, key if isinstance(key, (list, tuple)) else [key])[0])
[ "def", "release_lock", "(", "dax", ",", "key", ",", "lock_mode", "=", "LockMode", ".", "wait", ")", ":", "lock_fxn", "=", "_lock_fxn", "(", "\"unlock\"", ",", "lock_mode", ",", "False", ")", "return", "dax", ".", "get_scalar", "(", "dax", ".", "callproc", "(", "lock_fxn", ",", "key", "if", "isinstance", "(", "key", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "key", "]", ")", "[", "0", "]", ")" ]
Manually release a pg advisory lock.

:dax: a DataAccess instance
:key: either a big int or a 2-tuple of integers
:lock_mode: a member of the LockMode enum
[ "Manually", "release", "a", "pg", "advisory", "lock", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/pg_advisory_lock.py#L47-L56
248,016
treycucco/bidon
bidon/db/access/pg_advisory_lock.py
advisory_lock
def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False):
    """A context manager for obtaining a lock, executing code, and then
    releasing the lock. A boolean value is passed to the block indicating
    whether or not the lock was obtained.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum. Determines how this function
        operates:
        - wait: the wrapped code will not be executed until the lock is
          obtained.
        - skip: an attempt will be made to get the lock, and if unsuccessful,
          False is passed to the code block
        - error: an attempt will be made to get the lock, and if unsuccessful,
          an exception will be raised.
    :xact: a boolean, if True, the lock will be obtained according to
        lock_mode, but will not be released after the code is executed, since
        it will be automatically released at the end of the transaction.
    """
    if lock_mode == LockMode.wait:
        obtain_lock(dax, key, lock_mode, xact)
    else:
        got_lock = obtain_lock(dax, key, lock_mode, xact)
        if not got_lock:
            if lock_mode == LockMode.error:
                raise Exception("Unable to obtain advisory lock {}".format(key))
            else:
                # lock_mode is skip
                yield False
                return

    # At this point we have the lock
    try:
        yield True
    finally:
        if not xact:
            release_lock(dax, key, lock_mode)
python
def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False):
    """A context manager for obtaining a lock, executing code, and then
    releasing the lock. A boolean value is passed to the block indicating
    whether or not the lock was obtained.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum. Determines how this function
        operates:
        - wait: the wrapped code will not be executed until the lock is
          obtained.
        - skip: an attempt will be made to get the lock, and if unsuccessful,
          False is passed to the code block
        - error: an attempt will be made to get the lock, and if unsuccessful,
          an exception will be raised.
    :xact: a boolean, if True, the lock will be obtained according to
        lock_mode, but will not be released after the code is executed, since
        it will be automatically released at the end of the transaction.
    """
    if lock_mode == LockMode.wait:
        obtain_lock(dax, key, lock_mode, xact)
    else:
        got_lock = obtain_lock(dax, key, lock_mode, xact)
        if not got_lock:
            if lock_mode == LockMode.error:
                raise Exception("Unable to obtain advisory lock {}".format(key))
            else:
                # lock_mode is skip
                yield False
                return

    # At this point we have the lock
    try:
        yield True
    finally:
        if not xact:
            release_lock(dax, key, lock_mode)
[ "def", "advisory_lock", "(", "dax", ",", "key", ",", "lock_mode", "=", "LockMode", ".", "wait", ",", "xact", "=", "False", ")", ":", "if", "lock_mode", "==", "LockMode", ".", "wait", ":", "obtain_lock", "(", "dax", ",", "key", ",", "lock_mode", ",", "xact", ")", "else", ":", "got_lock", "=", "obtain_lock", "(", "dax", ",", "key", ",", "lock_mode", ",", "xact", ")", "if", "not", "got_lock", ":", "if", "lock_mode", "==", "LockMode", ".", "error", ":", "raise", "Exception", "(", "\"Unable to obtain advisory lock {}\"", ".", "format", "(", "key", ")", ")", "else", ":", "# lock_mode is skip", "yield", "False", "return", "# At this point we have the lock", "try", ":", "yield", "True", "finally", ":", "if", "not", "xact", ":", "release_lock", "(", "dax", ",", "key", ",", "lock_mode", ")" ]
A context manager for obtaining a lock, executing code, and then releasing the lock. A boolean value is passed to the block indicating whether or not the lock was obtained. :dax: a DataAccess instance :key: either a big int or a 2-tuple of integers :lock_mode: a member of the LockMode enum. Determines how this function operates: - wait: the wrapped code will not be executed until the lock is obtained. - skip: an attempt will be made to get the lock, and if unsuccessful, False is passed to the code block - error: an attempt will be made to get the lock, and if unsuccessful, an exception will be raised. :xact: a boolean, if True, the lock will be obtained according to lock_mode, but will not be released after the code is executed, since it will be automatically released at the end of the transaction.
[ "A", "context", "manager", "for", "obtaining", "a", "lock", "executing", "code", "and", "then", "releasing", "the", "lock", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/pg_advisory_lock.py#L60-L98
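A minimal usage sketch for the advisory_lock entry above, assuming a connected bidon DataAccess instance named dax (its construction is elided) and assuming advisory_lock is wrapped with contextlib.contextmanager in the source module, which its yield statements and "context manager" docstring imply but this extract does not show:

# Guard a cluster-wide job with advisory lock key 42; skip if another worker holds it.
from bidon.db.access.pg_advisory_lock import advisory_lock, LockMode

with advisory_lock(dax, 42, lock_mode=LockMode.skip) as got_lock:
    if got_lock:
        refresh_nightly_report()  # hypothetical exclusive work
    # got_lock is False when another session already holds key 42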
248,017
treycucco/bidon
bidon/db/access/pg_advisory_lock.py
_lock_fxn
def _lock_fxn(direction, lock_mode, xact): """Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released. """ if direction == "unlock" or lock_mode == LockMode.wait: try_mode = "" else: try_mode = "_try" if direction == "lock" and xact: xact_mode = "_xact" else: xact_mode = "" return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction)
python
def _lock_fxn(direction, lock_mode, xact): """Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released. """ if direction == "unlock" or lock_mode == LockMode.wait: try_mode = "" else: try_mode = "_try" if direction == "lock" and xact: xact_mode = "_xact" else: xact_mode = "" return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction)
[ "def", "_lock_fxn", "(", "direction", ",", "lock_mode", ",", "xact", ")", ":", "if", "direction", "==", "\"unlock\"", "or", "lock_mode", "==", "LockMode", ".", "wait", ":", "try_mode", "=", "\"\"", "else", ":", "try_mode", "=", "\"_try\"", "if", "direction", "==", "\"lock\"", "and", "xact", ":", "xact_mode", "=", "\"_xact\"", "else", ":", "xact_mode", "=", "\"\"", "return", "\"pg{}_advisory{}_{}\"", ".", "format", "(", "try_mode", ",", "xact_mode", ",", "direction", ")" ]
Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released.
[ "Builds", "a", "pg", "advisory", "lock", "function", "name", "based", "on", "various", "options", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/pg_advisory_lock.py#L101-L119
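To make the name-building rules in _lock_fxn concrete, here is a sketch of the PostgreSQL function names it yields for a few option combinations; these are the server's real advisory-lock functions, and the only assumption is the LockMode member names used throughout this module:

# direction, lock_mode, xact  ->  generated pg function name
assert _lock_fxn("lock", LockMode.wait, False) == "pg_advisory_lock"
assert _lock_fxn("lock", LockMode.skip, False) == "pg_try_advisory_lock"
assert _lock_fxn("lock", LockMode.skip, True) == "pg_try_advisory_xact_lock"
assert _lock_fxn("unlock", LockMode.skip, False) == "pg_advisory_unlock"  # unlocks never use _try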
248,018
leosartaj/sub
sub/main.py
get_hash
def get_hash(fName, readSize, dire=pDir()): """ creates the required hash """ if not fileExists(fName, dire): return -1 readSize = readSize * 1024 # bytes to be read fName = os.path.join(dire, fName) # name coupled with path with open(fName, 'rb') as f: size = os.path.getsize(fName) if size < readSize * 2: return -1 data = f.read(readSize) f.seek(-readSize, os.SEEK_END) data += f.read(readSize) return md5(data).hexdigest()
python
def get_hash(fName, readSize, dire=pDir()): """ creates the required hash """ if not fileExists(fName, dire): return -1 readSize = readSize * 1024 # bytes to be read fName = os.path.join(dire, fName) # name coupled with path with open(fName, 'rb') as f: size = os.path.getsize(fName) if size < readSize * 2: return -1 data = f.read(readSize) f.seek(-readSize, os.SEEK_END) data += f.read(readSize) return md5(data).hexdigest()
[ "def", "get_hash", "(", "fName", ",", "readSize", ",", "dire", "=", "pDir", "(", ")", ")", ":", "if", "not", "fileExists", "(", "fName", ",", "dire", ")", ":", "return", "-", "1", "readSize", "=", "readSize", "*", "1024", "# bytes to be read", "fName", "=", "os", ".", "path", ".", "join", "(", "dire", ",", "fName", ")", "# name coupled with path", "with", "open", "(", "fName", ",", "'rb'", ")", "as", "f", ":", "size", "=", "os", ".", "path", ".", "getsize", "(", "fName", ")", "if", "size", "<", "readSize", "*", "2", ":", "return", "-", "1", "data", "=", "f", ".", "read", "(", "readSize", ")", "f", ".", "seek", "(", "-", "readSize", ",", "os", ".", "SEEK_END", ")", "data", "+=", "f", ".", "read", "(", "readSize", ")", "return", "md5", "(", "data", ")", ".", "hexdigest", "(", ")" ]
creates the required hash
[ "creates", "the", "required", "hash" ]
9a8e55a5326c3b41357eedd235e7c36f253db2e0
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L37-L52
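The hash built by get_hash follows the SubDB API convention: the md5 digest of the first and the last readSize kibibytes of the video file. A self-contained sketch of the same computation, without the helper's directory plumbing (returning None instead of -1 is a choice of this sketch):

import os
from hashlib import md5

def subdb_hash(path, read_size_kb=64):
    read_size = read_size_kb * 1024
    if os.path.getsize(path) < read_size * 2:
        return None  # too small to read both ends without overlap
    with open(path, 'rb') as f:
        data = f.read(read_size)           # first chunk
        f.seek(-read_size, os.SEEK_END)
        data += f.read(read_size)          # last chunk
    return md5(data).hexdigest()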
248,019
leosartaj/sub
sub/main.py
download_file
def download_file(fName, time, dire=pDir()): """ download the required subtitle """ # hash gen_hash = get_hash(fName, 64, dire) if gen_hash == -1: return -1 # making request user_agent = {'User-agent': 'SubDB/1.0 (sub/0.1; http://github.com/leosartaj/sub)'} param = {'action': 'download', 'hash': gen_hash, 'language': 'en'} # Specification for the request try: r = requests.get("http://api.thesubdb.com/", headers = user_agent, params = param, timeout=time) # Get Request except (requests.exceptions.Timeout, socket.error): return 'Timeout Error' if r.status_code != 200: return r.status_code # save file fName, fExt = os.path.splitext(fName) fName += '.srt' # replace extension with srt fName = os.path.join(dire, fName) # name coupled with path with open(fName, 'wb') as f: f.write(r.text.encode('ascii', 'ignore')) return r.status_code
python
def download_file(fName, time, dire=pDir()): """ download the required subtitle """ # hash gen_hash = get_hash(fName, 64, dire) if gen_hash == -1: return -1 # making request user_agent = {'User-agent': 'SubDB/1.0 (sub/0.1; http://github.com/leosartaj/sub)'} param = {'action': 'download', 'hash': gen_hash, 'language': 'en'} # Specification for the request try: r = requests.get("http://api.thesubdb.com/", headers = user_agent, params = param, timeout=time) # Get Request except (requests.exceptions.Timeout, socket.error): return 'Timeout Error' if r.status_code != 200: return r.status_code # save file fName, fExt = os.path.splitext(fName) fName += '.srt' # replace extension with srt fName = os.path.join(dire, fName) # name coupled with path with open(fName, 'wb') as f: f.write(r.text.encode('ascii', 'ignore')) return r.status_code
[ "def", "download_file", "(", "fName", ",", "time", ",", "dire", "=", "pDir", "(", ")", ")", ":", "# hash", "gen_hash", "=", "get_hash", "(", "fName", ",", "64", ",", "dire", ")", "if", "gen_hash", "==", "-", "1", ":", "return", "-", "1", "# making request", "user_agent", "=", "{", "'User-agent'", ":", "'SubDB/1.0 (sub/0.1; http://github.com/leosartaj/sub)'", "}", "param", "=", "{", "'action'", ":", "'download'", ",", "'hash'", ":", "gen_hash", ",", "'language'", ":", "'en'", "}", "# Specification for the request", "try", ":", "r", "=", "requests", ".", "get", "(", "\"http://api.thesubdb.com/\"", ",", "headers", "=", "user_agent", ",", "params", "=", "param", ",", "timeout", "=", "time", ")", "# Get Request", "except", "(", "requests", ".", "exceptions", ".", "Timeout", ",", "socket", ".", "error", ")", ":", "return", "'Timeout Error'", "if", "r", ".", "status_code", "!=", "200", ":", "return", "r", ".", "status_code", "# save file", "fName", ",", "fExt", "=", "os", ".", "path", ".", "splitext", "(", "fName", ")", "fName", "+=", "'.srt'", "# replace extension with srt", "fName", "=", "os", ".", "path", ".", "join", "(", "dire", ",", "fName", ")", "# name coupled with path", "with", "open", "(", "fName", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "r", ".", "text", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ")", "return", "r", ".", "status_code" ]
download the required subtitle
[ "download", "the", "required", "subtitle" ]
9a8e55a5326c3b41357eedd235e7c36f253db2e0
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L54-L80
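A hedged usage sketch for download_file above: on a hash match the SubDB API answers HTTP 200 with the subtitle body, so the helper's return value doubles as a status report (the file and directory names here are hypothetical):

status = download_file('movie.mkv', time=10, dire='/videos')
if status == 200:
    print('subtitle saved next to the video')
elif status == -1:
    print('file missing or too small to hash')
else:
    print('no subtitle available:', status)  # e.g. 404 or 'Timeout Error'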
248,020
leosartaj/sub
sub/main.py
file_downloaded
def file_downloaded(dwn, fName, verbose=False): """ print for downloaded file """ if verbose: if dwn == 200: fName, fExt = os.path.splitext(fName) print('Downloaded ' + fName + '.srt') return True elif dwn != -1: print('Tried downloading got ' + str(dwn) + ' for ' + fName) return False
python
def file_downloaded(dwn, fName, verbose=False): """ print for downloaded file """ if verbose: if dwn == 200: fName, fExt = os.path.splitext(fName) print('Downloaded ' + fName + '.srt') return True elif dwn != -1: print('Tried downloading got ' + str(dwn) + ' for ' + fName) return False
[ "def", "file_downloaded", "(", "dwn", ",", "fName", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "if", "dwn", "==", "200", ":", "fName", ",", "fExt", "=", "os", ".", "path", ".", "splitext", "(", "fName", ")", "print", "'Downloaded '", "+", "fName", "+", "'.srt'", "return", "True", "elif", "dwn", "!=", "-", "1", ":", "print", "'Tried downloading got '", "+", "str", "(", "dwn", ")", "+", "' for '", "+", "fName", "return", "False" ]
print for downloaded file
[ "print", "for", "downloaded", "file" ]
9a8e55a5326c3b41357eedd235e7c36f253db2e0
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L82-L93
248,021
leosartaj/sub
sub/main.py
download
def download(name, options): """ download a file or all files in a directory """ dire = os.path.dirname(name) # returns the directory name fName = os.path.basename(name) # returns the filename fNameOnly, fExt = os.path.splitext(fName) dwn = 0 if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose): dwn += 1 elif dirExists(name): for filename in os.listdir(name): if options.recursive: dwn += download(os.path.join(name, filename), options) else: if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose): dwn += 1 return dwn
python
def download(name, options): """ download a file or all files in a directory """ dire = os.path.dirname(name) # returns the directory name fName = os.path.basename(name) # returns the filename fNameOnly, fExt = os.path.splitext(fName) dwn = 0 if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose): dwn += 1 elif dirExists(name): for filename in os.listdir(name): if options.recursive: dwn += download(os.path.join(name, filename), options) else: if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose): dwn += 1 return dwn
[ "def", "download", "(", "name", ",", "options", ")", ":", "dire", "=", "os", ".", "path", ".", "dirname", "(", "name", ")", "# returns the directory name", "fName", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "# returns the filename", "fNameOnly", ",", "fExt", "=", "os", ".", "path", ".", "splitext", "(", "fName", ")", "dwn", "=", "0", "if", "fileExists", "(", "fName", ",", "dire", ")", "and", "not", "fileExists", "(", "(", "fNameOnly", "+", "'.srt'", ")", ",", "dire", ")", ":", "# skip if already downloaded", "if", "file_downloaded", "(", "download_file", "(", "fName", ",", "options", ".", "timeout", ",", "dire", ")", ",", "fName", ",", "options", ".", "verbose", ")", ":", "dwn", "+=", "1", "elif", "dirExists", "(", "name", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "name", ")", ":", "if", "options", ".", "recursive", ":", "dwn", "+=", "download", "(", "os", ".", "path", ".", "join", "(", "name", ",", "filename", ")", ",", "options", ")", "else", ":", "if", "file_downloaded", "(", "download_file", "(", "filename", ",", "options", ".", "timeout", ",", "name", ")", ",", "filename", ",", "options", ".", "verbose", ")", ":", "dwn", "+=", "1", "return", "dwn" ]
download a file or all files in a directory
[ "download", "a", "file", "or", "all", "files", "in", "a", "directory" ]
9a8e55a5326c3b41357eedd235e7c36f253db2e0
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L95-L115
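Since download above reads only three attributes from options (timeout, verbose and recursive), any attribute container will do; a sketch using types.SimpleNamespace with a hypothetical directory:

from types import SimpleNamespace

opts = SimpleNamespace(timeout=10, verbose=True, recursive=True)
count = download('/videos', opts)  # walks the whole tree because recursive=True
print('%d subtitles downloaded' % count)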
248,022
ulf1/oxyba
oxyba/clean_dateobject_to_string.py
clean_dateobject_to_string
def clean_dateobject_to_string(x): """Convert a Pandas Timestamp object or datetime object to 'YYYY-MM-DD' string Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A Pandas Timestamp object or datetime object, or an array of these objects Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame A string 'YYYY-MM-DD' or array of date strings. Example ------- The function aims to convert a date object to a string as follows Timestamp('2014-09-23 00:00:00') => '2014-09-23' datetime.datetime(2014,9,23,0,0) => '2014-09-23' Code Example ------------ print(clean_dateobject_to_string(pd.Timestamp('2014-09-23 00:00:00'))) '2014-09-23' print(clean_dateobject_to_string(datetime(2014,9,23,0,0))) '2014-09-23' Behavior -------- - If it is not an object with a strftime function, then None is returned """ import numpy as np import pandas as pd def proc_elem(e): try: return e.strftime("%Y-%m-%d") except Exception as e: print(e) return None def proc_list(x): return [proc_elem(e) for e in x] def proc_ndarray(x): tmp = proc_list(list(x.reshape((x.size,)))) return np.array(tmp).reshape(x.shape) # transform string, list/tuple, numpy array, pandas dataframe if "strftime" in dir(x): return proc_elem(x) elif isinstance(x, (list, tuple)): return proc_list(x) elif isinstance(x, np.ndarray): return proc_ndarray(x) elif isinstance(x, pd.DataFrame): return pd.DataFrame(proc_ndarray(x.values), columns=x.columns, index=x.index) else: return None
python
def clean_dateobject_to_string(x): """Convert a Pandas Timestamp object or datetime object to 'YYYY-MM-DD' string Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A Pandas Timestamp object or datetime object, or an array of these objects Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame A string 'YYYY-MM-DD' or array of date strings. Example ------- The function aims to convert a date object to a string as follows Timestamp('2014-09-23 00:00:00') => '2014-09-23' datetime.datetime(2014,9,23,0,0) => '2014-09-23' Code Example ------------ print(clean_dateobject_to_string(pd.Timestamp('2014-09-23 00:00:00'))) '2014-09-23' print(clean_dateobject_to_string(datetime(2014,9,23,0,0))) '2014-09-23' Behavior -------- - If it is not an object with a strftime function, then None is returned """ import numpy as np import pandas as pd def proc_elem(e): try: return e.strftime("%Y-%m-%d") except Exception as e: print(e) return None def proc_list(x): return [proc_elem(e) for e in x] def proc_ndarray(x): tmp = proc_list(list(x.reshape((x.size,)))) return np.array(tmp).reshape(x.shape) # transform string, list/tuple, numpy array, pandas dataframe if "strftime" in dir(x): return proc_elem(x) elif isinstance(x, (list, tuple)): return proc_list(x) elif isinstance(x, np.ndarray): return proc_ndarray(x) elif isinstance(x, pd.DataFrame): return pd.DataFrame(proc_ndarray(x.values), columns=x.columns, index=x.index) else: return None
[ "def", "clean_dateobject_to_string", "(", "x", ")", ":", "import", "numpy", "as", "np", "import", "pandas", "as", "pd", "def", "proc_elem", "(", "e", ")", ":", "try", ":", "return", "e", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "return", "None", "def", "proc_list", "(", "x", ")", ":", "return", "[", "proc_elem", "(", "e", ")", "for", "e", "in", "x", "]", "def", "proc_ndarray", "(", "x", ")", ":", "tmp", "=", "proc_list", "(", "list", "(", "x", ".", "reshape", "(", "(", "x", ".", "size", ",", ")", ")", ")", ")", "return", "np", ".", "array", "(", "tmp", ")", ".", "reshape", "(", "x", ".", "shape", ")", "# transform string, list/tuple, numpy array, pandas dataframe", "if", "\"strftime\"", "in", "dir", "(", "x", ")", ":", "return", "proc_elem", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "proc_list", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "return", "proc_ndarray", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "pd", ".", "DataFrame", ")", ":", "return", "pd", ".", "DataFrame", "(", "proc_ndarray", "(", "x", ".", "values", ")", ",", "columns", "=", "x", ".", "columns", ",", "index", "=", "x", ".", "index", ")", "else", ":", "return", "None" ]
Convert a Pandas Timestamp object or datetime object to 'YYYY-MM-DD' string Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A Pandas Timestamp object or datetime object, or an array of these objects Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame A string 'YYYY-MM-DD' or array of date strings. Example ------- The function aims to convert a date object to a string as follows Timestamp('2014-09-23 00:00:00') => '2014-09-23' datetime.datetime(2014,9,23,0,0) => '2014-09-23' Code Example ------------ print(clean_dateobject_to_string(pd.Timestamp('2014-09-23 00:00:00'))) '2014-09-23' print(clean_dateobject_to_string(datetime(2014,9,23,0,0))) '2014-09-23' Behavior -------- - If it is not an object with a strftime function, then None is returned
[ "Convert", "a", "Pandas", "Timestamp", "object", "or", "datetime", "object", "to", "YYYY", "-", "MM", "-", "DD", "string" ]
b3043116050de275124365cb11e7df91fb40169d
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/clean_dateobject_to_string.py#L2-L66
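Beyond the scalar examples embedded in its docstring, the dispatch at the bottom of clean_dateobject_to_string also covers containers; a short sketch:

import numpy as np
import pandas as pd

stamps = np.array([pd.Timestamp('2014-09-23'), pd.Timestamp('2014-09-24')])
print(clean_dateobject_to_string(stamps))        # ['2014-09-23' '2014-09-24']
print(clean_dateobject_to_string('not a date'))  # None: strings have no strftime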
248,023
sys-git/certifiable
certifiable/operators.py
AND
def AND(*args, **kwargs): """ ALL args must not raise an exception when called incrementally. If an exception is specified, raise it, otherwise raise the callable's exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. :raises CertifierError: The first certifier error if at least one raises a certifier error. """ for arg in args: try: arg() except CertifierError as e: exc = kwargs.get('exc', None) if exc is not None: raise exc(e) raise
python
def AND(*args, **kwargs): """ ALL args must not raise an exception when called incrementally. If an exception is specified, raise it, otherwise raise the callable's exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. :raises CertifierError: The first certifier error if at least one raises a certifier error. """ for arg in args: try: arg() except CertifierError as e: exc = kwargs.get('exc', None) if exc is not None: raise exc(e) raise
[ "def", "AND", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "args", ":", "try", ":", "arg", "(", ")", "except", "CertifierError", "as", "e", ":", "exc", "=", "kwargs", ".", "get", "(", "'exc'", ",", "None", ")", "if", "exc", "is", "not", "None", ":", "raise", "exc", "(", "e", ")", "raise" ]
ALL args must not raise an exception when called incrementally. If an exception is specified, raise it, otherwise raise the callable's exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. :raises CertifierError: The first certifier error if at least one raises a certifier error.
[ "ALL", "args", "must", "not", "raise", "an", "exception", "when", "called", "incrementally", ".", "If", "an", "exception", "is", "specified", "raise", "it", "otherwise", "raise", "the", "callable", "s", "exception", "." ]
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/operators.py#L34-L55
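A usage sketch for AND above, with a hand-rolled certifier so that nothing beyond CertifierValueError needs to be assumed from the certifiable package (its import location, and the fact that it subclasses CertifierError, are inferred from the operators module's own code):

from functools import partial
from certifiable import CertifierValueError  # assumed import location

def certify_positive(value):
    if value <= 0:
        raise CertifierValueError('expected a positive value, got {}'.format(value))

AND(partial(certify_positive, 5), partial(certify_positive, 9))   # silent: both pass
AND(partial(certify_positive, -1), partial(certify_positive, 9))  # raises on the first failure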
248,024
sys-git/certifiable
certifiable/operators.py
NAND
def NAND(*args, **kwargs): """ ALL args must raise an exception when called overall. Raise the specified exception on failure OR the first exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. """ errors = [] for arg in args: try: arg() except CertifierError as e: errors.append(e) if (len(errors) != len(args)) and len(args) > 1: exc = kwargs.get( 'exc', CertifierValueError('Expecting no certified values'), ) if exc is not None: raise exc
python
def NAND(*args, **kwargs): """ ALL args must raise an exception when called overall. Raise the specified exception on failure OR the first exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. """ errors = [] for arg in args: try: arg() except CertifierError as e: errors.append(e) if (len(errors) != len(args)) and len(args) > 1: exc = kwargs.get( 'exc', CertifierValueError('Expecting no certified values'), ) if exc is not None: raise exc
[ "def", "NAND", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "[", "]", "for", "arg", "in", "args", ":", "try", ":", "arg", "(", ")", "except", "CertifierError", "as", "e", ":", "errors", ".", "append", "(", "e", ")", "if", "(", "len", "(", "errors", ")", "!=", "len", "(", "args", ")", ")", "and", "len", "(", "args", ")", ">", "1", ":", "exc", "=", "kwargs", ".", "get", "(", "'exc'", ",", "CertifierValueError", "(", "'Expecting no certified values'", ")", ",", ")", "if", "exc", "is", "not", "None", ":", "raise", "exc" ]
ALL args must raise an exception when called overall. Raise the specified exception on failure OR the first exception. :param iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise.
[ "ALL", "args", "must", "raise", "an", "exception", "when", "called", "overall", ".", "Raise", "the", "specified", "exception", "on", "failure", "OR", "the", "first", "exception", "." ]
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/operators.py#L58-L83
248,025
sys-git/certifiable
certifiable/operators.py
XOR
def XOR(a, b, exc=CertifierValueError('Expected at least one certified value')): """ Only one arg must not raise a Certifier exception when called overall. Raise the specified exception on failure. :param Certifier a: The first certifier to call :param Certifier b: The second certifier to call :param Exception exc: Callable that is raised if XOR fails. """ errors = [] for certifier in [a, b]: try: certifier() except CertifierError as e: errors.append(e) if len(errors) != 1: if exc is not None: raise exc
python
def XOR(a, b, exc=CertifierValueError('Expected at least one certified value')): """ Only one arg must not raise a Certifier exception when called overall. Raise the specified exception on failure. :param Certifier a: The first certifier to call :param Certifier b: The second certifier to call :param Exception exc: Callable that is raised if XOR fails. """ errors = [] for certifier in [a, b]: try: certifier() except CertifierError as e: errors.append(e) if len(errors) != 1: if exc is not None: raise exc
[ "def", "XOR", "(", "a", ",", "b", ",", "exc", "=", "CertifierValueError", "(", "'Expected at least one certified value'", ")", ")", ":", "errors", "=", "[", "]", "for", "certifier", "in", "[", "a", ",", "b", "]", ":", "try", ":", "certifier", "(", ")", "except", "CertifierError", "as", "e", ":", "errors", ".", "append", "(", "e", ")", "if", "len", "(", "errors", ")", "!=", "1", ":", "if", "exc", "is", "not", "None", ":", "raise", "exc" ]
Only one arg must not raise a Certifier exception when called overall. Raise the specified exception on failure. :param Certifier a: The first certifier to call :param Certifier b: The second certifier to call :param Exception exc: Callable that is raised if XOR fails.
[ "Only", "one", "arg", "must", "not", "raise", "a", "Certifier", "exception", "when", "called", "overall", ".", "Raise", "the", "specified", "exception", "on", "failure", "." ]
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/operators.py#L86-L108
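Pinning down the three operators' semantics as implemented above: AND demands that every certifier pass, NAND that every certifier fail (when more than one is given), and XOR that exactly one of its two certifiers pass. A sketch, reusing the CertifierValueError assumption from the previous example:

def ok():
    pass  # certifier that accepts

def bad():
    raise CertifierValueError('nope')  # certifier that rejects

AND(ok, ok)      # silent: every certifier passed
NAND(bad, bad)   # silent: every certifier failed, which is what NAND demands
XOR(ok, bad)     # silent: exactly one of the two passed
XOR(ok, ok)      # raises the default CertifierValueError (zero failures instead of one)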
248,026
FujiMakoto/IPS-Vagrant
ips_vagrant/cli.py
cli
def cli(ctx, verbose, config): """ IPS Vagrant Management Utility """ assert isinstance(ctx, Context) # Set up the logger verbose = verbose if (verbose <= 3) else 3 log_levels = {1: logging.WARN, 2: logging.INFO, 3: logging.DEBUG} log_level = log_levels[verbose] ctx.log = logging.getLogger('ipsv') ctx.log.setLevel(log_level) # Console logger console_format = logging.Formatter("[%(levelname)s] %(name)s: %(message)s") ch = logging.StreamHandler() ch.setLevel(log_level) ch.setFormatter(console_format) ctx.log.addHandler(ch) # File logger file_format = logging.Formatter("[%(asctime)s] [%(levelname)s] %(name)s: %(message)s") file_logger = logging.FileHandler(os.path.join(ctx.config.get('Paths', 'Log'), 'ipsv.log')) file_logger.setLevel(log_level) file_logger.setFormatter(file_format) ctx.log.addHandler(file_logger) # Load the configuration if os.path.isfile(config): ctx.config_path = config ctx.log.debug('Loading configuration: %s', ctx.config_path) ctx.load_config(config) else: ctx.config_path = os.path.join(ctx.basedir, 'config', 'ipsv.conf') ctx.log.debug('Loading default configuration: %s', ctx.config_path) ctx.setup()
python
def cli(ctx, verbose, config): """ IPS Vagrant Management Utility """ assert isinstance(ctx, Context) # Set up the logger verbose = verbose if (verbose <= 3) else 3 log_levels = {1: logging.WARN, 2: logging.INFO, 3: logging.DEBUG} log_level = log_levels[verbose] ctx.log = logging.getLogger('ipsv') ctx.log.setLevel(log_level) # Console logger console_format = logging.Formatter("[%(levelname)s] %(name)s: %(message)s") ch = logging.StreamHandler() ch.setLevel(log_level) ch.setFormatter(console_format) ctx.log.addHandler(ch) # File logger file_format = logging.Formatter("[%(asctime)s] [%(levelname)s] %(name)s: %(message)s") file_logger = logging.FileHandler(os.path.join(ctx.config.get('Paths', 'Log'), 'ipsv.log')) file_logger.setLevel(log_level) file_logger.setFormatter(file_format) ctx.log.addHandler(file_logger) # Load the configuration if os.path.isfile(config): ctx.config_path = config ctx.log.debug('Loading configuration: %s', ctx.config_path) ctx.load_config(config) else: ctx.config_path = os.path.join(ctx.basedir, 'config', 'ipsv.conf') ctx.log.debug('Loading default configuration: %s', ctx.config_path) ctx.setup()
[ "def", "cli", "(", "ctx", ",", "verbose", ",", "config", ")", ":", "assert", "isinstance", "(", "ctx", ",", "Context", ")", "# Set up the logger", "verbose", "=", "verbose", "if", "(", "verbose", "<=", "3", ")", "else", "3", "log_levels", "=", "{", "1", ":", "logging", ".", "WARN", ",", "2", ":", "logging", ".", "INFO", ",", "3", ":", "logging", ".", "DEBUG", "}", "log_level", "=", "log_levels", "[", "verbose", "]", "ctx", ".", "log", "=", "logging", ".", "getLogger", "(", "'ipsv'", ")", "ctx", ".", "log", ".", "setLevel", "(", "log_level", ")", "# Console logger", "console_format", "=", "logging", ".", "Formatter", "(", "\"[%(levelname)s] %(name)s: %(message)s\"", ")", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setLevel", "(", "log_level", ")", "ch", ".", "setFormatter", "(", "console_format", ")", "ctx", ".", "log", ".", "addHandler", "(", "ch", ")", "# File logger", "file_format", "=", "logging", ".", "Formatter", "(", "\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\"", ")", "file_logger", "=", "logging", ".", "FileHandler", "(", "os", ".", "path", ".", "join", "(", "ctx", ".", "config", ".", "get", "(", "'Paths'", ",", "'Log'", ")", ",", "'ipsv.log'", ")", ")", "file_logger", ".", "setLevel", "(", "log_level", ")", "file_logger", ".", "setFormatter", "(", "file_format", ")", "ctx", ".", "log", ".", "addHandler", "(", "file_logger", ")", "# Load the configuration", "if", "os", ".", "path", ".", "isfile", "(", "config", ")", ":", "ctx", ".", "config_path", "=", "config", "ctx", ".", "log", ".", "debug", "(", "'Loading configuration: %s'", ",", "ctx", ".", "config_path", ")", "ctx", ".", "load_config", "(", "config", ")", "else", ":", "ctx", ".", "config_path", "=", "os", ".", "path", ".", "join", "(", "ctx", ".", "basedir", ",", "'config'", ",", "'ipsv.conf'", ")", "ctx", ".", "log", ".", "debug", "(", "'Loading default configuration: %s'", ",", "ctx", ".", "config_path", ")", "ctx", ".", "setup", "(", ")" ]
IPS Vagrant Management Utility
[ "IPS", "Vagrant", "Management", "Utility" ]
7b1d6d095034dd8befb026d9315ecc6494d52269
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/cli.py#L119-L155
248,027
FujiMakoto/IPS-Vagrant
ips_vagrant/cli.py
Context.db
def db(self): """ Get a loaded database session """ if self.database is NotImplemented: self.database = Session return self.database
python
def db(self): """ Get a loaded database session """ if self.database is NotImplemented: self.database = Session return self.database
[ "def", "db", "(", "self", ")", ":", "if", "self", ".", "database", "is", "NotImplemented", ":", "self", ".", "database", "=", "Session", "return", "self", ".", "database" ]
Get a loaded database session
[ "Get", "a", "loaded", "database", "session" ]
7b1d6d095034dd8befb026d9315ecc6494d52269
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/cli.py#L37-L44
248,028
FujiMakoto/IPS-Vagrant
ips_vagrant/cli.py
Context.get_login
def get_login(self, use_session=True): """ Get an active login session @param use_session: Use a saved session file if available @type use_session: bool """ # Should we try and return an existing login session? if use_session and self._login.check(): self.cookiejar = self._login.cookiejar return self.cookiejar # Prompt the user for their login credentials username = click.prompt('IPS Username') password = click.prompt('IPS Password', hide_input=True) remember = click.confirm('Save login session?', True) # Process the login cookiejar = self._login.process(username, password, remember) if remember: self.cookiejar = cookiejar return cookiejar
python
def get_login(self, use_session=True): """ Get an active login session @param use_session: Use a saved session file if available @type use_session: bool """ # Should we try and return an existing login session? if use_session and self._login.check(): self.cookiejar = self._login.cookiejar return self.cookiejar # Prompt the user for their login credentials username = click.prompt('IPS Username') password = click.prompt('IPS Password', hide_input=True) remember = click.confirm('Save login session?', True) # Process the login cookiejar = self._login.process(username, password, remember) if remember: self.cookiejar = cookiejar return cookiejar
[ "def", "get_login", "(", "self", ",", "use_session", "=", "True", ")", ":", "# Should we try and return an existing login session?", "if", "use_session", "and", "self", ".", "_login", ".", "check", "(", ")", ":", "self", ".", "cookiejar", "=", "self", ".", "_login", ".", "cookiejar", "return", "self", ".", "cookiejar", "# Prompt the user for their login credentials", "username", "=", "click", ".", "prompt", "(", "'IPS Username'", ")", "password", "=", "click", ".", "prompt", "(", "'IPS Password'", ",", "hide_input", "=", "True", ")", "remember", "=", "click", ".", "confirm", "(", "'Save login session?'", ",", "True", ")", "# Process the login", "cookiejar", "=", "self", ".", "_login", ".", "process", "(", "username", ",", "password", ",", "remember", ")", "if", "remember", ":", "self", ".", "cookiejar", "=", "cookiejar", "return", "cookiejar" ]
Get an active login session @param use_session: Use a saved session file if available @type use_session: bool
[ "Get", "an", "active", "login", "session" ]
7b1d6d095034dd8befb026d9315ecc6494d52269
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/cli.py#L53-L74
248,029
FujiMakoto/IPS-Vagrant
ips_vagrant/cli.py
IpsvCLI.list_commands
def list_commands(self, ctx): """ List CLI commands @type ctx: Context @rtype: list """ commands_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'commands') command_list = [name for __, name, ispkg in pkgutil.iter_modules([commands_path]) if ispkg] command_list.sort() return command_list
python
def list_commands(self, ctx): """ List CLI commands @type ctx: Context @rtype: list """ commands_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'commands') command_list = [name for __, name, ispkg in pkgutil.iter_modules([commands_path]) if ispkg] command_list.sort() return command_list
[ "def", "list_commands", "(", "self", ",", "ctx", ")", ":", "commands_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "'commands'", ")", "command_list", "=", "[", "name", "for", "__", ",", "name", ",", "ispkg", "in", "pkgutil", ".", "iter_modules", "(", "[", "commands_path", "]", ")", "if", "ispkg", "]", "command_list", ".", "sort", "(", ")", "return", "command_list" ]
List CLI commands @type ctx: Context @rtype: list
[ "List", "CLI", "commands" ]
7b1d6d095034dd8befb026d9315ecc6494d52269
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/cli.py#L82-L91
248,030
FujiMakoto/IPS-Vagrant
ips_vagrant/cli.py
IpsvCLI.get_command
def get_command(self, ctx, name): """ Get a bound command method @type ctx: Context @param name: Command name @type name: str @rtype: object """ try: mod = importlib.import_module('ips_vagrant.commands.{name}'.format(name=name)) return mod.cli except (ImportError, AttributeError): return
python
def get_command(self, ctx, name): """ Get a bound command method @type ctx: Context @param name: Command name @type name: str @rtype: object """ try: mod = importlib.import_module('ips_vagrant.commands.{name}'.format(name=name)) return mod.cli except (ImportError, AttributeError): return
[ "def", "get_command", "(", "self", ",", "ctx", ",", "name", ")", ":", "try", ":", "mod", "=", "importlib", ".", "import_module", "(", "'ips_vagrant.commands.{name}'", ".", "format", "(", "name", "=", "name", ")", ")", "return", "mod", ".", "cli", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "return" ]
Get a bound command method @type ctx: Context @param name: Command name @type name: str @rtype: object
[ "Get", "a", "bound", "command", "method" ]
7b1d6d095034dd8befb026d9315ecc6494d52269
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/cli.py#L93-L105
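list_commands and get_command together form a common click plugin pattern: discover subcommand packages with pkgutil, import them lazily with importlib, and expose each package's cli attribute. A self-contained sketch of that discovery step (the commands/ layout and the cli attribute convention mirror the two entries above):

import importlib
import pkgutil

def discover_commands(package_dir, dotted_prefix):
    """Yield (name, cli_callable) for every subpackage exposing a `cli` attribute."""
    for _, name, ispkg in pkgutil.iter_modules([package_dir]):
        if not ispkg:
            continue
        module = importlib.import_module('{0}.{1}'.format(dotted_prefix, name))
        cli = getattr(module, 'cli', None)
        if cli is not None:
            yield name, cli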
248,031
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_patched_pep257
def _patched_pep257(): """Monkey-patch pep257 after imports to avoid info logging.""" import pep257 if getattr(pep257, "log", None): def _dummy(*args, **kwargs): del args del kwargs old_log_info = pep257.log.info pep257.log.info = _dummy # suppress(unused-attribute) try: yield finally: if getattr(pep257, "log", None): pep257.log.info = old_log_info
python
def _patched_pep257(): """Monkey-patch pep257 after imports to avoid info logging.""" import pep257 if getattr(pep257, "log", None): def _dummy(*args, **kwargs): del args del kwargs old_log_info = pep257.log.info pep257.log.info = _dummy # suppress(unused-attribute) try: yield finally: if getattr(pep257, "log", None): pep257.log.info = old_log_info
[ "def", "_patched_pep257", "(", ")", ":", "import", "pep257", "if", "getattr", "(", "pep257", ",", "\"log\"", ",", "None", ")", ":", "def", "_dummy", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "del", "args", "del", "kwargs", "old_log_info", "=", "pep257", ".", "log", ".", "info", "pep257", ".", "log", ".", "info", "=", "_dummy", "# suppress(unused-attribute)", "try", ":", "yield", "finally", ":", "if", "getattr", "(", "pep257", ",", "\"log\"", ",", "None", ")", ":", "pep257", ".", "log", ".", "info", "=", "old_log_info" ]
Monkey-patch pep257 after imports to avoid info logging.
[ "Monkey", "-", "patch", "pep257", "after", "imports", "to", "avoid", "info", "logging", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L53-L68
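_patched_pep257 is a generator intended for use as a context manager; the @contextlib.contextmanager decorator is presumably applied in the source and simply does not survive in this extract. The save/replace/restore shape it uses generalizes to any attribute; a sketch:

import contextlib
import logging

@contextlib.contextmanager
def silenced_info(logger_name):
    """Temporarily replace a logger's info method with a no-op, then restore it."""
    logger = logging.getLogger(logger_name)
    old_info = logger.info
    logger.info = lambda *args, **kwargs: None
    try:
        yield
    finally:
        logger.info = old_info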
248,032
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_stamped_deps
def _stamped_deps(stamp_directory, func, dependencies, *args, **kwargs): """Run func, assumed to have dependencies as its first argument.""" if not isinstance(dependencies, list): jobstamps_dependencies = [dependencies] else: jobstamps_dependencies = dependencies kwargs.update({ "jobstamps_cache_output_directory": stamp_directory, "jobstamps_dependencies": jobstamps_dependencies }) return jobstamp.run(func, dependencies, *args, **kwargs)
python
def _stamped_deps(stamp_directory, func, dependencies, *args, **kwargs): """Run func, assumed to have dependencies as its first argument.""" if not isinstance(dependencies, list): jobstamps_dependencies = [dependencies] else: jobstamps_dependencies = dependencies kwargs.update({ "jobstamps_cache_output_directory": stamp_directory, "jobstamps_dependencies": jobstamps_dependencies }) return jobstamp.run(func, dependencies, *args, **kwargs)
[ "def", "_stamped_deps", "(", "stamp_directory", ",", "func", ",", "dependencies", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "dependencies", ",", "list", ")", ":", "jobstamps_dependencies", "=", "[", "dependencies", "]", "else", ":", "jobstamps_dependencies", "=", "dependencies", "kwargs", ".", "update", "(", "{", "\"jobstamps_cache_output_directory\"", ":", "stamp_directory", ",", "\"jobstamps_dependencies\"", ":", "jobstamps_dependencies", "}", ")", "return", "jobstamp", ".", "run", "(", "func", ",", "dependencies", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run func, assumed to have dependencies as its first argument.
[ "Run", "func", "assumed", "to", "have", "dependencies", "as", "its", "first", "argument", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L71-L82
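The jobstamps call in _stamped_deps caches a function's result on disk and re-runs the function only when one of the listed dependency files changes. A hedged usage sketch, assuming jobstamp.run consumes the jobstamps_* keyword arguments as the jobstamps library documents (the paths here are hypothetical):

def count_lines(path):
    with open(path) as handle:
        return sum(1 for _ in handle)

# Re-parses setup.py only when it changes; otherwise the cached count is returned.
lines = _stamped_deps('/tmp/stamps', count_lines, 'setup.py')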
248,033
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_debug_linter_status
def _debug_linter_status(linter, filename, show_lint_files): """Indicate that we are running this linter if required.""" if show_lint_files: print("{linter}: {filename}".format(linter=linter, filename=filename))
python
def _debug_linter_status(linter, filename, show_lint_files): """Indicate that we are running this linter if required.""" if show_lint_files: print("{linter}: {filename}".format(linter=linter, filename=filename))
[ "def", "_debug_linter_status", "(", "linter", ",", "filename", ",", "show_lint_files", ")", ":", "if", "show_lint_files", ":", "print", "(", "\"{linter}: {filename}\"", ".", "format", "(", "linter", "=", "linter", ",", "filename", "=", "filename", ")", ")" ]
Indicate that we are running this linter if required.
[ "Indicate", "that", "we", "are", "running", "this", "linter", "if", "required", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L99-L102
248,034
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_flake8
def _run_flake8(filename, stamp_file_name, show_lint_files): """Run flake8, cached by stamp_file_name.""" _debug_linter_status("flake8", filename, show_lint_files) return _stamped_deps(stamp_file_name, _run_flake8_internal, filename)
python
def _run_flake8(filename, stamp_file_name, show_lint_files): """Run flake8, cached by stamp_file_name.""" _debug_linter_status("flake8", filename, show_lint_files) return _stamped_deps(stamp_file_name, _run_flake8_internal, filename)
[ "def", "_run_flake8", "(", "filename", ",", "stamp_file_name", ",", "show_lint_files", ")", ":", "_debug_linter_status", "(", "\"flake8\"", ",", "filename", ",", "show_lint_files", ")", "return", "_stamped_deps", "(", "stamp_file_name", ",", "_run_flake8_internal", ",", "filename", ")" ]
Run flake8, cached by stamp_file_name.
[ "Run", "flake8", "cached", "by", "stamp_file_name", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L161-L166
248,035
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_prospector_on
def _run_prospector_on(filenames, tools, disabled_linters, show_lint_files, ignore_codes=None): """Run prospector on filename, using the specified tools. This function enables us to run different tools on different classes of files, which is necessary in the case of tests. """ from prospector.run import Prospector, ProspectorConfig assert tools tools = list(set(tools) - set(disabled_linters)) return_dict = dict() ignore_codes = ignore_codes or list() # Early return if all tools were filtered out if not tools: return return_dict # pylint doesn't like absolute paths, so convert to relative. all_argv = (["-F", "-D", "-M", "--no-autodetect", "-s", "veryhigh"] + ("-t " + " -t ".join(tools)).split(" ")) for filename in filenames: _debug_linter_status("prospector", filename, show_lint_files) with _custom_argv(all_argv + [os.path.relpath(f) for f in filenames]): prospector = Prospector(ProspectorConfig()) prospector.execute() messages = prospector.get_messages() or list() for message in messages: message.to_absolute_path(os.getcwd()) loc = message.location code = message.code if code in ignore_codes: continue key = _Key(loc.path, loc.line, code) return_dict[key] = message return return_dict
python
def _run_prospector_on(filenames, tools, disabled_linters, show_lint_files, ignore_codes=None): """Run prospector on filename, using the specified tools. This function enables us to run different tools on different classes of files, which is necessary in the case of tests. """ from prospector.run import Prospector, ProspectorConfig assert tools tools = list(set(tools) - set(disabled_linters)) return_dict = dict() ignore_codes = ignore_codes or list() # Early return if all tools were filtered out if not tools: return return_dict # pylint doesn't like absolute paths, so convert to relative. all_argv = (["-F", "-D", "-M", "--no-autodetect", "-s", "veryhigh"] + ("-t " + " -t ".join(tools)).split(" ")) for filename in filenames: _debug_linter_status("prospector", filename, show_lint_files) with _custom_argv(all_argv + [os.path.relpath(f) for f in filenames]): prospector = Prospector(ProspectorConfig()) prospector.execute() messages = prospector.get_messages() or list() for message in messages: message.to_absolute_path(os.getcwd()) loc = message.location code = message.code if code in ignore_codes: continue key = _Key(loc.path, loc.line, code) return_dict[key] = message return return_dict
[ "def", "_run_prospector_on", "(", "filenames", ",", "tools", ",", "disabled_linters", ",", "show_lint_files", ",", "ignore_codes", "=", "None", ")", ":", "from", "prospector", ".", "run", "import", "Prospector", ",", "ProspectorConfig", "assert", "tools", "tools", "=", "list", "(", "set", "(", "tools", ")", "-", "set", "(", "disabled_linters", ")", ")", "return_dict", "=", "dict", "(", ")", "ignore_codes", "=", "ignore_codes", "or", "list", "(", ")", "# Early return if all tools were filtered out", "if", "not", "tools", ":", "return", "return_dict", "# pylint doesn't like absolute paths, so convert to relative.", "all_argv", "=", "(", "[", "\"-F\"", ",", "\"-D\"", ",", "\"-M\"", ",", "\"--no-autodetect\"", ",", "\"-s\"", ",", "\"veryhigh\"", "]", "+", "(", "\"-t \"", "+", "\" -t \"", ".", "join", "(", "tools", ")", ")", ".", "split", "(", "\" \"", ")", ")", "for", "filename", "in", "filenames", ":", "_debug_linter_status", "(", "\"prospector\"", ",", "filename", ",", "show_lint_files", ")", "with", "_custom_argv", "(", "all_argv", "+", "[", "os", ".", "path", ".", "relpath", "(", "f", ")", "for", "f", "in", "filenames", "]", ")", ":", "prospector", "=", "Prospector", "(", "ProspectorConfig", "(", ")", ")", "prospector", ".", "execute", "(", ")", "messages", "=", "prospector", ".", "get_messages", "(", ")", "or", "list", "(", ")", "for", "message", "in", "messages", ":", "message", ".", "to_absolute_path", "(", "os", ".", "getcwd", "(", ")", ")", "loc", "=", "message", ".", "location", "code", "=", "message", ".", "code", "if", "code", "in", "ignore_codes", ":", "continue", "key", "=", "_Key", "(", "loc", ".", "path", ",", "loc", ".", "line", ",", "code", ")", "return_dict", "[", "key", "]", "=", "message", "return", "return_dict" ]
Run prospector on filename, using the specified tools. This function enables us to run different tools on different classes of files, which is necessary in the case of tests.
[ "Run", "prospector", "on", "filename", "using", "the", "specified", "tools", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L191-L235
248,036
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_prospector
def _run_prospector(filename, stamp_file_name, disabled_linters, show_lint_files): """Run prospector.""" linter_tools = [ "pep257", "pep8", "pyflakes" ] if can_run_pylint(): linter_tools.append("pylint") # Run prospector on tests. There are some errors we don't care about: # - invalid-name: This is often triggered because test method names # can be quite long. Descriptive test method names are # good, so disable this warning. # - super-on-old-class: unittest.TestCase is a new style class, but # pylint detects an old style class. # - too-many-public-methods: TestCase subclasses by definition have # lots of methods. test_ignore_codes = [ "invalid-name", "super-on-old-class", "too-many-public-methods" ] kwargs = dict() if _file_is_test(filename): kwargs["ignore_codes"] = test_ignore_codes else: if can_run_frosted(): linter_tools += ["frosted"] return _stamped_deps(stamp_file_name, _run_prospector_on, [filename], linter_tools, disabled_linters, show_lint_files, **kwargs)
python
def _run_prospector(filename, stamp_file_name, disabled_linters, show_lint_files): """Run prospector.""" linter_tools = [ "pep257", "pep8", "pyflakes" ] if can_run_pylint(): linter_tools.append("pylint") # Run prospector on tests. There are some errors we don't care about: # - invalid-name: This is often triggered because test method names # can be quite long. Descriptive test method names are # good, so disable this warning. # - super-on-old-class: unittest.TestCase is a new style class, but # pylint detects an old style class. # - too-many-public-methods: TestCase subclasses by definition have # lots of methods. test_ignore_codes = [ "invalid-name", "super-on-old-class", "too-many-public-methods" ] kwargs = dict() if _file_is_test(filename): kwargs["ignore_codes"] = test_ignore_codes else: if can_run_frosted(): linter_tools += ["frosted"] return _stamped_deps(stamp_file_name, _run_prospector_on, [filename], linter_tools, disabled_linters, show_lint_files, **kwargs)
[ "def", "_run_prospector", "(", "filename", ",", "stamp_file_name", ",", "disabled_linters", ",", "show_lint_files", ")", ":", "linter_tools", "=", "[", "\"pep257\"", ",", "\"pep8\"", ",", "\"pyflakes\"", "]", "if", "can_run_pylint", "(", ")", ":", "linter_tools", ".", "append", "(", "\"pylint\"", ")", "# Run prospector on tests. There are some errors we don't care about:", "# - invalid-name: This is often triggered because test method names", "# can be quite long. Descriptive test method names are", "# good, so disable this warning.", "# - super-on-old-class: unittest.TestCase is a new style class, but", "# pylint detects an old style class.", "# - too-many-public-methods: TestCase subclasses by definition have", "# lots of methods.", "test_ignore_codes", "=", "[", "\"invalid-name\"", ",", "\"super-on-old-class\"", ",", "\"too-many-public-methods\"", "]", "kwargs", "=", "dict", "(", ")", "if", "_file_is_test", "(", "filename", ")", ":", "kwargs", "[", "\"ignore_codes\"", "]", "=", "test_ignore_codes", "else", ":", "if", "can_run_frosted", "(", ")", ":", "linter_tools", "+=", "[", "\"frosted\"", "]", "return", "_stamped_deps", "(", "stamp_file_name", ",", "_run_prospector_on", ",", "[", "filename", "]", ",", "linter_tools", ",", "disabled_linters", ",", "show_lint_files", ",", "*", "*", "kwargs", ")" ]
Run prospector.
[ "Run", "prospector", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L244-L286
248,037
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_pyroma
def _run_pyroma(setup_file, show_lint_files): """Run pyroma.""" from pyroma import projectdata, ratings from prospector.message import Message, Location _debug_linter_status("pyroma", setup_file, show_lint_files) return_dict = dict() data = projectdata.get_data(os.getcwd()) all_tests = ratings.ALL_TESTS for test in [mod() for mod in [t.__class__ for t in all_tests]]: if test.test(data) is False: class_name = test.__class__.__name__ key = _Key(setup_file, 0, class_name) loc = Location(setup_file, None, None, 0, 0) msg = test.message() return_dict[key] = Message("pyroma", class_name, loc, msg) return return_dict
python
def _run_pyroma(setup_file, show_lint_files): """Run pyroma.""" from pyroma import projectdata, ratings from prospector.message import Message, Location _debug_linter_status("pyroma", setup_file, show_lint_files) return_dict = dict() data = projectdata.get_data(os.getcwd()) all_tests = ratings.ALL_TESTS for test in [mod() for mod in [t.__class__ for t in all_tests]]: if test.test(data) is False: class_name = test.__class__.__name__ key = _Key(setup_file, 0, class_name) loc = Location(setup_file, None, None, 0, 0) msg = test.message() return_dict[key] = Message("pyroma", class_name, loc, msg) return return_dict
[ "def", "_run_pyroma", "(", "setup_file", ",", "show_lint_files", ")", ":", "from", "pyroma", "import", "projectdata", ",", "ratings", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "_debug_linter_status", "(", "\"pyroma\"", ",", "setup_file", ",", "show_lint_files", ")", "return_dict", "=", "dict", "(", ")", "data", "=", "projectdata", ".", "get_data", "(", "os", ".", "getcwd", "(", ")", ")", "all_tests", "=", "ratings", ".", "ALL_TESTS", "for", "test", "in", "[", "mod", "(", ")", "for", "mod", "in", "[", "t", ".", "__class__", "for", "t", "in", "all_tests", "]", "]", ":", "if", "test", ".", "test", "(", "data", ")", "is", "False", ":", "class_name", "=", "test", ".", "__class__", ".", "__name__", "key", "=", "_Key", "(", "setup_file", ",", "0", ",", "class_name", ")", "loc", "=", "Location", "(", "setup_file", ",", "None", ",", "None", ",", "0", ",", "0", ")", "msg", "=", "test", ".", "message", "(", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"pyroma\"", ",", "class_name", ",", "loc", ",", "msg", ")", "return", "return_dict" ]
Run pyroma.
[ "Run", "pyroma", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L289-L311
248,038
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_polysquare_style_linter
def _run_polysquare_style_linter(matched_filenames, cache_dir, show_lint_files): """Run polysquare-generic-file-linter on matched_filenames.""" from polysquarelinter import linter as lint from prospector.message import Message, Location return_dict = dict() def _custom_reporter(error, file_path): key = _Key(file_path, error[1].line, error[0]) loc = Location(file_path, None, None, error[1].line, 0) return_dict[key] = Message("polysquare-generic-file-linter", error[0], loc, error[1].description) for filename in matched_filenames: _debug_linter_status("style-linter", filename, show_lint_files) # suppress(protected-access,unused-attribute) lint._report_lint_error = _custom_reporter lint.main([ "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"), "--stamp-file-path=" + os.path.join(cache_dir, "jobstamps", "polysquarelinter"), "--log-technical-terms-to=" + os.path.join(cache_dir, "technical-terms"), ] + matched_filenames + [ "--block-regexps" ] + _BLOCK_REGEXPS) return return_dict
python
def _run_polysquare_style_linter(matched_filenames, cache_dir, show_lint_files): """Run polysquare-generic-file-linter on matched_filenames.""" from polysquarelinter import linter as lint from prospector.message import Message, Location return_dict = dict() def _custom_reporter(error, file_path): key = _Key(file_path, error[1].line, error[0]) loc = Location(file_path, None, None, error[1].line, 0) return_dict[key] = Message("polysquare-generic-file-linter", error[0], loc, error[1].description) for filename in matched_filenames: _debug_linter_status("style-linter", filename, show_lint_files) # suppress(protected-access,unused-attribute) lint._report_lint_error = _custom_reporter lint.main([ "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"), "--stamp-file-path=" + os.path.join(cache_dir, "jobstamps", "polysquarelinter"), "--log-technical-terms-to=" + os.path.join(cache_dir, "technical-terms"), ] + matched_filenames + [ "--block-regexps" ] + _BLOCK_REGEXPS) return return_dict
[ "def", "_run_polysquare_style_linter", "(", "matched_filenames", ",", "cache_dir", ",", "show_lint_files", ")", ":", "from", "polysquarelinter", "import", "linter", "as", "lint", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "return_dict", "=", "dict", "(", ")", "def", "_custom_reporter", "(", "error", ",", "file_path", ")", ":", "key", "=", "_Key", "(", "file_path", ",", "error", "[", "1", "]", ".", "line", ",", "error", "[", "0", "]", ")", "loc", "=", "Location", "(", "file_path", ",", "None", ",", "None", ",", "error", "[", "1", "]", ".", "line", ",", "0", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"polysquare-generic-file-linter\"", ",", "error", "[", "0", "]", ",", "loc", ",", "error", "[", "1", "]", ".", "description", ")", "for", "filename", "in", "matched_filenames", ":", "_debug_linter_status", "(", "\"style-linter\"", ",", "filename", ",", "show_lint_files", ")", "# suppress(protected-access,unused-attribute)", "lint", ".", "_report_lint_error", "=", "_custom_reporter", "lint", ".", "main", "(", "[", "\"--spellcheck-cache=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"spelling\"", ")", ",", "\"--stamp-file-path=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"jobstamps\"", ",", "\"polysquarelinter\"", ")", ",", "\"--log-technical-terms-to=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"technical-terms\"", ")", ",", "]", "+", "matched_filenames", "+", "[", "\"--block-regexps\"", "]", "+", "_BLOCK_REGEXPS", ")", "return", "return_dict" ]
Run polysquare-generic-file-linter on matched_filenames.
[ "Run", "polysquare", "-", "generic", "-", "file", "-", "linter", "on", "matched_filenames", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L322-L355
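A note on the pattern in the record above (and in the spellcheck variant that follows): instead of parsing linter output, the function monkeypatches the linter module's private `_report_lint_error` hook with a closure that collects findings into a dict keyed by (file, line, code). A minimal, self-contained sketch of that collect-via-callback idea; the names here are illustrative stand-ins, not the polysquarelinter API:

```python
from collections import namedtuple

_Key = namedtuple("_Key", ["file_path", "line", "code"])

def collect_findings(run_linter, filenames):
    """Gather linter findings by swapping a reporting hook for a closure."""
    findings = {}

    def _reporter(code, line, description, file_path):
        # keyed the same way as the record: one entry per (file, line, code)
        findings[_Key(file_path, line, code)] = description

    for name in filenames:
        # run_linter is assumed to invoke _reporter once per problem it finds
        run_linter(name, report=_reporter)
    return findings
```

Relying on a private hook (`lint._report_lint_error`) is fragile across polysquarelinter releases, which is presumably why the call site carries a `suppress(protected-access,unused-attribute)` comment.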
248,039
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_spellcheck_linter
def _run_spellcheck_linter(matched_filenames, cache_dir, show_lint_files): """Run spellcheck-linter on matched_filenames.""" from polysquarelinter import lint_spelling_only as lint from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("spellcheck-linter", filename, show_lint_files) return_dict = dict() def _custom_reporter(error, file_path): line = error.line_offset + 1 key = _Key(file_path, line, "file/spelling_error") loc = Location(file_path, None, None, line, 0) # suppress(protected-access) desc = lint._SPELLCHECK_MESSAGES[error.error_type].format(error.word) return_dict[key] = Message("spellcheck-linter", "file/spelling_error", loc, desc) # suppress(protected-access,unused-attribute) lint._report_spelling_error = _custom_reporter lint.main([ "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"), "--stamp-file-path=" + os.path.join(cache_dir, "jobstamps", "polysquarelinter"), "--technical-terms=" + os.path.join(cache_dir, "technical-terms"), ] + matched_filenames) return return_dict
python
def _run_spellcheck_linter(matched_filenames, cache_dir, show_lint_files): """Run spellcheck-linter on matched_filenames.""" from polysquarelinter import lint_spelling_only as lint from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("spellcheck-linter", filename, show_lint_files) return_dict = dict() def _custom_reporter(error, file_path): line = error.line_offset + 1 key = _Key(file_path, line, "file/spelling_error") loc = Location(file_path, None, None, line, 0) # suppress(protected-access) desc = lint._SPELLCHECK_MESSAGES[error.error_type].format(error.word) return_dict[key] = Message("spellcheck-linter", "file/spelling_error", loc, desc) # suppress(protected-access,unused-attribute) lint._report_spelling_error = _custom_reporter lint.main([ "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"), "--stamp-file-path=" + os.path.join(cache_dir, "jobstamps", "polysquarelinter"), "--technical-terms=" + os.path.join(cache_dir, "technical-terms"), ] + matched_filenames) return return_dict
[ "def", "_run_spellcheck_linter", "(", "matched_filenames", ",", "cache_dir", ",", "show_lint_files", ")", ":", "from", "polysquarelinter", "import", "lint_spelling_only", "as", "lint", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "for", "filename", "in", "matched_filenames", ":", "_debug_linter_status", "(", "\"spellcheck-linter\"", ",", "filename", ",", "show_lint_files", ")", "return_dict", "=", "dict", "(", ")", "def", "_custom_reporter", "(", "error", ",", "file_path", ")", ":", "line", "=", "error", ".", "line_offset", "+", "1", "key", "=", "_Key", "(", "file_path", ",", "line", ",", "\"file/spelling_error\"", ")", "loc", "=", "Location", "(", "file_path", ",", "None", ",", "None", ",", "line", ",", "0", ")", "# suppress(protected-access)", "desc", "=", "lint", ".", "_SPELLCHECK_MESSAGES", "[", "error", ".", "error_type", "]", ".", "format", "(", "error", ".", "word", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"spellcheck-linter\"", ",", "\"file/spelling_error\"", ",", "loc", ",", "desc", ")", "# suppress(protected-access,unused-attribute)", "lint", ".", "_report_spelling_error", "=", "_custom_reporter", "lint", ".", "main", "(", "[", "\"--spellcheck-cache=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"spelling\"", ")", ",", "\"--stamp-file-path=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"jobstamps\"", ",", "\"polysquarelinter\"", ")", ",", "\"--technical-terms=\"", "+", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"technical-terms\"", ")", ",", "]", "+", "matched_filenames", ")", "return", "return_dict" ]
Run spellcheck-linter on matched_filenames.
[ "Run", "spellcheck", "-", "linter", "on", "matched_filenames", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L358-L389
248,040
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_run_markdownlint
def _run_markdownlint(matched_filenames, show_lint_files): """Run markdownlint on matched_filenames.""" from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("mdl", filename, show_lint_files) try: proc = subprocess.Popen(["mdl"] + matched_filenames, stdout=subprocess.PIPE, stderr=subprocess.PIPE) lines = proc.communicate()[0].decode().splitlines() except OSError as error: if error.errno == errno.ENOENT: return [] lines = [ re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", l).groups(1) for l in lines ] return_dict = dict() for filename, lineno, code, msg in lines: key = _Key(filename, int(lineno), code) loc = Location(filename, None, None, int(lineno), 0) return_dict[key] = Message("markdownlint", code, loc, msg) return return_dict
python
def _run_markdownlint(matched_filenames, show_lint_files): """Run markdownlint on matched_filenames.""" from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("mdl", filename, show_lint_files) try: proc = subprocess.Popen(["mdl"] + matched_filenames, stdout=subprocess.PIPE, stderr=subprocess.PIPE) lines = proc.communicate()[0].decode().splitlines() except OSError as error: if error.errno == errno.ENOENT: return [] lines = [ re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", l).groups(1) for l in lines ] return_dict = dict() for filename, lineno, code, msg in lines: key = _Key(filename, int(lineno), code) loc = Location(filename, None, None, int(lineno), 0) return_dict[key] = Message("markdownlint", code, loc, msg) return return_dict
[ "def", "_run_markdownlint", "(", "matched_filenames", ",", "show_lint_files", ")", ":", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "for", "filename", "in", "matched_filenames", ":", "_debug_linter_status", "(", "\"mdl\"", ",", "filename", ",", "show_lint_files", ")", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"mdl\"", "]", "+", "matched_filenames", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "lines", "=", "proc", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", ")", ".", "splitlines", "(", ")", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "==", "errno", ".", "ENOENT", ":", "return", "[", "]", "lines", "=", "[", "re", ".", "match", "(", "r\"([\\w\\-.\\/\\\\ ]+)\\:([0-9]+)\\: (\\w+) (.+)\"", ",", "l", ")", ".", "groups", "(", "1", ")", "for", "l", "in", "lines", "]", "return_dict", "=", "dict", "(", ")", "for", "filename", ",", "lineno", ",", "code", ",", "msg", "in", "lines", ":", "key", "=", "_Key", "(", "filename", ",", "int", "(", "lineno", ")", ",", "code", ")", "loc", "=", "Location", "(", "filename", ",", "None", ",", "None", ",", "int", "(", "lineno", ")", ",", "0", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"markdownlint\"", ",", "code", ",", "loc", ",", "msg", ")", "return", "return_dict" ]
Run markdownlint on matched_filenames.
[ "Run", "markdownlint", "on", "matched_filenames", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L392-L418
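Two fragile spots in the record above: if launching `mdl` raises an OSError other than ENOENT, `lines` is never bound and the comprehension fails with UnboundLocalError; and the ENOENT path returns a list while every other path returns a dict. A hedged, defensive variant (standalone sketch, not the project's code):

```python
import errno
import re
import subprocess

_MDL_LINE = re.compile(r"([\w\-.\/\\ ]+):([0-9]+): (\w+) (.+)")

def run_mdl(filenames):
    """Run mdl and parse its findings; {} if mdl is not installed."""
    try:
        proc = subprocess.Popen(["mdl"] + list(filenames),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        output = proc.communicate()[0].decode()
    except OSError as error:
        if error.errno == errno.ENOENT:
            return {}          # keep the return type consistent with success
        raise                  # don't swallow unrelated launch failures
    findings = {}
    for line in output.splitlines():
        match = _MDL_LINE.match(line)
        if match is None:      # skip banners/summaries that aren't findings
            continue
        filename, lineno, code, msg = match.groups()
        findings[(filename, int(lineno), code)] = msg
    return findings
```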
248,041
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_get_cache_dir
def _get_cache_dir(candidate): """Get the current cache directory.""" if candidate: return candidate import distutils.dist # suppress(import-error) import distutils.command.build # suppress(import-error) build_cmd = distutils.command.build.build(distutils.dist.Distribution()) build_cmd.finalize_options() cache_dir = os.path.abspath(build_cmd.build_temp) # Make sure that it is created before anyone tries to use it try: os.makedirs(cache_dir) except OSError as error: if error.errno != errno.EEXIST: raise error return cache_dir
python
def _get_cache_dir(candidate): """Get the current cache directory.""" if candidate: return candidate import distutils.dist # suppress(import-error) import distutils.command.build # suppress(import-error) build_cmd = distutils.command.build.build(distutils.dist.Distribution()) build_cmd.finalize_options() cache_dir = os.path.abspath(build_cmd.build_temp) # Make sure that it is created before anyone tries to use it try: os.makedirs(cache_dir) except OSError as error: if error.errno != errno.EEXIST: raise error return cache_dir
[ "def", "_get_cache_dir", "(", "candidate", ")", ":", "if", "candidate", ":", "return", "candidate", "import", "distutils", ".", "dist", "# suppress(import-error)", "import", "distutils", ".", "command", ".", "build", "# suppress(import-error)", "build_cmd", "=", "distutils", ".", "command", ".", "build", ".", "build", "(", "distutils", ".", "dist", ".", "Distribution", "(", ")", ")", "build_cmd", ".", "finalize_options", "(", ")", "cache_dir", "=", "os", ".", "path", ".", "abspath", "(", "build_cmd", ".", "build_temp", ")", "# Make sure that it is created before anyone tries to use it", "try", ":", "os", ".", "makedirs", "(", "cache_dir", ")", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "error", "return", "cache_dir" ]
Get the current cache directory.
[ "Get", "the", "current", "cache", "directory", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L426-L444
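The try/except `errno.EEXIST` dance in the record above predates Python 3.2; `os.makedirs(..., exist_ok=True)` expresses the same intent directly (and a bare `raise` would preserve the traceback better than `raise error`):

```python
import os

def ensure_dir(path):
    # A pre-existing directory is a no-op; other failures still raise OSError.
    os.makedirs(path, exist_ok=True)
    return path
```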
248,042
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
_is_excluded
def _is_excluded(filename, exclusions): """Return true if filename matches any of exclusions.""" for exclusion in exclusions: if fnmatch(filename, exclusion): return True return False
python
def _is_excluded(filename, exclusions): """Return true if filename matches any of exclusions.""" for exclusion in exclusions: if fnmatch(filename, exclusion): return True return False
[ "def", "_is_excluded", "(", "filename", ",", "exclusions", ")", ":", "for", "exclusion", "in", "exclusions", ":", "if", "fnmatch", "(", "filename", ",", "exclusion", ")", ":", "return", "True", "return", "False" ]
Return true if filename matches any of exclusions.
[ "Return", "true", "if", "filename", "matches", "any", "of", "exclusions", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L457-L463
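The loop above is the textbook case for `any()`; a behavior-equivalent one-liner:

```python
from fnmatch import fnmatch

def is_excluded(filename, exclusions):
    """True if filename matches any exclusion glob (same semantics as above)."""
    return any(fnmatch(filename, pattern) for pattern in exclusions)

# e.g. is_excluded("build/tmp/mod.py", ["*.egg/*", "*build/*"])  -> True
```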
248,043
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
PolysquareLintCommand._file_lines
def _file_lines(self, filename): """Get lines for filename, caching opened files.""" try: return self._file_lines_cache[filename] except KeyError: if os.path.isfile(filename): with open(filename) as python_file: self._file_lines_cache[filename] = python_file.readlines() else: self._file_lines_cache[filename] = "" return self._file_lines_cache[filename]
python
def _file_lines(self, filename): """Get lines for filename, caching opened files.""" try: return self._file_lines_cache[filename] except KeyError: if os.path.isfile(filename): with open(filename) as python_file: self._file_lines_cache[filename] = python_file.readlines() else: self._file_lines_cache[filename] = "" return self._file_lines_cache[filename]
[ "def", "_file_lines", "(", "self", ",", "filename", ")", ":", "try", ":", "return", "self", ".", "_file_lines_cache", "[", "filename", "]", "except", "KeyError", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "python_file", ":", "self", ".", "_file_lines_cache", "[", "filename", "]", "=", "python_file", ".", "readlines", "(", ")", "else", ":", "self", ".", "_file_lines_cache", "[", "filename", "]", "=", "\"\"", "return", "self", ".", "_file_lines_cache", "[", "filename", "]" ]
Get lines for filename, caching opened files.
[ "Get", "lines", "for", "filename", "caching", "opened", "files", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L479-L490
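One subtlety in the record above: a missing file is cached as `""` (an empty string) while existing files cache a list of lines, so callers see two different types. The record gets away with it because both are falsy under `if not lines`, but a sketch that keeps the memoization with a consistent return type looks like this:

```python
import os

class FileLineCache:
    def __init__(self):
        self._cache = {}

    def lines(self, filename):
        """Lines of filename, cached; [] (not "") when the file is absent."""
        if filename not in self._cache:
            if os.path.isfile(filename):
                with open(filename) as handle:
                    self._cache[filename] = handle.readlines()
            else:
                self._cache[filename] = []
        return self._cache[filename]
```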
248,044
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
PolysquareLintCommand._suppressed
def _suppressed(self, filename, line, code): """Return true if linter error code is suppressed inline. The suppression format is suppress(CODE1,CODE2,CODE3) etc. """ if code in self.suppress_codes: return True lines = self._file_lines(filename) # File is zero length, cannot be suppressed if not lines: return False # Handle errors which appear after the end of the document. while line > len(lines): line = line - 1 relevant_line = lines[line - 1] try: suppressions_function = relevant_line.split("#")[1].strip() if suppressions_function.startswith("suppress("): return code in _parse_suppressions(suppressions_function) except IndexError: above_line = lines[max(0, line - 2)] suppressions_function = above_line.strip()[1:].strip() if suppressions_function.startswith("suppress("): return code in _parse_suppressions(suppressions_function) finally: pass
python
def _suppressed(self, filename, line, code): """Return true if linter error code is suppressed inline. The suppression format is suppress(CODE1,CODE2,CODE3) etc. """ if code in self.suppress_codes: return True lines = self._file_lines(filename) # File is zero length, cannot be suppressed if not lines: return False # Handle errors which appear after the end of the document. while line > len(lines): line = line - 1 relevant_line = lines[line - 1] try: suppressions_function = relevant_line.split("#")[1].strip() if suppressions_function.startswith("suppress("): return code in _parse_suppressions(suppressions_function) except IndexError: above_line = lines[max(0, line - 2)] suppressions_function = above_line.strip()[1:].strip() if suppressions_function.startswith("suppress("): return code in _parse_suppressions(suppressions_function) finally: pass
[ "def", "_suppressed", "(", "self", ",", "filename", ",", "line", ",", "code", ")", ":", "if", "code", "in", "self", ".", "suppress_codes", ":", "return", "True", "lines", "=", "self", ".", "_file_lines", "(", "filename", ")", "# File is zero length, cannot be suppressed", "if", "not", "lines", ":", "return", "False", "# Handle errors which appear after the end of the document.", "while", "line", ">", "len", "(", "lines", ")", ":", "line", "=", "line", "-", "1", "relevant_line", "=", "lines", "[", "line", "-", "1", "]", "try", ":", "suppressions_function", "=", "relevant_line", ".", "split", "(", "\"#\"", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "suppressions_function", ".", "startswith", "(", "\"suppress(\"", ")", ":", "return", "code", "in", "_parse_suppressions", "(", "suppressions_function", ")", "except", "IndexError", ":", "above_line", "=", "lines", "[", "max", "(", "0", ",", "line", "-", "2", ")", "]", "suppressions_function", "=", "above_line", ".", "strip", "(", ")", "[", "1", ":", "]", ".", "strip", "(", ")", "if", "suppressions_function", ".", "startswith", "(", "\"suppress(\"", ")", ":", "return", "code", "in", "_parse_suppressions", "(", "suppressions_function", ")", "finally", ":", "pass" ]
Return true if linter error code is suppressed inline. The suppression format is suppress(CODE1,CODE2,CODE3) etc.
[ "Return", "true", "if", "linter", "error", "code", "is", "suppressed", "inline", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L492-L522
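The control flow in the record above is easy to misread: the `finally: pass` does nothing, and when neither the offending line nor the line above carries a `suppress(...)` comment the function falls off the end and returns None rather than False (callers survive because None is falsy). A sketch with the same lookup order and explicit returns; `parse_suppressions` stands in for the module's private `_parse_suppressions`:

```python
def is_suppressed(lines, line, code, parse_suppressions):
    """True if `code` is suppressed inline at `line` (1-indexed) in `lines`."""
    if not lines:
        return False
    line = min(line, len(lines))          # clamp findings reported past EOF
    parts = lines[line - 1].split("#")
    if len(parts) > 1:                    # comment on the offending line itself
        candidate = parts[1].strip()
    else:                                 # otherwise look one line above
        candidate = lines[max(0, line - 2)].strip()[1:].strip()
    if candidate.startswith("suppress("):
        return code in parse_suppressions(candidate)
    return False
```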
248,045
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
PolysquareLintCommand._get_md_files
def _get_md_files(self): """Get all markdown files.""" all_f = _all_files_matching_ext(os.getcwd(), "md") exclusions = [ "*.egg/*", "*.eggs/*", "*build/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
python
def _get_md_files(self): """Get all markdown files.""" all_f = _all_files_matching_ext(os.getcwd(), "md") exclusions = [ "*.egg/*", "*.eggs/*", "*build/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
[ "def", "_get_md_files", "(", "self", ")", ":", "all_f", "=", "_all_files_matching_ext", "(", "os", ".", "getcwd", "(", ")", ",", "\"md\"", ")", "exclusions", "=", "[", "\"*.egg/*\"", ",", "\"*.eggs/*\"", ",", "\"*build/*\"", "]", "+", "self", ".", "exclusions", "return", "sorted", "(", "[", "f", "for", "f", "in", "all_f", "if", "not", "_is_excluded", "(", "f", ",", "exclusions", ")", "]", ")" ]
Get all markdown files.
[ "Get", "all", "markdown", "files", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L524-L532
248,046
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
PolysquareLintCommand._get_files_to_lint
def _get_files_to_lint(self, external_directories): """Get files to lint.""" all_f = [] for external_dir in external_directories: all_f.extend(_all_files_matching_ext(external_dir, "py")) packages = self.distribution.packages or list() for package in packages: all_f.extend(_all_files_matching_ext(package, "py")) py_modules = self.distribution.py_modules or list() for filename in py_modules: all_f.append(os.path.realpath(filename + ".py")) all_f.append(os.path.join(os.getcwd(), "setup.py")) # Remove duplicates which may exist due to symlinks or repeated # packages found by /setup.py all_f = list(set([os.path.realpath(f) for f in all_f])) exclusions = [ "*.egg/*", "*.eggs/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
python
def _get_files_to_lint(self, external_directories): """Get files to lint.""" all_f = [] for external_dir in external_directories: all_f.extend(_all_files_matching_ext(external_dir, "py")) packages = self.distribution.packages or list() for package in packages: all_f.extend(_all_files_matching_ext(package, "py")) py_modules = self.distribution.py_modules or list() for filename in py_modules: all_f.append(os.path.realpath(filename + ".py")) all_f.append(os.path.join(os.getcwd(), "setup.py")) # Remove duplicates which may exist due to symlinks or repeated # packages found by /setup.py all_f = list(set([os.path.realpath(f) for f in all_f])) exclusions = [ "*.egg/*", "*.eggs/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
[ "def", "_get_files_to_lint", "(", "self", ",", "external_directories", ")", ":", "all_f", "=", "[", "]", "for", "external_dir", "in", "external_directories", ":", "all_f", ".", "extend", "(", "_all_files_matching_ext", "(", "external_dir", ",", "\"py\"", ")", ")", "packages", "=", "self", ".", "distribution", ".", "packages", "or", "list", "(", ")", "for", "package", "in", "packages", ":", "all_f", ".", "extend", "(", "_all_files_matching_ext", "(", "package", ",", "\"py\"", ")", ")", "py_modules", "=", "self", ".", "distribution", ".", "py_modules", "or", "list", "(", ")", "for", "filename", "in", "py_modules", ":", "all_f", ".", "append", "(", "os", ".", "path", ".", "realpath", "(", "filename", "+", "\".py\"", ")", ")", "all_f", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\"setup.py\"", ")", ")", "# Remove duplicates which may exist due to symlinks or repeated", "# packages found by /setup.py", "all_f", "=", "list", "(", "set", "(", "[", "os", ".", "path", ".", "realpath", "(", "f", ")", "for", "f", "in", "all_f", "]", ")", ")", "exclusions", "=", "[", "\"*.egg/*\"", ",", "\"*.eggs/*\"", "]", "+", "self", ".", "exclusions", "return", "sorted", "(", "[", "f", "for", "f", "in", "all_f", "if", "not", "_is_excluded", "(", "f", ",", "exclusions", ")", "]", ")" ]
Get files to lint.
[ "Get", "files", "to", "lint", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L534-L559
248,047
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
PolysquareLintCommand.initialize_options
def initialize_options(self): # suppress(unused-function) """Set all options to their initial values.""" self._file_lines_cache = dict() self.suppress_codes = list() self.exclusions = list() self.cache_directory = "" self.stamp_directory = "" self.disable_linters = list() self.show_lint_files = 0
python
def initialize_options(self): # suppress(unused-function) """Set all options to their initial values.""" self._file_lines_cache = dict() self.suppress_codes = list() self.exclusions = list() self.cache_directory = "" self.stamp_directory = "" self.disable_linters = list() self.show_lint_files = 0
[ "def", "initialize_options", "(", "self", ")", ":", "# suppress(unused-function)", "self", ".", "_file_lines_cache", "=", "dict", "(", ")", "self", ".", "suppress_codes", "=", "list", "(", ")", "self", ".", "exclusions", "=", "list", "(", ")", "self", ".", "cache_directory", "=", "\"\"", "self", ".", "stamp_directory", "=", "\"\"", "self", ".", "disable_linters", "=", "list", "(", ")", "self", ".", "show_lint_files", "=", "0" ]
Set all options to their initial values.
[ "Set", "all", "options", "to", "their", "initial", "values", "." ]
5df5a6401c7ad6a90b42230eeb99c82cc56952b6
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L687-L695
248,048
tsileo/globster
globster.py
normalize_pattern
def normalize_pattern(pattern): """Converts backslashes in path patterns to forward slashes. Doesn't normalize regular expressions - they may contain escapes. """ if not (pattern.startswith('RE:') or pattern.startswith('!RE:')): pattern = _slashes.sub('/', pattern) if len(pattern) > 1: pattern = pattern.rstrip('/') return pattern
python
def normalize_pattern(pattern): """Converts backslashes in path patterns to forward slashes. Doesn't normalize regular expressions - they may contain escapes. """ if not (pattern.startswith('RE:') or pattern.startswith('!RE:')): pattern = _slashes.sub('/', pattern) if len(pattern) > 1: pattern = pattern.rstrip('/') return pattern
[ "def", "normalize_pattern", "(", "pattern", ")", ":", "if", "not", "(", "pattern", ".", "startswith", "(", "'RE:'", ")", "or", "pattern", ".", "startswith", "(", "'!RE:'", ")", ")", ":", "pattern", "=", "_slashes", ".", "sub", "(", "'/'", ",", "pattern", ")", "if", "len", "(", "pattern", ")", ">", "1", ":", "pattern", "=", "pattern", ".", "rstrip", "(", "'/'", ")", "return", "pattern" ]
Converts backslashes in path patterns to forward slashes. Doesn't normalize regular expressions - they may contain escapes.
[ "Converts", "backslashes", "in", "path", "patterns", "to", "forward", "slashes", "." ]
9628bce60207b150d39b409cddc3fadb34e70841
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L366-L375
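A standalone sketch with a few worked calls; the `_slashes` regex is an assumption standing in for the module-level pattern the record references:

```python
import re

_slashes = re.compile(r"[\\/]+")   # assumed: collapses slash/backslash runs

def normalize_pattern(pattern):
    if not (pattern.startswith("RE:") or pattern.startswith("!RE:")):
        pattern = _slashes.sub("/", pattern)
    if len(pattern) > 1:
        pattern = pattern.rstrip("/")
    return pattern

assert normalize_pattern("foo\\bar\\baz/") == "foo/bar/baz"
assert normalize_pattern("/") == "/"            # 1-char patterns keep the slash
assert normalize_pattern("RE:a\\\\b") == "RE:a\\\\b"   # regexes pass through
```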
248,049
tsileo/globster
globster.py
Replacer.add
def add(self, pat, fun): r"""Add a pattern and replacement. The pattern must not contain capturing groups. The replacement might be either a string template in which \& will be replaced with the match, or a function that will get the matching text as argument. It does not get match object, because capturing is forbidden anyway. """ self._pat = None self._pats.append(pat) self._funs.append(fun)
python
def add(self, pat, fun): r"""Add a pattern and replacement. The pattern must not contain capturing groups. The replacement might be either a string template in which \& will be replaced with the match, or a function that will get the matching text as argument. It does not get match object, because capturing is forbidden anyway. """ self._pat = None self._pats.append(pat) self._funs.append(fun)
[ "def", "add", "(", "self", ",", "pat", ",", "fun", ")", ":", "self", ".", "_pat", "=", "None", "self", ".", "_pats", ".", "append", "(", "pat", ")", "self", ".", "_funs", ".", "append", "(", "fun", ")" ]
r"""Add a pattern and replacement. The pattern must not contain capturing groups. The replacement might be either a string template in which \& will be replaced with the match, or a function that will get the matching text as argument. It does not get match object, because capturing is forbidden anyway.
[ "r", "Add", "a", "pattern", "and", "replacement", "." ]
9628bce60207b150d39b409cddc3fadb34e70841
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L60-L71
248,050
tsileo/globster
globster.py
Replacer.add_replacer
def add_replacer(self, replacer): r"""Add all patterns from another replacer. All patterns and replacements from replacer are appended to the ones already defined. """ self._pat = None self._pats.extend(replacer._pats) self._funs.extend(replacer._funs)
python
def add_replacer(self, replacer): r"""Add all patterns from another replacer. All patterns and replacements from replacer are appended to the ones already defined. """ self._pat = None self._pats.extend(replacer._pats) self._funs.extend(replacer._funs)
[ "def", "add_replacer", "(", "self", ",", "replacer", ")", ":", "self", ".", "_pat", "=", "None", "self", ".", "_pats", ".", "extend", "(", "replacer", ".", "_pats", ")", "self", ".", "_funs", ".", "extend", "(", "replacer", ".", "_funs", ")" ]
r"""Add all patterns from another replacer. All patterns and replacements from replacer are appended to the ones already defined.
[ "r", "Add", "all", "patterns", "from", "another", "replacer", "." ]
9628bce60207b150d39b409cddc3fadb34e70841
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L73-L81
248,051
tsileo/globster
globster.py
Globster.is_pattern_valid
def is_pattern_valid(pattern): """Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern """ result = True translator = Globster.pattern_info[Globster.identify(pattern)]["translator"] tpattern = '(%s)' % translator(pattern) try: re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE) re_obj.search("") # force compile except Exception as e: result = False return result
python
def is_pattern_valid(pattern): """Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern """ result = True translator = Globster.pattern_info[Globster.identify(pattern)]["translator"] tpattern = '(%s)' % translator(pattern) try: re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE) re_obj.search("") # force compile except Exception as e: result = False return result
[ "def", "is_pattern_valid", "(", "pattern", ")", ":", "result", "=", "True", "translator", "=", "Globster", ".", "pattern_info", "[", "Globster", ".", "identify", "(", "pattern", ")", "]", "[", "\"translator\"", "]", "tpattern", "=", "'(%s)'", "%", "translator", "(", "pattern", ")", "try", ":", "re_obj", "=", "lazy_regex", ".", "lazy_compile", "(", "tpattern", ",", "re", ".", "UNICODE", ")", "re_obj", ".", "search", "(", "\"\"", ")", "# force compile", "except", "Exception", "as", "e", ":", "result", "=", "False", "return", "result" ]
Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern
[ "Returns", "True", "if", "pattern", "is", "valid", "." ]
9628bce60207b150d39b409cddc3fadb34e70841
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L292-L307
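Two small smells in the record above: `except Exception as e` binds a name that is never used, and catching bare Exception hides programming errors alongside genuinely bad patterns. The `re_obj.search("")` exists only to force the lazy wrapper to compile; an eager sketch can catch the specific failure instead:

```python
import re

def compiles(tpattern):
    """True if tpattern is a valid regular expression."""
    try:
        re.compile(tpattern, re.UNICODE)
        return True
    except re.error:          # the specific exception for a bad pattern
        return False
```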
248,052
cbrand/vpnchooser
src/vpnchooser/resources/device.py
DeviceResource.put
def put(self, device_id: int) -> Device: """ Updates the Device Resource with the name. """ device = self._get_or_abort(device_id) self.update(device) session.commit() session.add(device) return device
python
def put(self, device_id: int) -> Device: """ Updates the Device Resource with the name. """ device = self._get_or_abort(device_id) self.update(device) session.commit() session.add(device) return device
[ "def", "put", "(", "self", ",", "device_id", ":", "int", ")", "->", "Device", ":", "device", "=", "self", ".", "_get_or_abort", "(", "device_id", ")", "self", ".", "update", "(", "device", ")", "session", ".", "commit", "(", ")", "session", ".", "add", "(", "device", ")", "return", "device" ]
Updates the Device Resource with the name.
[ "Updates", "the", "Device", "Resource", "with", "the", "name", "." ]
d153e3d05555c23cf5e8e15e507eecad86465923
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/device.py#L114-L123
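The ordering in the record above is unusual: `session.add(device)` runs after `session.commit()`, so it does nothing for that transaction. It works anyway because an object loaded through the session is already tracked and the commit flushes the mutation. A sketch with the conventional order, written standalone since the resource class isn't shown:

```python
def update_and_commit(session, device, apply_changes):
    apply_changes(device)    # mutate the ORM-tracked object
    session.add(device)      # redundant for a loaded object, but explicit
    session.commit()         # flush and persist the mutation
    return device
```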
248,053
idlesign/django-xross
xross/utils.py
xross_listener
def xross_listener(http_method=None, **xross_attrs): """Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument. """ handler = currentframe().f_back.f_locals['request']._xross_handler handler.set_attrs(**xross_attrs) if http_method is not None: handler.http_method = http_method handler.dispatch()
python
def xross_listener(http_method=None, **xross_attrs): """Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument. """ handler = currentframe().f_back.f_locals['request']._xross_handler handler.set_attrs(**xross_attrs) if http_method is not None: handler.http_method = http_method handler.dispatch()
[ "def", "xross_listener", "(", "http_method", "=", "None", ",", "*", "*", "xross_attrs", ")", ":", "handler", "=", "currentframe", "(", ")", ".", "f_back", ".", "f_locals", "[", "'request'", "]", ".", "_xross_handler", "handler", ".", "set_attrs", "(", "*", "*", "xross_attrs", ")", "if", "http_method", "is", "not", "None", ":", "handler", ".", "http_method", "=", "http_method", "handler", ".", "dispatch", "(", ")" ]
Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument.
[ "Instructs", "xross", "to", "handle", "AJAX", "calls", "right", "from", "the", "moment", "it", "is", "called", "." ]
414edbab2069c4ba77773d0ef3c8fc830b336efa
https://github.com/idlesign/django-xross/blob/414edbab2069c4ba77773d0ef3c8fc830b336efa/xross/utils.py#L30-L45
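The frame introspection here (`currentframe().f_back.f_locals['request']`) means the call only works directly inside a view whose local scope binds the name `request`; wrapping it in a helper breaks it. Hypothetical usage per the docstring (the operation name and `project_id` attr are made up for illustration):

```python
from django.http import HttpResponse
from xross.utils import xross_view, xross_listener

def reload_list(request, page=1, xross=None):
    # `page` comes from the client (e.g. a data-x... attribute); `xross`
    # receives the handler carrying the attrs passed to xross_listener().
    return {'page': page}

@xross_view(reload_list)
def index(request):
    # the local name must be `request`: the listener finds it via
    # currentframe().f_back.f_locals['request']
    xross_listener(http_method='POST', project_id=42)
    return HttpResponse('full page for ordinary (non-xross) requests')
```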
248,054
idlesign/django-xross
xross/utils.py
xross_view
def xross_view(*op_functions): """This decorator should be used to decorate application views that require xross functionality. :param list op_functions: operations (functions, methods) responsible for handling xross requests. Function names considered to be operations names. Using them clients will address those functions (e.g. xross-ready HTML elements may be marked with `data-xop` attributes to define the above mentioned operations, or just define `id` which will serve for the same purpose). They can accept `request` as first argument (for methods it'll be second, as the first is `self`), and other params from client side (e.g. defined in `data-x...` html element attributes). It can also accept `xross` keyword argument, which will contain any additional `xross attrs` as defined by `xross_listener()`. Those functions should return string or dict (handled by client as JSON) or HTTPResponse, e.g. from `render()` result. Examples: def do_something(request, param1_from_html_el, param2_from_html_el, xross=None): return '%s - %s' % (param1_from_html_el, param2_from_html_el) """ operations_dict = construct_operations_dict(*op_functions) def get_request(src): return src if isinstance(src, HttpRequest) else None def dec_wrapper(func): def func_wrapper(*fargs, **fkwargs): request_idx = getattr(func, '_req_idx', None) if request_idx is None: request = get_request(fargs[0]) request_idx = 0 if not request: # Possibly a class-based view where 0-attr is `self`. request = get_request(fargs[1]) request_idx = 1 func._req_idx = request_idx else: request = fargs[request_idx] if hasattr(request, '_xross_handler'): request._xross_handler._op_bindings.update(operations_dict['_op_bindings']) else: request._xross_handler = build_handler_class(operations_dict)(request, func) try: response = func(*fargs, **fkwargs) except HandlerException as e: return HttpResponseBadRequest(e if settings.DEBUG else b'') except ResponseEmpty as e: return HttpResponseNotFound(e if settings.DEBUG else b'') except ResponseReady as r: response = r.response if response is None: response = '' if isinstance(response, str): response = HttpResponse(response) elif isinstance(response, dict): response = HttpResponse(json.dumps(response), content_type='application/json') return response return func_wrapper return dec_wrapper
python
def xross_view(*op_functions): """This decorator should be used to decorate application views that require xross functionality. :param list op_functions: operations (functions, methods) responsible for handling xross requests. Function names considered to be operations names. Using them clients will address those functions (e.g. xross-ready HTML elements may be marked with `data-xop` attributes to define the above mentioned operations, or just define `id` which will serve for the same purpose). They can accept `request` as first argument (for methods it'll be second, as the first is `self`), and other params from client side (e.g. defined in `data-x...` html element attributes). It can also accept `xross` keyword argument, which will contain any additional `xross attrs` as defined by `xross_listener()`. Those functions should return string or dict (handled by client as JSON) or HTTPResponse, e.g. from `render()` result. Examples: def do_something(request, param1_from_html_el, param2_from_html_el, xross=None): return '%s - %s' % (param1_from_html_el, param2_from_html_el) """ operations_dict = construct_operations_dict(*op_functions) def get_request(src): return src if isinstance(src, HttpRequest) else None def dec_wrapper(func): def func_wrapper(*fargs, **fkwargs): request_idx = getattr(func, '_req_idx', None) if request_idx is None: request = get_request(fargs[0]) request_idx = 0 if not request: # Possibly a class-based view where 0-attr is `self`. request = get_request(fargs[1]) request_idx = 1 func._req_idx = request_idx else: request = fargs[request_idx] if hasattr(request, '_xross_handler'): request._xross_handler._op_bindings.update(operations_dict['_op_bindings']) else: request._xross_handler = build_handler_class(operations_dict)(request, func) try: response = func(*fargs, **fkwargs) except HandlerException as e: return HttpResponseBadRequest(e if settings.DEBUG else b'') except ResponseEmpty as e: return HttpResponseNotFound(e if settings.DEBUG else b'') except ResponseReady as r: response = r.response if response is None: response = '' if isinstance(response, str): response = HttpResponse(response) elif isinstance(response, dict): response = HttpResponse(json.dumps(response), content_type='application/json') return response return func_wrapper return dec_wrapper
[ "def", "xross_view", "(", "*", "op_functions", ")", ":", "operations_dict", "=", "construct_operations_dict", "(", "*", "op_functions", ")", "def", "get_request", "(", "src", ")", ":", "return", "src", "if", "isinstance", "(", "src", ",", "HttpRequest", ")", "else", "None", "def", "dec_wrapper", "(", "func", ")", ":", "def", "func_wrapper", "(", "*", "fargs", ",", "*", "*", "fkwargs", ")", ":", "request_idx", "=", "getattr", "(", "func", ",", "'_req_idx'", ",", "None", ")", "if", "request_idx", "is", "None", ":", "request", "=", "get_request", "(", "fargs", "[", "0", "]", ")", "request_idx", "=", "0", "if", "not", "request", ":", "# Possibly a class-based view where 0-attr is `self`.", "request", "=", "get_request", "(", "fargs", "[", "1", "]", ")", "request_idx", "=", "1", "func", ".", "_req_idx", "=", "request_idx", "else", ":", "request", "=", "fargs", "[", "request_idx", "]", "if", "hasattr", "(", "request", ",", "'_xross_handler'", ")", ":", "request", ".", "_xross_handler", ".", "_op_bindings", ".", "update", "(", "operations_dict", "[", "'_op_bindings'", "]", ")", "else", ":", "request", ".", "_xross_handler", "=", "build_handler_class", "(", "operations_dict", ")", "(", "request", ",", "func", ")", "try", ":", "response", "=", "func", "(", "*", "fargs", ",", "*", "*", "fkwargs", ")", "except", "HandlerException", "as", "e", ":", "return", "HttpResponseBadRequest", "(", "e", "if", "settings", ".", "DEBUG", "else", "b''", ")", "except", "ResponseEmpty", "as", "e", ":", "return", "HttpResponseNotFound", "(", "e", "if", "settings", ".", "DEBUG", "else", "b''", ")", "except", "ResponseReady", "as", "r", ":", "response", "=", "r", ".", "response", "if", "response", "is", "None", ":", "response", "=", "''", "if", "isinstance", "(", "response", ",", "str", ")", ":", "response", "=", "HttpResponse", "(", "response", ")", "elif", "isinstance", "(", "response", ",", "dict", ")", ":", "response", "=", "HttpResponse", "(", "json", ".", "dumps", "(", "response", ")", ",", "content_type", "=", "'application/json'", ")", "return", "response", "return", "func_wrapper", "return", "dec_wrapper" ]
This decorator should be used to decorate application views that require xross functionality. :param list op_functions: operations (functions, methods) responsible for handling xross requests. Function names considered to be operations names. Using them clients will address those functions (e.g. xross-ready HTML elements may be marked with `data-xop` attributes to define the above mentioned operations, or just define `id` which will serve for the same purpose). They can accept `request` as first argument (for methods it'll be second, as the first is `self`), and other params from client side (e.g. defined in `data-x...` html element attributes). It can also accept `xross` keyword argument, which will contain any additional `xross attrs` as defined by `xross_listener()`. Those functions should return string or dict (handled by client as JSON) or HTTPResponse, e.g. from `render()` result. Examples: def do_something(request, param1_from_html_el, param2_from_html_el, xross=None): return '%s - %s' % (param1_from_html_el, param2_from_html_el)
[ "This", "decorator", "should", "be", "used", "to", "decorate", "application", "views", "that", "require", "xross", "functionality", "." ]
414edbab2069c4ba77773d0ef3c8fc830b336efa
https://github.com/idlesign/django-xross/blob/414edbab2069c4ba77773d0ef3c8fc830b336efa/xross/utils.py#L48-L123
248,055
ikanor/intercept
intercept/intercept.py
intercept
def intercept(actions: dict={}): """ Decorates a function and handles any exceptions that may rise. Args: actions: A dictionary ``<exception type>: <action>``. Available actions\ are :class:`raises` and :class:`returns`. Returns: Any value declared using a :class:`returns` action. Raises: AnyException: if AnyException is declared together with a :class:`raises` action. InterceptorError: if the decorator is called with something different from a :class:`returns` or :class:`raises` action. Interceptors can be declared inline to return a value or raise an exception when the declared exception is risen: >>> @intercept({ ... TypeError: returns('intercepted!') ... }) ... def fails(foo): ... if foo: ... raise TypeError('inner exception') ... return 'ok' >>> fails(False) 'ok' >>> fails(True) 'intercepted!' >>> @intercept({ ... TypeError: raises(Exception('intercepted!')) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... Exception: intercepted! But they can also be declared and then used later on: >>> intercept0r = intercept({ ... TypeError: returns('intercepted!') ... }) >>> @intercept0r ... def fail(): ... raise TypeError('raising error') >>> fail() 'intercepted!' You can declare also an action that captures the risen exception by passing a callable to the action. This is useful to create a custom error message: >>> @intercept({ ... TypeError: returns(lambda e: 'intercepted {}'.format(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() 'intercepted inner exception' Or to convert captured exceptions into custom errors: >>> class CustomError(Exception): ... pass >>> @intercept({ ... TypeError: raises(lambda e: CustomError(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... intercept.CustomError: inner exception """ for action in actions.values(): if type(action) is not returns and type(action) is not raises: raise InterceptorError('Actions must be declared as `returns` or `raises`') def decorated(f): def wrapped(*args, **kargs): try: return f(*args, **kargs) except Exception as e: if e.__class__ in actions: return actions[e.__class__](e) else: raise return wrapped return decorated
python
def intercept(actions: dict={}): """ Decorates a function and handles any exceptions that may rise. Args: actions: A dictionary ``<exception type>: <action>``. Available actions\ are :class:`raises` and :class:`returns`. Returns: Any value declared using a :class:`returns` action. Raises: AnyException: if AnyException is declared together with a :class:`raises` action. InterceptorError: if the decorator is called with something different from a :class:`returns` or :class:`raises` action. Interceptors can be declared inline to return a value or raise an exception when the declared exception is risen: >>> @intercept({ ... TypeError: returns('intercepted!') ... }) ... def fails(foo): ... if foo: ... raise TypeError('inner exception') ... return 'ok' >>> fails(False) 'ok' >>> fails(True) 'intercepted!' >>> @intercept({ ... TypeError: raises(Exception('intercepted!')) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... Exception: intercepted! But they can also be declared and then used later on: >>> intercept0r = intercept({ ... TypeError: returns('intercepted!') ... }) >>> @intercept0r ... def fail(): ... raise TypeError('raising error') >>> fail() 'intercepted!' You can declare also an action that captures the risen exception by passing a callable to the action. This is useful to create a custom error message: >>> @intercept({ ... TypeError: returns(lambda e: 'intercepted {}'.format(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() 'intercepted inner exception' Or to convert captured exceptions into custom errors: >>> class CustomError(Exception): ... pass >>> @intercept({ ... TypeError: raises(lambda e: CustomError(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... intercept.CustomError: inner exception """ for action in actions.values(): if type(action) is not returns and type(action) is not raises: raise InterceptorError('Actions must be declared as `returns` or `raises`') def decorated(f): def wrapped(*args, **kargs): try: return f(*args, **kargs) except Exception as e: if e.__class__ in actions: return actions[e.__class__](e) else: raise return wrapped return decorated
[ "def", "intercept", "(", "actions", ":", "dict", "=", "{", "}", ")", ":", "for", "action", "in", "actions", ".", "values", "(", ")", ":", "if", "type", "(", "action", ")", "is", "not", "returns", "and", "type", "(", "action", ")", "is", "not", "raises", ":", "raise", "InterceptorError", "(", "'Actions must be declared as `returns` or `raises`'", ")", "def", "decorated", "(", "f", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kargs", ")", ":", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kargs", ")", "except", "Exception", "as", "e", ":", "if", "e", ".", "__class__", "in", "actions", ":", "return", "actions", "[", "e", ".", "__class__", "]", "(", "e", ")", "else", ":", "raise", "return", "wrapped", "return", "decorated" ]
Decorates a function and handles any exceptions that may rise. Args: actions: A dictionary ``<exception type>: <action>``. Available actions\ are :class:`raises` and :class:`returns`. Returns: Any value declared using a :class:`returns` action. Raises: AnyException: if AnyException is declared together with a :class:`raises` action. InterceptorError: if the decorator is called with something different from a :class:`returns` or :class:`raises` action. Interceptors can be declared inline to return a value or raise an exception when the declared exception is risen: >>> @intercept({ ... TypeError: returns('intercepted!') ... }) ... def fails(foo): ... if foo: ... raise TypeError('inner exception') ... return 'ok' >>> fails(False) 'ok' >>> fails(True) 'intercepted!' >>> @intercept({ ... TypeError: raises(Exception('intercepted!')) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... Exception: intercepted! But they can also be declared and then used later on: >>> intercept0r = intercept({ ... TypeError: returns('intercepted!') ... }) >>> @intercept0r ... def fail(): ... raise TypeError('raising error') >>> fail() 'intercepted!' You can declare also an action that captures the risen exception by passing a callable to the action. This is useful to create a custom error message: >>> @intercept({ ... TypeError: returns(lambda e: 'intercepted {}'.format(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() 'intercepted inner exception' Or to convert captured exceptions into custom errors: >>> class CustomError(Exception): ... pass >>> @intercept({ ... TypeError: raises(lambda e: CustomError(e)) ... }) ... def fail(): ... raise TypeError('inner exception') >>> fail() Traceback (most recent call last): ... intercept.CustomError: inner exception
[ "Decorates", "a", "function", "and", "handles", "any", "exceptions", "that", "may", "rise", "." ]
7aea4eed4f0942f2f781c560d4fdafd10fbbcc2d
https://github.com/ikanor/intercept/blob/7aea4eed4f0942f2f781c560d4fdafd10fbbcc2d/intercept/intercept.py#L7-L102
248,056
jirutka/sublimedsl
sublimedsl/keymap.py
Keymap.extend
def extend(self, *bindings): """ Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self """ self._bindings.extend(self._preprocess(bindings)) return self
python
def extend(self, *bindings): """ Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self """ self._bindings.extend(self._preprocess(bindings)) return self
[ "def", "extend", "(", "self", ",", "*", "bindings", ")", ":", "self", ".", "_bindings", ".", "extend", "(", "self", ".", "_preprocess", "(", "bindings", ")", ")", "return", "self" ]
Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self
[ "Append", "the", "given", "bindings", "to", "this", "keymap", "." ]
ca9fc79ab06e6efd79a6d5b37cb716688d4affc2
https://github.com/jirutka/sublimedsl/blob/ca9fc79ab06e6efd79a6d5b37cb716688d4affc2/sublimedsl/keymap.py#L108-L117
248,057
jirutka/sublimedsl
sublimedsl/keymap.py
Binding.when
def when(self, key): """ Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context: """ ctx = Context(key, self) self.context.append(ctx) return ctx
python
def when(self, key): """ Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context: """ ctx = Context(key, self) self.context.append(ctx) return ctx
[ "def", "when", "(", "self", ",", "key", ")", ":", "ctx", "=", "Context", "(", "key", ",", "self", ")", "self", ".", "context", ".", "append", "(", "ctx", ")", "return", "ctx" ]
Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context:
[ "Specify", "context", "i", ".", "e", ".", "condition", "that", "must", "be", "met", "." ]
ca9fc79ab06e6efd79a6d5b37cb716688d4affc2
https://github.com/jirutka/sublimedsl/blob/ca9fc79ab06e6efd79a6d5b37cb716688d4affc2/sublimedsl/keymap.py#L184-L194
248,058
wlwang41/cb
cb/commands.py
Command._split_source_page
def _split_source_page(self, path): """Split the source file texts by triple-dashed lines. shit code """ with codecs.open(path, "rb", "utf-8") as fd: textlist = fd.readlines() metadata_notation = "---\n" if textlist[0] != metadata_notation: logging.error( "{} first line must be triple-dashed!".format(path) ) sys.exit(1) metadata_textlist = [] metadata_end_flag = False idx = 1 max_idx = len(textlist) # TODO(crow): BE PYTHONIC!!! while not metadata_end_flag: metadata_textlist.append(textlist[idx]) idx += 1 if idx >= max_idx: logging.error( "{} doesn't have end triple-dashed!".format(path) ) sys.exit(1) if textlist[idx] == metadata_notation: metadata_end_flag = True content = textlist[idx + 1:] return metadata_textlist, content
python
def _split_source_page(self, path): """Split the source file texts by triple-dashed lines. shit code """ with codecs.open(path, "rb", "utf-8") as fd: textlist = fd.readlines() metadata_notation = "---\n" if textlist[0] != metadata_notation: logging.error( "{} first line must be triple-dashed!".format(path) ) sys.exit(1) metadata_textlist = [] metadata_end_flag = False idx = 1 max_idx = len(textlist) # TODO(crow): BE PYTHONIC!!! while not metadata_end_flag: metadata_textlist.append(textlist[idx]) idx += 1 if idx >= max_idx: logging.error( "{} doesn't have end triple-dashed!".format(path) ) sys.exit(1) if textlist[idx] == metadata_notation: metadata_end_flag = True content = textlist[idx + 1:] return metadata_textlist, content
[ "def", "_split_source_page", "(", "self", ",", "path", ")", ":", "with", "codecs", ".", "open", "(", "path", ",", "\"rb\"", ",", "\"utf-8\"", ")", "as", "fd", ":", "textlist", "=", "fd", ".", "readlines", "(", ")", "metadata_notation", "=", "\"---\\n\"", "if", "textlist", "[", "0", "]", "!=", "metadata_notation", ":", "logging", ".", "error", "(", "\"{} first line must be triple-dashed!\"", ".", "format", "(", "path", ")", ")", "sys", ".", "exit", "(", "1", ")", "metadata_textlist", "=", "[", "]", "metadata_end_flag", "=", "False", "idx", "=", "1", "max_idx", "=", "len", "(", "textlist", ")", "# TODO(crow): BE PYTHONIC!!!", "while", "not", "metadata_end_flag", ":", "metadata_textlist", ".", "append", "(", "textlist", "[", "idx", "]", ")", "idx", "+=", "1", "if", "idx", ">=", "max_idx", ":", "logging", ".", "error", "(", "\"{} doesn't have end triple-dashed!\"", ".", "format", "(", "path", ")", ")", "sys", ".", "exit", "(", "1", ")", "if", "textlist", "[", "idx", "]", "==", "metadata_notation", ":", "metadata_end_flag", "=", "True", "content", "=", "textlist", "[", "idx", "+", "1", ":", "]", "return", "metadata_textlist", ",", "content" ]
Split the source file texts by triple-dashed lines. shit code
[ "Split", "the", "source", "file", "texts", "by", "triple", "-", "dashed", "lines", "." ]
0a7faa427e3e6593980687dfe1a882ac99d743f6
https://github.com/wlwang41/cb/blob/0a7faa427e3e6593980687dfe1a882ac99d743f6/cb/commands.py#L89-L121
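The record's own TODO asks for a more Pythonic version; `list.index` with a start offset removes the flag-and-counter loop while giving the same result for well-formed input (front matter opens on the first line with `---\n` and closes at the next one):

```python
import codecs
import logging
import sys

def split_source_page(path):
    with codecs.open(path, "rb", "utf-8") as fd:
        textlist = fd.readlines()
    marker = "---\n"
    if not textlist or textlist[0] != marker:
        logging.error("{} first line must be triple-dashed!".format(path))
        sys.exit(1)
    try:
        end = textlist.index(marker, 1)   # closing marker after the opener
    except ValueError:
        logging.error("{} doesn't have end triple-dashed!".format(path))
        sys.exit(1)
    return textlist[1:end], textlist[end + 1:]
```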
248,059
wlwang41/cb
cb/commands.py
Command._get_feed_data
def _get_feed_data(self, file_paths): """ get data to display in feed file """ rv = {} for i in file_paths: # TODO(crow): only support first category _ = i.split('/') category = _[-2] name = _[-1].split('.')[0] page_config, md = self._get_config_and_content(i) parsed_md = tools.parse_markdown(md, self.site_config) rv.setdefault(category, {}) rv[category].update( { i: { 'title': page_config.get('title', ''), 'name': name.decode('utf-8'), 'content': parsed_md, 'date': page_config.get('date', '') } } ) return rv
python
def _get_feed_data(self, file_paths): """ get data to display in feed file """ rv = {} for i in file_paths: # TODO(crow): only support first category _ = i.split('/') category = _[-2] name = _[-1].split('.')[0] page_config, md = self._get_config_and_content(i) parsed_md = tools.parse_markdown(md, self.site_config) rv.setdefault(category, {}) rv[category].update( { i: { 'title': page_config.get('title', ''), 'name': name.decode('utf-8'), 'content': parsed_md, 'date': page_config.get('date', '') } } ) return rv
[ "def", "_get_feed_data", "(", "self", ",", "file_paths", ")", ":", "rv", "=", "{", "}", "for", "i", "in", "file_paths", ":", "# TODO(crow): only support first category", "_", "=", "i", ".", "split", "(", "'/'", ")", "category", "=", "_", "[", "-", "2", "]", "name", "=", "_", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "page_config", ",", "md", "=", "self", ".", "_get_config_and_content", "(", "i", ")", "parsed_md", "=", "tools", ".", "parse_markdown", "(", "md", ",", "self", ".", "site_config", ")", "rv", ".", "setdefault", "(", "category", ",", "{", "}", ")", "rv", "[", "category", "]", ".", "update", "(", "{", "i", ":", "{", "'title'", ":", "page_config", ".", "get", "(", "'title'", ",", "''", ")", ",", "'name'", ":", "name", ".", "decode", "(", "'utf-8'", ")", ",", "'content'", ":", "parsed_md", ",", "'date'", ":", "page_config", ".", "get", "(", "'date'", ",", "''", ")", "}", "}", ")", "return", "rv" ]
get data to display in feed file
[ "get", "data", "to", "display", "in", "feed", "file" ]
0a7faa427e3e6593980687dfe1a882ac99d743f6
https://github.com/wlwang41/cb/blob/0a7faa427e3e6593980687dfe1a882ac99d743f6/cb/commands.py#L206-L228
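Two observations on the record above: `name.decode('utf-8')` only exists on Python 2 byte strings (Python 3 `str` has no `.decode`, and paths are already text), and splitting on `'/'` by hand is what `os.path` is for. A small sketch of the path parsing:

```python
import os

def category_and_name(file_path):
    """('blog', 'hello') for 'source/blog/hello.md' -- mirrors the record."""
    category = os.path.basename(os.path.dirname(file_path))
    name = os.path.splitext(os.path.basename(file_path))[0]
    return category, name
```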
248,060
wlwang41/cb
cb/commands.py
Command._generate_feed
def _generate_feed(self, feed_data): """ render feed file with data """ atom_feed = self._render_html('atom.xml', feed_data) feed_path = os.path.join(os.getcwd(), 'public', 'atom.xml') with codecs.open(feed_path, 'wb', 'utf-8') as f: f.write(atom_feed)
python
def _generate_feed(self, feed_data): """ render feed file with data """ atom_feed = self._render_html('atom.xml', feed_data) feed_path = os.path.join(os.getcwd(), 'public', 'atom.xml') with codecs.open(feed_path, 'wb', 'utf-8') as f: f.write(atom_feed)
[ "def", "_generate_feed", "(", "self", ",", "feed_data", ")", ":", "atom_feed", "=", "self", ".", "_render_html", "(", "'atom.xml'", ",", "feed_data", ")", "feed_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'public'", ",", "'atom.xml'", ")", "with", "codecs", ".", "open", "(", "feed_path", ",", "'wb'", ",", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "atom_feed", ")" ]
render feed file with data
[ "render", "feed", "file", "with", "data" ]
0a7faa427e3e6593980687dfe1a882ac99d743f6
https://github.com/wlwang41/cb/blob/0a7faa427e3e6593980687dfe1a882ac99d743f6/cb/commands.py#L230-L238
248,061
treycucco/bidon
bidon/util/date.py
_normalize_tz
def _normalize_tz(val): """Normalizes all valid ISO8601 time zone variants to the one python will parse. :val: a timestamp string without a timezone, or with a timezone in one of the ISO8601 accepted formats. """ match = _TZ_RE.match(val) if match: ts, tz = match.groups() if len(tz) == 5: # If the length of the tz is 5 then it is of the form (+|-)dddd, which is exactly what python # wants, so just return it. return ts + tz if len(tz) == 6: # If the length of the tz is 6 then it is of the form (+|-)dd:dd, just remove the colon return ts + tz[:3] + tz[4:] if tz == "Z" or tz == "z": # If the tz is "Z" or 'z', return a timezone of +0000 return ts + "+0000" else: # Otherwise, the timzone must be of the format (+|-)dd, in which case we just need to add two # "0" to it, and it will be in the proper format. return ts + tz + "00" else: return val
python
def _normalize_tz(val): """Normalizes all valid ISO8601 time zone variants to the one python will parse. :val: a timestamp string without a timezone, or with a timezone in one of the ISO8601 accepted formats. """ match = _TZ_RE.match(val) if match: ts, tz = match.groups() if len(tz) == 5: # If the length of the tz is 5 then it is of the form (+|-)dddd, which is exactly what python # wants, so just return it. return ts + tz if len(tz) == 6: # If the length of the tz is 6 then it is of the form (+|-)dd:dd, just remove the colon return ts + tz[:3] + tz[4:] if tz == "Z" or tz == "z": # If the tz is "Z" or 'z', return a timezone of +0000 return ts + "+0000" else: # Otherwise, the timzone must be of the format (+|-)dd, in which case we just need to add two # "0" to it, and it will be in the proper format. return ts + tz + "00" else: return val
[ "def", "_normalize_tz", "(", "val", ")", ":", "match", "=", "_TZ_RE", ".", "match", "(", "val", ")", "if", "match", ":", "ts", ",", "tz", "=", "match", ".", "groups", "(", ")", "if", "len", "(", "tz", ")", "==", "5", ":", "# If the length of the tz is 5 then it is of the form (+|-)dddd, which is exactly what python", "# wants, so just return it.", "return", "ts", "+", "tz", "if", "len", "(", "tz", ")", "==", "6", ":", "# If the length of the tz is 6 then it is of the form (+|-)dd:dd, just remove the colon", "return", "ts", "+", "tz", "[", ":", "3", "]", "+", "tz", "[", "4", ":", "]", "if", "tz", "==", "\"Z\"", "or", "tz", "==", "\"z\"", ":", "# If the tz is \"Z\" or 'z', return a timezone of +0000", "return", "ts", "+", "\"+0000\"", "else", ":", "# Otherwise, the timzone must be of the format (+|-)dd, in which case we just need to add two", "# \"0\" to it, and it will be in the proper format.", "return", "ts", "+", "tz", "+", "\"00\"", "else", ":", "return", "val" ]
Normalizes all valid ISO8601 time zone variants to the one python will parse. :val: a timestamp string without a timezone, or with a timezone in one of the ISO8601 accepted formats.
[ "Normalizes", "all", "valid", "ISO8601", "time", "zone", "variants", "to", "the", "one", "python", "will", "parse", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/date.py#L81-L106
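A usage sketch for _normalize_tz covering the four zone variants it handles, assuming bidon is installed and the private helper is importable from bidon/util/date.py per the record's path; the sample timestamps are illustrative:

from datetime import datetime
from bidon.util.date import _normalize_tz  # private helper shown in the record above

for raw in ("2023-05-01T12:30:00Z",       # Z/z        -> +0000
            "2023-05-01T12:30:00+05:30",  # (+|-)dd:dd -> +0530
            "2023-05-01T12:30:00+05",     # (+|-)dd    -> +0500
            "2023-05-01T12:30:00+0530"):  # (+|-)dddd  -> unchanged
    fixed = _normalize_tz(raw)
    # %z accepts the normalized +HHMM form on every supported Python version
    print(fixed, datetime.strptime(fixed, "%Y-%m-%dT%H:%M:%S%z").utcoffset())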
248,062
eallik/spinoff
spinoff/remoting/mock.py
MockNetwork.node
def node(self, nodeid): """Creates a new node with the specified name, with `MockSocket` instances as incoming and outgoing sockets. Returns the implementation object created for the node from the cls, args and address specified, and the sockets. `cls` must be a callable that takes the insock and outsock, and the specified args and kwargs. """ _assert_valid_nodeid(nodeid) # addr = 'tcp://' + nodeid # insock = MockInSocket(addEndpoints=lambda endpoints: self.bind(addr, insock, endpoints)) # outsock = lambda: MockOutSocket(addr, self) return Node(hub=Hub(nodeid=nodeid))
python
def node(self, nodeid): """Creates a new node with the specified name, with `MockSocket` instances as incoming and outgoing sockets. Returns the implementation object created for the node from the cls, args and address specified, and the sockets. `cls` must be a callable that takes the insock and outsock, and the specified args and kwargs. """ _assert_valid_nodeid(nodeid) # addr = 'tcp://' + nodeid # insock = MockInSocket(addEndpoints=lambda endpoints: self.bind(addr, insock, endpoints)) # outsock = lambda: MockOutSocket(addr, self) return Node(hub=Hub(nodeid=nodeid))
[ "def", "node", "(", "self", ",", "nodeid", ")", ":", "_assert_valid_nodeid", "(", "nodeid", ")", "# addr = 'tcp://' + nodeid", "# insock = MockInSocket(addEndpoints=lambda endpoints: self.bind(addr, insock, endpoints))", "# outsock = lambda: MockOutSocket(addr, self)", "return", "Node", "(", "hub", "=", "Hub", "(", "nodeid", "=", "nodeid", ")", ")" ]
Creates a new node with the specified name, with `MockSocket` instances as incoming and outgoing sockets. Returns the implementation object created for the node from the cls, args and address specified, and the sockets. `cls` must be a callable that takes the insock and outsock, and the specified args and kwargs.
[ "Creates", "a", "new", "node", "with", "the", "specified", "name", "with", "MockSocket", "instances", "as", "incoming", "and", "outgoing", "sockets", "." ]
06b00d6b86c7422c9cb8f9a4b2915906e92b7d52
https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/remoting/mock.py#L33-L45
248,063
calvinku96/labreporthelper
labreporthelper/plot.py
PlotSingle2D.do_label
def do_label(self): """ Create label for x and y axis, title and suptitle """ outputdict = self.outputdict xlabel_options = self.kwargs.get("xlabel_options", {}) self.subplot.set_xlabel( self.kwargs.get("xlabel", "").format(**outputdict), **xlabel_options) ylabel_options = self.kwargs.get("ylabel_options", {}) self.subplot.set_ylabel( self.kwargs.get("ylabel", "").format(**outputdict), **ylabel_options) suptitle = self.kwargs.get("suptitle", None) if suptitle is not None: suptitle_options = self.kwargs.get("suptitle_options", {}) self.figure.suptitle( suptitle.format(**outputdict), fontsize=int(self.kwargs.get("suptitle_fontsize", 15)), **suptitle_options) title = self.kwargs.get("title", None) if title is not None: title_options = self.kwargs.get("title_options", {}) self.subplot.set_title( title.format(**outputdict), fontsize=int(self.kwargs.get("title_fontsize", 12)), **title_options) xlim = self.kwargs.get("xlim", None) ylim = self.kwargs.get("ylim", None) if xlim is not None: self.subplot.set_xlim(xlim) if ylim is not None: self.subplot.set_ylim(ylim) # axis format self.subplot.ticklabel_format( style="sci", useOffset=False, scilimits=self.kwargs.get("scilimits", (-4, 4)) ) return self
python
def do_label(self): """ Create label for x and y axis, title and suptitle """ outputdict = self.outputdict xlabel_options = self.kwargs.get("xlabel_options", {}) self.subplot.set_xlabel( self.kwargs.get("xlabel", "").format(**outputdict), **xlabel_options) ylabel_options = self.kwargs.get("ylabel_options", {}) self.subplot.set_ylabel( self.kwargs.get("ylabel", "").format(**outputdict), **ylabel_options) suptitle = self.kwargs.get("suptitle", None) if suptitle is not None: suptitle_options = self.kwargs.get("suptitle_options", {}) self.figure.suptitle( suptitle.format(**outputdict), fontsize=int(self.kwargs.get("suptitle_fontsize", 15)), **suptitle_options) title = self.kwargs.get("title", None) if title is not None: title_options = self.kwargs.get("title_options", {}) self.subplot.set_title( title.format(**outputdict), fontsize=int(self.kwargs.get("title_fontsize", 12)), **title_options) xlim = self.kwargs.get("xlim", None) ylim = self.kwargs.get("ylim", None) if xlim is not None: self.subplot.set_xlim(xlim) if ylim is not None: self.subplot.set_ylim(ylim) # axis format self.subplot.ticklabel_format( style="sci", useOffset=False, scilimits=self.kwargs.get("scilimits", (-4, 4)) ) return self
[ "def", "do_label", "(", "self", ")", ":", "outputdict", "=", "self", ".", "outputdict", "xlabel_options", "=", "self", ".", "kwargs", ".", "get", "(", "\"xlabel_options\"", ",", "{", "}", ")", "self", ".", "subplot", ".", "set_xlabel", "(", "self", ".", "kwargs", ".", "get", "(", "\"xlabel\"", ",", "\"\"", ")", ".", "format", "(", "*", "*", "outputdict", ")", ",", "*", "*", "xlabel_options", ")", "ylabel_options", "=", "self", ".", "kwargs", ".", "get", "(", "\"ylabel_options\"", ",", "{", "}", ")", "self", ".", "subplot", ".", "set_ylabel", "(", "self", ".", "kwargs", ".", "get", "(", "\"ylabel\"", ",", "\"\"", ")", ".", "format", "(", "*", "*", "outputdict", ")", ",", "*", "*", "ylabel_options", ")", "suptitle", "=", "self", ".", "kwargs", ".", "get", "(", "\"suptitle\"", ",", "None", ")", "if", "suptitle", "is", "not", "None", ":", "suptitle_options", "=", "self", ".", "kwargs", ".", "get", "(", "\"suptitle_options\"", ",", "{", "}", ")", "self", ".", "figure", ".", "suptitle", "(", "suptitle", ".", "format", "(", "*", "*", "outputdict", ")", ",", "fontsize", "=", "int", "(", "self", ".", "kwargs", ".", "get", "(", "\"suptitle_fontsize\"", ",", "15", ")", ")", ",", "*", "*", "suptitle_options", ")", "title", "=", "self", ".", "kwargs", ".", "get", "(", "\"title\"", ",", "None", ")", "if", "title", "is", "not", "None", ":", "title_options", "=", "self", ".", "kwargs", ".", "get", "(", "\"title_options\"", ",", "{", "}", ")", "self", ".", "subplot", ".", "set_title", "(", "title", ".", "format", "(", "*", "*", "outputdict", ")", ",", "fontsize", "=", "int", "(", "self", ".", "kwargs", ".", "get", "(", "\"title_fontsize\"", ",", "12", ")", ")", ",", "*", "*", "title_options", ")", "xlim", "=", "self", ".", "kwargs", ".", "get", "(", "\"xlim\"", ",", "None", ")", "ylim", "=", "self", ".", "kwargs", ".", "get", "(", "\"ylim\"", ",", "None", ")", "if", "xlim", "is", "not", "None", ":", "self", ".", "subplot", ".", "set_xlim", "(", "xlim", ")", "if", "ylim", "is", "not", "None", ":", "self", ".", "subplot", ".", "set_ylim", "(", "ylim", ")", "# axis format", "self", ".", "subplot", ".", "ticklabel_format", "(", "style", "=", "\"sci\"", ",", "useOffset", "=", "False", ",", "scilimits", "=", "self", ".", "kwargs", ".", "get", "(", "\"scilimits\"", ",", "(", "-", "4", ",", "4", ")", ")", ")", "return", "self" ]
Create labels for the x and y axes, title and suptitle
[ "Create", "labels", "for", "the", "x", "and", "y", "axes", "title", "and", "suptitle" ]
4d436241f389c02eb188c313190df62ab28c3763
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/plot.py#L148-L186
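The kwargs consumed by do_label can be read off the get() calls above; a hypothetical configuration dict exercising them (all keys match the code, all values are illustrative, and label strings may carry str.format placeholders filled from self.outputdict):

plot_kwargs = {
    "xlabel": "Time ({time_unit})",   # formatted against self.outputdict
    "ylabel": "Voltage (V)",
    "title": "Run {run_id}",
    "title_fontsize": 12,
    "suptitle": "Calibration sweep",
    "xlim": (0.0, 10.0),              # forwarded to set_xlim
    "scilimits": (-3, 3),             # forwarded to ticklabel_format
}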
248,064
questrail/arghelper
arghelper.py
extant_item
def extant_item(arg, arg_type): """Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error. """ if arg_type == "file": if not os.path.isfile(arg): raise argparse.ArgumentError( None, "The file {arg} does not exist.".format(arg=arg)) else: # File exists so return the filename return arg elif arg_type == "directory": if not os.path.isdir(arg): raise argparse.ArgumentError( None, "The directory {arg} does not exist.".format(arg=arg)) else: # Directory exists so return the directory name return arg
python
def extant_item(arg, arg_type): """Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error. """ if arg_type == "file": if not os.path.isfile(arg): raise argparse.ArgumentError( None, "The file {arg} does not exist.".format(arg=arg)) else: # File exists so return the filename return arg elif arg_type == "directory": if not os.path.isdir(arg): raise argparse.ArgumentError( None, "The directory {arg} does not exist.".format(arg=arg)) else: # Directory exists so return the directory name return arg
[ "def", "extant_item", "(", "arg", ",", "arg_type", ")", ":", "if", "arg_type", "==", "\"file\"", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "arg", ")", ":", "raise", "argparse", ".", "ArgumentError", "(", "None", ",", "\"The file {arg} does not exist.\"", ".", "format", "(", "arg", "=", "arg", ")", ")", "else", ":", "# File exists so return the filename", "return", "arg", "elif", "arg_type", "==", "\"directory\"", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "arg", ")", ":", "raise", "argparse", ".", "ArgumentError", "(", "None", ",", "\"The directory {arg} does not exist.\"", ".", "format", "(", "arg", "=", "arg", ")", ")", "else", ":", "# Directory exists so return the directory name", "return", "arg" ]
Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error.
[ "Determine", "if", "parser", "argument", "is", "an", "existing", "file", "or", "directory", "." ]
833d7d25a1f3daba70f186057d3d39a040c56200
https://github.com/questrail/arghelper/blob/833d7d25a1f3daba70f186057d3d39a040c56200/arghelper.py#L35-L66
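A sketch wiring extant_item into argparse as a type callable, assuming the module is importable as arghelper per the path above; when a path is missing, the raised ArgumentError is caught by argparse, which prints the message and exits:

import argparse
from arghelper import extant_item

parser = argparse.ArgumentParser()
parser.add_argument("config", type=lambda a: extant_item(a, "file"))
parser.add_argument("workdir", type=lambda a: extant_item(a, "directory"))
# hypothetical argv; both paths must exist or the parser errors out
args = parser.parse_args(["setup.py", "."])
print(args.config, args.workdir)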
248,065
questrail/arghelper
arghelper.py
parse_config_input_output
def parse_config_input_output(args=sys.argv): """Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """ parser = argparse.ArgumentParser( description='Process the input files using the given config') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) parser.add_argument( 'input_dir', help='Directory containing the input files.', metavar='DIR', type=extant_dir) parser.add_argument( 'output_dir', help='Directory where the output files should be saved.', metavar='DIR', type=extant_dir) return parser.parse_args(args[1:])
python
def parse_config_input_output(args=sys.argv): """Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """ parser = argparse.ArgumentParser( description='Process the input files using the given config') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) parser.add_argument( 'input_dir', help='Directory containing the input files.', metavar='DIR', type=extant_dir) parser.add_argument( 'output_dir', help='Directory where the output files should be saved.', metavar='DIR', type=extant_dir) return parser.parse_args(args[1:])
[ "def", "parse_config_input_output", "(", "args", "=", "sys", ".", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Process the input files using the given config'", ")", "parser", ".", "add_argument", "(", "'config_file'", ",", "help", "=", "'Configuration file.'", ",", "metavar", "=", "'FILE'", ",", "type", "=", "extant_file", ")", "parser", ".", "add_argument", "(", "'input_dir'", ",", "help", "=", "'Directory containing the input files.'", ",", "metavar", "=", "'DIR'", ",", "type", "=", "extant_dir", ")", "parser", ".", "add_argument", "(", "'output_dir'", ",", "help", "=", "'Directory where the output files should be saved.'", ",", "metavar", "=", "'DIR'", ",", "type", "=", "extant_dir", ")", "return", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")" ]
Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
[ "Parse", "the", "args", "using", "the", "config_file", "input_dir", "output_dir", "pattern" ]
833d7d25a1f3daba70f186057d3d39a040c56200
https://github.com/questrail/arghelper/blob/833d7d25a1f3daba70f186057d3d39a040c56200/arghelper.py#L69-L95
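Calling the parser directly with a fake argv (the args[1:] slice skips the program name); the three paths are hypothetical and are validated by the extant_file/extant_dir type callables:

args = parse_config_input_output(
    ["prog", "settings.ini", "input", "output"])
print(args.config_file, args.input_dir, args.output_dir)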
248,066
questrail/arghelper
arghelper.py
parse_config
def parse_config(args=sys.argv): """Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """ parser = argparse.ArgumentParser( description='Read in the config file') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) return parser.parse_args(args[1:])
python
def parse_config(args=sys.argv): """Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """ parser = argparse.ArgumentParser( description='Read in the config file') parser.add_argument( 'config_file', help='Configuration file.', metavar='FILE', type=extant_file) return parser.parse_args(args[1:])
[ "def", "parse_config", "(", "args", "=", "sys", ".", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Read in the config file'", ")", "parser", ".", "add_argument", "(", "'config_file'", ",", "help", "=", "'Configuration file.'", ",", "metavar", "=", "'FILE'", ",", "type", "=", "extant_file", ")", "return", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")" ]
Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
[ "Parse", "the", "args", "using", "the", "config_file", "pattern" ]
833d7d25a1f3daba70f186057d3d39a040c56200
https://github.com/questrail/arghelper/blob/833d7d25a1f3daba70f186057d3d39a040c56200/arghelper.py#L98-L116
248,067
davisd50/sparc.cache
sparc/cache/sql/sql.py
SqlObjectCacheArea.cache
def cache(self, CachableItem): """Updates cache area with latest information """ _cachedItem = self.get(CachableItem) if not _cachedItem: _dirtyCachedItem = self.mapper.get(CachableItem) logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__)) cached_item = self.session.merge(_dirtyCachedItem) notify(CacheObjectCreatedEvent(cached_item, self)) return cached_item else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) cached_item = self.session.merge(_newCacheItem) notify(CacheObjectModifiedEvent(cached_item, self)) return cached_item return False
python
def cache(self, CachableItem): """Updates cache area with latest information """ _cachedItem = self.get(CachableItem) if not _cachedItem: _dirtyCachedItem = self.mapper.get(CachableItem) logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__)) cached_item = self.session.merge(_dirtyCachedItem) notify(CacheObjectCreatedEvent(cached_item, self)) return cached_item else: _newCacheItem = self.mapper.get(CachableItem) if _cachedItem != _newCacheItem: logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__)) cached_item = self.session.merge(_newCacheItem) notify(CacheObjectModifiedEvent(cached_item, self)) return cached_item return False
[ "def", "cache", "(", "self", ",", "CachableItem", ")", ":", "_cachedItem", "=", "self", ".", "get", "(", "CachableItem", ")", "if", "not", "_cachedItem", ":", "_dirtyCachedItem", "=", "self", ".", "mapper", ".", "get", "(", "CachableItem", ")", "logger", ".", "debug", "(", "\"new cachable item added to sql cache area {id: %s, type: %s}\"", ",", "str", "(", "_dirtyCachedItem", ".", "getId", "(", ")", ")", ",", "str", "(", "_dirtyCachedItem", ".", "__class__", ")", ")", "cached_item", "=", "self", ".", "session", ".", "merge", "(", "_dirtyCachedItem", ")", "notify", "(", "CacheObjectCreatedEvent", "(", "cached_item", ",", "self", ")", ")", "return", "cached_item", "else", ":", "_newCacheItem", "=", "self", ".", "mapper", ".", "get", "(", "CachableItem", ")", "if", "_cachedItem", "!=", "_newCacheItem", ":", "logger", ".", "debug", "(", "\"Cachable item modified in sql cache area {id: %s, type: %s}\"", ",", "str", "(", "_newCacheItem", ".", "getId", "(", ")", ")", ",", "str", "(", "_newCacheItem", ".", "__class__", ")", ")", "cached_item", "=", "self", ".", "session", ".", "merge", "(", "_newCacheItem", ")", "notify", "(", "CacheObjectModifiedEvent", "(", "cached_item", ",", "self", ")", ")", "return", "cached_item", "return", "False" ]
Updates cache area with latest information
[ "Updates", "cache", "area", "with", "latest", "information" ]
f2378aad48c368a53820e97b093ace790d4d4121
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L153-L170
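Given the three return paths above (new row, changed row, or False for unchanged), a hypothetical driver; area and items are stand-ins for a configured SqlObjectCacheArea and an iterable of ICachableItem objects:

def sync_items(area, items):
    """Hypothetical driver for SqlObjectCacheArea.cache."""
    for item in items:
        cached = area.cache(item)
        if cached is False:
            continue  # already cached and unchanged; no event was fired
        # a CacheObjectCreatedEvent or CacheObjectModifiedEvent was notified
        print("cached:", cached.getId())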
248,068
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
format_json
def format_json(item, **kwargs): """ formats a datatype object to a json value """ try: json.dumps(item.value) return item.value except TypeError: if 'time' in item.class_type.lower() \ or 'date' in item.class_type.lower(): return item.value.isoformat() raise
python
def format_json(item, **kwargs): """ formats a datatype object to a json value """ try: json.dumps(item.value) return item.value except TypeError: if 'time' in item.class_type.lower() \ or 'date' in item.class_type.lower(): return item.value.isoformat() raise
[ "def", "format_json", "(", "item", ",", "*", "*", "kwargs", ")", ":", "try", ":", "json", ".", "dumps", "(", "item", ".", "value", ")", "return", "item", ".", "value", "except", "TypeError", ":", "if", "'time'", "in", "item", ".", "class_type", ".", "lower", "(", ")", "or", "'date'", "in", "item", ".", "class_type", ".", "lower", "(", ")", ":", "return", "item", ".", "value", ".", "isoformat", "(", ")", "raise" ]
formats a datatype object to a json value
[ "formats", "a", "datatype", "object", "to", "a", "json", "value" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L31-L40
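A self-contained check of the two branches, using a throwaway stand-in for the datatype object (real callers pass rdfframework datatype instances); the import path follows the record's module path:

import datetime
from rdfframework.datatypes.namespaces import format_json  # path per the record

class FakeItem:
    """Hypothetical stand-in exposing the two attributes format_json reads."""
    def __init__(self, value, class_type):
        self.value, self.class_type = value, class_type

print(format_json(FakeItem("hello", "XsdString")))                   # JSON-safe: returned as-is
print(format_json(FakeItem(datetime.date(2020, 1, 1), "XsdDate")))   # -> '2020-01-01'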
248,069
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
format_sparql
def format_sparql(item, dt_format='turtle', **kwargs): """ Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri'] """ try: rtn_val = json.dumps(item.value) rtn_val = item.value except: if 'time' in item.class_type.lower() \ or 'date' in item.class_type.lower(): rtn_val = item.value.isoformat() else: rtn_val = str(item.value) if hasattr(item, "datatype"): if hasattr(item, "lang") and item.lang: rtn_val = '%s@%s' % (json.dumps(rtn_val), item.lang) else: dt = item.datatype if dt_format == "uri": dt = item.datatype.sparql_uri if item.datatype in ["xsd_string", "xsd_dateTime", "xsd_time", "xsd_date"]: rtn_val = json.dumps(rtn_val) else: rtn_val = '"%s"' % json.dumps(rtn_val) rtn_val = '%s^^%s' % (rtn_val, dt.sparql) return rtn_val
python
def format_sparql(item, dt_format='turtle', **kwargs): """ Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri'] """ try: rtn_val = json.dumps(item.value) rtn_val = item.value except: if 'time' in item.class_type.lower() \ or 'date' in item.class_type.lower(): rtn_val = item.value.isoformat() else: rtn_val = str(item.value) if hasattr(item, "datatype"): if hasattr(item, "lang") and item.lang: rtn_val = '%s@%s' % (json.dumps(rtn_val), item.lang) else: dt = item.datatype if dt_format == "uri": dt = item.datatype.sparql_uri if item.datatype in ["xsd_string", "xsd_dateTime", "xsd_time", "xsd_date"]: rtn_val = json.dumps(rtn_val) else: rtn_val = '"%s"' % json.dumps(rtn_val) rtn_val = '%s^^%s' % (rtn_val, dt.sparql) return rtn_val
[ "def", "format_sparql", "(", "item", ",", "dt_format", "=", "'turtle'", ",", "*", "*", "kwargs", ")", ":", "try", ":", "rtn_val", "=", "json", ".", "dumps", "(", "item", ".", "value", ")", "rtn_val", "=", "item", ".", "value", "except", ":", "if", "'time'", "in", "item", ".", "class_type", ".", "lower", "(", ")", "or", "'date'", "in", "item", ".", "class_type", ".", "lower", "(", ")", ":", "rtn_val", "=", "item", ".", "value", ".", "isoformat", "(", ")", "else", ":", "rtn_val", "=", "str", "(", "item", ".", "value", ")", "if", "hasattr", "(", "item", ",", "\"datatype\"", ")", ":", "if", "hasattr", "(", "item", ",", "\"lang\"", ")", "and", "item", ".", "lang", ":", "rtn_val", "=", "'%s@%s'", "%", "(", "json", ".", "dumps", "(", "rtn_val", ")", ",", "item", ".", "lang", ")", "else", ":", "dt", "=", "item", ".", "datatype", "if", "dt_format", "==", "\"uri\"", ":", "dt", "=", "item", ".", "datatype", ".", "sparql_uri", "if", "item", ".", "datatype", "in", "[", "\"xsd_string\"", ",", "\"xsd_dateTime\"", ",", "\"xsd_time\"", ",", "\"xsd_date\"", "]", ":", "rtn_val", "=", "json", ".", "dumps", "(", "rtn_val", ")", "else", ":", "rtn_val", "=", "'\"%s\"'", "%", "json", ".", "dumps", "(", "rtn_val", ")", "rtn_val", "=", "'%s^^%s'", "%", "(", "rtn_val", ",", "dt", ".", "sparql", ")", "return", "rtn_val" ]
Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri']
[ "Formats", "a", "datatype", "value", "to", "a", "SPARQL", "representation" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L42-L74
248,070
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
BaseRdfDataType._format
def _format(self, method="sparql", dt_format="turtle"): """ Rormats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjuction with the 'sparql' method """ try: return __FORMAT_OPTIONS__[method](self, dt_format=dt_format) except KeyError: raise NotImplementedError("'{}' is not a valid format method" "".format(method))
python
def _format(self, method="sparql", dt_format="turtle"): """ Rormats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjuction with the 'sparql' method """ try: return __FORMAT_OPTIONS__[method](self, dt_format=dt_format) except KeyError: raise NotImplementedError("'{}' is not a valid format method" "".format(method))
[ "def", "_format", "(", "self", ",", "method", "=", "\"sparql\"", ",", "dt_format", "=", "\"turtle\"", ")", ":", "try", ":", "return", "__FORMAT_OPTIONS__", "[", "method", "]", "(", "self", ",", "dt_format", "=", "dt_format", ")", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "\"'{}' is not a valid format method\"", "\"\"", ".", "format", "(", "method", ")", ")" ]
Formats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjunction with the 'sparql' method
[ "Formats", "the", "value", "in", "various", "formats" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L112-L127
248,071
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.bind
def bind(self, prefix, namespace, *args, **kwargs): """ Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether or not create the lookup reference dictionaries Example usage: RdfNsManager.rdf.type => http://www.w3.org/1999/02/22-rdf-syntax-ns#type """ # RdfNamespace(prefix, namespace, **kwargs) setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs)) if kwargs.pop('calc', True): self.__make_dicts__
python
def bind(self, prefix, namespace, *args, **kwargs): """ Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether or not create the lookup reference dictionaries Example usage: RdfNsManager.rdf.type => http://www.w3.org/1999/02/22-rdf-syntax-ns#type """ # RdfNamespace(prefix, namespace, **kwargs) setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs)) if kwargs.pop('calc', True): self.__make_dicts__
[ "def", "bind", "(", "self", ",", "prefix", ",", "namespace", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# RdfNamespace(prefix, namespace, **kwargs)", "setattr", "(", "self", ",", "prefix", ",", "RdfNamespace", "(", "prefix", ",", "namespace", ",", "*", "*", "kwargs", ")", ")", "if", "kwargs", ".", "pop", "(", "'calc'", ",", "True", ")", ":", "self", ".", "__make_dicts__" ]
Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether or not create the lookup reference dictionaries Example usage: RdfNsManager.rdf.type => http://www.w3.org/1999/02/22-rdf-syntax-ns#type
[ "Extends", "the", "function", "to", "add", "an", "attribute", "to", "the", "class", "for", "each", "added", "namespace", "to", "allow", "for", "use", "of", "dot", "notation", ".", "All", "prefixes", "are", "converted", "to", "lowercase" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L459-L479
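A minimal sketch of bind and the dot-notation access it sets up; the import path and the no-argument constructor are assumptions (NSM is reused in the sketches after the later records):

from rdfframework.datatypes import RdfNsManager  # import path assumed

NSM = RdfNsManager()
NSM.bind("schema", "http://schema.org/")
print(NSM.schema)  # the RdfNamespace instance bound to http://schema.org/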
248,072
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.prefix
def prefix(self, format="sparql"): ''' Generates a string of the rdf namespaces listed used in the framework format: "sparql" or "turtle" ''' lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) _return_str = "" if format.lower() == "sparql": return "\n".join([ns._sparql_ for ns in self.namespaces]) elif format.lower() in ["turtle", "ttl"]: return "\n".join([ns._ttl_ for ns in self.namespaces]) elif format.lower() in ["rdf", "xml", "rdf/xml"]: return "<rdf:RDF %s>" % \ " ".join([ns._xml_ for ns in self.namespaces]) else: raise NotImplementedError("'%s' is not a valid prefix type." % format)
python
def prefix(self, format="sparql"): ''' Generates a string of the rdf namespaces listed used in the framework format: "sparql" or "turtle" ''' lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) _return_str = "" if format.lower() == "sparql": return "\n".join([ns._sparql_ for ns in self.namespaces]) elif format.lower() in ["turtle", "ttl"]: return "\n".join([ns._ttl_ for ns in self.namespaces]) elif format.lower() in ["rdf", "xml", "rdf/xml"]: return "<rdf:RDF %s>" % \ " ".join([ns._xml_ for ns in self.namespaces]) else: raise NotImplementedError("'%s' is not a valid prefix type." % format)
[ "def", "prefix", "(", "self", ",", "format", "=", "\"sparql\"", ")", ":", "lg", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "ln", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "lg", ".", "setLevel", "(", "self", ".", "log_level", ")", "_return_str", "=", "\"\"", "if", "format", ".", "lower", "(", ")", "==", "\"sparql\"", ":", "return", "\"\\n\"", ".", "join", "(", "[", "ns", ".", "_sparql_", "for", "ns", "in", "self", ".", "namespaces", "]", ")", "elif", "format", ".", "lower", "(", ")", "in", "[", "\"turtle\"", ",", "\"ttl\"", "]", ":", "return", "\"\\n\"", ".", "join", "(", "[", "ns", ".", "_ttl_", "for", "ns", "in", "self", ".", "namespaces", "]", ")", "elif", "format", ".", "lower", "(", ")", "in", "[", "\"rdf\"", ",", "\"xml\"", ",", "\"rdf/xml\"", "]", ":", "return", "\"<rdf:RDF %s>\"", "%", "\" \"", ".", "join", "(", "[", "ns", ".", "_xml_", "for", "ns", "in", "self", ".", "namespaces", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "\"'%s' is not a valid prefix type.\"", "%", "format", ")" ]
Generates a string of the rdf namespaces used in the framework format: "sparql", "turtle", or "rdf/xml"
[ "Generates", "a", "string", "of", "the", "rdf", "namespaces", "used", "in", "the", "framework" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L481-L501
248,073
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.load
def load(self, filepath, file_encoding=None): """ Reads the the beginning of a turtle file and sets the prefix's used in that file and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary """ with open(filepath, encoding=file_encoding) as inf: for line in inf: current_line = str(line).strip() if current_line.startswith("@prefix"): self._add_ttl_ns(current_line.replace("\n","")) elif len(current_line) > 10: break self.__make_dicts__
python
def load(self, filepath, file_encoding=None): """ Reads the the beginning of a turtle file and sets the prefix's used in that file and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary """ with open(filepath, encoding=file_encoding) as inf: for line in inf: current_line = str(line).strip() if current_line.startswith("@prefix"): self._add_ttl_ns(current_line.replace("\n","")) elif len(current_line) > 10: break self.__make_dicts__
[ "def", "load", "(", "self", ",", "filepath", ",", "file_encoding", "=", "None", ")", ":", "with", "open", "(", "filepath", ",", "encoding", "=", "file_encoding", ")", "as", "inf", ":", "for", "line", "in", "inf", ":", "current_line", "=", "str", "(", "line", ")", ".", "strip", "(", ")", "if", "current_line", ".", "startswith", "(", "\"@prefix\"", ")", ":", "self", ".", "_add_ttl_ns", "(", "current_line", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", "elif", "len", "(", "current_line", ")", ">", "10", ":", "break", "self", ".", "__make_dicts__" ]
Reads the beginning of a turtle file, binds the prefixes used in that file, and sets the prefix attributes Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary
[ "Reads", "the", "beginning", "of", "a", "turtle", "file", "binds", "the", "prefixes", "used", "in", "that", "file", "and", "sets", "the", "prefix", "attributes" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L503-L518
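Continuing the NSM sketch above: load only scans the @prefix header, breaking at the first long non-prefix line, so a two-line turtle preamble is enough to bind both namespaces:

import os, tempfile

ttl = ('@prefix schema: <http://schema.org/> .\n'
       '@prefix ex: <http://example.org/ns#> .\n')
with tempfile.NamedTemporaryFile("w", suffix=".ttl", delete=False) as f:
    f.write(ttl)
NSM.load(f.name)  # binds schema: and ex: via _add_ttl_ns
os.remove(f.name)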
248,074
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.dict_load
def dict_load(self, ns_dict): """ Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri """ for prefix, uri in ns_dict.items(): self.bind(prefix, uri, override=False, calc=False) self.__make_dicts__
python
def dict_load(self, ns_dict): """ Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri """ for prefix, uri in ns_dict.items(): self.bind(prefix, uri, override=False, calc=False) self.__make_dicts__
[ "def", "dict_load", "(", "self", ",", "ns_dict", ")", ":", "for", "prefix", ",", "uri", "in", "ns_dict", ".", "items", "(", ")", ":", "self", ".", "bind", "(", "prefix", ",", "uri", ",", "override", "=", "False", ",", "calc", "=", "False", ")", "self", ".", "__make_dicts__" ]
Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri
[ "Reads", "a", "dictionary", "of", "namespaces", "and", "binds", "them", "to", "the", "manager" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L524-L533
248,075
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager._add_ttl_ns
def _add_ttl_ns(self, line): """ takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) lg.debug("line:\n%s", line) line = str(line).strip() # if the line is not a prefix line exit if line is None or line == 'none' or line == '' \ or not line.lower().startswith('@prefix'): return # parse the turtle line line = line.replace("@prefix","",1).strip() if line.endswith("."): line = line[:-1] prefix = line[:line.find(":")].strip() uri = self.clean_iri(line[line.find(":")+1:].strip()) # add the namespace to the class lg.debug("\nprefix: %s uri: %s", prefix, uri) self.bind(prefix, uri, override=False, calc=False)
python
def _add_ttl_ns(self, line): """ takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) lg.debug("line:\n%s", line) line = str(line).strip() # if the line is not a prefix line exit if line is None or line == 'none' or line == '' \ or not line.lower().startswith('@prefix'): return # parse the turtle line line = line.replace("@prefix","",1).strip() if line.endswith("."): line = line[:-1] prefix = line[:line.find(":")].strip() uri = self.clean_iri(line[line.find(":")+1:].strip()) # add the namespace to the class lg.debug("\nprefix: %s uri: %s", prefix, uri) self.bind(prefix, uri, override=False, calc=False)
[ "def", "_add_ttl_ns", "(", "self", ",", "line", ")", ":", "lg", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "ln", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "lg", ".", "setLevel", "(", "self", ".", "log_level", ")", "lg", ".", "debug", "(", "\"line:\\n%s\"", ",", "line", ")", "line", "=", "str", "(", "line", ")", ".", "strip", "(", ")", "# if the line is not a prefix line exit", "if", "line", "is", "None", "or", "line", "==", "'none'", "or", "line", "==", "''", "or", "not", "line", ".", "lower", "(", ")", ".", "startswith", "(", "'@prefix'", ")", ":", "return", "# parse the turtle line", "line", "=", "line", ".", "replace", "(", "\"@prefix\"", ",", "\"\"", ",", "1", ")", ".", "strip", "(", ")", "if", "line", ".", "endswith", "(", "\".\"", ")", ":", "line", "=", "line", "[", ":", "-", "1", "]", "prefix", "=", "line", "[", ":", "line", ".", "find", "(", "\":\"", ")", "]", ".", "strip", "(", ")", "uri", "=", "self", ".", "clean_iri", "(", "line", "[", "line", ".", "find", "(", "\":\"", ")", "+", "1", ":", "]", ".", "strip", "(", ")", ")", "# add the namespace to the class", "lg", ".", "debug", "(", "\"\\nprefix: %s uri: %s\"", ",", "prefix", ",", "uri", ")", "self", ".", "bind", "(", "prefix", ",", "uri", ",", "override", "=", "False", ",", "calc", "=", "False", ")" ]
takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string
[ "takes", "one", "prefix", "line", "from", "the", "turtle", "file", "and", "binds", "the", "namespace", "to", "the", "class" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L535-L559
248,076
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.del_ns
def del_ns(self, namespace): """ will remove a namespace ref from the manager. either Arg is optional. args: namespace: prefix, string or Namespace() to remove """ # remove the item from the namespace dict namespace = str(namespace) attr_name = None if hasattr(self, namespace): delattr(self, namespace)
python
def del_ns(self, namespace): """ will remove a namespace ref from the manager. either Arg is optional. args: namespace: prefix, string or Namespace() to remove """ # remove the item from the namespace dict namespace = str(namespace) attr_name = None if hasattr(self, namespace): delattr(self, namespace)
[ "def", "del_ns", "(", "self", ",", "namespace", ")", ":", "# remove the item from the namespace dict", "namespace", "=", "str", "(", "namespace", ")", "attr_name", "=", "None", "if", "hasattr", "(", "self", ",", "namespace", ")", ":", "delattr", "(", "self", ",", "namespace", ")" ]
will remove a namespace ref from the manager. args: namespace: prefix, string or Namespace() to remove
[ "will", "remove", "a", "namespace", "ref", "from", "the", "manager", "." ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L561-L572
248,077
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.convert_to_uri
def convert_to_uri(self, value, strip_iri=True): ''' converts a prefixed rdf ns equivalent value to its uri form. If not found returns the value as is args: value: the URI/IRI to convert strip_iri: removes the < and > signs rdflib_uri: returns an rdflib URIRef ''' parsed = self.parse_uri(str(value)) try: new_uri = "%s%s" % (self.ns_dict[parsed[0]], parsed[1]) if not strip_iri: return self.iri(new_uri) return new_uri except KeyError: return self.rpyhttp(value)
python
def convert_to_uri(self, value, strip_iri=True): ''' converts a prefixed rdf ns equivalent value to its uri form. If not found returns the value as is args: value: the URI/IRI to convert strip_iri: removes the < and > signs rdflib_uri: returns an rdflib URIRef ''' parsed = self.parse_uri(str(value)) try: new_uri = "%s%s" % (self.ns_dict[parsed[0]], parsed[1]) if not strip_iri: return self.iri(new_uri) return new_uri except KeyError: return self.rpyhttp(value)
[ "def", "convert_to_uri", "(", "self", ",", "value", ",", "strip_iri", "=", "True", ")", ":", "parsed", "=", "self", ".", "parse_uri", "(", "str", "(", "value", ")", ")", "try", ":", "new_uri", "=", "\"%s%s\"", "%", "(", "self", ".", "ns_dict", "[", "parsed", "[", "0", "]", "]", ",", "parsed", "[", "1", "]", ")", "if", "not", "strip_iri", ":", "return", "self", ".", "iri", "(", "new_uri", ")", "return", "new_uri", "except", "KeyError", ":", "return", "self", ".", "rpyhttp", "(", "value", ")" ]
converts a prefixed rdf ns equivalent value to its uri form. If not found returns the value as is args: value: the URI/IRI to convert strip_iri: removes the < and > signs
[ "converts", "a", "prefixed", "rdf", "ns", "equivalent", "value", "to", "its", "uri", "form", ".", "If", "not", "found", "returns", "the", "value", "as", "is" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L608-L625
248,078
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.get_uri_parts
def get_uri_parts(self, value): """takes an value and returns a tuple of the parts args: value: a uri in any form pyuri, ttl or full IRI """ if value.startswith('pyuri_'): value = self.rpyhttp(value) parts = self.parse_uri(value) try: return (self.ns_dict[parts[0]], parts[1]) except KeyError: try: return (self.ns_dict[parts[0].lower()], parts[1]) except KeyError: return ((None, parts[0]), parts[1])
python
def get_uri_parts(self, value): """takes an value and returns a tuple of the parts args: value: a uri in any form pyuri, ttl or full IRI """ if value.startswith('pyuri_'): value = self.rpyhttp(value) parts = self.parse_uri(value) try: return (self.ns_dict[parts[0]], parts[1]) except KeyError: try: return (self.ns_dict[parts[0].lower()], parts[1]) except KeyError: return ((None, parts[0]), parts[1])
[ "def", "get_uri_parts", "(", "self", ",", "value", ")", ":", "if", "value", ".", "startswith", "(", "'pyuri_'", ")", ":", "value", "=", "self", ".", "rpyhttp", "(", "value", ")", "parts", "=", "self", ".", "parse_uri", "(", "value", ")", "try", ":", "return", "(", "self", ".", "ns_dict", "[", "parts", "[", "0", "]", "]", ",", "parts", "[", "1", "]", ")", "except", "KeyError", ":", "try", ":", "return", "(", "self", ".", "ns_dict", "[", "parts", "[", "0", "]", ".", "lower", "(", ")", "]", ",", "parts", "[", "1", "]", ")", "except", "KeyError", ":", "return", "(", "(", "None", ",", "parts", "[", "0", "]", ")", ",", "parts", "[", "1", "]", ")" ]
takes a value and returns a tuple of the parts args: value: a uri in any form pyuri, ttl or full IRI
[ "takes", "a", "value", "and", "returns", "a", "tuple", "of", "the", "parts" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L627-L642
248,079
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.rpyhttp
def rpyhttp(value): """ converts a no namespace pyuri back to a standard uri """ if value.startswith("http"): return value try: parts = value.split("_") del parts[0] _uri = base64.b64decode(parts.pop(0)).decode() return _uri + "_".join(parts) except (IndexError, UnicodeDecodeError, binascii.Error): # if the value is not a pyuri return the value return value
python
def rpyhttp(value): """ converts a no namespace pyuri back to a standard uri """ if value.startswith("http"): return value try: parts = value.split("_") del parts[0] _uri = base64.b64decode(parts.pop(0)).decode() return _uri + "_".join(parts) except (IndexError, UnicodeDecodeError, binascii.Error): # if the value is not a pyuri return the value return value
[ "def", "rpyhttp", "(", "value", ")", ":", "if", "value", ".", "startswith", "(", "\"http\"", ")", ":", "return", "value", "try", ":", "parts", "=", "value", ".", "split", "(", "\"_\"", ")", "del", "parts", "[", "0", "]", "_uri", "=", "base64", ".", "b64decode", "(", "parts", ".", "pop", "(", "0", ")", ")", ".", "decode", "(", ")", "return", "_uri", "+", "\"_\"", ".", "join", "(", "parts", ")", "except", "(", "IndexError", ",", "UnicodeDecodeError", ",", "binascii", ".", "Error", ")", ":", "# if the value is not a pyuri return the value", "return", "value" ]
converts a no namespace pyuri back to a standard uri
[ "converts", "a", "no", "namespace", "pyuri", "back", "to", "a", "standard", "uri" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L651-L662
248,080
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.pyhttp
def pyhttp(self, value): """ converts a no namespaces uri to a python excessable name """ if value.startswith("pyuri_"): return value parts = self.parse_uri(value) return "pyuri_%s_%s" % (base64.b64encode(bytes(parts[0], "utf-8")).decode(), parts[1])
python
def pyhttp(self, value): """ converts a no namespaces uri to a python excessable name """ if value.startswith("pyuri_"): return value parts = self.parse_uri(value) return "pyuri_%s_%s" % (base64.b64encode(bytes(parts[0], "utf-8")).decode(), parts[1])
[ "def", "pyhttp", "(", "self", ",", "value", ")", ":", "if", "value", ".", "startswith", "(", "\"pyuri_\"", ")", ":", "return", "value", "parts", "=", "self", ".", "parse_uri", "(", "value", ")", "return", "\"pyuri_%s_%s\"", "%", "(", "base64", ".", "b64encode", "(", "bytes", "(", "parts", "[", "0", "]", ",", "\"utf-8\"", ")", ")", ".", "decode", "(", ")", ",", "parts", "[", "1", "]", ")" ]
converts a no-namespace uri to a python-accessible name
[ "converts", "a", "no-namespace", "uri", "to", "a", "python-accessible", "name" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L664-L671
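Round-tripping a URI through pyhttp/rpyhttp with the NSM sketch above, assuming parse_uri splits the URI into its namespace and local parts; the base64 segment encodes the namespace, so the printed value should look roughly like the comment:

py_name = NSM.pyhttp("http://schema.org/name")
print(py_name)               # ~ pyuri_aHR0cDovL3NjaGVtYS5vcmcv_name
print(NSM.rpyhttp(py_name))  # -> http://schema.org/name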
248,081
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.iri
def iri(uri_string): """converts a string to an IRI or returns an IRI if already formated Args: uri_string: uri in string format Returns: formated uri with <> """ uri_string = str(uri_string) if uri_string[:1] == "?": return uri_string if uri_string[:1] == "[": return uri_string if uri_string[:1] != "<": uri_string = "<{}".format(uri_string.strip()) if uri_string[len(uri_string)-1:] != ">": uri_string = "{}>".format(uri_string.strip()) return uri_string
python
def iri(uri_string): """converts a string to an IRI or returns an IRI if already formated Args: uri_string: uri in string format Returns: formated uri with <> """ uri_string = str(uri_string) if uri_string[:1] == "?": return uri_string if uri_string[:1] == "[": return uri_string if uri_string[:1] != "<": uri_string = "<{}".format(uri_string.strip()) if uri_string[len(uri_string)-1:] != ">": uri_string = "{}>".format(uri_string.strip()) return uri_string
[ "def", "iri", "(", "uri_string", ")", ":", "uri_string", "=", "str", "(", "uri_string", ")", "if", "uri_string", "[", ":", "1", "]", "==", "\"?\"", ":", "return", "uri_string", "if", "uri_string", "[", ":", "1", "]", "==", "\"[\"", ":", "return", "uri_string", "if", "uri_string", "[", ":", "1", "]", "!=", "\"<\"", ":", "uri_string", "=", "\"<{}\"", ".", "format", "(", "uri_string", ".", "strip", "(", ")", ")", "if", "uri_string", "[", "len", "(", "uri_string", ")", "-", "1", ":", "]", "!=", "\">\"", ":", "uri_string", "=", "\"{}>\"", ".", "format", "(", "uri_string", ".", "strip", "(", ")", ")", "return", "uri_string" ]
converts a string to an IRI or returns an IRI if already formatted Args: uri_string: uri in string format Returns: formatted uri with <>
[ "converts", "a", "string", "to", "an", "IRI", "or", "returns", "an", "IRI", "if", "already", "formatted" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L682-L700
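Quick checks of the wrapping rules above, assuming iri is exposed as a static helper on the manager (its signature takes no self):

print(NSM.iri("http://example.org/x"))    # -> <http://example.org/x>
print(NSM.iri("<http://example.org/x>"))  # already wrapped: unchanged
print(NSM.iri("?var"))                    # SPARQL variables pass through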
248,082
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.convert_to_ttl
def convert_to_ttl(self, value): ''' converts a value to the prefixed rdf ns equivalent. If not found returns the value as is. args: value: the value to convert ''' parsed = self.parse_uri(value) try: rtn_val = "%s:%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.iri(self.rpyhttp(value)) return rtn_val
python
def convert_to_ttl(self, value): ''' converts a value to the prefixed rdf ns equivalent. If not found returns the value as is. args: value: the value to convert ''' parsed = self.parse_uri(value) try: rtn_val = "%s:%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.iri(self.rpyhttp(value)) return rtn_val
[ "def", "convert_to_ttl", "(", "self", ",", "value", ")", ":", "parsed", "=", "self", ".", "parse_uri", "(", "value", ")", "try", ":", "rtn_val", "=", "\"%s:%s\"", "%", "(", "self", ".", "uri_dict", "[", "parsed", "[", "0", "]", "]", ",", "parsed", "[", "1", "]", ")", "except", "KeyError", ":", "rtn_val", "=", "self", ".", "iri", "(", "self", ".", "rpyhttp", "(", "value", ")", ")", "return", "rtn_val" ]
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is. args: value: the value to convert
[ "converts", "a", "value", "to", "the", "prefixed", "rdf", "ns", "equivalent", ".", "If", "not", "found", "returns", "the", "value", "as", "is", "." ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L702-L717
248,083
KnowledgeLinks/rdfframework
rdfframework/datatypes/namespaces.py
RdfNsManager.convert_to_ns
def convert_to_ns(self, value): ''' converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert ''' parsed = self.parse_uri(value) try: rtn_val = "%s_%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.pyhttp(value) return rtn_val
python
def convert_to_ns(self, value): ''' converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert ''' parsed = self.parse_uri(value) try: rtn_val = "%s_%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.pyhttp(value) return rtn_val
[ "def", "convert_to_ns", "(", "self", ",", "value", ")", ":", "parsed", "=", "self", ".", "parse_uri", "(", "value", ")", "try", ":", "rtn_val", "=", "\"%s_%s\"", "%", "(", "self", ".", "uri_dict", "[", "parsed", "[", "0", "]", "]", ",", "parsed", "[", "1", "]", ")", "except", "KeyError", ":", "rtn_val", "=", "self", ".", "pyhttp", "(", "value", ")", "return", "rtn_val" ]
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert
[ "converts", "a", "value", "to", "the", "prefixed", "rdf", "ns", "equivalent", ".", "If", "not", "found", "returns", "the", "value", "as", "is" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/namespaces.py#L719-L732
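The three converters side by side, continuing the NSM sketch; the expected outputs assume schema: is bound and parse_uri splits the URI as above:

uri = "http://schema.org/name"
print(NSM.convert_to_uri("schema:name"))  # -> http://schema.org/name
print(NSM.convert_to_ttl(uri))            # -> schema:name
print(NSM.convert_to_ns(uri))             # -> schema_name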
248,084
Carreau/warn
warn/warn.py
_get_stack_frame
def _get_stack_frame(stacklevel): """ utility functions to get a stackframe, skipping internal frames. """ stacklevel = stacklevel + 1 if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)): # If frame is too small to care or if the warning originated in # internal code, then do not try to hide any frames. frame = sys._getframe(stacklevel) else: frame = sys._getframe(1) # Look for one frame less since the above line starts us off. for x in range(stacklevel-1): frame = _next_external_frame(frame) if frame is None: raise ValueError return frame
python
def _get_stack_frame(stacklevel): """ utility functions to get a stackframe, skipping internal frames. """ stacklevel = stacklevel + 1 if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)): # If frame is too small to care or if the warning originated in # internal code, then do not try to hide any frames. frame = sys._getframe(stacklevel) else: frame = sys._getframe(1) # Look for one frame less since the above line starts us off. for x in range(stacklevel-1): frame = _next_external_frame(frame) if frame is None: raise ValueError return frame
[ "def", "_get_stack_frame", "(", "stacklevel", ")", ":", "stacklevel", "=", "stacklevel", "+", "1", "if", "stacklevel", "<=", "1", "or", "_is_internal_frame", "(", "sys", ".", "_getframe", "(", "1", ")", ")", ":", "# If frame is too small to care or if the warning originated in", "# internal code, then do not try to hide any frames.", "frame", "=", "sys", ".", "_getframe", "(", "stacklevel", ")", "else", ":", "frame", "=", "sys", ".", "_getframe", "(", "1", ")", "# Look for one frame less since the above line starts us off.", "for", "x", "in", "range", "(", "stacklevel", "-", "1", ")", ":", "frame", "=", "_next_external_frame", "(", "frame", ")", "if", "frame", "is", "None", ":", "raise", "ValueError", "return", "frame" ]
utility function to get a stack frame, skipping internal frames.
[ "utility", "function", "to", "get", "a", "stack", "frame", "skipping", "internal", "frames", "." ]
251ed08bc13b536c47392ba577f86e1f96bdad6b
https://github.com/Carreau/warn/blob/251ed08bc13b536c47392ba577f86e1f96bdad6b/warn/warn.py#L118-L134
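A hedged sketch of the stacklevel bookkeeping these frames feed into, assuming the package's warn() (continued in the next record) is importable as warn.warn per the record's path and cooperates with the stdlib warning filters:

import warnings
from warn.warn import warn  # import path assumed from warn/warn.py

def deprecated_helper():
    # stacklevel=2 attributes the warning to deprecated_helper's caller,
    # while emitstacklevel (default 1) matches filters against this module
    warn("deprecated_helper is deprecated", DeprecationWarning, stacklevel=2)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    deprecated_helper()
print(caught[0].category.__name__, caught[0].message)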
248,085
Carreau/warn
warn/warn.py
warn
def warn(message, category=None, stacklevel=1, emitstacklevel=1): """Issue a warning, or maybe ignore it or raise an exception. Duplicate of the standard library warn function except it takes the following argument: `emitstacklevel` : default to 1, number of stackframe to consider when matching the module that emits this warning. """ # Check if message is already a Warning object #################### ### Get category ### #################### if isinstance(message, Warning): category = message.__class__ # Check category argument if category is None: category = UserWarning if not (isinstance(category, type) and issubclass(category, Warning)): raise TypeError("category must be a Warning subclass, " "not '{:s}'".format(type(category).__name__)) # Get context information try: frame = _get_stack_frame(stacklevel) except ValueError: globals = sys.__dict__ lineno = 1 else: globals = frame.f_globals lineno = frame.f_lineno try: eframe = _get_stack_frame(emitstacklevel) except ValueError: eglobals = sys.__dict__ else: eglobals = eframe.f_globals if '__name__' in eglobals: emodule = eglobals['__name__'] else: emodule = "<string>" #################### ### Get Filename ### #################### if '__name__' in globals: module = globals['__name__'] else: module = "<string>" #################### ### Get Filename ### #################### filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith(".pyc"): filename = filename[:-1] else: if module == "__main__": try: filename = sys.argv[0] except AttributeError: # embedded interpreters don't have sys.argv, see bug #839151 filename = '__main__' if not filename: filename = module registry = globals.setdefault("__warningregistry__", {}) warn_explicit(message, category, filename, lineno, module, registry, globals, emit_module=emodule)
python
def warn(message, category=None, stacklevel=1, emitstacklevel=1): """Issue a warning, or maybe ignore it or raise an exception. Duplicate of the standard library warn function except it takes the following argument: `emitstacklevel` : default to 1, number of stackframe to consider when matching the module that emits this warning. """ # Check if message is already a Warning object #################### ### Get category ### #################### if isinstance(message, Warning): category = message.__class__ # Check category argument if category is None: category = UserWarning if not (isinstance(category, type) and issubclass(category, Warning)): raise TypeError("category must be a Warning subclass, " "not '{:s}'".format(type(category).__name__)) # Get context information try: frame = _get_stack_frame(stacklevel) except ValueError: globals = sys.__dict__ lineno = 1 else: globals = frame.f_globals lineno = frame.f_lineno try: eframe = _get_stack_frame(emitstacklevel) except ValueError: eglobals = sys.__dict__ else: eglobals = eframe.f_globals if '__name__' in eglobals: emodule = eglobals['__name__'] else: emodule = "<string>" #################### ### Get Filename ### #################### if '__name__' in globals: module = globals['__name__'] else: module = "<string>" #################### ### Get Filename ### #################### filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith(".pyc"): filename = filename[:-1] else: if module == "__main__": try: filename = sys.argv[0] except AttributeError: # embedded interpreters don't have sys.argv, see bug #839151 filename = '__main__' if not filename: filename = module registry = globals.setdefault("__warningregistry__", {}) warn_explicit(message, category, filename, lineno, module, registry, globals, emit_module=emodule)
[ "def", "warn", "(", "message", ",", "category", "=", "None", ",", "stacklevel", "=", "1", ",", "emitstacklevel", "=", "1", ")", ":", "# Check if message is already a Warning object", "####################", "### Get category ###", "####################", "if", "isinstance", "(", "message", ",", "Warning", ")", ":", "category", "=", "message", ".", "__class__", "# Check category argument", "if", "category", "is", "None", ":", "category", "=", "UserWarning", "if", "not", "(", "isinstance", "(", "category", ",", "type", ")", "and", "issubclass", "(", "category", ",", "Warning", ")", ")", ":", "raise", "TypeError", "(", "\"category must be a Warning subclass, \"", "\"not '{:s}'\"", ".", "format", "(", "type", "(", "category", ")", ".", "__name__", ")", ")", "# Get context information", "try", ":", "frame", "=", "_get_stack_frame", "(", "stacklevel", ")", "except", "ValueError", ":", "globals", "=", "sys", ".", "__dict__", "lineno", "=", "1", "else", ":", "globals", "=", "frame", ".", "f_globals", "lineno", "=", "frame", ".", "f_lineno", "try", ":", "eframe", "=", "_get_stack_frame", "(", "emitstacklevel", ")", "except", "ValueError", ":", "eglobals", "=", "sys", ".", "__dict__", "else", ":", "eglobals", "=", "eframe", ".", "f_globals", "if", "'__name__'", "in", "eglobals", ":", "emodule", "=", "eglobals", "[", "'__name__'", "]", "else", ":", "emodule", "=", "\"<string>\"", "####################", "### Get Filename ###", "####################", "if", "'__name__'", "in", "globals", ":", "module", "=", "globals", "[", "'__name__'", "]", "else", ":", "module", "=", "\"<string>\"", "####################", "### Get Filename ###", "####################", "filename", "=", "globals", ".", "get", "(", "'__file__'", ")", "if", "filename", ":", "fnl", "=", "filename", ".", "lower", "(", ")", "if", "fnl", ".", "endswith", "(", "\".pyc\"", ")", ":", "filename", "=", "filename", "[", ":", "-", "1", "]", "else", ":", "if", "module", "==", "\"__main__\"", ":", "try", ":", "filename", "=", "sys", ".", "argv", "[", "0", "]", "except", "AttributeError", ":", "# embedded interpreters don't have sys.argv, see bug #839151", "filename", "=", "'__main__'", "if", "not", "filename", ":", "filename", "=", "module", "registry", "=", "globals", ".", "setdefault", "(", "\"__warningregistry__\"", ",", "{", "}", ")", "warn_explicit", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "module", ",", "registry", ",", "globals", ",", "emit_module", "=", "emodule", ")" ]
Issue a warning, or maybe ignore it or raise an exception. Duplicate of the standard library warn function except it takes the following argument: `emitstacklevel` : default to 1, number of stackframe to consider when matching the module that emits this warning.
[ "Issue", "a", "warning", "or", "maybe", "ignore", "it", "or", "raise", "an", "exception", "." ]
251ed08bc13b536c47392ba577f86e1f96bdad6b
https://github.com/Carreau/warn/blob/251ed08bc13b536c47392ba577f86e1f96bdad6b/warn/warn.py#L137-L206
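A hypothetical call site for the extended warn above. The import path follows the record's warn/warn.py layout; everything else is standard warnings usage:

from warn.warn import warn  # module path inferred from the record's path field

def old_api():
    # stacklevel=2 attributes the warning to old_api's caller rather
    # than to old_api itself; emitstacklevel keeps its default, so this
    # module is recorded as the emitter.
    warn("old_api() is deprecated", DeprecationWarning, stacklevel=2)

old_api()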
248,086
Carreau/warn
warn/warn.py
_set_proxy_filter
def _set_proxy_filter(warningstuple): """Set up a proxy that stores overlong warning filter tuples in a separate map""" if len(warningstuple) > 5: key = len(_proxy_map)+1 _proxy_map[key] = warningstuple # always is pass-through further in the code. return ('always', re_matchall, ProxyWarning, re_matchall, key) else: return warningstuple
python
def _set_proxy_filter(warningstuple): """Set up a proxy that stores overlong warning filter tuples in a separate map""" if len(warningstuple) > 5: key = len(_proxy_map)+1 _proxy_map[key] = warningstuple # always is pass-through further in the code. return ('always', re_matchall, ProxyWarning, re_matchall, key) else: return warningstuple
[ "def", "_set_proxy_filter", "(", "warningstuple", ")", ":", "if", "len", "(", "warningstuple", ")", ">", "5", ":", "key", "=", "len", "(", "_proxy_map", ")", "+", "1", "_proxy_map", "[", "key", "]", "=", "warningstuple", "# always is pass-through further in the code.", "return", "(", "'always'", ",", "re_matchall", ",", "ProxyWarning", ",", "re_matchall", ",", "key", ")", "else", ":", "return", "warningstuple" ]
Set up a proxy that stores overlong warning filter tuples in a separate map
[ "Set", "up", "a", "proxy", "that", "stores", "overlong", "warning", "filter", "tuples", "in", "a", "separate", "map" ]
251ed08bc13b536c47392ba577f86e1f96bdad6b
https://github.com/Carreau/warn/blob/251ed08bc13b536c47392ba577f86e1f96bdad6b/warn/warn.py#L212-L221
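The trick above relies on standard warnings filters being 5-tuples: an oversized tuple is parked in a side map and replaced by an 'always' entry whose lineno slot smuggles the lookup key. A self-contained sketch (ProxyWarning and re_matchall belong to the package; plain stand-ins are used below):

import re

_proxy_map = {}
_re_matchall = re.compile('')  # stand-in for the package's re_matchall

def set_proxy_filter(warningstuple):
    if len(warningstuple) > 5:
        key = len(_proxy_map) + 1
        _proxy_map[key] = warningstuple  # park the oversized tuple
        # 'always' passes through; the key rides in the lineno position
        return ('always', _re_matchall, Warning, _re_matchall, key)
    return warningstuple

print(set_proxy_filter(('error', None, Warning, None, 0, 'emit-module')))
print(_proxy_map)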
248,087
questrail/arghelper
tasks.py
release
def release(ctx, deploy=False, test=False, version=''): """Tag release, run Travis-CI, and deploy to PyPI """ if test: run("python setup.py check") run("python setup.py register sdist upload --dry-run") if deploy: run("python setup.py check") if version: run("git checkout master") run("git tag -a v{ver} -m 'v{ver}'".format(ver=version)) run("git push") run("git push origin --tags") run("python setup.py sdist bdist_wheel") run("twine upload --skip-existing dist/*") else: print("- Have you updated the version?") print("- Have you updated CHANGELOG.md, README.md, and AUTHORS.md?") print("- Have you fixed any last minute bugs?") print("- Have you merged changes for release into the master branch?") print("If you answered yes to all of the above questions,") print("then run `inv release --deploy -vX.YY.ZZ` to:") print("- Checkout master") print("- Tag the git release with provided vX.YY.ZZ version") print("- Push the master branch and tags to repo")
python
def release(ctx, deploy=False, test=False, version=''): """Tag release, run Travis-CI, and deploy to PyPI """ if test: run("python setup.py check") run("python setup.py register sdist upload --dry-run") if deploy: run("python setup.py check") if version: run("git checkout master") run("git tag -a v{ver} -m 'v{ver}'".format(ver=version)) run("git push") run("git push origin --tags") run("python setup.py sdist bdist_wheel") run("twine upload --skip-existing dist/*") else: print("- Have you updated the version?") print("- Have you updated CHANGELOG.md, README.md, and AUTHORS.md?") print("- Have you fixed any last minute bugs?") print("- Have you merged changes for release into the master branch?") print("If you answered yes to all of the above questions,") print("then run `inv release --deploy -vX.YY.ZZ` to:") print("- Checkout master") print("- Tag the git release with provided vX.YY.ZZ version") print("- Push the master branch and tags to repo")
[ "def", "release", "(", "ctx", ",", "deploy", "=", "False", ",", "test", "=", "False", ",", "version", "=", "''", ")", ":", "if", "test", ":", "run", "(", "\"python setup.py check\"", ")", "run", "(", "\"python setup.py register sdist upload --dry-run\"", ")", "if", "deploy", ":", "run", "(", "\"python setup.py check\"", ")", "if", "version", ":", "run", "(", "\"git checkout master\"", ")", "run", "(", "\"git tag -a v{ver} -m 'v{ver}'\"", ".", "format", "(", "ver", "=", "version", ")", ")", "run", "(", "\"git push\"", ")", "run", "(", "\"git push origin --tags\"", ")", "run", "(", "\"python setup.py sdist bdist_wheel\"", ")", "run", "(", "\"twine upload --skip-existing dist/*\"", ")", "else", ":", "print", "(", "\"- Have you updated the version?\"", ")", "print", "(", "\"- Have you updated CHANGELOG.md, README.md, and AUTHORS.md?\"", ")", "print", "(", "\"- Have you fixed any last minute bugs?\"", ")", "print", "(", "\"- Have you merged changes for release into the master branch?\"", ")", "print", "(", "\"If you answered yes to all of the above questions,\"", ")", "print", "(", "\"then run `inv release --deploy -vX.YY.ZZ` to:\"", ")", "print", "(", "\"- Checkout master\"", ")", "print", "(", "\"- Tag the git release with provided vX.YY.ZZ version\"", ")", "print", "(", "\"- Push the master branch and tags to repo\"", ")" ]
Tag release, run Travis-CI, and deploy to PyPI
[ "Tag", "release", "run", "Travis", "-", "CI", "and", "deploy", "to", "PyPI" ]
833d7d25a1f3daba70f186057d3d39a040c56200
https://github.com/questrail/arghelper/blob/833d7d25a1f3daba70f186057d3d39a040c56200/tasks.py#L23-L48
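Invoke tasks are plain callables once handed a Context, so the task above can be driven from the shell (inv release --deploy --version 1.2.3) or programmatically. A hypothetical driver; the version number is illustrative, and note that the task prepends the 'v' itself:

from invoke import Context

from tasks import release  # module name taken from the record's path field

release(Context(), deploy=True, version='1.2.3')  # tags and pushes v1.2.3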
248,088
OpenGov/python_data_wrap
datawrap/tablewrap.py
squarify_table
def squarify_table(table): ''' Updates a table so that all rows are the same length by filling smaller rows with 'None' objects up to the length of the largest row. ''' max_length = 0 min_length = maxsize for row in table: row_len = len(row) if row_len > max_length: max_length = row_len if row_len < min_length: min_length = row_len if max_length != min_length: for row in table: row_len = len(row) if row_len < max_length: row.extend([None]*(max_length-row_len))
python
def squarify_table(table): ''' Updates a table so that all rows are the same length by filling smaller rows with 'None' objects up to the length of the largest row. ''' max_length = 0 min_length = maxsize for row in table: row_len = len(row) if row_len > max_length: max_length = row_len if row_len < min_length: min_length = row_len if max_length != min_length: for row in table: row_len = len(row) if row_len < max_length: row.extend([None]*(max_length-row_len))
[ "def", "squarify_table", "(", "table", ")", ":", "max_length", "=", "0", "min_length", "=", "maxsize", "for", "row", "in", "table", ":", "row_len", "=", "len", "(", "row", ")", "if", "row_len", ">", "max_length", ":", "max_length", "=", "row_len", "if", "row_len", "<", "min_length", ":", "min_length", "=", "row_len", "if", "max_length", "!=", "min_length", ":", "for", "row", "in", "table", ":", "row_len", "=", "len", "(", "row", ")", "if", "row_len", "<", "max_length", ":", "row", ".", "extend", "(", "[", "None", "]", "*", "(", "max_length", "-", "row_len", ")", ")" ]
Updates a table so that all rows are the same length by filling smaller rows with 'None' objects up to the length of the largest row.
[ "Updates", "a", "table", "so", "that", "all", "rows", "are", "the", "same", "length", "by", "filling", "smaller", "rows", "with", "None", "objects", "up", "to", "the", "length", "of", "the", "largest", "row", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tablewrap.py#L9-L26
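The padding happens in place (rows are extended, nothing is returned), so callers keep their own list objects. A quick check, with the import path taken from the record:

from datawrap.tablewrap import squarify_table

table = [[1, 2, 3], [4], [5, 6]]
squarify_table(table)
print(table)  # [[1, 2, 3], [4, None, None], [5, 6, None]]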
248,089
xtrementl/focus
focus/plugin/modules/apps.py
_get_process_cwd
def _get_process_cwd(pid): """ Returns the working directory for the provided process identifier. `pid` System process identifier. Returns string or ``None``. Note this is used as a workaround, since `psutil` isn't consistently able to provide this path in all cases, especially on Mac OS X. """ cmd = 'lsof -a -p {0} -d cwd -Fn'.format(pid) data = common.shell_process(cmd) if not data is None: lines = str(data).split('\n') # the cwd is the second line with 'n' prefix removed from value if len(lines) > 1: return lines[1][1:] or None return None
python
def _get_process_cwd(pid): """ Returns the working directory for the provided process identifier. `pid` System process identifier. Returns string or ``None``. Note this is used as a workaround, since `psutil` isn't consistently able to provide this path in all cases, especially on Mac OS X. """ cmd = 'lsof -a -p {0} -d cwd -Fn'.format(pid) data = common.shell_process(cmd) if not data is None: lines = str(data).split('\n') # the cwd is the second line with 'n' prefix removed from value if len(lines) > 1: return lines[1][1:] or None return None
[ "def", "_get_process_cwd", "(", "pid", ")", ":", "cmd", "=", "'lsof -a -p {0} -d cwd -Fn'", ".", "format", "(", "pid", ")", "data", "=", "common", ".", "shell_process", "(", "cmd", ")", "if", "not", "data", "is", "None", ":", "lines", "=", "str", "(", "data", ")", ".", "split", "(", "'\\n'", ")", "# the cwd is the second line with 'n' prefix removed from value", "if", "len", "(", "lines", ")", ">", "1", ":", "return", "lines", "[", "1", "]", "[", "1", ":", "]", "or", "None", "return", "None" ]
Returns the working directory for the provided process identifier. `pid` System process identifier. Returns string or ``None``. Note this is used as a workaround, since `psutil` isn't consistently able to provide this path in all cases, especially on Mac OS X.
[ "Returns", "the", "working", "directory", "for", "the", "provided", "process", "identifier", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L24-L47
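lsof's -F mode emits one field per line, each prefixed by a letter: the first line is p<pid>, the second n<cwd>, which is why the code strips one leading character from line two. A stand-alone rendering for systems that ship lsof:

import os
import subprocess

def process_cwd(pid):
    out = subprocess.check_output(
        ['lsof', '-a', '-p', str(pid), '-d', 'cwd', '-Fn'])
    lines = out.decode().splitlines()
    # lines[0] is 'p<pid>'; lines[1] is 'n<working directory>'
    return lines[1][1:] if len(lines) > 1 else None

print(process_cwd(os.getpid()))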
248,090
xtrementl/focus
focus/plugin/modules/apps.py
_get_checksum
def _get_checksum(path): """ Generates an MD5 checksum of the file at the specified path. `path` Path to file for checksum. Returns string or ``None`` """ # md5 uses a 512-bit digest blocks, let's scale by defined block_size _md5 = hashlib.md5() chunk_size = 128 * _md5.block_size try: with open(path, 'rb') as _file: for chunk in iter(lambda: _file.read(chunk_size), ''): _md5.update(chunk) return _md5.hexdigest() except IOError: return None
python
def _get_checksum(path): """ Generates an MD5 checksum of the file at the specified path. `path` Path to file for checksum. Returns string or ``None`` """ # md5 uses a 512-bit digest blocks, let's scale by defined block_size _md5 = hashlib.md5() chunk_size = 128 * _md5.block_size try: with open(path, 'rb') as _file: for chunk in iter(lambda: _file.read(chunk_size), ''): _md5.update(chunk) return _md5.hexdigest() except IOError: return None
[ "def", "_get_checksum", "(", "path", ")", ":", "# md5 uses a 512-bit digest blocks, let's scale by defined block_size", "_md5", "=", "hashlib", ".", "md5", "(", ")", "chunk_size", "=", "128", "*", "_md5", ".", "block_size", "try", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "_file", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "_file", ".", "read", "(", "chunk_size", ")", ",", "''", ")", ":", "_md5", ".", "update", "(", "chunk", ")", "return", "_md5", ".", "hexdigest", "(", ")", "except", "IOError", ":", "return", "None" ]
Generates an MD5 checksum of the file at the specified path. `path` Path to file for checksum. Returns string or ``None``
[ "Generates", "an", "MD5", "checksum", "of", "the", "file", "at", "the", "specified", "path", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L50-L70
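One portability note: the '' sentinel matches Python 2, where reads from a binary file return str. Under Python 3 they return bytes, b'' never equals '', and the iter() loop would not terminate. A Python 3 rendering of the same routine:

import hashlib

def file_md5(path):
    md5 = hashlib.md5()
    chunk_size = 128 * md5.block_size  # MD5 processes 64-byte blocks
    try:
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):  # bytes sentinel
                md5.update(chunk)
        return md5.hexdigest()
    except IOError:
        return None

print(file_md5(__file__))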
248,091
xtrementl/focus
focus/plugin/modules/apps.py
_get_user_processes
def _get_user_processes(): """ Gets process information owned by the current user. Returns generator of tuples: (``psutil.Process`` instance, path). """ uid = os.getuid() for proc in psutil.process_iter(): try: # yield processes that match current user if proc.uids.real == uid: yield (proc, proc.exe) except psutil.AccessDenied: # work around for suid/sguid processes and MacOS X restrictions try: path = common.which(proc.name) # psutil doesn't support MacOS X relative paths, # let's use a workaround to merge working directory with # process relative path if not path and common.IS_MACOSX: cwd = _get_process_cwd(proc.pid) if not cwd: continue path = os.path.join(cwd, proc.cmdline[0]) yield (proc, path) except (psutil.AccessDenied, OSError): pass except psutil.NoSuchProcess: pass
python
def _get_user_processes(): """ Gets process information owned by the current user. Returns generator of tuples: (``psutil.Process`` instance, path). """ uid = os.getuid() for proc in psutil.process_iter(): try: # yield processes that match current user if proc.uids.real == uid: yield (proc, proc.exe) except psutil.AccessDenied: # work around for suid/sguid processes and MacOS X restrictions try: path = common.which(proc.name) # psutil doesn't support MacOS X relative paths, # let's use a workaround to merge working directory with # process relative path if not path and common.IS_MACOSX: cwd = _get_process_cwd(proc.pid) if not cwd: continue path = os.path.join(cwd, proc.cmdline[0]) yield (proc, path) except (psutil.AccessDenied, OSError): pass except psutil.NoSuchProcess: pass
[ "def", "_get_user_processes", "(", ")", ":", "uid", "=", "os", ".", "getuid", "(", ")", "for", "proc", "in", "psutil", ".", "process_iter", "(", ")", ":", "try", ":", "# yield processes that match current user", "if", "proc", ".", "uids", ".", "real", "==", "uid", ":", "yield", "(", "proc", ",", "proc", ".", "exe", ")", "except", "psutil", ".", "AccessDenied", ":", "# work around for suid/sguid processes and MacOS X restrictions", "try", ":", "path", "=", "common", ".", "which", "(", "proc", ".", "name", ")", "# psutil doesn't support MacOS X relative paths,", "# let's use a workaround to merge working directory with", "# process relative path", "if", "not", "path", "and", "common", ".", "IS_MACOSX", ":", "cwd", "=", "_get_process_cwd", "(", "proc", ".", "pid", ")", "if", "not", "cwd", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "proc", ".", "cmdline", "[", "0", "]", ")", "yield", "(", "proc", ",", "path", ")", "except", "(", "psutil", ".", "AccessDenied", ",", "OSError", ")", ":", "pass", "except", "psutil", ".", "NoSuchProcess", ":", "pass" ]
Gets process information owned by the current user. Returns generator of tuples: (``psutil.Process`` instance, path).
[ "Gets", "process", "information", "owned", "by", "the", "current", "user", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L73-L107
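The attribute-style proc.uids and proc.exe accesses pin this to psutil 1.x; since psutil 2.0 these are methods, and newer releases let process_iter prefetch them. A sketch of the same scan against psutil >= 5.3 (the attrs argument):

import os

import psutil

uid = os.getuid()
for proc in psutil.process_iter(['pid', 'exe', 'uids']):
    # With attrs, fields denied to us come back as None instead of raising.
    uids = proc.info['uids']
    if uids is not None and uids.real == uid:
        print(proc.info['pid'], proc.info['exe'])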
248,092
xtrementl/focus
focus/plugin/modules/apps.py
_stop_processes
def _stop_processes(paths): """ Scans process list trying to terminate processes matching paths specified. Uses checksums to identify processes that are duplicates of those specified to terminate. `paths` List of full paths to executables for processes to terminate. """ def cache_checksum(path): """ Checksum provided path, cache, and return value. """ if not path: return None if not path in _process_checksums: checksum = _get_checksum(path) _process_checksums[path] = checksum return _process_checksums[path] if not paths: return target_checksums = dict((cache_checksum(p), 1) for p in paths) if not target_checksums: return for proc, path in _get_user_processes(): # path's checksum matches targets, attempt to terminate if cache_checksum(path) in target_checksums: try: proc.terminate() except (psutil.AccessDenied, psutil.NoSuchProcess): pass
python
def _stop_processes(paths): """ Scans process list trying to terminate processes matching paths specified. Uses checksums to identify processes that are duplicates of those specified to terminate. `paths` List of full paths to executables for processes to terminate. """ def cache_checksum(path): """ Checksum provided path, cache, and return value. """ if not path: return None if not path in _process_checksums: checksum = _get_checksum(path) _process_checksums[path] = checksum return _process_checksums[path] if not paths: return target_checksums = dict((cache_checksum(p), 1) for p in paths) if not target_checksums: return for proc, path in _get_user_processes(): # path's checksum matches targets, attempt to terminate if cache_checksum(path) in target_checksums: try: proc.terminate() except (psutil.AccessDenied, psutil.NoSuchProcess): pass
[ "def", "_stop_processes", "(", "paths", ")", ":", "def", "cache_checksum", "(", "path", ")", ":", "\"\"\" Checksum provided path, cache, and return value.\n \"\"\"", "if", "not", "path", ":", "return", "None", "if", "not", "path", "in", "_process_checksums", ":", "checksum", "=", "_get_checksum", "(", "path", ")", "_process_checksums", "[", "path", "]", "=", "checksum", "return", "_process_checksums", "[", "path", "]", "if", "not", "paths", ":", "return", "target_checksums", "=", "dict", "(", "(", "cache_checksum", "(", "p", ")", ",", "1", ")", "for", "p", "in", "paths", ")", "if", "not", "target_checksums", ":", "return", "for", "proc", ",", "path", "in", "_get_user_processes", "(", ")", ":", "# path's checksum matches targets, attempt to terminate", "if", "cache_checksum", "(", "path", ")", "in", "target_checksums", ":", "try", ":", "proc", ".", "terminate", "(", ")", "except", "(", "psutil", ".", "AccessDenied", ",", "psutil", ".", "NoSuchProcess", ")", ":", "pass" ]
Scans process list trying to terminate processes matching paths specified. Uses checksums to identify processes that are duplicates of those specified to terminate. `paths` List of full paths to executables for processes to terminate.
[ "Scans", "process", "list", "trying", "to", "terminate", "processes", "matching", "paths", "specified", ".", "Uses", "checksums", "to", "identify", "processes", "that", "are", "duplicates", "of", "those", "specified", "to", "terminate", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L110-L145
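Matching by checksum rather than by path means renamed or copied binaries are caught as well. A hypothetical call site (the paths are illustrative); note the dict((checksum, 1) ...) construction is a Python 2.x-era stand-in for a set, and a set comprehension would say the same thing today:

# Terminate every running copy of these executables, even where the
# on-disk file was copied or renamed, since matching is content-based.
_stop_processes(['/usr/bin/vlc', '/usr/local/bin/vlc'])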
248,093
xtrementl/focus
focus/plugin/modules/apps.py
AppRun._run_apps
def _run_apps(self, paths): """ Runs apps for the provided paths. """ for path in paths: common.shell_process(path, background=True) time.sleep(0.2)
python
def _run_apps(self, paths): """ Runs apps for the provided paths. """ for path in paths: common.shell_process(path, background=True) time.sleep(0.2)
[ "def", "_run_apps", "(", "self", ",", "paths", ")", ":", "for", "path", "in", "paths", ":", "common", ".", "shell_process", "(", "path", ",", "background", "=", "True", ")", "time", ".", "sleep", "(", "0.2", ")" ]
Runs apps for the provided paths.
[ "Runs", "apps", "for", "the", "provided", "paths", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L180-L186
248,094
ulf1/oxyba
oxyba/norm_mle.py
norm_mle
def norm_mle(data, algorithm='Nelder-Mead', debug=False): """Estimate Mean and Std.Dev. of the Normal Distribution Parameters: ----------- data : list, tuple, ndarray vector with samples, observations that are assumed to follow a Normal distribution. algorithm : str Optional. Default 'Nelder-Mead' (Simplex). The algorithm used in scipy.optimize.minimize debug : bool Optional. Returns: -------- mu : float Mean, 1st moment, location parameter of the Normal distribution. sd : float Standard Deviation, 2nd moment, scale parameter of the Normal distribution results : scipy.optimize.optimize.OptimizeResult Optional. If debug=True then only scipy's optimization result variable is returned. """ import scipy.stats as sstat import scipy.optimize as sopt def objective_nll_norm_uni(theta, data): return -1.0 * sstat.norm.logpdf( data, loc=theta[0], scale=theta[1]).sum() # check eligible algorithm if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'): raise Exception('Optimization Algorithm not supported.') # set start values theta0 = [1.0, 1.0] # mu and sigma # run solver results = sopt.minimize( objective_nll_norm_uni, theta0, args=(data), method=algorithm, options={'disp': False}) # debug? if debug: return results # done return results.x[0], results.x[1]
python
def norm_mle(data, algorithm='Nelder-Mead', debug=False): """Estimate Mean and Std.Dev. of the Normal Distribution Parameters: ----------- data : list, tuple, ndarray vector with samples, observations that are assumed to follow a Normal distribution. algorithm : str Optional. Default 'Nelder-Mead' (Simplex). The algorithm used in scipy.optimize.minimize debug : bool Optional. Returns: -------- mu : float Mean, 1st moment, location parameter of the Normal distribution. sd : float Standard Deviation, 2nd moment, scale parameter of the Normal distribution results : scipy.optimize.optimize.OptimizeResult Optional. If debug=True then only scipy's optimization result variable is returned. """ import scipy.stats as sstat import scipy.optimize as sopt def objective_nll_norm_uni(theta, data): return -1.0 * sstat.norm.logpdf( data, loc=theta[0], scale=theta[1]).sum() # check eligible algorithm if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'): raise Exception('Optimization Algorithm not supported.') # set start values theta0 = [1.0, 1.0] # mu and sigma # run solver results = sopt.minimize( objective_nll_norm_uni, theta0, args=(data), method=algorithm, options={'disp': False}) # debug? if debug: return results # done return results.x[0], results.x[1]
[ "def", "norm_mle", "(", "data", ",", "algorithm", "=", "'Nelder-Mead'", ",", "debug", "=", "False", ")", ":", "import", "scipy", ".", "stats", "as", "sstat", "import", "scipy", ".", "optimize", "as", "sopt", "def", "objective_nll_norm_uni", "(", "theta", ",", "data", ")", ":", "return", "-", "1.0", "*", "sstat", ".", "norm", ".", "logpdf", "(", "data", ",", "loc", "=", "theta", "[", "0", "]", ",", "scale", "=", "theta", "[", "1", "]", ")", ".", "sum", "(", ")", "# check eligible algorithm", "if", "algorithm", "not", "in", "(", "'Nelder-Mead'", ",", "'CG'", ",", "'BFGS'", ")", ":", "raise", "Exception", "(", "'Optimization Algorithm not supported.'", ")", "# set start values", "theta0", "=", "[", "1.0", ",", "1.0", "]", "# mu and sigma", "# run solver", "results", "=", "sopt", ".", "minimize", "(", "objective_nll_norm_uni", ",", "theta0", ",", "args", "=", "(", "data", ")", ",", "method", "=", "algorithm", ",", "options", "=", "{", "'disp'", ":", "False", "}", ")", "# debug?", "if", "debug", ":", "return", "results", "# done", "return", "results", ".", "x", "[", "0", "]", ",", "results", ".", "x", "[", "1", "]" ]
Estimate Mean and Std.Dev. of the Normal Distribution Parameters: ----------- data : list, tuple, ndarray vector with samples, observations that are assumed to follow a Normal distribution. algorithm : str Optional. Default 'Nelder-Mead' (Simplex). The algorithm used in scipy.optimize.minimize debug : bool Optional. Returns: -------- mu : float Mean, 1st moment, location parameter of the Normal distribution. sd : float Standard Deviation, 2nd moment, scale parameter of the Normal distribution results : scipy.optimize.optimize.OptimizeResult Optional. If debug=True then only scipy's optimization result variable is returned.
[ "Estimate", "Mean", "and", "Std", ".", "Dev", ".", "of", "the", "Normal", "Distribution" ]
b3043116050de275124365cb11e7df91fb40169d
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/norm_mle.py#L2-L59
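For a univariate normal the MLE is available in closed form: mu-hat is the sample mean and sigma-hat the population (ddof=0) standard deviation, so the optimizer mainly serves as a numerical-MLE template. A quick check on synthetic draws:

import numpy as np

rng = np.random.RandomState(0)
data = rng.normal(loc=5.0, scale=2.0, size=2000)

mu, sd = norm_mle(data)      # function from the record above
print(mu, data.mean())       # both ~5.0
print(sd, data.std(ddof=0))  # both ~2.0; the MLE sigma is the ddof=0 estimate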
248,095
KnowledgeLinks/rdfframework
rdfframework/utilities/frameworkutilities.py
DataStatus.get
def get(self, status_item): """ queries the database and returns the status of the item. args: status_item: the name of the item to check """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) sparql = ''' SELECT ?loaded WHERE {{ kdr:{0} kds:{1} ?loaded . }}''' value = self.conn.query(sparql=sparql.format(self.group, status_item)) if len(value) > 0 and \ cbool(value[0].get('loaded',{}).get("value",False)): return True else: return False
python
def get(self, status_item): """ queries the database and returns the status of the item. args: status_item: the name of the item to check """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) sparql = ''' SELECT ?loaded WHERE {{ kdr:{0} kds:{1} ?loaded . }}''' value = self.conn.query(sparql=sparql.format(self.group, status_item)) if len(value) > 0 and \ cbool(value[0].get('loaded',{}).get("value",False)): return True else: return False
[ "def", "get", "(", "self", ",", "status_item", ")", ":", "lg", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "ln", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "lg", ".", "setLevel", "(", "self", ".", "log_level", ")", "sparql", "=", "'''\n SELECT ?loaded\n WHERE {{\n kdr:{0} kds:{1} ?loaded .\n }}'''", "value", "=", "self", ".", "conn", ".", "query", "(", "sparql", "=", "sparql", ".", "format", "(", "self", ".", "group", ",", "status_item", ")", ")", "if", "len", "(", "value", ")", ">", "0", "and", "cbool", "(", "value", "[", "0", "]", ".", "get", "(", "'loaded'", ",", "{", "}", ")", ".", "get", "(", "\"value\"", ",", "False", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
queries the database and returns the status of the item. args: status_item: the name of the item to check
[ "queries", "the", "database", "and", "returns", "the", "status", "of", "the", "item", "." ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/utilities/frameworkutilities.py#L71-L90
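The doubled braces in the query template exist so str.format leaves the SPARQL WHERE block intact while filling the two slots. A stand-alone demonstration; the group and item names are made up, real ones come from the framework configuration:

sparql = '''
    SELECT ?loaded
    WHERE {{
        kdr:{0} kds:{1} ?loaded .
    }}'''
print(sparql.format('core', 'loaded'))
# ... WHERE { kdr:core kds:loaded ?loaded . }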
248,096
malthe/pop
src/pop/utils.py
local_machine_uuid
def local_machine_uuid(): """Return local machine unique identifier. >>> uuid = local_machine_uuid() """ result = subprocess.check_output( 'hal-get-property --udi ' '/org/freedesktop/Hal/devices/computer ' '--key system.hardware.uuid'.split() ).strip() return uuid.UUID(hex=result)
python
def local_machine_uuid(): """Return local machine unique identifier. >>> uuid = local_machine_uuid() """ result = subprocess.check_output( 'hal-get-property --udi ' '/org/freedesktop/Hal/devices/computer ' '--key system.hardware.uuid'.split() ).strip() return uuid.UUID(hex=result)
[ "def", "local_machine_uuid", "(", ")", ":", "result", "=", "subprocess", ".", "check_output", "(", "'hal-get-property --udi '", "'/org/freedesktop/Hal/devices/computer '", "'--key system.hardware.uuid'", ".", "split", "(", ")", ")", ".", "strip", "(", ")", "return", "uuid", ".", "UUID", "(", "hex", "=", "result", ")" ]
Return local machine unique identifier. >>> uuid = local_machine_uuid()
[ "Return", "local", "machine", "unique", "identifier", "." ]
3b58b91b41d8b9bee546eb40dc280a57500b8bed
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/utils.py#L26-L39
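Two caveats worth recording: hal-get-property needs the long-retired HAL daemon, so this only runs where HAL still exists, and under Python 3 check_output returns bytes while uuid.UUID(hex=...) wants str. A Python 3 rendering:

import subprocess
import uuid

def machine_uuid():
    out = subprocess.check_output(
        ['hal-get-property', '--udi',
         '/org/freedesktop/Hal/devices/computer',
         '--key', 'system.hardware.uuid'])
    return uuid.UUID(hex=out.decode().strip())  # decode before parsing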
248,097
malthe/pop
src/pop/utils.py
YAMLState.read
def read(self, required=False): """Read Zookeeper state. Read in the current Zookeeper state for this node. This operation should be called prior to other interactions with this object. `required`: boolean indicating whether the node's existence should be required at read time. Normally write will create the node if the path is possible. This allows for simplified catching of errors. """ self._pristine_cache = {} self._cache = {} try: data, stat = yield self._client.get(self._path) data = yaml.load(data) if data: self._pristine_cache = data self._cache = data.copy() except NoNodeException: if required: raise StateNotFound(self._path)
python
def read(self, required=False): """Read Zookeeper state. Read in the current Zookeeper state for this node. This operation should be called prior to other interactions with this object. `required`: boolean indicating whether the node's existence should be required at read time. Normally write will create the node if the path is possible. This allows for simplified catching of errors. """ self._pristine_cache = {} self._cache = {} try: data, stat = yield self._client.get(self._path) data = yaml.load(data) if data: self._pristine_cache = data self._cache = data.copy() except NoNodeException: if required: raise StateNotFound(self._path)
[ "def", "read", "(", "self", ",", "required", "=", "False", ")", ":", "self", ".", "_pristine_cache", "=", "{", "}", "self", ".", "_cache", "=", "{", "}", "try", ":", "data", ",", "stat", "=", "yield", "self", ".", "_client", ".", "get", "(", "self", ".", "_path", ")", "data", "=", "yaml", ".", "load", "(", "data", ")", "if", "data", ":", "self", ".", "_pristine_cache", "=", "data", "self", ".", "_cache", "=", "data", ".", "copy", "(", ")", "except", "NoNodeException", ":", "if", "required", ":", "raise", "StateNotFound", "(", "self", ".", "_path", ")" ]
Read Zookeeper state. Read in the current Zookeeper state for this node. This operation should be called prior to other interactions with this object. `required`: boolean indicating whether the node's existence should be required at read time. Normally write will create the node if the path is possible. This allows for simplified catching of errors.
[ "Read", "Zookeeper", "state", "." ]
3b58b91b41d8b9bee546eb40dc280a57500b8bed
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/utils.py#L96-L119
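read is a Twisted generator-style coroutine (it yields the Zookeeper call), so call sites drive it with inlineCallbacks. A hypothetical one; the YAMLState constructor signature is assumed from the _client and _path attributes. Note also that the bare yaml.load predates PyYAML's Loader requirement; modern PyYAML would want yaml.safe_load(data):

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def load_settings(client):
    state = YAMLState(client, '/settings')  # constructor signature assumed
    yield state.read(required=True)         # raises StateNotFound if absent
    # after read() the caches hold the node's YAML mapping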
248,098
malthe/pop
src/pop/utils.py
YAMLState.write
def write(self): """Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers. """ self._check() cache = self._cache pristine_cache = self._pristine_cache self._pristine_cache = cache.copy() # Used by `apply_changes` function to return the changes to # this scope. changes = [] def apply_changes(content, stat): """Apply the local state to the Zookeeper node state.""" del changes[:] current = yaml.load(content) if content else {} missing = object() for key in set(pristine_cache).union(cache): old_value = pristine_cache.get(key, missing) new_value = cache.get(key, missing) if old_value != new_value: if new_value != missing: current[key] = new_value if old_value != missing: changes.append( ModifiedItem(key, old_value, new_value)) else: changes.append(AddedItem(key, new_value)) elif key in current: del current[key] changes.append(DeletedItem(key, old_value)) return yaml.safe_dump(current) # Apply the change till it takes. yield retry_change(self._client, self._path, apply_changes) returnValue(changes)
python
def write(self): """Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers. """ self._check() cache = self._cache pristine_cache = self._pristine_cache self._pristine_cache = cache.copy() # Used by `apply_changes` function to return the changes to # this scope. changes = [] def apply_changes(content, stat): """Apply the local state to the Zookeeper node state.""" del changes[:] current = yaml.load(content) if content else {} missing = object() for key in set(pristine_cache).union(cache): old_value = pristine_cache.get(key, missing) new_value = cache.get(key, missing) if old_value != new_value: if new_value != missing: current[key] = new_value if old_value != missing: changes.append( ModifiedItem(key, old_value, new_value)) else: changes.append(AddedItem(key, new_value)) elif key in current: del current[key] changes.append(DeletedItem(key, old_value)) return yaml.safe_dump(current) # Apply the change till it takes. yield retry_change(self._client, self._path, apply_changes) returnValue(changes)
[ "def", "write", "(", "self", ")", ":", "self", ".", "_check", "(", ")", "cache", "=", "self", ".", "_cache", "pristine_cache", "=", "self", ".", "_pristine_cache", "self", ".", "_pristine_cache", "=", "cache", ".", "copy", "(", ")", "# Used by `apply_changes` function to return the changes to", "# this scope.", "changes", "=", "[", "]", "def", "apply_changes", "(", "content", ",", "stat", ")", ":", "\"\"\"Apply the local state to the Zookeeper node state.\"\"\"", "del", "changes", "[", ":", "]", "current", "=", "yaml", ".", "load", "(", "content", ")", "if", "content", "else", "{", "}", "missing", "=", "object", "(", ")", "for", "key", "in", "set", "(", "pristine_cache", ")", ".", "union", "(", "cache", ")", ":", "old_value", "=", "pristine_cache", ".", "get", "(", "key", ",", "missing", ")", "new_value", "=", "cache", ".", "get", "(", "key", ",", "missing", ")", "if", "old_value", "!=", "new_value", ":", "if", "new_value", "!=", "missing", ":", "current", "[", "key", "]", "=", "new_value", "if", "old_value", "!=", "missing", ":", "changes", ".", "append", "(", "ModifiedItem", "(", "key", ",", "old_value", ",", "new_value", ")", ")", "else", ":", "changes", ".", "append", "(", "AddedItem", "(", "key", ",", "new_value", ")", ")", "elif", "key", "in", "current", ":", "del", "current", "[", "key", "]", "changes", ".", "append", "(", "DeletedItem", "(", "key", ",", "old_value", ")", ")", "return", "yaml", ".", "safe_dump", "(", "current", ")", "# Apply the change till it takes.", "yield", "retry_change", "(", "self", ".", "_client", ",", "self", ".", "_path", ",", "apply_changes", ")", "returnValue", "(", "changes", ")" ]
Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers.
[ "Write", "object", "state", "to", "Zookeeper", "." ]
3b58b91b41d8b9bee546eb40dc280a57500b8bed
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/utils.py#L145-L184
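The merge writes back only keys whose local value actually changed, so a concurrent writer's unrelated keys survive the round trip. Reduced to plain dicts, independent of Zookeeper:

missing = object()
pristine = {'a': 1, 'b': 2}          # state as last read
cache = {'a': 1, 'c': 3}             # local edits: 'b' deleted, 'c' added
current = {'a': 1, 'b': 2, 'd': 4}   # node now; someone else added 'd'

for key in set(pristine) | set(cache):
    old, new = pristine.get(key, missing), cache.get(key, missing)
    if old != new:
        if new is not missing:
            current[key] = new       # add or modify
        elif key in current:
            del current[key]         # propagate the local deletion

print(current)  # {'a': 1, 'c': 3, 'd': 4}; the concurrent 'd' survives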
248,099
azogue/dataweb
dataweb/classdataweb.py
DataWeb.printif
def printif(self, obj_print, tipo_print=None): """Color output & logging.""" if self.verbose: print(obj_print) if tipo_print == 'ok': logging.info(obj_print) elif tipo_print == 'error': logging.error(obj_print) elif tipo_print == 'warning': logging.warning(obj_print)
python
def printif(self, obj_print, tipo_print=None): """Color output & logging.""" if self.verbose: print(obj_print) if tipo_print == 'ok': logging.info(obj_print) elif tipo_print == 'error': logging.error(obj_print) elif tipo_print == 'warning': logging.warning(obj_print)
[ "def", "printif", "(", "self", ",", "obj_print", ",", "tipo_print", "=", "None", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "obj_print", ")", "if", "tipo_print", "==", "'ok'", ":", "logging", ".", "info", "(", "obj_print", ")", "elif", "tipo_print", "==", "'error'", ":", "logging", ".", "error", "(", "obj_print", ")", "elif", "tipo_print", "==", "'warning'", ":", "logging", ".", "warning", "(", "obj_print", ")" ]
Color output & logging.
[ "Color", "output", "&", "logging", "." ]
085035855df7cef0fe7725bbe9a706832344d946
https://github.com/azogue/dataweb/blob/085035855df7cef0fe7725bbe9a706832344d946/dataweb/classdataweb.py#L148-L157