Dataset schema (column name, type, and value-length statistics):

column             type        lengths / distinct values
repo               string      7-55 chars
path               string      4-127 chars
func_name          string      1-88 chars
original_string    string      75-19.8k chars
language           string      1 distinct value
code               string      75-19.8k chars
code_tokens        sequence
docstring          string      3-17.3k chars
docstring_tokens   sequence
sha                string      40 chars (fixed)
url                string      87-242 chars
partition          string      1 distinct value
yt-project/unyt
unyt/unit_object.py
Unit.simplify
python
def simplify(self):
    """Return a new equivalent unit object with a simplified unit expression

    >>> import unyt as u
    >>> unit = (u.m**2/u.cm).simplify()
    >>> unit
    100*m
    """
    expr = self.expr
    self.expr = _cancel_mul(expr, self.registry)
    return self
[ "def", "simplify", "(", "self", ")", ":", "expr", "=", "self", ".", "expr", "self", ".", "expr", "=", "_cancel_mul", "(", "expr", ",", "self", ".", "registry", ")", "return", "self" ]
Return a new equivalent unit object with a simplified unit expression

>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
[ "Return", "a", "new", "equivalent", "unit", "object", "with", "a", "simplified", "unit", "expression" ]
7a4eafc229f83784f4c63d639aee554f9a6b1ca0
https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/unit_object.py#L681-L691
train
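For context, a minimal standalone sketch of the record above, taken directly from its own doctest (only the import is assumed):

import unyt as u

unit = (u.m**2 / u.cm).simplify()  # cancels the cm against one power of m
print(unit)  # 100*m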
yt-project/unyt
unyt/_parsing.py
_auto_positive_symbol
python
def _auto_positive_symbol(tokens, local_dict, global_dict):
    """
    Inserts calls to ``Symbol`` for undefined variables.
    Passes in positive=True as a keyword argument.
    Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
    """
    result = []
    tokens.append((None, None))  # so zip traverses all tokens

    for tok, nextTok in zip(tokens, tokens[1:]):
        tokNum, tokVal = tok
        nextTokNum, nextTokVal = nextTok
        if tokNum == token.NAME:
            name = tokVal

            if name in global_dict:
                obj = global_dict[name]
                if isinstance(obj, (Basic, type)) or callable(obj):
                    result.append((token.NAME, name))
                    continue

            # try to resolve known alternative unit name
            try:
                used_name = inv_name_alternatives[str(name)]
            except KeyError:
                # if we don't know this name it's a user-defined unit name
                # so we should create a new symbol for it
                used_name = str(name)

            result.extend(
                [
                    (token.NAME, "Symbol"),
                    (token.OP, "("),
                    (token.NAME, repr(used_name)),
                    (token.OP, ","),
                    (token.NAME, "positive"),
                    (token.OP, "="),
                    (token.NAME, "True"),
                    (token.OP, ")"),
                ]
            )
        else:
            result.append((tokNum, tokVal))

    return result
[ "def", "_auto_positive_symbol", "(", "tokens", ",", "local_dict", ",", "global_dict", ")", ":", "result", "=", "[", "]", "tokens", ".", "append", "(", "(", "None", ",", "None", ")", ")", "# so zip traverses all tokens", "for", "tok", ",", "nextTok", "in", "zip", "(", "tokens", ",", "tokens", "[", "1", ":", "]", ")", ":", "tokNum", ",", "tokVal", "=", "tok", "nextTokNum", ",", "nextTokVal", "=", "nextTok", "if", "tokNum", "==", "token", ".", "NAME", ":", "name", "=", "tokVal", "if", "name", "in", "global_dict", ":", "obj", "=", "global_dict", "[", "name", "]", "if", "isinstance", "(", "obj", ",", "(", "Basic", ",", "type", ")", ")", "or", "callable", "(", "obj", ")", ":", "result", ".", "append", "(", "(", "token", ".", "NAME", ",", "name", ")", ")", "continue", "# try to resolve known alternative unit name", "try", ":", "used_name", "=", "inv_name_alternatives", "[", "str", "(", "name", ")", "]", "except", "KeyError", ":", "# if we don't know this name it's a user-defined unit name", "# so we should create a new symbol for it", "used_name", "=", "str", "(", "name", ")", "result", ".", "extend", "(", "[", "(", "token", ".", "NAME", ",", "\"Symbol\"", ")", ",", "(", "token", ".", "OP", ",", "\"(\"", ")", ",", "(", "token", ".", "NAME", ",", "repr", "(", "used_name", ")", ")", ",", "(", "token", ".", "OP", ",", "\",\"", ")", ",", "(", "token", ".", "NAME", ",", "\"positive\"", ")", ",", "(", "token", ".", "OP", ",", "\"=\"", ")", ",", "(", "token", ".", "NAME", ",", "\"True\"", ")", ",", "(", "token", ".", "OP", ",", "\")\"", ")", ",", "]", ")", "else", ":", "result", ".", "append", "(", "(", "tokNum", ",", "tokVal", ")", ")", "return", "result" ]
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
[ "Inserts", "calls", "to", "Symbol", "for", "undefined", "variables", ".", "Passes", "in", "positive", "=", "True", "as", "a", "keyword", "argument", ".", "Adapted", "from", "sympy", ".", "sympy", ".", "parsing", ".", "sympy_parser", ".", "auto_symbol" ]
7a4eafc229f83784f4c63d639aee554f9a6b1ca0
https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/_parsing.py#L25-L68
train
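A sketch of how a token transformation like this is typically wired into sympy's parser; the unit name "fathom" and the surrounding call are illustrative assumptions, not unyt's actual parsing entry point:

from sympy.parsing.sympy_parser import auto_number, parse_expr
from unyt._parsing import _auto_positive_symbol

# an unrecognized name should come back as Symbol('fathom', positive=True)
expr = parse_expr("fathom", transformations=(_auto_positive_symbol, auto_number))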
kz26/PyExcelerate
pyexcelerate/Range.py
Range.intersection
python
def intersection(self, range):
    """
    Calculates the intersection with another range object
    """
    if self.worksheet != range.worksheet:
        # Different worksheet
        return None
    start = (max(self._start[0], range._start[0]), max(self._start[1], range._start[1]))
    end = (min(self._end[0], range._end[0]), min(self._end[1], range._end[1]))
    if end[0] < start[0] or end[1] < start[1]:
        return None
    return Range(start, end, self.worksheet, validate=False)
[ "def", "intersection", "(", "self", ",", "range", ")", ":", "if", "self", ".", "worksheet", "!=", "range", ".", "worksheet", ":", "# Different worksheet", "return", "None", "start", "=", "(", "max", "(", "self", ".", "_start", "[", "0", "]", ",", "range", ".", "_start", "[", "0", "]", ")", ",", "max", "(", "self", ".", "_start", "[", "1", "]", ",", "range", ".", "_start", "[", "1", "]", ")", ")", "end", "=", "(", "min", "(", "self", ".", "_end", "[", "0", "]", ",", "range", ".", "_end", "[", "0", "]", ")", ",", "min", "(", "self", ".", "_end", "[", "1", "]", ",", "range", ".", "_end", "[", "1", "]", ")", ")", "if", "end", "[", "0", "]", "<", "start", "[", "0", "]", "or", "end", "[", "1", "]", "<", "start", "[", "1", "]", ":", "return", "None", "return", "Range", "(", "start", ",", "end", ",", "self", ".", "worksheet", ",", "validate", "=", "False", ")" ]
Calculates the intersection with another range object
[ "Calculates", "the", "intersection", "with", "another", "range", "object" ]
247406dc41adc7e94542bcbf04589f1e5fdf8c51
https://github.com/kz26/PyExcelerate/blob/247406dc41adc7e94542bcbf04589f1e5fdf8c51/pyexcelerate/Range.py#L140-L153
train
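A brief usage sketch, assuming the usual PyExcelerate workbook/worksheet setup (the sheet name is arbitrary):

from pyexcelerate import Workbook

wb = Workbook()
ws = wb.new_sheet("demo")
overlap = ws.range("A1:C3").intersection(ws.range("B2:D4"))  # the shared cells, B2:C3
miss = ws.range("A1:A1").intersection(ws.range("C3:D4"))     # disjoint -> None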
harlowja/fasteners
fasteners/process_lock.py
interprocess_locked
python
def interprocess_locked(path):
    """Acquires & releases a interprocess lock around call into decorated function."""

    lock = InterProcessLock(path)

    def decorator(f):

        @six.wraps(f)
        def wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return wrapper

    return decorator
[ "def", "interprocess_locked", "(", "path", ")", ":", "lock", "=", "InterProcessLock", "(", "path", ")", "def", "decorator", "(", "f", ")", ":", "@", "six", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "lock", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Acquires & releases a interprocess lock around call into decorated function.
[ "Acquires", "&", "releases", "a", "interprocess", "lock", "around", "call", "into", "decorated", "function", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L265-L280
train
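A minimal usage sketch; the lock-file path is a placeholder:

import fasteners

@fasteners.interprocess_locked('/tmp/not_concurrent.lock')  # placeholder path
def touch_shared_resource():
    pass  # at most one process at a time runs this body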
harlowja/fasteners
fasteners/process_lock.py
_InterProcessLock.acquire
python
def acquire(self, blocking=True,
            delay=DELAY_INCREMENT, max_delay=MAX_DELAY,
            timeout=None):
    """Attempt to acquire the given lock.

    :param blocking: whether to wait forever to try to acquire the lock
    :type blocking: bool
    :param delay: when blocking this is the delay time in seconds that
                  will be added after each failed acquisition
    :type delay: int/float
    :param max_delay: the maximum delay to have (this limits the
                      accumulated delay(s) added after each failed
                      acquisition)
    :type max_delay: int/float
    :param timeout: an optional timeout (limits how long blocking
                    will occur for)
    :type timeout: int/float
    :returns: whether or not the acquisition succeeded
    :rtype: bool
    """
    if delay < 0:
        raise ValueError("Delay must be greater than or equal to zero")
    if timeout is not None and timeout < 0:
        raise ValueError("Timeout must be greater than or equal to zero")
    if delay >= max_delay:
        max_delay = delay
    self._do_open()
    watch = _utils.StopWatch(duration=timeout)
    r = _utils.Retry(delay, max_delay,
                     sleep_func=self.sleep_func, watch=watch)
    with watch:
        gotten = r(self._try_acquire, blocking, watch)
    if not gotten:
        self.acquired = False
        return False
    else:
        self.acquired = True
        self.logger.log(_utils.BLATHER,
                        "Acquired file lock `%s` after waiting %0.3fs [%s"
                        " attempts were required]", self.path,
                        watch.elapsed(), r.attempts)
        return True
[ "def", "acquire", "(", "self", ",", "blocking", "=", "True", ",", "delay", "=", "DELAY_INCREMENT", ",", "max_delay", "=", "MAX_DELAY", ",", "timeout", "=", "None", ")", ":", "if", "delay", "<", "0", ":", "raise", "ValueError", "(", "\"Delay must be greater than or equal to zero\"", ")", "if", "timeout", "is", "not", "None", "and", "timeout", "<", "0", ":", "raise", "ValueError", "(", "\"Timeout must be greater than or equal to zero\"", ")", "if", "delay", ">=", "max_delay", ":", "max_delay", "=", "delay", "self", ".", "_do_open", "(", ")", "watch", "=", "_utils", ".", "StopWatch", "(", "duration", "=", "timeout", ")", "r", "=", "_utils", ".", "Retry", "(", "delay", ",", "max_delay", ",", "sleep_func", "=", "self", ".", "sleep_func", ",", "watch", "=", "watch", ")", "with", "watch", ":", "gotten", "=", "r", "(", "self", ".", "_try_acquire", ",", "blocking", ",", "watch", ")", "if", "not", "gotten", ":", "self", ".", "acquired", "=", "False", "return", "False", "else", ":", "self", ".", "acquired", "=", "True", "self", ".", "logger", ".", "log", "(", "_utils", ".", "BLATHER", ",", "\"Acquired file lock `%s` after waiting %0.3fs [%s\"", "\" attempts were required]\"", ",", "self", ".", "path", ",", "watch", ".", "elapsed", "(", ")", ",", "r", ".", "attempts", ")", "return", "True" ]
Attempt to acquire the given lock.

:param blocking: whether to wait forever to try to acquire the lock
:type blocking: bool
:param delay: when blocking this is the delay time in seconds that
              will be added after each failed acquisition
:type delay: int/float
:param max_delay: the maximum delay to have (this limits the
                  accumulated delay(s) added after each failed
                  acquisition)
:type max_delay: int/float
:param timeout: an optional timeout (limits how long blocking
                will occur for)
:type timeout: int/float
:returns: whether or not the acquisition succeeded
:rtype: bool
[ "Attempt", "to", "acquire", "the", "given", "lock", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L130-L171
train
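A sketch of the documented blocking/timeout behaviour; the lock path is a placeholder:

import fasteners

lock = fasteners.InterProcessLock('/tmp/example.lock')  # placeholder path
if lock.acquire(blocking=True, timeout=5):
    try:
        pass  # critical section
    finally:
        lock.release()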
harlowja/fasteners
fasteners/process_lock.py
_InterProcessLock.release
python
def release(self):
    """Release the previously acquired lock."""
    if not self.acquired:
        raise threading.ThreadError("Unable to release an unacquired"
                                    " lock")
    try:
        self.unlock()
    except IOError:
        self.logger.exception("Could not unlock the acquired lock opened"
                              " on `%s`", self.path)
    else:
        self.acquired = False
        try:
            self._do_close()
        except IOError:
            self.logger.exception("Could not close the file handle"
                                  " opened on `%s`", self.path)
        else:
            self.logger.log(_utils.BLATHER,
                            "Unlocked and closed file lock open on"
                            " `%s`", self.path)
[ "def", "release", "(", "self", ")", ":", "if", "not", "self", ".", "acquired", ":", "raise", "threading", ".", "ThreadError", "(", "\"Unable to release an unacquired\"", "\" lock\"", ")", "try", ":", "self", ".", "unlock", "(", ")", "except", "IOError", ":", "self", ".", "logger", ".", "exception", "(", "\"Could not unlock the acquired lock opened\"", "\" on `%s`\"", ",", "self", ".", "path", ")", "else", ":", "self", ".", "acquired", "=", "False", "try", ":", "self", ".", "_do_close", "(", ")", "except", "IOError", ":", "self", ".", "logger", ".", "exception", "(", "\"Could not close the file handle\"", "\" opened on `%s`\"", ",", "self", ".", "path", ")", "else", ":", "self", ".", "logger", ".", "log", "(", "_utils", ".", "BLATHER", ",", "\"Unlocked and closed file lock open on\"", "\" `%s`\"", ",", "self", ".", "path", ")" ]
Release the previously acquired lock.
[ "Release", "the", "previously", "acquired", "lock", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L187-L207
train
harlowja/fasteners
fasteners/_utils.py
canonicalize_path
python
def canonicalize_path(path):
    """Canonicalizes a potential path.

    Returns a binary string encoded into filesystem encoding.
    """
    if isinstance(path, six.binary_type):
        return path
    if isinstance(path, six.text_type):
        return _fsencode(path)
    else:
        return canonicalize_path(str(path))
[ "def", "canonicalize_path", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "six", ".", "binary_type", ")", ":", "return", "path", "if", "isinstance", "(", "path", ",", "six", ".", "text_type", ")", ":", "return", "_fsencode", "(", "path", ")", "else", ":", "return", "canonicalize_path", "(", "str", "(", "path", ")", ")" ]
Canonicalizes a potential path.

Returns a binary string encoded into filesystem encoding.
[ "Canonicalizes", "a", "potential", "path", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/_utils.py#L47-L57
train
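The three branches above, illustrated as a sketch (canonicalize_path lives in fasteners._utils and is private API):

from fasteners._utils import canonicalize_path

canonicalize_path(b'/tmp/x')  # bytes pass through unchanged
canonicalize_path(u'/tmp/x')  # text is encoded to the filesystem encoding -> b'/tmp/x'
canonicalize_path(42)         # anything else goes through str() first -> b'42'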
harlowja/fasteners
fasteners/lock.py
read_locked
python
def read_locked(*args, **kwargs):
    """Acquires & releases a read lock around call into decorated method.

    NOTE(harlowja): if no attribute name is provided then by default the
    attribute named '_lock' is looked for (this attribute is expected to be
    a :py:class:`.ReaderWriterLock`) in the instance object this decorator
    is attached to.
    """

    def decorator(f):
        attr_name = kwargs.get('lock', '_lock')

        @six.wraps(f)
        def wrapper(self, *args, **kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.read_lock():
                return f(self, *args, **kwargs)

        return wrapper

    # This is needed to handle when the decorator has args or the decorator
    # doesn't have args, python is rather weird here...
    if kwargs or not args:
        return decorator
    else:
        if len(args) == 1:
            return decorator(args[0])
        else:
            return decorator
[ "def", "read_locked", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "f", ")", ":", "attr_name", "=", "kwargs", ".", "get", "(", "'lock'", ",", "'_lock'", ")", "@", "six", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rw_lock", "=", "getattr", "(", "self", ",", "attr_name", ")", "with", "rw_lock", ".", "read_lock", "(", ")", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "# This is needed to handle when the decorator has args or the decorator", "# doesn't have args, python is rather weird here...", "if", "kwargs", "or", "not", "args", ":", "return", "decorator", "else", ":", "if", "len", "(", "args", ")", "==", "1", ":", "return", "decorator", "(", "args", "[", "0", "]", ")", "else", ":", "return", "decorator" ]
Acquires & releases a read lock around call into decorated method.

NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
[ "Acquires", "&", "releases", "a", "read", "lock", "around", "call", "into", "decorated", "method", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L38-L66
train
harlowja/fasteners
fasteners/lock.py
write_locked
python
def write_locked(*args, **kwargs):
    """Acquires & releases a write lock around call into decorated method.

    NOTE(harlowja): if no attribute name is provided then by default the
    attribute named '_lock' is looked for (this attribute is expected to be
    a :py:class:`.ReaderWriterLock` object) in the instance object this
    decorator is attached to.
    """

    def decorator(f):
        attr_name = kwargs.get('lock', '_lock')

        @six.wraps(f)
        def wrapper(self, *args, **kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.write_lock():
                return f(self, *args, **kwargs)

        return wrapper

    # This is needed to handle when the decorator has args or the decorator
    # doesn't have args, python is rather weird here...
    if kwargs or not args:
        return decorator
    else:
        if len(args) == 1:
            return decorator(args[0])
        else:
            return decorator
[ "def", "write_locked", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "f", ")", ":", "attr_name", "=", "kwargs", ".", "get", "(", "'lock'", ",", "'_lock'", ")", "@", "six", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rw_lock", "=", "getattr", "(", "self", ",", "attr_name", ")", "with", "rw_lock", ".", "write_lock", "(", ")", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "# This is needed to handle when the decorator has args or the decorator", "# doesn't have args, python is rather weird here...", "if", "kwargs", "or", "not", "args", ":", "return", "decorator", "else", ":", "if", "len", "(", "args", ")", "==", "1", ":", "return", "decorator", "(", "args", "[", "0", "]", ")", "else", ":", "return", "decorator" ]
Acquires & releases a write lock around call into decorated method.

NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance object this
decorator is attached to.
[ "Acquires", "&", "releases", "a", "write", "lock", "around", "call", "into", "decorated", "method", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L69-L97
train
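A usage sketch covering both decorators above; the class and method names are illustrative:

import fasteners

class Config(object):
    def __init__(self):
        self._lock = fasteners.ReaderWriterLock()  # '_lock' is the default attribute name
        self._data = {}

    @fasteners.read_locked
    def get(self, key):
        return self._data.get(key)

    @fasteners.write_locked
    def set(self, key, value):
        self._data[key] = value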
harlowja/fasteners
fasteners/lock.py
ReaderWriterLock.is_writer
python
def is_writer(self, check_pending=True):
    """Returns if the caller is the active writer or a pending writer."""
    me = self._current_thread()
    if self._writer == me:
        return True
    if check_pending:
        return me in self._pending_writers
    else:
        return False
[ "def", "is_writer", "(", "self", ",", "check_pending", "=", "True", ")", ":", "me", "=", "self", ".", "_current_thread", "(", ")", "if", "self", ".", "_writer", "==", "me", ":", "return", "True", "if", "check_pending", ":", "return", "me", "in", "self", ".", "_pending_writers", "else", ":", "return", "False" ]
Returns if the caller is the active writer or a pending writer.
[ "Returns", "if", "the", "caller", "is", "the", "active", "writer", "or", "a", "pending", "writer", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L136-L144
train
harlowja/fasteners
fasteners/lock.py
ReaderWriterLock.owner
python
def owner(self):
    """Returns whether the lock is locked by a writer or reader."""
    if self._writer is not None:
        return self.WRITER
    if self._readers:
        return self.READER
    return None
[ "def", "owner", "(", "self", ")", ":", "if", "self", ".", "_writer", "is", "not", "None", ":", "return", "self", ".", "WRITER", "if", "self", ".", "_readers", ":", "return", "self", ".", "READER", "return", "None" ]
Returns whether the lock is locked by a writer or reader.
[ "Returns", "whether", "the", "lock", "is", "locked", "by", "a", "writer", "or", "reader", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L147-L153
train
harlowja/fasteners
fasteners/lock.py
ReaderWriterLock.read_lock
python
def read_lock(self):
    """Context manager that grants a read lock.

    Will wait until no active or pending writers.

    Raises a ``RuntimeError`` if a pending writer tries to acquire
    a read lock.
    """
    me = self._current_thread()
    if me in self._pending_writers:
        raise RuntimeError("Writer %s can not acquire a read lock"
                           " while waiting for the write lock" % me)
    with self._cond:
        while True:
            # No active writer, or we are the writer;
            # we are good to become a reader.
            if self._writer is None or self._writer == me:
                try:
                    self._readers[me] = self._readers[me] + 1
                except KeyError:
                    self._readers[me] = 1
                break
            # An active writer; guess we have to wait.
            self._cond.wait()
    try:
        yield self
    finally:
        # I am no longer a reader, remove *one* occurrence of myself.
        # If the current thread acquired two read locks, then it will
        # still have to remove that other read lock; this allows for
        # basic reentrancy to be possible.
        with self._cond:
            try:
                me_instances = self._readers[me]
                if me_instances > 1:
                    self._readers[me] = me_instances - 1
                else:
                    self._readers.pop(me)
            except KeyError:
                pass
            self._cond.notify_all()
[ "def", "read_lock", "(", "self", ")", ":", "me", "=", "self", ".", "_current_thread", "(", ")", "if", "me", "in", "self", ".", "_pending_writers", ":", "raise", "RuntimeError", "(", "\"Writer %s can not acquire a read lock\"", "\" while waiting for the write lock\"", "%", "me", ")", "with", "self", ".", "_cond", ":", "while", "True", ":", "# No active writer, or we are the writer;", "# we are good to become a reader.", "if", "self", ".", "_writer", "is", "None", "or", "self", ".", "_writer", "==", "me", ":", "try", ":", "self", ".", "_readers", "[", "me", "]", "=", "self", ".", "_readers", "[", "me", "]", "+", "1", "except", "KeyError", ":", "self", ".", "_readers", "[", "me", "]", "=", "1", "break", "# An active writer; guess we have to wait.", "self", ".", "_cond", ".", "wait", "(", ")", "try", ":", "yield", "self", "finally", ":", "# I am no longer a reader, remove *one* occurrence of myself.", "# If the current thread acquired two read locks, then it will", "# still have to remove that other read lock; this allows for", "# basic reentrancy to be possible.", "with", "self", ".", "_cond", ":", "try", ":", "me_instances", "=", "self", ".", "_readers", "[", "me", "]", "if", "me_instances", ">", "1", ":", "self", ".", "_readers", "[", "me", "]", "=", "me_instances", "-", "1", "else", ":", "self", ".", "_readers", ".", "pop", "(", "me", ")", "except", "KeyError", ":", "pass", "self", ".", "_cond", ".", "notify_all", "(", ")" ]
Context manager that grants a read lock.

Will wait until no active or pending writers.

Raises a ``RuntimeError`` if a pending writer tries to acquire
a read lock.
[ "Context", "manager", "that", "grants", "a", "read", "lock", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L161-L202
train
harlowja/fasteners
fasteners/lock.py
ReaderWriterLock.write_lock
python
def write_lock(self):
    """Context manager that grants a write lock.

    Will wait until no active readers. Blocks readers after acquiring.

    Guaranteed for locks to be processed in fair order (FIFO).

    Raises a ``RuntimeError`` if an active reader attempts to acquire
    a lock.
    """
    me = self._current_thread()
    i_am_writer = self.is_writer(check_pending=False)
    if self.is_reader() and not i_am_writer:
        raise RuntimeError("Reader %s to writer privilege"
                           " escalation not allowed" % me)
    if i_am_writer:
        # Already the writer; this allows for basic reentrancy.
        yield self
    else:
        with self._cond:
            self._pending_writers.append(me)
            while True:
                # No readers, and no active writer, am I next??
                if len(self._readers) == 0 and self._writer is None:
                    if self._pending_writers[0] == me:
                        self._writer = self._pending_writers.popleft()
                        break
                self._cond.wait()
        try:
            yield self
        finally:
            with self._cond:
                self._writer = None
                self._cond.notify_all()
[ "def", "write_lock", "(", "self", ")", ":", "me", "=", "self", ".", "_current_thread", "(", ")", "i_am_writer", "=", "self", ".", "is_writer", "(", "check_pending", "=", "False", ")", "if", "self", ".", "is_reader", "(", ")", "and", "not", "i_am_writer", ":", "raise", "RuntimeError", "(", "\"Reader %s to writer privilege\"", "\" escalation not allowed\"", "%", "me", ")", "if", "i_am_writer", ":", "# Already the writer; this allows for basic reentrancy.", "yield", "self", "else", ":", "with", "self", ".", "_cond", ":", "self", ".", "_pending_writers", ".", "append", "(", "me", ")", "while", "True", ":", "# No readers, and no active writer, am I next??", "if", "len", "(", "self", ".", "_readers", ")", "==", "0", "and", "self", ".", "_writer", "is", "None", ":", "if", "self", ".", "_pending_writers", "[", "0", "]", "==", "me", ":", "self", ".", "_writer", "=", "self", ".", "_pending_writers", ".", "popleft", "(", ")", "break", "self", ".", "_cond", ".", "wait", "(", ")", "try", ":", "yield", "self", "finally", ":", "with", "self", ".", "_cond", ":", "self", ".", "_writer", "=", "None", "self", ".", "_cond", ".", "notify_all", "(", ")" ]
Context manager that grants a write lock.

Will wait until no active readers. Blocks readers after acquiring.

Guaranteed for locks to be processed in fair order (FIFO).

Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
[ "Context", "manager", "that", "grants", "a", "write", "lock", "." ]
8f3bbab0204a50037448a8fad7a6bf12eb1a2695
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L205-L238
train
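The same lock used directly as context managers, per the two records above:

import fasteners

lock = fasteners.ReaderWriterLock()

with lock.read_lock():
    pass  # many readers may hold this concurrently

with lock.write_lock():
    pass  # exclusive; pending writers are served in FIFO order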
bfontaine/freesms
freesms/__init__.py
FreeClient.send_sms
python
def send_sms(self, text, **kw):
    """
    Send an SMS. Since Free only allows us to send SMSes to ourselves you
    don't have to provide your phone number.
    """
    params = {
        'user': self._user,
        'pass': self._passwd,
        'msg': text
    }

    kw.setdefault("verify", False)

    if not kw["verify"]:
        # remove SSL warning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    res = requests.get(FreeClient.BASE_URL, params=params, **kw)
    return FreeResponse(res.status_code)
[ "def", "send_sms", "(", "self", ",", "text", ",", "*", "*", "kw", ")", ":", "params", "=", "{", "'user'", ":", "self", ".", "_user", ",", "'pass'", ":", "self", ".", "_passwd", ",", "'msg'", ":", "text", "}", "kw", ".", "setdefault", "(", "\"verify\"", ",", "False", ")", "if", "not", "kw", "[", "\"verify\"", "]", ":", "# remove SSL warning", "requests", ".", "packages", ".", "urllib3", ".", "disable_warnings", "(", "InsecureRequestWarning", ")", "res", "=", "requests", ".", "get", "(", "FreeClient", ".", "BASE_URL", ",", "params", "=", "params", ",", "*", "*", "kw", ")", "return", "FreeResponse", "(", "res", ".", "status_code", ")" ]
Send an SMS. Since Free only allows us to send SMSes to ourselves you don't have to provide your phone number.
[ "Send", "an", "SMS", ".", "Since", "Free", "only", "allows", "us", "to", "send", "SMSes", "to", "ourselves", "you", "don", "t", "have", "to", "provide", "your", "phone", "number", "." ]
64b3df222a852f313bd80afd9a7280b584fe31e1
https://github.com/bfontaine/freesms/blob/64b3df222a852f313bd80afd9a7280b584fe31e1/freesms/__init__.py#L63-L82
train
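A usage sketch; the credentials are placeholders (since Free's API identifies the recipient, no phone number is passed):

from freesms import FreeClient

f = FreeClient(user="12345678", passwd="s3cret")  # placeholder Free Mobile credentials
resp = f.send_sms("hello from Python")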
scrapinghub/exporters
exporters/writers/filebase_base_writer.py
FilebaseBaseWriter.create_filebase_name
python
def create_filebase_name(self, group_info, extension='gz', file_name=None):
    """
    Return tuple of resolved destination folder name and file name
    """
    dirname = self.filebase.formatted_dirname(groups=group_info)
    if not file_name:
        file_name = self.filebase.prefix_template + '.' + extension
    return dirname, file_name
[ "def", "create_filebase_name", "(", "self", ",", "group_info", ",", "extension", "=", "'gz'", ",", "file_name", "=", "None", ")", ":", "dirname", "=", "self", ".", "filebase", ".", "formatted_dirname", "(", "groups", "=", "group_info", ")", "if", "not", "file_name", ":", "file_name", "=", "self", ".", "filebase", ".", "prefix_template", "+", "'.'", "+", "extension", "return", "dirname", ",", "file_name" ]
Return tuple of resolved destination folder name and file name
[ "Return", "tuple", "of", "resolved", "destination", "folder", "name", "and", "file", "name" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/filebase_base_writer.py#L145-L152
train
scrapinghub/exporters
exporters/writers/aggregation_stats_writer.py
AggregationStatsWriter.write_batch
python
def write_batch(self, batch):
    """
    Receives the batch and writes it. This method is usually called from a manager.
    """
    for item in batch:
        for key in item:
            self.aggregated_info['occurrences'][key] += 1
        self.increment_written_items()
        if self.items_limit and self.items_limit == self.get_metadata('items_count'):
            raise ItemsLimitReached('Finishing job after items_limit reached: {} items written.'
                                    .format(self.get_metadata('items_count')))
    self.logger.debug('Wrote items')
[ "def", "write_batch", "(", "self", ",", "batch", ")", ":", "for", "item", "in", "batch", ":", "for", "key", "in", "item", ":", "self", ".", "aggregated_info", "[", "'occurrences'", "]", "[", "key", "]", "+=", "1", "self", ".", "increment_written_items", "(", ")", "if", "self", ".", "items_limit", "and", "self", ".", "items_limit", "==", "self", ".", "get_metadata", "(", "'items_count'", ")", ":", "raise", "ItemsLimitReached", "(", "'Finishing job after items_limit reached: {} items written.'", ".", "format", "(", "self", ".", "get_metadata", "(", "'items_count'", ")", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Wrote items'", ")" ]
Receives the batch and writes it. This method is usually called from a manager.
[ "Receives", "the", "batch", "and", "writes", "it", ".", "This", "method", "is", "usually", "called", "from", "a", "manager", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/aggregation_stats_writer.py#L18-L29
train
scrapinghub/exporters
exporters/writers/aggregation_stats_writer.py
AggregationStatsWriter._get_aggregated_info
python
def _get_aggregated_info(self):
    """
    Keeps track of aggregated info in a dictionary called self.aggregated_info
    """
    agg_results = {}
    for key in self.aggregated_info['occurrences']:
        agg_results[key] = {
            'occurrences': self.aggregated_info['occurrences'].get(key),
            'coverage': (float(self.aggregated_info['occurrences']
                         .get(key))/float(self.get_metadata('items_count')))*100
        }
    return agg_results
[ "def", "_get_aggregated_info", "(", "self", ")", ":", "agg_results", "=", "{", "}", "for", "key", "in", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ":", "agg_results", "[", "key", "]", "=", "{", "'occurrences'", ":", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ".", "get", "(", "key", ")", ",", "'coverage'", ":", "(", "float", "(", "self", ".", "aggregated_info", "[", "'occurrences'", "]", ".", "get", "(", "key", ")", ")", "/", "float", "(", "self", ".", "get_metadata", "(", "'items_count'", ")", ")", ")", "*", "100", "}", "return", "agg_results" ]
Keeps track of aggregated info in a dictionary called self.aggregated_info
[ "Keeps", "track", "of", "aggregated", "info", "in", "a", "dictionary", "called", "self", ".", "aggregated_info" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/aggregation_stats_writer.py#L31-L42
train
scrapinghub/exporters
exporters/writers/cloudsearch_writer.py
create_document_batches
python
def create_document_batches(jsonlines, id_field, max_batch_size=CLOUDSEARCH_MAX_BATCH_SIZE):
    """Create batches in expected AWS Cloudsearch format, limiting the
    byte size per batch according to given max_batch_size

    See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html
    """
    batch = []
    fixed_initial_size = 2

    def create_entry(line):
        try:
            record = json.loads(line)
        except:
            raise ValueError('Could not parse JSON from: %s' % line)
        key = record[id_field]
        return '{"type":"add","id":%s,"fields":%s}' % (json.dumps(key), line)

    current_size = fixed_initial_size
    for line in jsonlines:
        entry = create_entry(line)
        entry_size = len(entry) + 1
        if max_batch_size > (current_size + entry_size):
            current_size += entry_size
            batch.append(entry)
        else:
            yield '[' + ','.join(batch) + ']'
            batch = [entry]
            current_size = fixed_initial_size + entry_size

    if batch:
        yield '[' + ','.join(batch) + ']'
[ "def", "create_document_batches", "(", "jsonlines", ",", "id_field", ",", "max_batch_size", "=", "CLOUDSEARCH_MAX_BATCH_SIZE", ")", ":", "batch", "=", "[", "]", "fixed_initial_size", "=", "2", "def", "create_entry", "(", "line", ")", ":", "try", ":", "record", "=", "json", ".", "loads", "(", "line", ")", "except", ":", "raise", "ValueError", "(", "'Could not parse JSON from: %s'", "%", "line", ")", "key", "=", "record", "[", "id_field", "]", "return", "'{\"type\":\"add\",\"id\":%s,\"fields\":%s}'", "%", "(", "json", ".", "dumps", "(", "key", ")", ",", "line", ")", "current_size", "=", "fixed_initial_size", "for", "line", "in", "jsonlines", ":", "entry", "=", "create_entry", "(", "line", ")", "entry_size", "=", "len", "(", "entry", ")", "+", "1", "if", "max_batch_size", ">", "(", "current_size", "+", "entry_size", ")", ":", "current_size", "+=", "entry_size", "batch", ".", "append", "(", "entry", ")", "else", ":", "yield", "'['", "+", "','", ".", "join", "(", "batch", ")", "+", "']'", "batch", "=", "[", "entry", "]", "current_size", "=", "fixed_initial_size", "+", "entry_size", "if", "batch", ":", "yield", "'['", "+", "','", ".", "join", "(", "batch", ")", "+", "']'" ]
Create batches in expected AWS Cloudsearch format, limiting the
byte size per batch according to given max_batch_size

See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html
[ "Create", "batches", "in", "expected", "AWS", "Cloudsearch", "format", "limiting", "the", "byte", "size", "per", "batch", "according", "to", "given", "max_batch_size" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/cloudsearch_writer.py#L14-L45
train
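A sketch of feeding the batcher; the records, size limit, and the upload() consumer are illustrative:

from exporters.writers.cloudsearch_writer import create_document_batches

jsonlines = ['{"id": "a", "title": "first"}', '{"id": "b", "title": "second"}']
for batch in create_document_batches(jsonlines, 'id', max_batch_size=5 * 1024 * 1024):
    upload(batch)  # hypothetical sender; each batch is a JSON array string under the byte limit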
scrapinghub/exporters
exporters/writers/cloudsearch_writer.py
CloudSearchWriter._post_document_batch
python
def _post_document_batch(self, batch):
    """
    Send a batch to Cloudsearch endpoint

    See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html
    """  # noqa
    target_batch = '/2013-01-01/documents/batch'
    url = self.endpoint_url + target_batch
    return requests.post(url, data=batch, headers={'Content-type': 'application/json'})
[ "def", "_post_document_batch", "(", "self", ",", "batch", ")", ":", "# noqa", "target_batch", "=", "'/2013-01-01/documents/batch'", "url", "=", "self", ".", "endpoint_url", "+", "target_batch", "return", "requests", ".", "post", "(", "url", ",", "data", "=", "batch", ",", "headers", "=", "{", "'Content-type'", ":", "'application/json'", "}", ")" ]
Send a batch to Cloudsearch endpoint

See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html
[ "Send", "a", "batch", "to", "Cloudsearch", "endpoint" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/cloudsearch_writer.py#L97-L105
train
scrapinghub/exporters
exporters/writers/fs_writer.py
FSWriter._create_path_if_not_exist
python
def _create_path_if_not_exist(self, path):
    """
    Creates a folders path if it doesn't exist
    """
    if path and not os.path.exists(path):
        os.makedirs(path)
[ "def", "_create_path_if_not_exist", "(", "self", ",", "path", ")", ":", "if", "path", "and", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
Creates a folders path if it doesn't exist
[ "Creates", "a", "folders", "path", "if", "it", "doesn", "t", "exist" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/fs_writer.py#L26-L31
train
scrapinghub/exporters
exporters/writers/s3_writer.py
S3Writer.close
python
def close(self):
    """
    Called to clean all possible tmp files created during the process.
    """
    if self.read_option('save_pointer'):
        self._update_last_pointer()
    super(S3Writer, self).close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "read_option", "(", "'save_pointer'", ")", ":", "self", ".", "_update_last_pointer", "(", ")", "super", "(", "S3Writer", ",", "self", ")", ".", "close", "(", ")" ]
Called to clean all possible tmp files created during the process.
[ "Called", "to", "clean", "all", "possible", "tmp", "files", "created", "during", "the", "process", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/s3_writer.py#L207-L213
train
scrapinghub/exporters
exporters/utils.py
get_boto_connection
python
def get_boto_connection(aws_access_key_id, aws_secret_access_key,
                        region=None, bucketname=None, host=None):
    """
    Conection parameters must be different only if bucket name has a period
    """
    m = _AWS_ACCESS_KEY_ID_RE.match(aws_access_key_id)
    if m is None or m.group() != aws_access_key_id:
        logging.error('The provided aws_access_key_id is not in the correct format. It must \
            be alphanumeric and contain between 16 and 32 characters.')
    if len(aws_access_key_id) > len(aws_secret_access_key):
        logging.warn("The AWS credential keys aren't in the usual size,"
                     " are you using the correct ones?")
    import boto
    from boto.s3.connection import OrdinaryCallingFormat
    extra_args = {}
    if host is not None:
        extra_args['host'] = host
    if bucketname is not None and '.' in bucketname:
        extra_args['calling_format'] = OrdinaryCallingFormat()
    if region is None:
        return boto.connect_s3(aws_access_key_id, aws_secret_access_key, **extra_args)
    return boto.s3.connect_to_region(region,
                                     aws_access_key_id=aws_access_key_id,
                                     aws_secret_access_key=aws_secret_access_key,
                                     **extra_args)
[ "def", "get_boto_connection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "region", "=", "None", ",", "bucketname", "=", "None", ",", "host", "=", "None", ")", ":", "m", "=", "_AWS_ACCESS_KEY_ID_RE", ".", "match", "(", "aws_access_key_id", ")", "if", "m", "is", "None", "or", "m", ".", "group", "(", ")", "!=", "aws_access_key_id", ":", "logging", ".", "error", "(", "'The provided aws_access_key_id is not in the correct format. It must \\\n be alphanumeric and contain between 16 and 32 characters.'", ")", "if", "len", "(", "aws_access_key_id", ")", ">", "len", "(", "aws_secret_access_key", ")", ":", "logging", ".", "warn", "(", "\"The AWS credential keys aren't in the usual size,\"", "\" are you using the correct ones?\"", ")", "import", "boto", "from", "boto", ".", "s3", ".", "connection", "import", "OrdinaryCallingFormat", "extra_args", "=", "{", "}", "if", "host", "is", "not", "None", ":", "extra_args", "[", "'host'", "]", "=", "host", "if", "bucketname", "is", "not", "None", "and", "'.'", "in", "bucketname", ":", "extra_args", "[", "'calling_format'", "]", "=", "OrdinaryCallingFormat", "(", ")", "if", "region", "is", "None", ":", "return", "boto", ".", "connect_s3", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "*", "*", "extra_args", ")", "return", "boto", ".", "s3", ".", "connect_to_region", "(", "region", ",", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "*", "*", "extra_args", ")" ]
Conection parameters must be different only if bucket name has a period
[ "Conection", "parameters", "must", "be", "different", "only", "if", "bucket", "name", "has", "a", "period" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/utils.py#L115-L140
train
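A hedged usage sketch; the credentials are placeholders, and the dotted bucket name is what forces OrdinaryCallingFormat per the code above:

from exporters.utils import get_boto_connection

conn = get_boto_connection('AKIAXXXXXXXXXXXXXXXX', 'not-a-real-secret',
                           region='us-east-1', bucketname='my.dotted.bucket')
bucket = conn.get_bucket('my.dotted.bucket')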
scrapinghub/exporters
exporters/utils.py
maybe_cast_list
python
def maybe_cast_list(value, types):
    """
    Try to coerce list values into more specific list subclasses in types.
    """
    if not isinstance(value, list):
        return value

    if type(types) not in (list, tuple):
        types = (types,)

    for list_type in types:
        if issubclass(list_type, list):
            try:
                return list_type(value)
            except (TypeError, ValueError):
                pass

    return value
[ "def", "maybe_cast_list", "(", "value", ",", "types", ")", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "return", "value", "if", "type", "(", "types", ")", "not", "in", "(", "list", ",", "tuple", ")", ":", "types", "=", "(", "types", ",", ")", "for", "list_type", "in", "types", ":", "if", "issubclass", "(", "list_type", ",", "list", ")", ":", "try", ":", "return", "list_type", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "return", "value" ]
Try to coerce list values into more specific list subclasses in types.
[ "Try", "to", "coerce", "list", "values", "into", "more", "specific", "list", "subclasses", "in", "types", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/utils.py#L143-L159
train
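A quick illustration of the coercion rules above; SortedList is a hypothetical list subclass:

from exporters.utils import maybe_cast_list

class SortedList(list):
    pass

maybe_cast_list([3, 1, 2], SortedList)          # -> SortedList([3, 1, 2])
maybe_cast_list((3, 1, 2), SortedList)          # not a list -> tuple returned unchanged
maybe_cast_list([3, 1, 2], (dict, SortedList))  # non-list entries in types are skipped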
scrapinghub/exporters
exporters/iterio.py
iterate_chunks
python
def iterate_chunks(file, chunk_size):
    """
    Iterate chunks of size chunk_size from a file-like object
    """
    chunk = file.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file.read(chunk_size)
[ "def", "iterate_chunks", "(", "file", ",", "chunk_size", ")", ":", "chunk", "=", "file", ".", "read", "(", "chunk_size", ")", "while", "chunk", ":", "yield", "chunk", "chunk", "=", "file", ".", "read", "(", "chunk_size", ")" ]
Iterate chunks of size chunk_size from a file-like object
[ "Iterate", "chunks", "of", "size", "chunk_size", "from", "a", "file", "-", "like", "object" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L15-L22
train
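A minimal usage sketch; the file name and the process() consumer are placeholders:

from exporters.iterio import iterate_chunks

with open('dump.jl', 'rb') as f:  # placeholder file name
    for chunk in iterate_chunks(f, 1024):
        process(chunk)  # hypothetical consumer; chunks are at most 1024 bytes, until EOF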
scrapinghub/exporters
exporters/iterio.py
IterIO.unshift
python
def unshift(self, chunk):
    """
    Pushes a chunk of data back into the internal buffer.

    This is useful in certain situations where a stream is being consumed
    by code that needs to "un-consume" some amount of data that it has
    optimistically pulled out of the source, so that the data can be
    passed on to some other party.
    """
    if chunk:
        self._pos -= len(chunk)
        self._unconsumed.append(chunk)
[ "def", "unshift", "(", "self", ",", "chunk", ")", ":", "if", "chunk", ":", "self", ".", "_pos", "-=", "len", "(", "chunk", ")", "self", ".", "_unconsumed", ".", "append", "(", "chunk", ")" ]
Pushes a chunk of data back into the internal buffer. This is useful in certain situations where a stream is being consumed by code that needs to "un-consume" some amount of data that it has optimistically pulled out of the source, so that the data can be passed on to some other party.
[ "Pushes", "a", "chunk", "of", "data", "back", "into", "the", "internal", "buffer", ".", "This", "is", "useful", "in", "certain", "situations", "where", "a", "stream", "is", "being", "consumed", "by", "code", "that", "needs", "to", "un", "-", "consume", "some", "amount", "of", "data", "that", "it", "has", "optimistically", "pulled", "out", "of", "the", "source", "so", "that", "the", "data", "can", "be", "passed", "on", "to", "some", "other", "party", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L46-L56
train
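The peek-then-put-back pattern the docstring describes, as a sketch (assumes an IterIO instance named stream exposing the usual read(n)):

head = stream.read(2)            # optimistically consume a couple of bytes
if not head.startswith('\x1f'):  # e.g. not the gzip magic byte
    stream.unshift(head)         # hand them back; the next read sees them again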
scrapinghub/exporters
exporters/iterio.py
IterIO.readline
def readline(self): """ Read until a new-line character is encountered """ line = "" n_pos = -1 try: while n_pos < 0: line += self.next_chunk() n_pos = line.find('\n') except StopIteration: pass if n_pos >= 0: line, extra = line[:n_pos+1], line[n_pos+1:] self.unshift(extra) return line
python
def readline(self): """ Read until a new-line character is encountered """ line = "" n_pos = -1 try: while n_pos < 0: line += self.next_chunk() n_pos = line.find('\n') except StopIteration: pass if n_pos >= 0: line, extra = line[:n_pos+1], line[n_pos+1:] self.unshift(extra) return line
[ "def", "readline", "(", "self", ")", ":", "line", "=", "\"\"", "n_pos", "=", "-", "1", "try", ":", "while", "n_pos", "<", "0", ":", "line", "+=", "self", ".", "next_chunk", "(", ")", "n_pos", "=", "line", ".", "find", "(", "'\\n'", ")", "except", "StopIteration", ":", "pass", "if", "n_pos", ">=", "0", ":", "line", ",", "extra", "=", "line", "[", ":", "n_pos", "+", "1", "]", ",", "line", "[", "n_pos", "+", "1", ":", "]", "self", ".", "unshift", "(", "extra", ")", "return", "line" ]
Read until a new-line character is encountered
[ "Read", "until", "a", "new", "-", "line", "character", "is", "encountered" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L110-L126
train
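A self-contained sketch of how readline and unshift above cooperate. This mini class mimics IterIO's buffering but omits the _pos bookkeeping and the rest of the file-like API:

class ChunkLines(object):
    def __init__(self, chunks):
        self._iterator = iter(chunks)
        self._unconsumed = []

    def next_chunk(self):
        # Serve pushed-back data before pulling from the source.
        if self._unconsumed:
            return self._unconsumed.pop()
        return next(self._iterator)

    def unshift(self, chunk):
        if chunk:
            self._unconsumed.append(chunk)

    def readline(self):
        line = ""
        n_pos = -1
        try:
            while n_pos < 0:
                line += self.next_chunk()
                n_pos = line.find('\n')
        except StopIteration:
            pass
        if n_pos >= 0:
            line, extra = line[:n_pos + 1], line[n_pos + 1:]
            self.unshift(extra)  # "un-consume" what lies past the newline
        return line

r = ChunkLines(['ab\ncd', 'ef\n'])
print(repr(r.readline()))  # 'ab\n'
print(repr(r.readline()))  # 'cdef\n' -- the unshifted 'cd' is reused first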
scrapinghub/exporters
exporters/iterio.py
IterIO.close
def close(self): """ Disable all operations and close the underlying file-like object, if any """ if callable(getattr(self._file, 'close', None)): self._iterator.close() self._iterator = None self._unconsumed = None self.closed = True
python
def close(self): """ Disable all operations and close the underlying file-like object, if any """ if callable(getattr(self._file, 'close', None)): self._iterator.close() self._iterator = None self._unconsumed = None self.closed = True
[ "def", "close", "(", "self", ")", ":", "if", "callable", "(", "getattr", "(", "self", ".", "_file", ",", "'close'", ",", "None", ")", ")", ":", "self", ".", "_iterator", ".", "close", "(", ")", "self", ".", "_iterator", "=", "None", "self", ".", "_unconsumed", "=", "None", "self", ".", "closed", "=", "True" ]
Disable all operations and close the underlying file-like object, if any
[ "Disable", "al", "operations", "and", "close", "the", "underlying", "file", "-", "like", "object", "if", "any" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L148-L156
train
scrapinghub/exporters
exporters/persistence/pickle_persistence.py
PicklePersistence.configuration_from_uri
def configuration_from_uri(uri, uri_regex): """ Returns a configuration object. """ file_path = re.match(uri_regex, uri).groups()[0] with open(file_path) as f: configuration = pickle.load(f)['configuration'] configuration = yaml.safe_load(configuration) configuration['exporter_options']['resume'] = True persistence_state_id = file_path.split(os.path.sep)[-1] configuration['exporter_options']['persistence_state_id'] = persistence_state_id return configuration
python
def configuration_from_uri(uri, uri_regex): """ Returns a configuration object. """ file_path = re.match(uri_regex, uri).groups()[0] with open(file_path) as f: configuration = pickle.load(f)['configuration'] configuration = yaml.safe_load(configuration) configuration['exporter_options']['resume'] = True persistence_state_id = file_path.split(os.path.sep)[-1] configuration['exporter_options']['persistence_state_id'] = persistence_state_id return configuration
[ "def", "configuration_from_uri", "(", "uri", ",", "uri_regex", ")", ":", "file_path", "=", "re", ".", "match", "(", "uri_regex", ",", "uri", ")", ".", "groups", "(", ")", "[", "0", "]", "with", "open", "(", "file_path", ")", "as", "f", ":", "configuration", "=", "pickle", ".", "load", "(", "f", ")", "[", "'configuration'", "]", "configuration", "=", "yaml", ".", "safe_load", "(", "configuration", ")", "configuration", "[", "'exporter_options'", "]", "[", "'resume'", "]", "=", "True", "persistence_state_id", "=", "file_path", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "1", "]", "configuration", "[", "'exporter_options'", "]", "[", "'persistence_state_id'", "]", "=", "persistence_state_id", "return", "configuration" ]
Returns a configuration object.
[ "returns", "a", "configuration", "object", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/pickle_persistence.py#L77-L88
train
scrapinghub/exporters
exporters/write_buffers/base.py
WriteBuffer.buffer
def buffer(self, item): """ Receive an item and write it. """ key = self.get_key_from_item(item) if not self.grouping_info.is_first_file_item(key): self.items_group_files.add_item_separator_to_file(key) self.grouping_info.ensure_group_info(key) self.items_group_files.add_item_to_file(item, key)
python
def buffer(self, item): """ Receive an item and write it. """ key = self.get_key_from_item(item) if not self.grouping_info.is_first_file_item(key): self.items_group_files.add_item_separator_to_file(key) self.grouping_info.ensure_group_info(key) self.items_group_files.add_item_to_file(item, key)
[ "def", "buffer", "(", "self", ",", "item", ")", ":", "key", "=", "self", ".", "get_key_from_item", "(", "item", ")", "if", "not", "self", ".", "grouping_info", ".", "is_first_file_item", "(", "key", ")", ":", "self", ".", "items_group_files", ".", "add_item_separator_to_file", "(", "key", ")", "self", ".", "grouping_info", ".", "ensure_group_info", "(", "key", ")", "self", ".", "items_group_files", ".", "add_item_to_file", "(", "item", ",", "key", ")" ]
Receive an item and write it.
[ "Receive", "an", "item", "and", "write", "it", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/write_buffers/base.py#L29-L37
train
scrapinghub/exporters
exporters/persistence/alchemy_persistence.py
BaseAlchemyPersistence.parse_persistence_uri
def parse_persistence_uri(cls, persistence_uri): """Parse a database URI and the persistence state ID from the given persistence URI """ regex = cls.persistence_uri_re match = re.match(regex, persistence_uri) if not match: raise ValueError("Couldn't parse persistence URI: %s -- regex: %s" % (persistence_uri, regex)) conn_params = match.groupdict() missing = {'proto', 'job_id', 'database'} - set(conn_params) if missing: raise ValueError('Missing required parameters: %s (given params: %s)' % (tuple(missing), conn_params)) persistence_state_id = int(conn_params.pop('job_id')) db_uri = cls.build_db_conn_uri(**conn_params) return db_uri, persistence_state_id
python
def parse_persistence_uri(cls, persistence_uri): """Parse a database URI and the persistence state ID from the given persistence URI """ regex = cls.persistence_uri_re match = re.match(regex, persistence_uri) if not match: raise ValueError("Couldn't parse persistence URI: %s -- regex: %s" % (persistence_uri, regex)) conn_params = match.groupdict() missing = {'proto', 'job_id', 'database'} - set(conn_params) if missing: raise ValueError('Missing required parameters: %s (given params: %s)' % (tuple(missing), conn_params)) persistence_state_id = int(conn_params.pop('job_id')) db_uri = cls.build_db_conn_uri(**conn_params) return db_uri, persistence_state_id
[ "def", "parse_persistence_uri", "(", "cls", ",", "persistence_uri", ")", ":", "regex", "=", "cls", ".", "persistence_uri_re", "match", "=", "re", ".", "match", "(", "regex", ",", "persistence_uri", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Couldn't parse persistence URI: %s -- regex: %s)\"", "%", "(", "persistence_uri", ",", "regex", ")", ")", "conn_params", "=", "match", ".", "groupdict", "(", ")", "missing", "=", "{", "'proto'", ",", "'job_id'", ",", "'database'", "}", "-", "set", "(", "conn_params", ")", "if", "missing", ":", "raise", "ValueError", "(", "'Missing required parameters: %s (given params: %s)'", "%", "(", "tuple", "(", "missing", ")", ",", "conn_params", ")", ")", "persistence_state_id", "=", "int", "(", "conn_params", ".", "pop", "(", "'job_id'", ")", ")", "db_uri", "=", "cls", ".", "build_db_conn_uri", "(", "*", "*", "conn_params", ")", "return", "db_uri", ",", "persistence_state_id" ]
Parse a database URI and the persistence state ID from the given persistence URI
[ "Parse", "a", "database", "URI", "and", "the", "persistence", "state", "ID", "from", "the", "given", "persistence", "URI" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/alchemy_persistence.py#L104-L122
train
scrapinghub/exporters
exporters/persistence/alchemy_persistence.py
BaseAlchemyPersistence.configuration_from_uri
def configuration_from_uri(cls, persistence_uri): """ Return a configuration object. """ db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri) engine = create_engine(db_uri) Base.metadata.create_all(engine) Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) session = DBSession() job = session.query(Job).filter(Job.id == persistence_state_id).first() configuration = job.configuration configuration = yaml.safe_load(configuration) configuration['exporter_options']['resume'] = True configuration['exporter_options']['persistence_state_id'] = persistence_state_id return configuration
python
def configuration_from_uri(cls, persistence_uri): """ Return a configuration object. """ db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri) engine = create_engine(db_uri) Base.metadata.create_all(engine) Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) session = DBSession() job = session.query(Job).filter(Job.id == persistence_state_id).first() configuration = job.configuration configuration = yaml.safe_load(configuration) configuration['exporter_options']['resume'] = True configuration['exporter_options']['persistence_state_id'] = persistence_state_id return configuration
[ "def", "configuration_from_uri", "(", "cls", ",", "persistence_uri", ")", ":", "db_uri", ",", "persistence_state_id", "=", "cls", ".", "parse_persistence_uri", "(", "persistence_uri", ")", "engine", "=", "create_engine", "(", "db_uri", ")", "Base", ".", "metadata", ".", "create_all", "(", "engine", ")", "Base", ".", "metadata", ".", "bind", "=", "engine", "DBSession", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "DBSession", "(", ")", "job", "=", "session", ".", "query", "(", "Job", ")", ".", "filter", "(", "Job", ".", "id", "==", "persistence_state_id", ")", ".", "first", "(", ")", "configuration", "=", "job", ".", "configuration", "configuration", "=", "yaml", ".", "safe_load", "(", "configuration", ")", "configuration", "[", "'exporter_options'", "]", "[", "'resume'", "]", "=", "True", "configuration", "[", "'exporter_options'", "]", "[", "'persistence_state_id'", "]", "=", "persistence_state_id", "return", "configuration" ]
Return a configuration object.
[ "Return", "a", "configuration", "object", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/alchemy_persistence.py#L125-L140
train
scrapinghub/exporters
exporters/readers/fs_reader.py
FSReader._get_input_files
def _get_input_files(cls, input_specification): """Get list of input files according to input definition. Input definition can be: - str: specifying a filename - list of str: specifying a list of filenames - dict with "dir" and optional "pattern" parameters: specifying the toplevel directory under which input files will be sought and an optional filepath pattern """ if isinstance(input_specification, (basestring, dict)): input_specification = [input_specification] elif not isinstance(input_specification, list): raise ConfigurationError("Input specification must be string, list or dict.") out = [] for input_unit in input_specification: if isinstance(input_unit, basestring): out.append(input_unit) elif isinstance(input_unit, dict): missing = object() directory = input_unit.get('dir', missing) dir_pointer = input_unit.get('dir_pointer', missing) if directory is missing and dir_pointer is missing: raise ConfigurationError( 'Input directory dict must contain' ' "dir" or "dir_pointer" element (but not both)') if directory is not missing and dir_pointer is not missing: raise ConfigurationError( 'Input directory dict must not contain' ' both "dir" and "dir_pointer" elements') if dir_pointer is not missing: directory = cls._get_pointer(dir_pointer) out.extend(cls._get_directory_files( directory=directory, pattern=input_unit.get('pattern'), include_dot_files=input_unit.get('include_dot_files', False))) else: raise ConfigurationError('Input must only contain strings or dicts') return out
python
def _get_input_files(cls, input_specification): """Get list of input files according to input definition. Input definition can be: - str: specifying a filename - list of str: specifying a list of filenames - dict with "dir" and optional "pattern" parameters: specifying the toplevel directory under which input files will be sought and an optional filepath pattern """ if isinstance(input_specification, (basestring, dict)): input_specification = [input_specification] elif not isinstance(input_specification, list): raise ConfigurationError("Input specification must be string, list or dict.") out = [] for input_unit in input_specification: if isinstance(input_unit, basestring): out.append(input_unit) elif isinstance(input_unit, dict): missing = object() directory = input_unit.get('dir', missing) dir_pointer = input_unit.get('dir_pointer', missing) if directory is missing and dir_pointer is missing: raise ConfigurationError( 'Input directory dict must contain' ' "dir" or "dir_pointer" element (but not both)') if directory is not missing and dir_pointer is not missing: raise ConfigurationError( 'Input directory dict must not contain' ' both "dir" and "dir_pointer" elements') if dir_pointer is not missing: directory = cls._get_pointer(dir_pointer) out.extend(cls._get_directory_files( directory=directory, pattern=input_unit.get('pattern'), include_dot_files=input_unit.get('include_dot_files', False))) else: raise ConfigurationError('Input must only contain strings or dicts') return out
[ "def", "_get_input_files", "(", "cls", ",", "input_specification", ")", ":", "if", "isinstance", "(", "input_specification", ",", "(", "basestring", ",", "dict", ")", ")", ":", "input_specification", "=", "[", "input_specification", "]", "elif", "not", "isinstance", "(", "input_specification", ",", "list", ")", ":", "raise", "ConfigurationError", "(", "\"Input specification must be string, list or dict.\"", ")", "out", "=", "[", "]", "for", "input_unit", "in", "input_specification", ":", "if", "isinstance", "(", "input_unit", ",", "basestring", ")", ":", "out", ".", "append", "(", "input_unit", ")", "elif", "isinstance", "(", "input_unit", ",", "dict", ")", ":", "missing", "=", "object", "(", ")", "directory", "=", "input_unit", ".", "get", "(", "'dir'", ",", "missing", ")", "dir_pointer", "=", "input_unit", ".", "get", "(", "'dir_pointer'", ",", "missing", ")", "if", "directory", "is", "missing", "and", "dir_pointer", "is", "missing", ":", "raise", "ConfigurationError", "(", "'Input directory dict must contain'", "' \"dir\" or \"dir_pointer\" element (but not both)'", ")", "if", "directory", "is", "not", "missing", "and", "dir_pointer", "is", "not", "missing", ":", "raise", "ConfigurationError", "(", "'Input directory dict must not contain'", "' both \"dir\" and \"dir_pointer\" elements'", ")", "if", "dir_pointer", "is", "not", "missing", ":", "directory", "=", "cls", ".", "_get_pointer", "(", "dir_pointer", ")", "out", ".", "extend", "(", "cls", ".", "_get_directory_files", "(", "directory", "=", "directory", ",", "pattern", "=", "input_unit", ".", "get", "(", "'pattern'", ")", ",", "include_dot_files", "=", "input_unit", ".", "get", "(", "'include_dot_files'", ",", "False", ")", ")", ")", "else", ":", "raise", "ConfigurationError", "(", "'Input must only contain strings or dicts'", ")", "return", "out" ]
Get list of input files according to input definition. Input definition can be: - str: specifying a filename - list of str: specifying a list of filenames - dict with "dir" and optional "pattern" parameters: specifying the toplevel directory under which input files will be sought and an optional filepath pattern
[ "Get", "list", "of", "input", "files", "according", "to", "input", "definition", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/fs_reader.py#L59-L103
train
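For reference, here are the input specification shapes that _get_input_files above accepts, with illustrative values:

spec_single = '/data/part-00000.jl'                       # plain filename
spec_list = ['/data/a.jl', '/data/b.jl']                  # list of filenames
spec_dir = {'dir': '/data/dumps', 'pattern': r'.*\.jl$'}  # toplevel dir + optional pattern
spec_pointer = {'dir_pointer': '/data/LATEST'}            # resolved via cls._get_pointer()
# A dict with both 'dir' and 'dir_pointer' (or with neither), or any value that
# is not a str/list/dict, raises ConfigurationError.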
scrapinghub/exporters
exporters/readers/kafka_random_reader.py
KafkaRandomReader.consume_messages
def consume_messages(self, batchsize): """ Get messages batch from the reservoir """ if not self._reservoir: self.finished = True return for msg in self._reservoir[:batchsize]: yield msg self._reservoir = self._reservoir[batchsize:]
python
def consume_messages(self, batchsize): """ Get messages batch from the reservoir """ if not self._reservoir: self.finished = True return for msg in self._reservoir[:batchsize]: yield msg self._reservoir = self._reservoir[batchsize:]
[ "def", "consume_messages", "(", "self", ",", "batchsize", ")", ":", "if", "not", "self", ".", "_reservoir", ":", "self", ".", "finished", "=", "True", "return", "for", "msg", "in", "self", ".", "_reservoir", "[", ":", "batchsize", "]", ":", "yield", "msg", "self", ".", "_reservoir", "=", "self", ".", "_reservoir", "[", "batchsize", ":", "]" ]
Get messages batch from the reservoir
[ "Get", "messages", "batch", "from", "the", "reservoir" ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/kafka_random_reader.py#L127-L134
train
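A standalone sketch of the slice-based batching in consume_messages above; the class below is a trimmed stand-in for the reader's reservoir handling:

class Reservoir(object):
    def __init__(self, messages):
        self._reservoir = list(messages)
        self.finished = False

    def consume_messages(self, batchsize):
        # Yield up to batchsize messages, then drop them from the reservoir.
        if not self._reservoir:
            self.finished = True
            return
        for msg in self._reservoir[:batchsize]:
            yield msg
        self._reservoir = self._reservoir[batchsize:]

r = Reservoir(['m1', 'm2', 'm3'])
print(list(r.consume_messages(2)))  # ['m1', 'm2']
print(list(r.consume_messages(2)))  # ['m3']
print(list(r.consume_messages(2)), r.finished)  # [] True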
scrapinghub/exporters
exporters/readers/kafka_random_reader.py
KafkaRandomReader.decompress_messages
def decompress_messages(self, offmsgs): """ Decompress pre-defined compressed fields for each message. Msgs should be unpacked before this step. """ for offmsg in offmsgs: yield offmsg.message.key, self.decompress_fun(offmsg.message.value)
python
def decompress_messages(self, offmsgs): """ Decompress pre-defined compressed fields for each message. Msgs should be unpacked before this step. """ for offmsg in offmsgs: yield offmsg.message.key, self.decompress_fun(offmsg.message.value)
[ "def", "decompress_messages", "(", "self", ",", "offmsgs", ")", ":", "for", "offmsg", "in", "offmsgs", ":", "yield", "offmsg", ".", "message", ".", "key", ",", "self", ".", "decompress_fun", "(", "offmsg", ".", "message", ".", "value", ")" ]
Decompress pre-defined compressed fields for each message. Msgs should be unpacked before this step.
[ "Decompress", "pre", "-", "defined", "compressed", "fields", "for", "each", "message", ".", "Msgs", "should", "be", "unpacked", "before", "this", "step", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/kafka_random_reader.py#L136-L141
train
scrapinghub/exporters
exporters/filters/base_filter.py
BaseFilter.filter_batch
def filter_batch(self, batch): """ Receives the batch, filters it, and returns it. """ for item in batch: if self.filter(item): yield item else: self.set_metadata('filtered_out', self.get_metadata('filtered_out') + 1) self.total += 1 self._log_progress()
python
def filter_batch(self, batch): """ Receives the batch, filters it, and returns it. """ for item in batch: if self.filter(item): yield item else: self.set_metadata('filtered_out', self.get_metadata('filtered_out') + 1) self.total += 1 self._log_progress()
[ "def", "filter_batch", "(", "self", ",", "batch", ")", ":", "for", "item", "in", "batch", ":", "if", "self", ".", "filter", "(", "item", ")", ":", "yield", "item", "else", ":", "self", ".", "set_metadata", "(", "'filtered_out'", ",", "self", ".", "get_metadata", "(", "'filtered_out'", ")", "+", "1", ")", "self", ".", "total", "+=", "1", "self", ".", "_log_progress", "(", ")" ]
Receives the batch, filters it, and returns it.
[ "Receives", "the", "batch", "filters", "it", "and", "returns", "it", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/filters/base_filter.py#L24-L36
train
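The filtering itself reduces to a lazy generator; a minimal stand-in for filter_batch above, with the metadata counters and progress logging omitted:

def filter_batch(batch, predicate):
    for item in batch:
        if predicate(item):
            yield item

evens = filter_batch(range(10), lambda n: n % 2 == 0)
print(list(evens))  # [0, 2, 4, 6, 8]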
scrapinghub/exporters
exporters/writers/base_writer.py
BaseWriter.write_batch
def write_batch(self, batch): """ Buffer a batch of items to be written and update internal counters. Calling this method doesn't guarantee that all items have been written. To ensure everything has been written you need to call flush(). """ for item in batch: self.write_buffer.buffer(item) key = self.write_buffer.get_key_from_item(item) if self.write_buffer.should_write_buffer(key): self._write_current_buffer_for_group_key(key) self.increment_written_items() self._check_items_limit()
python
def write_batch(self, batch): """ Buffer a batch of items to be written and update internal counters. Calling this method doesn't guarantee that all items have been written. To ensure everything has been written you need to call flush(). """ for item in batch: self.write_buffer.buffer(item) key = self.write_buffer.get_key_from_item(item) if self.write_buffer.should_write_buffer(key): self._write_current_buffer_for_group_key(key) self.increment_written_items() self._check_items_limit()
[ "def", "write_batch", "(", "self", ",", "batch", ")", ":", "for", "item", "in", "batch", ":", "self", ".", "write_buffer", ".", "buffer", "(", "item", ")", "key", "=", "self", ".", "write_buffer", ".", "get_key_from_item", "(", "item", ")", "if", "self", ".", "write_buffer", ".", "should_write_buffer", "(", "key", ")", ":", "self", ".", "_write_current_buffer_for_group_key", "(", "key", ")", "self", ".", "increment_written_items", "(", ")", "self", ".", "_check_items_limit", "(", ")" ]
Buffer a batch of items to be written and update internal counters. Calling this method doesn't guarantee that all items have been written. To ensure everything has been written you need to call flush().
[ "Buffer", "a", "batch", "of", "items", "to", "be", "written", "and", "update", "internal", "counters", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L103-L116
train
scrapinghub/exporters
exporters/writers/base_writer.py
BaseWriter._check_items_limit
def _check_items_limit(self): """ Raise ItemsLimitReached if the writer reached the configured items limit. """ if self.items_limit and self.items_limit == self.get_metadata('items_count'): raise ItemsLimitReached('Finishing job after items_limit reached:' ' {} items written.'.format(self.get_metadata('items_count')))
python
def _check_items_limit(self): """ Raise ItemsLimitReached if the writer reached the configured items limit. """ if self.items_limit and self.items_limit == self.get_metadata('items_count'): raise ItemsLimitReached('Finishing job after items_limit reached:' ' {} items written.'.format(self.get_metadata('items_count')))
[ "def", "_check_items_limit", "(", "self", ")", ":", "if", "self", ".", "items_limit", "and", "self", ".", "items_limit", "==", "self", ".", "get_metadata", "(", "'items_count'", ")", ":", "raise", "ItemsLimitReached", "(", "'Finishing job after items_limit reached:'", "' {} items written.'", ".", "format", "(", "self", ".", "get_metadata", "(", "'items_count'", ")", ")", ")" ]
Raise ItemsLimitReached if the writer reached the configured items limit.
[ "Raise", "ItemsLimitReached", "if", "the", "writer", "reached", "the", "configured", "items", "limit", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L118-L124
train
scrapinghub/exporters
exporters/writers/base_writer.py
BaseWriter.flush
def flush(self): """ Ensure all remaining buffers are written. """ for key in self.grouping_info.keys(): if self._should_flush(key): self._write_current_buffer_for_group_key(key)
python
def flush(self): """ Ensure all remaining buffers are written. """ for key in self.grouping_info.keys(): if self._should_flush(key): self._write_current_buffer_for_group_key(key)
[ "def", "flush", "(", "self", ")", ":", "for", "key", "in", "self", ".", "grouping_info", ".", "keys", "(", ")", ":", "if", "self", ".", "_should_flush", "(", "key", ")", ":", "self", ".", "_write_current_buffer_for_group_key", "(", "key", ")" ]
Ensure all remaining buffers are written.
[ "Ensure", "all", "remaining", "buffers", "are", "written", "." ]
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L129-L135
train
opendatateam/udata
udata/assets.py
has_manifest
def has_manifest(app, filename='manifest.json'): '''Verify the existence of a JSON assets manifest''' try: return pkg_resources.resource_exists(app, filename) except ImportError: return os.path.isabs(filename) and os.path.exists(filename)
python
def has_manifest(app, filename='manifest.json'): '''Verify the existence of a JSON assets manifest''' try: return pkg_resources.resource_exists(app, filename) except ImportError: return os.path.isabs(filename) and os.path.exists(filename)
[ "def", "has_manifest", "(", "app", ",", "filename", "=", "'manifest.json'", ")", ":", "try", ":", "return", "pkg_resources", ".", "resource_exists", "(", "app", ",", "filename", ")", "except", "ImportError", ":", "return", "os", ".", "path", ".", "isabs", "(", "filename", ")", "and", "os", ".", "path", ".", "exists", "(", "filename", ")" ]
Verify the existence of a JSON assets manifest
[ "Verify", "the", "existance", "of", "a", "JSON", "assets", "manifest" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L19-L24
train
opendatateam/udata
udata/assets.py
register_manifest
def register_manifest(app, filename='manifest.json'): '''Register an assets json manifest''' if current_app.config.get('TESTING'): return # Do not spend time here when testing if not has_manifest(app, filename): msg = '{filename} not found for {app}'.format(**locals()) raise ValueError(msg) manifest = _manifests.get(app, {}) manifest.update(load_manifest(app, filename)) _manifests[app] = manifest
python
def register_manifest(app, filename='manifest.json'): '''Register an assets json manifest''' if current_app.config.get('TESTING'): return # Do not spend time here when testing if not has_manifest(app, filename): msg = '{filename} not found for {app}'.format(**locals()) raise ValueError(msg) manifest = _manifests.get(app, {}) manifest.update(load_manifest(app, filename)) _manifests[app] = manifest
[ "def", "register_manifest", "(", "app", ",", "filename", "=", "'manifest.json'", ")", ":", "if", "current_app", ".", "config", ".", "get", "(", "'TESTING'", ")", ":", "return", "# Do not spend time here when testing", "if", "not", "has_manifest", "(", "app", ",", "filename", ")", ":", "msg", "=", "'{filename} not found for {app}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "raise", "ValueError", "(", "msg", ")", "manifest", "=", "_manifests", ".", "get", "(", "app", ",", "{", "}", ")", "manifest", ".", "update", "(", "load_manifest", "(", "app", ",", "filename", ")", ")", "_manifests", "[", "app", "]", "=", "manifest" ]
Register an assets json manifest
[ "Register", "an", "assets", "json", "manifest" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L27-L36
train
opendatateam/udata
udata/assets.py
load_manifest
def load_manifest(app, filename='manifest.json'): '''Load an assets json manifest''' if os.path.isabs(filename): path = filename else: path = pkg_resources.resource_filename(app, filename) with io.open(path, mode='r', encoding='utf8') as stream: data = json.load(stream) _registered_manifests[app] = path return data
python
def load_manifest(app, filename='manifest.json'): '''Load an assets json manifest''' if os.path.isabs(filename): path = filename else: path = pkg_resources.resource_filename(app, filename) with io.open(path, mode='r', encoding='utf8') as stream: data = json.load(stream) _registered_manifests[app] = path return data
[ "def", "load_manifest", "(", "app", ",", "filename", "=", "'manifest.json'", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "path", "=", "filename", "else", ":", "path", "=", "pkg_resources", ".", "resource_filename", "(", "app", ",", "filename", ")", "with", "io", ".", "open", "(", "path", ",", "mode", "=", "'r'", ",", "encoding", "=", "'utf8'", ")", "as", "stream", ":", "data", "=", "json", ".", "load", "(", "stream", ")", "_registered_manifests", "[", "app", "]", "=", "path", "return", "data" ]
Load an assets json manifest
[ "Load", "an", "assets", "json", "manifest" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L39-L48
train
opendatateam/udata
udata/assets.py
from_manifest
def from_manifest(app, filename, raw=False, **kwargs): ''' Get the path to a static file for a given app entry of a given type. :param str app: The application key to which is tied this manifest :param str filename: the original filename (without hash) :param bool raw: if True, doesn't add prefix to the manifest :return: the resolved file path from manifest :rtype: str ''' cfg = current_app.config if current_app.config.get('TESTING'): return # Do not spend time here when testing path = _manifests[app][filename] if not raw and cfg.get('CDN_DOMAIN') and not cfg.get('CDN_DEBUG'): scheme = 'https' if cfg.get('CDN_HTTPS') else request.scheme prefix = '{}://'.format(scheme) if not path.startswith('/'): # CDN_DOMAIN has no trailing slash path = '/' + path return ''.join((prefix, cfg['CDN_DOMAIN'], path)) elif not raw and kwargs.get('external', False): if path.startswith('/'): # request.host_url has a trailing slash path = path[1:] return ''.join((request.host_url, path)) return path
python
def from_manifest(app, filename, raw=False, **kwargs): ''' Get the path to a static file for a given app entry of a given type. :param str app: The application key to which is tied this manifest :param str filename: the original filename (without hash) :param bool raw: if True, doesn't add prefix to the manifest :return: the resolved file path from manifest :rtype: str ''' cfg = current_app.config if current_app.config.get('TESTING'): return # Do not spend time here when testing path = _manifests[app][filename] if not raw and cfg.get('CDN_DOMAIN') and not cfg.get('CDN_DEBUG'): scheme = 'https' if cfg.get('CDN_HTTPS') else request.scheme prefix = '{}://'.format(scheme) if not path.startswith('/'): # CDN_DOMAIN has no trailing slash path = '/' + path return ''.join((prefix, cfg['CDN_DOMAIN'], path)) elif not raw and kwargs.get('external', False): if path.startswith('/'): # request.host_url has a trailing slash path = path[1:] return ''.join((request.host_url, path)) return path
[ "def", "from_manifest", "(", "app", ",", "filename", ",", "raw", "=", "False", ",", "*", "*", "kwargs", ")", ":", "cfg", "=", "current_app", ".", "config", "if", "current_app", ".", "config", ".", "get", "(", "'TESTING'", ")", ":", "return", "# Do not spend time here when testing", "path", "=", "_manifests", "[", "app", "]", "[", "filename", "]", "if", "not", "raw", "and", "cfg", ".", "get", "(", "'CDN_DOMAIN'", ")", "and", "not", "cfg", ".", "get", "(", "'CDN_DEBUG'", ")", ":", "scheme", "=", "'https'", "if", "cfg", ".", "get", "(", "'CDN_HTTPS'", ")", "else", "request", ".", "scheme", "prefix", "=", "'{}://'", ".", "format", "(", "scheme", ")", "if", "not", "path", ".", "startswith", "(", "'/'", ")", ":", "# CDN_DOMAIN has no trailing slash", "path", "=", "'/'", "+", "path", "return", "''", ".", "join", "(", "(", "prefix", ",", "cfg", "[", "'CDN_DOMAIN'", "]", ",", "path", ")", ")", "elif", "not", "raw", "and", "kwargs", ".", "get", "(", "'external'", ",", "False", ")", ":", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "# request.host_url has a trailing slash", "path", "=", "path", "[", "1", ":", "]", "return", "''", ".", "join", "(", "(", "request", ".", "host_url", ",", "path", ")", ")", "return", "path" ]
Get the path to a static file for a given app entry of a given type. :param str app: The application key to which is tied this manifest :param str filename: the original filename (without hash) :param bool raw: if True, doesn't add prefix to the manifest :return: the resolved file path from manifest :rtype: str
[ "Get", "the", "path", "to", "a", "static", "file", "for", "a", "given", "app", "entry", "of", "a", "given", "type", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L58-L85
train
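A standalone sketch of the CDN branch in from_manifest above; the domain and path are hypothetical:

def cdn_asset_url(path, cdn_domain, https=True):
    scheme = 'https' if https else 'http'
    if not path.startswith('/'):  # CDN_DOMAIN has no trailing slash
        path = '/' + path
    return ''.join(('{0}://'.format(scheme), cdn_domain, path))

print(cdn_asset_url('udata/theme.abc123.css', 'cdn.example.org'))
# https://cdn.example.org/udata/theme.abc123.css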
opendatateam/udata
udata/assets.py
cdn_for
def cdn_for(endpoint, **kwargs): ''' Get a CDN URL for static assets. Do not use as a replacement for all flask.url_for calls as it is only meant for CDN assets URLs. (There is some extra round trip whose cost is justified by the CDN assets performance improvements) ''' if current_app.config['CDN_DOMAIN']: if not current_app.config.get('CDN_DEBUG'): kwargs.pop('_external', None) # Avoid the _external parameter in URL return cdn_url_for(endpoint, **kwargs) return url_for(endpoint, **kwargs)
python
def cdn_for(endpoint, **kwargs): ''' Get a CDN URL for static assets. Do not use as a replacement for all flask.url_for calls as it is only meant for CDN assets URLs. (There is some extra round trip whose cost is justified by the CDN assets performance improvements) ''' if current_app.config['CDN_DOMAIN']: if not current_app.config.get('CDN_DEBUG'): kwargs.pop('_external', None) # Avoid the _external parameter in URL return cdn_url_for(endpoint, **kwargs) return url_for(endpoint, **kwargs)
[ "def", "cdn_for", "(", "endpoint", ",", "*", "*", "kwargs", ")", ":", "if", "current_app", ".", "config", "[", "'CDN_DOMAIN'", "]", ":", "if", "not", "current_app", ".", "config", ".", "get", "(", "'CDN_DEBUG'", ")", ":", "kwargs", ".", "pop", "(", "'_external'", ",", "None", ")", "# Avoid the _external parameter in URL", "return", "cdn_url_for", "(", "endpoint", ",", "*", "*", "kwargs", ")", "return", "url_for", "(", "endpoint", ",", "*", "*", "kwargs", ")" ]
Get a CDN URL for static assets. Do not use as a replacement for all flask.url_for calls as it is only meant for CDN assets URLs. (There is some extra round trip whose cost is justified by the CDN assets performance improvements)
[ "Get", "a", "CDN", "URL", "for", "a", "static", "assets", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L92-L105
train
opendatateam/udata
udata/models/queryset.py
UDataQuerySet.get_or_create
def get_or_create(self, write_concern=None, auto_save=True, *q_objs, **query): """Retrieve unique object or create, if it doesn't exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or created object and ``created`` is a boolean specifying whether a new object was created. Adapted from: https://github.com/MongoEngine/mongoengine/ pull/1029/files#diff-05c70acbd0634d6d05e4a6e3a9b7d66b """ defaults = query.pop('defaults', {}) try: doc = self.get(*q_objs, **query) return doc, False except self._document.DoesNotExist: query.update(defaults) doc = self._document(**query) if auto_save: doc.save(write_concern=write_concern) return doc, True
python
def get_or_create(self, write_concern=None, auto_save=True, *q_objs, **query): """Retrieve unique object or create, if it doesn't exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or created object and ``created`` is a boolean specifying whether a new object was created. Adapted from: https://github.com/MongoEngine/mongoengine/ pull/1029/files#diff-05c70acbd0634d6d05e4a6e3a9b7d66b """ defaults = query.pop('defaults', {}) try: doc = self.get(*q_objs, **query) return doc, False except self._document.DoesNotExist: query.update(defaults) doc = self._document(**query) if auto_save: doc.save(write_concern=write_concern) return doc, True
[ "def", "get_or_create", "(", "self", ",", "write_concern", "=", "None", ",", "auto_save", "=", "True", ",", "*", "q_objs", ",", "*", "*", "query", ")", ":", "defaults", "=", "query", ".", "pop", "(", "'defaults'", ",", "{", "}", ")", "try", ":", "doc", "=", "self", ".", "get", "(", "*", "q_objs", ",", "*", "*", "query", ")", "return", "doc", ",", "False", "except", "self", ".", "_document", ".", "DoesNotExist", ":", "query", ".", "update", "(", "defaults", ")", "doc", "=", "self", ".", "_document", "(", "*", "*", "query", ")", "if", "auto_save", ":", "doc", ".", "save", "(", "write_concern", "=", "write_concern", ")", "return", "doc", ",", "True" ]
Retrieve unique object or create, if it doesn't exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or created object and ``created`` is a boolean specifying whether a new object was created. Adapted from: https://github.com/MongoEngine/mongoengine/ pull/1029/files#diff-05c70acbd0634d6d05e4a6e3a9b7d66b
[ "Retrieve", "unique", "object", "or", "create", "if", "it", "doesn", "t", "exist", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/queryset.py#L50-L73
train
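A hedged usage sketch for get_or_create above. The Tag document is hypothetical, it assumes UDataQuerySet is importable, and running it requires a MongoDB connection:

import mongoengine as me

me.connect('example-db')  # illustrative only; needs a running MongoDB

class Tag(me.Document):
    meta = {'queryset_class': UDataQuerySet}  # wire in the queryset above
    name = me.StringField(required=True, unique=True)
    color = me.StringField()

tag, created = Tag.objects.get_or_create(name='climate',
                                         defaults={'color': '#00aa00'})
# First call: created is True and color comes from defaults.
# Later calls with name='climate' return the existing document,
# created is False, and the defaults are ignored.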
opendatateam/udata
udata/models/queryset.py
UDataQuerySet.generic_in
def generic_in(self, **kwargs): '''Bypass buggy GenericReferenceField querying issue''' query = {} for key, value in kwargs.items(): if not value: continue # Optimize query for when there is only one value if isinstance(value, (list, tuple)) and len(value) == 1: value = value[0] if isinstance(value, (list, tuple)): if all(isinstance(v, basestring) for v in value): ids = [ObjectId(v) for v in value] query['{0}._ref.$id'.format(key)] = {'$in': ids} elif all(isinstance(v, DBRef) for v in value): query['{0}._ref'.format(key)] = {'$in': value} elif all(isinstance(v, ObjectId) for v in value): query['{0}._ref.$id'.format(key)] = {'$in': value} elif isinstance(value, ObjectId): query['{0}._ref.$id'.format(key)] = value elif isinstance(value, basestring): query['{0}._ref.$id'.format(key)] = ObjectId(value) else: self.error('expect a list of string, ObjectId or DBRef') return self(__raw__=query)
python
def generic_in(self, **kwargs): '''Bypass buggy GenericReferenceField querying issue''' query = {} for key, value in kwargs.items(): if not value: continue # Optimize query for when there is only one value if isinstance(value, (list, tuple)) and len(value) == 1: value = value[0] if isinstance(value, (list, tuple)): if all(isinstance(v, basestring) for v in value): ids = [ObjectId(v) for v in value] query['{0}._ref.$id'.format(key)] = {'$in': ids} elif all(isinstance(v, DBRef) for v in value): query['{0}._ref'.format(key)] = {'$in': value} elif all(isinstance(v, ObjectId) for v in value): query['{0}._ref.$id'.format(key)] = {'$in': value} elif isinstance(value, ObjectId): query['{0}._ref.$id'.format(key)] = value elif isinstance(value, basestring): query['{0}._ref.$id'.format(key)] = ObjectId(value) else: self.error('expect a list of string, ObjectId or DBRef') return self(__raw__=query)
[ "def", "generic_in", "(", "self", ",", "*", "*", "kwargs", ")", ":", "query", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "value", ":", "continue", "# Optimize query for when there is only one value", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", "and", "len", "(", "value", ")", "==", "1", ":", "value", "=", "value", "[", "0", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "all", "(", "isinstance", "(", "v", ",", "basestring", ")", "for", "v", "in", "value", ")", ":", "ids", "=", "[", "ObjectId", "(", "v", ")", "for", "v", "in", "value", "]", "query", "[", "'{0}._ref.$id'", ".", "format", "(", "key", ")", "]", "=", "{", "'$in'", ":", "ids", "}", "elif", "all", "(", "isinstance", "(", "v", ",", "DBRef", ")", "for", "v", "in", "value", ")", ":", "query", "[", "'{0}._ref'", ".", "format", "(", "key", ")", "]", "=", "{", "'$in'", ":", "value", "}", "elif", "all", "(", "isinstance", "(", "v", ",", "ObjectId", ")", "for", "v", "in", "value", ")", ":", "query", "[", "'{0}._ref.$id'", ".", "format", "(", "key", ")", "]", "=", "{", "'$in'", ":", "value", "}", "elif", "isinstance", "(", "value", ",", "ObjectId", ")", ":", "query", "[", "'{0}._ref.$id'", ".", "format", "(", "key", ")", "]", "=", "value", "elif", "isinstance", "(", "value", ",", "basestring", ")", ":", "query", "[", "'{0}._ref.$id'", ".", "format", "(", "key", ")", "]", "=", "ObjectId", "(", "value", ")", "else", ":", "self", ".", "error", "(", "'expect a list of string, ObjectId or DBRef'", ")", "return", "self", "(", "__raw__", "=", "query", ")" ]
Bypass buggy GenericReferenceField querying issue
[ "Bypass", "buggy", "GenericReferenceField", "querying", "issue" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/queryset.py#L75-L98
train
opendatateam/udata
udata/core/issues/notifications.py
issues_notifications
def issues_notifications(user): '''Notify user about open issues''' notifications = [] # Only fetch required fields for notification serialization # Greatly improves performance and memory usage qs = issues_for(user).only('id', 'title', 'created', 'subject') # Do not dereference subject (so it's a DBRef) # Also improves performance and memory usage for issue in qs.no_dereference(): notifications.append((issue.created, { 'id': issue.id, 'title': issue.title, 'subject': { 'id': issue.subject['_ref'].id, 'type': issue.subject['_cls'].lower(), } })) return notifications
python
def issues_notifications(user): '''Notify user about open issues''' notifications = [] # Only fetch required fields for notification serialization # Greatly improves performance and memory usage qs = issues_for(user).only('id', 'title', 'created', 'subject') # Do not dereference subject (so it's a DBRef) # Also improves performance and memory usage for issue in qs.no_dereference(): notifications.append((issue.created, { 'id': issue.id, 'title': issue.title, 'subject': { 'id': issue.subject['_ref'].id, 'type': issue.subject['_cls'].lower(), } })) return notifications
[ "def", "issues_notifications", "(", "user", ")", ":", "notifications", "=", "[", "]", "# Only fetch required fields for notification serialization", "# Greatly improve performances and memory usage", "qs", "=", "issues_for", "(", "user", ")", ".", "only", "(", "'id'", ",", "'title'", ",", "'created'", ",", "'subject'", ")", "# Do not dereference subject (so it's a DBRef)", "# Also improve performances and memory usage", "for", "issue", "in", "qs", ".", "no_dereference", "(", ")", ":", "notifications", ".", "append", "(", "(", "issue", ".", "created", ",", "{", "'id'", ":", "issue", ".", "id", ",", "'title'", ":", "issue", ".", "title", ",", "'subject'", ":", "{", "'id'", ":", "issue", ".", "subject", "[", "'_ref'", "]", ".", "id", ",", "'type'", ":", "issue", ".", "subject", "[", "'_cls'", "]", ".", "lower", "(", ")", ",", "}", "}", ")", ")", "return", "notifications" ]
Notify user about open issues
[ "Notify", "user", "about", "open", "issues" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/issues/notifications.py#L15-L35
train
opendatateam/udata
udata/features/identicon/backends.py
get_config
def get_config(key): ''' Get an identicon configuration parameter. Precedence order is: - application config (`udata.cfg`) - theme config - default ''' key = 'AVATAR_{0}'.format(key.upper()) local_config = current_app.config.get(key) return local_config or getattr(theme.current, key, DEFAULTS[key])
python
def get_config(key): ''' Get an identicon configuration parameter. Precedence order is: - application config (`udata.cfg`) - theme config - default ''' key = 'AVATAR_{0}'.format(key.upper()) local_config = current_app.config.get(key) return local_config or getattr(theme.current, key, DEFAULTS[key])
[ "def", "get_config", "(", "key", ")", ":", "key", "=", "'AVATAR_{0}'", ".", "format", "(", "key", ".", "upper", "(", ")", ")", "local_config", "=", "current_app", ".", "config", ".", "get", "(", "key", ")", "return", "local_config", "or", "getattr", "(", "theme", ".", "current", ",", "key", ",", "DEFAULTS", "[", "key", "]", ")" ]
Get an identicon configuration parameter. Precedence order is: - application config (`udata.cfg`) - theme config - default
[ "Get", "an", "identicon", "configuration", "parameter", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L41-L52
train
opendatateam/udata
udata/features/identicon/backends.py
get_provider
def get_provider(): '''Get the current provider from config''' name = get_config('provider') available = entrypoints.get_all('udata.avatars') if name not in available: raise ValueError('Unknown avatar provider: {0}'.format(name)) return available[name]
python
def get_provider(): '''Get the current provider from config''' name = get_config('provider') available = entrypoints.get_all('udata.avatars') if name not in available: raise ValueError('Unknown avatar provider: {0}'.format(name)) return available[name]
[ "def", "get_provider", "(", ")", ":", "name", "=", "get_config", "(", "'provider'", ")", "available", "=", "entrypoints", ".", "get_all", "(", "'udata.avatars'", ")", "if", "name", "not", "in", "available", ":", "raise", "ValueError", "(", "'Unknown avatar provider: {0}'", ".", "format", "(", "name", ")", ")", "return", "available", "[", "name", "]" ]
Get the current provider from config
[ "Get", "the", "current", "provider", "from", "config" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L59-L65
train
opendatateam/udata
udata/features/identicon/backends.py
generate_pydenticon
def generate_pydenticon(identifier, size): ''' Use pydenticon to generate an identicon image. All parameters are extracted from configuration. ''' blocks_size = get_internal_config('size') foreground = get_internal_config('foreground') background = get_internal_config('background') generator = pydenticon.Generator(blocks_size, blocks_size, digest=hashlib.sha1, foreground=foreground, background=background) # Pydenticon adds padding to the size and as a consequence # we need to compute the size without the padding padding = int(round(get_internal_config('padding') * size / 100.)) size = size - 2 * padding padding = (padding, ) * 4 return generator.generate(identifier, size, size, padding=padding, output_format='png')
python
def generate_pydenticon(identifier, size): ''' Use pydenticon to generate an identicon image. All parameters are extracted from configuration. ''' blocks_size = get_internal_config('size') foreground = get_internal_config('foreground') background = get_internal_config('background') generator = pydenticon.Generator(blocks_size, blocks_size, digest=hashlib.sha1, foreground=foreground, background=background) # Pydenticon adds padding to the size and as a consequence # we need to compute the size without the padding padding = int(round(get_internal_config('padding') * size / 100.)) size = size - 2 * padding padding = (padding, ) * 4 return generator.generate(identifier, size, size, padding=padding, output_format='png')
[ "def", "generate_pydenticon", "(", "identifier", ",", "size", ")", ":", "blocks_size", "=", "get_internal_config", "(", "'size'", ")", "foreground", "=", "get_internal_config", "(", "'foreground'", ")", "background", "=", "get_internal_config", "(", "'background'", ")", "generator", "=", "pydenticon", ".", "Generator", "(", "blocks_size", ",", "blocks_size", ",", "digest", "=", "hashlib", ".", "sha1", ",", "foreground", "=", "foreground", ",", "background", "=", "background", ")", "# Pydenticon adds padding to the size and as a consequence", "# we need to compute the size without the padding", "padding", "=", "int", "(", "round", "(", "get_internal_config", "(", "'padding'", ")", "*", "size", "/", "100.", ")", ")", "size", "=", "size", "-", "2", "*", "padding", "padding", "=", "(", "padding", ",", ")", "*", "4", "return", "generator", ".", "generate", "(", "identifier", ",", "size", ",", "size", ",", "padding", "=", "padding", ",", "output_format", "=", "'png'", ")" ]
Use pydenticon to generate an identicon image. All parameters are extracted from configuration.
[ "Use", "pydenticon", "to", "generate", "an", "identicon", "image", ".", "All", "parameters", "are", "extracted", "from", "configuration", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L80-L100
train
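The padding arithmetic in generate_pydenticon above, worked through with hypothetical numbers (a requested 100px avatar and a configured padding of 10%):

size = 100
padding_pct = 10  # hypothetical configured padding value
padding = int(round(padding_pct * size / 100.))  # 10px per side
inner = size - 2 * padding                       # 80px actually drawn
print(padding, inner, (padding,) * 4)            # 10 80 (10, 10, 10, 10)
# 80px of identicon + 10px padding on all four sides == the requested 100px.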
opendatateam/udata
udata/features/identicon/backends.py
adorable
def adorable(identifier, size): ''' Adorable Avatars provider Simply redirect to the external API. See: http://avatars.adorable.io/ ''' url = ADORABLE_AVATARS_URL.format(identifier=identifier, size=size) return redirect(url)
python
def adorable(identifier, size): ''' Adorable Avatars provider Simply redirect to the external API. See: http://avatars.adorable.io/ ''' url = ADORABLE_AVATARS_URL.format(identifier=identifier, size=size) return redirect(url)
[ "def", "adorable", "(", "identifier", ",", "size", ")", ":", "url", "=", "ADORABLE_AVATARS_URL", ".", "format", "(", "identifier", "=", "identifier", ",", "size", "=", "size", ")", "return", "redirect", "(", "url", ")" ]
Adorable Avatars provider Simply redirect to the external API. See: http://avatars.adorable.io/
[ "Adorable", "Avatars", "provider" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L116-L125
train
opendatateam/udata
udata/core/dataset/commands.py
licenses
def licenses(source=DEFAULT_LICENSE_FILE): '''Feed the licenses from a JSON file''' if source.startswith('http'): json_licenses = requests.get(source).json() else: with open(source) as fp: json_licenses = json.load(fp) if len(json_licenses): log.info('Dropping existing licenses') License.drop_collection() for json_license in json_licenses: flags = [] for field, flag in FLAGS_MAP.items(): if json_license.get(field, False): flags.append(flag) license = License.objects.create( id=json_license['id'], title=json_license['title'], url=json_license['url'] or None, maintainer=json_license['maintainer'] or None, flags=flags, active=json_license.get('active', False), alternate_urls=json_license.get('alternate_urls', []), alternate_titles=json_license.get('alternate_titles', []), ) log.info('Added license "%s"', license.title) try: License.objects.get(id=DEFAULT_LICENSE['id']) except License.DoesNotExist: License.objects.create(**DEFAULT_LICENSE) log.info('Added license "%s"', DEFAULT_LICENSE['title']) success('Done')
python
def licenses(source=DEFAULT_LICENSE_FILE): '''Feed the licenses from a JSON file''' if source.startswith('http'): json_licenses = requests.get(source).json() else: with open(source) as fp: json_licenses = json.load(fp) if len(json_licenses): log.info('Dropping existing licenses') License.drop_collection() for json_license in json_licenses: flags = [] for field, flag in FLAGS_MAP.items(): if json_license.get(field, False): flags.append(flag) license = License.objects.create( id=json_license['id'], title=json_license['title'], url=json_license['url'] or None, maintainer=json_license['maintainer'] or None, flags=flags, active=json_license.get('active', False), alternate_urls=json_license.get('alternate_urls', []), alternate_titles=json_license.get('alternate_titles', []), ) log.info('Added license "%s"', license.title) try: License.objects.get(id=DEFAULT_LICENSE['id']) except License.DoesNotExist: License.objects.create(**DEFAULT_LICENSE) log.info('Added license "%s"', DEFAULT_LICENSE['title']) success('Done')
[ "def", "licenses", "(", "source", "=", "DEFAULT_LICENSE_FILE", ")", ":", "if", "source", ".", "startswith", "(", "'http'", ")", ":", "json_licenses", "=", "requests", ".", "get", "(", "source", ")", ".", "json", "(", ")", "else", ":", "with", "open", "(", "source", ")", "as", "fp", ":", "json_licenses", "=", "json", ".", "load", "(", "fp", ")", "if", "len", "(", "json_licenses", ")", ":", "log", ".", "info", "(", "'Dropping existing licenses'", ")", "License", ".", "drop_collection", "(", ")", "for", "json_license", "in", "json_licenses", ":", "flags", "=", "[", "]", "for", "field", ",", "flag", "in", "FLAGS_MAP", ".", "items", "(", ")", ":", "if", "json_license", ".", "get", "(", "field", ",", "False", ")", ":", "flags", ".", "append", "(", "flag", ")", "license", "=", "License", ".", "objects", ".", "create", "(", "id", "=", "json_license", "[", "'id'", "]", ",", "title", "=", "json_license", "[", "'title'", "]", ",", "url", "=", "json_license", "[", "'url'", "]", "or", "None", ",", "maintainer", "=", "json_license", "[", "'maintainer'", "]", "or", "None", ",", "flags", "=", "flags", ",", "active", "=", "json_license", ".", "get", "(", "'active'", ",", "False", ")", ",", "alternate_urls", "=", "json_license", ".", "get", "(", "'alternate_urls'", ",", "[", "]", ")", ",", "alternate_titles", "=", "json_license", ".", "get", "(", "'alternate_titles'", ",", "[", "]", ")", ",", ")", "log", ".", "info", "(", "'Added license \"%s\"'", ",", "license", ".", "title", ")", "try", ":", "License", ".", "objects", ".", "get", "(", "id", "=", "DEFAULT_LICENSE", "[", "'id'", "]", ")", "except", "License", ".", "DoesNotExist", ":", "License", ".", "objects", ".", "create", "(", "*", "*", "DEFAULT_LICENSE", ")", "log", ".", "info", "(", "'Added license \"%s\"'", ",", "DEFAULT_LICENSE", "[", "'title'", "]", ")", "success", "(", "'Done'", ")" ]
Feed the licenses from a JSON file
[ "Feed", "the", "licenses", "from", "a", "JSON", "file" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/commands.py#L30-L64
train
opendatateam/udata
udata/core/spatial/forms.py
ZonesField.fetch_objects
def fetch_objects(self, geoids): ''' Custom object retrieval. Zones are resolved from their identifier instead of the default bulk fetch by ID. ''' zones = [] no_match = [] for geoid in geoids: zone = GeoZone.objects.resolve(geoid) if zone: zones.append(zone) else: no_match.append(geoid) if no_match: msg = _('Unknown geoid(s): {identifiers}').format( identifiers=', '.join(str(id) for id in no_match)) raise validators.ValidationError(msg) return zones
python
def fetch_objects(self, geoids): ''' Custom object retrieval. Zones are resolved from their identifier instead of the default bulk fetch by ID. ''' zones = [] no_match = [] for geoid in geoids: zone = GeoZone.objects.resolve(geoid) if zone: zones.append(zone) else: no_match.append(geoid) if no_match: msg = _('Unknown geoid(s): {identifiers}').format( identifiers=', '.join(str(id) for id in no_match)) raise validators.ValidationError(msg) return zones
[ "def", "fetch_objects", "(", "self", ",", "geoids", ")", ":", "zones", "=", "[", "]", "no_match", "=", "[", "]", "for", "geoid", "in", "geoids", ":", "zone", "=", "GeoZone", ".", "objects", ".", "resolve", "(", "geoid", ")", "if", "zone", ":", "zones", ".", "append", "(", "zone", ")", "else", ":", "no_match", ".", "append", "(", "geoid", ")", "if", "no_match", ":", "msg", "=", "_", "(", "'Unknown geoid(s): {identifiers}'", ")", ".", "format", "(", "identifiers", "=", "', '", ".", "join", "(", "str", "(", "id", ")", "for", "id", "in", "no_match", ")", ")", "raise", "validators", ".", "ValidationError", "(", "msg", ")", "return", "zones" ]
Custom object retrieval. Zones are resolved from their identifier instead of the default bulk fetch by ID.
[ "Custom", "object", "retrieval", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/forms.py#L34-L55
train
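The pattern in fetch_objects above collects every unresolved identifier before raising, so a single error reports all bad geoids instead of only the first one. A standalone sketch with a plain dict standing in for GeoZone.objects.resolve (the geoid value is illustrative):

known_zones = {'fr:commune:75056': 'Paris'}  # stand-in for GeoZone.objects

def fetch_objects(geoids):
    zones, no_match = [], []
    for geoid in geoids:
        zone = known_zones.get(geoid)
        if zone:
            zones.append(zone)
        else:
            no_match.append(geoid)
    if no_match:
        raise ValueError('Unknown geoid(s): {0}'.format(', '.join(no_match)))
    return zones

print(fetch_objects(['fr:commune:75056']))  # ['Paris']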
opendatateam/udata
tasks_helpers.py
lrun
def lrun(command, *args, **kwargs): '''Run a local command from project root''' return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)
python
def lrun(command, *args, **kwargs): '''Run a local command from project root''' return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)
[ "def", "lrun", "(", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "run", "(", "'cd {0} && {1}'", ".", "format", "(", "ROOT", ",", "command", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run a local command from project root
[ "Run", "a", "local", "command", "from", "project", "root" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/tasks_helpers.py#L37-L39
train
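lrun simply prefixes every command with a cd to the project root before delegating to the task runner's run helper. A self-contained sketch using subprocess in place of the invoke/fabric run that tasks_helpers.py actually imports:

import os
import subprocess

ROOT = os.path.dirname(os.path.abspath(__file__))

def run(command, *args, **kwargs):
    # stand-in for the invoke/fabric helper: execute through the shell
    return subprocess.run(command, shell=True, check=True, **kwargs)

def lrun(command, *args, **kwargs):
    '''Run a local command from project root'''
    return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)

lrun('ls')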
opendatateam/udata
udata/harvest/backends/dcat.py
DcatBackend.initialize
def initialize(self): '''List all datasets for a given ...''' fmt = guess_format(self.source.url) # if format can't be guessed from the url # we fall back on the declared Content-Type if not fmt: response = requests.head(self.source.url) mime_type = response.headers.get('Content-Type', '').split(';', 1)[0] if not mime_type: msg = 'Unable to detect format from extension or mime type' raise ValueError(msg) fmt = guess_format(mime_type) if not fmt: msg = 'Unsupported mime type "{0}"'.format(mime_type) raise ValueError(msg) graph = self.parse_graph(self.source.url, fmt) self.job.data = {'graph': graph.serialize(format='json-ld', indent=None)}
python
def initialize(self): '''List all datasets for a given ...''' fmt = guess_format(self.source.url) # if format can't be guessed from the url # we fall back on the declared Content-Type if not fmt: response = requests.head(self.source.url) mime_type = response.headers.get('Content-Type', '').split(';', 1)[0] if not mime_type: msg = 'Unable to detect format from extension or mime type' raise ValueError(msg) fmt = guess_format(mime_type) if not fmt: msg = 'Unsupported mime type "{0}"'.format(mime_type) raise ValueError(msg) graph = self.parse_graph(self.source.url, fmt) self.job.data = {'graph': graph.serialize(format='json-ld', indent=None)}
[ "def", "initialize", "(", "self", ")", ":", "fmt", "=", "guess_format", "(", "self", ".", "source", ".", "url", ")", "# if format can't be guessed from the url", "# we fallback on the declared Content-Type", "if", "not", "fmt", ":", "response", "=", "requests", ".", "head", "(", "self", ".", "source", ".", "url", ")", "mime_type", "=", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ".", "split", "(", "';'", ",", "1", ")", "[", "0", "]", "if", "not", "mime_type", ":", "msg", "=", "'Unable to detect format from extension or mime type'", "raise", "ValueError", "(", "msg", ")", "fmt", "=", "guess_format", "(", "mime_type", ")", "if", "not", "fmt", ":", "msg", "=", "'Unsupported mime type \"{0}\"'", ".", "format", "(", "mime_type", ")", "raise", "ValueError", "(", "msg", ")", "graph", "=", "self", ".", "parse_graph", "(", "self", ".", "source", ".", "url", ",", "fmt", ")", "self", ".", "job", ".", "data", "=", "{", "'graph'", ":", "graph", ".", "serialize", "(", "format", "=", "'json-ld'", ",", "indent", "=", "None", ")", "}" ]
List all datasets for a given ...
[ "List", "all", "datasets", "for", "a", "given", "..." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/dcat.py#L51-L67
train
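The interesting part of initialize() is the two-stage format detection: try the URL's extension first, then fall back to the Content-Type of a HEAD request. A reduced sketch with a hand-rolled format map standing in for udata's guess_format helper (the endpoint URL is hypothetical; nothing is fetched here because the extension resolves):

import requests

FORMAT_MAP = {  # illustrative subset of extension/mime-type mappings
    'ttl': 'turtle', 'text/turtle': 'turtle',
    'rdf': 'xml', 'application/rdf+xml': 'xml',
    'jsonld': 'json-ld', 'application/ld+json': 'json-ld',
}

def guess_format(string):
    # match either a trailing file extension or a full mime type
    return FORMAT_MAP.get(string.rsplit('.', 1)[-1]) or FORMAT_MAP.get(string)

url = 'https://example.org/catalog.ttl'  # hypothetical DCAT endpoint
fmt = guess_format(url)
if not fmt:
    mime = requests.head(url).headers.get('Content-Type', '').split(';', 1)[0]
    if not mime:
        raise ValueError('Unable to detect format from extension or mime type')
    fmt = guess_format(mime)
    if not fmt:
        raise ValueError('Unsupported mime type "{0}"'.format(mime))
print(fmt)  # 'turtle'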
opendatateam/udata
udata/commands/worker.py
get_tasks
def get_tasks(): '''Get a list of known tasks with their routing queue''' return { name: get_task_queue(name, cls) for name, cls in celery.tasks.items() # Exclude celery internal tasks if not name.startswith('celery.') # Exclude udata test tasks and not name.startswith('test-') }
python
def get_tasks(): '''Get a list of known tasks with their routing queue''' return { name: get_task_queue(name, cls) for name, cls in celery.tasks.items() # Exclude celery internal tasks if not name.startswith('celery.') # Exclude udata test tasks and not name.startswith('test-') }
[ "def", "get_tasks", "(", ")", ":", "return", "{", "name", ":", "get_task_queue", "(", "name", ",", "cls", ")", "for", "name", ",", "cls", "in", "celery", ".", "tasks", ".", "items", "(", ")", "# Exclude celery internal tasks", "if", "not", "name", ".", "startswith", "(", "'celery.'", ")", "# Exclude udata test tasks", "and", "not", "name", ".", "startswith", "(", "'test-'", ")", "}" ]
Get a list of known tasks with their routing queue
[ "Get", "a", "list", "of", "known", "tasks", "with", "their", "routing", "queue" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L97-L106
train
opendatateam/udata
udata/commands/worker.py
tasks
def tasks(): '''Display registered tasks with their queue''' tasks = get_tasks() longest = max(tasks.keys(), key=len) size = len(longest) for name, queue in sorted(tasks.items()): print('* {0}: {1}'.format(name.ljust(size), queue))
python
def tasks(): '''Display registered tasks with their queue''' tasks = get_tasks() longest = max(tasks.keys(), key=len) size = len(longest) for name, queue in sorted(tasks.items()): print('* {0}: {1}'.format(name.ljust(size), queue))
[ "def", "tasks", "(", ")", ":", "tasks", "=", "get_tasks", "(", ")", "longest", "=", "max", "(", "tasks", ".", "keys", "(", ")", ",", "key", "=", "len", ")", "size", "=", "len", "(", "longest", ")", "for", "name", ",", "queue", "in", "sorted", "(", "tasks", ".", "items", "(", ")", ")", ":", "print", "(", "'* {0}: {1}'", ".", "format", "(", "name", ".", "ljust", "(", "size", ")", ",", "queue", ")", ")" ]
Display registered tasks with their queue
[ "Display", "registered", "tasks", "with", "their", "queue" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L110-L116
train
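Put together, get_tasks() and tasks() produce an aligned two-column listing: every task name is padded to the longest one with str.ljust so the queue names line up. A dependency-free sketch with a hypothetical task-to-queue mapping:

tasks = {  # hypothetical mapping, as get_tasks() would return
    'harvest.run': 'low',
    'send-mail': 'high',
    'purge-datasets': 'default',
}
size = len(max(tasks.keys(), key=len))
for name, queue in sorted(tasks.items()):
    print('* {0}: {1}'.format(name.ljust(size), queue))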
opendatateam/udata
udata/commands/worker.py
status
def status(queue, munin, munin_config): """List queued tasks aggregated by name""" if munin_config: return status_print_config(queue) queues = get_queues(queue) for queue in queues: status_print_queue(queue, munin=munin) if not munin: print('-' * 40)
python
def status(queue, munin, munin_config): """List queued tasks aggregated by name""" if munin_config: return status_print_config(queue) queues = get_queues(queue) for queue in queues: status_print_queue(queue, munin=munin) if not munin: print('-' * 40)
[ "def", "status", "(", "queue", ",", "munin", ",", "munin_config", ")", ":", "if", "munin_config", ":", "return", "status_print_config", "(", "queue", ")", "queues", "=", "get_queues", "(", "queue", ")", "for", "queue", "in", "queues", ":", "status_print_queue", "(", "queue", ",", "munin", "=", "munin", ")", "if", "not", "munin", ":", "print", "(", "'-'", "*", "40", ")" ]
List queued tasks aggregated by name
[ "List", "queued", "tasks", "aggregated", "by", "name" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L125-L133
train
opendatateam/udata
udata/forms/fields.py
FieldHelper.pre_validate
def pre_validate(self, form): '''Calls preprocessors before pre_validation''' for preprocessor in self._preprocessors: preprocessor(form, self) super(FieldHelper, self).pre_validate(form)
python
def pre_validate(self, form): '''Calls preprocessors before pre_validation''' for preprocessor in self._preprocessors: preprocessor(form, self) super(FieldHelper, self).pre_validate(form)
[ "def", "pre_validate", "(", "self", ",", "form", ")", ":", "for", "preprocessor", "in", "self", ".", "_preprocessors", ":", "preprocessor", "(", "form", ",", "self", ")", "super", "(", "FieldHelper", ",", "self", ")", ".", "pre_validate", "(", "form", ")" ]
Calls preprocessors before pre_validation
[ "Calls", "preprocessors", "before", "pre_validation" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L55-L59
train
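FieldHelper.pre_validate gives each field a hook for callables that massage data before WTForms' own validation runs. A minimal standalone sketch of that hook (the Field class here is a bare stub, not udata's actual mixin chain):

class Field(object):
    def __init__(self, preprocessors=()):
        self._preprocessors = list(preprocessors)
        self.data = None

    def pre_validate(self, form):
        # run every registered preprocessor against (form, field)
        for preprocessor in self._preprocessors:
            preprocessor(form, self)

def strip_value(form, field):
    field.data = (field.data or '').strip()

field = Field(preprocessors=[strip_value])
field.data = '  hello '
field.pre_validate(form=None)
print(repr(field.data))  # 'hello'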
opendatateam/udata
udata/forms/fields.py
EmptyNone.process_formdata
def process_formdata(self, valuelist): '''Replace empty values by None''' super(EmptyNone, self).process_formdata(valuelist) self.data = self.data or None
python
def process_formdata(self, valuelist): '''Replace empty values by None''' super(EmptyNone, self).process_formdata(valuelist) self.data = self.data or None
[ "def", "process_formdata", "(", "self", ",", "valuelist", ")", ":", "super", "(", "EmptyNone", ",", "self", ")", ".", "process_formdata", "(", "valuelist", ")", "self", ".", "data", "=", "self", ".", "data", "or", "None" ]
Replace empty values by None
[ "Replace", "empty", "values", "by", "None" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L67-L70
train
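EmptyNone is a mixin: it lets any WTForms field normalise empty submissions to None after the parent class has processed the raw form data. A hedged sketch with plain wtforms (the NullableStringField and DemoForm names are illustrative, not udata's actual classes):

from werkzeug.datastructures import MultiDict
from wtforms import Form, StringField

class EmptyNone(object):
    def process_formdata(self, valuelist):
        '''Replace empty values by None'''
        super(EmptyNone, self).process_formdata(valuelist)
        self.data = self.data or None

class NullableStringField(EmptyNone, StringField):
    pass

class DemoForm(Form):
    name = NullableStringField()

form = DemoForm(MultiDict({'name': ''}))
print(form.name.data)  # None instead of ''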
opendatateam/udata
udata/forms/fields.py
ModelList.fetch_objects
def fetch_objects(self, oids): ''' This method is used to fetch models from a list of identifiers. Default implementation performs a bulk query on identifiers. Override this method to customize object retrieval. ''' objects = self.model.objects.in_bulk(oids) if len(objects.keys()) != len(oids): non_existants = set(oids) - set(objects.keys()) msg = _('Unknown identifiers: {identifiers}').format( identifiers=', '.join(str(ne) for ne in non_existants)) raise validators.ValidationError(msg) return [objects[id] for id in oids]
python
def fetch_objects(self, oids): ''' This method is used to fetch models from a list of identifiers. Default implementation performs a bulk query on identifiers. Override this method to customize object retrieval. ''' objects = self.model.objects.in_bulk(oids) if len(objects.keys()) != len(oids): non_existants = set(oids) - set(objects.keys()) msg = _('Unknown identifiers: {identifiers}').format( identifiers=', '.join(str(ne) for ne in non_existants)) raise validators.ValidationError(msg) return [objects[id] for id in oids]
[ "def", "fetch_objects", "(", "self", ",", "oids", ")", ":", "objects", "=", "self", ".", "model", ".", "objects", ".", "in_bulk", "(", "oids", ")", "if", "len", "(", "objects", ".", "keys", "(", ")", ")", "!=", "len", "(", "oids", ")", ":", "non_existants", "=", "set", "(", "oids", ")", "-", "set", "(", "objects", ".", "keys", "(", ")", ")", "msg", "=", "_", "(", "'Unknown identifiers: {identifiers}'", ")", ".", "format", "(", "identifiers", "=", "', '", ".", "join", "(", "str", "(", "ne", ")", "for", "ne", "in", "non_existants", ")", ")", "raise", "validators", ".", "ValidationError", "(", "msg", ")", "return", "[", "objects", "[", "id", "]", "for", "id", "in", "oids", "]" ]
This method is used to fetch models from a list of identifiers. Default implementation performs a bulk query on identifiers. Override this method to customize object retrieval.
[ "This", "methods", "is", "used", "to", "fetch", "models", "from", "a", "list", "of", "identifiers", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L487-L503
train
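Unlike the zone field earlier, this default implementation resolves everything with one bulk query and then diffs the key sets, so all unknown identifiers are reported in a single error. The same shape, sketched with a plain dict standing in for Model.objects.in_bulk():

known = {1: 'dataset-a', 2: 'dataset-b'}  # stand-in for in_bulk()

def fetch_objects(oids):
    objects = {oid: known[oid] for oid in oids if oid in known}
    if len(objects) != len(oids):
        missing = set(oids) - set(objects)
        raise ValueError('Unknown identifiers: {0}'.format(
            ', '.join(str(m) for m in sorted(missing))))
    # preserve the order of the submitted identifiers
    return [objects[oid] for oid in oids]

print(fetch_objects([2, 1]))  # ['dataset-b', 'dataset-a']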
opendatateam/udata
udata/forms/fields.py
NestedModelList.validate
def validate(self, form, extra_validators=tuple()): '''Perform validation only if data has been submitted''' if not self.has_data: return True if self.is_list_data: if not isinstance(self._formdata[self.name], (list, tuple)): return False return super(NestedModelList, self).validate(form, extra_validators)
python
def validate(self, form, extra_validators=tuple()): '''Perform validation only if data has been submitted''' if not self.has_data: return True if self.is_list_data: if not isinstance(self._formdata[self.name], (list, tuple)): return False return super(NestedModelList, self).validate(form, extra_validators)
[ "def", "validate", "(", "self", ",", "form", ",", "extra_validators", "=", "tuple", "(", ")", ")", ":", "if", "not", "self", ".", "has_data", ":", "return", "True", "if", "self", ".", "is_list_data", ":", "if", "not", "isinstance", "(", "self", ".", "_formdata", "[", "self", ".", "name", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "False", "return", "super", "(", "NestedModelList", ",", "self", ")", ".", "validate", "(", "form", ",", "extra_validators", ")" ]
Perform validation only if data has been submitted
[ "Perform", "validation", "only", "if", "data", "has", "been", "submitted" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L531-L538
train
opendatateam/udata
udata/forms/fields.py
NestedModelList._add_entry
def _add_entry(self, formdata=None, data=unset_value, index=None): ''' Fill the form with previous data if necessary to handle partial update ''' if formdata: prefix = '-'.join((self.name, str(index))) basekey = '-'.join((prefix, '{0}')) idkey = basekey.format('id') if prefix in formdata: formdata[idkey] = formdata.pop(prefix) if hasattr(self.nested_model, 'id') and idkey in formdata: id = self.nested_model.id.to_python(formdata[idkey]) data = get_by(self.initial_data, 'id', id) initial = flatten_json(self.nested_form, data.to_mongo(), prefix) for key, value in initial.items(): if key not in formdata: formdata[key] = value else: data = None return super(NestedModelList, self)._add_entry(formdata, data, index)
python
def _add_entry(self, formdata=None, data=unset_value, index=None): ''' Fill the form with previous data if necessary to handle partial update ''' if formdata: prefix = '-'.join((self.name, str(index))) basekey = '-'.join((prefix, '{0}')) idkey = basekey.format('id') if prefix in formdata: formdata[idkey] = formdata.pop(prefix) if hasattr(self.nested_model, 'id') and idkey in formdata: id = self.nested_model.id.to_python(formdata[idkey]) data = get_by(self.initial_data, 'id', id) initial = flatten_json(self.nested_form, data.to_mongo(), prefix) for key, value in initial.items(): if key not in formdata: formdata[key] = value else: data = None return super(NestedModelList, self)._add_entry(formdata, data, index)
[ "def", "_add_entry", "(", "self", ",", "formdata", "=", "None", ",", "data", "=", "unset_value", ",", "index", "=", "None", ")", ":", "if", "formdata", ":", "prefix", "=", "'-'", ".", "join", "(", "(", "self", ".", "name", ",", "str", "(", "index", ")", ")", ")", "basekey", "=", "'-'", ".", "join", "(", "(", "prefix", ",", "'{0}'", ")", ")", "idkey", "=", "basekey", ".", "format", "(", "'id'", ")", "if", "prefix", "in", "formdata", ":", "formdata", "[", "idkey", "]", "=", "formdata", ".", "pop", "(", "prefix", ")", "if", "hasattr", "(", "self", ".", "nested_model", ",", "'id'", ")", "and", "idkey", "in", "formdata", ":", "id", "=", "self", ".", "nested_model", ".", "id", ".", "to_python", "(", "formdata", "[", "idkey", "]", ")", "data", "=", "get_by", "(", "self", ".", "initial_data", ",", "'id'", ",", "id", ")", "initial", "=", "flatten_json", "(", "self", ".", "nested_form", ",", "data", ".", "to_mongo", "(", ")", ",", "prefix", ")", "for", "key", ",", "value", "in", "initial", ".", "items", "(", ")", ":", "if", "key", "not", "in", "formdata", ":", "formdata", "[", "key", "]", "=", "value", "else", ":", "data", "=", "None", "return", "super", "(", "NestedModelList", ",", "self", ")", ".", "_add_entry", "(", "formdata", ",", "data", ",", "index", ")" ]
Fill the form with previous data if necessary to handle partial update
[ "Fill", "the", "form", "with", "previous", "data", "if", "necessary", "to", "handle", "partial", "update" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L564-L587
train
opendatateam/udata
udata/forms/fields.py
ExtrasField.parse
def parse(self, data): '''Parse fields and store individual errors''' self.field_errors = {} return dict( (k, self._parse_value(k, v)) for k, v in data.items() )
python
def parse(self, data): '''Parse fields and store individual errors''' self.field_errors = {} return dict( (k, self._parse_value(k, v)) for k, v in data.items() )
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "field_errors", "=", "{", "}", "return", "dict", "(", "(", "k", ",", "self", ".", "_parse_value", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ")" ]
Parse fields and store individual errors
[ "Parse", "fields", "and", "store", "individual", "errors" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L733-L738
train
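parse() deliberately keeps going after a bad value: each key is converted independently and failures land in field_errors rather than aborting the whole dict. A reduced sketch, with an illustrative int() coercion in place of udata's per-extra validators:

field_errors = {}

def _parse_value(key, value):
    try:
        return int(value)  # illustrative coercion only
    except (TypeError, ValueError) as e:
        field_errors[key] = str(e)
        return value

data = {'year': '2016', 'rank': 'first'}
parsed = dict((k, _parse_value(k, v)) for k, v in data.items())
print(parsed)        # {'year': 2016, 'rank': 'first'}
print(field_errors)  # {'rank': "invalid literal for int() ..."}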
opendatateam/udata
udata/core/metrics/commands.py
update
def update(site=False, organizations=False, users=False, datasets=False, reuses=False): '''Update all metrics for the current date''' do_all = not any((site, organizations, users, datasets, reuses)) if do_all or site: log.info('Update site metrics') update_site_metrics() if do_all or datasets: log.info('Update datasets metrics') for dataset in Dataset.objects.timeout(False): update_metrics_for(dataset) if do_all or reuses: log.info('Update reuses metrics') for reuse in Reuse.objects.timeout(False): update_metrics_for(reuse) if do_all or organizations: log.info('Update organizations metrics') for organization in Organization.objects.timeout(False): update_metrics_for(organization) if do_all or users: log.info('Update user metrics') for user in User.objects.timeout(False): update_metrics_for(user) success('All metrics have been updated')
python
def update(site=False, organizations=False, users=False, datasets=False, reuses=False): '''Update all metrics for the current date''' do_all = not any((site, organizations, users, datasets, reuses)) if do_all or site: log.info('Update site metrics') update_site_metrics() if do_all or datasets: log.info('Update datasets metrics') for dataset in Dataset.objects.timeout(False): update_metrics_for(dataset) if do_all or reuses: log.info('Update reuses metrics') for reuse in Reuse.objects.timeout(False): update_metrics_for(reuse) if do_all or organizations: log.info('Update organizations metrics') for organization in Organization.objects.timeout(False): update_metrics_for(organization) if do_all or users: log.info('Update user metrics') for user in User.objects.timeout(False): update_metrics_for(user) success('All metrics have been updated')
[ "def", "update", "(", "site", "=", "False", ",", "organizations", "=", "False", ",", "users", "=", "False", ",", "datasets", "=", "False", ",", "reuses", "=", "False", ")", ":", "do_all", "=", "not", "any", "(", "(", "site", ",", "organizations", ",", "users", ",", "datasets", ",", "reuses", ")", ")", "if", "do_all", "or", "site", ":", "log", ".", "info", "(", "'Update site metrics'", ")", "update_site_metrics", "(", ")", "if", "do_all", "or", "datasets", ":", "log", ".", "info", "(", "'Update datasets metrics'", ")", "for", "dataset", "in", "Dataset", ".", "objects", ".", "timeout", "(", "False", ")", ":", "update_metrics_for", "(", "dataset", ")", "if", "do_all", "or", "reuses", ":", "log", ".", "info", "(", "'Update reuses metrics'", ")", "for", "reuse", "in", "Reuse", ".", "objects", ".", "timeout", "(", "False", ")", ":", "update_metrics_for", "(", "reuse", ")", "if", "do_all", "or", "organizations", ":", "log", ".", "info", "(", "'Update organizations metrics'", ")", "for", "organization", "in", "Organization", ".", "objects", ".", "timeout", "(", "False", ")", ":", "update_metrics_for", "(", "organization", ")", "if", "do_all", "or", "users", ":", "log", ".", "info", "(", "'Update user metrics'", ")", "for", "user", "in", "User", ".", "objects", ".", "timeout", "(", "False", ")", ":", "update_metrics_for", "(", "user", ")", "success", "(", "'All metrics have been updated'", ")" ]
Update all metrics for the current date
[ "Update", "all", "metrics", "for", "the", "current", "date" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L37-L66
train
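The command relies on a small dispatch idiom: when no flag is passed, do_all flips to True and every branch runs; any explicit flag narrows the run to just those branches. The idiom in isolation, with two stubbed branches:

def update(site=False, datasets=False):
    do_all = not any((site, datasets))
    if do_all or site:
        print('Update site metrics')
    if do_all or datasets:
        print('Update datasets metrics')

update()               # no flags: both branches run
update(datasets=True)  # only the datasets branch runs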
opendatateam/udata
udata/core/metrics/commands.py
list
def list(): '''List all known metrics''' for cls, metrics in metric_catalog.items(): echo(white(cls.__name__)) for metric in metrics.keys(): echo('> {0}'.format(metric))
python
def list(): '''List all known metrics''' for cls, metrics in metric_catalog.items(): echo(white(cls.__name__)) for metric in metrics.keys(): echo('> {0}'.format(metric))
[ "def", "list", "(", ")", ":", "for", "cls", ",", "metrics", "in", "metric_catalog", ".", "items", "(", ")", ":", "echo", "(", "white", "(", "cls", ".", "__name__", ")", ")", "for", "metric", "in", "metrics", ".", "keys", "(", ")", ":", "echo", "(", "'> {0}'", ".", "format", "(", "metric", ")", ")" ]
List all known metrics
[ "List", "all", "known", "metrics" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L70-L76
train
opendatateam/udata
udata/api/commands.py
json_to_file
def json_to_file(data, filename, pretty=False): '''Dump JSON data to a file''' kwargs = dict(indent=4) if pretty else {} dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) dump = json.dumps(data, **kwargs) with open(filename, 'wb') as f: f.write(dump.encode('utf-8'))
python
def json_to_file(data, filename, pretty=False): '''Dump JSON data to a file''' kwargs = dict(indent=4) if pretty else {} dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) dump = json.dumps(data, **kwargs) with open(filename, 'wb') as f: f.write(dump.encode('utf-8'))
[ "def", "json_to_file", "(", "data", ",", "filename", ",", "pretty", "=", "False", ")", ":", "kwargs", "=", "dict", "(", "indent", "=", "4", ")", "if", "pretty", "else", "{", "}", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "os", ".", "makedirs", "(", "dirname", ")", "dump", "=", "json", ".", "dumps", "(", "api", ".", "__schema__", ",", "*", "*", "kwargs", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "dump", ".", "encode", "(", "'utf-8'", ")", ")" ]
Dump JSON data to a file
[ "Dump", "JSON", "data", "to", "a", "file" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L24-L32
train
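With the data argument wired through (see the dumps fix above, where the function previously ignored data and always serialized api.__schema__), the helper creates missing parent directories and writes UTF-8 encoded bytes. A usage sketch with a hypothetical output path; the dirname guard is an added assumption so bare filenames do not trip makedirs(''):

import json
import os

def json_to_file(data, filename, pretty=False):
    '''Dump JSON data to a file'''
    kwargs = dict(indent=4) if pretty else {}
    dirname = os.path.dirname(filename)
    if dirname and not os.path.exists(dirname):  # added guard (assumption)
        os.makedirs(dirname)
    with open(filename, 'wb') as f:
        f.write(json.dumps(data, **kwargs).encode('utf-8'))

json_to_file({'swagger': '2.0'}, '/tmp/udata-demo/swagger.json', pretty=True)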
opendatateam/udata
udata/api/commands.py
postman
def postman(filename, pretty, urlvars, swagger): '''Dump the API as a Postman collection''' data = api.as_postman(urlvars=urlvars, swagger=swagger) json_to_file(data, filename, pretty)
python
def postman(filename, pretty, urlvars, swagger): '''Dump the API as a Postman collection''' data = api.as_postman(urlvars=urlvars, swagger=swagger) json_to_file(data, filename, pretty)
[ "def", "postman", "(", "filename", ",", "pretty", ",", "urlvars", ",", "swagger", ")", ":", "data", "=", "api", ".", "as_postman", "(", "urlvars", "=", "urlvars", ",", "swagger", "=", "swagger", ")", "json_to_file", "(", "data", ",", "filename", ",", "pretty", ")" ]
Dump the API as a Postman collection
[ "Dump", "the", "API", "as", "a", "Postman", "collection" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L49-L52
train
opendatateam/udata
udata/core/badges/tasks.py
notify_badge_added_certified
def notify_badge_added_certified(sender, kind=''): ''' Send an email when a `CERTIFIED` badge is added to an `Organization` Parameters ---------- sender The object that emitted the event. kind: str The kind of `Badge` object awarded. ''' if kind == CERTIFIED and isinstance(sender, Organization): recipients = [member.user for member in sender.members] subject = _( 'Your organization "%(name)s" has been certified', name=sender.name ) mail.send( subject, recipients, 'badge_added_certified', organization=sender, badge=sender.get_badge(kind) )
python
def notify_badge_added_certified(sender, kind=''): ''' Send an email when a `CERTIFIED` badge is added to an `Organization` Parameters ---------- sender The object that emitted the event. kind: str The kind of `Badge` object awarded. ''' if kind == CERTIFIED and isinstance(sender, Organization): recipients = [member.user for member in sender.members] subject = _( 'Your organization "%(name)s" has been certified', name=sender.name ) mail.send( subject, recipients, 'badge_added_certified', organization=sender, badge=sender.get_badge(kind) )
[ "def", "notify_badge_added_certified", "(", "sender", ",", "kind", "=", "''", ")", ":", "if", "kind", "==", "CERTIFIED", "and", "isinstance", "(", "sender", ",", "Organization", ")", ":", "recipients", "=", "[", "member", ".", "user", "for", "member", "in", "sender", ".", "members", "]", "subject", "=", "_", "(", "'Your organization \"%(name)s\" has been certified'", ",", "name", "=", "sender", ".", "name", ")", "mail", ".", "send", "(", "subject", ",", "recipients", ",", "'badge_added_certified'", ",", "organization", "=", "sender", ",", "badge", "=", "sender", ".", "get_badge", "(", "kind", ")", ")" ]
Send an email when a `CERTIFIED` badge is added to an `Organization` Parameters ---------- sender The object that emitted the event. kind: str The kind of `Badge` object awarded.
[ "Send", "an", "email", "when", "a", "CERTIFIED", "badge", "is", "added", "to", "an", "Organization" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/tasks.py#L27-L50
train
opendatateam/udata
udata/core/discussions/notifications.py
discussions_notifications
def discussions_notifications(user): '''Notify user about open discussions''' notifications = [] # Only fetch required fields for notification serialization # Greatly improve performance and memory usage qs = discussions_for(user).only('id', 'created', 'title', 'subject') # Do not dereference subject (so it's a DBRef) # Also improve performance and memory usage for discussion in qs.no_dereference(): notifications.append((discussion.created, { 'id': discussion.id, 'title': discussion.title, 'subject': { 'id': discussion.subject['_ref'].id, 'type': discussion.subject['_cls'].lower(), } })) return notifications
python
def discussions_notifications(user): '''Notify user about open discussions''' notifications = [] # Only fetch required fields for notification serialization # Greatly improve performance and memory usage qs = discussions_for(user).only('id', 'created', 'title', 'subject') # Do not dereference subject (so it's a DBRef) # Also improve performance and memory usage for discussion in qs.no_dereference(): notifications.append((discussion.created, { 'id': discussion.id, 'title': discussion.title, 'subject': { 'id': discussion.subject['_ref'].id, 'type': discussion.subject['_cls'].lower(), } })) return notifications
[ "def", "discussions_notifications", "(", "user", ")", ":", "notifications", "=", "[", "]", "# Only fetch required fields for notification serialization", "# Greatly improve performances and memory usage", "qs", "=", "discussions_for", "(", "user", ")", ".", "only", "(", "'id'", ",", "'created'", ",", "'title'", ",", "'subject'", ")", "# Do not dereference subject (so it's a DBRef)", "# Also improve performances and memory usage", "for", "discussion", "in", "qs", ".", "no_dereference", "(", ")", ":", "notifications", ".", "append", "(", "(", "discussion", ".", "created", ",", "{", "'id'", ":", "discussion", ".", "id", ",", "'title'", ":", "discussion", ".", "title", ",", "'subject'", ":", "{", "'id'", ":", "discussion", ".", "subject", "[", "'_ref'", "]", ".", "id", ",", "'type'", ":", "discussion", ".", "subject", "[", "'_cls'", "]", ".", "lower", "(", ")", ",", "}", "}", ")", ")", "return", "notifications" ]
Notify user about open discussions
[ "Notify", "user", "about", "open", "discussions" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/notifications.py#L15-L35
train
opendatateam/udata
udata/tracking.py
send_signal
def send_signal(signal, request, user, **kwargs): '''Generic method to send signals to Piwik given that we always have to compute, for instance, the IP and UID. ''' params = { 'user_ip': request.remote_addr } params.update(kwargs) if user.is_authenticated: params['uid'] = user.id signal.send(request.url, **params)
python
def send_signal(signal, request, user, **kwargs): '''Generic method to send signals to Piwik given that we always have to compute, for instance, the IP and UID. ''' params = { 'user_ip': request.remote_addr } params.update(kwargs) if user.is_authenticated: params['uid'] = user.id signal.send(request.url, **params)
[ "def", "send_signal", "(", "signal", ",", "request", ",", "user", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'user_ip'", ":", "request", ".", "remote_addr", "}", "params", ".", "update", "(", "kwargs", ")", "if", "user", ".", "is_authenticated", ":", "params", "[", "'uid'", "]", "=", "user", ".", "id", "signal", ".", "send", "(", "request", ".", "url", ",", "*", "*", "params", ")" ]
Generic method to send signals to Piwik given that we always have to compute, for instance, the IP and UID.
[ "Generic", "method", "to", "send", "signals", "to", "Piwik" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tracking.py#L5-L16
train
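The tracking signals are plain blinker signals: the request URL is the sender and everything else travels as keyword arguments, with uid attached only for authenticated users. A standalone sketch with faked request and user values (the signal name and uid are illustrative):

from blinker import signal

on_visit = signal('on-visit')  # hypothetical signal name

@on_visit.connect
def handler(sender, **params):
    print(sender, params)

params = {'user_ip': '127.0.0.1'}  # request.remote_addr stand-in
is_authenticated = True            # user.is_authenticated stand-in
if is_authenticated:
    params['uid'] = 42
on_visit.send('https://example.org/datasets/demo', **params)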
opendatateam/udata
udata/core/organization/notifications.py
membership_request_notifications
def membership_request_notifications(user): '''Notify user about pending membership requests''' orgs = [o for o in user.organizations if o.is_admin(user)] notifications = [] for org in orgs: for request in org.pending_requests: notifications.append((request.created, { 'id': request.id, 'organization': org.id, 'user': { 'id': request.user.id, 'fullname': request.user.fullname, 'avatar': str(request.user.avatar) } })) return notifications
python
def membership_request_notifications(user): '''Notify user about pending membership requests''' orgs = [o for o in user.organizations if o.is_admin(user)] notifications = [] for org in orgs: for request in org.pending_requests: notifications.append((request.created, { 'id': request.id, 'organization': org.id, 'user': { 'id': request.user.id, 'fullname': request.user.fullname, 'avatar': str(request.user.avatar) } })) return notifications
[ "def", "membership_request_notifications", "(", "user", ")", ":", "orgs", "=", "[", "o", "for", "o", "in", "user", ".", "organizations", "if", "o", ".", "is_admin", "(", "user", ")", "]", "notifications", "=", "[", "]", "for", "org", "in", "orgs", ":", "for", "request", "in", "org", ".", "pending_requests", ":", "notifications", ".", "append", "(", "(", "request", ".", "created", ",", "{", "'id'", ":", "request", ".", "id", ",", "'organization'", ":", "org", ".", "id", ",", "'user'", ":", "{", "'id'", ":", "request", ".", "user", ".", "id", ",", "'fullname'", ":", "request", ".", "user", ".", "fullname", ",", "'avatar'", ":", "str", "(", "request", ".", "user", ".", "avatar", ")", "}", "}", ")", ")", "return", "notifications" ]
Notify user about pending membership requests
[ "Notify", "user", "about", "pending", "membership", "requests" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/organization/notifications.py#L13-L30
train
opendatateam/udata
udata/harvest/commands.py
validate
def validate(identifier): '''Validate a source given its identifier''' source = actions.validate_source(identifier) log.info('Source %s (%s) has been validated', source.slug, str(source.id))
python
def validate(identifier): '''Validate a source given its identifier''' source = actions.validate_source(identifier) log.info('Source %s (%s) has been validated', source.slug, str(source.id))
[ "def", "validate", "(", "identifier", ")", ":", "source", "=", "actions", ".", "validate_source", "(", "identifier", ")", "log", ".", "info", "(", "'Source %s (%s) has been validated'", ",", "source", ".", "slug", ",", "str", "(", "source", ".", "id", ")", ")" ]
Validate a source given its identifier
[ "Validate", "a", "source", "given", "its", "identifier" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L48-L51
train
opendatateam/udata
udata/harvest/commands.py
delete
def delete(identifier): '''Delete a harvest source''' log.info('Deleting source "%s"', identifier) actions.delete_source(identifier) log.info('Deleted source "%s"', identifier)
python
def delete(identifier): '''Delete a harvest source''' log.info('Deleting source "%s"', identifier) actions.delete_source(identifier) log.info('Deleted source "%s"', identifier)
[ "def", "delete", "(", "identifier", ")", ":", "log", ".", "info", "(", "'Deleting source \"%s\"'", ",", "identifier", ")", "actions", ".", "delete_source", "(", "identifier", ")", "log", ".", "info", "(", "'Deleted source \"%s\"'", ",", "identifier", ")" ]
Delete a harvest source
[ "Delete", "a", "harvest", "source" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L55-L59
train
opendatateam/udata
udata/harvest/commands.py
sources
def sources(scheduled=False): '''List all harvest sources''' sources = actions.list_sources() if scheduled: sources = [s for s in sources if s.periodic_task] if sources: for source in sources: msg = '{source.name} ({source.backend}): {cron}' if source.periodic_task: cron = source.periodic_task.schedule_display else: cron = 'not scheduled' log.info(msg.format(source=source, cron=cron)) elif scheduled: log.info('No sources scheduled yet') else: log.info('No sources defined yet')
python
def sources(scheduled=False): '''List all harvest sources''' sources = actions.list_sources() if scheduled: sources = [s for s in sources if s.periodic_task] if sources: for source in sources: msg = '{source.name} ({source.backend}): {cron}' if source.periodic_task: cron = source.periodic_task.schedule_display else: cron = 'not scheduled' log.info(msg.format(source=source, cron=cron)) elif scheduled: log.info('No sources scheduled yet') else: log.info('No sources defined yet')
[ "def", "sources", "(", "scheduled", "=", "False", ")", ":", "sources", "=", "actions", ".", "list_sources", "(", ")", "if", "scheduled", ":", "sources", "=", "[", "s", "for", "s", "in", "sources", "if", "s", ".", "periodic_task", "]", "if", "sources", ":", "for", "source", "in", "sources", ":", "msg", "=", "'{source.name} ({source.backend}): {cron}'", "if", "source", ".", "periodic_task", ":", "cron", "=", "source", ".", "periodic_task", ".", "schedule_display", "else", ":", "cron", "=", "'not scheduled'", "log", ".", "info", "(", "msg", ".", "format", "(", "source", "=", "source", ",", "cron", "=", "cron", ")", ")", "elif", "scheduled", ":", "log", ".", "info", "(", "'No sources scheduled yet'", ")", "else", ":", "log", ".", "info", "(", "'No sources defined yet'", ")" ]
List all harvest sources
[ "List", "all", "harvest", "sources" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L65-L81
train
opendatateam/udata
udata/harvest/commands.py
backends
def backends(): '''List available backends''' log.info('Available backends:') for backend in actions.list_backends(): log.info('%s (%s)', backend.name, backend.display_name or backend.name)
python
def backends(): '''List available backends''' log.info('Available backends:') for backend in actions.list_backends(): log.info('%s (%s)', backend.name, backend.display_name or backend.name)
[ "def", "backends", "(", ")", ":", "log", ".", "info", "(", "'Available backends:'", ")", "for", "backend", "in", "actions", ".", "list_backends", "(", ")", ":", "log", ".", "info", "(", "'%s (%s)'", ",", "backend", ".", "name", ",", "backend", ".", "display_name", "or", "backend", ".", "name", ")" ]
List available backends
[ "List", "available", "backends" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L85-L89
train
opendatateam/udata
udata/harvest/commands.py
schedule
def schedule(identifier, **kwargs): '''Schedule a harvest job to run periodically''' source = actions.schedule(identifier, **kwargs) msg = 'Scheduled {source.name} with the following crontab: {cron}' log.info(msg.format(source=source, cron=source.periodic_task.crontab))
python
def schedule(identifier, **kwargs): '''Schedule a harvest job to run periodically''' source = actions.schedule(identifier, **kwargs) msg = 'Scheduled {source.name} with the following crontab: {cron}' log.info(msg.format(source=source, cron=source.periodic_task.crontab))
[ "def", "schedule", "(", "identifier", ",", "*", "*", "kwargs", ")", ":", "source", "=", "actions", ".", "schedule", "(", "identifier", ",", "*", "*", "kwargs", ")", "msg", "=", "'Scheduled {source.name} with the following crontab: {cron}'", "log", ".", "info", "(", "msg", ".", "format", "(", "source", "=", "source", ",", "cron", "=", "source", ".", "periodic_task", ".", "crontab", ")", ")" ]
Schedule a harvest job to run periodically
[ "Schedule", "a", "harvest", "job", "to", "run", "periodically" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L120-L124
train
opendatateam/udata
udata/harvest/commands.py
unschedule
def unschedule(identifier): '''Unschedule a periodical harvest job''' source = actions.unschedule(identifier) log.info('Unscheduled harvest source "%s"', source.name)
python
def unschedule(identifier): '''Unschedule a periodical harvest job''' source = actions.unschedule(identifier) log.info('Unscheduled harvest source "%s"', source.name)
[ "def", "unschedule", "(", "identifier", ")", ":", "source", "=", "actions", ".", "unschedule", "(", "identifier", ")", "log", ".", "info", "(", "'Unscheduled harvest source \"%s\"'", ",", "source", ".", "name", ")" ]
Unschedule a periodical harvest job
[ "Unschedule", "a", "periodical", "harvest", "job" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L129-L132
train
opendatateam/udata
udata/harvest/commands.py
attach
def attach(domain, filename): ''' Attach existing datasets to their harvest remote id Mapping between identifiers should be in the FILENAME CSV file. ''' log.info('Attaching datasets for domain %s', domain) result = actions.attach(domain, filename) log.info('Attached %s datasets to %s', result.success, domain)
python
def attach(domain, filename): ''' Attach existing datasets to their harvest remote id Mapping between identifiers should be in the FILENAME CSV file. ''' log.info('Attaching datasets for domain %s', domain) result = actions.attach(domain, filename) log.info('Attached %s datasets to %s', result.success, domain)
[ "def", "attach", "(", "domain", ",", "filename", ")", ":", "log", ".", "info", "(", "'Attaching datasets for domain %s'", ",", "domain", ")", "result", "=", "actions", ".", "attach", "(", "domain", ",", "filename", ")", "log", ".", "info", "(", "'Attached %s datasets to %s'", ",", "result", ".", "success", ",", "domain", ")" ]
Attach existing datasets to their harvest remote id Mapping between identifiers should be in the FILENAME CSV file.
[ "Attach", "existing", "datasets", "to", "their", "harvest", "remote", "id" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L146-L154
train
opendatateam/udata
udata/features/transfer/actions.py
request_transfer
def request_transfer(subject, recipient, comment): '''Initiate a transfer request''' TransferPermission(subject).test() if recipient == (subject.organization or subject.owner): raise ValueError( 'Recipient should be different than the current owner') transfer = Transfer.objects.create( owner=subject.organization or subject.owner, recipient=recipient, subject=subject, comment=comment ) return transfer
python
def request_transfer(subject, recipient, comment): '''Initiate a transfer request''' TransferPermission(subject).test() if recipient == (subject.organization or subject.owner): raise ValueError( 'Recipient should be different than the current owner') transfer = Transfer.objects.create( owner=subject.organization or subject.owner, recipient=recipient, subject=subject, comment=comment ) return transfer
[ "def", "request_transfer", "(", "subject", ",", "recipient", ",", "comment", ")", ":", "TransferPermission", "(", "subject", ")", ".", "test", "(", ")", "if", "recipient", "==", "(", "subject", ".", "organization", "or", "subject", ".", "owner", ")", ":", "raise", "ValueError", "(", "'Recipient should be different than the current owner'", ")", "transfer", "=", "Transfer", ".", "objects", ".", "create", "(", "owner", "=", "subject", ".", "organization", "or", "subject", ".", "owner", ",", "recipient", "=", "recipient", ",", "subject", "=", "subject", ",", "comment", "=", "comment", ")", "return", "transfer" ]
Initiate a transfer request
[ "Initiate", "a", "transfer", "request" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L19-L31
train
opendatateam/udata
udata/features/transfer/actions.py
accept_transfer
def accept_transfer(transfer, comment=None): '''Accept an incoming transfer request''' TransferResponsePermission(transfer).test() transfer.responded = datetime.now() transfer.responder = current_user._get_current_object() transfer.status = 'accepted' transfer.response_comment = comment transfer.save() subject = transfer.subject recipient = transfer.recipient if isinstance(recipient, Organization): subject.organization = recipient elif isinstance(recipient, User): subject.owner = recipient subject.save() return transfer
python
def accept_transfer(transfer, comment=None): '''Accept an incoming transfer request''' TransferResponsePermission(transfer).test() transfer.responded = datetime.now() transfer.responder = current_user._get_current_object() transfer.status = 'accepted' transfer.response_comment = comment transfer.save() subject = transfer.subject recipient = transfer.recipient if isinstance(recipient, Organization): subject.organization = recipient elif isinstance(recipient, User): subject.owner = recipient subject.save() return transfer
[ "def", "accept_transfer", "(", "transfer", ",", "comment", "=", "None", ")", ":", "TransferResponsePermission", "(", "transfer", ")", ".", "test", "(", ")", "transfer", ".", "responded", "=", "datetime", ".", "now", "(", ")", "transfer", ".", "responder", "=", "current_user", ".", "_get_current_object", "(", ")", "transfer", ".", "status", "=", "'accepted'", "transfer", ".", "response_comment", "=", "comment", "transfer", ".", "save", "(", ")", "subject", "=", "transfer", ".", "subject", "recipient", "=", "transfer", ".", "recipient", "if", "isinstance", "(", "recipient", ",", "Organization", ")", ":", "subject", ".", "organization", "=", "recipient", "elif", "isinstance", "(", "recipient", ",", "User", ")", ":", "subject", ".", "owner", "=", "recipient", "subject", ".", "save", "(", ")", "return", "transfer" ]
Accept an incoming transfer request
[ "Accept", "an", "incoming", "a", "transfer", "request" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L35-L54
train
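Accepting a transfer reassigns ownership based on the recipient's type: organizations land on subject.organization, users on subject.owner. The dispatch in isolation, with stub classes in place of udata's documents:

class Organization(object): pass
class User(object): pass

class Subject(object):
    owner = None
    organization = None

def reassign(subject, recipient):
    # mirror the isinstance dispatch from accept_transfer()
    if isinstance(recipient, Organization):
        subject.organization = recipient
    elif isinstance(recipient, User):
        subject.owner = recipient

subject = Subject()
reassign(subject, User())
print(subject.owner, subject.organization)  # <User object ...> None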
opendatateam/udata
udata/features/transfer/actions.py
refuse_transfer
def refuse_transfer(transfer, comment=None): '''Refuse an incoming transfer request''' TransferResponsePermission(transfer).test() transfer.responded = datetime.now() transfer.responder = current_user._get_current_object() transfer.status = 'refused' transfer.response_comment = comment transfer.save() return transfer
python
def refuse_transfer(transfer, comment=None): '''Refuse an incoming transfer request''' TransferResponsePermission(transfer).test() transfer.responded = datetime.now() transfer.responder = current_user._get_current_object() transfer.status = 'refused' transfer.response_comment = comment transfer.save() return transfer
[ "def", "refuse_transfer", "(", "transfer", ",", "comment", "=", "None", ")", ":", "TransferResponsePermission", "(", "transfer", ")", ".", "test", "(", ")", "transfer", ".", "responded", "=", "datetime", ".", "now", "(", ")", "transfer", ".", "responder", "=", "current_user", ".", "_get_current_object", "(", ")", "transfer", ".", "status", "=", "'refused'", "transfer", ".", "response_comment", "=", "comment", "transfer", ".", "save", "(", ")", "return", "transfer" ]
Refuse an incoming transfer request
[ "Refuse", "an", "incoming", "a", "transfer", "request" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L58-L68
train
opendatateam/udata
udata/core/metrics/models.py
WithMetrics.clean
def clean(self): '''Fill metrics with defaults on create''' if not self.metrics: self.metrics = dict( (name, spec.default) for name, spec in (metric_catalog.get(self.__class__, {}) .items())) return super(WithMetrics, self).clean()
python
def clean(self): '''Fill metrics with defaults on create''' if not self.metrics: self.metrics = dict( (name, spec.default) for name, spec in (metric_catalog.get(self.__class__, {}) .items())) return super(WithMetrics, self).clean()
[ "def", "clean", "(", "self", ")", ":", "if", "not", "self", ".", "metrics", ":", "self", ".", "metrics", "=", "dict", "(", "(", "name", ",", "spec", ".", "default", ")", "for", "name", ",", "spec", "in", "(", "metric_catalog", ".", "get", "(", "self", ".", "__class__", ",", "{", "}", ")", ".", "items", "(", ")", ")", ")", "return", "super", "(", "WithMetrics", ",", "self", ")", ".", "clean", "(", ")" ]
Fill metrics with defaults on create
[ "Fill", "metrics", "with", "defaults", "on", "create" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/models.py#L44-L51
train
opendatateam/udata
udata/core/site/rdf.py
build_catalog
def build_catalog(site, datasets, format=None): '''Build the DCAT catalog for this site''' site_url = url_for('site.home_redirect', _external=True) catalog_url = url_for('site.rdf_catalog', _external=True) graph = Graph(namespace_manager=namespace_manager) catalog = graph.resource(URIRef(catalog_url)) catalog.set(RDF.type, DCAT.Catalog) catalog.set(DCT.title, Literal(site.title)) catalog.set(DCT.language, Literal(current_app.config['DEFAULT_LANGUAGE'])) catalog.set(FOAF.homepage, URIRef(site_url)) publisher = graph.resource(BNode()) publisher.set(RDF.type, FOAF.Organization) publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR'])) catalog.set(DCT.publisher, publisher) for dataset in datasets: catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph)) if isinstance(datasets, Paginable): if not format: raise ValueError('Pagination requires format') catalog.add(RDF.type, HYDRA.Collection) catalog.set(HYDRA.totalItems, Literal(datasets.total)) kwargs = { 'format': format, 'page_size': datasets.page_size, '_external': True, } first_url = url_for('site.rdf_catalog_format', page=1, **kwargs) page_url = url_for('site.rdf_catalog_format', page=datasets.page, **kwargs) last_url = url_for('site.rdf_catalog_format', page=datasets.pages, **kwargs) pagination = graph.resource(URIRef(page_url)) pagination.set(RDF.type, HYDRA.PartialCollectionView) pagination.set(HYDRA.first, URIRef(first_url)) pagination.set(HYDRA.last, URIRef(last_url)) if datasets.has_next: next_url = url_for('site.rdf_catalog_format', page=datasets.page + 1, **kwargs) pagination.set(HYDRA.next, URIRef(next_url)) if datasets.has_prev: prev_url = url_for('site.rdf_catalog_format', page=datasets.page - 1, **kwargs) pagination.set(HYDRA.previous, URIRef(prev_url)) catalog.set(HYDRA.view, pagination) return catalog
python
def build_catalog(site, datasets, format=None): '''Build the DCAT catalog for this site''' site_url = url_for('site.home_redirect', _external=True) catalog_url = url_for('site.rdf_catalog', _external=True) graph = Graph(namespace_manager=namespace_manager) catalog = graph.resource(URIRef(catalog_url)) catalog.set(RDF.type, DCAT.Catalog) catalog.set(DCT.title, Literal(site.title)) catalog.set(DCT.language, Literal(current_app.config['DEFAULT_LANGUAGE'])) catalog.set(FOAF.homepage, URIRef(site_url)) publisher = graph.resource(BNode()) publisher.set(RDF.type, FOAF.Organization) publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR'])) catalog.set(DCT.publisher, publisher) for dataset in datasets: catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph)) if isinstance(datasets, Paginable): if not format: raise ValueError('Pagination requires format') catalog.add(RDF.type, HYDRA.Collection) catalog.set(HYDRA.totalItems, Literal(datasets.total)) kwargs = { 'format': format, 'page_size': datasets.page_size, '_external': True, } first_url = url_for('site.rdf_catalog_format', page=1, **kwargs) page_url = url_for('site.rdf_catalog_format', page=datasets.page, **kwargs) last_url = url_for('site.rdf_catalog_format', page=datasets.pages, **kwargs) pagination = graph.resource(URIRef(page_url)) pagination.set(RDF.type, HYDRA.PartialCollectionView) pagination.set(HYDRA.first, URIRef(first_url)) pagination.set(HYDRA.last, URIRef(last_url)) if datasets.has_next: next_url = url_for('site.rdf_catalog_format', page=datasets.page + 1, **kwargs) pagination.set(HYDRA.next, URIRef(next_url)) if datasets.has_prev: prev_url = url_for('site.rdf_catalog_format', page=datasets.page - 1, **kwargs) pagination.set(HYDRA.previous, URIRef(prev_url)) catalog.set(HYDRA.view, pagination) return catalog
[ "def", "build_catalog", "(", "site", ",", "datasets", ",", "format", "=", "None", ")", ":", "site_url", "=", "url_for", "(", "'site.home_redirect'", ",", "_external", "=", "True", ")", "catalog_url", "=", "url_for", "(", "'site.rdf_catalog'", ",", "_external", "=", "True", ")", "graph", "=", "Graph", "(", "namespace_manager", "=", "namespace_manager", ")", "catalog", "=", "graph", ".", "resource", "(", "URIRef", "(", "catalog_url", ")", ")", "catalog", ".", "set", "(", "RDF", ".", "type", ",", "DCAT", ".", "Catalog", ")", "catalog", ".", "set", "(", "DCT", ".", "title", ",", "Literal", "(", "site", ".", "title", ")", ")", "catalog", ".", "set", "(", "DCT", ".", "language", ",", "Literal", "(", "current_app", ".", "config", "[", "'DEFAULT_LANGUAGE'", "]", ")", ")", "catalog", ".", "set", "(", "FOAF", ".", "homepage", ",", "URIRef", "(", "site_url", ")", ")", "publisher", "=", "graph", ".", "resource", "(", "BNode", "(", ")", ")", "publisher", ".", "set", "(", "RDF", ".", "type", ",", "FOAF", ".", "Organization", ")", "publisher", ".", "set", "(", "FOAF", ".", "name", ",", "Literal", "(", "current_app", ".", "config", "[", "'SITE_AUTHOR'", "]", ")", ")", "catalog", ".", "set", "(", "DCT", ".", "publisher", ",", "publisher", ")", "for", "dataset", "in", "datasets", ":", "catalog", ".", "add", "(", "DCAT", ".", "dataset", ",", "dataset_to_rdf", "(", "dataset", ",", "graph", ")", ")", "if", "isinstance", "(", "datasets", ",", "Paginable", ")", ":", "if", "not", "format", ":", "raise", "ValueError", "(", "'Pagination requires format'", ")", "catalog", ".", "add", "(", "RDF", ".", "type", ",", "HYDRA", ".", "Collection", ")", "catalog", ".", "set", "(", "HYDRA", ".", "totalItems", ",", "Literal", "(", "datasets", ".", "total", ")", ")", "kwargs", "=", "{", "'format'", ":", "format", ",", "'page_size'", ":", "datasets", ".", "page_size", ",", "'_external'", ":", "True", ",", "}", "first_url", "=", "url_for", "(", "'site.rdf_catalog_format'", ",", "page", "=", "1", ",", "*", "*", "kwargs", ")", "page_url", "=", "url_for", "(", "'site.rdf_catalog_format'", ",", "page", "=", "datasets", ".", "page", ",", "*", "*", "kwargs", ")", "last_url", "=", "url_for", "(", "'site.rdf_catalog_format'", ",", "page", "=", "datasets", ".", "pages", ",", "*", "*", "kwargs", ")", "pagination", "=", "graph", ".", "resource", "(", "URIRef", "(", "page_url", ")", ")", "pagination", ".", "set", "(", "RDF", ".", "type", ",", "HYDRA", ".", "PartialCollectionView", ")", "pagination", ".", "set", "(", "HYDRA", ".", "first", ",", "URIRef", "(", "first_url", ")", ")", "pagination", ".", "set", "(", "HYDRA", ".", "last", ",", "URIRef", "(", "last_url", ")", ")", "if", "datasets", ".", "has_next", ":", "next_url", "=", "url_for", "(", "'site.rdf_catalog_format'", ",", "page", "=", "datasets", ".", "page", "+", "1", ",", "*", "*", "kwargs", ")", "pagination", ".", "set", "(", "HYDRA", ".", "next", ",", "URIRef", "(", "next_url", ")", ")", "if", "datasets", ".", "has_prev", ":", "prev_url", "=", "url_for", "(", "'site.rdf_catalog_format'", ",", "page", "=", "datasets", ".", "page", "-", "1", ",", "*", "*", "kwargs", ")", "pagination", ".", "set", "(", "HYDRA", ".", "previous", ",", "URIRef", "(", "prev_url", ")", ")", "catalog", ".", "set", "(", "HYDRA", ".", "view", ",", "pagination", ")", "return", "catalog" ]
Build the DCAT catalog for this site
[ "Build", "the", "DCAT", "catalog", "for", "this", "site" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/site/rdf.py#L15-L68
train
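The first half of build_catalog() is straight rdflib: create a graph, type the catalog resource, and attach a blank-node publisher. A runnable reduction without udata's namespace manager, datasets, or Hydra pagination (the catalog URL and site values are hard-coded stand-ins):

from rdflib import BNode, Graph, Literal, Namespace, URIRef
from rdflib.namespace import DCTERMS, FOAF, RDF

DCAT = Namespace('http://www.w3.org/ns/dcat#')

graph = Graph()
graph.bind('dcat', DCAT)
catalog = graph.resource(URIRef('https://example.org/catalog'))
catalog.set(RDF.type, DCAT.Catalog)
catalog.set(DCTERMS.title, Literal('Demo catalog'))

publisher = graph.resource(BNode())
publisher.set(RDF.type, FOAF.Organization)
publisher.set(FOAF.name, Literal('Example author'))
catalog.set(DCTERMS.publisher, publisher)

print(graph.serialize(format='turtle'))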
opendatateam/udata
udata/patch_flask_security.py
sendmail_proxy
def sendmail_proxy(subject, email, template, **context): """Cast the lazy_gettext'ed subject to string before passing to Celery""" sendmail.delay(subject.value, email, template, **context)
python
def sendmail_proxy(subject, email, template, **context): """Cast the lazy_gettext'ed subject to string before passing to Celery""" sendmail.delay(subject.value, email, template, **context)
[ "def", "sendmail_proxy", "(", "subject", ",", "email", ",", "template", ",", "*", "*", "context", ")", ":", "sendmail", ".", "delay", "(", "subject", ".", "value", ",", "email", ",", "template", ",", "*", "*", "context", ")" ]
Cast the lazy_gettext'ed subject to string before passing to Celery
[ "Cast", "the", "lazy_gettext", "ed", "subject", "to", "string", "before", "passing", "to", "Celery" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/patch_flask_security.py#L18-L20
train
opendatateam/udata
udata/commands/static.py
collect
def collect(path, no_input): '''Collect static files''' if exists(path): msg = '"%s" directory already exists and will be erased' log.warning(msg, path) if not no_input: click.confirm('Are you sure?', abort=True) log.info('Deleting static directory "%s"', path) shutil.rmtree(path) prefix = current_app.static_url_path or current_app.static_folder if prefix.startswith('/'): prefix = prefix[1:] destination = join(path, prefix) log.info('Copying application assets into "%s"', destination) shutil.copytree(current_app.static_folder, destination) for blueprint in current_app.blueprints.values(): if blueprint.has_static_folder: prefix = current_app.static_prefixes.get(blueprint.name) prefix = prefix or blueprint.url_prefix or '' prefix += blueprint.static_url_path or '' if prefix.startswith('/'): prefix = prefix[1:] log.info('Copying %s assets to %s', blueprint.name, prefix) destination = join(path, prefix) copy_recursive(blueprint.static_folder, destination) for prefix, source in current_app.config['STATIC_DIRS']: log.info('Copying %s to %s', source, prefix) destination = join(path, prefix) copy_recursive(source, destination) log.info('Done')
python
def collect(path, no_input): '''Collect static files''' if exists(path): msg = '"%s" directory already exists and will be erased' log.warning(msg, path) if not no_input: click.confirm('Are you sure?', abort=True) log.info('Deleting static directory "%s"', path) shutil.rmtree(path) prefix = current_app.static_url_path or current_app.static_folder if prefix.startswith('/'): prefix = prefix[1:] destination = join(path, prefix) log.info('Copying application assets into "%s"', destination) shutil.copytree(current_app.static_folder, destination) for blueprint in current_app.blueprints.values(): if blueprint.has_static_folder: prefix = current_app.static_prefixes.get(blueprint.name) prefix = prefix or blueprint.url_prefix or '' prefix += blueprint.static_url_path or '' if prefix.startswith('/'): prefix = prefix[1:] log.info('Copying %s assets to %s', blueprint.name, prefix) destination = join(path, prefix) copy_recursive(blueprint.static_folder, destination) for prefix, source in current_app.config['STATIC_DIRS']: log.info('Copying %s to %s', source, prefix) destination = join(path, prefix) copy_recursive(source, destination) log.info('Done')
[ "def", "collect", "(", "path", ",", "no_input", ")", ":", "if", "exists", "(", "path", ")", ":", "msg", "=", "'\"%s\" directory already exists and will be erased'", "log", ".", "warning", "(", "msg", ",", "path", ")", "if", "not", "no_input", ":", "click", ".", "confirm", "(", "'Are you sure?'", ",", "abort", "=", "True", ")", "log", ".", "info", "(", "'Deleting static directory \"%s\"'", ",", "path", ")", "shutil", ".", "rmtree", "(", "path", ")", "prefix", "=", "current_app", ".", "static_url_path", "or", "current_app", ".", "static_folder", "if", "prefix", ".", "startswith", "(", "'/'", ")", ":", "prefix", "=", "prefix", "[", "1", ":", "]", "destination", "=", "join", "(", "path", ",", "prefix", ")", "log", ".", "info", "(", "'Copying application assets into \"%s\"'", ",", "destination", ")", "shutil", ".", "copytree", "(", "current_app", ".", "static_folder", ",", "destination", ")", "for", "blueprint", "in", "current_app", ".", "blueprints", ".", "values", "(", ")", ":", "if", "blueprint", ".", "has_static_folder", ":", "prefix", "=", "current_app", ".", "static_prefixes", ".", "get", "(", "blueprint", ".", "name", ")", "prefix", "=", "prefix", "or", "blueprint", ".", "url_prefix", "or", "''", "prefix", "+=", "blueprint", ".", "static_url_path", "or", "''", "if", "prefix", ".", "startswith", "(", "'/'", ")", ":", "prefix", "=", "prefix", "[", "1", ":", "]", "log", ".", "info", "(", "'Copying %s assets to %s'", ",", "blueprint", ".", "name", ",", "prefix", ")", "destination", "=", "join", "(", "path", ",", "prefix", ")", "copy_recursive", "(", "blueprint", ".", "static_folder", ",", "destination", ")", "for", "prefix", ",", "source", "in", "current_app", ".", "config", "[", "'STATIC_DIRS'", "]", ":", "log", ".", "info", "(", "'Copying %s to %s'", ",", "source", ",", "prefix", ")", "destination", "=", "join", "(", "path", ",", "prefix", ")", "copy_recursive", "(", "source", ",", "destination", ")", "log", ".", "info", "(", "'Done'", ")" ]
Collect static files
[ "Collect", "static", "files" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/static.py#L24-L59
train
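A small worked illustration (not from the repo) of why collect strips the leading slash from static URL prefixes before joining: os.path.join discards everything before an absolute component, so an unstripped '/static' would escape the target directory.

from os.path import join

prefix = '/static'
print(join('dist', prefix))        # '/static': the absolute prefix wins
if prefix.startswith('/'):
    prefix = prefix[1:]
print(join('dist', prefix))        # 'dist/static': lands inside the build dir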
opendatateam/udata
udata/harvest/notifications.py
validate_harvester_notifications
def validate_harvester_notifications(user):
    '''Notify admins about pending harvester validation'''
    if not user.sysadmin:
        return []

    notifications = []

    # Only fetch required fields for notification serialization
    # Greatly improve performances and memory usage
    qs = HarvestSource.objects(validation__state=VALIDATION_PENDING)
    qs = qs.only('id', 'created_at', 'name')

    for source in qs:
        notifications.append((source.created_at, {
            'id': source.id,
            'name': source.name,
        }))

    return notifications
python
def validate_harvester_notifications(user):
    '''Notify admins about pending harvester validation'''
    if not user.sysadmin:
        return []

    notifications = []

    # Only fetch required fields for notification serialization
    # Greatly improve performances and memory usage
    qs = HarvestSource.objects(validation__state=VALIDATION_PENDING)
    qs = qs.only('id', 'created_at', 'name')

    for source in qs:
        notifications.append((source.created_at, {
            'id': source.id,
            'name': source.name,
        }))

    return notifications
[ "def", "validate_harvester_notifications", "(", "user", ")", ":", "if", "not", "user", ".", "sysadmin", ":", "return", "[", "]", "notifications", "=", "[", "]", "# Only fetch required fields for notification serialization", "# Greatly improve performances and memory usage", "qs", "=", "HarvestSource", ".", "objects", "(", "validation__state", "=", "VALIDATION_PENDING", ")", "qs", "=", "qs", ".", "only", "(", "'id'", ",", "'created_at'", ",", "'name'", ")", "for", "source", "in", "qs", ":", "notifications", ".", "append", "(", "(", "source", ".", "created_at", ",", "{", "'id'", ":", "source", ".", "id", ",", "'name'", ":", "source", ".", "name", ",", "}", ")", ")", "return", "notifications" ]
Notify admins about pending harvester validation
[ "Notify", "admins", "about", "pending", "harvester", "validation" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/notifications.py#L14-L32
train
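A hedged sketch of the .only() projection used above, with a stand-in MongoEngine document since HarvestSource needs a full udata install; the model, connection name, and state value are illustrative. .only() limits the fields MongoDB returns, which is what keeps this notification query cheap on large collections.

from mongoengine import Document, StringField, DateTimeField, connect

class Source(Document):               # hypothetical stand-in for HarvestSource
    name = StringField()
    state = StringField()
    created_at = DateTimeField()

connect('example')                    # assumes a local MongoDB instance
qs = Source.objects(state='pending').only('id', 'created_at', 'name')
notifications = [(s.created_at, {'id': s.id, 'name': s.name}) for s in qs]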
opendatateam/udata
udata/harvest/backends/__init__.py
get
def get(app, name):
    '''Get a backend given its name'''
    backend = get_all(app).get(name)
    if not backend:
        msg = 'Harvest backend "{0}" is not registered'.format(name)
        raise EntrypointError(msg)
    return backend
python
def get(app, name):
    '''Get a backend given its name'''
    backend = get_all(app).get(name)
    if not backend:
        msg = 'Harvest backend "{0}" is not registered'.format(name)
        raise EntrypointError(msg)
    return backend
[ "def", "get", "(", "app", ",", "name", ")", ":", "backend", "=", "get_all", "(", "app", ")", ".", "get", "(", "name", ")", "if", "not", "backend", ":", "msg", "=", "'Harvest backend \"{0}\" is not registered'", ".", "format", "(", "name", ")", "raise", "EntrypointError", "(", "msg", ")", "return", "backend" ]
Get a backend given its name
[ "Get", "a", "backend", "given", "its", "name" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/__init__.py#L7-L13
train
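A possible call site, as a sketch only: it assumes an initialized Flask app and that the backends package imports as shown; the 'dcat' name is illustrative. The raised error carries a ready-made message, so callers only need to catch it.

from udata.harvest import backends

try:
    backend_cls = backends.get(app, 'dcat')
except Exception as e:                # EntrypointError in udata
    print('Cannot start harvesting: {0}'.format(e))
else:
    print('Using backend {0!r}'.format(backend_cls))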
opendatateam/udata
udata/core/topic/views.py
TopicSearchMixin.search
def search(self):
    '''Override search to match on topic tags'''
    s = super(TopicSearchMixin, self).search()
    s = s.filter('bool', should=[
        Q('term', tags=tag) for tag in self.topic.tags
    ])
    return s
python
def search(self):
    '''Override search to match on topic tags'''
    s = super(TopicSearchMixin, self).search()
    s = s.filter('bool', should=[
        Q('term', tags=tag) for tag in self.topic.tags
    ])
    return s
[ "def", "search", "(", "self", ")", ":", "s", "=", "super", "(", "TopicSearchMixin", ",", "self", ")", ".", "search", "(", ")", "s", "=", "s", ".", "filter", "(", "'bool'", ",", "should", "=", "[", "Q", "(", "'term'", ",", "tags", "=", "tag", ")", "for", "tag", "in", "self", ".", "topic", ".", "tags", "]", ")", "return", "s" ]
Override search to match on topic tags
[ "Override", "search", "to", "match", "on", "topic", "tags" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/topic/views.py#L25-L31
train
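A self-contained elasticsearch-dsl sketch of the query shape built above: a bool filter whose should clauses OR the topic tags together; the index name and tags are illustrative.

from elasticsearch_dsl import Q, Search

tags = ['transport', 'environment']
s = Search(index='datasets')
s = s.filter('bool', should=[Q('term', tags=tag) for tag in tags])
print(s.to_dict())   # shows the bool/should/term structure sent to Elasticsearch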
opendatateam/udata
udata/core/reuse/models.py
Reuse.clean
def clean(self):
    '''Auto populate urlhash from url'''
    if not self.urlhash or 'url' in self._get_changed_fields():
        self.urlhash = hash_url(self.url)
    super(Reuse, self).clean()
python
def clean(self):
    '''Auto populate urlhash from url'''
    if not self.urlhash or 'url' in self._get_changed_fields():
        self.urlhash = hash_url(self.url)
    super(Reuse, self).clean()
[ "def", "clean", "(", "self", ")", ":", "if", "not", "self", ".", "urlhash", "or", "'url'", "in", "self", ".", "_get_changed_fields", "(", ")", ":", "self", ".", "urlhash", "=", "hash_url", "(", "self", ".", "url", ")", "super", "(", "Reuse", ",", "self", ")", ".", "clean", "(", ")" ]
Auto populate urlhash from url
[ "Auto", "populate", "urlhash", "from", "url" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/reuse/models.py#L126-L130
train
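A minimal illustration (not udata code) of the clean() pattern above: derive a stable hash from the URL so duplicate reuses can be detected, and recompute it only when the url field actually changed. The sha1-based hash_url stand-in is an assumption, not udata's implementation.

import hashlib

def hash_url(url):                     # hypothetical stand-in for udata's helper
    return hashlib.sha1(url.encode('utf-8')).hexdigest()

urlhash = None
url = 'https://example.org/reuse'
changed_fields = ['url']               # what _get_changed_fields() would report
if not urlhash or 'url' in changed_fields:
    urlhash = hash_url(url)
print(urlhash)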
opendatateam/udata
udata/commands/serve.py
serve
def serve(info, host, port, reload, debugger, eager_loading, with_threads):
    '''
    Runs a local udata development server.

    This local server is recommended for development purposes only but it
    can also be used for simple intranet deployments.

    By default it will not support any sort of concurrency at all to
    simplify debugging. This can be changed with the --with-threads option
    which will enable basic multithreading.

    The reloader and debugger are by default enabled if the debug flag of
    Flask is enabled and disabled otherwise.
    '''
    # Werkzeug logger is special and is required
    # with this configuration for development server
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.INFO)
    logger.handlers = []

    debug = current_app.config['DEBUG']
    if reload is None:
        reload = bool(debug)
    if debugger is None:
        debugger = bool(debug)
    if eager_loading is None:
        eager_loading = not reload

    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    settings = os.environ.get('UDATA_SETTINGS',
                              os.path.join(os.getcwd(), 'udata.cfg'))
    extra_files = [settings]
    if reload:
        extra_files.extend(assets.manifests_paths())

    run_simple(host, port, app,
               use_reloader=reload,
               use_debugger=debugger,
               threaded=with_threads,
               extra_files=extra_files)
python
def serve(info, host, port, reload, debugger, eager_loading, with_threads):
    '''
    Runs a local udata development server.

    This local server is recommended for development purposes only but it
    can also be used for simple intranet deployments.

    By default it will not support any sort of concurrency at all to
    simplify debugging. This can be changed with the --with-threads option
    which will enable basic multithreading.

    The reloader and debugger are by default enabled if the debug flag of
    Flask is enabled and disabled otherwise.
    '''
    # Werkzeug logger is special and is required
    # with this configuration for development server
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.INFO)
    logger.handlers = []

    debug = current_app.config['DEBUG']
    if reload is None:
        reload = bool(debug)
    if debugger is None:
        debugger = bool(debug)
    if eager_loading is None:
        eager_loading = not reload

    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    settings = os.environ.get('UDATA_SETTINGS',
                              os.path.join(os.getcwd(), 'udata.cfg'))
    extra_files = [settings]
    if reload:
        extra_files.extend(assets.manifests_paths())

    run_simple(host, port, app,
               use_reloader=reload,
               use_debugger=debugger,
               threaded=with_threads,
               extra_files=extra_files)
[ "def", "serve", "(", "info", ",", "host", ",", "port", ",", "reload", ",", "debugger", ",", "eager_loading", ",", "with_threads", ")", ":", "# Werkzeug logger is special and is required", "# with this configuration for development server", "logger", "=", "logging", ".", "getLogger", "(", "'werkzeug'", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logger", ".", "handlers", "=", "[", "]", "debug", "=", "current_app", ".", "config", "[", "'DEBUG'", "]", "if", "reload", "is", "None", ":", "reload", "=", "bool", "(", "debug", ")", "if", "debugger", "is", "None", ":", "debugger", "=", "bool", "(", "debug", ")", "if", "eager_loading", "is", "None", ":", "eager_loading", "=", "not", "reload", "app", "=", "DispatchingApp", "(", "info", ".", "load_app", ",", "use_eager_loading", "=", "eager_loading", ")", "settings", "=", "os", ".", "environ", ".", "get", "(", "'UDATA_SETTINGS'", ",", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'udata.cfg'", ")", ")", "extra_files", "=", "[", "settings", "]", "if", "reload", ":", "extra_files", ".", "extend", "(", "assets", ".", "manifests_paths", "(", ")", ")", "run_simple", "(", "host", ",", "port", ",", "app", ",", "use_reloader", "=", "reload", ",", "use_debugger", "=", "debugger", ",", "threaded", "=", "with_threads", ",", "extra_files", "=", "extra_files", ")" ]
Runs a local udata development server.

This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.

By default it will not support any sort of concurrency at all to
simplify debugging. This can be changed with the --with-threads option
which will enable basic multithreading.

The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
[ "Runs", "a", "local", "udata", "development", "server", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/serve.py#L38-L77
train
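A stripped-down sketch of the Werkzeug call at the heart of serve(), with a trivial WSGI app standing in for the dispatching app; it shows how the reloader and debugger flags default to the Flask debug setting while threading stays opt-in.

from werkzeug.serving import run_simple

def app(environ, start_response):      # trivial WSGI app for the demo
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

debug = True
run_simple('localhost', 7000, app,
           use_reloader=debug,         # reload defaults to the debug flag
           use_debugger=debug,         # and so does the debugger
           threaded=False)             # --with-threads stays opt-in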
opendatateam/udata
udata/core/dataset/forms.py
enforce_filetype_file
def enforce_filetype_file(form, field):
    '''Only allowed domains in resource.url when filetype is file'''
    if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
        return
    domain = urlparse(field.data).netloc
    allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
    allowed_domains += [current_app.config.get('SERVER_NAME')]
    if current_app.config.get('CDN_DOMAIN'):
        allowed_domains.append(current_app.config['CDN_DOMAIN'])
    if '*' in allowed_domains:
        return
    if domain and domain not in allowed_domains:
        message = _('Domain "{domain}" not allowed for filetype "{filetype}"')
        raise validators.ValidationError(message.format(
            domain=domain, filetype=RESOURCE_FILETYPE_FILE
        ))
python
def enforce_filetype_file(form, field):
    '''Only allowed domains in resource.url when filetype is file'''
    if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
        return
    domain = urlparse(field.data).netloc
    allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
    allowed_domains += [current_app.config.get('SERVER_NAME')]
    if current_app.config.get('CDN_DOMAIN'):
        allowed_domains.append(current_app.config['CDN_DOMAIN'])
    if '*' in allowed_domains:
        return
    if domain and domain not in allowed_domains:
        message = _('Domain "{domain}" not allowed for filetype "{filetype}"')
        raise validators.ValidationError(message.format(
            domain=domain, filetype=RESOURCE_FILETYPE_FILE
        ))
[ "def", "enforce_filetype_file", "(", "form", ",", "field", ")", ":", "if", "form", ".", "_fields", ".", "get", "(", "'filetype'", ")", ".", "data", "!=", "RESOURCE_FILETYPE_FILE", ":", "return", "domain", "=", "urlparse", "(", "field", ".", "data", ")", ".", "netloc", "allowed_domains", "=", "current_app", ".", "config", "[", "'RESOURCES_FILE_ALLOWED_DOMAINS'", "]", "allowed_domains", "+=", "[", "current_app", ".", "config", ".", "get", "(", "'SERVER_NAME'", ")", "]", "if", "current_app", ".", "config", ".", "get", "(", "'CDN_DOMAIN'", ")", ":", "allowed_domains", ".", "append", "(", "current_app", ".", "config", "[", "'CDN_DOMAIN'", "]", ")", "if", "'*'", "in", "allowed_domains", ":", "return", "if", "domain", "and", "domain", "not", "in", "allowed_domains", ":", "message", "=", "_", "(", "'Domain \"{domain}\" not allowed for filetype \"{filetype}\"'", ")", "raise", "validators", ".", "ValidationError", "(", "message", ".", "format", "(", "domain", "=", "domain", ",", "filetype", "=", "RESOURCE_FILETYPE_FILE", ")", ")" ]
Only allowed domains in resource.url when filetype is file
[ "Only", "allowed", "domains", "in", "resource", ".", "url", "when", "filetype", "is", "file" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L36-L51
train
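A hedged sketch of the same idea as a reusable WTForms validator, runnable outside a Flask request context by passing the allowed domains explicitly instead of reading current_app.config; the helper name and domain list are illustrative.

from urllib.parse import urlparse
from wtforms import Form, StringField, validators

def allowed_domain(allowed):
    def _check(form, field):
        domain = urlparse(field.data).netloc
        if domain and domain not in allowed:
            raise validators.ValidationError('Domain "%s" not allowed' % domain)
    return _check

class ResourceForm(Form):
    url = StringField('url', [allowed_domain({'static.data.gouv.fr'})])

form = ResourceForm(data={'url': 'https://evil.example/file.csv'})
print(form.validate())   # False
print(form.errors)       # {'url': ['Domain "evil.example" not allowed']}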
opendatateam/udata
udata/core/dataset/forms.py
map_legacy_frequencies
def map_legacy_frequencies(form, field):
    ''' Map legacy frequencies to new ones'''
    if field.data in LEGACY_FREQUENCIES:
        field.data = LEGACY_FREQUENCIES[field.data]
python
def map_legacy_frequencies(form, field):
    ''' Map legacy frequencies to new ones'''
    if field.data in LEGACY_FREQUENCIES:
        field.data = LEGACY_FREQUENCIES[field.data]
[ "def", "map_legacy_frequencies", "(", "form", ",", "field", ")", ":", "if", "field", ".", "data", "in", "LEGACY_FREQUENCIES", ":", "field", ".", "data", "=", "LEGACY_FREQUENCIES", "[", "field", ".", "data", "]" ]
Map legacy frequencies to new ones
[ "Map", "legacy", "frequencies", "to", "new", "ones" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L101-L104
train
opendatateam/udata
udata/core/user/models.py
User.resources_availability
def resources_availability(self):
    """Return the percentage of availability for resources."""
    # Flatten the list.
    availabilities = list(
        chain(
            *[org.check_availability() for org in self.organizations]
        )
    )
    # Filter out the unknown
    availabilities = [a for a in availabilities if type(a) is bool]
    if availabilities:
        # Trick will work because it's a sum() of booleans.
        return round(100. * sum(availabilities) / len(availabilities), 2)
    # if nothing is unavailable, everything is considered OK
    return 100
python
def resources_availability(self):
    """Return the percentage of availability for resources."""
    # Flatten the list.
    availabilities = list(
        chain(
            *[org.check_availability() for org in self.organizations]
        )
    )
    # Filter out the unknown
    availabilities = [a for a in availabilities if type(a) is bool]
    if availabilities:
        # Trick will work because it's a sum() of booleans.
        return round(100. * sum(availabilities) / len(availabilities), 2)
    # if nothing is unavailable, everything is considered OK
    return 100
[ "def", "resources_availability", "(", "self", ")", ":", "# Flatten the list.", "availabilities", "=", "list", "(", "chain", "(", "*", "[", "org", ".", "check_availability", "(", ")", "for", "org", "in", "self", ".", "organizations", "]", ")", ")", "# Filter out the unknown", "availabilities", "=", "[", "a", "for", "a", "in", "availabilities", "if", "type", "(", "a", ")", "is", "bool", "]", "if", "availabilities", ":", "# Trick will work because it's a sum() of booleans.", "return", "round", "(", "100.", "*", "sum", "(", "availabilities", ")", "/", "len", "(", "availabilities", ")", ",", "2", ")", "# if nothing is unavailable, everything is considered OK", "return", "100" ]
Return the percentage of availability for resources.
[ "Return", "the", "percentage", "of", "availability", "for", "resources", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L128-L142
train
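A worked example (illustrative data) of the boolean-sum trick above: True counts as 1 and False as 0, so sum()/len() is the share of available resources.

availabilities = [True, True, False, 'unknown', True]
availabilities = [a for a in availabilities if type(a) is bool]  # drop 'unknown'
print(round(100. * sum(availabilities) / len(availabilities), 2))  # 75.0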
opendatateam/udata
udata/core/user/models.py
User.datasets_org_count
def datasets_org_count(self):
    """Return the number of datasets of user's organizations."""
    from udata.models import Dataset  # Circular imports.
    return sum(Dataset.objects(organization=org).visible().count()
               for org in self.organizations)
python
def datasets_org_count(self):
    """Return the number of datasets of user's organizations."""
    from udata.models import Dataset  # Circular imports.
    return sum(Dataset.objects(organization=org).visible().count()
               for org in self.organizations)
[ "def", "datasets_org_count", "(", "self", ")", ":", "from", "udata", ".", "models", "import", "Dataset", "# Circular imports.", "return", "sum", "(", "Dataset", ".", "objects", "(", "organization", "=", "org", ")", ".", "visible", "(", ")", ".", "count", "(", ")", "for", "org", "in", "self", ".", "organizations", ")" ]
Return the number of datasets of user's organizations.
[ "Return", "the", "number", "of", "datasets", "of", "user", "s", "organizations", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L145-L149
train
opendatateam/udata
udata/core/user/models.py
User.followers_org_count
def followers_org_count(self):
    """Return the number of followers of user's organizations."""
    from udata.models import Follow  # Circular imports.
    return sum(Follow.objects(following=org).count()
               for org in self.organizations)
python
def followers_org_count(self):
    """Return the number of followers of user's organizations."""
    from udata.models import Follow  # Circular imports.
    return sum(Follow.objects(following=org).count()
               for org in self.organizations)
[ "def", "followers_org_count", "(", "self", ")", ":", "from", "udata", ".", "models", "import", "Follow", "# Circular imports.", "return", "sum", "(", "Follow", ".", "objects", "(", "following", "=", "org", ")", ".", "count", "(", ")", "for", "org", "in", "self", ".", "organizations", ")" ]
Return the number of followers of user's organizations.
[ "Return", "the", "number", "of", "followers", "of", "user", "s", "organizations", "." ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L152-L156
train
opendatateam/udata
udata/core/badges/models.py
BadgeMixin.get_badge
def get_badge(self, kind):
    ''' Get a badge given its kind if present'''
    candidates = [b for b in self.badges if b.kind == kind]
    return candidates[0] if candidates else None
python
def get_badge(self, kind):
    ''' Get a badge given its kind if present'''
    candidates = [b for b in self.badges if b.kind == kind]
    return candidates[0] if candidates else None
[ "def", "get_badge", "(", "self", ",", "kind", ")", ":", "candidates", "=", "[", "b", "for", "b", "in", "self", ".", "badges", "if", "b", ".", "kind", "==", "kind", "]", "return", "candidates", "[", "0", "]", "if", "candidates", "else", "None" ]
Get a badge given its kind if present
[ "Get", "a", "badge", "given", "its", "kind", "if", "present" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L54-L57
train
opendatateam/udata
udata/core/badges/models.py
BadgeMixin.add_badge
def add_badge(self, kind):
    '''Perform an atomic prepend for a new badge'''
    badge = self.get_badge(kind)
    if badge:
        return badge
    if kind not in getattr(self, '__badges__', {}):
        msg = 'Unknown badge type for {model}: {kind}'
        raise db.ValidationError(msg.format(model=self.__class__.__name__,
                                            kind=kind))

    badge = Badge(kind=kind)
    if current_user.is_authenticated:
        badge.created_by = current_user.id

    self.update(__raw__={
        '$push': {
            'badges': {
                '$each': [badge.to_mongo()],
                '$position': 0
            }
        }
    })
    self.reload()

    post_save.send(self.__class__, document=self)
    on_badge_added.send(self, kind=kind)
    return self.get_badge(kind)
python
def add_badge(self, kind):
    '''Perform an atomic prepend for a new badge'''
    badge = self.get_badge(kind)
    if badge:
        return badge
    if kind not in getattr(self, '__badges__', {}):
        msg = 'Unknown badge type for {model}: {kind}'
        raise db.ValidationError(msg.format(model=self.__class__.__name__,
                                            kind=kind))

    badge = Badge(kind=kind)
    if current_user.is_authenticated:
        badge.created_by = current_user.id

    self.update(__raw__={
        '$push': {
            'badges': {
                '$each': [badge.to_mongo()],
                '$position': 0
            }
        }
    })
    self.reload()

    post_save.send(self.__class__, document=self)
    on_badge_added.send(self, kind=kind)
    return self.get_badge(kind)
[ "def", "add_badge", "(", "self", ",", "kind", ")", ":", "badge", "=", "self", ".", "get_badge", "(", "kind", ")", "if", "badge", ":", "return", "badge", "if", "kind", "not", "in", "getattr", "(", "self", ",", "'__badges__'", ",", "{", "}", ")", ":", "msg", "=", "'Unknown badge type for {model}: {kind}'", "raise", "db", ".", "ValidationError", "(", "msg", ".", "format", "(", "model", "=", "self", ".", "__class__", ".", "__name__", ",", "kind", "=", "kind", ")", ")", "badge", "=", "Badge", "(", "kind", "=", "kind", ")", "if", "current_user", ".", "is_authenticated", ":", "badge", ".", "created_by", "=", "current_user", ".", "id", "self", ".", "update", "(", "__raw__", "=", "{", "'$push'", ":", "{", "'badges'", ":", "{", "'$each'", ":", "[", "badge", ".", "to_mongo", "(", ")", "]", ",", "'$position'", ":", "0", "}", "}", "}", ")", "self", ".", "reload", "(", ")", "post_save", ".", "send", "(", "self", ".", "__class__", ",", "document", "=", "self", ")", "on_badge_added", ".", "send", "(", "self", ",", "kind", "=", "kind", ")", "return", "self", ".", "get_badge", "(", "kind", ")" ]
Perform an atomic prepend for a new badge
[ "Perform", "an", "atomic", "prepend", "for", "a", "new", "badge" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L59-L83
train
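A hedged pymongo sketch of the atomic prepend add_badge() issues (database, collection, and badge kind are illustrative; assumes a local MongoDB): $push with $each and $position: 0 inserts the new badge at the head of the array in one server-side operation, with no read-modify-write race.

from pymongo import MongoClient

coll = MongoClient().example.datasets
doc_id = coll.insert_one({'badges': []}).inserted_id
coll.update_one(
    {'_id': doc_id},
    {'$push': {'badges': {'$each': [{'kind': 'certified'}], '$position': 0}}}
)
# remove_badge() is the mirror image: an atomic $pull by kind.
coll.update_one({'_id': doc_id}, {'$pull': {'badges': {'kind': 'certified'}}})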
opendatateam/udata
udata/core/badges/models.py
BadgeMixin.remove_badge
def remove_badge(self, kind):
    '''Perform an atomic removal for a given badge'''
    self.update(__raw__={
        '$pull': {
            'badges': {'kind': kind}
        }
    })
    self.reload()
    on_badge_removed.send(self, kind=kind)
    post_save.send(self.__class__, document=self)
python
def remove_badge(self, kind):
    '''Perform an atomic removal for a given badge'''
    self.update(__raw__={
        '$pull': {
            'badges': {'kind': kind}
        }
    })
    self.reload()
    on_badge_removed.send(self, kind=kind)
    post_save.send(self.__class__, document=self)
[ "def", "remove_badge", "(", "self", ",", "kind", ")", ":", "self", ".", "update", "(", "__raw__", "=", "{", "'$pull'", ":", "{", "'badges'", ":", "{", "'kind'", ":", "kind", "}", "}", "}", ")", "self", ".", "reload", "(", ")", "on_badge_removed", ".", "send", "(", "self", ",", "kind", "=", "kind", ")", "post_save", ".", "send", "(", "self", ".", "__class__", ",", "document", "=", "self", ")" ]
Perform an atomic removal for a given badge
[ "Perform", "an", "atomic", "removal", "for", "a", "given", "badge" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L85-L94
train
opendatateam/udata
udata/core/badges/models.py
BadgeMixin.toggle_badge
def toggle_badge(self, kind):
    '''Toggle a badge given its kind'''
    badge = self.get_badge(kind)
    if badge:
        return self.remove_badge(kind)
    else:
        return self.add_badge(kind)
python
def toggle_badge(self, kind):
    '''Toggle a badge given its kind'''
    badge = self.get_badge(kind)
    if badge:
        return self.remove_badge(kind)
    else:
        return self.add_badge(kind)
[ "def", "toggle_badge", "(", "self", ",", "kind", ")", ":", "badge", "=", "self", ".", "get_badge", "(", "kind", ")", "if", "badge", ":", "return", "self", ".", "remove_badge", "(", "kind", ")", "else", ":", "return", "self", ".", "add_badge", "(", "kind", ")" ]
Toggle a badge given its kind
[ "Toggle", "a", "badge", "given", "its", "kind" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L96-L102
train
opendatateam/udata
udata/core/badges/models.py
BadgeMixin.badge_label
def badge_label(self, badge):
    '''Display the badge label for a given kind'''
    kind = badge.kind if isinstance(badge, Badge) else badge
    return self.__badges__[kind]
python
def badge_label(self, badge):
    '''Display the badge label for a given kind'''
    kind = badge.kind if isinstance(badge, Badge) else badge
    return self.__badges__[kind]
[ "def", "badge_label", "(", "self", ",", "badge", ")", ":", "kind", "=", "badge", ".", "kind", "if", "isinstance", "(", "badge", ",", "Badge", ")", "else", "badge", "return", "self", ".", "__badges__", "[", "kind", "]" ]
Display the badge label for a given kind
[ "Display", "the", "badge", "label", "for", "a", "given", "kind" ]
f016585af94b0ff6bd73738c700324adc8ba7f8f
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L104-L107
train