def _sanitizer(self, obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    return obj
Sanitizer method that will be passed to json.dumps.
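A minimal sketch of how such a sanitizer is typically wired into json.dumps via its default= hook (the surrounding class is not shown in the source, so the standalone function below is illustrative; json.dumps calls default only for objects it cannot encode itself):

import datetime
import json

def _sanitizer(obj):
    # Render datetimes as ISO-8601 strings; fall back to to_dict() if present.
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    return obj

print(json.dumps({"now": datetime.datetime(2020, 1, 1)}, default=_sanitizer))
# -> {"now": "2020-01-01T00:00:00"}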
def make_uniq_for_step(ctx, ukeys, step, stage, full_data,
                       clean_missing_after_seconds, to_uniq):
    # TODO: this still seems to work ok for Storage types json/bubble;
    # for DS we need to reload the dumped step to uniqify
    if not ukeys:
        return to_uniq
    else:
        uniq_data = bubble_lod_load(ctx, step, stage)
        ctx.say('Creating uniq identifiers for [' + step + '] information', 0)
        ctx.gbc.say('uniq_data:', stuff=uniq_data, verbosity=1000)
        # TODO: make: data->keyed.items
        uniq_step_res = make_uniq(
            ctx=ctx,
            ldict=to_uniq,
            keyed=uniq_data,
            uniqstr=ukeys,
            tag=step,
            full_data=full_data,
            remove_missing_after_seconds=clean_missing_after_seconds)
        ctx.gbc.say('uniq_step_res:', stuff=uniq_step_res, verbosity=1000)
        to_uniq_newest = get_newest_uniq(ctx.gbc, uniq_step_res)
        # TODO: selected pulled only from slice of uniq.
        # PROBLEM: slice of pull is not equal to slice of newest uniq;
        # can only select keys from newest, from slice of pulled.
        # Need a uid list from to_transform:
        # to_transform = get_gen_slice(gbc, to_transform_newest, amount, index)
        # For now not a big problem, as with 'pump' there should be no problem.
        to_uniq = to_uniq_newest
        # TODO: make keyed.items->data
        uniq_res_list = get_uniq_list(ctx.gbc, uniq_step_res)
        reset = True
        pfr = bubble_lod_dump(ctx=ctx,
                              step=step,
                              stage=stage,
                              full_data=full_data,
                              reset=reset,
                              data_gen=uniq_res_list)
        ctx.gbc.say('saved uniq ' + step + ' data res:',
                    stuff=pfr, verbosity=700)
        return to_uniq
Initially just a copy of UNIQ_PULL.
def list_nic(self, instance_id):
    output = self.client.describe_instances(InstanceIds=[instance_id])
    output = output.get("Reservations")[0].get("Instances")[0]
    return output.get("NetworkInterfaces")
List all Network Interface Controllers (NICs) attached to the given instance.
def list_ip(self, instance_id):
    output = self.client.describe_instances(InstanceIds=[instance_id])
    output = output.get("Reservations")[0].get("Instances")[0]
    ips = {}
    ips['PrivateIp'] = output.get("PrivateIpAddress")
    ips['PublicIp'] = output.get("PublicIpAddress")
    return ips
Collect the private and public IP addresses of the given instance.
def main():
    if len(argv) < 2:
        print('Usage: %s fst_file [optional: save_file]' % argv[0])
        return
    flex_a = Flexparser()
    mma = flex_a.yyparse(argv[1])
    mma.minimize()
    print(mma)
    if len(argv) == 3:
        mma.save(argv[2])
Testing function for converting Flex regular expressions to an FST DFA.
def _read_transitions(self):
    states = []
    regex = re.compile('[ \t\n\r:,]+')
    found = 0     # For maintaining the state of the yy_nxt declaration
    state = 0     # For maintaining the state of the opening and closing tag of yy_nxt
    substate = 0  # For maintaining the state of the opening and closing tag of each set in yy_nxt
    mapping = []  # For writing each set of yy_nxt
    with open(self.outfile) as flex_file:
        for cur_line in flex_file:
            if cur_line[0:35] == "static yyconst flex_int16_t yy_nxt[" \
                    or cur_line[0:33] == "static const flex_int16_t yy_nxt[":
                found = 1
                # print 'Found yy_nxt declaration'
                continue
            if found == 1:
                if state == 0 and cur_line[0:5] == "    {":
                    state = 1
                    continue
                if state == 1 and cur_line[0:7] == "    } ;":
                    state = 0
                    break
                if substate == 0 and cur_line[0:5] == "    {":
                    mapping = []
                    substate = 1
                    continue
                if substate == 1:
                    if cur_line[0:6] != "    },":
                        cur_line = "".join(cur_line.split())
                        if cur_line == '':
                            continue
                        if cur_line[len(cur_line) - 1] == ',':
                            splitted_line = regex.split(
                                cur_line[:len(cur_line) - 1])
                        else:
                            splitted_line = regex.split(cur_line)
                        mapping = mapping + splitted_line
                        continue
                    else:
                        cleared = []
                        for j in mapping:
                            cleared.append(int(j))
                        states.append(cleared)
                        mapping = []
                        substate = 0
    return states
Read DFA transitions from the flex-compiled file.

Args:
    None
Returns:
    list: The list of states and the destination for a character
def _read_accept_states(self):
    states = []
    regex = re.compile('[ \t\n\r:,]+')
    found = 0     # For maintaining the state of the yy_accept declaration
    state = 0     # For maintaining the state of the opening and closing tag of yy_accept
    mapping = []  # For writing each set of yy_accept
    with open(self.outfile) as flex_file:
        for cur_line in flex_file:
            if cur_line[0:37] == "static yyconst flex_int16_t yy_accept" \
                    or cur_line[0:35] == "static const flex_int16_t yy_accept":
                found = 1
                continue
            if found == 1:
                if state == 0 and cur_line[0:5] == "    {":
                    mapping.append(0)  # there is always a zero there
                    state = 1
                    continue
                if state == 1:
                    if cur_line[0:7] != "    } ;":
                        cur_line = "".join(cur_line.split())
                        if cur_line == '':
                            continue
                        if cur_line[len(cur_line) - 1] == ',':
                            splitted_line = regex.split(
                                cur_line[:len(cur_line) - 1])
                        else:
                            splitted_line = regex.split(cur_line)
                        mapping = mapping + splitted_line
                        continue
                    else:
                        cleared = []
                        for j in mapping:
                            cleared.append(int(j))
                        max_value = max(cleared)
                        for i in range(0, len(cleared)):
                            if cleared[i] > 0 and cleared[i] < (max_value - 1):
                                states.append(i)
                        return states
    return []
Read DFA accepted states from the flex-compiled file.

Args:
    None
Returns:
    list: The list of accepted states
def _create_states(self, states_num):
    states = []
    for i in range(0, states_num):
        states.append(i)
    return states
Create the initial list of state ids.

Args:
    states_num (int): Number of states
Returns:
    list: An initialized list
def _add_sink_state(self, states):
    cleared = []
    for i in range(0, 128):
        cleared.append(-1)
    states.append(cleared)
This function adds a sink state to the total states.

Args:
    states (list): The current states
Returns:
    None
def _create_delta(self):
    states = self._read_transitions()
    total_states = len(states)
    self._add_sink_state(states)
    nulltrans = self._read_null_transitions()

    def delta(current_state, character):
        """
        Sub function describing the transitions
        Args:
            current_state (str): The current state
            character (str): The input character
        Returns:
            str: The next state
        """
        if character != '':
            newstate = states[current_state][ord(character)]
            if newstate > 0:
                return newstate
            else:
                return total_states
        else:
            return nulltrans[current_state]

    return total_states + 1, delta
This function creates the delta transition function.

Args:
    None
Returns:
    int, func: A number indicating the total states, and the delta function
def yyparse(self, lexfile):
    temp = tempfile.gettempdir()
    self.outfile = temp + '/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(5)) + '_lex.yy.c'
    self._create_automaton_from_regex(lexfile)
    states_num, delta = self._create_delta()
    states = self._create_states(states_num)
    accepted_states = self._read_accept_states()
    if self.alphabet != []:
        alphabet = self.alphabet
    else:
        alphabet = createalphabet()
    mma = DFA(alphabet)
    for state in states:
        if state != 0:
            for char in alphabet:
                nextstate = delta(state, char)
                mma.add_arc(state - 1, nextstate - 1, char)
            if state in accepted_states:
                mma[state - 1].final = True
    if os.path.exists(self.outfile):
        os.remove(self.outfile)
    return mma
Parse a Flex file and construct the corresponding DFA.

Args:
    lexfile (str): Flex file to be parsed
Returns:
    DFA: A dfa automaton
def set_path(ctx, path_str, value, data):
    ctx.gbc.say('set_path:value:' + str(value) + ' at:' + path_str + ' in:',
                stuff=data, verbosity=1001)
    path = path_str.split('.')
    ctx.gbc.say('path:', stuff=path, verbosity=100)
    if len(path) > 1:
        destk = '.'.join(path[0:-1])
        lp = path[-1]
        ctx.gbc.say('destk:%s' % destk, verbosity=100)
        ctx.gbc.say('last:%s' % lp, verbosity=100)
        ctx.gbc.say('current keys:', stuff=data.keys(), verbosity=1001)
        if len(path) > 2:
            destk = unescape(ctx, destk)
        if destk not in data.keys():
            ctx.gbc.say('destk not in current keys:',
                        stuff=data.keys(), verbosity=1001)
            data[destk] = {}
            ctx.gbc.say('destk now added:', stuff=data, verbosity=1001)
        lp = unescape(ctx, lp)
        data[destk][lp] = value
        ctx.gbc.say('destk and val added:', stuff=data, verbosity=1001)
    else:
        path_str = unescape(ctx, path_str)
        data[path_str] = value
    ctx.gbc.say('set_path:res:', stuff=data, verbosity=1001)
    return data
Sets the given key in the given dict object to the given value. If the given path is nested, child dicts are created as appropriate. Accepts a dot-delimited path string as `path_str`.
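For comparison, a minimal standalone sketch of the dotted-path idea without the ctx logging or unescape handling; note it generalizes to arbitrary depth, whereas the function above joins all parent segments into a single key (the function name here is illustrative, not the library's API):

def set_dotted(path_str, value, data):
    # Walk/create intermediate dicts, then set the final key.
    *parents, last = path_str.split('.')
    node = data
    for key in parents:
        node = node.setdefault(key, {})
    node[last] = value
    return data

print(set_dotted('a.b.c', 1, {}))  # -> {'a': {'b': {'c': 1}}}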
def coerce(self, value):
    if isinstance(value, (int, compat.long)):
        return value
    return int(value)
Convert text values into integer values.

Args:
    value (str or int): The value to coerce.
Raises:
    TypeError: If the value is not an int or string.
    ValueError: If the value is not an int or an acceptable value.
Returns:
    int: The integer value represented.
def has_permission(self):
    objs = [None]
    if hasattr(self, 'get_perms_objects'):
        objs = self.get_perms_objects()
    else:
        if hasattr(self, 'get_object'):
            try:
                objs = [self.get_object()]
            except Http404:
                raise
            except Exception:
                pass
        if objs == [None]:
            objs = self.get_queryset()
    if (hasattr(self, 'permission_filter_queryset') and
            self.permission_filter_queryset is not False and
            self.request.method == 'GET'):
        if objs != [None]:
            self.perms_filter_queryset(objs)
        return True
    else:
        return check_perms(self.request.user,
                           self.get_permission_required(),
                           objs, self.request.method)
Permission checking for "normal" Django.
def check_permissions(self, request):
    objs = [None]
    if hasattr(self, 'get_perms_objects'):
        objs = self.get_perms_objects()
    else:
        if hasattr(self, 'get_object'):
            try:
                objs = [self.get_object()]
            except Http404:
                raise
            except Exception:
                pass
        if objs == [None]:
            objs = self.get_queryset()
        if len(objs) == 0:
            objs = [None]
    if (hasattr(self, 'permission_filter_queryset') and
            self.permission_filter_queryset is not False and
            self.request.method == 'GET'):
        if objs != [None]:
            self.perms_filter_queryset(objs)
    else:
        has_perm = check_perms(self.request.user,
                               self.get_permission_required(),
                               objs, self.request.method)
        if not has_perm:
            msg = self.get_permission_denied_message(
                default="Permission denied.")
            if isinstance(msg, Sequence):
                msg = msg[0]
            self.permission_denied(request, message=msg)
Permission checking for DRF.
def _hashed_key(self):
    return abs(int(hashlib.md5(
        self.key_prefix.encode('utf8')
    ).hexdigest(), 16)) % (10 ** (
        self._size_mod if hasattr(self, '_size_mod') else 5))
Returns a numeric hash of the redis key, at most :prop:_size_mod digits long (5 by default)
def expire_at(self, _time):
    return self._client.expireat(self.key_prefix, round(_time))
Sets the expiration time of :prop:key_prefix to @_time @_time: absolute Unix timestamp (seconds since January 1, 1970)
def pexpire_at(self, _time):
    return self._client.pexpireat(self.key_prefix, round(_time))
Sets the expiration time of :prop:key_prefix to @_time @_time: absolute Unix timestamp (milliseconds since January 1, 1970)
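A small usage sketch contrasting the two granularities; `m` is an assumed instance of one of these map classes, not a name from the source:

import time

m.expire_at(time.time() + 60)            # expire 60 seconds from now
m.pexpire_at((time.time() + 60) * 1000)  # same deadline, in milliseconds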
def _decode(self, obj):
    if self.decode_responses and isinstance(obj, bytes):
        try:
            return obj.decode(self.encoding)
        except UnicodeDecodeError:
            return obj
    return obj
Decodes @obj using :prop:encoding if :prop:decode_responses
def _loads(self, string):
    if not self.serialized:
        return self._decode(string)
    if string is not None:
        try:
            return self.serializer.loads(string)
        except TypeError:
            #: catches bytes errors with the builtin json library
            return self.serializer.loads(self._decode(string))
        except pickle.UnpicklingError as e:
            #: incr and decr methods create issues when pickle-serialized.
            #  It's a terrible idea for a serialized instance to be
            #  performing incr and decr methods, but it makes sense to
            #  catch the error regardless.
            decoded = self._decode(string)
            if decoded.isdigit():
                return decoded
            raise pickle.UnpicklingError(e)
If :prop:serialized is True, @string will be unserialized using :prop:serializer
def _dumps(self, obj):
    if not self.serialized:
        return obj
    return self.serializer.dumps(obj)
If :prop:serialized is True, @obj will be serialized using :prop:serializer
def get(self, key, default=None):
    try:
        return self[key]
    except KeyError:
        return default or self._default
Gets @key from :prop:key_prefix, defaulting to @default
def incr(self, key, by=1):
    return self._client.incr(self.get_key(key), by)
Increments @key by @by -> #int the value of @key after the increment
def decr(self, key, by=1):
    return self._client.decr(self.get_key(key), by)
Decrements @key by @by -> #int the value of @key after the decrement
def mget(self, *keys):
    keys = list(map(self.get_key, keys))
    return list(map(self._loads, self._client.mget(*keys)))
-> #list of values at the specified @keys
def update(self, data):
    if not data:
        return
    _rk, _dumps = self.get_key, self._dumps
    self._client.mset({
        _rk(key): _dumps(value)
        for key, value in data.items()})
Set given keys to their respective values @data: #dict or :class:RedisMap of |{key: value}| entries to set
def set_ttl(self, key, ttl):
    return self._client.expire(self.get_key(key), ttl)
Sets time to live for @key to @ttl seconds -> #bool True if the timeout was set
def set_pttl(self, key, ttl):
    return self._client.pexpire(self.get_key(key), ttl)
Sets time to live for @key to @ttl milliseconds -> #bool True if the timeout was set
def expire_at(self, key, _time):
    return self._client.expireat(self.get_key(key), round(_time))
Sets the expiration time of @key to @_time @_time: absolute Unix timestamp (seconds since January 1, 1970)
def pop(self, key):
    r = self[key]
    self.remove(key)
    return r
Removes @key from the instance, returns its value
def remove(self, *keys):
    keys = list(map(self.get_key, keys))
    return self._client.delete(*keys)
Deletes @keys from :prop:_client @*keys: keys to remove -> #int the number of keys that were removed
def scan(self, match="*", count=1000, cursor=0):
    cursor, data = self._client.scan(
        cursor=cursor,
        match="{}:{}".format(self.key_prefix, match),
        count=count)
    return (cursor, list(map(self._decode, data)))
Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: #int hint to redis for the amount of work to do per scan call
@cursor: the cursor position to resume from

-> #tuple (#int cursor position in scan, #list of full key names)
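A typical full-iteration loop over such a cursor-based API (again assuming `m` is one of these map instances); redis signals completion by returning cursor 0:

cursor = 0
while True:
    cursor, keys = m.scan(match="*", count=1000, cursor=cursor)
    for key in keys:
        print(key)
    if cursor == 0:
        break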
def iter(self, match="*", count=1000):
    replace_this = self.key_prefix + ":"
    for key in self._client.scan_iter(
            match="{}:{}".format(self.key_prefix, match), count=count):
        yield self._decode(key).replace(replace_this, "", 1)
Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: #int hint to redis for the amount of work to do per scan call

-> yields redis keys within this instance
def items(self):
    cursor = '0'
    _loads = self._loads
    _mget = self._client.mget
    while cursor != 0:
        cursor, keys = self.scan(cursor=cursor)
        if keys:
            vals = _mget(*keys)
            for i, key in enumerate(keys):
                yield (key.replace(self.key_prefix + ":", "", 1),
                       _loads(vals[i]))
Iterates the set of |{key: value}| entries in :prop:key_prefix of :prop:_client -> yields redis (key, value) #tuples within this instance
def clear(self, match="*", count=1000):
    cursor = '0'
    while cursor != 0:
        cursor, keys = self.scan(cursor=cursor, match=match, count=count)
        if keys:
            self._client.delete(*keys)
Removes all |{key: value}| entries in :prop:key_prefix of :prop:_client
def size(self):
    return int(self._client.hget(self._bucket_key, self.key_prefix) or 0)
-> #int number of keys in this instance
def _bucket_key(self):
    return "{}.size.{}".format(
        self.prefix,
        (self._hashed_key // 1000) if self._hashed_key > 1000
        else self._hashed_key)
Returns hash bucket key for the redis key
def incr(self, key, by=1):
    pipe = self._client.pipeline(transaction=False)
    pipe.incr(self.get_key(key), by)
    if key not in self:
        pipe.hincrby(self._bucket_key, self.key_prefix, 1)
    result = pipe.execute()
    return result[0]
:see::meth:RedisMap.incr
def update(self, data):
    result = None
    if data:
        pipe = self._client.pipeline(transaction=False)
        for k in data.keys():
            pipe.exists(self.get_key(k))
        exists = pipe.execute()
        exists = exists.count(True)
        _rk, _dumps = self.get_key, self._dumps
        data = {
            _rk(key): _dumps(value)
            for key, value in data.items()}
        pipe.mset(data)
        pipe.hincrby(self._bucket_key, self.key_prefix, len(data) - exists)
        result = pipe.execute()[0]
    return result
:see::meth:RedisMap.update
def clear(self, match="*", count=1000):
    cursor = '0'
    pipe = self._client.pipeline(transaction=False)
    while cursor != 0:
        cursor, keys = self.scan(cursor=cursor, match=match, count=count)
        if keys:
            pipe.delete(*keys)
    pipe.hdel(self._bucket_key, self.key_prefix)
    pipe.execute()
    return True
:see::meth:RedisMap.clear
def get(self, key, default=None):
    result = self._loads(self._client.get(self.get_key(key)))
    if result is not None:
        return result
    return default or self._default
Gets @key from :prop:key_prefix, defaulting to @default
def incr(self, field, by=1):
    return self._client.hincrby(self.key_prefix, field, by)
:see::meth:RedisMap.incr
def decr(self, field, by=1):
    return self._client.hincrby(self.key_prefix, field, by * -1)
:see::meth:RedisMap.decr
def mget(self, *keys):
    return list(map(
        self._loads, self._client.hmget(self.key_prefix, *keys)))
-> #list of values at the specified @keys
def all(self):
    return {
        self._decode(k): self._loads(v)
        for k, v in self._client.hgetall(self.key_prefix).items()}
-> #dict of all |{key: value}| entries in :prop:key_prefix of :prop:_client
def update(self, data):
    result = None
    if data:
        _dumps = self._dumps
        data = {
            key: _dumps(value)
            for key, value in data.items()}
        result = self._client.hmset(self.key_prefix, data)
    return result
:see::meth:RedisMap.update
def scan(self, match="*", count=1000, cursor=0):
    cursor, results = self._client.hscan(
        self.key_prefix, cursor=cursor, match=match, count=count)
    return (cursor, list(map(self._decode, results)))
:see::meth:RedisMap.scan
def iter(self, match="*", count=1000):
    for field, value in self._client.hscan_iter(
            self.key_prefix, match=match, count=count):
        yield self._decode(field)
:see::meth:RedisMap.iter
def items(self, match="*", count=1000):
    for field, value in self._client.hscan_iter(
            self.key_prefix, match=match, count=count):
        yield self._decode(field), self._loads(value)
:see::meth:RedisMap.items
def keys(self):
    for field in self._client.hkeys(self.key_prefix):
        yield self._decode(field)
:see::meth:RedisMap.keys
def values(self):
    for val in self._client.hvals(self.key_prefix):
        yield self._loads(val)
:see::meth:RedisMap.values
def get(self, key, default=None):
    try:
        result = self._loads(self._client.hget(self.key_prefix, key))
        assert result is not None
        return result
    except (AssertionError, KeyError):
        return default or self._default
Gets @key from :prop:key_prefix, defaulting to @default
def reverse_iter(self, start=None, stop=None, count=1000):
    # Walk the list in windows of @count items, from the tail backward.
    start = start if start is not None else (-1 * count)
    stop = stop if stop is not None else -1
    _loads = self._loads
    cursor = '0'
    while cursor:
        cursor = self._client.lrange(self.key_prefix, start, stop)
        for x in reversed(cursor or []):
            yield _loads(x)
        start -= count
        stop -= count
-> yields items of the list in reverse
def reverse(self):
    tmp_list = RedisList(
        randint(0, 100000000),
        prefix=self.key_prefix,
        client=self._client,
        serializer=self.serializer,
        serialized=self.serialized)
    cursor = '0'
    count = 1000
    start = (-1 * count)
    stop = -1
    _loads = self._loads
    while cursor:
        cursor = self._client.lrange(self.key_prefix, start, stop)
        if cursor:
            tmp_list.extend(map(_loads, reversed(cursor)))
        start -= count
        stop -= count
    self._client.rename(tmp_list.key_prefix, self.key_prefix)
    tmp_list.clear()
Reverses the list in place. Very expensive on large data sets. The reversed list is persisted to the redis :prop:_client as well.
def pop(self, index=None):
    if index is None:
        return self._loads(self._client.rpop(self.key_prefix))
    elif index == 0:
        return self._loads(self._client.lpop(self.key_prefix))
    else:
        _uuid = gen_rand_str(16, 24)
        r = self[index]
        self[index] = _uuid
        self.remove(_uuid)
        return r
Removes and returns the item at @index or from the end of the list -> item at @index
def extend(self, items):
    if items:
        if self.serialized:
            items = list(map(self._dumps, items))
        return self._client.rpush(self.key_prefix, *items)
Adds @items to the end of the list -> #int length of list after operation
def append(self, item):
    return self._client.rpush(self.key_prefix, self._dumps(item))
Adds @item to the end of the list -> #int length of list after operation
def count(self, value):
    cnt = 0
    for x in self:
        if x == value:
            cnt += 1
    return cnt
Not recommended for use on large lists due to time complexity, but it works. Use with caution. -> #int number of occurrences of @value
def push(self, *items):
    if self.serialized:
        items = list(map(self._dumps, items))
    return self._client.lpush(self.key_prefix, *items)
Prepends the list with @items -> #int length of list after operation
def index(self, item):
    for i, x in enumerate(self.iter()):
        if x == item:
            return i
    return None
Not recommended for use on large lists due to time complexity, but it works -> #int list index of @item
def insert(self, index, value):
    _uuid = gen_rand_str(24, 32)
    item_at_index = self[index]
    self[index] = _uuid
    uuid = _uuid
    _uuid = self._dumps(uuid)
    pipe = self._client.pipeline(transaction=True)  # Needs to be atomic
    pipe.linsert(self.key_prefix, "BEFORE", _uuid, self._dumps(value))
    pipe.linsert(self.key_prefix, "BEFORE", _uuid, item_at_index)
    results = pipe.execute()
    self.remove(uuid)
    return results[0]
Inserts @value before the item at @index in the list.
@index: #int list index to insert @value before
@value: item to insert

-> #int new length of the list on success, or -1 on failure
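The implementation works around redis LINSERT being value-based rather than index-based: it temporarily overwrites the item at @index with a unique placeholder, inserts both the new value and the displaced item before that placeholder, then removes the placeholder. A bare redis-py sketch of the same trick, under the assumption of a plain StrictRedis client (the key name and function are illustrative):

import uuid
import redis

r = redis.StrictRedis()

def insert_at(key, index, value):
    placeholder = uuid.uuid4().hex
    original = r.lindex(key, index)     # remember the displaced item
    r.lset(key, index, placeholder)     # mark the position
    pipe = r.pipeline(transaction=True)
    pipe.linsert(key, 'BEFORE', placeholder, value)
    pipe.linsert(key, 'BEFORE', placeholder, original)
    pipe.execute()
    r.lrem(key, 1, placeholder)         # drop the marker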
def remove(self, item, count=0):
    self._client.lrem(self.key_prefix, count, self._dumps(item))
Removes @item from the list for @count number of occurrences
def iter(self, start=0, count=1000):
    cursor = '0'
    _loads = self._loads
    stop = start + count
    while cursor:
        cursor = self._client.lrange(self.key_prefix, start, stop)
        for x in cursor or []:
            yield _loads(x)
        start += (count + 1)
        stop += (count + 1)
@start: #int index to start iterating from
@count: #int buffer limit per lrange call

-> yields all of the items in the list
def trim(self, start, end):
    return self._client.ltrim(self.key_prefix, start, end)
Trim the list, removing all values not within the slice between @start and @end. @start and @end can be negative numbers, just like python slicing notation.
@start: #int start position
@end: #int end position

-> result of :meth:redis.StrictRedis.ltrim
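A common use is capping a list at a fixed length, e.g. keeping only the 100 most recently appended items (`l` is an assumed instance of this list class):

l.append('newest-item')
l.trim(-100, -1)  # keep only the last 100 items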
def add(self, member):
    return self._client.sadd(self.key_prefix, self._dumps(member))
Adds @member to the set -> #int the number of @members that were added to the set, excluding pre-existing members (1 or 0)
def update(self, members):
    if isinstance(members, RedisSet):
        size = self.size
        return (self.unionstore(self.key_prefix, members.key_prefix) - size)
    if self.serialized:
        members = list(map(self._dumps, members))
    if members:
        return self._client.sadd(self.key_prefix, *members)
    return 0
Adds @members to the set @members: a :class:RedisSet object or #set -> #int the number of @members that were added to the set, excluding pre-existing members
def union(self, *others):
    others = self._typesafe_others(others)
    return set(map(
        self._loads, self._client.sunion(self.key_prefix, *others)))
Calculates union between sets @others: one or several :class:RedisSet objects or #str redis set keynames -> #set of new set members
def unioniter(self, *others):
    others = self._typesafe_others(others)
    for other in self._client.sunion(self.key_prefix, *others):
        yield self._loads(other)
The same as :meth:union, but returns iterator instead of #set @others: one or several :class:RedisSet objects or #str redis set keynames -> yields members of the resulting set
def unionstore(self, destination, *others):
    others = self._typesafe_others(others)
    destination = self._typesafe(destination)
    return self._client.sunionstore(destination, self.key_prefix, *others)
The same as :meth:union, but stores the result in @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of items in the resulting set
def intersection(self, *others):
    others = self._typesafe_others(others)
    return set(map(
        self._loads, self._client.sinter(self.key_prefix, *others)))
Calculates the intersection of all the given sets, that is, members which are present in all given sets. @others: one or several #str keynames or :class:RedisSet objects -> #set of resulting intersection between @others and this set
def interiter(self, *others):
    others = self._typesafe_others(others)
    for other in self._client.sinter(self.key_prefix, *others):
        yield self._loads(other)
The same as :meth:intersection, but returns iterator instead of #set @others: one or several #str keynames or :class:RedisSet objects -> yields members of the resulting set
def interstore(self, destination, *others):
    others = self._typesafe_others(others)
    destination = self._typesafe(destination)
    return self._client.sinterstore(destination, self.key_prefix, *others)
The same as :meth:intersection, but stores the resulting set in @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set
def difference(self, *others):
    others = self._typesafe_others(others)
    return set(map(
        self._loads, self._client.sdiff(self.key_prefix, *others)))
Calculates the difference between this set and @others @others: one or several #str keynames or :class:RedisSet objects -> set resulting from the difference between the first set and all @others.
def diffiter(self, *others):
    others = self._typesafe_others(others)
    for other in self._client.sdiff(self.key_prefix, *others):
        yield self._loads(other)
The same as :meth:difference, but returns iterator instead of #set @others: one or several #str keynames or :class:RedisSet objects -> yields members resulting from the difference between the first set and all @others.
def diffstore(self, destination, *others):
    others = self._typesafe_others(others)
    destination = self._typesafe(destination)
    return self._client.sdiffstore(destination, self.key_prefix, *others)
The same as :meth:difference, but stores the resulting set in @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set
def move(self, member, destination):
    destination = self._typesafe(destination)
    return self._client.smove(
        self.key_prefix, destination, self._dumps(member))
Moves @member from this set to @destination atomically @member: a member of this set @destination: #str redis keyname or :class:RedisSet object -> #bool True if the member was moved
def rand(self, count=1):
    result = self._client.srandmember(self.key_prefix, count)
    return set(map(self._loads, result))
Gets @count random members from the set @count: #int number of members to return -> @count set members
def remove(self, *members):
    if self.serialized:
        members = list(map(self._dumps, members))
    return self._client.srem(self.key_prefix, *members)
Removes @members from the set -> #int the number of members that were removed from the set
def members(self):
    if self.serialized:
        return set(map(
            self._loads, self._client.smembers(self.key_prefix)))
    return set(map(
        self._decode, self._client.smembers(self.key_prefix)))
-> #set of all members in the set
def scan(self, match="*", count=1000, cursor=0):
    cursor, data = self._client.sscan(
        self.key_prefix, cursor=cursor, match=match, count=count)
    return (cursor, set(map(self._loads, data)))
:see::meth:RedisMap.scan
def iter(self, match="*", count=1000):
    _loads = self._loads
    # Pass the caller's @match pattern through (the original hardcoded "*").
    for m in self._client.sscan_iter(
            self.key_prefix, match=match, count=count):
        yield _loads(m)
Iterates the set members in :prop:key_prefix of :prop:_client
@match: #str pattern to match items by
@count: #int hint to redis for the amount of work to do per scan call

-> yields members of the set
def incr(self, member, by=1):
    return self._client.zincrby(self.key_prefix, self._dumps(member), by)
Increments @member by @by within the sorted set
def decr(self, member, by=1):
    return self._client.zincrby(
        self.key_prefix, self._dumps(member), by * -1)
Decrements @member by @by within the sorted set
def add(self, *args, **kwargs):
    if args or kwargs:
        _dumps = self._dumps
        zargs = list(args)
        if args and self.serialized:
            # args format: score1, member1, score2, member2, ...
            zargs = [
                _dumps(x) if (i % 2 == 1 and self.serialized) else x
                for i, x in enumerate(args)]
        if kwargs:
            # kwargs format: member=score, member=score, ...
            zargs += [
                _dumps(x) if (i % 2 == 1 and self.serialized) else x
                for y in kwargs.items()
                for i, x in enumerate(reversed(y))]
        return self._client.zadd(self.key_prefix, *zargs)
Adds member/value pairs to the sorted set in two ways:

To add with @args:
..
    pairs = [4.0, 'member1', 5.0, 'member2']
    sorted_set.add(*pairs)
    # sorted_set.add(4.0, 'member1', 5.0, 'member2')
..

To add with @kwargs:
..
    pairs = {"member1": 4.0, "member2": 5.0}
    sorted_set.add(**pairs)
    # sorted_set.add(member1=4.0, member2=5.0)
..
def update(self, data):
    if data:
        _dumps = self._dumps
        zargs = [
            _dumps(x) if (i % 2 == 1) else x
            for y in data.items()
            for i, x in enumerate(reversed(y))]
        return self._client.zadd(self.key_prefix, *zargs)
Adds @data to the sorted set @data: #dict or dict-like object
def remove(self, *members):
    members = list(map(self._dumps, members))
    self._client.zrem(self.key_prefix, *members)
Removes @members from the sorted set
def rank(self, member):
    if self.reversed:
        return self._client.zrevrank(self.key_prefix, self._dumps(member))
    return self._client.zrank(self.key_prefix, self._dumps(member))
Gets the ASC rank of @member from the sorted set, that is, lower scores have lower ranks
def count(self, min, max):
    return self._client.zcount(self.key_prefix, min, max)
-> #int number of elements in the sorted set with a score between @min and @max.
def iter(self, start=0, stop=-1, withscores=False, reverse=None):
    reverse = reverse if reverse is not None else self.reversed
    _loads = self._loads
    for member in self._client.zrange(
            self.key_prefix, start=start, end=stop,
            withscores=withscores, desc=reverse,
            score_cast_func=self.cast):
        if withscores:
            yield (_loads(member[0]), self.cast(member[1]))
        else:
            yield _loads(member)
Return a range of values from the sorted set between @start and @stop, sorted in ascending order unless @reverse or :prop:reversed.
@start and @stop: #int, can be negative, indicating the end of the range
@withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs
@reverse: #bool indicating whether to sort the results descendingly

-> yields members or |(member, score)| #tuple pairs
def iterbyscore(self, min='-inf', max='+inf', start=None, num=None,
                withscores=False, reverse=None):
    reverse = reverse if reverse is not None else self.reversed
    zfunc = self._client.zrangebyscore if not reverse \
        else self._client.zrevrangebyscore
    _loads = self._loads
    for member in zfunc(
            self.key_prefix, min=min, max=max, start=start, num=num,
            withscores=withscores, score_cast_func=self.cast):
        if withscores:
            yield (_loads(member[0]), self.cast(member[1]))
        else:
            yield _loads(member)
Return a range of values from the sorted set with scores between @min and @max. If @start and @num are specified, return a slice of the range.
@min: #int minimum score, or #str '-inf'
@max: #int maximum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs
@reverse: #bool indicating whether to sort the results descendingly

-> yields members or |(member, score)| #tuple pairs
def itemsbyscore(self, min='-inf', max='+inf', start=None, num=None,
                 reverse=None):
    reverse = reverse if reverse is not None else self.reversed
    for member in self.iterbyscore(
            min, max, start, num, withscores=True, reverse=reverse):
        yield member
Return a range of |(member, score)| pairs from the sorted set with scores between @min and @max. If @start and @num are specified, return a slice of the range.
@min: #int minimum score, or #str '-inf'
@max: #int maximum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@reverse: #bool indicating whether to sort the results descendingly

-> yields |(member, score)| #tuple pairs
def iterscan(self, match="*", count=1000):
    if self.serialized:
        return map(
            lambda x: (self._loads(x[0]), self.cast(x[1])),
            self._client.zscan_iter(
                self.key_prefix, match=match, count=count))
    return map(
        lambda x: (self._decode(x[0]), self.cast(x[1])),
        self._client.zscan_iter(
            self.key_prefix, match=match, count=count))
Much slower than iter(), but much more memory efficient if the key/value pairs retrieved are one-offs.
@match: matches member names in the sorted set
@count: #int hint to redis for the amount of work to do per scan call

-> iterator of |(member, score)| pairs
def scan(self, match="*", count=1000, cursor=0):
    cursor, data = self._client.zscan(
        self.key_prefix, cursor=cursor, match=match, count=count)
    if self.serialized:
        return (cursor, list(map(
            lambda x: (self._loads(x[0]), self.cast(x[1])), data)))
    return (cursor, list(map(
        lambda x: (self._decode(x[0]), self.cast(x[1])), data)))
:see::meth:RedisMap.scan
def recv_blocking(conn, msglen):
    msg = b''
    while len(msg) < msglen:
        maxlen = msglen - len(msg)
        if maxlen > 4096:
            maxlen = 4096
        tmpmsg = conn.recv(maxlen)
        if not tmpmsg:
            raise RuntimeError("socket connection broken")
        msg += tmpmsg
        logging.debug("Msglen: %d of %d", len(msg), msglen)
        logging.debug("Message: %s", msg)
    return msg
Receive data until msglen bytes have been received.
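A typical caller pairs this with a fixed-size length prefix so the receiver knows how many bytes to wait for. The 4-byte big-endian prefix below is an assumed framing convention for illustration, not something the source dictates:

import struct

def recv_message(conn):
    # Read the assumed 4-byte big-endian length header, then the payload.
    header = recv_blocking(conn, 4)
    (msglen,) = struct.unpack('>I', header)
    return recv_blocking(conn, msglen)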
def compare_password(expected, actual):
    if expected == actual:
        return True, "OK"
    msg = []
    ver_exp = expected[-8:].rstrip()
    ver_act = actual[-8:].rstrip()
    if expected[:-8] != actual[:-8]:
        msg.append("Password mismatch")
    if ver_exp != ver_act:
        msg.append("asterisk_mbox version mismatch. Client: '" +
                   ver_act + "', Server: '" + ver_exp + "'")
    return False, ". ".join(msg)
Compare two 64-byte encoded passwords.
def encode_to_sha(msg):
    if isinstance(msg, str):
        msg = msg.encode('utf-8')
    return (codecs.encode(msg, "hex_codec") + (b'00' * 32))[:64]
Coerce @msg into a sha-looking bytearray (hex-encoded and zero-padded to 64 bytes)
def decode_from_sha(sha):
    if isinstance(sha, str):
        sha = sha.encode('utf-8')
    return codecs.decode(re.sub(rb'(00)*$', b'', sha), "hex_codec")
Convert a coerced sha back into the original message (strip the zero padding and hex-decode)
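A round trip through the pair, showing that the zero padding is transparent (a short sketch using the two functions above):

sha = encode_to_sha('secret')
print(sha)                   # b'736563726574' + b'00' padding, 64 bytes total
print(decode_from_sha(sha))  # b'secret'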
def put(self, item, block=True, timeout=None):
    super().put(item, block, timeout)
    self._putsocket.send(b'x')
Put @item on the queue, then write a byte to the socket to signal that an item is available.
def get(self, block=True, timeout=None):
    try:
        item = super().get(block, timeout)
        self._getsocket.recv(1)
        return item
    except queue.Empty:
        raise
Get an item from the queue and consume the matching signal byte from the socket.
def _api_path(self, item):
    if self.base_url is None:
        raise NotImplementedError("base_url not set")
    path = "/".join([x.blob["id"] for x in item.path])
    return "/".join([self.base_url, path])
Get the API path for the current cursor position.