'C4.5
ID3 computes information gain, whereas C4.5 computes the information gain
ratio; only minor changes to the ID3 version of the function above are
needed.'
def _chooseBestFeatureToSplit_C45(self, X, y):
    numFeatures = X.shape[1]
    oldEntropy = self._calcEntropy(y)
    bestGainRatio = 0.0
    bestFeatureIndex = -1
    for i in range(numFeatures):
        featList = X[:, i]
        uniqueVals = set(featList)
        newEntropy = 0.0
        splitInformation = 0.0
        for value in uniqueVals:
            sub_X, sub_y = self._splitDataSet(X, y, i, value)
            prob = len(sub_y) / float(len(y))
            newEntropy += prob * self._calcEntropy(sub_y)
            splitInformation -= prob * np.log2(prob)
        # Skip features with zero split information (only one unique value)
        # to avoid dividing by zero below.
        if splitInformation == 0.0:
            continue
        infoGain = oldEntropy - newEntropy
        gainRatio = infoGain / splitInformation
        if gainRatio > bestGainRatio:
            bestGainRatio = gainRatio
            bestFeatureIndex = i
    return bestFeatureIndex

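The gain ratio is the information gain divided by the feature's split
information: GainRatio = (H(y) - sum_v p_v * H(y_v)) / (-sum_v p_v * log2(p_v)).
A self-contained sketch of the same arithmetic on a toy two-way split (the
helper name `entropy` is illustrative, not part of the original class):

import numpy as np

def entropy(y):
    # Shannon entropy of a label array.
    _, counts = np.unique(y, return_counts=True)
    p = counts / float(len(y))
    return -np.sum(p * np.log2(p))

y = np.array([0, 0, 1, 1, 1, 1])                       # parent labels
branches = [np.array([0, 0, 1]), np.array([1, 1, 1])]  # labels per branch

probs = [len(b) / float(len(y)) for b in branches]
info_gain = entropy(y) - sum(p * entropy(b) for p, b in zip(probs, branches))
split_info = -sum(p * np.log2(p) for p in probs)
print(info_gain / split_info)  # ~0.459 for this toy split
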
'Return the label that occurs most often in labelList.'
def _majorityCnt(self, labelList):
    labelCount = {}
    for vote in labelList:
        if vote not in labelCount:
            labelCount[vote] = 0
        labelCount[vote] += 1
    # items() works on both Python 2 and 3; the original used
    # dict.iteritems(), which Python 3 removed.
    sortedClassCount = sorted(labelCount.items(), key=lambda x: x[1],
                              reverse=True)
    return sortedClassCount[0][0]

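For comparison, the same majority vote can be written with the standard
library; a minimal sketch using collections.Counter (an alternative
formulation, not the original implementation):

from collections import Counter

def majority_label(label_list):
    # most_common(1) returns [(label, count)] for the top label.
    return Counter(label_list).most_common(1)[0][0]

assert majority_label(['a', 'b', 'a']) == 'a'
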
'featureIndex: a tuple that records, for each feature (column) of X, its
index in the original data.'
def _createTree(self, X, y, featureIndex):
    labelList = list(y)
    # All samples share one label: return it as a leaf.
    if labelList.count(labelList[0]) == len(labelList):
        return labelList[0]
    # No features left to split on: fall back to the majority label.
    if len(featureIndex) == 0:
        return self._majorityCnt(labelList)
    if self._mode == 'C4.5':
        bestFeatIndex = self._chooseBestFeatureToSplit_C45(X, y)
    elif self._mode == 'ID3':
        bestFeatIndex = self._chooseBestFeatureToSplit_ID3(X, y)
    bestFeatStr = featureIndex[bestFeatIndex]
    featureIndex = list(featureIndex)
    featureIndex.remove(bestFeatStr)
    featureIndex = tuple(featureIndex)
    myTree = {bestFeatStr: {}}
    featValues = X[:, bestFeatIndex]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        sub_X, sub_y = self._splitDataSet(X, y, bestFeatIndex, value)
        myTree[bestFeatStr][value] = self._createTree(sub_X, sub_y,
                                                      featureIndex)
    return myTree

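`_calcEntropy` and `_splitDataSet` are referenced above but not included in
this excerpt. A minimal sketch of `_splitDataSet` consistent with its call
sites (keep the rows where feature `index` equals `value`, drop that column);
this is an assumption about the missing helper, not its actual code:

import numpy as np

def _splitDataSet(self, X, y, index, value):
    # Keep rows where feature `index` equals `value`; remove the used
    # column so recursive calls only see the remaining features.
    mask = X[:, index] == value
    return np.delete(X[mask], index, axis=1), y[mask]
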
'an invalid pipeline command at exec time adds the exception instance
to the list of returned values'
def test_exec_error_in_response(self, r):
    r['c'] = 'a'
    with r.pipeline() as pipe:
        pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4)
        result = pipe.execute(raise_on_error=False)

        assert result[0]
        assert r['a'] == b('1')
        assert result[1]
        assert r['b'] == b('2')

        # LPUSH against the string value at 'c' fails; the exception
        # instance is returned in place of a result.
        assert isinstance(result[2], redis.ResponseError)
        assert r['c'] == b('a')

        assert result[3]
        assert r['d'] == b('4')

        # the pipeline is restored to a usable state afterwards
        assert pipe.set('z', 'zzz').execute() == [True]
        assert r['z'] == b('zzz')

'When out of connections, block for timeout seconds, then raise'
def test_connection_pool_blocks_until_timeout(self):
    pool = self.get_pool(max_connections=1, timeout=0.1)
    pool.get_connection('_')

    start = time.time()
    with pytest.raises(redis.ConnectionError):
        pool.get_connection('_')
    assert time.time() - start >= 0.1

'When out of connections, block until another connection is released
to the pool'
def test_connection_pool_blocks_until_another_connection_released(self):
    pool = self.get_pool(max_connections=1, timeout=2)
    c1 = pool.get_connection('_')

    def target():
        time.sleep(0.1)
        pool.release(c1)

    Thread(target=target).start()
    start = time.time()
    pool.get_connection('_')
    assert time.time() - start >= 0.1

'An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368'
def test_on_connect_error(self):
    bad_connection = redis.Redis(db=9999)
    with pytest.raises(redis.RedisError):
        bad_connection.info()
    pool = bad_connection.connection_pool
    assert len(pool._available_connections) == 1
    assert not pool._available_connections[0]._sock

'If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised'
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
    with pytest.raises(redis.BusyLoadingError):
        r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
    pool = r.connection_pool
    assert len(pool._available_connections) == 1
    assert not pool._available_connections[0]._sock

'BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.'
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
    pipe = r.pipeline()
    with pytest.raises(redis.BusyLoadingError):
        pipe.immediate_execute_command('DEBUG', 'ERROR',
                                       'LOADING fake message')
    pool = r.connection_pool
    assert not pipe.connection
    assert len(pool._available_connections) == 1
    assert not pool._available_connections[0]._sock

'BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.'
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
    pipe = r.pipeline()
    pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
    with pytest.raises(redis.BusyLoadingError):
        pipe.execute()
    pool = r.connection_pool
    assert not pipe.connection
    assert len(pool._available_connections) == 1
    assert not pool._available_connections[0]._sock

'READONLY errors get turned into ReadOnlyError exceptions'
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
    with pytest.raises(redis.ReadOnlyError):
        r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')

'Older Redis versions contained \'allocation_stats\' in INFO that
was the cause of a number of bugs when parsing.'
def test_22_info(self, r):
    info = 'allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330,13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020,20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303,27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160,34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523,41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171,49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332,58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30,67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25,76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46,85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20,94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15,103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52,111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54,119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52,127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62,135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7,144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1,155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2,172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3,187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1,207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2,220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1,>=256=203'
    parsed = parse_info(info)
    assert 'allocation_stats' in parsed
    assert '6' in parsed['allocation_stats']
    assert '>=256' in parsed['allocation_stats']

'The PythonParser has some special cases for return values > 1MB'
def test_large_responses(self, r):
    data = ''.join([ascii_letters] * (5000000 // len(ascii_letters)))
    r['a'] = data
    assert r['a'] == b(data)

'High precision floating point values sent to the server should keep
precision.'
def test_floating_point_encoding(self, r):
    timestamp = 1349673917.939762
    r.zadd('a', 'a1', timestamp)
    assert r.zscore('a', 'a1') == timestamp

'If sleep is higher than timeout, it should raise an error'
def test_high_sleep_raises_error(self, sr):
    with pytest.raises(LockError):
        self.get_lock(sr, 'foo', timeout=1, sleep=2)

'Send an already packed command to the Redis server'
def send_packed_command(self, command):
    if not self._sock:
        self.connect()
    try:
        self._sock.sendall(command)
    except socket.error:
        e = sys.exc_info()[1]
        self.disconnect()
        if len(e.args) == 1:
            _errno, errmsg = 'UNKNOWN', e.args[0]
        else:
            _errno, errmsg = e.args
        raise ConnectionError('Error %s while writing to socket. %s.' %
                              (_errno, errmsg))
    except:
        self.disconnect()
        raise

'Pack a series of arguments into a valid Redis command'
def pack_command(self, *args):
    args_output = SYM_EMPTY.join([
        SYM_EMPTY.join((SYM_DOLLAR, b(str(len(k))), SYM_CRLF, k, SYM_CRLF))
        for k in imap(self.encode, args)
    ])
    output = SYM_EMPTY.join(
        (SYM_STAR, b(str(len(args))), SYM_CRLF, args_output))
    return output

'Round-robin slave balancer'
def rotate_slaves(self):
    slaves = self.sentinel_manager.discover_slaves(self.service_name)
    if slaves:
        if self.slave_rr_counter is None:
            self.slave_rr_counter = random.randint(0, len(slaves) - 1)
        for _ in xrange(len(slaves)):
            self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
            slave = slaves[self.slave_rr_counter]
            yield slave
    # Fall back to the master if no slave is available.
    try:
        yield self.get_master_address()
    except MasterNotFoundError:
        pass
    raise SlaveNotFoundError('No slave found for %r' % self.service_name)

'Asks sentinel servers for the Redis master\'s address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.'
| def discover_master(self, service_name):
| for (sentinel_no, sentinel) in enumerate(self.sentinels):
try:
masters = sentinel.sentinel_masters()
except (ConnectionError, TimeoutError):
continue
state = masters.get(service_name)
if (state and self.check_master_state(state, service_name)):
(self.sentinels[0], self.sentinels[sentinel_no]) = (sentinel, self.sentinels[0])
return (state['ip'], state['port'])
raise MasterNotFoundError(('No master found for %r' % (service_name,)))
|
'Remove slaves that are in an ODOWN or SDOWN state'
def filter_slaves(self, slaves):
    slaves_alive = []
    for slave in slaves:
        if slave['is_odown'] or slave['is_sdown']:
            continue
        slaves_alive.append((slave['ip'], slave['port']))
    return slaves_alive

'Returns a list of alive slaves for service ``service_name``'
def discover_slaves(self, service_name):
    for sentinel in self.sentinels:
        try:
            slaves = sentinel.sentinel_slaves(service_name)
        except (ConnectionError, ResponseError, TimeoutError):
            continue
        slaves = self.filter_slaves(slaves)
        if slaves:
            return slaves
    return []

'Returns a redis client instance for the ``service_name`` master.
A SentinelConnectionPool class is used to retrieve the master\'s
address before establishing a new connection.
NOTE: If the master\'s address has changed, any cached connections to
the old master are closed.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.'
def master_for(self, service_name, redis_class=StrictRedis,
               connection_pool_class=SentinelConnectionPool, **kwargs):
    kwargs['is_master'] = True
    connection_kwargs = dict(self.connection_kwargs)
    connection_kwargs.update(kwargs)
    return redis_class(connection_pool=connection_pool_class(
        service_name, self, **connection_kwargs))

'Returns redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave\'s
address before establishing a new connection.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.'
def slave_for(self, service_name, redis_class=StrictRedis,
              connection_pool_class=SentinelConnectionPool, **kwargs):
    kwargs['is_master'] = False
    connection_kwargs = dict(self.connection_kwargs)
    connection_kwargs.update(kwargs)
    return redis_class(connection_pool=connection_pool_class(
        service_name, self, **connection_kwargs))

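A typical usage sketch, assuming a Sentinel built from a list of
(host, port) sentinel addresses; the 'mymaster' service name and the
addresses below are illustrative:

from redis.sentinel import Sentinel

sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
master = sentinel.master_for('mymaster', socket_timeout=0.1)  # read/write
slave = sentinel.slave_for('mymaster', socket_timeout=0.1)    # read-only
master.set('foo', 'bar')
print(slave.get('foo'))
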
'Return a Redis client object configured from the given URL, which must
use either `the ``redis://`` scheme
<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ for RESP
connections or the ``unix://`` scheme for Unix domain sockets.
For example::
redis://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class\'s initializer. In the case
of conflicting arguments, querystring arguments always win.'
@classmethod
def from_url(cls, url, db=None, **kwargs):
    connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
    return cls(connection_pool=connection_pool)

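For example, assuming a server on the default port (URLs illustrative):

import redis

r0 = redis.StrictRedis.from_url('redis://localhost:6379/0')
r2 = redis.StrictRedis.from_url('redis://localhost:6379?db=2')  # db via querystring
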
'Set a custom Response Callback'
def set_response_callback(self, command, callback):
    self.response_callbacks[command] = callback

'Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.'
def pipeline(self, transaction=True, shard_hint=None):
    return StrictPipeline(
        self.connection_pool,
        self.response_callbacks,
        transaction,
        shard_hint)

'Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The \'func\' callable
should expect a single argument which is a Pipeline object.'
def transaction(self, func, *watches, **kwargs):
    shard_hint = kwargs.pop('shard_hint', None)
    value_from_callable = kwargs.pop('value_from_callable', False)
    watch_delay = kwargs.pop('watch_delay', None)
    with self.pipeline(True, shard_hint) as pipe:
        while 1:
            try:
                if watches:
                    pipe.watch(*watches)
                func_value = func(pipe)
                exec_value = pipe.execute()
                return func_value if value_from_callable else exec_value
            except WatchError:
                if watch_delay is not None and watch_delay > 0:
                    time.sleep(watch_delay)
                continue

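A usage sketch: atomically increment a counter under WATCH, assuming ``r``
is a client instance and 'counter' holds an integer (both illustrative):

def increment(pipe):
    # Runs with 'counter' WATCHed; read immediately, then queue the write.
    current = int(pipe.get('counter') or 0)
    pipe.multi()
    pipe.set('counter', current + 1)

r.transaction(increment, 'counter')
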
'Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it\'s available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release thread-2\'s lock.
In some use cases it\'s necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn\'t disabled in this case, the worker thread won\'t see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren\'t common and as such default to using
thread local storage.'
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
         lock_class=None, thread_local=True):
    if lock_class is None:
        if self._use_lua_lock is None:
            # Determine whether Lua locks are supported by trying to
            # register the necessary scripts.
            try:
                LuaLock.register_scripts(self)
                self._use_lua_lock = True
            except ResponseError:
                self._use_lua_lock = False
        lock_class = self._use_lua_lock and LuaLock or Lock
    return lock_class(self, name, timeout=timeout, sleep=sleep,
                      blocking_timeout=blocking_timeout,
                      thread_local=thread_local)

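A usage sketch, assuming ``r`` is a client instance; Lock objects also
support the context-manager protocol (the key name, timeout, and
``do_work`` below are illustrative):

with r.lock('my-lock', timeout=5):
    do_work()  # placeholder; the lock is released automatically on exit
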
'Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.'
def pubsub(self, **kwargs):
    return PubSub(self.connection_pool, **kwargs)

'Execute a command and return a parsed response'
def execute_command(self, *args, **options):
    pool = self.connection_pool
    command_name = args[0]
    connection = pool.get_connection(command_name, **options)
    try:
        connection.send_command(*args)
        return self.parse_response(connection, command_name, **options)
    except (ConnectionError, TimeoutError) as e:
        connection.disconnect()
        if not connection.retry_on_timeout and isinstance(e, TimeoutError):
            raise
        # Retry the command once on a fresh connection.
        connection.send_command(*args)
        return self.parse_response(connection, command_name, **options)
    finally:
        pool.release(connection)

'Parses a response from the Redis server'
def parse_response(self, connection, command_name, **options):
    response = connection.read_response()
    if command_name in self.response_callbacks:
        return self.response_callbacks[command_name](response, **options)
    return response

'Tell the Redis server to rewrite the AOF file from data in memory.'
def bgrewriteaof(self):
    return self.execute_command('BGREWRITEAOF')

'Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.'
def bgsave(self):
    return self.execute_command('BGSAVE')

'Disconnects the client at ``address`` (ip:port)'
def client_kill(self, address):
    return self.execute_command('CLIENT KILL', address)

'Returns a list of currently connected clients'
def client_list(self):
    return self.execute_command('CLIENT LIST')

'Returns the current connection name'
def client_getname(self):
    return self.execute_command('CLIENT GETNAME')

'Sets the current connection name'
def client_setname(self, name):
    return self.execute_command('CLIENT SETNAME', name)

'Return a dictionary of configuration based on the ``pattern``'
def config_get(self, pattern='*'):
    return self.execute_command('CONFIG GET', pattern)

'Set config item ``name`` with ``value``'
def config_set(self, name, value):
    return self.execute_command('CONFIG SET', name, value)

'Reset runtime statistics'
def config_resetstat(self):
    return self.execute_command('CONFIG RESETSTAT')

'Rewrite config file with the minimal change to reflect running config'
def config_rewrite(self):
    return self.execute_command('CONFIG REWRITE')

'Returns the number of keys in the current database'
def dbsize(self):
    return self.execute_command('DBSIZE')

'Returns version specific meta information about a given key'
def debug_object(self, key):
    return self.execute_command('DEBUG OBJECT', key)

'Echo the string back from the server'
def echo(self, value):
    return self.execute_command('ECHO', value)

'Delete all keys in all databases on the current host'
def flushall(self):
    return self.execute_command('FLUSHALL')

'Delete all keys in the current database'
def flushdb(self):
    return self.execute_command('FLUSHDB')

'Returns a dictionary containing information about the Redis server.
The ``section`` option can be used to select a specific section
of information.
The section option is not supported by older versions of Redis Server,
and will generate a ResponseError.'
def info(self, section=None):
    if section is None:
        return self.execute_command('INFO')
    else:
        return self.execute_command('INFO', section)

'Return a Python datetime object representing the last time the
Redis database was saved to disk'
def lastsave(self):
    return self.execute_command('LASTSAVE')

'Return the encoding, idletime, or refcount about the key'
def object(self, infotype, key):
    return self.execute_command('OBJECT', infotype, key, infotype=infotype)

'Ping the Redis server'
def ping(self):
    return self.execute_command('PING')

'Tell the Redis server to save its data to disk,
blocking until the save is complete'
def save(self):
    return self.execute_command('SAVE')

'Redis Sentinel\'s SENTINEL command.'
def sentinel(self, *args):
    warnings.warn(DeprecationWarning('Use the individual sentinel_* methods'))

'Returns a (host, port) pair for the given ``service_name``'
def sentinel_get_master_addr_by_name(self, service_name):
    return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
                                service_name)

'Returns a dictionary containing the specified master\'s state.'
def sentinel_master(self, service_name):
    return self.execute_command('SENTINEL MASTER', service_name)

'Returns a list of dictionaries containing each master\'s state.'
def sentinel_masters(self):
    return self.execute_command('SENTINEL MASTERS')

'Add a new master to Sentinel to be monitored'
def sentinel_monitor(self, name, ip, port, quorum):
    return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)

'Remove a master from Sentinel\'s monitoring'
def sentinel_remove(self, name):
    return self.execute_command('SENTINEL REMOVE', name)

'Returns a list of sentinels for ``service_name``'
def sentinel_sentinels(self, service_name):
    return self.execute_command('SENTINEL SENTINELS', service_name)

'Set Sentinel monitoring parameters for a given master'
def sentinel_set(self, name, option, value):
    return self.execute_command('SENTINEL SET', name, option, value)

'Returns a list of slaves for ``service_name``'
def sentinel_slaves(self, service_name):
    return self.execute_command('SENTINEL SLAVES', service_name)

'Shutdown the server'
def shutdown(self):
    try:
        self.execute_command('SHUTDOWN')
    except ConnectionError:
        # A ConnectionError here is expected: the server closes the
        # connection as part of shutting down.
        return
    raise RedisError('SHUTDOWN seems to have failed.')

'Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.'
def slaveof(self, host=None, port=None):
    if host is None and port is None:
        return self.execute_command('SLAVEOF', Token.get_token('NO'),
                                    Token.get_token('ONE'))
    return self.execute_command('SLAVEOF', host, port)

'Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.'
def slowlog_get(self, num=None):
    args = ['SLOWLOG GET']
    if num is not None:
        args.append(num)
    return self.execute_command(*args)

'Get the number of items in the slowlog'
def slowlog_len(self):
    return self.execute_command('SLOWLOG LEN')

'Remove all items in the slowlog'
def slowlog_reset(self):
    return self.execute_command('SLOWLOG RESET')

'Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).'
def time(self):
    return self.execute_command('TIME')

'Redis synchronous replication.
Returns the number of replicas that processed the query once at least
``num_replicas`` have done so, or once ``timeout`` is reached.'
def wait(self, num_replicas, timeout):
    return self.execute_command('WAIT', num_replicas, timeout)

'Appends the string ``value`` to the value at ``key``. If ``key``
doesn\'t already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.'
def append(self, key, value):
    return self.execute_command('APPEND', key, value)

'Returns the count of set bits in the value of ``key``. Optional
``start`` and ``end`` parameters indicate which bytes to consider'
def bitcount(self, key, start=None, end=None):
    params = [key]
    if start is not None and end is not None:
        params.append(start)
        params.append(end)
    elif (start is not None and end is None) or \
            (end is not None and start is None):
        raise RedisError('Both start and end must be specified')
    return self.execute_command('BITCOUNT', *params)

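For example, assuming ``r`` is a client instance (key and offsets
illustrative; the offsets count bytes, not bits):

r.set('bits', b'\xff\xf0')
r.bitcount('bits')        # 12 set bits in total
r.bitcount('bits', 0, 0)  # 8 set bits in the first byte
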
'Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.'
def bitop(self, operation, dest, *keys):
    return self.execute_command('BITOP', operation, dest, *keys)

'Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.'
def bitpos(self, key, bit, start=None, end=None):
    if bit not in (0, 1):
        raise RedisError('bit must be 0 or 1')
    params = [key, bit]
    if start is not None:
        params.append(start)
        if end is not None:
            params.append(end)
    elif end is not None:
        raise RedisError('start argument is not set, when end is specified')
    return self.execute_command('BITPOS', *params)

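For example, assuming ``r`` is a client instance (values illustrative):

r.set('bits', b'\x00\x0f')
r.bitpos('bits', 1)     # 12: the first set bit is in the second byte
r.bitpos('bits', 1, 1)  # 12: same result when searching from byte 1
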
'Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``'
def decr(self, name, amount=1):
    return self.execute_command('DECRBY', name, amount)

'Delete one or more keys specified by ``names``'
def delete(self, *names):
    return self.execute_command('DEL', *names)

'Return a serialized version of the value stored at the specified key.
If key does not exist a nil bulk reply is returned.'
def dump(self, name):
    return self.execute_command('DUMP', name)

'Returns a boolean indicating whether key ``name`` exists'
def exists(self, name):
    return self.execute_command('EXISTS', name)

'Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.'
def expire(self, name, time):
    if isinstance(time, datetime.timedelta):
        time = time.seconds + time.days * 24 * 3600
    return self.execute_command('EXPIRE', name, time)

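A ``timedelta`` is converted to whole seconds, so the two calls below are
equivalent (key and interval illustrative, ``r`` an assumed client):

import datetime

r.expire('mykey', 90)
r.expire('mykey', datetime.timedelta(minutes=1, seconds=30))  # also 90s
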
'Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.'
def expireat(self, name, when):
    if isinstance(when, datetime.datetime):
        when = int(mod_time.mktime(when.timetuple()))
    return self.execute_command('EXPIREAT', name, when)

'Return the value at key ``name``, or None if the key doesn\'t exist'
def get(self, name):
    return self.execute_command('GET', name)

'Return the value at key ``name``, raises a KeyError if the key
doesn\'t exist.'
def __getitem__(self, name):
    value = self.get(name)
    if value is not None:
        return value
    raise KeyError(name)

'Returns a boolean indicating the value of ``offset`` in ``name``'
def getbit(self, name, offset):
    return self.execute_command('GETBIT', name, offset)

'Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)'
def getrange(self, key, start, end):
    return self.execute_command('GETRANGE', key, start, end)

'Sets the value at key ``name`` to ``value``
and returns the old value at key ``name`` atomically.'
def getset(self, name, value):
    return self.execute_command('GETSET', name, value)

'Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``'
def incr(self, name, amount=1):
    return self.execute_command('INCRBY', name, amount)

'Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``'
def incrby(self, name, amount=1):
    return self.incr(name, amount)

'Increments the value at key ``name`` by floating ``amount``.
If no key exists, the value will be initialized as ``amount``'
def incrbyfloat(self, name, amount=1.0):
    return self.execute_command('INCRBYFLOAT', name, amount)

'Returns a list of keys matching ``pattern``'
def keys(self, pattern='*'):
    return self.execute_command('KEYS', pattern)

'Returns a list of values ordered identically to ``keys``'
def mget(self, keys, *args):
    args = list_or_args(keys, args)
    return self.execute_command('MGET', *args)

'Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.'
def mset(self, *args, **kwargs):
    if args:
        if len(args) != 1 or not isinstance(args[0], dict):
            raise RedisError('MSET requires **kwargs or a single dict arg')
        kwargs.update(args[0])
    items = []
    for pair in iteritems(kwargs):
        items.extend(pair)
    return self.execute_command('MSET', *items)

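Both calling styles are therefore equivalent (keys and values illustrative,
``r`` an assumed client):

r.mset({'a': '1', 'b': '2'})
r.mset(a='1', b='2')
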
'Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.'
def msetnx(self, *args, **kwargs):
    if args:
        if len(args) != 1 or not isinstance(args[0], dict):
            raise RedisError('MSETNX requires **kwargs or a single dict arg')
        kwargs.update(args[0])
    items = []
    for pair in iteritems(kwargs):
        items.extend(pair)
    return self.execute_command('MSETNX', *items)

'Moves the key ``name`` to a different Redis database ``db``'
def move(self, name, db):
    return self.execute_command('MOVE', name, db)

'Removes an expiration on ``name``'
def persist(self, name):
    return self.execute_command('PERSIST', name)

'Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.'
def pexpire(self, name, time):
    if isinstance(time, datetime.timedelta):
        ms = int(time.microseconds / 1000)
        time = (time.seconds + time.days * 24 * 3600) * 1000 + ms
    return self.execute_command('PEXPIRE', name, time)

'Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.'
def pexpireat(self, name, when):
    if isinstance(when, datetime.datetime):
        ms = int(when.microsecond / 1000)
        when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
    return self.execute_command('PEXPIREAT', name, when)

'Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object'
def psetex(self, name, time_ms, value):
    if isinstance(time_ms, datetime.timedelta):
        ms = int(time_ms.microseconds / 1000)
        time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
    return self.execute_command('PSETEX', name, time_ms, value)

'Returns the number of milliseconds until the key ``name`` will expire'
def pttl(self, name):
    return self.execute_command('PTTL', name)

'Returns the name of a random key'
def randomkey(self):
    return self.execute_command('RANDOMKEY')

'Rename key ``src`` to ``dst``'
def rename(self, src, dst):
    return self.execute_command('RENAME', src, dst)

'Rename key ``src`` to ``dst`` if ``dst`` doesn\'t already exist'
def renamenx(self, src, dst):
    return self.execute_command('RENAMENX', src, dst)

'Create a key using the provided serialized value, previously obtained
using DUMP.'
def restore(self, name, ttl, value, replace=False):
    params = [name, ttl, value]
    if replace:
        params.append('REPLACE')
    return self.execute_command('RESTORE', *params)

'Set the value at key ``name`` to ``value``
``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
``nx`` if set to True, set the value at key ``name`` to ``value`` if it
does not already exist.
``xx`` if set to True, set the value at key ``name`` to ``value`` if it
already exists.'
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
    pieces = [name, value]
    if ex is not None:
        pieces.append('EX')
        if isinstance(ex, datetime.timedelta):
            ex = ex.seconds + ex.days * 24 * 3600
        pieces.append(ex)
    if px is not None:
        pieces.append('PX')
        if isinstance(px, datetime.timedelta):
            ms = int(px.microseconds / 1000)
            px = (px.seconds + px.days * 24 * 3600) * 1000 + ms
        pieces.append(px)
    if nx:
        pieces.append('NX')
    if xx:
        pieces.append('XX')
    return self.execute_command('SET', *pieces)

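For example, assuming ``r`` is a client instance (key and values
illustrative):

r.set('session', 'abc123', ex=30)             # expire in 30 seconds
r.set('session', 'abc123', nx=True)           # only set if missing
r.set('session', 'def456', xx=True, px=1500)  # only overwrite; 1.5s TTL
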