idx (int64, 0-63k) | question (code string, 61-4.03k chars) | target (docstring, 6-1.23k chars)
---|---|---|
60,700 |
def map_add(self, key, mapkey, value, create=False, **kwargs):
    op = SD.upsert(mapkey, value)
    sdres = self.mutate_in(key, op, **kwargs)
    return self._wrap_dsop(sdres)
|
Set a value for a key in a map.
|
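A hedged usage sketch of these map helpers (Couchbase Python SDK 2.x style; the connection string, document ID, and keys are illustrative, and a reachable cluster is assumed):
from couchbase.bucket import Bucket

cb = Bucket('couchbase://localhost/default')
cb.upsert('user:roles', {})                      # a map is just a JSON document
cb.map_add('user:roles', 'alice', 'admin')       # set mapkey 'alice'
print(cb.map_get('user:roles', 'alice').value)   # 'admin'
print(cb.map_size('user:roles'))                 # 1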
60,701 |
def map_get(self, key, mapkey):
    op = SD.get(mapkey)
    sdres = self.lookup_in(key, op)
    return self._wrap_dsop(sdres, True)
|
Retrieve a value from a map.
|
60,702 |
def map_remove(self, key, mapkey, **kwargs):
    op = SD.remove(mapkey)
    sdres = self.mutate_in(key, op, **kwargs)
    return self._wrap_dsop(sdres)
|
Remove an item from a map.
|
60,703 |
def map_size(self, key):
    rv = self.get(key)
    return len(rv.value)
|
Get the number of items in the map.
|
60,704 |
def list_append(self, key, value, create=False, **kwargs):
    op = SD.array_append('', value)
    sdres = self.mutate_in(key, op, **kwargs)
    return self._wrap_dsop(sdres)
|
Add an item to the end of a list.
|
60,705 |
def list_prepend(self, key, value, create=False, **kwargs):
    op = SD.array_prepend('', value)
    sdres = self.mutate_in(key, op, **kwargs)
    return self._wrap_dsop(sdres)
|
Add an item to the beginning of a list.
|
60,706 |
def list_set(self, key, index, value, **kwargs):
    op = SD.replace('[{0}]'.format(index), value)
    sdres = self.mutate_in(key, op, **kwargs)
    return self._wrap_dsop(sdres)
|
Sets an item within a list at a given position.
|
60,707 |
def set_add(self, key, value, create=False, **kwargs):
    op = SD.array_addunique('', value)
    try:
        sdres = self.mutate_in(key, op, **kwargs)
        return self._wrap_dsop(sdres)
    except E.SubdocPathExistsError:
        pass
|
Add an item to a set if the item does not yet exist.
|
60,708 |
def set_remove(self, key, value, **kwargs):
    while True:
        rv = self.get(key)
        try:
            ix = rv.value.index(value)
            kwargs['cas'] = rv.cas
            return self.list_remove(key, ix, **kwargs)
        except E.KeyExistsError:
            pass
        except ValueError:
            return
|
Remove an item from a set.
|
60,709 |
def list_remove(self, key, index, **kwargs):
    return self.map_remove(key, '[{0}]'.format(index), **kwargs)
|
Remove the element at a specific index from a list.
|
60,710 |
def queue_push(self, key, value, create=False, **kwargs):
    return self.list_prepend(key, value, **kwargs)
|
Add an item to the end of a queue.
|
60,711 |
def queue_pop(self, key, **kwargs):
    while True:
        try:
            itm = self.list_get(key, -1)
        except IndexError:
            raise E.QueueEmpty

        kwargs['cas'] = itm.cas
        try:
            self.list_remove(key, -1, **kwargs)
            return itm
        except E.KeyExistsError:
            pass
        except IndexError:
            raise E.QueueEmpty
|
Remove and return the first item in the queue.
|
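The pop loop above is an optimistic compare-and-swap (CAS) retry: read an item along with its CAS token, attempt the removal conditioned on that token, and retry whenever a concurrent writer changed the document first. A minimal self-contained sketch of the same pattern (the store and cas_remove helper are invented for illustration, not the Couchbase API):
class CasConflict(Exception):
    """Raised when the stored CAS no longer matches (another writer won)."""

class TinyStore:
    # Toy in-memory store: each key maps to (value, cas).
    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data[key]  # -> (value, cas)

    def cas_remove(self, key, cas):
        value, current_cas = self._data[key]
        if current_cas != cas:
            raise CasConflict
        del self._data[key]
        return value

def pop_with_retry(store, key):
    # Optimistic-concurrency loop, mirroring queue_pop above.
    while True:
        value, cas = store.get(key)
        try:
            return store.cas_remove(key, cas)
        except CasConflict:
            continue  # someone else changed the doc; re-read and retry

store = TinyStore()
store._data['q'] = ('job-1', 42)
print(pop_with_retry(store, 'q'))  # -> 'job-1'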
60,712 |
def _callback(self, mres):
    try:
        rows = self._process_payload(self.raw.rows)
        if rows:
            self.on_rows(rows)
        if self.raw.done:
            self.on_done()
    finally:
        if self.raw.done:
            self._clear()
|
This is invoked as the row callback. If rows is true then we are a row callback; otherwise the request has ended and it's time to collect the other data.
|
60,713 |
def create(cls, name, email, cb):
    it = cls(name, create_structure=True)
    it.value['email'] = email
    cb.upsert_multi(ItemSequence([it]))
    return it
|
Create the basic structure of a player.
|
60,714 |
def _doc_rev(self, res):
    jstr = res.headers['X-Couchbase-Meta']
    jobj = json.loads(jstr)
    return jobj['rev']
|
Returns the rev id from the header.
|
60,715 |
def design_create(self, name, ddoc, use_devmode=True, syncwait=0):
    name = self._cb._mk_devmode(name, use_devmode)
    fqname = "_design/{0}".format(name)

    if not isinstance(ddoc, dict):
        ddoc = json.loads(ddoc)
    ddoc = ddoc.copy()
    ddoc['_id'] = fqname
    ddoc = json.dumps(ddoc)

    existing = None
    if syncwait:
        try:
            existing = self.design_get(name, use_devmode=False)
        except CouchbaseError:
            pass

    ret = self._cb._http_request(type=_LCB.LCB_HTTP_TYPE_VIEW,
                                 path=fqname,
                                 method=_LCB.LCB_HTTP_METHOD_PUT,
                                 post_data=ddoc,
                                 content_type="application/json")
    self._design_poll(name, 'add', existing, syncwait,
                      use_devmode=use_devmode)
    return ret
|
Store a design document.
|
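A hedged sketch of storing a design document through this manager API (SDK 2.x style; the bucket, view definition, and names are illustrative):
from couchbase.bucket import Bucket

cb = Bucket('couchbase://localhost/beer-sample')
mgr = cb.bucket_manager()

# A design document is a dict of named views, each with a JavaScript map
# (and optional reduce) function.
ddoc = {
    'views': {
        'by_name': {
            'map': 'function (doc, meta) { emit(doc.name, null); }'
        }
    }
}
mgr.design_create('beers', ddoc, use_devmode=True, syncwait=5)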
60,716 |
def design_get(self, name, use_devmode=True):
    name = self._mk_devmode(name, use_devmode)
    existing = self._http_request(type=_LCB.LCB_HTTP_TYPE_VIEW,
                                  path="_design/" + name,
                                  method=_LCB.LCB_HTTP_METHOD_GET,
                                  content_type="application/json")
    return existing
|
Retrieve a design document.
|
60,717 |
def design_delete(self, name, use_devmode=True, syncwait=0):
    name = self._mk_devmode(name, use_devmode)
    existing = None
    if syncwait:
        try:
            existing = self.design_get(name, use_devmode=False)
        except CouchbaseError:
            pass

    ret = self._http_request(type=_LCB.LCB_HTTP_TYPE_VIEW,
                             path="_design/" + name,
                             method=_LCB.LCB_HTTP_METHOD_DELETE)
    self._design_poll(name, 'del', existing, syncwait)
    return ret
|
Delete a design document.
|
60,718 |
def design_list(self):
    ret = self._http_request(
        type=_LCB.LCB_HTTP_TYPE_MANAGEMENT,
        path="/pools/default/buckets/{0}/ddocs".format(self._cb.bucket),
        method=_LCB.LCB_HTTP_METHOD_GET)

    real_rows = {}
    for r in ret.value['rows']:
        real_rows[r['doc']['meta']['id']] = r['doc']['json']

    ret.value.clear()
    ret.value.update(real_rows)
    return ret
|
List all design documents for the current bucket.
|
60,719 |
def n1ql_index_create(self, ix, **kwargs):
    defer = kwargs.pop('defer', False)
    ignore_exists = kwargs.pop('ignore_exists', False)
    primary = kwargs.pop('primary', False)
    fields = kwargs.pop('fields', [])
    cond = kwargs.pop('condition', None)

    if kwargs:
        raise TypeError('Unknown keyword arguments', kwargs)

    info = self._mk_index_def(ix, primary)

    if primary and fields:
        raise TypeError('Cannot create primary index with explicit fields')
    elif not primary and not fields:
        raise ValueError('Fields required for non-primary index')

    if fields:
        info.fields = fields
    if primary and info.name is N1QL_PRIMARY_INDEX:
        del info.name

    if cond:
        if primary:
            raise ValueError('cannot specify condition for primary index')
        info.condition = cond

    options = {'ignore_exists': ignore_exists, 'defer': defer}
    return IxmgmtRequest(self._cb, 'create', info, **options).execute()
|
Create an index for use with N1QL.
|
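A hedged sketch tying together the index-management calls in this block (SDK 2.x style; bucket, index, and field names are illustrative):
from couchbase.bucket import Bucket

cb = Bucket('couchbase://localhost/travel-sample')
mgr = cb.bucket_manager()

# Define indexes as deferred, kick off the build, then wait for completion.
mgr.n1ql_index_create('ix_city_name', fields=['city', 'name'],
                      defer=True, ignore_exists=True)
mgr.n1ql_index_create_primary(defer=True, ignore_exists=True)
pending = mgr.n1ql_index_build_deferred()
mgr.n1ql_index_watch(pending, timeout=60, interval=0.5)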
60,720 |
def n1ql_index_create_primary(self, defer=False, ignore_exists=False):
    return self.n1ql_index_create(
        '', defer=defer, primary=True, ignore_exists=ignore_exists)
|
Create the primary index on the bucket.
|
60,721 |
def n1ql_index_drop(self, ix, primary=False, **kwargs):
    info = self._mk_index_def(ix, primary)
    return IxmgmtRequest(self._cb, 'drop', info, **kwargs).execute()
|
Delete an index from the cluster.
|
60,722 |
def n1ql_index_build_deferred(self, other_buckets=False):
    info = N1qlIndex()
    if not other_buckets:
        info.keyspace = self._cb.bucket
    return IxmgmtRequest(self._cb, 'build', info).execute()
|
Instruct the server to begin building any previously deferred index definitions.
|
60,723 |
def n1ql_index_watch(self, indexes, timeout=30, interval=0.2,
                     watch_primary=False):
    kwargs = {
        'timeout_us': int(timeout * 1000000),
        'interval_us': int(interval * 1000000),
    }
    ixlist = [N1qlIndex.from_any(x, self._cb.bucket) for x in indexes]
    if watch_primary:
        ixlist.append(
            N1qlIndex.from_any(N1QL_PRIMARY_INDEX, self._cb.bucket))
    return IxmgmtRequest(self._cb, 'watch', ixlist, **kwargs).execute()
|
Await completion of index building.
|
60,724 |
def _set_range_common(self, k_sugar, k_start, k_end, value):
    if not isinstance(value, (list, tuple, _Unspec)):
        raise ArgumentError.pyexc(
            "Range specification for {0} must be a list, tuple or "
            "UNSPEC".format(k_sugar))

    if (self._user_options.get(k_start, UNSPEC) is not UNSPEC or
            self._user_options.get(k_end, UNSPEC) is not UNSPEC):
        raise ArgumentError.pyexc(
            "Cannot specify {0} with either {1} or {2}".format(
                k_sugar, k_start, k_end))

    if not value:
        self._set_common(k_start, UNSPEC, set_user=False)
        self._set_common(k_end, UNSPEC, set_user=False)
        self._user_options[k_sugar] = UNSPEC
        return

    if len(value) not in (1, 2):
        raise ArgumentError.pyexc(
            "Range specification must have one or two elements", value)

    value = value[::]
    if len(value) == 1:
        value.append(UNSPEC)

    for p, ix in ((k_start, 0), (k_end, 1)):
        self._set_common(p, value[ix], set_user=False)

    self._user_options[k_sugar] = value
|
Checks to see if the client-side convenience key is present and, if so, converts the sugar convenience key into its real server-side equivalents.
|
60,725 |
def update(self, copy=False, **params):
    if copy:
        self = deepcopy(self)

    for k, v in params.items():
        if not hasattr(self, k):
            if not self.unrecognized_ok:
                raise ArgumentError.pyexc("Unknown option", k)
            self._set_common(k, v)
        else:
            setattr(self, k, v)

    return self
|
Chained assignment operator.
|
60,726 |
def from_any(cls, params, **ctor_opts):
    if isinstance(params, cls):
        return deepcopy(params)
    elif isinstance(params, dict):
        ctor_opts.update(**params)
        if cls is QueryBase:
            if ('bbox' in params or 'start_range' in params or
                    'end_range' in params):
                return SpatialQuery(**ctor_opts)
            else:
                return ViewQuery(**ctor_opts)
    elif isinstance(params, basestring):
        ret = cls()
        ret._base_str = params
        return ret
    else:
        raise ArgumentError.pyexc("Params must be Query, dict, or string")
|
Creates a new Query object from input.
|
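A hedged sketch of the factory dispatch above: it accepts an existing Query, a dict of parameters, or a raw query string (the module path and option names are assumptions based on the 2.x SDK layout):
from couchbase.views.params import QueryBase, ViewQuery

q1 = QueryBase.from_any({'limit': 10})                # plain view -> ViewQuery
q2 = QueryBase.from_any({'bbox': [0, 0, 10, 10]})     # spatial -> SpatialQuery
q3 = ViewQuery.from_any('limit=10&descending=true')   # raw query string
print(type(q1).__name__, type(q2).__name__, type(q3).__name__)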
60,727 |
def encoded(self):
    if not self._encoded:
        self._encoded = self._encode()

    if self._base_str:
        return '&'.join((self._base_str, self._encoded))
    else:
        return self._encoded
|
Returns an encoded form of the query.
|
60,728 |
def registerDeferred(self, event, d):
    try:
        self._evq[event].schedule(d)
    except KeyError:
        raise ValueError("No such event type", event)
|
Register a Deferred to be fired at the firing of a specific event.
|
60,729 |
def queryEx(self, viewcls, *args, **kwargs):
    kwargs['itercls'] = viewcls
    o = super(AsyncBucket, self).query(*args, **kwargs)
    if not self.connected:
        self.connect().addCallback(lambda x: o.start())
    else:
        o.start()
    return o
|
Query a view, with the viewcls instance receiving events of the query as they arrive.
|
60,730 |
def n1qlQueryEx(self, cls, *args, **kwargs):
    kwargs['itercls'] = cls
    o = super(AsyncBucket, self).n1ql_query(*args, **kwargs)
    if not self.connected:
        self.connect().addCallback(lambda x: o.start())
    else:
        o.start()
    return o
|
Execute an N1QL statement, providing a custom handler for rows.
|
60,731 |
def n1qlQueryAll(self, *args, **kwargs):
    if not self.connected:
        cb = lambda x: self.n1qlQueryAll(*args, **kwargs)
        return self.connect().addCallback(cb)

    kwargs['itercls'] = BatchedN1QLRequest
    o = super(RawBucket, self).n1ql_query(*args, **kwargs)
    o.start()
    return o._getDeferred()
|
Execute an N1QL query, retrieving all rows.
|
60,732 |
def _wrap(self, meth, *args, **kwargs):
    if not self.connected:
        return self._connectSchedule(self._wrap, meth, *args, **kwargs)
    opres = meth(self, *args, **kwargs)
    return self.defer(opres)
|
Calls a given method with the appropriate arguments, or defers such a call until the instance has been connected.
|
60,733 |
def get_decode_format(flags):
    c_flags = flags & FMT_COMMON_MASK
    l_flags = flags & FMT_LEGACY_MASK

    if c_flags:
        if c_flags not in COMMON_FORMATS:
            return FMT_BYTES, False
        else:
            return COMMON2UNIFIED[c_flags], True
    else:
        if l_flags not in LEGACY_FORMATS:
            return FMT_BYTES, False
        else:
            return LEGACY2UNIFIED[l_flags], True
|
Returns a tuple of the decoded format and whether it was recognized.
|
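A minimal self-contained sketch of the two-tier flag decode above; the numeric constants and mappings are invented for illustration, not the SDK's real values:
FMT_BYTES = 'bytes'
FMT_JSON = 'json'

FMT_COMMON_MASK = 0xFF000000   # assumption: "common" flags live in high bits
FMT_LEGACY_MASK = 0x000000FF   # assumption: legacy flags live in low bits

COMMON2UNIFIED = {0x02000000: FMT_JSON}   # recognized common formats
LEGACY2UNIFIED = {0x00: FMT_JSON}         # recognized legacy formats

def get_decode_format(flags):
    c_flags = flags & FMT_COMMON_MASK
    l_flags = flags & FMT_LEGACY_MASK
    if c_flags:
        # Common flags take precedence; unknown values fall back to raw bytes.
        return COMMON2UNIFIED.get(c_flags, FMT_BYTES), c_flags in COMMON2UNIFIED
    return LEGACY2UNIFIED.get(l_flags, FMT_BYTES), l_flags in LEGACY2UNIFIED

print(get_decode_format(0x02000000))  # ('json', True)
print(get_decode_format(0x7F000000))  # ('bytes', False) -- unknown common flag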
60,734 |
def bucket_create(self, name, bucket_type='couchbase', bucket_password='',
                  replicas=0, ram_quota=1024, flush_enabled=False):
    params = {
        'name': name,
        'bucketType': bucket_type,
        'authType': 'sasl',
        'saslPassword': bucket_password if bucket_password else '',
        'flushEnabled': int(flush_enabled),
        'ramQuotaMB': ram_quota,
    }
    if bucket_type in ('couchbase', 'membase', 'ephemeral'):
        params['replicaNumber'] = replicas
    return self.http_request(
        path='/pools/default/buckets',
        method='POST',
        content=self._mk_formstr(params),
        content_type='application/x-www-form-urlencoded')
|
Create a new bucket.
|
60,735 |
def wait_ready(self, name, timeout=5.0, sleep_interval=0.2):
    end = time() + timeout
    while True:
        try:
            info = self.bucket_info(name).value
            for node in info['nodes']:
                if node['status'] != 'healthy':
                    raise NotReadyError.pyexc('Not all nodes are healthy')
            return
        except E.CouchbaseError:
            if time() + sleep_interval > end:
                raise
            sleep(sleep_interval)
|
Wait for a newly created bucket to be ready.
|
60,736 |
def bucket_update(self, name, current, bucket_password=None, replicas=None,
                  ram_quota=None, flush_enabled=None):
    params = {}
    current = current.value

    params['authType'] = current['authType']
    if 'saslPassword' in current:
        params['saslPassword'] = current['saslPassword']

    if bucket_password is not None:
        params['authType'] = 'sasl'
        params['saslPassword'] = bucket_password

    params['replicaNumber'] = (
        replicas if replicas is not None else current['replicaNumber'])

    if ram_quota:
        params['ramQuotaMB'] = ram_quota
    else:
        params['ramQuotaMB'] = current['quota']['ram'] / 1024 / 1024

    if flush_enabled is not None:
        params['flushEnabled'] = int(flush_enabled)

    params['proxyPort'] = current['proxyPort']
    return self.http_request(
        path='/pools/default/buckets/' + name,
        method='POST',
        content_type='application/x-www-form-urlencoded',
        content=self._mk_formstr(params))
|
Update an existing bucket's settings.
|
60,737 |
def users_get(self, domain):
    path = self._get_management_path(domain)
    return self.http_request(path=path, method='GET')
|
Retrieve a list of users from the server.
|
60,738 |
def user_get(self, domain, userid):
    path = self._get_management_path(domain, userid)
    return self.http_request(path=path, method='GET')
|
Retrieve a user from the server.
|
60,739 |
def user_upsert(self, domain, userid, password=None, roles=None, name=None):
    if not roles or not isinstance(roles, list):
        raise E.ArgumentError("Roles must be a non-empty list")

    if password and domain == AuthDomain.External:
        raise E.ArgumentError("External domains must not have passwords")

    tmplist = []
    for role in roles:
        if isinstance(role, basestring):
            tmplist.append(role)
        else:
            tmplist.append('{0}[{1}]'.format(*role))

    role_string = ','.join(tmplist)
    params = {'roles': role_string}
    if password:
        params['password'] = password
    if name:
        params['name'] = name

    form = self._mk_formstr(params)
    path = self._get_management_path(domain, userid)
    return self.http_request(
        path=path,
        method='PUT',
        content_type='application/x-www-form-urlencoded',
        content=form)
|
Upsert a user in the cluster.
|
60,740 |
def convert_1x_args(bucket, **kwargs):
    host = kwargs.pop('host', 'localhost')
    port = kwargs.pop('port', None)
    if 'connstr' not in kwargs and 'connection_string' not in kwargs:
        kwargs['connection_string'] = _build_connstr(host, port, bucket)
    return kwargs
|
Converts arguments for 1.x constructors to their 2.x forms.
|
60,741 |
def parse(cls, ss):
    up = urlparse(ss)
    path = up.path
    query = up.query

    if '?' in path:
        path, _ = up.path.split('?')

    if path.startswith('/'):
        path = path[1:]

    bucket = path
    options = parse_qs(query)
    scheme = up.scheme
    hosts = up.netloc.split(',')
    return cls(bucket=bucket, options=options, hosts=hosts, scheme=scheme)
|
Parses an existing connection string.
|
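A self-contained illustration of what the parser above extracts from a Couchbase-style connection string (the hosts, bucket, and option are illustrative):
from urllib.parse import urlparse, parse_qs

ss = 'couchbase://host1,host2/mybucket?operation_timeout=5.0'
up = urlparse(ss)

scheme = up.scheme                # 'couchbase'
hosts = up.netloc.split(',')      # ['host1', 'host2']
bucket = up.path.lstrip('/')      # 'mybucket'
options = parse_qs(up.query)      # {'operation_timeout': ['5.0']}
print(scheme, hosts, bucket, options)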
60,742 |
def encode(self):
    opt_dict = {}
    for k, v in self.options.items():
        opt_dict[k] = v[0]

    ss = '{0}://{1}'.format(self.scheme, ','.join(self.hosts))
    if self.bucket:
        ss += '/' + self.bucket

    ss += '?' + urlencode(opt_dict).replace('%2F', '/')
    return ss
|
Encodes the current state of the object into a string.
|
60,743 |
def rc_to_exctype(cls, rc):
    try:
        return _LCB_ERRNO_MAP[rc]
    except KeyError:
        newcls = _mk_lcberr(rc)
        _LCB_ERRNO_MAP[rc] = newcls
        return newcls
|
Map an error code to an exception.
|
60,744 |
def split_results(self):
    ret_ok, ret_fail = {}, {}
    count = 0
    nokey_prefix = ([""] + sorted(filter(bool, self.all_results.keys())))[-1]

    for key, v in self.all_results.items():
        if not key:
            key = nokey_prefix + ":nokey:" + str(count)
            count += 1
        success = getattr(v, 'success', True)
        if success:
            ret_ok[key] = v
        else:
            ret_fail[key] = v

    return ret_ok, ret_fail
|
Convenience method to separate failed and successful results.
|
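A self-contained sketch of the ok/fail split with stand-in result objects (the real method lives on a MultiResult-style mapping; the class below is invented):
class FakeResult:
    def __init__(self, success):
        self.success = success

all_results = {'a': FakeResult(True), 'b': FakeResult(False)}

ret_ok = {k: v for k, v in all_results.items() if getattr(v, 'success', True)}
ret_fail = {k: v for k, v in all_results.items()
            if not getattr(v, 'success', True)}
print(sorted(ret_ok), sorted(ret_fail))  # ['a'] ['b']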
60,745 |
def add(self, itm, **options):
    if not options:
        options = None
    self._d[itm] = options
|
Convenience method to add an item together with a series of options.
|
60,746 |
def deprecate_module_attribute(mod, deprecated):
    deprecated = set(deprecated)

    class Wrapper(object):
        def __getattr__(self, attr):
            if attr in deprecated:
                warnings.warn("Property %s is deprecated" % attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            if attr in deprecated:
                warnings.warn("Property %s is deprecated" % attr)
            return setattr(mod, attr, value)

    return Wrapper()
|
Return a wrapped object that warns about deprecated accesses.
|
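A runnable usage sketch, assuming the function above is in scope: replace a module's sys.modules entry with the wrapper so touching a deprecated attribute warns (the module and attribute names are invented):
import sys
import types
import warnings

mod = types.ModuleType('legacy_mod')
mod.OLD_TIMEOUT = 5
mod.NEW_TIMEOUT = 5

sys.modules['legacy_mod'] = deprecate_module_attribute(mod, ['OLD_TIMEOUT'])

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    import legacy_mod
    _ = legacy_mod.OLD_TIMEOUT   # warns
    _ = legacy_mod.NEW_TIMEOUT   # silent
print([str(w.message) for w in caught])  # ['Property OLD_TIMEOUT is deprecated']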
60,747 |
def get(self, path_or_index, default=None):
    err, value = self._resolve(path_or_index)
    value = default if err else value
    return err, value
|
Get details about a given result.
|
60,748 |
def query(self, *args, **kwargs):
    if not issubclass(kwargs.get('itercls', None), AsyncViewBase):
        raise ArgumentError.pyexc("itercls must be defined "
                                  "and must be derived from AsyncViewBase")
    return super(AsyncBucket, self).query(*args, **kwargs)
|
Reimplemented from base class.
|
60,749 |
def _gen_3spec(op, path, xattr=False):
    flags = 0
    if xattr:
        flags |= _P.SDSPEC_F_XATTR
    return Spec(op, path, flags)
|
Returns a Spec tuple suitable for passing to the underlying C extension. This variant is called for operations that lack an input value.
|
60,750 |
def upsert(path, value, create_parents=False, **kwargs):
    return _gen_4spec(LCB_SDCMD_DICT_UPSERT, path, value,
                      create_path=create_parents, **kwargs)
|
Create or replace a dictionary path.
|
60,751 |
def array_append(path, *values, **kwargs):
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_LAST, path,
                      MultiValue(*values),
                      create_path=kwargs.pop('create_parents', False),
                      **kwargs)
|
Add new values to the end of an array.
|
60,752 |
def array_prepend(path, *values, **kwargs):
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_FIRST, path,
                      MultiValue(*values),
                      create_path=kwargs.pop('create_parents', False),
                      **kwargs)
|
Add new values to the beginning of an array.
|
60,753 |
def array_insert(path, *values, **kwargs):
    return _gen_4spec(LCB_SDCMD_ARRAY_INSERT, path,
                      MultiValue(*values), **kwargs)
|
Insert items at a given position within an array.
|
60,754 |
def array_addunique(path, value, create_parents=False, **kwargs):
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_UNIQUE, path, value,
                      create_path=create_parents, **kwargs)
|
Add a new value to an array if the value does not exist.
|
60,755 |
def counter(path, delta, create_parents=False, **kwargs):
    if not delta:
        raise ValueError("Delta must be positive or negative!")
    return _gen_4spec(LCB_SDCMD_COUNTER, path, delta,
                      create_path=create_parents, **kwargs)
|
Increment or decrement a counter in a document.
|
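A hedged sketch combining the sub-document spec builders above with mutate_in (SDK 2.x style; the document ID and paths are illustrative, and a reachable cluster is assumed):
import couchbase.subdocument as SD
from couchbase.bucket import Bucket

cb = Bucket('couchbase://localhost/default')
cb.upsert('user:42', {'name': 'ada', 'tags': [], 'stats': {'logins': 0}})

cb.mutate_in('user:42',
             SD.upsert('stats.last_ip', '10.0.0.1'),  # dict upsert
             SD.array_append('tags', 'admin'),        # append to array
             SD.counter('stats.logins', 1))           # increment counter

print(cb.get('user:42').value)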
60,756 |
def add_results(self, *rvs, **kwargs):
    if not rvs:
        raise MissingTokenError.pyexc(message='No results passed')
    for rv in rvs:
        mi = rv._mutinfo
        if not mi:
            if kwargs.get('quiet'):
                return False
            raise MissingTokenError.pyexc(
                message='Result does not contain token')
        self._add_scanvec(mi)
    return True
|
Changes the state to reflect the mutation which yielded the given result.
|
60,757 |
def add_all(self, bucket, quiet=False):
    added = False
    for mt in bucket._mutinfo():
        added = True
        self._add_scanvec(mt)
    if not added and not quiet:
        raise MissingTokenError('Bucket object contains no tokens!')
    return added
|
Ensures the query result is consistent with all prior mutations performed by a given bucket.
|
60,758 |
def _assign_kwargs(self, kwargs):
    for k in kwargs:
        if not hasattr(self, k):
            raise AttributeError(k, 'Not valid for', self.__class__.__name__)
        setattr(self, k, kwargs[k])
|
Assigns all keyword arguments to a given instance, raising an exception if one of the keywords is not already the name of a property.
|
60,759 |
def _mk_range_bucket(name, n1, n2, r1, r2):
    d = {}
    if r1 is not None:
        d[n1] = r1
    if r2 is not None:
        d[n2] = r2
    if not d:
        raise TypeError('Must specify at least one range boundary!')
    d['name'] = name
    return d
|
Create a named range specification for encoding.
|
60,760 |
def add_range(self, name, start=None, end=None):
    self._ranges.append(_mk_range_bucket(name, 'start', 'end', start, end))
    return self
|
Adds a date range to the given facet.
|
60,761 |
def add_range(self, name, min=None, max=None):
    self._ranges.append(_mk_range_bucket(name, 'min', 'max', min, max))
    return self
|
Add a numeric range.
|
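A self-contained sketch of the range-bucket helper feeding the two add_range variants above (the range names and values are illustrative):
def _mk_range_bucket(name, n1, n2, r1, r2):
    d = {k: v for k, v in ((n1, r1), (n2, r2)) if v is not None}
    if not d:
        raise TypeError('Must specify at least one range boundary!')
    d['name'] = name
    return d

ranges = []
ranges.append(_mk_range_bucket('2019', 'start', 'end',
                               '2019-01-01', '2020-01-01'))    # date facet
ranges.append(_mk_range_bucket('cheap', 'min', 'max', 0, 20))  # numeric facet
print(ranges)
# [{'start': '2019-01-01', 'end': '2020-01-01', 'name': '2019'},
#  {'min': 0, 'max': 20, 'name': 'cheap'}]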
60,762 |
def mk_kwargs(cls, kwargs):
    ret = {}
    kws = ['row_factory', 'body', 'parent']
    for k in kws:
        if k in kwargs:
            ret[k] = kwargs.pop(k)
    return ret
|
Pop recognized arguments from a keyword list.
|
60,763 |
def _set_named_args(self, **kv):
    for k in kv:
        self._body['${0}'.format(k)] = kv[k]
    return self
|
Set a named parameter in the query. The named field must exist in the query itself.
|
60,764 |
def consistent_with(self, state):
    if self.consistency not in (UNBOUNDED, NOT_BOUNDED, 'at_plus'):
        raise TypeError(
            'consistent_with not valid with other consistency options')
    if not state:
        raise TypeError('Passed empty or invalid state', state)
    self.consistency = 'at_plus'
    self._body['scan_vectors'] = state._sv
|
Indicate that the query should be consistent with one or more mutations.
|
60,765 |
def timeout(self):
    value = self._body.get('timeout', '0s')
    value = value[:-1]
    return float(value)
|
Optional per-query timeout. If set, this will limit the amount of time in which the query can be executed and waited for.
|
60,766 |
def _is_ready(self):
    while not self.finish_time or time.time() < self.finish_time:
        result = self._poll_deferred()
        if result == 'success':
            return True
        if result == 'failed':
            raise couchbase.exceptions.InternalError("Failed exception")
        time.sleep(self.interval)
    raise couchbase.exceptions.TimeoutError("Deferred query timed out")
|
Return True if and only if the final result has been received, optionally blocking until this is the case or the timeout is exceeded.
|
60,767 |
def package_version(self):
    vbase = self.base_version
    if self.ncommits:
        vbase += '.dev{0}+{1}'.format(self.ncommits, self.sha)
    return vbase
|
Returns the well-formed PEP 440 version.
|
60,768 |
def download_and_bootstrap(src, name, prereq=None):
    if prereq:
        prereq_cmd = '{0} -c "{1}"'.format(PY_EXE, prereq)
        rv = os.system(prereq_cmd)
        if rv == 0:
            return

    ulp = urllib2.urlopen(src)
    fp = open(name, "wb")
    fp.write(ulp.read())
    fp.close()
    cmdline = "{0} {1}".format(PY_EXE, name)
    rv = os.system(cmdline)
    assert rv == 0
|
Download and install something if the prerequisite check fails.
|
60,769 |
def _register_opt(parser, *args, **kwargs):
    try:
        parser.add_option(*args, **kwargs)
    except (optparse.OptionError, TypeError):
        parse_from_config = kwargs.pop('parse_from_config', False)
        option = parser.add_option(*args, **kwargs)
        if parse_from_config:
            parser.config_options.append(option.get_opt_string().lstrip('-'))
|
Handler to register an option for both Flake8 3.x and 2.x.
|
60,770 |
def dict_to_hashable(d):
    return frozenset(
        (k, tuple(v) if isinstance(v, list) else
            (dict_to_hashable(v) if isinstance(v, dict) else v))
        for k, v in six.iteritems(d))
|
Takes a dict and returns an immutable, hashable version of that dict that can be used as a key in dicts or as a set value. Any two dicts passed in with the same content are guaranteed to return the same value; any two dicts passed in with different content are guaranteed to return different values. Performs comparably to repr.
|
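A runnable illustration, assuming the function above is in scope (six.iteritems is just dict.items on Python 3):
a = {'x': 1, 'nested': {'y': [1, 2]}}
b = {'nested': {'y': [1, 2]}, 'x': 1}   # same content, different order

ha, hb = dict_to_hashable(a), dict_to_hashable(b)
print(ha == hb)          # True: equal content compares equal
cache = {ha: 'result'}   # usable as a dict key
print(cache[hb])         # 'result'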
60,771 |
def run(self, request):
    if request.body.get('action_name'):
        return self._get_response_for_single_action(
            request.body.get('action_name'))
    return self._get_response_for_all_actions()
|
Introspects all of the actions on the server and returns their documentation.
|
60,772 |
def _make_middleware_stack(middleware, base):
    for ware in reversed(middleware):
        base = ware(base)
    return base
|
Given a list of in-order middleware callables (middleware) and a base function (base), chains them together so each middleware is fed the function below it, and returns the top level, ready to call.
|
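A runnable illustration of the wrapping order (the handler and middleware below are invented for the example):
def _make_middleware_stack(middleware, base):
    for ware in reversed(middleware):
        base = ware(base)
    return base

def logging_middleware(next_func):
    def wrapper(request):
        print('log: before')
        result = next_func(request)
        print('log: after')
        return result
    return wrapper

def auth_middleware(next_func):
    def wrapper(request):
        print('auth: checking')
        return next_func(request)
    return wrapper

def handler(request):
    return 'handled {}'.format(request)

stack = _make_middleware_stack([logging_middleware, auth_middleware], handler)
print(stack('req-1'))
# log: before / auth: checking / log: after / handled req-1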
60,773 |
def send_request(self, job_request, message_expiry_in_seconds=None):
    request_id = self.request_counter
    self.request_counter += 1
    meta = {}
    wrapper = self._make_middleware_stack(
        [m.request for m in self.middleware],
        self._base_send_request,
    )
    try:
        with self.metrics.timer('client.send.including_middleware',
                                resolution=TimerResolution.MICROSECONDS):
            wrapper(request_id, meta, job_request, message_expiry_in_seconds)
        return request_id
    finally:
        self.metrics.commit()
|
Send a JobRequest and return a request ID.
|
60,774 |
def get_all_responses(self, receive_timeout_in_seconds=None):
    wrapper = self._make_middleware_stack(
        [m.response for m in self.middleware],
        self._get_response,
    )
    try:
        while True:
            with self.metrics.timer('client.receive.including_middleware',
                                    resolution=TimerResolution.MICROSECONDS):
                request_id, response = wrapper(receive_timeout_in_seconds)
            if response is None:
                break
            yield request_id, response
    finally:
        self.metrics.commit()
|
Receive all available responses from the transport as a generator.
|
60,775 |
def call_action(self, service_name, action, body=None, **kwargs):
    return self.call_action_future(service_name, action, body,
                                   **kwargs).result()
|
Build and send a single job request with one action.
|
60,776 |
def call_actions(self, service_name, actions, expansions=None,
                 raise_job_errors=True, raise_action_errors=True,
                 timeout=None, **kwargs):
    return self.call_actions_future(service_name, actions, expansions,
                                    raise_job_errors, raise_action_errors,
                                    timeout, **kwargs).result()
|
Build and send a single job request with one or more actions.
|
60,777 |
def call_actions_parallel(self, service_name, actions, **kwargs):
    return self.call_actions_parallel_future(service_name, actions,
                                             **kwargs).result()
|
Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received.
|
60,778 |
def call_jobs_parallel(self, jobs, expansions=None, raise_job_errors=True,
                       raise_action_errors=True,
                       catch_transport_errors=False, timeout=None, **kwargs):
    return self.call_jobs_parallel_future(
        jobs,
        expansions=expansions,
        raise_job_errors=raise_job_errors,
        raise_action_errors=raise_action_errors,
        catch_transport_errors=catch_transport_errors,
        timeout=timeout,
        **kwargs
    ).result()
|
Build and send multiple job requests to one or more services, each with one or more actions, to be executed in parallel, and return once all responses have been received.
|
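A hedged usage sketch of the PySOA client call styles above; the service and action names are illustrative, a configured Client instance is assumed, and the dict form of action requests is an assumption:
response = client.call_action('user', 'get_user', body={'id': 42})
print(response.body)

job_response = client.call_actions('user', [
    {'action': 'get_user', 'body': {'id': 42}},
    {'action': 'get_permissions', 'body': {'id': 42}},
])
for action_response in job_response.actions:
    print(action_response.action, action_response.body)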
60,779 |
def send_request(self, service_name, actions, switches=None,
                 correlation_id=None, continue_on_error=False, context=None,
                 control_extra=None, message_expiry_in_seconds=None,
                 suppress_response=False):
    control_extra = control_extra.copy() if control_extra else {}
    if message_expiry_in_seconds and 'timeout' not in control_extra:
        control_extra['timeout'] = message_expiry_in_seconds

    handler = self._get_handler(service_name)
    control = self._make_control_header(
        continue_on_error=continue_on_error,
        control_extra=control_extra,
        suppress_response=suppress_response,
    )
    context = self._make_context_header(
        switches=switches,
        correlation_id=correlation_id,
        context_extra=context,
    )
    job_request = JobRequest(actions=actions, control=control,
                             context=context or {})
    return handler.send_request(job_request, message_expiry_in_seconds)
|
Build and send a JobRequest, and return a request ID.
|
60,780 |
def get_all_responses(self, service_name, receive_timeout_in_seconds=None):
    handler = self._get_handler(service_name)
    return handler.get_all_responses(receive_timeout_in_seconds)
|
Receive all available responses from the service as a generator.
|
60,781 |
def get_reloader(main_module_name, watch_modules, signal_forks=False):
    if USE_PY_INOTIFY:
        return _PyInotifyReloader(main_module_name, watch_modules,
                                  signal_forks)
    return _PollingReloader(main_module_name, watch_modules, signal_forks)
|
Don't instantiate a reloader directly. Instead, call this method to get a reloader, and then call main on that reloader.
|
60,782 |
def ext_hook(self, code, data):
    if code == self.EXT_DATETIME:
        microseconds = self.STRUCT_DATETIME.unpack(data)[0]
        return datetime.datetime.utcfromtimestamp(microseconds / 1000000.0)
    elif code == self.EXT_DATE:
        return datetime.date(*self.STRUCT_DATE.unpack(data))
    elif code == self.EXT_TIME:
        return datetime.time(*self.STRUCT_TIME.unpack(data))
    elif code == self.EXT_DECIMAL:
        obj_len = self.STRUCT_DECIMAL_LENGTH.unpack(data[:2])[0]
        obj_decoder = struct.Struct(str('!{}s'.format(obj_len)))
        return decimal.Decimal(
            obj_decoder.unpack(data[2:])[0].decode('utf-8'))
    elif code == self.EXT_CURRINT:
        code, minor_value = self.STRUCT_CURRINT.unpack(data)
        return currint.Amount.from_code_and_minor(
            code.decode('ascii'), minor_value)
    else:
        raise TypeError(
            'Cannot decode unknown extension type {} from '
            'MessagePack'.format(code))
|
Decodes our custom extension types.
|
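A self-contained sketch of the EXT_DATETIME branch above: pack microseconds since the epoch into a signed 64-bit big-endian payload and decode it back (the struct layout is an assumption for illustration):
import datetime
import struct

STRUCT_DATETIME = struct.Struct('!q')  # assumed wire format

micros = 1577836800000000  # 2020-01-01T00:00:00Z in microseconds
payload = STRUCT_DATETIME.pack(micros)

decoded = datetime.datetime.utcfromtimestamp(
    STRUCT_DATETIME.unpack(payload)[0] / 1000000.0)
print(decoded)  # 2020-01-01 00:00:00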
60,783 |
def send_request_message(self, request_id, meta, body, _=None):
    self._current_request = (request_id, meta, body)
    try:
        self.server.handle_next_request()
    finally:
        self._current_request = None
|
Receives a request from the client and handles and dispatches it in-thread. message_expiry_in_seconds is not supported: messages do not expire, because the server handles the request immediately, in the same thread, before this method returns. This method blocks until the server has completed handling the request.
|
60,784 |
def send_response_message(self, request_id, meta, body):
    self.response_messages.append((request_id, meta, body))
|
Add the response to the deque.
|
60,785 |
def StatusActionFactory(version, build=None, base_class=BaseStatusAction):
    return type(
        str('StatusAction'),
        (base_class,),
        {str('_version'): version, str('_build'): build},
    )
|
A factory for creating a new status action class specific to a service.
|
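A runnable illustration of the three-argument type() call used above (BaseStatusAction is stubbed out for the example):
class BaseStatusAction(object):
    _version = None
    _build = None

    def describe(self):
        return 'version={} build={}'.format(self._version, self._build)

def StatusActionFactory(version, build=None, base_class=BaseStatusAction):
    # type(name, bases, namespace) builds a new class at runtime; the str()
    # wrappers in the original keep the names native strings on Python 2 and 3.
    return type(str('StatusAction'), (base_class,),
                {str('_version'): version, str('_build'): build})

StatusAction = StatusActionFactory('1.2.3', build='abc123')
print(StatusAction().describe())  # version=1.2.3 build=abc123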
60,786 |
def make_middleware_stack(middleware, base):
    for ware in reversed(middleware):
        base = ware(base)
    return base
|
Given a list of in-order middleware callable objects (middleware) and a base function (base), chains them together so each middleware is fed the function below it, and returns the top level, ready to call.
|
60,787 |
def process_job(self, job_request):
    try:
        validation_errors = [
            Error(code=error.code, message=error.message, field=error.pointer)
            for error in (JobRequestSchema.errors(job_request) or [])
        ]
        if validation_errors:
            raise JobError(errors=validation_errors)

        job_request['client'] = self.make_client(job_request['context'])
        job_request['async_event_loop'] = self._async_event_loop
        if hasattr(self, '_async_event_loop_thread'):
            job_request['run_coroutine'] = (
                self._async_event_loop_thread.run_coroutine)
        else:
            job_request['run_coroutine'] = None

        wrapper = self.make_middleware_stack(
            [m.job for m in self.middleware],
            self.execute_job,
        )
        job_response = wrapper(job_request)
        if 'correlation_id' in job_request['context']:
            job_response.context['correlation_id'] = (
                job_request['context']['correlation_id'])
    except JobError as e:
        self.metrics.counter('server.error.job_error').increment()
        job_response = JobResponse(errors=e.errors)
    except Exception as e:
        self.metrics.counter('server.error.unhandled_error').increment()
        return self.handle_job_exception(e)

    return job_response
|
Validate, execute, and run the job request, wrapping it with any applicable job middleware.
|
60,788 |
def handle_job_exception(self, exception, variables=None):
    try:
        error_str, traceback_str = (
            six.text_type(exception), traceback.format_exc())
    except Exception:
        self.metrics.counter(
            'server.error.error_formatting_failure').increment()
        error_str, traceback_str = (
            'Error formatting error', traceback.format_exc())
    self.logger.exception(exception)

    if not isinstance(traceback_str, six.text_type):
        try:
            traceback_str = traceback_str.decode('utf-8')
        except UnicodeDecodeError:
            traceback_str = (
                'UnicodeDecodeError: Traceback could not be decoded')

    error_dict = {
        'code': ERROR_CODE_SERVER_ERROR,
        'message': 'Internal server error: %s' % error_str,
        'traceback': traceback_str,
    }

    if variables is not None:
        try:
            error_dict['variables'] = {
                key: repr(value) for key, value in variables.items()}
        except Exception:
            self.metrics.counter(
                'server.error.variable_formatting_failure').increment()
            error_dict['variables'] = 'Error formatting variables'

    return JobResponse(errors=[error_dict])
|
Makes and returns a last-ditch error response.
|
60,789 |
def execute_job(self, job_request):
    job_response = JobResponse()
    job_switches = RequestSwitchSet(job_request['context']['switches'])

    for i, raw_action_request in enumerate(job_request['actions']):
        action_request = EnrichedActionRequest(
            action=raw_action_request['action'],
            body=raw_action_request.get('body', None),
            switches=job_switches,
            context=job_request['context'],
            control=job_request['control'],
            client=job_request['client'],
            async_event_loop=job_request['async_event_loop'],
            run_coroutine=job_request['run_coroutine'],
        )

        action_in_class_map = action_request.action in self.action_class_map
        if action_in_class_map or action_request.action in ('status', 'introspect'):
            if action_in_class_map:
                action = self.action_class_map[action_request.action](
                    self.settings)
            elif action_request.action == 'introspect':
                from pysoa.server.action.introspection import IntrospectionAction
                action = IntrospectionAction(server=self)
            else:
                if not self._default_status_action_class:
                    from pysoa.server.action.status import (
                        make_default_status_action_class)
                    self._default_status_action_class = (
                        make_default_status_action_class(self.__class__))
                action = self._default_status_action_class(self.settings)

            wrapper = self.make_middleware_stack(
                [m.action for m in self.middleware],
                action,
            )
            try:
                action_response = wrapper(action_request)
            except ActionError as e:
                action_response = ActionResponse(
                    action=action_request.action,
                    errors=e.errors,
                )
        else:
            action_response = ActionResponse(
                action=action_request.action,
                errors=[Error(
                    code=ERROR_CODE_UNKNOWN,
                    message='The action "{}" was not found on this '
                            'server.'.format(action_request.action),
                    field='action',
                )],
            )

        job_response.actions.append(action_response)
        if (action_response.errors and
                not job_request['control'].get('continue_on_error', False)):
            break

    return job_response
|
Processes and runs the action requests contained in the job, and returns a JobResponse.
|
60,790 |
def handle_shutdown_signal(self, *_):
    if self.shutting_down:
        self.logger.warning('Received double interrupt, forcing shutdown')
        sys.exit(1)
    else:
        self.logger.warning('Received interrupt, initiating shutdown')
        self.shutting_down = True
|
Handles the reception of a shutdown signal.
|
60,791 |
def harakiri(self, *_):
    if self.shutting_down:
        self.logger.warning(
            'Graceful shutdown failed after {}s. Exiting now!'.format(
                self.settings['harakiri']['shutdown_grace']))
        sys.exit(1)
    else:
        self.logger.warning(
            'No activity during {}s, triggering harakiri with grace '
            '{}s'.format(
                self.settings['harakiri']['timeout'],
                self.settings['harakiri']['shutdown_grace'],
            ))
        self.shutting_down = True
        signal.alarm(self.settings['harakiri']['shutdown_grace'])
|
Handles the reception of a timeout signal, indicating that a request has been processing for too long, as defined by the harakiri settings.
|
60,792 |
def run(self):
    self.logger.info(
        'Service "{service}" server starting up, pysoa version {pysoa}, '
        'listening on transport {transport}.'.format(
            service=self.service_name,
            pysoa=pysoa.version.__version__,
            transport=self.transport,
        ))
    self.setup()
    self.metrics.commit()
    if self._async_event_loop_thread:
        self._async_event_loop_thread.start()
    self._create_heartbeat_file()

    signal.signal(signal.SIGINT, self.handle_shutdown_signal)
    signal.signal(signal.SIGTERM, self.handle_shutdown_signal)
    signal.signal(signal.SIGALRM, self.harakiri)

    try:
        while not self.shutting_down:
            signal.alarm(self.settings['harakiri']['timeout'])
            self.handle_next_request()
            self.metrics.commit()
    except MessageReceiveError:
        self.logger.exception(
            'Error receiving message from transport; shutting down')
    except Exception:
        self.metrics.counter('server.error.unknown').increment()
        self.logger.exception('Unhandled server error; shutting down')
    finally:
        self.metrics.commit()
        self.logger.info('Server shutting down')
        if self._async_event_loop_thread:
            self._async_event_loop_thread.join()
        self._close_django_caches(shutdown=True)
        self._delete_heartbeat_file()
        self.logger.info('Server shutdown complete')
|
Starts the server run loop and returns after the server shuts down due to a shutdown request, harakiri signal, or unhandled exception. See the documentation for Server.main for full details on the chain of Server method calls.
|
60,793 |
def emit(self, record):
    try:
        formatted_message = self.format(record)
        encoded_message = formatted_message.encode('utf-8')

        prefix = suffix = b''
        if getattr(self, 'ident', False):
            prefix = (self.ident.encode('utf-8')
                      if isinstance(self.ident, six.text_type)
                      else self.ident)
        if getattr(self, 'append_nul', True):
            suffix = '\000'.encode('utf-8')

        priority = '<{:d}>'.format(self.encodePriority(
            self.facility,
            self.mapPriority(record.levelname))).encode('utf-8')

        message_length = len(encoded_message)
        message_length_limit = (self.maximum_length - len(prefix) -
                                len(suffix) - len(priority))

        if message_length < message_length_limit:
            parts = [priority + prefix + encoded_message + suffix]
        elif self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE:
            truncated_message, _ = self._cleanly_slice_encoded_string(
                encoded_message, message_length_limit)
            parts = [priority + prefix + truncated_message + suffix]
        else:
            try:
                index = formatted_message.index(record.getMessage()[:40])
                start_of_message, to_chunk = (
                    formatted_message[:index], formatted_message[index:])
            except (TypeError, ValueError):
                start_of_message, to_chunk = (
                    '{} '.format(formatted_message[:30]),
                    formatted_message[30:])

            start_of_message = start_of_message.encode('utf-8')
            to_chunk = to_chunk.encode('utf-8')
            chunk_length_limit = (
                message_length_limit - len(start_of_message) - 12)

            i = 1
            parts = []
            remaining_message = to_chunk
            while remaining_message:
                message_id = b''
                subtractor = 0
                if i > 1:
                    message_id = '{}'.format(i).encode('utf-8')
                    subtractor = 14 + len(message_id)
                chunk, remaining_message = self._cleanly_slice_encoded_string(
                    remaining_message,
                    chunk_length_limit - subtractor,
                )
                if i > 1:
                    chunk = b"(cont'd #" + message_id + b') ...' + chunk
                i += 1
                if remaining_message:
                    chunk = chunk + b"... (cont'd)"
                parts.append(
                    priority + prefix + start_of_message + chunk + suffix)

        self._send(parts)
    except Exception:
        self.handleError(record)
|
Emits a record. The record is sent carefully, according to the following rules, to ensure that data is not lost by exceeding the MTU of the connection.
|
60,794 |
def add_expansion(self, expansion_node):
    existing_expansion_node = self.get_expansion(expansion_node.name)
    if existing_expansion_node:
        for child_expansion in expansion_node.expansions:
            existing_expansion_node.add_expansion(child_expansion)
    else:
        self._expansions[expansion_node.name] = expansion_node
|
Add a child expansion node to the type node's expansions.
|
60,795 |
def find_objects(self, obj):
    objects = []
    if isinstance(obj, dict):
        object_type = obj.get('_type')
        if object_type == self.type:
            objects.append(obj)
        else:
            for sub_object in six.itervalues(obj):
                objects.extend(self.find_objects(sub_object))
    elif isinstance(obj, list):
        for sub_object in obj:
            objects.extend(self.find_objects(sub_object))
    return objects
|
Find all objects in obj that match the type of the type node.
|
60,796 |
def to_dict(self):
    expansion_strings = []
    for expansion in self.expansions:
        expansion_strings.extend(expansion.to_strings())
    return {self.type: expansion_strings}
|
Convert the tree node to its dictionary representation.
|
60,797 |
def to_strings(self):
    result = []
    if not self.expansions:
        result.append(self.name)
    else:
        for expansion in self.expansions:
            result.extend('{}.{}'.format(self.name, es)
                          for es in expansion.to_strings())
    return result
|
Convert the expansion node to a list of expansion strings.
|
60,798 |
def dict_to_trees(self, expansion_dict):
    trees = []
    for node_type, expansion_list in six.iteritems(expansion_dict):
        type_node = TypeNode(node_type=node_type)
        for expansion_string in expansion_list:
            expansion_node = type_node
            for expansion_name in expansion_string.split('.'):
                child_expansion_node = expansion_node.get_expansion(
                    expansion_name)
                if not child_expansion_node:
                    type_expansion = self.type_expansions[
                        expansion_node.type][expansion_name]
                    type_route = self.type_routes[type_expansion['route']]
                    if (type_expansion['destination_field'] ==
                            type_expansion['source_field']):
                        raise ValueError(
                            'Expansion configuration destination_field '
                            'error: destination_field can not have the same '
                            'name as the source_field: {}'.format(
                                type_expansion['source_field']))
                    child_expansion_node = ExpansionNode(
                        node_type=type_expansion['type'],
                        name=expansion_name,
                        source_field=type_expansion['source_field'],
                        destination_field=type_expansion['destination_field'],
                        service=type_route['service'],
                        action=type_route['action'],
                        request_field=type_route['request_field'],
                        response_field=type_route['response_field'],
                        raise_action_errors=type_expansion.get(
                            'raise_action_errors', False),
                    )
                    expansion_node.add_expansion(child_expansion_node)
                expansion_node = child_expansion_node
        trees.append(type_node)
    return trees
|
Convert an expansion dictionary to a list of expansion trees.
|
60,799 |
def trees_to_dict(trees_list):
    result = {}
    for tree in trees_list:
        result.update(tree.to_dict())
    return result
|
Convert a list of TreeNodes to an expansion dictionary.
|