def write_shortstr(self, s): self._flushbits() if isinstance(s, unicode): s = s.encode('utf-8') if len(s) > 255: raise ValueError('String too long') self.write_octet(len(s)) self.out.write(s)
Write a string up to 255 bytes long (after any encoding). If passed a unicode string, encode with UTF-8.
def write_table(self, d): self._flushbits() table_data = AMQPWriter() for k, v in d.items(): table_data.write_shortstr(k) if isinstance(v, basestring): if isinstance(v, unicode): v = v.encode('utf-8') table_data.write(byte(83)) # 'S' table_data.write_longstr(v) elif isinstance(v, (int, long)): table_data.write(byte(73)) # 'I' table_data.write(pack('>i', v)) elif isinstance(v, Decimal): table_data.write(byte(68)) # 'D' sign, digits, exponent = v.as_tuple() v = 0 for d in digits: v = (v * 10) + d if sign: v = -v table_data.write_octet(-exponent) table_data.write(pack('>i', v)) elif isinstance(v, datetime): table_data.write(byte(84)) # 'T' table_data.write_timestamp(v) ## FIXME: timezone ? elif isinstance(v, dict): table_data.write(byte(70)) # 'F' table_data.write_table(v) else: raise ValueError('%s not serializable in AMQP' % repr(v)) table_data = table_data.getvalue() self.write_long(len(table_data)) self.out.write(table_data)
Write out a Python dictionary made up of string keys, and values that are strings, signed integers, Decimal, datetime.datetime, or sub-dictionaries following the same constraints.
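The Decimal branch is the least obvious encoding above, so here is a minimal standalone sketch of just that step (the helper name encode_decimal is mine, not part of the library): the digit tuple collapses into a plain integer, and the negated exponent becomes the leading scale octet.

from decimal import Decimal
from struct import pack

def encode_decimal(value):
    # Mirrors write_table's Decimal branch: one scale octet followed by
    # the value as a big-endian signed 32-bit integer.
    sign, digits, exponent = value.as_tuple()
    n = 0
    for d in digits:
        n = n * 10 + d
    if sign:
        n = -n
    return pack('>Bi', -exponent, n)

print(encode_decimal(Decimal('3.14')).hex())  # '020000013a': scale 2, value 314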
def write_timestamp(self, v): self.out.write(pack('>q', long(mktime(v.timetuple()))))
Write out a Python datetime.datetime object as a 64-bit integer representing seconds since the Unix epoch.
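A quick self-contained look at the wire format (note that mktime() interprets the datetime as local time, which is a caveat of the code above, not something this sketch fixes):

from datetime import datetime
from struct import pack
from time import mktime

dt = datetime(2020, 1, 1, 12, 0, 0)
# Seconds since the Unix epoch, packed big-endian into 8 bytes -- the
# same layout write_timestamp emits.
raw = pack('>q', int(mktime(dt.timetuple())))
print(len(raw), raw.hex())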
def on_error(e): # pragma: no cover exname = {'RuntimeError': 'Runtime error', 'ValueError': 'Value error'} sys.stderr.write('{}: {}\n'.format(exname[e.__class__.__name__], str(e))) sys.stderr.write('See file slam_error.log for additional details.\n') sys.exit(1)
Error handler. RuntimeError or ValueError exceptions raised by commands will be handled by this function.
def on_unexpected_error(e): # pragma: no cover sys.stderr.write('Unexpected error: {} ({})\n'.format( str(e), e.__class__.__name__)) sys.stderr.write('See file slam_error.log for additional details.\n') sys.exit(1)
Catch-all error handler. Unexpected errors will be handled by this function.
def _run_lambda_function(event, context, app, config): # pragma: no cover args = event.get('args', []) kwargs = event.get('kwargs', {}) # first attempt to invoke the function passing the lambda event and context try: ret = app(*args, event=event, context=context, **kwargs) except TypeError: # try again without passing the event and context ret = app(*args, **kwargs) return ret
Run the function. This is the default when no plugins (such as wsgi) define an alternative run function.
def _generate_lambda_handler(config, output='.slam/handler.py'): # Determine what the start up code is. The default is to just run the # function, but it can be overridden by a plugin such as wsgi for a more # elaborate way to run the function. run_function = _run_lambda_function for name, plugin in plugins.items(): if name in config and hasattr(plugin, 'run_lambda_function'): run_function = plugin.run_lambda_function run_code = ''.join(inspect.getsourcelines(run_function)[0][1:]) # generate handler.py with open(os.path.join(os.path.dirname(__file__), 'templates/handler.py.template')) as f: template = f.read() template = render_template(template, module=config['function']['module'], app=config['function']['app'], run_lambda_function=run_code, config_json=json.dumps(config, separators=(',', ':'))) with open(output, 'wt') as f: f.write(template + '\n')
Generate a handler.py file for the lambda function start up.
def build(rebuild_deps, config_file): config = _load_config(config_file) print("Building lambda package...") package = _build(config, rebuild_deps=rebuild_deps) print("{} has been built successfully.".format(package))
Build lambda package.
def invoke(stage, async, dry_run, config_file, args): config = _load_config(config_file) if stage is None: stage = config['devstage'] cfn = boto3.client('cloudformation') lmb = boto3.client('lambda') try: stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0] except botocore.exceptions.ClientError: raise RuntimeError('This project has not been deployed yet.') function = _get_from_stack(stack, 'Output', 'FunctionArn') if dry_run: invocation_type = 'DryRun' elif async: invocation_type = 'Event' else: invocation_type = 'RequestResponse' # parse input arguments data = {} for arg in args: s = arg.split('=', 1) if len(s) != 2: raise ValueError('Invalid argument ' + arg) if s[0][-1] == ':': # JSON argument data[s[0][:-1]] = json.loads(s[1]) else: # string argument data[s[0]] = s[1] rv = lmb.invoke(FunctionName=function, InvocationType=invocation_type, Qualifier=stage, Payload=json.dumps({'kwargs': data}, sort_keys=True)) if rv['StatusCode'] != 200 and rv['StatusCode'] != 202: raise RuntimeError('Unexpected error. Status code = {}.'.format( rv['StatusCode'])) if invocation_type == 'RequestResponse': payload = json.loads(rv['Payload'].read().decode('utf-8')) if 'FunctionError' in rv: if 'stackTrace' in payload: print('Traceback (most recent call last):') for frame in payload['stackTrace']: print(' File "{}", line {}, in {}'.format( frame[0], frame[1], frame[2])) print(' ' + frame[3]) print('{}: {}'.format(payload['errorType'], payload['errorMessage'])) else: raise RuntimeError('Unknown error') else: print(str(payload))
Invoke the lambda function.
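The name=value / name:=json argument convention is worth isolating; a standalone sketch (the function name parse_invoke_args is mine):

import json

def parse_invoke_args(args):
    # 'key=value' passes a plain string; 'key:=value' parses the value
    # as JSON, mirroring the loop in invoke() above.
    data = {}
    for arg in args:
        s = arg.split('=', 1)
        if len(s) != 2:
            raise ValueError('Invalid argument ' + arg)
        if s[0][-1] == ':':
            data[s[0][:-1]] = json.loads(s[1])
        else:
            data[s[0]] = s[1]
    return data

print(parse_invoke_args(['name=mary', 'retries:=3']))  # {'name': 'mary', 'retries': 3}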
def delete(no_logs, config_file): config = _load_config(config_file) s3 = boto3.client('s3') cfn = boto3.client('cloudformation') logs = boto3.client('logs') try: stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0] except botocore.exceptions.ClientError: raise RuntimeError('This project has not been deployed yet.') bucket = _get_from_stack(stack, 'Parameter', 'LambdaS3Bucket') lambda_package = _get_from_stack(stack, 'Parameter', 'LambdaS3Key') function = _get_from_stack(stack, 'Output', 'FunctionArn').split(':')[-1] api_id = _get_from_stack(stack, 'Output', 'ApiId') if api_id: log_groups = ['API-Gateway-Execution-Logs_' + api_id + '/' + stage for stage in config['stage_environments'].keys()] else: log_groups = [] log_groups.append('/aws/lambda/' + function) print('Deleting {}...'.format(config['name'])) cfn.delete_stack(StackName=config['name']) waiter = cfn.get_waiter('stack_delete_complete') waiter.wait(StackName=config['name']) if not no_logs: print('Deleting logs...') for log_group in log_groups: try: logs.delete_log_group(logGroupName=log_group) except botocore.exceptions.ClientError: print(' Log group {} could not be deleted.'.format(log_group)) print('Deleting files...') try: s3.delete_object(Bucket=bucket, Key=lambda_package) s3.delete_bucket(Bucket=bucket) except botocore.exceptions.ClientError: print(' S3 bucket {} could not be deleted.'.format(bucket))
Delete the project.
def template(config_file): config = _load_config(config_file) print(get_cfn_template(config, pretty=True))
Print the default CloudFormation deployment template.
def register_plugins(): if pkg_resources: # pragma: no cover for ep in pkg_resources.iter_entry_points('slam_plugins'): plugin = ep.load() # add any init options to the main init command if hasattr(plugin, 'init') and hasattr(plugin.init, '_arguments'): for arg in plugin.init._arguments: init.parser.add_argument(*arg[0], **arg[1]) init._arguments += plugin.init._arguments init._argnames += plugin.init._argnames plugins[ep.name] = plugin
find any installed plugins and register them.
def synchronise_device_state(self, device_state, authentication_headers): payload = { 'context': device_state, 'event': { 'header': { 'namespace': 'System', 'name': 'SynchronizeState', 'messageId': '' }, 'payload': {} } } multipart_data = MultipartEncoder( fields=[ ( 'metadata', ( 'metadata', json.dumps(payload), 'application/json', {'Content-Disposition': "form-data; name='metadata'"} ) ), ], boundary='boundary' ) headers = { **authentication_headers, 'Content-Type': multipart_data.content_type } stream_id = self.connection.request( 'GET', '/v20160207/events', body=multipart_data, headers=headers, ) response = self.connection.get_response(stream_id) assert response.status in [http.client.NO_CONTENT, http.client.OK]
Synchronise the component states with AVS. Component states must be synchronised with AVS after establishing the downchannel stream in order to create a persistent connection with AVS. Note that currently this function only pays lip-service to synchronising the device state: the device state is hard-coded.
def send_audio_file( self, audio_file, device_state, authentication_headers, dialog_request_id, distance_profile, audio_format ): payload = { 'context': device_state, 'event': { 'header': { 'namespace': 'SpeechRecognizer', 'name': 'Recognize', 'messageId': self.generate_message_id(), 'dialogRequestId': dialog_request_id, }, 'payload': { 'profile': distance_profile, 'format': audio_format } } } multipart_data = MultipartEncoder( fields=[ ( 'request', ( 'request', json.dumps(payload), 'application/json;', {'Content-Disposition': "form-data; name='request'"} ), ), ( 'audio', ( 'audio', audio_file, 'application/octet-stream', {'Content-Disposition': "form-data; name='audio'"} ) ), ], boundary='boundary', ) headers = { **authentication_headers, 'Content-Type': multipart_data.content_type } stream_id = self.connection.request( 'POST', '/v20160207/events', headers=headers, body=multipart_data, ) response = self.connection.get_response(stream_id) return self.parse_response(response)
Send audio to AVS. The file-like object is uploaded as a stream for improved latency. Returns: bytes -- WAV audio bytes returned from AVS
def retrieve_api_token(self): payload = self.oauth2_manager.get_access_token_params( refresh_token=self.refresh_token ) response = requests.post( self.oauth2_manager.access_token_url, json=payload ) response.raise_for_status() response_json = json.loads(response.text) return response_json['access_token']
Retrieve the access token from AVS. This function is memoized, so the value returned by the function will be remembered and returned by subsequent calls until the memo expires. This is because the access token lasts for one hour, then a new token needs to be requested. Decorators: helpers.expiring_memo Returns: str -- The access token for communicating with AVS
def get_user_permission_full_codename(perm): User = get_user_model() return '%s.%s_%s' % (User._meta.app_label, perm, User._meta.module_name)
Returns 'app_label.<perm>_<usermodulename>'. If standard ``auth.User`` is used, for 'change' perm this would return ``auth.change_user`` and if ``myapp.CustomUser`` is used it would return ``myapp.change_customuser``.
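A sketch of the formatting without a Django project at hand; the _Meta class below is a hypothetical stand-in for User._meta:

class _Meta:
    # Stand-in for the _meta options of a custom user model.
    app_label = 'myapp'
    module_name = 'customuser'

perm = 'change'
print('%s.%s_%s' % (_Meta.app_label, perm, _Meta.module_name))  # myapp.change_customuser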
def commit(using=None): try: django.db.transaction.commit(using) except django.db.transaction.TransactionManagementError: pass
Allows calling transaction.commit() in newer Django versions (inside an atomic block) without raising an error.
def rollback(using=None, sid=None): if sid: django.db.transaction.savepoint_rollback(sid) else: try: django.db.transaction.rollback(using) except django.db.transaction.TransactionManagementError: django.db.transaction.set_rollback(True, using)
Allows calling transaction.rollback() in newer Django versions (inside an atomic block). Important: a transaction savepoint (sid) is required for Django < 1.8.
def get_template_loaders(): try: from django.template.engine import Engine except ImportError: # Django < 1.8 Engine = None if Engine: try: engine = Engine.get_default() except ImproperlyConfigured: loaders = [] else: loaders = engine.template_loaders else: # Django < 1.8 from django.template.loader import find_template_loader loaders = [ find_template_loader(loader_name) for loader_name in settings.TEMPLATE_LOADERS] return loaders
Compatibility method to fetch the template loaders. Source: https://github.com/django-debug-toolbar/django-debug-toolbar/blob/ece1c2775af108a92a0ef59636266b49e286e916/debug_toolbar/compat.py
def resolve_url(to, *args, **kwargs): from compat import six, force_text # If it's a model, use get_absolute_url() if hasattr(to, 'get_absolute_url'): return to.get_absolute_url() if isinstance(to, Promise): # Expand the lazy instance, as it can cause issues when it is passed # further to some Python functions like urlparse. to = force_text(to) if isinstance(to, six.string_types): # Handle relative URLs if any(to.startswith(path) for path in ('./', '../')): return to # Next try a reverse URL resolution. try: return urlresolvers.reverse(to, args=args, kwargs=kwargs) except urlresolvers.NoReverseMatch: # If this is a callable, re-raise. if callable(to): raise # If this doesn't "feel" like a URL, re-raise. if '/' not in to and '.' not in to: raise # Finally, fall back and assume it's a URL return to
Return a URL appropriate for the arguments passed. The arguments could be: * A model: the model's `get_absolute_url()` function will be called. * A view name, possibly with arguments: `urlresolvers.reverse()` will be used to reverse-resolve the name. * A URL, which will be returned as-is.
def webpack_template_tag(path_to_config): # TODO: allow selection of entries # Django's template system silently fails on some exceptions try: return webpack(path_to_config) except (AttributeError, ValueError) as e: raise six.reraise(BundlingError, BundlingError(*e.args), sys.exc_info()[2])
A template tag that will output a webpack bundle. Usage: {% load webpack %} {% webpack 'path/to/webpack.config.js' as bundle %} {{ bundle.render_css|safe }} {{ bundle.render_js|safe }}
def _prepare_key(key, *args, **kwargs): if not args and not kwargs: return key items = sorted(kwargs.items()) hashable_args = (args, tuple(items)) args_key = hashlib.md5(pickle.dumps(hashable_args)).hexdigest() return "%s/args:%s" % (key, args_key)
if arguments are given, adds a hash of the args to the key.
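A standard-library-only check of the behaviour: keyword order cannot change the derived key, because the items are sorted before pickling. The helper name derived_key is mine.

import hashlib
import pickle

def derived_key(key, *args, **kwargs):
    # Same recipe as _prepare_key, reproduced here for the demo.
    hashable_args = (args, tuple(sorted(kwargs.items())))
    return '%s/args:%s' % (key, hashlib.md5(pickle.dumps(hashable_args)).hexdigest())

assert derived_key('profile', 1, a=1, b=2) == derived_key('profile', 1, b=2, a=1)
print(derived_key('profile', 1, a=1, b=2))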
def get_versions() -> FileVersionResult: version_counter = Counter() versions_match = False version_str = None versions_discovered = OrderedDict() for version_obj in version_objects: discovered = version_obj.get_version() versions_discovered[version_obj.key_name] = discovered version_counter.update([discovered]) if len(version_counter) == 1: versions_match = True version_str = list(version_counter.keys())[0] return FileVersionResult( uniform=versions_match, version_details=versions_discovered, version_result=version_str, )
Search specific project files and extract versions to check. :return: A FileVersionResult object for reporting.
def get_version_without_beta(version_info: FileVersionResult) -> str: if not version_info: raise TypeError("version_info cannot be 'None'!") if not version_info.uniform: raise ValueError("version_info is not uniform!") beta_flag = ".123." current_version = version_info.version_result # We can just split and take the first value since: # # - If the flag *is not there*, we get the entire string back. # # OR # # - If the flag *is there*, we want everything before it, # AKA the first value. # return current_version.split(beta_flag)[0]
Get the project's version string *without* any test or beta build labels. :param version_info: The current version_info of the project. :return: The current version string, without any beta build string values.
def get_version_with_beta(version_info: FileVersionResult) -> str: if not version_info: raise TypeError("version_info cannot be 'None'!") if not version_info.uniform: raise ValueError("version_info is not uniform!") # Capture datetime as UTC for build timestamp utc_datetime = datetime.datetime.utcnow() # Setup version string information beta_flag = ".123." build_number = utc_datetime.strftime("%Y%m%d.%H%M%S") cleaned_version = get_version_without_beta(version_info) # Return the new version string for the build. return "{base}{flag}{build}".format( base=cleaned_version, flag=beta_flag, build=build_number, )
Get the project's version string *with* a beta build label based on UTC. :param version_info: The current version_info of the project. :return: The project's version string, with a beta build number.
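The two functions round-trip through the '.123.' flag; a self-contained sketch of the composition, assuming a uniform base version of '1.4.2':

import datetime

beta_flag = '.123.'
base = '1.4.2'  # assumed get_version_without_beta() result
build = datetime.datetime.utcnow().strftime('%Y%m%d.%H%M%S')
versioned = '{base}{flag}{build}'.format(base=base, flag=beta_flag, build=build)
print(versioned)                      # e.g. 1.4.2.123.20240101.120000
print(versioned.split(beta_flag)[0])  # splits back to '1.4.2'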
def supportedChars(*tests): for test in tests: try: test.encode(sys.stdout.encoding) return test except UnicodeEncodeError: pass return '?' * len(tests[0])
Takes any number of strings, and returns the first one the terminal encoding supports. If none are supported it returns '?' the length of the first string.
def app_templates_dirs(self): app_templates_dirs = OrderedDict() for app_config in apps.get_app_configs(): templates_dir = os.path.join( getattr(app_config, 'path', '/'), 'templates') if os.path.isdir(templates_dir): templates_dir = upath(templates_dir) app_templates_dirs[app_config.name] = templates_dir app_templates_dirs[app_config.label] = templates_dir return app_templates_dirs
Build a cached dict with settings.INSTALLED_APPS as keys and the 'templates' directory of each application as values.
def get_contents(self, origin): try: path = self.get_app_template_path( origin.app_name, origin.template_name) with io.open(path, encoding=self.engine.file_charset) as fp: return fp.read() except KeyError: raise TemplateDoesNotExist(origin) except IOError as error: if error.errno == errno.ENOENT: raise TemplateDoesNotExist(origin) raise
Try to load the origin.
def get_template_sources(self, template_name): if ':' not in template_name: self.reset(True) return app, template_path = template_name.split(':') if app: yield NamespaceOrigin( app_name=app, name='app_namespace:%s:%s' % (app, template_name), template_name=template_path, loader=self) return self.reset(False) for app in self.app_templates_dirs: file_path = self.get_app_template_path(app, template_path) if file_path in self._already_used: continue self._already_used.append(file_path) yield NamespaceOrigin( app_name=app, name='app_namespace:%s:%s' % (app, template_name), template_name=template_path, loader=self)
Build a list of Origin to load 'template_name' split on ':'. The first item is the name of the application and the last item is the true value of 'template_name' provided by the specified application.
def load_template_source(self, *ka): template_name = ka[0] for origin in self.get_template_sources(template_name): try: return self.get_contents(origin), origin.name except TemplateDoesNotExist: pass raise TemplateDoesNotExist(template_name)
Backward compatible method for Django < 2.0.
def image(self): self._image = self.genImage() self._image = funcs.rotateImage(self._image, self.direction) return self._image
Generates the image using self.genImage(), then rotates it to self.direction and returns it.
def rotateImage(image, angle): image = [list(row) for row in image] for n in range(angle % 4): image = list(zip(*image[::-1])) return image
rotates a 2D array by a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw
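One clockwise quarter-turn is just zip() over the reversed rows; a tiny worked example:

# A 2x3 'image' of on/off pixels; one turn clockwise yields a 3x2 image.
image = [[1, 0, 0],
         [1, 1, 1]]
turned = [list(row) for row in zip(*image[::-1])]
print(turned)  # [[1, 1], [1, 0], [1, 0]]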
def move(self, direction=0): direction = int(direction) self.position = list(self.position) if direction == 0: self.position[1] += 1 elif direction == 1: self.position[0] -= 1 elif direction == 2: self.position[1] -= 1 elif direction == 3: self.position[0] += 1
direction: int in range(0, 4). 0 = Down 1 = Left 2 = Up 3 = Right
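The same moves expressed as a table-driven equivalent of the if/elif chain:

# 0 = down (+y), 1 = left (-x), 2 = up (-y), 3 = right (+x).
deltas = {0: (0, 1), 1: (-1, 0), 2: (0, -1), 3: (1, 0)}
pos = [0, 0]
for d in (0, 3, 3):
    dx, dy = deltas[d]
    pos[0] += dx
    pos[1] += dy
print(pos)  # [2, 1]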
def touching(self, canvas, side=None): try: sides = list(side) except TypeError: if side is None: sides = list(range(4)) else: sides = [side] # Generate rotated images for each direction. images = {} image = self.image.image() for side in sides: images.update({ (side + 2) % 4: funcs.rotateImage(image, (side + 2) % 4) }) # Go through each image finding top edge, # then rotate coordinates to match original image. edges = [] for side, image in images.items(): for x in range(len(image[0])): y = 0 # If the first pixel is True, don't look any further. if not image[0][x]: while not image[y][x]: y += 1 if y >= len(image): # Fallen off bottom of image, therefore no edge. y = None break # Don't do anything if no pixels in column. if y is not None: y -= 1 # To get pixel next to pixel which is on. # Get coordinates the right way around. pos = (x, y) size = [len(image), len(image[0])] for i in range(4 - side): size.reverse() pos = funcs.rotatePosition(pos, size) edges.append(pos) # Find if any other sprites are in our edge coordinates. for pixel in edges: pixel = (int(self.position[0] + pixel[0]), int(self.position[1] + pixel[1])) if canvas.testPixel(pixel): return True return False
Returns True if touching any pixels [on specified side]. 0 = Bottom 1 = Left 2 = Top 3 = Right None = All This works by creating a list of the image rotated so all the requested sides are facing up, then it finds the top edge for each image and rotates the coordinates back to the original image.
def overlaps(self, canvas, exclude=[]): try: exclude = list(exclude) except TypeError: exclude = [exclude] exclude.append(self) for selfY, row in enumerate(self.image.image()): for selfX, pixel in enumerate(row): canvasPixelOn = canvas.testPixel( (selfX + self.position[0], selfY + self.position[1]), excludedSprites=exclude ) if pixel and canvasPixelOn: return True return False
Returns True if sprite is touching any other sprite.
def onEdge(self, canvas): sides = [] if int(self.position[0]) <= 0: sides.append(1) if (int(self.position[0]) + self.image.width) >= canvas.width: sides.append(3) if int(self.position[1]) <= 0: sides.append(2) if (int(self.position[1]) + self.image.height) >= canvas.height: sides.append(0) return sides
Returns a list of the sides of the sprite which are touching the edge of the canvas. 0 = Bottom 1 = Left 2 = Top 3 = Right
def remote_property(name, get_command, set_command, field_name, doc=None): def getter(self): try: return getattr(self, name) except AttributeError: value = getattr(self.sendCommand(get_command()), field_name) setattr(self, name, value) return value def setter(self, value): setattr(self, name, value) self.sendCommand(set_command(value)) return property(getter, setter, doc=doc)
Property decorator that facilitates writing properties for values from a remote device. Arguments: name: The field name to use on the local object to store the cached property. get_command: A function that returns the remote value of the property. set_command: A function that accepts a new value for the property and sets it remotely. field_name: The name of the field to retrieve from the response message to get operations.
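A toy usage of remote_property with fake command plumbing; everything below except remote_property itself is hypothetical:

class FakeResponse:
    def __init__(self, speed):
        self.speed = speed

class FakeDevice:
    def sendCommand(self, command):
        # Pretend the command went over the wire and came back.
        return FakeResponse(speed=9600)

    speed = remote_property(
        '_speed',
        get_command=lambda: 'GET_SPEED',
        set_command=lambda value: ('SET_SPEED', value),
        field_name='speed',
        doc='Cached bus speed.')

d = FakeDevice()
print(d.speed)   # first read sends GET_SPEED, then the value is cached
d.speed = 19200  # writes through via SET_SPEED and updates the cache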
def sendCommand(self, command): command_data = [ord(x) for x in buffer(command)] self.hid.write(command_data) response_data = ''.join(chr(x) for x in self.hid.read(64)) response = command.RESPONSE.from_buffer_copy(response_data) if response.status != 0: raise CommandException(response.status) return response
Sends a Command object to the MCP2210 and returns its response. Arguments: A commands.Command instance Returns: A commands.Response instance, or raises a CommandException on error.
def transfer(self, data): settings = self.transfer_settings settings.spi_tx_size = len(data) self.transfer_settings = settings response = '' for i in range(0, len(data), 60): response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data time.sleep(0.01) while len(response) < len(data): response += self.sendCommand(commands.SPITransferCommand('')).data return ''.join(response)
Transfers data over SPI. Arguments: data: The data to transfer. Returns: The data returned by the SPI device.
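The 60-byte chunking is the key detail; in isolation:

# SPI payloads leave in 60-byte chunks, matching the loop in transfer().
data = bytes(range(150))
chunks = [data[i:i + 60] for i in range(0, len(data), 60)]
print([len(c) for c in chunks])  # [60, 60, 30]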
def render_json_response(self, context_dict, status=200): json_context = json.dumps( context_dict, cls=DjangoJSONEncoder, **self.get_json_dumps_kwargs() ).encode(u'utf-8') return HttpResponse( json_context, content_type=self.get_content_type(), status=status )
Limited serialization for shipping plain data. Do not use for models or other complex or custom objects.
def form_valid(self, form): self.object = form.save(commit=False) self.pre_save() self.object.save() if hasattr(form, 'save_m2m'): form.save_m2m() self.post_save() if self.request.is_ajax(): return self.render_json_response(self.get_success_result()) return HttpResponseRedirect(self.get_success_url())
If the request is ajax, save the form and return a json response. Otherwise return super as expected.
def form_invalid(self, form): if self.request.is_ajax(): return self.render_json_response(self.get_error_result(form)) return super(AjaxFormMixin, self).form_invalid(form)
We have errors in the form. If ajax, return them as json. Otherwise, proceed as normal.
def assumes(*args): '''Stores a function's assumptions as an attribute.''' args = tuple(args) def decorator(func): func.assumptions = args return func return decorator
Stores a function's assumptions as an attribute.
def overridden_by_assumptions(*args): '''Stores what assumptions a function is overridden by as an attribute.''' args = tuple(args) def decorator(func): func.overridden_by_assumptions = args return func return decorator
Stores what assumptions a function is overridden by as an attribute.
def sma(array, window_size, axis=-1, mode='reflect', **kwargs): kwargs['axis'] = axis kwargs['mode'] = mode if not isinstance(window_size, int): raise TypeError('window_size must be an integer') if not isinstance(kwargs['axis'], int): raise TypeError('axis must be an integer') return convolve1d(array, np.repeat(1.0, window_size)/window_size, **kwargs)
Computes a 1D simple moving average along the given axis. Parameters ---------- array : ndarray Array on which to perform the convolution. window_size: int Width of the simple moving average window in indices. axis : int, optional Axis along which to perform the moving average mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to ‘constant’. Default is ‘reflect’. kwargs : optional Other arguments to pass to `scipy.ndimage.filters.convolve1d` Returns ------- sma : ndarray Simple moving average of the given array with the specified window size along the requested axis. Raises ------ TypeError: If window_size or axis are not integers.
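Assuming NumPy and SciPy are installed, the core of sma() reduces to a single call:

import numpy as np
from scipy.ndimage import convolve1d

arr = np.arange(10, dtype=float)
window_size = 3
# Equal weights that sum to one turn the convolution into a moving average.
weights = np.repeat(1.0, window_size) / window_size
print(convolve1d(arr, weights, axis=-1, mode='reflect'))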
def quantity_string(name, quantity_dict): '''Takes in an abbreviation for a quantity and a quantity dictionary, and returns a more descriptive string of the quantity as "name (units)." Raises ValueError if the name is not in quantity_dict ''' if name not in quantity_dict.keys(): raise ValueError('{0} is not present in quantity_dict'.format(name)) return '{0} ({1})'.format(quantity_dict[name]['name'], quantity_dict[name]['units'])
Takes in an abbreviation for a quantity and a quantity dictionary, and returns a more descriptive string of the quantity as "name (units)." Raises ValueError if the name is not in quantity_dict
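For example, with a quantity_dict entry shaped the way the code expects (the entry itself is illustrative):

quantity_dict = {'Tv': {'name': 'virtual temperature', 'units': 'K'}}
print(quantity_string('Tv', quantity_dict))  # -> 'virtual temperature (K)'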
def assumption_list_string(assumptions, assumption_dict): ''' Takes in a list of short forms of assumptions and an assumption dictionary, and returns a "list" form of the long form of the assumptions. Raises ------ ValueError if one of the assumptions is not in assumption_dict. ''' if isinstance(assumptions, six.string_types): raise TypeError('assumptions must be an iterable of strings, not a ' 'string itself') for a in assumptions: if a not in assumption_dict.keys(): raise ValueError('{} not present in assumption_dict'.format(a)) assumption_strings = [assumption_dict[a] for a in assumptions] return strings_to_list_string(assumption_strings)
Takes in a list of short forms of assumptions and an assumption dictionary, and returns a "list" form of the long form of the assumptions. Raises ------ ValueError if one of the assumptions is not in assumption_dict.
def quantity_spec_string(name, quantity_dict): ''' Returns a quantity specification for docstrings. Example ------- >>> quantity_spec_string('Tv') >>> 'Tv : float or ndarray\n Data for virtual temperature.' ''' if name not in quantity_dict.keys(): raise ValueError('{0} not present in quantity_dict'.format(name)) s = '{0} : float or ndarray\n'.format(name) s += doc_paragraph('Data for {0}.'.format( quantity_string(name, quantity_dict)), indent=4) return s
Returns a quantity specification for docstrings. Example ------- >>> quantity_spec_string('Tv') >>> 'Tv : float or ndarray\n Data for virtual temperature.'
def doc_paragraph(s, indent=0): '''Takes in a string without wrapping corresponding to a paragraph, and returns a version of that string wrapped to be at most 80 characters in length on each line. If indent is given, ensures each line is indented to that number of spaces. ''' return '\n'.join([' '*indent + l for l in wrap(s, width=80-indent)])
Takes in a string without wrapping corresponding to a paragraph, and returns a version of that string wrapped to be at most 80 characters in length on each line. If indent is given, ensures each line is indented to that number of spaces.
def parse_derivative_string(string, quantity_dict): ''' Assuming the string is of the form d(var1)d(var2), returns var1, var2. Raises ValueError if the string is not of this form, or if the vars are not keys in the quantity_dict, or if var2 is not a coordinate-like variable. ''' match = derivative_prog.match(string) if match is None: raise ValueError('string is not in the form of a derivative') varname = match.group(1) coordname = match.group(2) if (varname not in quantity_dict.keys() or coordname not in quantity_dict.keys()): raise ValueError('variable in string not a valid quantity') return varname, coordname
Assuming the string is of the form d(var1)d(var2), returns var1, var2. Raises ValueError if the string is not of this form, or if the vars are not keys in the quantity_dict, or if var2 is not a coordinate-like variable.
def get_calculatable_quantities(inputs, methods): ''' Given an iterable of input quantity names and a methods dictionary, returns a list of output quantities that can be calculated. ''' output_quantities = [] updated = True while updated: updated = False for output in methods.keys(): if output in output_quantities or output in inputs: # we already know we can calculate this continue for args, func in methods[output].items(): if all([arg in inputs or arg in output_quantities for arg in args]): output_quantities.append(output) updated = True break return tuple(output_quantities) + tuple(inputs)
Given an iterable of input quantity names and a methods dictionary, returns a list of output quantities that can be calculated.
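A toy methods table shows the fixed-point iteration; the output -> {arg tuple: function} structure is inferred from the loop above:

# 'c' is computable from ('a', 'b'); 'd' then becomes computable from 'c'.
methods = {
    'c': {('a', 'b'): lambda a, b: a + b},
    'd': {('c',): lambda c: 2 * c},
}
print(get_calculatable_quantities(('a', 'b'), methods))  # ('c', 'd', 'a', 'b')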
def _check_scalar(value): '''If value is a 0-dimensional array, returns the contents of value. Otherwise, returns value. ''' if isinstance(value, np.ndarray): if value.ndim == 0: # We have a 0-dimensional array return value[None][0] return value
If value is a 0-dimensional array, returns the contents of value. Otherwise, returns value.
def ip(): ok, err = _hack_ip() if not ok: click.secho(click.style(err, fg='red')) sys.exit(1) click.secho(click.style(err, fg='green'))
Show ip address.
def wp(ssid): if not ssid: ok, err = _detect_wifi_ssid() if not ok: click.secho(click.style(err, fg='red')) sys.exit(1) ssid = err ok, err = _hack_wifi_password(ssid) if not ok: click.secho(click.style(err, fg='red')) sys.exit(1) click.secho(click.style('{ssid}:{password}'.format(ssid=ssid, password=err), fg='green'))
Show wifi password.
def build_url(self, data): query_part_one = [] query_part_two = [] keys_to_be_removed = [] for key, value in data.items(): if key not in ['version', 'restApi', 'resourcePath']: if key == 'mapArea': query_part_one.append(','.join(str(val) for val in value)) keys_to_be_removed.append(key) elif key == 'includeLocationCodes': query_part_one.append(value) keys_to_be_removed.append(key) else: if isinstance(value, list): value = ','.join(str(val) for val in value) query_part_two.append('{0}={1}'.format(key, value)) keys_to_be_removed.append(key) for k in keys_to_be_removed: del data[k] data['query'] = '{0}?{1}'.format('/'.join(query_part_one), '&'.join(query_part_two)) return data
This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values
def build_query_string(self, data): query_params = [] keys_to_be_removed = [] for key, value in data.items(): if key not in ['version', 'restApi', 'resourcePath']: if key == 'addressLine': query_params.append('{0}={1}'.format(key, quote(value))) keys_to_be_removed.append(key) else: query_params.append('{0}={1}'.format(key, value)) keys_to_be_removed.append(key) data['query'] = "&".join(query_params) for k in keys_to_be_removed: del data[k] return data
This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values
def build_query_string(self, data): queryValues = [] keys_to_be_removed = [] for key, value in data.items(): if key not in ['version', 'restApi', 'resourcePath']: if not key == 'point': queryValues.append('{0}={1}'.format(key, value)) keys_to_be_removed.append(key) keys_to_be_removed.append(key) queryString = '&'.join(queryValues) data['query'] = '{0}?{1}'.format(data['point'], queryString) for k in list(set(keys_to_be_removed)): del data[k] return data
This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values
def build_url(self): url = '{protocol}/{url}/{rest}/{version}/{restapi}/{rscpath}/' \ '{query}'.format(protocol=self.schema.protocol, url=self.schema.main_url, rest=self.schema.rest, version=self.schema.version, restapi=self.schema.restApi, rscpath=self.schema.resourcePath, query=self.schema.query) return url.replace('/None/', '/')
Builds the URL for elevations API services based on the data given by the user. Returns: url (str): URL for the elevations API services
def get_data(self): url = self.build_url() self.elevationdata = requests.get(url) if not self.elevationdata.status_code == 200: raise self.elevationdata.raise_for_status()
Gets data from the given url
def elevations(self): resources = self.get_resource() elevations = namedtuple('elevations_data', 'elevations') try: return [elevations(resource['elevations']) for resource in resources] except KeyError: return [elevations(resource['offsets']) for resource in resources] except TypeError: try: if isinstance(resources['ElevationData']['Elevations'], dict): return elevations(resources['ElevationData']['Elevations']) except KeyError: offsets = namedtuple('offsets_data', 'offsets') try: if isinstance(resources['SeaLevelData']['Offsets'], dict): return offsets(resources['SeaLevelData']['Offsets']) except KeyError: print(KeyError)
Retrieves elevations/offsets from the output response Returns: elevations/offsets (namedtuple): A named tuple of list of elevations/offsets
def zoomlevel(self): resources = self.get_resource() zoomlevel = namedtuple('zoomlevel', 'zoomLevel') try: return [zoomlevel(resource['zoomLevel']) for resource in resources] except TypeError: try: if isinstance(resources['ElevationData'], dict): return zoomlevel(resources['ElevationData']['ZoomLevel']) except KeyError: try: if isinstance(resources['SeaLevelData'], dict): zoom = resources['SeaLevelData']['ZoomLevel'] return zoomlevel(zoom) except KeyError: print(KeyError)
Retrieves zoomlevel from the output response Returns: zoomlevel (namedtuple): A namedtuple of zoomlevel from the output response
def to_json_file(self, path, file_name=None): if bool(path) and os.path.isdir(path): self.write_to_json(path, file_name) else: self.write_to_json(os.getcwd(), file_name)
Writes output to a JSON file with the given file name
def get_data(self): url = self.build_url() self.incidents_data = requests.get(url) if not self.incidents_data.status_code == 200: raise self.incidents_data.raise_for_status()
Gets data from the given url
def response_to_dict(self): try: return json.loads(self.incidents_data.text) except Exception: return json.loads(json.dumps(xmltodict.parse( self.incidents_data.text)))
This method helps in returning the output JSON data from the URL and also it helps in converting the XML output/response (string) to a JSON object Returns: data (dict): JSON data from the output/response
def description(self): resource_list = self.traffic_incident() description = namedtuple('description', 'description') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [description(resource['description']) for resource in resource_list] except (KeyError, TypeError): try: return [description(resource['Description']) for resource in resource_list] except KeyError: return None
Retrieves the description of the incident/incidents from the output response Returns: description(namedtuple): List of named tuples of descriptions of the incident/incidents
def congestion(self): resource_list = self.traffic_incident() congestion = namedtuple('congestion', 'congestion') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [congestion(resource['congestion']) for resource in resource_list] except (KeyError, TypeError): try: return [congestion(resource['CongestionInfo']) for resource in resource_list] except KeyError: return None
Retrieves the congestion information of the incident/incidents from the output response Returns: congestion(namedtuple): List of named tuples of congestion info of the incident/incidents
def detour_info(self): resource_list = self.traffic_incident() detour_info = namedtuple('detour_info', 'detour_info') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [detour_info(resource['detour']) for resource in resource_list] except (KeyError, TypeError): try: return [detour_info(resource['detourInfo']) for resource in resource_list] except KeyError: return None
Retrieves the detour information of the incident/incidents from the output response Returns: detour_info(namedtuple): List of named tuples of detour info of the incident/incidents
def start_time(self): resource_list = self.traffic_incident() start_time = namedtuple('start_time', 'start_time') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [start_time(resource['start']) for resource in resource_list] except (KeyError, TypeError): return [start_time(resource['StartTimeUTC']) for resource in resource_list]
Retrieves the start time of the incident/incidents from the output response Returns: start_time(namedtuple): List of named tuples of start time of the incident/incidents
def end_time(self): resource_list = self.traffic_incident() end_time = namedtuple('end_time', 'end_time') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [end_time(resource['end']) for resource in resource_list] except (KeyError, TypeError): return [end_time(resource['EndTimeUTC']) for resource in resource_list]
Retrieves the end time of the incident/incidents from the output response Returns: end_time(namedtuple): List of named tuples of end time of the incident/incidents
def incident_id(self): resource_list = self.traffic_incident() incident_id = namedtuple('incident_id', 'incident_id') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [incident_id(resource['incidentId']) for resource in resource_list] except (KeyError, TypeError): return [incident_id(resource['IncidentId']) for resource in resource_list]
Retrieves the incident id/ids of the incident/incidents from the output response Returns: incident_id(namedtuple): List of named tuples of incident id/ids of the incident/incidents
def lane_info(self): resource_list = self.traffic_incident() lane_info = namedtuple('lane_info', 'lane_info') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [lane_info(resource['lane']) for resource in resource_list] except (KeyError, TypeError): try: return [lane_info(resource['LaneInfo']) for resource in resource_list] except KeyError: return None
Retrieves the lane info of the incident/incidents from the output response Returns: lane_info(namedtuple): List of named tuples of lane info of the incident/incidents
def last_modified(self): resource_list = self.traffic_incident() last_modified = namedtuple('last_modified', 'last_modified') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [last_modified(resource['lastModified']) for resource in resource_list] except (KeyError, TypeError): return [last_modified(resource['LastModifiedUTC']) for resource in resource_list]
Retrieves the last modified time stamp of the incident/incidents from the output response Returns: last_modified(namedtuple): List of named tuples of last modified time stamp of the incident/incidents
def road_closed(self): resource_list = self.traffic_incident() road_closed = namedtuple('road_closed', 'road_closed') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [road_closed(resource['roadClosed']) for resource in resource_list] except (KeyError, TypeError): return [road_closed(resource['RoadClosed']) for resource in resource_list]
Retrieves the road closed information for the incident/incidents from the output response Returns: road_closed(namedtuple): List of named tuples of road closed information for the incident/incidents
def severity(self): resource_list = self.traffic_incident() severity = namedtuple('severity', 'severity') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [severity(resource['severity']) for resource in resource_list] except (KeyError, TypeError): return [severity(resource['Severity']) for resource in resource_list]
Retrieves the severity for the incident/incidents from the output response Returns: severity(namedtuple): List of named tuples of severity for the incident/incidents
def type(self): resource_list = self.traffic_incident() type = namedtuple('type', 'type') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [type(resource['type']) for resource in resource_list] except (KeyError, TypeError): return [type(resource['Type']) for resource in resource_list]
Retrieves the type of the incident/incidents from the output response Returns: type(namedtuple): List of named tuples of type of the incident/incidents
def is_verified(self): resource_list = self.traffic_incident() verified = namedtuple('verified', 'verified') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [verified(resource['verified']) for resource in resource_list] except (KeyError, TypeError): return [verified(resource['Verified']) for resource in resource_list]
Retrieves the verification status of the incident/incidents from the output response Returns: verified(namedtuple): List of named tuples of verification status of the incident/incidents
def build_query_string(self, data): query = [] keys_to_be_removed = [] for key, value in data.items(): if key not in ['version', 'restApi', 'resourcePath']: if not key == 'method': if key == 'points': value = ','.join(str(val) for val in value) keys_to_be_removed.append(key) query.append('{0}={1}'.format(key, value)) keys_to_be_removed.append(key) keys_to_be_removed.append(key) querystring = '&'.join(query) data['query'] = '{0}?{1}'.format(data['method'], querystring) for k in list(set(keys_to_be_removed)): del data[k] return data
This method occurs after dumping the data into the class. Args: data (dict): dictionary of all the query values Returns: data (dict): ordered dict of all the values
def get_coordinates(self): resource_list = self.get_resource() coordinates = namedtuple('coordinates', ['latitude', 'longitude']) try: return [coordinates(*resource['point']['coordinates']) for resource in resource_list] except (KeyError, TypeError): try: if isinstance(resource_list, dict): resource_list = [resource_list] return [coordinates(resource['Point']['Latitude'], resource['Point']['Longitude']) for resource in resource_list] except (KeyError, ValueError) as exc: print(exc)
Retrieves coordinates (latitudes/longitudes) from the output JSON/XML response Returns: coordinates (namedtuple): List of named tuples of coordinates (latitudes and longitudes)
def get_address(self): resource_list = self.get_resource() try: return [resource['address'] for resource in resource_list] except (KeyError, TypeError): try: if isinstance(resource_list, dict): resource_list = [resource_list] return [resource['Address'] for resource in resource_list] except (KeyError, TypeError) as exc: print(exc)
Retrieves addresses from the output JSON/XML response Returns: address (namedtuple): List of named tuples of addresses
def get_bbox(self): resource_list = self.get_resource() bounding_box = namedtuple('boundingbox', ['southlatitude', 'westlongitude', 'northlatitude', 'eastlongitude']) try: return [bounding_box(*resource['bbox']) for resource in resource_list] except (KeyError, TypeError): try: if isinstance(resource_list, dict): resource_list = [resource_list] return [bounding_box(resource['BoundingBox']['SouthLatitude'], resource['BoundingBox']['WestLongitude'], resource['BoundingBox']['NorthLatitude'], resource['BoundingBox']['EastLongitude']) for resource in resource_list] except (KeyError, TypeError) as exc: print(exc)
Retrieves the bounding box coordinates from the output JSON/XML response Returns: boundingbox (namedtuple): List of named tuples of bounding box coordinates
def get_data(self): url = self.build_url() self.locationApiData = requests.get(url) if not self.locationApiData.status_code == 200: raise self.locationApiData.raise_for_status()
Gets data from the built url
def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): cluster = get_cluster() if name not in cluster.metadata.keyspaces: #try the 1.2 method replication_map = { 'class': strategy_class, 'replication_factor':replication_factor } replication_map.update(replication_values) if strategy_class.lower() != 'simplestrategy': # Although the Cassandra documentation states for `replication_factor` # that it is "Required if class is SimpleStrategy; otherwise, # not used." we get an error if it is present. replication_map.pop('replication_factor', None) query = """ CREATE KEYSPACE {} WITH REPLICATION = {} """.format(name, json.dumps(replication_map).replace('"', "'")) if strategy_class != 'SimpleStrategy': query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') execute(query)
creates a keyspace :param name: name of keyspace to create :param strategy_class: keyspace replication strategy class :param replication_factor: keyspace replication factor :param durable_writes: 1.2 only, write log is bypassed if set to False :param **replication_values: 1.2 only, additional values to add to the replication data map
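The CQL the function ends up executing can be previewed with the same JSON trick it uses internally:

import json

replication_map = {'class': 'SimpleStrategy', 'replication_factor': 1}
# The replication map is rendered by swapping JSON double quotes for
# the single quotes CQL expects.
print('CREATE KEYSPACE example WITH REPLICATION = {}'.format(
    json.dumps(replication_map).replace('"', "'")))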
def get_compaction_options(model): if not model.__compaction__: return {} result = {'class':model.__compaction__} def setter(key, limited_to_strategy = None): """ sets key in result, checking if the key is limited to either SizeTiered or Leveled :param key: one of the compaction options, like "bucket_high" :param limited_to_strategy: SizeTieredCompactionStrategy, LeveledCompactionStrategy :return: """ mkey = "__compaction_{}__".format(key) tmp = getattr(model, mkey) if tmp and limited_to_strategy and limited_to_strategy != model.__compaction__: raise CQLEngineException("{} is limited to {}".format(key, limited_to_strategy)) if tmp: # Explicitly cast the values to strings to be able to compare the # values against introspected values from Cassandra. result[key] = str(tmp) setter('tombstone_compaction_interval') setter('tombstone_threshold') setter('bucket_high', SizeTieredCompactionStrategy) setter('bucket_low', SizeTieredCompactionStrategy) setter('max_threshold', SizeTieredCompactionStrategy) setter('min_threshold', SizeTieredCompactionStrategy) setter('min_sstable_size', SizeTieredCompactionStrategy) setter('sstable_size_in_mb', LeveledCompactionStrategy) return result
Generates dictionary (later converted to a string) for creating and altering tables with compaction strategy :param model: :return:
def update_compaction(model): logger.debug("Checking %s for compaction differences", model) table = get_table_settings(model) existing_options = table.options.copy() existing_compaction_strategy = existing_options['compaction_strategy_class'] existing_options = json.loads(existing_options['compaction_strategy_options']) desired_options = get_compaction_options(model) desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy) desired_options.pop('class', None) do_update = False if desired_compact_strategy not in existing_compaction_strategy: do_update = True for k, v in desired_options.items(): val = existing_options.pop(k, None) if val != v: do_update = True # check compaction_strategy_options if do_update: options = get_compaction_options(model) # jsonify options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) logger.debug(query) execute(query) return True return False
Updates the compaction options for the given model if necessary. :param model: The model to update. :return: `True`, if the compaction options were modified in Cassandra, `False` otherwise. :rtype: bool
def add_where_clause(self, clause): if not isinstance(clause, WhereClause): raise StatementException("only instances of WhereClause can be added to statements") clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.where_clauses.append(clause)
adds a where clause to this statement :param clause: the clause to add :type clause: WhereClause
def add_assignment_clause(self, clause): if not isinstance(clause, AssignmentClause): raise StatementException("only instances of AssignmentClause can be added to statements") clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.assignments.append(clause)
adds an assignment clause to this statement :param clause: the clause to add :type clause: AssignmentClause
def add_transaction_clause(self, clause): if not isinstance(clause, TransactionClause): raise StatementException('only instances of TransactionClause can be added to statements') clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.transactions.append(clause)
Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: TransactionClause
def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, retry_connect=False, **kwargs): global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") if not default_keyspace: raise UndefinedKeyspaceException() from cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency if lazy_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) return cluster = Cluster(hosts, **kwargs) try: session = cluster.connect() except NoHostAvailable: if retry_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) raise session.row_factory = dict_factory
Records the hosts and connects to one of them :param hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html :type hosts: list :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level :type consistency: int :param lazy_connect: True if should not connect until first use :type lazy_connect: bool :param retry_connect: bool :param retry_connect: True if we should retry to connect even if there was a connection failure initially
def validate(self, value): if value is None: if self.required: raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field)) return value
Returns a cleaned and validated value. Raises a ValidationError if there's a problem
def from_datetime(self, dt): global _last_timestamp epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 timestamp = (dt - epoch).total_seconds() - offset node = None clock_seq = None nanoseconds = int(timestamp * 1e9) timestamp = int(nanoseconds // 100) + 0x01b21dd213814000 if clock_seq is None: import random clock_seq = random.randrange(1 << 14) # instead of stable storage time_low = timestamp & 0xffffffff time_mid = (timestamp >> 32) & 0xffff time_hi_version = (timestamp >> 48) & 0x0fff clock_seq_low = clock_seq & 0xff clock_seq_hi_variant = (clock_seq >> 8) & 0x3f if node is None: node = getnode() return pyUUID(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node), version=1)
generates a UUID for a given datetime :param dt: datetime :type dt: datetime :return:
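The timestamp arithmetic in isolation (timezone handling omitted): convert to nanoseconds, then to 100-nanosecond intervals since the Gregorian epoch of 1582-10-15, the unit RFC 4122 time-based UUIDs use.

from datetime import datetime

dt = datetime(2015, 6, 1, 12, 0, 0)
epoch = datetime(1970, 1, 1)
nanoseconds = int((dt - epoch).total_seconds() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# Gregorian epoch and the Unix epoch.
timestamp = nanoseconds // 100 + 0x01b21dd213814000
print(hex(timestamp))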
def get_column_def(self): static = "static" if self.static else "" db_type = self.db_type.format(self.value_type.db_type) return '{} {} {}'.format(self.cql, db_type, static)
Returns a column definition for CQL table definition
def _construct_instance(cls, values): # we're going to take the values, which is from the DB as a dict # and translate that into our local fields # the db_map is a db_field -> model field map items = values.items() field_dict = dict([(cls._db_map.get(k, k),v) for k,v in items]) if cls._is_polymorphic: poly_key = field_dict.get(cls._polymorphic_column_name) if poly_key is None: raise PolyMorphicModelException('polymorphic key was not found in values') poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base klass = poly_base._get_model_by_polymorphic_key(poly_key) if klass is None: poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_polymorphic_key(poly_key) if klass is None: raise PolyMorphicModelException( 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__) ) if not issubclass(klass, cls): raise PolyMorphicModelException( '{} is not a subclass of {}'.format(klass.__name__, cls.__name__) ) field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()} else: klass = cls instance = klass(**field_dict) instance._is_persisted = True return instance
method used to construct instances from query results. This is where polymorphic deserialization occurs.
def _can_update(self): if not self._is_persisted: return False return all([not self._values[k].changed for k in self._primary_keys])
Called by the save function to check if this should be persisted with update or insert :return:
def column_family_name(cls, include_keyspace=True): cf_name = '' if cls.__table_name__: cf_name = cls.__table_name__.lower() else: # get polymorphic base table names if model is polymorphic if cls._is_polymorphic and not cls._is_polymorphic_base: return cls._polymorphic_base.column_family_name(include_keyspace=include_keyspace) camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) cf_name += ccase(cls.__name__) #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] cf_name = cf_name.lower() cf_name = re.sub(r'^_+', '', cf_name) if not include_keyspace: return cf_name return '{}.{}'.format(cls._get_keyspace(), cf_name)
Returns the column family name if it's been defined; otherwise, it creates it from the module and class name.
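The camel-case-to-snake-case step on its own:

import re

camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(
    lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)
print(ccase('UserProfileModel').lower())  # -> 'user_profile_model'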
def delete(self): self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__, timeout=self._timeout).delete()
Deletes this instance
def _select_query(self): if self._where: self._validate_select_where() return SelectStatement( self.column_family_name, fields=self._select_fields(), where=self._where, order_by=self._order, limit=self._limit, allow_filtering=self._allow_filtering )
Returns a select clause based on the given filter args
def _parse_filter_arg(self, arg): statement = arg.rsplit('__', 1) if len(statement) == 1: return arg, None elif len(statement) == 2: return statement[0], statement[1] else: raise QueryException("Can't parse '{}'".format(arg))
Parses a filter arg in the format: <colname>__<op> :returns: colname, op tuple
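rsplit keeps double underscores inside column names intact and only peels off the final operator suffix; for example:

# 'age__gte' -> ('age', 'gte'); a bare name has no operator.
for arg in ('age__gte', 'first_name', 'a__b__in'):
    statement = arg.rsplit('__', 1)
    print(statement[0], statement[1] if len(statement) == 2 else None)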
def iff(self, *args, **kwargs): if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on iff are not allowed") clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, TransactionClause): raise QueryException('{} is not a valid query operator'.format(operator)) clone._transaction.append(operator) for col_name, val in kwargs.items(): exists = False try: column = self.model._get_column(col_name) except KeyError: if col_name == 'pk__token': if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) quote_field = False else: raise QueryException("Can't resolve column name: '{}'".format(col_name)) if isinstance(val, Token): if col_name != 'pk__token': raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( 'Token() received {} arguments but model has {} partition keys'.format( len(val.value), len(partition_columns))) val.set_columns(partition_columns) if isinstance(val, BaseQueryFunction) or exists is True: query_val = val else: query_val = column.to_database(val) clone._transaction.append(TransactionClause(col_name, query_val)) return clone
Adds IF statements to queryset
def get(self, *args, **kwargs): if args or kwargs: return self.filter(*args, **kwargs).get() self._execute_query() if len(self._result_cache) == 0: raise self.model.DoesNotExist elif len(self._result_cache) > 1: raise self.model.MultipleObjectsReturned( '{} objects found'.format(len(self._result_cache))) else: return self[0]
Returns a single instance matching this query, optionally with additional filter kwargs. A DoesNotExist exception will be raised if there are no rows matching the query. A MultipleObjectsReturned exception will be raised if there is more than one row matching the query.
def order_by(self, *colnames): if len(colnames) == 0: clone = copy.deepcopy(self) clone._order = [] return clone conditions = [] for colname in colnames: conditions.append('"{}" {}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) return clone
orders the result set. ordering can only use clustering columns. Default order is ascending, prepend a '-' to the column name for descending