<SYSTEM_TASK:>
Use this decorator on a Step.action implementation.
<END_TASK>
<USER_TASK:>
Description:
def update_variables(func):
"""
Use this decorator on a Step.action implementation.
Your action method should always return variables, or
both variables and output.
This decorator will update the variables with the output.
""" |
@wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if isinstance(result, tuple):
return self.process_register(result[0], result[1])
else:
return self.process_register(result)
return wrapper |
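For illustration, a minimal usage sketch of the decorator above; `Step` and its `process_register` implementation are assumed to come from the surrounding package, and the field names are hypothetical:
class EchoStep(Step):
    @update_variables
    def action(self, variables):
        # compute some output for this step (illustrative field names)
        output = {'echoed': variables.get('message')}
        # returning (variables, output) lets the decorator fold the
        # output back into the variables via process_register
        return variables, output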
<SYSTEM_TASK:>
set the properties of the app model by the given data dict
<END_TASK>
<USER_TASK:>
Description:
def _set_properties(self, data):
"""
set the properties of the app model by the given data dict
""" |
for property in data.keys():
if property in vars(self):
setattr(self, property, data[property]) |
<SYSTEM_TASK:>
This action handler responds to the "roll call" emitted by the api
<END_TASK>
<USER_TASK:>
Description:
async def roll_call_handler(service, action_type, payload, props, **kwds):
"""
This action handler responds to the "roll call" emitted by the api
gateway when it is brought up, replying with the normal summary produced
by the service.
""" |
# if the action type corresponds to a roll call
if action_type == roll_call_type():
# then announce the service
await service.announce() |
<SYSTEM_TASK:>
This query handler builds the dynamic picture of available services.
<END_TASK>
<USER_TASK:>
Description:
async def flexible_api_handler(service, action_type, payload, props, **kwds):
"""
This query handler builds the dynamic picture of available services.
""" |
# if the action represents a new service
if action_type == intialize_service_action():
# treat the payload like json if it's a string
model = json.loads(payload) if isinstance(payload, str) else payload
# the list of known models
models = service._external_service_data['models']
# the list of known connections
connections = service._external_service_data['connections']
# the list of known mutations
mutations = service._external_service_data['mutations']
# if the model is a connection
if 'connection' in model:
# if we haven't seen the connection before
if not [conn for conn in connections if conn['name'] == model['name']]:
# add it to the list
connections.append(model)
# or if there are registered fields
elif 'fields' in model and not [mod for mod in models if mod['name'] == model['name']]:
# add it to the model list
models.append(model)
# the service could provide mutations as well as affect the topology
if 'mutations' in model:
# go over each mutation announce
for mutation in model['mutations']:
# if there isn't a mutation by the same name in the local cache
if not [mut for mut in mutations if mut['name'] == mutation['name']]:
# add it to the local cache
mutations.append(mutation)
# if there are models
if models:
# create a new schema corresponding to the models and connections
service.schema = generate_api_schema(
models=models,
connections=connections,
mutations=mutations,
) |
<SYSTEM_TASK:>
This function figures out the list of orderings for the given model and
<END_TASK>
<USER_TASK:>
Description:
def _parse_order_by(model, order_by):
"""
This function figures out the list of orderings for the given model and
argument.
Args:
model (nautilus.BaseModel): The model to compute ordering against
order_by (list of str): the list of fields to order_by. If the field
starts with a `+` then the order is ascending, if `-` descending,
if no character precedes the field, the ordering is assumed to be
ascending.
Returns:
(list of filters): the model filters to apply to the query
""" |
# the list of filters for the models
out = []
# for each attribute we have to order by
for key in order_by:
# remove any whitespace
key = key.strip()
# if the key starts with a plus
if key.startswith("+"):
# add the ascending filter to the list
out.append(getattr(model, key[1:]))
# otherwise if the key starts with a minus
elif key.startswith("-"):
# add the descending filter to the list
out.append(getattr(model, key[1:]).desc())
# otherwise the key needs the default filter
else:
# add the default filter to the list
out.append(getattr(model, key))
# return the list of filters
return out |
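A hedged example of how the parsed orderings could be used, assuming a peewee-style model `User` with `name`, `age`, and `id` fields (the model and field names are illustrative):
# "+name" and bare "id" become ascending orderings, "-age" descending
ordering = _parse_order_by(User, ["+name", "-age", "id"])
# roughly equivalent to [User.name, User.age.desc(), User.id],
# which can be passed straight to a query: query.order_by(*ordering)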
<SYSTEM_TASK:>
Creates the example directory structure necessary for a model service.
<END_TASK>
<USER_TASK:>
Description:
def model(model_names):
"""
Creates the example directory structure necessary for a model service.
""" |
# for each model name we need to create
for model_name in model_names:
# the template context
context = {
'name': model_name,
}
# render the model template
render_template(template='common', context=context)
render_template(template='model', context=context) |
<SYSTEM_TASK:>
Creates the example directory structure necessary for a connection
<END_TASK>
<USER_TASK:>
Description:
def connection(model_connections):
"""
Creates the example directory structure necessary for a connection
service.
""" |
# for each connection group
for connection_str in model_connections:
# the services to connect
services = connection_str.split(':')
services.sort()
service_name = ''.join([service.title() for service in services])
# the template context
context = {
# make sure the first letter is lowercase
'name': service_name[0].lower() + service_name[1:],
'services': services,
}
render_template(template='common', context=context)
render_template(template='connection', context=context) |
<SYSTEM_TASK:>
This function returns the conventional action designator for a given model.
<END_TASK>
<USER_TASK:>
Description:
def get_model_string(model):
"""
This function returns the conventional action designator for a given model.
""" |
name = model if isinstance(model, str) else model.__name__
return normalize_string(name) |
<SYSTEM_TASK:>
This function takes a list of type summaries and builds a dictionary
<END_TASK>
<USER_TASK:>
Description:
def build_native_type_dictionary(fields, respect_required=False, wrap_field=True, name=''):
"""
This function takes a list of type summaries and builds a dictionary
with native representations of each entry. Useful for dynamically
building native class records from summaries.
""" |
# a place to start when building the input field attributes
input_fields = {}
# go over every input in the summary
for field in fields:
field_name = name + field['name']
field_type = field['type']
# if the type field is a string
if isinstance(field_type, str):
# compute the native api type for the field
field_type = convert_typestring_to_api_native(field_type)(
# required=respect_required and field['required']
)
# add an entry in the attributes
input_fields[field['name']] = field_type
# we could also be looking at a dictionary
elif isinstance(field_type, dict):
object_fields = field_type['fields']
# add the dictionary to the parent as a graphql object type
input_fields[field['name']] = graphql_type_from_summary(
summary={
'name': field_name+"ArgType",
'fields': object_fields
}
)
# if we are supposed to wrap the object in a field
if wrap_field:
# then wrap the value we just added
input_fields[field['name']] = graphene.Field(input_fields[field['name']])
# we're done
return input_fields |
<SYSTEM_TASK:>
This function provides the standard form for crud mutations.
<END_TASK>
<USER_TASK:>
Description:
def summarize_crud_mutation(method, model, isAsync=False):
"""
This function provides the standard form for crud mutations.
""" |
# create the appropriate action type
action_type = get_crud_action(method=method, model=model)
# the name of the mutation
name = crud_mutation_name(model=model, action=method)
# a mapping of methods to input factories
input_map = {
'create': create_mutation_inputs,
'update': update_mutation_inputs,
'delete': delete_mutation_inputs,
}
# a mapping of methods to output factories
output_map = {
'create': create_mutation_outputs,
'update': update_mutation_outputs,
'delete': delete_mutation_outputs,
}
# the inputs for the mutation
inputs = input_map[method](model)
# the mutation outputs
outputs = output_map[method](model)
# return the appropriate summary
return summarize_mutation(
mutation_name=name,
event=action_type,
isAsync=isAsync,
inputs=inputs,
outputs=outputs
) |
<SYSTEM_TASK:>
This function starts the broker's interaction with the kafka stream
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
This function starts the broker's interaction with the kafka stream
""" |
self.loop.run_until_complete(self._consumer.start())
self.loop.run_until_complete(self._producer.start())
self._consumer_task = self.loop.create_task(self._consume_event_callback()) |
<SYSTEM_TASK:>
This method stops the broker's interaction with the kafka stream
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
"""
This method stops the broker's interaction with the kafka stream
""" |
self.loop.run_until_complete(self._consumer.stop())
self.loop.run_until_complete(self._producer.stop())
# attempt
try:
# to cancel the service
self._consumer_task.cancel()
# if there was no service
except AttributeError:
# keep going
pass |
<SYSTEM_TASK:>
This method sends a message over the kafka stream.
<END_TASK>
<USER_TASK:>
Description:
async def send(self, payload='', action_type='', channel=None, **kwds):
"""
This method sends a message over the kafka stream.
""" |
# use a custom channel if one was provided
channel = channel or self.producer_channel
# serialize the action type and payload into a message
message = serialize_action(action_type=action_type, payload=payload, **kwds)
# send the message
return await self._producer.send(channel, message.encode()) |
<SYSTEM_TASK:>
This function returns the conventional form of the actions.
<END_TASK>
<USER_TASK:>
Description:
def serialize_action(action_type, payload, **extra_fields):
"""
This function returns the conventional form of the actions.
""" |
action_dict = dict(
action_type=action_type,
payload=payload,
**extra_fields
)
# return a serializable version
return json.dumps(action_dict) |
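A quick sketch of the serialized form this produces; the action type and extra field below are made up for illustration:
message = serialize_action(
    action_type='create.user.pending',  # hypothetical action type string
    payload={'name': 'alice'},
    correlation_id='abc123',            # extra fields are folded into the dict
)
# message is the JSON string:
# {"action_type": "create.user.pending", "payload": {"name": "alice"}, "correlation_id": "abc123"}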
<SYSTEM_TASK:>
This function returns the fields for a schema that matches the provided
<END_TASK>
<USER_TASK:>
Description:
def fields_for_model(model):
"""
This function returns the fields for a schema that matches the provided
nautilus model.
Args:
model (nautilus.model.BaseModel): The model to base the field list on
Returns:
(dict<field_name: str, graphqlType>): A mapping of field names to
graphql types
""" |
# the attribute arguments (no filters)
args = {field.name.lower() : convert_peewee_field(field) \
for field in model.fields()}
# use the field arguments, without the segments
return args |
<SYSTEM_TASK:>
Create an SQL Alchemy table that connects the provided services
<END_TASK>
<USER_TASK:>
Description:
def create_connection_model(service):
""" Create an SQL Alchemy table that connects the provides services """ |
# the services connected
services = service._services
# the mixins / base for the model
bases = (BaseModel,)
# the fields of the derived
attributes = {model_service_name(service): fields.CharField() for service in services}
# create an instance of base model with the right attributes
return type(BaseModel)(connection_service_name(service), bases, attributes) |
<SYSTEM_TASK:>
This factory returns an action handler that creates a new instance of
<END_TASK>
<USER_TASK:>
Description:
def create_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that creates a new instance of
the specified model when a create action is received, assuming the
action follows nautilus conventions.
Args:
Model (nautilus.BaseModel): The model to create when the action is
received.
Returns:
function(action_type, payload): The action handler for this model
""" |
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
# if the payload represents a new instance of `Model`
if action_type == get_crud_action('create', name or Model):
# print('handling create for ' + name or Model)
try:
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
# for each required field
for requirement in Model.required_fields():
# save the name of the field
field_name = requirement.name
# ensure the value is in the payload
# TODO: check all required fields rather than failing on the first
if not field_name in payload and field_name != 'id':
# yell loudly
raise ValueError(
"Required field not found in payload: %s" %field_name
)
# create a new model
new_model = Model(**payload)
# save the new model instance
new_model.save()
# if we need to tell someone about what happened
if notify:
# publish the success event
await service.event_broker.send(
payload=ModelSerializer().serialize(new_model),
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# if we need to tell someone about what happened
if notify:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# otherwise we aren't supposed to notify
else:
# raise the exception normally
raise err
# return the handler
return action_handler |
<SYSTEM_TASK:>
Equality checks are overwritten to perform the actual check in a
<END_TASK>
<USER_TASK:>
Description:
async def _has_id(self, *args, **kwds):
"""
Equality checks are overwritten to perform the actual check in a
semantic way.
""" |
# if there is only one positional argument
if len(args) == 1:
# parse the appropriate query
result = await parse_string(
self._query,
self.service.object_resolver,
self.service.connection_resolver,
self.service.mutation_resolver,
obey_auth=False
)
# go to the bottom of the result for the list of matching ids
return self._find_id(result['data'], args[0])
# otherwise
else:
# treat the attribute like a normal filter
return self._has_id(**kwds) |
<SYSTEM_TASK:>
This method performs a depth-first search for the given uid in the dictionary of results.
<END_TASK>
<USER_TASK:>
Description:
def _find_id(self, result, uid):
"""
This method performs a depth-first search for the given uid in the dictionary of results.
""" |
# if the result is a list
if isinstance(result, list):
# if the list has a valid entry
if any([self._find_id(value, uid) for value in result]):
# then we're done
return True
# otherwise results could be dictionaries
if isinstance(result, dict):
# the children of the result that are lists
list_children = [value for value in result.values() if isinstance(value, list)]
# go to every value that is a list
for value in list_children:
# if the value is a match
if self._find_id(value, uid):
# we're done
return True
# the children of the result that are dicts
dict_children = [value for value in result.values() if isinstance(value, dict)]
# perform the check on every child that is a dict
for value in dict_children:
# if the child is a match
if self._find_id(value, uid):
# we're done
return True
# if there are no values that are lists and there is an id key
if not list_children and not dict_children and 'id' in result:
# the value of the remote id field
result_id = result['id']
# we've found a match if the id field matches (cast to match type)
return result_id == type(result_id)(uid)
# we didn't find the result
return False |
<SYSTEM_TASK:>
Returns a builder inserting a new block before the current block
<END_TASK>
<USER_TASK:>
Description:
def add_before(self):
"""Returns a builder inserting a new block before the current block""" |
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx) |
<SYSTEM_TASK:>
Returns a builder inserting a new block after the current block
<END_TASK>
<USER_TASK:>
Description:
def add_after(self):
"""Returns a builder inserting a new block after the current block""" |
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) |
<SYSTEM_TASK:>
Creates a comment block
<END_TASK>
<USER_TASK:>
Description:
def comment(self, text, comment_prefix='#'):
"""Creates a comment block
Args:
text (str): content of comment without #
comment_prefix (str): character indicating start of comment
Returns:
self for chaining
""" |
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self |
<SYSTEM_TASK:>
Creates a section block
<END_TASK>
<USER_TASK:>
Description:
def section(self, section):
"""Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
""" |
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self |
<SYSTEM_TASK:>
Creates a vertical space of newlines
<END_TASK>
<USER_TASK:>
Description:
def space(self, newlines=1):
"""Creates a vertical space of newlines
Args:
newlines (int): number of empty lines
Returns:
self for chaining
""" |
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self |
<SYSTEM_TASK:>
Creates a new option inside a section
<END_TASK>
<USER_TASK:>
Description:
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
""" |
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self |
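A hedged sketch of chaining the builder methods above; `top_builder` is assumed to be a BlockBuilder whose container is the ConfigUpdater itself, and `section_builder` one whose container is a Section (for example, what a block's add_before/add_after returns):
# at the document level only comments, spaces and sections may be added
top_builder.comment("managed by tooling; do not edit by hand") \
           .space(1) \
           .section("tool:pytest")
# inside a section-level builder, options can be chained the same way
section_builder.option("addopts", "-ra").space(1)
The type checks in section() and option() enforce this split: sections only at the ConfigUpdater level, options only inside a Section.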
<SYSTEM_TASK:>
Add a Comment object to the section
<END_TASK>
<USER_TASK:>
Description:
def add_comment(self, line):
"""Add a Comment object to the section
Used during initial parsing mainly
Args:
line (str): one line in the comment
""" |
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self |
<SYSTEM_TASK:>
Add a Space object to the section
<END_TASK>
<USER_TASK:>
Description:
def add_space(self, line):
"""Add a Space object to the section
Used during initial parsing mainly
Args:
line (str): one line that defines the space, maybe whitespaces
""" |
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self |
<SYSTEM_TASK:>
Set an option for chaining.
<END_TASK>
<USER_TASK:>
Description:
def set(self, option, value=None):
"""Set an option for chaining.
Args:
option (str): option name
value (str): value, default None
""" |
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self |
<SYSTEM_TASK:>
Read and parse a filename.
<END_TASK>
<USER_TASK:>
Description:
def read(self, filename, encoding=None):
"""Read and parse a filename.
Args:
filename (str): path to file
encoding (str): encoding of file, default None
""" |
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename) |
<SYSTEM_TASK:>
Returns list of configuration options for the named section.
<END_TASK>
<USER_TASK:>
Description:
def options(self, section):
"""Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names
""" |
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options() |
<SYSTEM_TASK:>
Gets an option value for a given section.
<END_TASK>
<USER_TASK:>
Description:
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
""" |
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value |
<SYSTEM_TASK:>
Checks for the existence of a given option in a given section.
<END_TASK>
<USER_TASK:>
Description:
def has_option(self, section, option):
"""Checks for the existence of a given option in a given section.
Args:
section (str): name of section
option (str): name of option
Returns:
bool: whether the option exists in the given section
""" |
if section not in self.sections():
return False
else:
option = self.optionxform(option)
return option in self[section] |
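A short usage sketch of the read/query methods above, assuming a file `setup.cfg` with a [metadata] section that contains a `name` option (file and option names are illustrative):
updater = ConfigUpdater()
updater.read('setup.cfg', encoding='utf-8')
updater.options('metadata')              # e.g. ['name', ...]
updater.has_option('metadata', 'name')   # True
updater.get('metadata', 'name').value    # the Option object carries the value
updater['metadata'].set('name', 'my-package')  # update (or create) the option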
<SYSTEM_TASK:>
This factory returns an action handler that deletes an instance of
<END_TASK>
<USER_TASK:>
Description:
def delete_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that deletes an instance of
the specified model when a delete action is received, assuming the
action follows nautilus conventions.
Args:
Model (nautilus.BaseModel): The model to delete when the action is
received.
Returns:
function(type, payload): The action handler for this model
""" |
# necessary imports
from nautilus.database import db
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
# if the payload represents a new instance of `model`
if action_type == get_crud_action('delete', name or Model):
try:
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
# the id in the payload representing the record to delete
record_id = payload['id'] if 'id' in payload else payload['pk']
# get the model matching the payload
try:
model_query = Model.select().where(Model.primary_key() == record_id)
except KeyError:
raise RuntimeError("Could not find appropriate id to remove service record.")
# remove the model instance
model_query.get().delete_instance()
# if we need to tell someone about what happened
if notify:
# publish the success event
await service.event_broker.send(
payload='{"status":"ok"}',
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# if we need to tell someone about what happened
if notify:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# otherwise we aren't supposed to notify
else:
# raise the exception normally
raise err
# return the handler
return action_handler |
<SYSTEM_TASK:>
This factory returns an action handler that responds to read requests
<END_TASK>
<USER_TASK:>
Description:
def read_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to read when the action is
received.
Returns:
function(type, payload): The action handler for this model
""" |
async def action_handler(service, action_type, payload, props, **kwds):
# if the payload represents a new instance of `model`
if action_type == get_crud_action('read', name or Model):
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
try:
# resolve the query using the service schema
resolved = service.schema.execute(payload)
# create the string response
response = json.dumps({
'data': {key:value for key,value in resolved.data.items()},
'errors': resolved.errors
})
# publish the success event
await service.event_broker.send(
payload=response,
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# return the handler
return action_handler |
<SYSTEM_TASK:>
This method converts a type into a dict.
<END_TASK>
<USER_TASK:>
Description:
def _from_type(self, config):
"""
This method converts a type into a dict.
""" |
def is_user_attribute(attr):
return (
not attr.startswith('__') and
not isinstance(getattr(config, attr), collections.abc.Callable)
)
return {attr: getattr(config, attr) for attr in dir(config) \
if is_user_attribute(attr)} |
<SYSTEM_TASK:>
This function traverses a query and collects the corresponding
<END_TASK>
<USER_TASK:>
Description:
async def walk_query(obj, object_resolver, connection_resolver, errors, current_user=None, __naut_name=None, obey_auth=True, **filters):
"""
This function traverses a query and collects the corresponding
information in a dictionary.
""" |
# if the object has no selection set
if not hasattr(obj, 'selection_set'):
# yell loudly
raise ValueError("Can only resolve objects, not primitive types")
# the name of the node
node_name = __naut_name or obj.name.value if obj.name else obj.operation
# the selected fields
selection_set = obj.selection_set.selections
def _build_arg_tree(arg):
"""
This function recursively builds the arguments for lists and single values
"""
# TODO: what about object arguments??
# if there is a single value
if hasattr(arg, 'value'):
# assign the value to the filter
return arg.value
# otherwise if there are multiple values for the argument
elif hasattr(arg, 'values'):
return [_build_arg_tree(node) for node in arg.values]
# for each argument on this node
for arg in obj.arguments:
# add it to the query filters
filters[arg.name.value] = _build_arg_tree(arg.value)
# the fields we have to ask for
fields = [field for field in selection_set if not field.selection_set]
# the links between objects
connections = [field for field in selection_set if field.selection_set]
try:
# resolve the model with the given fields
models = await object_resolver(node_name, [field.name.value for field in fields], current_user=current_user, obey_auth=obey_auth, **filters)
# if something went wrong resolving the object
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
return None
# add connections to each matching model
for model in models:
# if there is an id for the model
if 'pk' in model:
# for each connection
for connection in connections:
# the name of the connection
connection_name = connection.name.value
# the target of the connection
node = {
'name': node_name,
'pk': model['pk']
}
try:
# go through the connection
connected_ids, next_target = await connection_resolver(
connection_name,
node,
)
# if there are connections
if connected_ids:
# add the id filter to the list
filters['pk_in'] = connected_ids
# add the connection field
value = await walk_query(
connection,
object_resolver,
connection_resolver,
errors,
current_user=current_user,
obey_auth=obey_auth,
__naut_name=next_target,
**filters
)
# there were no connections
else:
value = []
# if something went wrong
except Exception as e:
# add the error as a string
errors.append(e.__str__())
# stop here
value = None
# set the connection to the appropriate value
model[connection_name] = value
# return the list of matching models
return models |
<SYSTEM_TASK:>
This action handler interprets the payload as a query to be executed
<END_TASK>
<USER_TASK:>
Description:
async def query_handler(service, action_type, payload, props, **kwds):
"""
This action handler interprets the payload as a query to be executed
by the api gateway service.
""" |
# check that the action type indicates a query
if action_type == query_action_type():
print('encountered query event {!r} '.format(payload))
# perform the query
result = await parse_string(payload,
service.object_resolver,
service.connection_resolver,
service.mutation_resolver,
obey_auth=False
)
# the props for the reply message
reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {}
# publish the success event
await service.event_broker.send(
payload=result,
action_type=change_action_status(action_type, success_status()),
**reply_props
) |
<SYSTEM_TASK:>
This function returns the standard summary for mutation inputs
<END_TASK>
<USER_TASK:>
Description:
def summarize_mutation_io(name, type, required=False):
"""
This function returns the standard summary for mutation inputs
and outputs
""" |
return dict(
name=name,
type=type,
required=required
) |
<SYSTEM_TASK:>
This function returns the name of a mutation that performs the specified
<END_TASK>
<USER_TASK:>
Description:
def crud_mutation_name(action, model):
"""
This function returns the name of a mutation that performs the specified
crud action on the given model service
""" |
model_string = get_model_string(model)
# make sure the mutation name is correctly camelcased
model_string = model_string[0].upper() + model_string[1:]
# return the mutation name
return "{}{}".format(action, model_string) |
<SYSTEM_TASK:>
This function creates the actual mutation io summary corresponding to the model
<END_TASK>
<USER_TASK:>
Description:
def _summarize_o_mutation_type(model):
"""
This function creates the actual mutation io summary corresponding to the model
""" |
from nautilus.api.util import summarize_mutation_io
# compute the appropriate name for the object
object_type_name = get_model_string(model)
# return a mutation io object
return summarize_mutation_io(
name=object_type_name,
type=_summarize_object_type(model),
required=False
) |
<SYSTEM_TASK:>
This function returns the summary for a given model
<END_TASK>
<USER_TASK:>
Description:
def _summarize_object_type(model):
"""
This function returns the summary for a given model
""" |
# the fields for the service's model
model_fields = {field.name: field for field in list(model.fields())}
# summarize the model
return {
'fields': [{
'name': key,
'type': type(convert_peewee_field(value)).__name__
} for key, value in model_fields.items()
]
} |
<SYSTEM_TASK:>
This function combines the given action handlers into a single function
<END_TASK>
<USER_TASK:>
Description:
def combine_action_handlers(*handlers):
"""
This function combines the given action handlers into a single function
which will call all of them.
""" |
# make sure each of the given handlers is callable
for handler in handlers:
# if the handler is not a function
if not (iscoroutinefunction(handler) or iscoroutine(handler)):
# yell loudly
raise ValueError("Provided handler is not a coroutine: %s" % handler)
# the combined action handler
async def combined_handler(*args, **kwds):
# goes over every given handler
for handler in handlers:
# call the handler
await handler(*args, **kwds)
# return the combined action handler
return combined_handler |
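A small sketch combining two of the factory-produced handlers defined in this collection; `Model` is a placeholder for a concrete nautilus model:
handler = combine_action_handlers(
    create_handler(Model),
    update_handler(Model),
)
# `handler` is itself a coroutine function; every incoming action is
# passed to each combined handler in the order they were given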
<SYSTEM_TASK:>
This factory returns an action handler that updates an existing instance of
<END_TASK>
<USER_TASK:>
Description:
def update_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that updates an existing instance of
the specified model when an update action is received, assuming the
action follows nautilus conventions.
Args:
Model (nautilus.BaseModel): The model to update when the action is
received.
Returns:
function(type, payload): The action handler for this model
""" |
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
# if the payload represents a new instance of `Model`
if action_type == get_crud_action('update', name or Model):
try:
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
# grab the name of the primary key for the model
pk_field = Model.primary_key()
# make sure there is a primary key to id the model
if not pk_field.name in payload:
# yell loudly
raise ValueError("Must specify the pk of the model when updating")
# grab the matching model
model = Model.select().where(pk_field == payload[pk_field.name]).get()
# remove the key from the payload
payload.pop(pk_field.name, None)
# for every key,value pair
for key, value in payload.items():
# TODO: add protection for certain fields from being
# changed by the api
setattr(model, key, value)
# save the updates
model.save()
# if we need to tell someone about what happened
if notify:
# publish the success event
await service.event_broker.send(
payload=ModelSerializer().serialize(model),
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# if we need to tell someone about what happened
if notify:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# otherwise we aren't supposed to notify
else:
# raise the exception normally
raise err
# return the handler
return action_handler |
<SYSTEM_TASK:>
This function returns a graphql mutation corresponding to the provided
<END_TASK>
<USER_TASK:>
Description:
def graphql_mutation_from_summary(summary):
"""
This function returns a graphql mutation corresponding to the provided
summary.
""" |
# get the name of the mutation from the summary
mutation_name = summary['name']
# print(summary)
# the treat the "type" string as a gra
input_name = mutation_name + "Input"
input_fields = build_native_type_dictionary(summary['inputs'], name=input_name, respect_required=True)
# the inputs for the mutation are defined by a class record
inputs = type('Input', (object,), input_fields)
# the outputs for the mutation are attributes to the class record
output_name = mutation_name + "Output"
outputs = build_native_type_dictionary(summary['outputs'], name=output_name)
# a no-op in order to satisfy the introspection query
mutate = classmethod(lambda *_, **__ : 'hello')
# create the appropriate mutation class record
mutation = type(mutation_name, (graphene.Mutation,), {
'Input': inputs,
'mutate': mutate,
**outputs
})
# return the newly created mutation record
return mutation |
<SYSTEM_TASK:>
This function takes a series of dictionaries and creates an argument
<END_TASK>
<USER_TASK:>
Description:
def arg_string_from_dict(arg_dict, **kwds):
"""
This function takes a series of dictionaries and creates an argument
string for a graphql query
""" |
# the filters dictionary
filters = {
**arg_dict,
**kwds,
}
# return the correctly formed string
return ", ".join("{}: {}".format(key, json.dumps(value)) for key,value in filters.items()) |
<SYSTEM_TASK:>
This function creates a graphql schema that provides a single model
<END_TASK>
<USER_TASK:>
Description:
def create_model_schema(target_model):
""" This function creates a graphql schema that provides a single model """ |
from nautilus.database import db
# create the schema instance
schema = graphene.Schema(auto_camelcase=False)
# grab the primary key from the model
primary_key = target_model.primary_key()
primary_key_type = convert_peewee_field(primary_key)
# create a graphene object
class ModelObjectType(PeeweeObjectType):
class Meta:
model = target_model
pk = Field(primary_key_type, description="The primary key for this object.")
@graphene.resolve_only_args
def resolve_pk(self):
return getattr(self, self.primary_key().name)
class Query(graphene.ObjectType):
""" the root level query """
all_models = List(ModelObjectType, args=args_for_model(target_model))
@graphene.resolve_only_args
def resolve_all_models(self, **args):
# filter the model query according to the arguments
# print(filter_model(target_model, args)[0].__dict__)
return filter_model(target_model, args)
# add the query to the schema
schema.query = Query
return schema |
<SYSTEM_TASK:>
This function verifies the token using the secret key and returns its
<END_TASK>
<USER_TASK:>
Description:
def read_session_token(secret_key, token):
"""
This function verifies the token using the secret key and returns its
contents.
""" |
return jwt.decode(token.encode('utf-8'), secret_key,
algorithms=[token_encryption_algorithm()]
) |
<SYSTEM_TASK:>
The default action Handler has no action.
<END_TASK>
<USER_TASK:>
Description:
async def handle_action(self, action_type, payload, **kwds):
"""
The default action Handler has no action.
""" |
# if there is a service attached to the action handler
if hasattr(self, 'service'):
# handle roll calls
await roll_call_handler(self.service, action_type, payload, **kwds) |
<SYSTEM_TASK:>
This method is used to announce the existence of the service
<END_TASK>
<USER_TASK:>
Description:
async def announce(self):
"""
This method is used to announce the existence of the service
""" |
# send a serialized event
await self.event_broker.send(
action_type=intialize_service_action(),
payload=json.dumps(self.summarize())
) |
<SYSTEM_TASK:>
This function starts the service's network interfaces.
<END_TASK>
<USER_TASK:>
Description:
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
"""
This function starts the service's network interfaces.
Args:
port (int): The port for the http server.
""" |
print("Running service on http://localhost:%i. " % port + \
"Press Ctrl+C to terminate.")
# apply the configuration to the service config
self.config.port = port
self.config.host = host
# start the loop
try:
# if an event broker has been created for this service
if self.event_broker:
# start the broker
self.event_broker.start()
# announce the service
self.loop.run_until_complete(self.announce())
# the handler for the http server
http_handler = self.app.make_handler()
# create an asyncio server
self._http_server = self.loop.create_server(http_handler, host, port)
# grab the handler for the server callback
self._server_handler = self.loop.run_until_complete(self._http_server)
# start the event loop
self.loop.run_forever()
# if the user interrupted the server
except KeyboardInterrupt:
# keep going
pass
# when we're done
finally:
try:
# clean up the service
self.cleanup()
# if we end up closing before any variables get assigned
except UnboundLocalError:
# just ignore it (there was nothing to close)
pass
# close the event loop
self.loop.close() |
<SYSTEM_TASK:>
This function is called when the service has finished running
<END_TASK>
<USER_TASK:>
Description:
def cleanup(self):
"""
This function is called when the service has finished running,
regardless of whether it stopped intentionally or not.
""" |
# if an event broker has been created for this service
if self.event_broker:
# stop the event broker
self.event_broker.stop()
# attempt
try:
# close the http server
self._server_handler.close()
self.loop.run_until_complete(self._server_handler.wait_closed())
self.loop.run_until_complete(self._http_handler.finish_connections(shutdown_timeout))
# if there was no handler
except AttributeError:
# keep going
pass
# more cleanup
self.loop.run_until_complete(self.app.shutdown())
self.loop.run_until_complete(self.app.cleanup()) |
<SYSTEM_TASK:>
This method provides a programmatic way of adding individual routes
<END_TASK>
<USER_TASK:>
Description:
def add_http_endpoint(self, url, request_handler):
"""
This method provides a programmatic way of adding individual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler
""" |
self.app.router.add_route('*', url, request_handler) |
<SYSTEM_TASK:>
This method provides a decorator for adding endpoints to the
<END_TASK>
<USER_TASK:>
Description:
def route(cls, route, config=None):
"""
This method provides a decorator for adding endpoints to the
http server.
Args:
route (str): The url to be handled by the RequestHandler
config (dict): Configuration for the request handler
Example:
.. code-block:: python
import nautilus
from nautilus.network.http import RequestHandler
class MyService(nautilus.Service):
# ...
@MyService.route('/')
class HelloWorld(RequestHandler):
def get(self):
return self.finish('hello world')
""" |
def decorator(wrapped_class, **kwds):
# add the endpoint at the given route
cls._routes.append(
dict(url=route, request_handler=wrapped_class)
)
# return the class undecorated
return wrapped_class
# return the decorator
return decorator |
<SYSTEM_TASK:>
This function generates a session token signed by the secret key which
<END_TASK>
<USER_TASK:>
Description:
def generate_session_token(secret_key, **payload):
"""
This function generates a session token signed by the secret key which
can be used to extract the user credentials in a verifiable way.
""" |
return jwt.encode(payload, secret_key, algorithm=token_encryption_algorithm()).decode('utf-8') |
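A round-trip sketch with read_session_token defined earlier; the secret and payload are illustrative, and the .decode()/.encode() calls assume the PyJWT version this code was written against (where jwt.encode returns bytes):
secret = 'not-a-real-secret'
token = generate_session_token(secret, user='123')
# read_session_token(secret, token) -> {'user': '123'}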
<SYSTEM_TASK:>
This function provides a standard representation of mutations to be
<END_TASK>
<USER_TASK:>
Description:
def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False):
"""
This function provides a standard representation of mutations to be
used when services announce themselves
""" |
return dict(
name=mutation_name,
event=event,
isAsync=isAsync,
inputs=inputs,
outputs=outputs,
) |
<SYSTEM_TASK:>
Ensure that loaded values are PasswordHashes.
<END_TASK>
<USER_TASK:>
Description:
def coerce(cls, key, value):
"""Ensure that loaded values are PasswordHashes.""" |
if isinstance(value, PasswordHash):
return value
return super(PasswordHash, cls).coerce(key, value) |
<SYSTEM_TASK:>
Recreates the internal hash.
<END_TASK>
<USER_TASK:>
Description:
def rehash(self, password):
"""Recreates the internal hash.""" |
self.hash = self._new(password, self.desired_rounds)
self.rounds = self.desired_rounds |
<SYSTEM_TASK:>
This function configures the database used for models to match
<END_TASK>
<USER_TASK:>
Description:
def init_db(self):
"""
This function configures the database used for models to match
the configuration parameters.
""" |
# get the database url from the configuration
db_url = self.config.get('database_url', 'sqlite:///nautilus.db')
# configure the nautilus database to the url
nautilus.database.init_db(db_url) |
<SYSTEM_TASK:>
This attribute provides the mapping of services to their auth requirements
<END_TASK>
<USER_TASK:>
Description:
def auth_criteria(self):
"""
This attribute provides the mapping of services to their auth requirements
Returns:
(dict) : the mapping from services to their auth requirements.
""" |
# the dictionary we will return
auth = {}
# go over each attribute of the service
for attr in dir(self):
# make sure we don't hit an infinite loop
if attr != 'auth_criteria':
# get the actual attribute
attribute = getattr(self, attr)
# if the service represents an auth criteria
if isinstance(attribute, Callable) and hasattr(attribute, '_service_auth'):
# add the criteria to the final results
auth[getattr(self, attr)._service_auth] = attribute
# return the auth mapping
return auth |
<SYSTEM_TASK:>
This function logs a user in by verifying the given credentials against the database
<END_TASK>
<USER_TASK:>
Description:
async def login_user(self, password, **kwds):
"""
This function logs a user in by verifying the given credentials against the database
""" |
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(
self.model.user == user_data[root_query()][0]['pk']
)[0]
# if we couldn't access the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user')
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials") |
<SYSTEM_TASK:>
This function registers the given user credentials and provides a sessionToken for later requests.
<END_TASK>
<USER_TASK:>
Description:
async def register_user(self, password, **kwds):
"""
This function registers the given user credentials and provides a sessionToken for later requests.
Args:
uid (str): The
""" |
# so make one
user = await self._create_remote_user(password=password, **kwds)
# if there is no pk field
if not 'pk' in user:
# make sure the user has a pk field
user['pk'] = user['id']
# the query to find a matching query
match_query = self.model.user == user['id']
# if the user has already been registered
if self.model.select().where(match_query).count() > 0:
# yell loudly
raise RuntimeError('The user is already registered.')
# create an entry in the user password table
password = self.model(user=user['id'], password=password)
# save it to the database
password.save()
# return a dictionary with the user we created and a session token for later use
return {
'user': user,
'sessionToken': self._user_session_token(user)
} |
<SYSTEM_TASK:>
This function resolves a given object in the remote backend services
<END_TASK>
<USER_TASK:>
Description:
async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters):
"""
This function resolves a given object in the remote backend services
""" |
try:
# check if an object with that name has been registered
registered = [model for model in self._external_service_data['models'] \
if model['name']==object_name][0]
# if there is no connection data yet
except AttributeError:
raise ValueError("No objects are registered with this schema yet.")
# if we don't recognize the model that was requested
except IndexError:
raise ValueError("Cannot query for object {} on this service.".format(object_name))
# the valid fields for this object
valid_fields = [field['name'] for field in registered['fields']]
# figure out if any invalid fields were requested
invalid_fields = [field for field in fields if field not in valid_fields]
try:
# make sure we never treat pk as invalid
invalid_fields.remove('pk')
# if they weren't asking for pk as a field
except ValueError:
pass
# if there were
if invalid_fields:
# yell loudly
raise ValueError("Cannot query for fields {!r} on {}".format(
invalid_fields, registered['name']
))
# make sure we include the id in the request
fields.append('pk')
# the query for model records
query = query_for_model(fields, **filters)
# the action type for the question
action_type = get_crud_action('read', object_name)
# query the appropriate stream for the information
response = await self.event_broker.ask(
action_type=action_type,
payload=query
)
# treat the reply like a json object
response_data = json.loads(response)
# if something went wrong
if 'errors' in response_data and response_data['errors']:
# return an empty response
raise ValueError(','.join(response_data['errors']))
# grab the valid list of matches
result = response_data['data'][root_query()]
# grab the auth handler for the object
auth_criteria = self.auth_criteria.get(object_name)
# if we care about auth requirements and there is one for this object
if obey_auth and auth_criteria:
# build a second list of authorized entries
authorized_results = []
# for each query result
for query_result in result:
# create a graph entity for the model
graph_entity = GraphEntity(self, model_type=object_name, id=query_result['pk'])
# if the auth handler passes
if await auth_criteria(model=graph_entity, user_id=current_user):
# add the result to the final list
authorized_results.append(query_result)
# overwrite the query result
result = authorized_results
# apply the auth handler to the result
return result |
<SYSTEM_TASK:>
the default behavior for mutations is to look up the event,
<END_TASK>
<USER_TASK:>
Description:
async def mutation_resolver(self, mutation_name, args, fields):
"""
the default behavior for mutations is to look up the event,
publish the correct event type with the args as the body,
and return the fields contained in the result
""" |
try:
# make sure we can identify the mutation
mutation_summary = [mutation for mutation in \
self._external_service_data['mutations'] \
if mutation['name'] == mutation_name][0]
# if we couldn't get the first entry in the list
except IndexError as e:
# make sure the error is reported
raise ValueError("Could not execute mutation named: " + mutation_name)
# the function to use for running the mutation depends on its synchronicity
# event_function = self.event_broker.ask \
# if mutation_summary['isAsync'] else self.event_broker.send
event_function = self.event_broker.ask
# send the event and wait for a response
value = await event_function(
action_type=mutation_summary['event'],
payload=args
)
try:
# return a dictionary with the values we asked for
return json.loads(value)
# if the result was not valid json
except json.decoder.JSONDecodeError:
# just throw the value
raise RuntimeError(value) |
<SYSTEM_TASK:>
Get or create publish
<END_TASK>
<USER_TASK:>
Description:
def publish(self, distribution, storage=""):
"""
Get or create publish
""" |
try:
return self._publishes[distribution]
except KeyError:
self._publishes[distribution] = Publish(self.client, distribution, timestamp=self.timestamp, storage=(storage or self.storage))
return self._publishes[distribution] |
<SYSTEM_TASK:>
Add mirror or repo to publish
<END_TASK>
<USER_TASK:>
Description:
def add(self, snapshot, distributions, component='main', storage=""):
""" Add mirror or repo to publish """ |
for dist in distributions:
self.publish(dist, storage=storage).add(snapshot, component) |
<SYSTEM_TASK:>
Check if publish name matches list of names or regex patterns
<END_TASK>
<USER_TASK:>
Description:
def _publish_match(self, publish, names=False, name_only=False):
"""
Check if publish name matches list of names or regex patterns
""" |
if names:
for name in names:
if not name_only and isinstance(name, re._pattern_type):
if re.match(name, publish.name):
return True
else:
operand = name if name_only else [name, './%s' % name]
if publish in operand:
return True
return False
else:
return True |
<SYSTEM_TASK:>
Compare two publishes
<END_TASK>
<USER_TASK:>
Description:
def compare(self, other, components=[]):
"""
Compare two publishes
It expects that the other publish is the same as or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']}
""" |
lg.debug("Comparing publish %s (%s) and %s (%s)" % (self.name, self.storage or "local", other.name, other.storage or "local"))
diff, equal = ({}, {})
for component, snapshots in self.components.items():
if component not in list(other.components.keys()):
# Component is missing in other
diff[component] = snapshots
continue
equal_snapshots = list(set(snapshots).intersection(other.components[component]))
if equal_snapshots:
lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots))
equal[component] = equal_snapshots
diff_snapshots = list(set(snapshots).difference(other.components[component]))
if diff_snapshots:
lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots))
diff[component] = diff_snapshots
return (diff, equal) |
<SYSTEM_TASK:>
Find this publish on remote
<END_TASK>
<USER_TASK:>
Description:
def _get_publish(self):
"""
Find this publish on remote
""" |
publishes = self._get_publishes(self.client)
for publish in publishes:
if publish['Distribution'] == self.distribution and \
publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \
publish['Storage'] == self.storage:
return publish
raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local")) |
<SYSTEM_TASK:>
Serialize publish in YAML
<END_TASK>
<USER_TASK:>
Description:
def save_publish(self, save_path):
"""
Serialize publish in YAML
""" |
timestamp = time.strftime("%Y%m%d%H%M%S")
yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
yaml_dict["storage"] = self.storage
for component, snapshots in self.components.items():
packages = self.get_packages(component)
package_dict = []
for package in packages:
(arch, name, version, ref) = self.parse_package_ref(package)
package_dict.append({'package': name, 'version': version, 'arch': arch, 'ref': ref})
snapshot = self._find_snapshot(snapshots[0])
yaml_dict["components"].append({'component': component, 'snapshot': snapshot['Name'],
'description': snapshot['Description'], 'packages': package_dict})
name = self.name.replace('/', '-')
lg.info("Saving publish %s in %s" % (name, save_path))
with open(save_path, 'w') as save_file:
yaml.dump(yaml_dict, save_file, default_flow_style=False) |
<SYSTEM_TASK:>
Restore publish from config file
<END_TASK>
<USER_TASK:>
Description:
def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
""" |
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
publish = False
new_publish_snapshots = []
to_publish = []
created_snapshots = []
for saved_component in config.get('components', []):
component_name = saved_component.get('component')
if not component_name:
raise Exception("Corrupted file")
if components and component_name not in components:
continue
saved_packages = []
if not saved_component.get('packages'):
raise Exception("Component %s is empty" % component_name)
for package in saved_component.get('packages'):
package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref'))
saved_packages.append(package_ref)
to_publish.append(component_name)
timestamp = time.strftime("%Y%m%d%H%M%S")
snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot'))
lg.debug("Creating snapshot %s for component %s of packages: %s"
% (snapshot_name, component_name, saved_packages))
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': [],
'Description': saved_component.get('description'),
'PackageRefs': saved_packages,
}
)
created_snapshots.append(snapshot_name)
except AptlyException as e:
if e.res.status_code == 404:
# delete all the previously created
# snapshots because the file is corrupted
self._remove_snapshots(created_snapshots)
raise Exception("Source snapshot or packages don't exist")
else:
raise
new_publish_snapshots.append({
'Component': component_name,
'Name': snapshot_name
})
if components:
self.publish_snapshots = [x for x in self.publish_snapshots if x['Component'] not in components and x['Component'] not in to_publish]
check_components = [x for x in new_publish_snapshots if x['Component'] in components]
if len(check_components) != len(components):
self._remove_snapshots(created_snapshots)
raise Exception("Not possible to find all the components required in the backup file")
self.publish_snapshots += new_publish_snapshots
self.do_publish(recreate=recreate, merge_snapshots=False) |
<SYSTEM_TASK:>
Load publish info from remote
<END_TASK>
<USER_TASK:>
Description:
def load(self):
"""
Load publish info from remote
""" |
publish = self._get_publish()
self.architectures = publish['Architectures']
for source in publish['Sources']:
component = source['Component']
snapshot = source['Name']
self.publish_snapshots.append({
'Component': component,
'Name': snapshot
})
snapshot_remote = self._find_snapshot(snapshot)
for source in self._get_source_snapshots(snapshot_remote, fallback_self=True):
self.add(source, component) |
<SYSTEM_TASK:>
Return package refs for given components
<END_TASK>
<USER_TASK:>
Description:
def get_packages(self, component=None, components=[], packages=None):
"""
Return package refs for given components
""" |
if component:
components = [component]
package_refs = []
for snapshot in self.publish_snapshots:
if component and snapshot['Component'] not in components:
# We don't want packages for this component
continue
component_refs = self._get_packages(self.client, "snapshots", snapshot['Name'])
if packages:
# Filter package names
for ref in component_refs:
if self.parse_package_ref(ref)[1] in packages:
package_refs.append(ref)
else:
package_refs.extend(component_refs)
return package_refs |
<SYSTEM_TASK:>
Return tuple of architecture, package_name, version, id
<END_TASK>
<USER_TASK:>
Description:
def parse_package_ref(self, ref):
"""
Return tuple of architecture, package_name, version, id
""" |
if not ref:
return None
parsed = re.match(r'(.*)\ (.*)\ (.*)\ (.*)', ref)
return parsed.groups() |
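For illustration, the same four-field parse can be reproduced standalone; the package ref below is made up and only shows the expected "arch package version id" layout.

import re

ref = "amd64 python-aptly 0.1-1 1234abcd"  # hypothetical aptly package ref
arch, name, version, ref_id = re.match(r'(.*)\ (.*)\ (.*)\ (.*)', ref).groups()
print(arch, name, version, ref_id)  # amd64 python-aptly 0.1-1 1234abcd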
<SYSTEM_TASK:>
Add snapshot of component to publish
<END_TASK>
<USER_TASK:>
Description:
def add(self, snapshot, component='main'):
"""
Add snapshot of component to publish
""" |
try:
self.components[component].append(snapshot)
except KeyError:
self.components[component] = [snapshot] |
<SYSTEM_TASK:>
Find snapshot on remote by name or regular expression
<END_TASK>
<USER_TASK:>
Description:
def _find_snapshot(self, name):
"""
Find snapshot on remote by name or regular expression
""" |
remote_snapshots = self._get_snapshots(self.client)
for remote in reversed(remote_snapshots):
if remote["Name"] == name or \
re.match(name, remote["Name"]):
return remote
return None |
<SYSTEM_TASK:>
Get list of source snapshot names of given snapshot
<END_TASK>
<USER_TASK:>
Description:
def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: at the moment source snapshots have to be inferred from the snapshot description.
""" |
if not snapshot:
return []
source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
source_snapshots = [snapshot['Name']]
source_snapshots.sort()
return source_snapshots |
<SYSTEM_TASK:>
Create component snapshots by merging other snapshots of same component
<END_TASK>
<USER_TASK:>
Description:
def merge_snapshots(self):
"""
Create component snapshots by merging other snapshots of same component
""" |
self.publish_snapshots = []
for component, snapshots in self.components.items():
if len(snapshots) <= 1:
# Only one snapshot, no need to merge
lg.debug("Component %s has only one snapshot %s, not creating merge snapshot" % (component, snapshots))
self.publish_snapshots.append({
'Component': component,
'Name': snapshots[0]
})
continue
# Look if merged snapshot doesn't already exist
remote_snapshot = self._find_snapshot(r'^%s%s-%s-\d+' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component))
if remote_snapshot:
source_snapshots = self._get_source_snapshots(remote_snapshot)
# Check if latest merged snapshot has same source snapshots like us
snapshots_want = list(snapshots)
snapshots_want.sort()
lg.debug("Comparing snapshots: snapshot_name=%s, snapshot_sources=%s, wanted_sources=%s" % (remote_snapshot['Name'], source_snapshots, snapshots_want))
if snapshots_want == source_snapshots:
lg.info("Remote merge snapshot already exists: %s (%s)" % (remote_snapshot['Name'], source_snapshots))
self.publish_snapshots.append({
'Component': component,
'Name': remote_snapshot['Name']
})
continue
snapshot_name = '%s%s-%s-%s' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component, self.timestamp)
lg.info("Creating merge snapshot %s for component %s of snapshots %s" % (snapshot_name, component, snapshots))
package_refs = []
for snapshot in snapshots:
# Get package refs from each snapshot
packages = self._get_packages(self.client, "snapshots", snapshot)
package_refs.extend(packages)
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': snapshots,
'Description': "Merged from sources: %s" % ', '.join("'%s'" % snap for snap in snapshots),
'PackageRefs': package_refs,
}
)
except AptlyException as e:
if e.res.status_code == 400:
lg.warning("Error creating snapshot %s, assuming it already exists" % snapshot_name)
else:
raise
self.publish_snapshots.append({
'Component': component,
'Name': snapshot_name
}) |
<SYSTEM_TASK:>
Prints the time func takes to execute.
<END_TASK>
<USER_TASK:>
Description:
def timing_decorator(func):
"""Prints the time func takes to execute.""" |
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Wrapper for printing execution time.
Parameters
----------
print_time: bool, optional
whether or not to print time function takes.
"""
print_time = kwargs.pop('print_time', False)
if not print_time:
return func(*args, **kwargs)
else:
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(func.__name__ + ' took %.3f seconds' %
(end_time - start_time))
return result
return wrapper |
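A minimal usage sketch, assuming the decorator above is in scope; timing is opt-in via the print_time keyword consumed by the wrapper.

@timing_decorator
def slow_sum(n):
    return sum(range(n))

slow_sum(10 ** 6)                   # runs without printing
slow_sum(10 ** 6, print_time=True)  # prints e.g. "slow_sum took 0.012 seconds"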
<SYSTEM_TASK:>
Saves object with pickle.
<END_TASK>
<USER_TASK:>
Description:
def pickle_save(data, name, **kwargs):
"""Saves object with pickle.
Parameters
----------
data: anything picklable
Object to save.
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
overwrite_existing: bool, optional
If the save path already contains a file: if True, the file will be
overwritten; if False, the data will be saved with the system time
appended to the file name.
""" |
extension = kwargs.pop('extension', '.pkl')
overwrite_existing = kwargs.pop('overwrite_existing', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
filename = name + extension
# Check if the target directory exists and if not make it
dirname = os.path.dirname(filename)
if not os.path.exists(dirname) and dirname != '':
os.makedirs(dirname)
if os.path.isfile(filename) and not overwrite_existing:
print(filename + ' already exists! Saving with time appended')
filename = name + '_' + time.asctime().replace(' ', '_')
filename += extension
# check if permission error is defined (was not before python 3.3)
# and otherwise use IOError
try:
PermissionError
except NameError:
PermissionError = IOError
try:
outfile = open(filename, 'wb')
pickle.dump(data, outfile)
outfile.close()
except (MemoryError, PermissionError) as err:
warnings.warn((type(err).__name__ + ' in pickle_save: continue without'
' saving.'), UserWarning) |
<SYSTEM_TASK:>
Load data with pickle.
<END_TASK>
<USER_TASK:>
Description:
def pickle_load(name, extension='.pkl'):
"""Load data with pickle.
Parameters
----------
name: str
Path to load from (includes dir, excludes extension).
extension: str, optional
File extension.
Returns
-------
Contents of file path.
""" |
filename = name + extension
infile = open(filename, 'rb')
data = pickle.load(infile)
infile.close()
return data |
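A round-trip sketch using the two helpers above; the path is illustrative.

data = {'estimates': [1.0, 2.0, 3.0]}
pickle_save(data, 'cache/example', overwrite_existing=True)
assert pickle_load('cache/example') == data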
<SYSTEM_TASK:>
Helper function for parallelising thread_values_df.
<END_TASK>
<USER_TASK:>
Description:
def run_thread_values(run, estimator_list):
"""Helper function for parallelising thread_values_df.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions
Returns
-------
vals_array: numpy array
Array of estimator values for each thread.
Has shape (len(estimator_list), len(theads)).
""" |
threads = nestcheck.ns_run_utils.get_run_threads(run)
vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list)
for th in threads]
vals_array = np.stack(vals_list, axis=1)
assert vals_array.shape == (len(estimator_list), len(threads))
return vals_array |
<SYSTEM_TASK:>
Applies statistical_distances to each unique pair of distribution
<END_TASK>
<USER_TASK:>
Description:
def pairwise_distances(dist_list, earth_mover_dist=True, energy_dist=True):
"""Applies statistical_distances to each unique pair of distribution
samples in dist_list.
Parameters
----------
dist_list: list of 1d arrays
earth_mover_dist: bool, optional
Passed to statistical_distances.
energy_dist: bool, optional
Passed to statistical_distances.
Returns
-------
ser: pandas Series object
Values are statistical distances. Index levels are:
calculation type: name of statistical distance.
run: string of the tuple (i, j) giving the indexes in dist_list of the
pair of samples arrays from which the statistical distance was computed.
""" |
out = []
index = []
for i, samp_i in enumerate(dist_list):
for j, samp_j in enumerate(dist_list):
if j < i:
index.append(str((i, j)))
out.append(statistical_distances(
samp_i, samp_j, earth_mover_dist=earth_mover_dist,
energy_dist=energy_dist))
columns = ['ks pvalue', 'ks distance']
if earth_mover_dist:
columns.append('earth mover distance')
if energy_dist:
columns.append('energy distance')
ser = pd.DataFrame(out, index=index, columns=columns).unstack()
ser.index.names = ['calculation type', 'run']
return ser |
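For illustration, a sketch comparing three sets of normally distributed samples (synthetic data, not from a real run).

import numpy as np

rng = np.random.RandomState(0)
dist_list = [rng.normal(loc=mu, size=1000) for mu in (0.0, 0.0, 0.5)]
ser = pairwise_distances(dist_list)
print(ser.loc['energy distance'])  # one value per pair index '(i, j)' with j < i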
<SYSTEM_TASK:>
Compute measures of the statistical distance between samples.
<END_TASK>
<USER_TASK:>
Description:
def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
""" |
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) |
<SYSTEM_TASK:>
Generate dummy data for a single nested sampling thread.
<END_TASK>
<USER_TASK:>
Description:
def get_dummy_thread(nsamples, **kwargs):
"""Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
""" |
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if seed is not False:
np.random.seed(seed)
thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
'nlive_array': np.full(nsamples, 1.),
'theta': np.random.random((nsamples, ndim)),
'thread_labels': np.zeros(nsamples).astype(int)}
if logl_start != -np.inf:
thread['logl'] += logl_start
thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
return thread |
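A quick sketch of the output structure; the numbers themselves are random and only illustrative.

import numpy as np

thread = get_dummy_thread(10, ndim=3, seed=0, logl_range=10)
print(thread['logl'].shape, thread['theta'].shape)  # (10,) (10, 3)
print(thread['thread_min_max'])  # [[-inf, <final logl in thread>]]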
<SYSTEM_TASK:>
Generate dummy data for a nested sampling run.
<END_TASK>
<USER_TASK:>
Description:
def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in each thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
""" |
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
threads = []
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so labels match labels that would
# have been given processing a dead points array. N.B. this only works when
# all threads have same start_logl
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) |
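For example, a reproducible three-thread run; after combining, the thread labels are relabelled 0, 1, 2.

import numpy as np

run = get_dummy_run(3, 20, seed=1)
print(run['logl'].shape)                # (60,)
print(np.unique(run['thread_labels']))  # [0 1 2]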
<SYSTEM_TASK:>
Generate dummy data for a dynamic nested sampling run.
<END_TASK>
<USER_TASK:>
Description:
def get_dummy_dynamic_run(nsamples, **kwargs):
"""Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in each thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic part of the run (each starting at a
randomly chosen point in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
""" |
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
nthread_init = kwargs.pop('nthread_init', 2)
nthread_dyn = kwargs.pop('nthread_dyn', 3)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init['logl'], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
# Seed must be False here so it is not set again for each thread
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
# make sure the threads have unique labels and combine them
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
# To make sure the thread labelling is the same as it would be when
# processing a dead points file, transform into dead points
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples) |
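A sketch of a small dynamic run; with the default nthread_init=2 and nthread_dyn=3, the processed run should contain five threads (shapes below are what is expected, not guaranteed output).

run = get_dummy_dynamic_run(10, seed=2)
print(run['logl'].shape)            # (50,)
print(run['thread_min_max'].shape)  # (5, 2)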
<SYSTEM_TASK:>
Plots kde estimates of distributions of samples in each cell of the
<END_TASK>
<USER_TASK:>
Description:
def kde_plot_df(df, xlims=None, **kwargs):
"""Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure
""" |
assert xlims is None or isinstance(xlims, dict)
figsize = kwargs.pop('figsize', (6.4, 1.5))
num_xticks = kwargs.pop('num_xticks', None)
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
normalize = kwargs.pop('normalize', True)
legend = kwargs.pop('legend', False)
legend_kwargs = kwargs.pop('legend_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
for nax, col in enumerate(df):
if nrows == 1:
ax = axes[nax]
else:
ax = axes[nax // ncols, nax % ncols]
supmin = df[col].apply(np.min).min()
supmax = df[col].apply(np.max).max()
support = np.linspace(supmin - 0.1 * (supmax - supmin),
supmax + 0.1 * (supmax - supmin), 200)
handles = []
labels = []
for name, samps in df[col].iteritems():
pdf = scipy.stats.gaussian_kde(samps)(support)
if not normalize:
pdf /= pdf.max()
handles.append(ax.plot(support, pdf, label=name)[0])
labels.append(name)
ax.set_ylim(bottom=0)
ax.set_yticks([])
if xlims is not None:
try:
ax.set_xlim(xlims[col])
except KeyError:
pass
ax.set_xlabel(col)
if num_xticks is not None:
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=num_xticks))
if legend:
fig.legend(handles, labels, **legend_kwargs)
return fig |
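A minimal sketch with synthetic samples; every DataFrame cell holds a 1d array, and each row becomes one kde line per subplot. The output path is hypothetical.

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame({'param_1': [rng.normal(size=500), rng.normal(1.0, 1.0, 500)],
                   'param_2': [rng.gamma(2.0, size=500), rng.gamma(3.0, size=500)]},
                  index=['run_a', 'run_b'])
fig = kde_plot_df(df, xlims={'param_1': [-4, 5]}, legend=True)
fig.savefig('kde_grid.png')  # hypothetical output path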
<SYSTEM_TASK:>
Helper function for making fgivenx plots of functions with 2 array
<END_TASK>
<USER_TASK:>
Description:
def alternate_helper(x, alt_samps, func=None):
"""Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths.""" |
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 = alt_samps[::2]
arg2 = alt_samps[1::2]
return func(x, arg1, arg2) |
<SYSTEM_TASK:>
Helper function for plot_run_nlive.
<END_TASK>
<USER_TASK:>
Description:
def average_by_key(dict_in, key):
"""Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float
""" |
if key is None:
return np.mean(np.concatenate(list(dict_in.values())))
else:
try:
return np.mean(dict_in[key])
except KeyError:
print('method name "' + key + '" not found, so ' +
'normalise area under the analytic relative posterior ' +
'mass curve using the mean of all methods.')
return np.mean(np.concatenate(list(dict_in.values()))) |
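For example (values are made up):

import numpy as np

vals = {'method_a': np.array([1.0, 2.0]), 'method_b': np.array([4.0])}
average_by_key(vals, 'method_a')  # 1.5
average_by_key(vals, None)        # mean over all methods: (1 + 2 + 4) / 3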
<SYSTEM_TASK:>
Process output from many nested sampling runs in parallel with optional
<END_TASK>
<USER_TASK:>
Description:
def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
""" |
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(
process_error_helper, file_roots, func_args=(base_dir, process_func),
func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data,
key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = (error_name + ' processing ' + str(len(index_list)) + ' / '
+ str(len(file_roots)) + ' files')
if len(index_list) != len(file_roots):
message += ('. Roots with errors have (zero based) indexes: '
+ str(index_list))
print(message)
# Return runs which did not have errors
return [run for run in data if 'error' not in run] |
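A hedged usage sketch; the file roots and directory below are hypothetical PolyChord output.

roots = ['gauss_{}'.format(i) for i in range(1, 4)]  # chains/gauss_1.*, ...
runs = batch_process_data(roots, base_dir='chains',
                          errors_to_handle=(OSError, AssertionError))
print('{} runs processed without errors'.format(len(runs)))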
<SYSTEM_TASK:>
Wrapper which applies process_func and handles some common errors so one
<END_TASK>
<USER_TASK:>
Description:
def process_error_helper(root, base_dir, process_func, errors_to_handle=(),
**func_kwargs):
"""Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example if there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
details) or, if an error occured, a dict containing its type
and the file root.
""" |
try:
return process_func(root, base_dir, **func_kwargs)
except errors_to_handle as err:
run = {'error': type(err).__name__,
'output': {'file_root': root}}
return run |
<SYSTEM_TASK:>
Loads data from a PolyChord run into the nestcheck dictionary format for
<END_TASK>
<USER_TASK:>
Description:
def process_polychord_run(file_root, base_dir, process_stats_file=True,
**kwargs):
"""Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
""" |
# N.B. PolyChord dead points files also contains remaining live points at
# termination
samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')
ns_run = process_samples_array(samples, **kwargs)
ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}
if process_stats_file:
try:
ns_run['output'] = process_polychord_stats(file_root, base_dir)
except (OSError, IOError, ValueError) as err:
warnings.warn(
('process_polychord_stats raised {} processing {}.stats file. '
' Proceeding without stats.').format(
type(err).__name__, os.path.join(base_dir, file_root)),
UserWarning)
return ns_run |
<SYSTEM_TASK:>
Loads data from a MultiNest run into the nestcheck dictionary format for
<END_TASK>
<USER_TASK:>
Description:
def process_multinest_run(file_root, base_dir, **kwargs):
"""Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
""" |
# Load dead and live points
dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')
live = np.loadtxt(os.path.join(base_dir, file_root)
+ '-phys_live-birth.txt')
# Remove unnecessary final columns
dead = dead[:, :-2]
live = live[:, :-1]
assert dead[:, -2].max() < live[:, -2].min(), (
'final live points should have greater logls than any dead point!',
dead, live)
ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)
assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), (
'As MultiNest does not currently perform dynamic nested sampling, all '
'threads should start by sampling the whole prior.')
ns_run['output'] = {}
ns_run['output']['file_root'] = file_root
ns_run['output']['base_dir'] = base_dir
return ns_run |
<SYSTEM_TASK:>
Transforms results from a dynesty run into the nestcheck dictionary
<END_TASK>
<USER_TASK:>
Description:
def process_dynesty_run(results):
"""Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
""" |
samples = np.zeros((results.samples.shape[0],
results.samples.shape[1] + 3))
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
# Try processing standard nested sampling results
assert unique_th.shape[0] == results.nlive
assert np.array_equal(
np.unique(results.samples_id[-results.nlive:]),
np.asarray(range(results.nlive))), (
'perhaps the final live points are not included?')
thread_min_max[:, 0] = -np.inf
except AttributeError:
# If results has no nlive attribute, it must be dynamic nested sampling
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = (
results.batch_bounds[results.samples_batch[ind], 0])
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run |
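A sketch assuming dynesty is installed; the toy likelihood and settings are illustrative only, not a tuned example.

import dynesty
import numpy as np

def loglike(theta):
    return -0.5 * np.sum(theta ** 2)  # toy 2d Gaussian

def prior_transform(cube):
    return 10.0 * cube - 5.0  # uniform prior on [-5, 5]^2

sampler = dynesty.NestedSampler(loglike, prior_transform, 2, nlive=100)
# Keep the final live points in the results (see the docstring note above).
sampler.run_nested()
ns_run = process_dynesty_run(sampler.results)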
<SYSTEM_TASK:>
Convert an array of nested sampling dead and live points of the type
<END_TASK>
<USER_TASK:>
Description:
def process_samples_array(samples, **kwargs):
"""Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key).
""" |
samples = samples[np.argsort(samples[:, -2])]
ns_run = {}
ns_run['logl'] = samples[:, -2]
ns_run['theta'] = samples[:, :-2]
birth_contours = samples[:, -1]
# birth_contours, ns_run['theta'] = check_logls_unique(
# samples[:, -2], samples[:, -1], samples[:, :-2])
birth_inds = birth_inds_given_contours(
birth_contours, ns_run['logl'], **kwargs)
ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)
unique_threads = np.unique(ns_run['thread_labels'])
assert np.array_equal(unique_threads,
np.asarray(range(unique_threads.shape[0])))
# Work out nlive_array and thread_min_max logls from thread labels and
# birth contours
thread_min_max = np.zeros((unique_threads.shape[0], 2))
# NB delta_nlive indexes are offset from points' indexes by 1 as we need an
# element to represent the initial sampling of live points before any dead
# points are created.
# I.E. birth on step 1 corresponds to replacing dead point zero
delta_nlive = np.zeros(samples.shape[0] + 1)
for label in unique_threads:
thread_inds = np.where(ns_run['thread_labels'] == label)[0]
# Max is final logl in thread
thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]
thread_start_birth_ind = birth_inds[thread_inds[0]]
# delta nlive indexes are +1 from logl indexes to allow for initial
# nlive (before first dead point)
delta_nlive[thread_inds[-1] + 1] -= 1
if thread_start_birth_ind == birth_inds[0]:
# thread minimum is -inf as it starts by sampling from whole prior
thread_min_max[label, 0] = -np.inf
delta_nlive[0] += 1
else:
assert thread_start_birth_ind >= 0
thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]
delta_nlive[thread_start_birth_ind + 1] += 1
ns_run['thread_min_max'] = thread_min_max
ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]
return ns_run |
<SYSTEM_TASK:>
Maps the iso-likelihood contours on which points were born to the
<END_TASK>
<USER_TASK:>
Description:
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within which each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
""" |
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
precision, or for likelihoods with constant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) |
<SYSTEM_TASK:>
Creates a random sample from choices without replacement, subject to the
<END_TASK>
<USER_TASK:>
Description:
def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order.
""" |
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output |
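A small concrete example (condition must be in ascending order, as required):

import numpy as np

choices = np.array([0, 2, 4, 6])
condition = np.array([3, 5, 7])
# Each output element comes from choices and is < the matching condition entry.
print(sample_less_than_condition(choices, condition))  # e.g. [2. 0. 6.]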
<SYSTEM_TASK:>
Apply function to iterable with parallel map, and hence returns
<END_TASK>
<USER_TASK:>
Description:
def parallel_map(func, *arg_iterable, **kwargs):
"""Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Size of the chunks the iterable is split into before being passed to
the worker processes.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
""" |
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return list(map(func_to_map, *arg_iterable)) |
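A usage sketch; note that func must be picklable (defined at module level) when parallel=True.

def square_plus(x, offset=0):
    return x ** 2 + offset

parallel_map(square_plus, range(8), func_kwargs={'offset': 1}, max_workers=2)
# -> [1, 2, 5, 10, 17, 26, 37, 50] (results keep the input order)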
<SYSTEM_TASK:>
Apply function to iterable with parallelisation and a tqdm progress bar.
<END_TASK>
<USER_TASK:>
Description:
def parallel_apply(func, arg_iterable, **kwargs):
"""Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
""" |
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
func_args = kwargs.pop('func_args', ())
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if 'leave' not in tqdm_kwargs: # default to leave=False
tqdm_kwargs['leave'] = False
assert isinstance(func_args, tuple), (
str(func_args) + ' is type ' + str(type(func_args)))
assert isinstance(func_pre_args, tuple), (
str(func_pre_args) + ' is type ' + str(type(func_pre_args)))
progress = select_tqdm()
if not parallel:
if parallel_warning:
warnings.warn(('parallel_apply has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for
x in progress(arg_iterable, **tqdm_kwargs)]
else:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
futures = []
for element in arg_iterable:
futures.append(pool.submit(
func, *(func_pre_args + (element,) + func_args),
**func_kwargs))
results = []
for fut in progress(concurrent.futures.as_completed(futures),
total=len(arg_iterable), **tqdm_kwargs):
results.append(fut.result())
return results |
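A usage sketch; unlike parallel_map, the results are not guaranteed to come back in input order.

def scaled_power(x, scale, power=1):
    return (abs(x) * scale) ** power

results = parallel_apply(scaled_power, [1, -2, 3],
                         func_args=(10,), func_kwargs={'power': 2},
                         max_workers=2)
sorted(results)  # [100, 400, 900]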
<SYSTEM_TASK:>
If running in a jupyter notebook, then returns tqdm_notebook.
<END_TASK>
<USER_TASK:>
Description:
def select_tqdm():
"""If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function
""" |
try:
progress = tqdm.tqdm_notebook
assert get_ipython().has_trait('kernel')
except (NameError, AssertionError):
progress = tqdm.tqdm
return progress |