Dataset columns: text_prompt (string, 157 to 13.1k characters); code_prompt (string, 7 to 19.8k characters).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_file(self): """Update the read-in configuration file. """
if self._filename is None:
    raise NoConfigFileReadError()
with open(self._filename, 'w') as fb:
    self.write(fb)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_format(self, **kwargs): """Call ConfigParser to validate config Args: kwargs: are passed to :class:`configparser.ConfigParser` """
args = dict(
    dict_type=self._dict,
    allow_no_value=self._allow_no_value,
    inline_comment_prefixes=self._inline_comment_prefixes,
    strict=self._strict,
    empty_lines_in_values=self._empty_lines_in_values
)
args.update(kwargs)
parser = ConfigParser(**args)
updated_cfg = str(self)
parser.read_string(updated_cfg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def options(self, section): """Returns list of configuration options for the named section. Args: section (str): name of section Returns: list: list of option names """
if not self.has_section(section):
    raise NoSectionError(section) from None
return self.__getitem__(section).options()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, section, option): """Gets an option value for a given section. Args: section (str): section name option (str): option name Returns: :class:`Option`: Option object holding key/value pair """
if not self.has_section(section):
    raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
    value = section[option]
except KeyError:
    raise NoOptionError(option, section)
return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_option(self, section, option): """Checks for the existence of a given option in a given section. Args: section (str): name of section option (str): name of option Returns: bool: whether the option exists in the given section """
if section not in self.sections():
    return False
else:
    option = self.optionxform(option)
    return option in self[section]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def render_template(template, out_dir='.', context=None):
    '''
        This function renders the template designated by the argument to the
        designated directory using the given context.

        Args:
            template (string) : the source template to use (relative to ./templates)
            out_dir (string) : the name of the output directory
            context (dict) : the template rendering context
    '''
    # the directory containing templates
    template_directory = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..', 'templates', template
    )
    # the files and empty directories to copy
    files = []
    empty_dirs = []
    for (dirpath, _, filenames) in os.walk(template_directory):
        # if there are no files in the directory
        if len(filenames) == 0:
            # add the directory to the list
            empty_dirs.append(os.path.relpath(dirpath, template_directory))
        # otherwise there are files in this directory
        else:
            # add the files to the list
            files.extend([os.path.join(dirpath, filepath) for filepath in filenames])

    # for each template file
    for source_file in files:
        # open a new file that we are going to write to
        with open(source_file, 'r') as file:
            # create a template out of the source file contents
            template = Template(file.read())
            # render the template with the given contents
            template_rendered = template.render(**(context or {}))
            # the location of the source relative to the template directory
            source_relpath = os.path.relpath(source_file, template_directory)
            # the target filename
            filename = os.path.join(out_dir, source_relpath)
            # create a jinja template out of the file path
            filename_rendered = Template(filename).render(**context)
            # the directory of the target file
            source_dir = os.path.dirname(filename_rendered)
            # if the directory doesn't exist
            if not os.path.exists(source_dir):
                # create the directories
                os.makedirs(source_dir)
            # create the target file
            with open(filename_rendered, 'w') as target_file:
                # write the rendered template to the target file
                target_file.write(template_rendered)

    # for each empty directory
    for dirpath in empty_dirs:
        try:
            # dirname
            dirname = os.path.join(out_dir, dirpath)
            # treat the dirname as a jinja template
            dirname_rendered = Template(dirname).render(**context)
            # if the directory doesn't exist
            if not os.path.exists(dirname_rendered):
                # create the directory in the target, replacing the name
                os.makedirs(dirname_rendered)
        except OSError as exc:
            # if the directory already exists
            if exc.errno == errno.EEXIST and os.path.isdir(dirpath):
                # keep going (noop)
                pass
            # otherwise it's an error we don't handle
            else:
                # pass it along
                raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_handler(Model, name=None, **kwds): """ This factory returns an action handler that deletes an instance of the specified model when a delete action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to delete when the action is received. Returns: function(type, payload): The action handler for this model """
# necessary imports
from nautilus.database import db

async def action_handler(service, action_type, payload, props, notify=True, **kwds):
    # if the payload represents a new instance of `model`
    if action_type == get_crud_action('delete', name or Model):
        try:
            # the props of the message
            message_props = {}
            # if there was a correlation id in the request
            if 'correlation_id' in props:
                # make sure it ends up in the reply
                message_props['correlation_id'] = props['correlation_id']
            # the id in the payload representing the record to delete
            record_id = payload['id'] if 'id' in payload else payload['pk']
            # get the model matching the payload
            try:
                model_query = Model.select().where(Model.primary_key() == record_id)
            except KeyError:
                raise RuntimeError("Could not find appropriate id to remove service record.")
            # remove the model instance
            model_query.get().delete_instance()
            # if we need to tell someone about what happened
            if notify:
                # publish the success event
                await service.event_broker.send(
                    payload='{"status":"ok"}',
                    action_type=change_action_status(action_type, success_status()),
                    **message_props
                )
        # if something goes wrong
        except Exception as err:
            # if we need to tell someone about what happened
            if notify:
                # publish the error as an event
                await service.event_broker.send(
                    payload=str(err),
                    action_type=change_action_status(action_type, error_status()),
                    **message_props
                )
            # otherwise we aren't supposed to notify
            else:
                # raise the exception normally
                raise err

# return the handler
return action_handler
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_handler(Model, name=None, **kwds): """ This factory returns an action handler that responds to read requests by resolving the payload as a graphql query against the internal schema. Args: Model (nautilus.BaseModel): The model to query when a read action is received. Returns: function(type, payload): The action handler for this model """
async def action_handler(service, action_type, payload, props, **kwds):
    # if the payload represents a new instance of `model`
    if action_type == get_crud_action('read', name or Model):
        # the props of the message
        message_props = {}
        # if there was a correlation id in the request
        if 'correlation_id' in props:
            # make sure it ends up in the reply
            message_props['correlation_id'] = props['correlation_id']
        try:
            # resolve the query using the service schema
            resolved = service.schema.execute(payload)
            # create the string response
            response = json.dumps({
                'data': {key: value for key, value in resolved.data.items()},
                'errors': resolved.errors
            })
            # publish the success event
            await service.event_broker.send(
                payload=response,
                action_type=change_action_status(action_type, success_status()),
                **message_props
            )
        # if something goes wrong
        except Exception as err:
            # publish the error as an event
            await service.event_broker.send(
                payload=str(err),
                action_type=change_action_status(action_type, error_status()),
                **message_props
            )

# return the handler
return action_handler
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _from_type(self, config): """ This method converts a type into a dict. """
def is_user_attribute(attr):
    return (
        not attr.startswith('__')
        and not isinstance(getattr(config, attr), collections.abc.Callable)
    )

return {attr: getattr(config, attr) for attr in dir(config)
        if is_user_attribute(attr)}
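A minimal usage sketch of the same attribute-filtering idea; the ServiceConfig class and the standalone from_type helper below are hypothetical stand-ins for the method above:

import collections.abc

class ServiceConfig:
    database_url = 'sqlite:///nautilus.db'
    port = 8000

def from_type(config):
    # keep non-dunder, non-callable attributes only
    def is_user_attribute(attr):
        return (not attr.startswith('__')
                and not isinstance(getattr(config, attr), collections.abc.Callable))
    return {attr: getattr(config, attr) for attr in dir(config) if is_user_attribute(attr)}

print(from_type(ServiceConfig))  # {'database_url': 'sqlite:///nautilus.db', 'port': 8000}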
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def walk_query(obj, object_resolver, connection_resolver, errors, current_user=None, __naut_name=None, obey_auth=True, **filters): """ This function traverses a query and collects the corresponding information in a dictionary. """
# if the object has no selection set if not hasattr(obj, 'selection_set'): # yell loudly raise ValueError("Can only resolve objects, not primitive types") # the name of the node node_name = __naut_name or obj.name.value if obj.name else obj.operation # the selected fields selection_set = obj.selection_set.selections def _build_arg_tree(arg): """ This function recursively builds the arguments for lists and single values """ # TODO: what about object arguments?? # if there is a single value if hasattr(arg, 'value'): # assign the value to the filter return arg.value # otherwise if there are multiple values for the argument elif hasattr(arg, 'values'): return [_build_arg_tree(node) for node in arg.values] # for each argument on this node for arg in obj.arguments: # add it to the query filters filters[arg.name.value] = _build_arg_tree(arg.value) # the fields we have to ask for fields = [field for field in selection_set if not field.selection_set] # the links between objects connections = [field for field in selection_set if field.selection_set] try: # resolve the model with the given fields models = await object_resolver(node_name, [field.name.value for field in fields], current_user=current_user, obey_auth=obey_auth, **filters) # if something went wrong resolving the object except Exception as e: # add the error as a string errors.append(e.__str__()) # stop here return None # add connections to each matching model for model in models: # if is an id for the model if 'pk' in model: # for each connection for connection in connections: # the name of the connection connection_name = connection.name.value # the target of the connection node = { 'name': node_name, 'pk': model['pk'] } try: # go through the connection connected_ids, next_target = await connection_resolver( connection_name, node, ) # if there are connections if connected_ids: # add the id filter to the list filters['pk_in'] = connected_ids # add the connection field value = await walk_query( connection, object_resolver, connection_resolver, errors, current_user=current_user, obey_auth=obey_auth, __naut_name=next_target, **filters ) # there were no connections else: value = [] # if something went wrong except Exception as e: # add the error as a string errors.append(e.__str__()) # stop here value = None # set the connection to the appropriate value model[connection_name] = value # return the list of matching models return models
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def query_handler(service, action_type, payload, props, **kwds): """ This action handler interprets the payload as a query to be executed by the api gateway service. """
# check that the action type indicates a query
if action_type == query_action_type():
    print('encountered query event {!r} '.format(payload))
    # perform the query
    result = await parse_string(
        payload,
        service.object_resolver,
        service.connection_resolver,
        service.mutation_resolver,
        obey_auth=False
    )
    # the props for the reply message
    reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {}
    # publish the success event
    await service.event_broker.send(
        payload=result,
        action_type=change_action_status(action_type, success_status()),
        **reply_props
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summarize_mutation_io(name, type, required=False): """ This function returns the standard summary for mutations inputs and outputs """
return dict( name=name, type=type, required=required )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crud_mutation_name(action, model): """ This function returns the name of a mutation that performs the specified crud action on the given model service """
model_string = get_model_string(model)
# make sure the mutation name is correctly camelcased
model_string = model_string[0].upper() + model_string[1:]
# return the mutation name
return "{}{}".format(action, model_string)
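A quick illustration of the expected naming, assuming get_model_string returns a lowercase model name; the demo helper below is hypothetical and mirrors the body above after that lookup:

def crud_mutation_name_demo(action, model_string):
    # same camelcasing step as above
    return "{}{}".format(action, model_string[0].upper() + model_string[1:])

assert crud_mutation_name_demo('create', 'user') == 'createUser'
assert crud_mutation_name_demo('delete', 'recipe') == 'deleteRecipe'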
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _summarize_o_mutation_type(model): """ This function creates the actual mutation io summary corresponding to the model """
from nautilus.api.util import summarize_mutation_io
# compute the appropriate name for the object
object_type_name = get_model_string(model)
# return a mutation io object
return summarize_mutation_io(
    name=object_type_name,
    type=_summarize_object_type(model),
    required=False
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _summarize_object_type(model): """ This function returns the summary for a given model """
# the fields for the service's model
model_fields = {field.name: field for field in list(model.fields())}
# summarize the model
return {
    'fields': [
        {
            'name': key,
            'type': type(convert_peewee_field(value)).__name__
        }
        for key, value in model_fields.items()
    ]
}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combine_action_handlers(*handlers): """ This function combines the given action handlers into a single function which will call all of them. """
# make sure each of the given handlers is callable
for handler in handlers:
    # if the handler is not a coroutine
    if not (iscoroutinefunction(handler) or iscoroutine(handler)):
        # yell loudly
        raise ValueError("Provided handler is not a coroutine: %s" % handler)

# the combined action handler
async def combined_handler(*args, **kwds):
    # go over every given handler
    for handler in handlers:
        # call the handler
        await handler(*args, **kwds)

# return the combined action handler
return combined_handler
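A usage sketch, assuming combine_action_handlers above is in scope; the two handlers below are hypothetical and simply print what they received:

import asyncio

async def log_handler(service, action_type, payload, props, **kwds):
    print('saw', action_type)

async def audit_handler(service, action_type, payload, props, **kwds):
    print('audited', action_type)

combined = combine_action_handlers(log_handler, audit_handler)
# both handlers run, in the order they were given
asyncio.run(combined(None, 'user.create.pending', '{}', {}))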
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_handler(Model, name=None, **kwds): """ This factory returns an action handler that updates an existing instance of the specified model when an update action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to update when the action is received. Returns: function(type, payload): The action handler for this model """
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
    # if the payload represents a new instance of `Model`
    if action_type == get_crud_action('update', name or Model):
        try:
            # the props of the message
            message_props = {}
            # if there was a correlation id in the request
            if 'correlation_id' in props:
                # make sure it ends up in the reply
                message_props['correlation_id'] = props['correlation_id']
            # grab the name of the primary key for the model
            pk_field = Model.primary_key()
            # make sure there is a primary key to id the model
            if not pk_field.name in payload:
                # yell loudly
                raise ValueError("Must specify the pk of the model when updating")
            # grab the matching model
            model = Model.select().where(pk_field == payload[pk_field.name]).get()
            # remove the key from the payload
            payload.pop(pk_field.name, None)
            # for every key,value pair
            for key, value in payload.items():
                # TODO: add protection for certain fields from being
                # changed by the api
                setattr(model, key, value)
            # save the updates
            model.save()
            # if we need to tell someone about what happened
            if notify:
                # publish the success event
                await service.event_broker.send(
                    payload=ModelSerializer().serialize(model),
                    action_type=change_action_status(action_type, success_status()),
                    **message_props
                )
        # if something goes wrong
        except Exception as err:
            # if we need to tell someone about what happened
            if notify:
                # publish the error as an event
                await service.event_broker.send(
                    payload=str(err),
                    action_type=change_action_status(action_type, error_status()),
                    **message_props
                )
            # otherwise we aren't supposed to notify
            else:
                # raise the exception normally
                raise err

# return the handler
return action_handler
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def graphql_mutation_from_summary(summary): """ This function returns a graphql mutation corresponding to the provided summary. """
# get the name of the mutation from the summary
mutation_name = summary['name']
# print(summary)

# treat the "type" string as a graphene input type
input_name = mutation_name + "Input"
input_fields = build_native_type_dictionary(summary['inputs'], name=input_name, respect_required=True)
# the inputs for the mutation are defined by a class record
inputs = type('Input', (object,), input_fields)

# the outputs for the mutation are attributes to the class record
output_name = mutation_name + "Output"
outputs = build_native_type_dictionary(summary['outputs'], name=output_name)

# a no-op in order to satisfy the introspection query
mutate = classmethod(lambda *_, **__: 'hello')

# create the appropriate mutation class record
mutation = type(mutation_name, (graphene.Mutation,), {
    'Input': inputs,
    'mutate': mutate,
    **outputs
})

# return the newly created mutation record
return mutation
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def arg_string_from_dict(arg_dict, **kwds): """ This function takes a series of dictionaries and creates an argument string for a graphql query """
# the filters dictionary
filters = {
    **arg_dict,
    **kwds,
}
# return the correctly formed string
return ", ".join("{}: {}".format(key, json.dumps(value)) for key, value in filters.items())
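A usage sketch, assuming arg_string_from_dict above is in scope; values are rendered as JSON literals, so strings come out quoted:

print(arg_string_from_dict({'name': 'foo'}, pk=2))
# name: "foo", pk: 2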
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_model_schema(target_model): """ This function creates a graphql schema that provides a single model """
from nautilus.database import db

# create the schema instance
schema = graphene.Schema(auto_camelcase=False)

# grab the primary key from the model
primary_key = target_model.primary_key()
primary_key_type = convert_peewee_field(primary_key)

# create a graphene object
class ModelObjectType(PeeweeObjectType):
    class Meta:
        model = target_model

    pk = Field(primary_key_type, description="The primary key for this object.")

    @graphene.resolve_only_args
    def resolve_pk(self):
        return getattr(self, self.primary_key().name)

class Query(graphene.ObjectType):
    """ the root level query """
    all_models = List(ModelObjectType, args=args_for_model(target_model))

    @graphene.resolve_only_args
    def resolve_all_models(self, **args):
        # filter the model query according to the arguments
        # print(filter_model(target_model, args)[0].__dict__)
        return filter_model(target_model, args)

# add the query to the schema
schema.query = Query

return schema
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def connection_service_name(service, *args):
    ''' the name of a service that manages the connection between services '''
    # if the service is a string
    if isinstance(service, str):
        return service

    return normalize_string(type(service).__name__)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_session_token(secret_key, token): """ This function verifies the token using the secret key and returns its contents. """
return jwt.decode(token.encode('utf-8'), secret_key, algorithms=[token_encryption_algorithm()] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def handle_action(self, action_type, payload, **kwds): """ The default action Handler has no action. """
# if there is a service attached to the action handler
if hasattr(self, 'service'):
    # handle roll calls
    await roll_call_handler(self.service, action_type, payload, **kwds)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def announce(self): """ This method is used to announce the existence of the service """
# send a serialized event
await self.event_broker.send(
    action_type=intialize_service_action(),
    payload=json.dumps(self.summarize())
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs): """ This function starts the service's network interfaces. Args: port (int): The port for the http server. """
print("Running service on http://localhost:%i. " % port +
      "Press Ctrl+C to terminate.")

# apply the configuration to the service config
self.config.port = port
self.config.host = host

# start the loop
try:
    # if an event broker has been created for this service
    if self.event_broker:
        # start the broker
        self.event_broker.start()
        # announce the service
        self.loop.run_until_complete(self.announce())

    # the handler for the http server
    http_handler = self.app.make_handler()
    # create an asyncio server
    self._http_server = self.loop.create_server(http_handler, host, port)
    # grab the handler for the server callback
    self._server_handler = self.loop.run_until_complete(self._http_server)
    # start the event loop
    self.loop.run_forever()
# if the user interrupted the server
except KeyboardInterrupt:
    # keep going
    pass
# when we're done
finally:
    try:
        # clean up the service
        self.cleanup()
    # if we end up closing before any variables get assigned
    except UnboundLocalError:
        # just ignore it (there was nothing to close)
        pass
    # close the event loop
    self.loop.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cleanup(self): """ This function is called when the service has finished running regardless of intentionally or not. """
# if an event broker has been created for this service
if self.event_broker:
    # stop the event broker
    self.event_broker.stop()

# attempt
try:
    # close the http server
    self._server_handler.close()
    self.loop.run_until_complete(self._server_handler.wait_closed())
    self.loop.run_until_complete(self._http_handler.finish_connections(shutdown_timeout))
# if there was no handler
except AttributeError:
    # keep going
    pass

# more cleanup
self.loop.run_until_complete(self.app.shutdown())
self.loop.run_until_complete(self.app.cleanup())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_http_endpoint(self, url, request_handler): """ This method provides a programmatic way of adding individual routes to the http server. Args: url (str): the url to be handled by the request_handler request_handler (nautilus.network.RequestHandler): The request handler """
self.app.router.add_route('*', url, request_handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def route(cls, route, config=None): """ This method provides a decorator for adding endpoints to the http server. Args: route (str): The url to be handled by the RequestHandler config (dict): Configuration for the request handler Example: .. code-block:: python import nautilus from nautilus.network.http import RequestHandler class MyService(nautilus.Service): @MyService.route('/') class HelloWorld(RequestHandler): def get(self): return self.finish('hello world') """
def decorator(wrapped_class, **kwds):
    # add the endpoint at the given route
    cls._routes.append(
        dict(url=route, request_handler=wrapped_class)
    )
    # return the class undecorated
    return wrapped_class

# return the decorator
return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_session_token(secret_key, **payload): """ This function generates a session token signed by the secret key which can be used to extract the user credentials in a verifiable way. """
return jwt.encode(payload, secret_key, algorithm=token_encryption_algorithm()).decode('utf-8')
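A round-trip sketch of the two session-token helpers using PyJWT directly, assuming token_encryption_algorithm() is something like 'HS256'. Note that jwt.encode returns bytes in PyJWT 1.x (hence the .decode('utf-8') above) but returns str in PyJWT 2.x:

import jwt

secret = 'super-secret'
token = jwt.encode({'user': 'alice'}, secret, algorithm='HS256')
print(jwt.decode(token, secret, algorithms=['HS256']))  # {'user': 'alice'}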
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False): """ This function provides a standard representation of mutations to be used when services announce themselves """
return dict( name=mutation_name, event=event, isAsync=isAsync, inputs=inputs, outputs=outputs, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coerce(cls, key, value): """Ensure that loaded values are PasswordHashes."""
if isinstance(value, PasswordHash):
    return value
return super(PasswordHash, cls).coerce(key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rehash(self, password): """Recreates the internal hash."""
self.hash = self._new(password, self.desired_rounds)
self.rounds = self.desired_rounds
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_db(self): """ This function configures the database used for models to make the configuration parameters. """
# get the database url from the configuration
db_url = self.config.get('database_url', 'sqlite:///nautilus.db')
# configure the nautilus database to the url
nautilus.database.init_db(db_url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auth_criteria(self): """ This attribute provides the mapping of services to their auth requirement Returns: (dict) : the mapping from services to their auth requirements. """
# the dictionary we will return
auth = {}

# go over each attribute of the service
for attr in dir(self):
    # make sure we don't hit an infinite loop
    if attr != 'auth_criteria':
        # get the actual attribute
        attribute = getattr(self, attr)
        # if the attribute represents an auth criterion
        if isinstance(attribute, Callable) and hasattr(attribute, '_service_auth'):
            # add the criterion to the final results
            auth[getattr(self, attr)._service_auth] = attribute

# return the auth mapping
return auth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def login_user(self, password, **kwds): """ This function handles logging in a user by verifying the given credentials against the database """
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']

try:
    # look for a matching entry in the local database
    passwordEntry = self.model.select().where(
        self.model.user == user_data[root_query()][0]['pk']
    )[0]
# if we couldn't access the id of the result
except (KeyError, IndexError) as e:
    # yell loudly
    raise RuntimeError('Could not find matching registered user')

# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
    # the remote entry for the user
    user = user_data[root_query()][0]
    # then return a dictionary with the user and sessionToken
    return {
        'user': user,
        'sessionToken': self._user_session_token(user)
    }

# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def register_user(self, password, **kwds): """ This function is used to provide a sessionToken for later requests. Args: uid (str): The """
# so make one
user = await self._create_remote_user(password=password, **kwds)
# if there is no pk field
if not 'pk' in user:
    # make sure the user has a pk field
    user['pk'] = user['id']

# the query to find a matching query
match_query = self.model.user == user['id']

# if the user has already been registered
if self.model.select().where(match_query).count() > 0:
    # yell loudly
    raise RuntimeError('The user is already registered.')

# create an entry in the user password table
password = self.model(user=user['id'], password=password)
# save it to the database
password.save()

# return a dictionary with the user we created and a session token for later use
return {
    'user': user,
    'sessionToken': self._user_session_token(user)
}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters): """ This function resolves a given object in the remote backend services """
try: # check if an object with that name has been registered registered = [model for model in self._external_service_data['models'] \ if model['name']==object_name][0] # if there is no connection data yet except AttributeError: raise ValueError("No objects are registered with this schema yet.") # if we dont recognize the model that was requested except IndexError: raise ValueError("Cannot query for object {} on this service.".format(object_name)) # the valid fields for this object valid_fields = [field['name'] for field in registered['fields']] # figure out if any invalid fields were requested invalid_fields = [field for field in fields if field not in valid_fields] try: # make sure we never treat pk as invalid invalid_fields.remove('pk') # if they weren't asking for pk as a field except ValueError: pass # if there were if invalid_fields: # yell loudly raise ValueError("Cannot query for fields {!r} on {}".format( invalid_fields, registered['name'] )) # make sure we include the id in the request fields.append('pk') # the query for model records query = query_for_model(fields, **filters) # the action type for the question action_type = get_crud_action('read', object_name) # query the appropriate stream for the information response = await self.event_broker.ask( action_type=action_type, payload=query ) # treat the reply like a json object response_data = json.loads(response) # if something went wrong if 'errors' in response_data and response_data['errors']: # return an empty response raise ValueError(','.join(response_data['errors'])) # grab the valid list of matches result = response_data['data'][root_query()] # grab the auth handler for the object auth_criteria = self.auth_criteria.get(object_name) # if we care about auth requirements and there is one for this object if obey_auth and auth_criteria: # build a second list of authorized entries authorized_results = [] # for each query result for query_result in result: # create a graph entity for the model graph_entity = GraphEntity(self, model_type=object_name, id=query_result['pk']) # if the auth handler passes if await auth_criteria(model=graph_entity, user_id=current_user): # add the result to the final list authorized_results.append(query_result) # overwrite the query result result = authorized_results # apply the auth handler to the result return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def mutation_resolver(self, mutation_name, args, fields): """ the default behavior for mutations is to look up the event, publish the correct event type with the args as the body, and return the fields contained in the result """
try:
    # make sure we can identify the mutation
    mutation_summary = [mutation for mutation in
                        self._external_service_data['mutations']
                        if mutation['name'] == mutation_name][0]
# if we couldn't get the first entry in the list
except KeyError as e:
    # make sure the error is reported
    raise ValueError("Could not execute mutation named: " + mutation_name)

# the function to use for running the mutation depends on its synchronicity
# event_function = self.event_broker.ask \
#     if mutation_summary['isAsync'] else self.event_broker.send
event_function = self.event_broker.ask

# send the event and wait for a response
value = await event_function(
    action_type=mutation_summary['event'],
    payload=args
)

try:
    # return a dictionary with the values we asked for
    return json.loads(value)
# if the result was not valid json
except json.decoder.JSONDecodeError:
    # just throw the value
    raise RuntimeError(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_parser(): """Get a parser object"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

parser = ArgumentParser(description=__doc__,
                        formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-s1", dest="s1", help="sequence 1")
parser.add_argument("-s2", dest="s2", help="sequence 2")
return parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def _async_request_soup(url):
    ''' Perform a GET web request and return a bs4 parser '''
    from bs4 import BeautifulSoup
    import aiohttp

    _LOGGER.debug('GET %s', url)
    async with aiohttp.ClientSession() as session:
        resp = await session.get(url)
        text = await resp.text()
    return BeautifulSoup(text, 'html.parser')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def async_determine_channel(channel):
    ''' Check whether the current channel is correct. If not try to
    determine it using fuzzywuzzy '''
    from fuzzywuzzy import process

    channel_data = await async_get_channels()
    if not channel_data:
        _LOGGER.error('No channel data. Cannot determine requested channel.')
        return
    channels = [c for c in channel_data.get('data', {}).keys()]
    if channel in channels:
        return channel
    else:
        res = process.extractOne(channel, channels)[0]
        _LOGGER.debug('No direct match found for %s. Resort to guesswork.'
                      'Guessed %s', channel, res)
        return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def async_get_channels(no_cache=False, refresh_interval=4):
    ''' Get channel list and corresponding urls '''
    # Check cache
    now = datetime.datetime.now()
    max_cache_age = datetime.timedelta(hours=refresh_interval)
    if not no_cache and 'channels' in _CACHE:
        cache = _CACHE.get('channels')
        cache_age = cache.get('last_updated')
        if now - cache_age < max_cache_age:
            _LOGGER.debug('Found channel list in cache.')
            return cache
        else:
            _LOGGER.debug('Found outdated channel list in cache. Update it.')
            _CACHE.pop('channels')
    soup = await _async_request_soup(BASE_URL + '/plan.html')
    channels = {}
    for li_item in soup.find_all('li'):
        try:
            child = li_item.findChild()
            if not child or child.name != 'a':
                continue
            href = child.get('href')
            if not href or not href.startswith('/programme/chaine'):
                continue
            channels[child.get('title')] = BASE_URL + href
        except Exception as exc:
            _LOGGER.error('Exception occurred while fetching the channel '
                          'list: %s', exc)
    if channels:
        _CACHE['channels'] = {'last_updated': now, 'data': channels}
        return _CACHE['channels']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def resize_program_image(img_url, img_size=300):
    ''' Resize a program's thumbnail to the desired dimension '''
    match = re.match(r'.+/(\d+)x(\d+)/.+', img_url)
    if not match:
        _LOGGER.warning('Could not compute current image resolution of %s',
                        img_url)
        return img_url
    res_x = int(match.group(1))
    res_y = int(match.group(2))
    # aspect_ratio = res_x / res_y
    target_res_y = int(img_size * res_y / res_x)
    return re.sub(
        r'{}x{}'.format(res_x, res_y),
        r'{}x{}'.format(img_size, target_res_y),
        img_url)
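A usage sketch, assuming the function above is in scope; the URL is made up and only the WxH path segment matters:

url = 'https://example.com/media/640x360/show.jpg'
print(resize_program_image(url, img_size=300))
# https://example.com/media/300x168/show.jpg  (300 * 360 / 640 = 168.75 -> 168)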
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_current_program_progress(program):
    ''' Get the current progress of the program in % '''
    now = datetime.datetime.now()
    program_duration = get_program_duration(program)
    if not program_duration:
        return
    progress = now - program.get('start_time')
    return progress.seconds * 100 / program_duration
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_program_duration(program):
    ''' Get a program's duration in seconds '''
    program_start = program.get('start_time')
    program_end = program.get('end_time')
    if not program_start or not program_end:
        _LOGGER.error('Could not determine program start and/or end times.')
        _LOGGER.debug('Program data: %s', program)
        return
    program_duration = program_end - program_start
    return program_duration.seconds
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_remaining_time(program):
    ''' Get the remaining time in seconds of a program that is
    currently on. '''
    now = datetime.datetime.now()
    program_start = program.get('start_time')
    program_end = program.get('end_time')
    if not program_start or not program_end:
        _LOGGER.error('Could not determine program start and/or end times.')
        _LOGGER.debug('Program data: %s', program)
        return
    if now > program_end:
        _LOGGER.error('The provided program has already ended.')
        _LOGGER.debug('Program data: %s', program)
        return 0
    progress = now - program_start
    return progress.seconds
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def extract_program_summary(data):
    ''' Extract the summary data from a program's detail page '''
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(data, 'html.parser')
    try:
        return soup.find(
            'div', {'class': 'episode-synopsis'}
        ).find_all('div')[-1].text.strip()
    except Exception:
        _LOGGER.info('No summary found for program: %s',
                     soup.find('a', {'class': 'prog_name'}))
        return "No summary"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def async_set_summary(program):
    ''' Set a program's summary '''
    import aiohttp
    async with aiohttp.ClientSession() as session:
        resp = await session.get(program.get('url'))
        text = await resp.text()
    summary = extract_program_summary(text)
    program['summary'] = summary
    return program
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def async_get_program_guide(channel, no_cache=False, refresh_interval=4): ''' Get the program data for a channel ''' chan = await async_determine_channel(channel) now = datetime.datetime.now() max_cache_age = datetime.timedelta(hours=refresh_interval) if not no_cache and 'guide' in _CACHE and _CACHE.get('guide').get(chan): cache = _CACHE.get('guide').get(chan) cache_age = cache.get('last_updated') if now - cache_age < max_cache_age: _LOGGER.debug('Found program guide in cache.') return cache.get('data') else: _LOGGER.debug('Found outdated program guide in cache. Update it.') _CACHE['guide'].pop(chan) chans = await async_get_channels() url = chans.get('data', {}).get(chan) if not url: _LOGGER.error('Could not determine URL for %s', chan) return soup = await _async_request_soup(url) programs = [] for prg_item in soup.find_all('div', {'class': 'program-infos'}): try: prog_info = prg_item.find('a', {'class': 'prog_name'}) prog_name = prog_info.text.strip() prog_url = prog_info.get('href') if not prog_url: _LOGGER.warning('Failed to retrive the detail URL for program %s. ' 'The summary will be empty', prog_name) prog_type = prg_item.find('span', {'class': 'prog_type'}).text.strip() prog_times = prg_item.find('div', {'class': 'prog_progress'}) prog_start = datetime.datetime.fromtimestamp( int(prog_times.get('data-start'))) prog_end = datetime.datetime.fromtimestamp( int(prog_times.get('data-end'))) img = prg_item.find_previous_sibling().find( 'img', {'class': 'prime_broadcast_image'}) prog_img = img.get('data-src') if img else None programs.append( {'name': prog_name, 'type': prog_type, 'img': prog_img, 'url': prog_url, 'summary': None, 'start_time': prog_start, 'end_time': prog_end}) except Exception as exc: _LOGGER.error('Exception occured while fetching the program ' 'guide for channel %s: %s', chan, exc) import traceback traceback.print_exc() # Set the program summaries asynchronously tasks = [async_set_summary(prog) for prog in programs] programs = await asyncio.gather(*tasks) if programs: if 'guide' not in _CACHE: _CACHE['guide'] = {} _CACHE['guide'][chan] = {'last_updated': now, 'data': programs} return programs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def async_get_current_program(channel, no_cache=False):
    ''' Get the current program info '''
    chan = await async_determine_channel(channel)
    guide = await async_get_program_guide(chan, no_cache)
    if not guide:
        _LOGGER.warning('Could not retrieve TV program for %s', channel)
        return
    now = datetime.datetime.now()
    for prog in guide:
        start = prog.get('start_time')
        end = prog.get('end_time')
        if now > start and now < end:
            return prog
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def publish(self, distribution, storage=""): """ Get or create publish """
try:
    return self._publishes[distribution]
except KeyError:
    self._publishes[distribution] = Publish(self.client, distribution,
                                            timestamp=self.timestamp,
                                            storage=(storage or self.storage))
    return self._publishes[distribution]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, snapshot, distributions, component='main', storage=""): """ Add mirror or repo to publish """
for dist in distributions: self.publish(dist, storage=storage).add(snapshot, component)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _publish_match(self, publish, names=False, name_only=False): """ Check if publish name matches list of names or regex patterns """
if names:
    for name in names:
        # re.Pattern replaces the private re._pattern_type, which was removed in Python 3.7
        if not name_only and isinstance(name, re.Pattern):
            if re.match(name, publish.name):
                return True
        else:
            operand = name if name_only else [name, './%s' % name]
            if publish in operand:
                return True
    return False
else:
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare(self, other, components=[]): """ Compare two publishes It expects that other publish is same or older than this one Return tuple (diff, equal) of dict {'component': ['snapshot']} """
lg.debug("Comparing publish %s (%s) and %s (%s)"
         % (self.name, self.storage or "local", other.name, other.storage or "local"))

diff, equal = ({}, {})

for component, snapshots in self.components.items():
    if component not in list(other.components.keys()):
        # Component is missing in other
        diff[component] = snapshots
        continue

    equal_snapshots = list(set(snapshots).intersection(other.components[component]))
    if equal_snapshots:
        lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots))
        equal[component] = equal_snapshots

    diff_snapshots = list(set(snapshots).difference(other.components[component]))
    if diff_snapshots:
        lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots))
        diff[component] = diff_snapshots

return (diff, equal)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_publish(self): """ Find this publish on remote """
publishes = self._get_publishes(self.client)
for publish in publishes:
    if publish['Distribution'] == self.distribution and \
            publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \
            publish['Storage'] == self.storage:
        return publish

raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_publish(self, save_path): """ Serialize publish in YAML """
timestamp = time.strftime("%Y%m%d%H%M%S")

yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
yaml_dict["storage"] = self.storage
for component, snapshots in self.components.items():
    packages = self.get_packages(component)
    package_dict = []
    for package in packages:
        (arch, name, version, ref) = self.parse_package_ref(package)
        package_dict.append({'package': name, 'version': version,
                             'arch': arch, 'ref': ref})
    snapshot = self._find_snapshot(snapshots[0])
    yaml_dict["components"].append({'component': component,
                                    'snapshot': snapshot['Name'],
                                    'description': snapshot['Description'],
                                    'packages': package_dict})

name = self.name.replace('/', '-')
lg.info("Saving publish %s in %s" % (name, save_path))
with open(save_path, 'w') as save_file:
    yaml.dump(yaml_dict, save_file, default_flow_style=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restore_publish(self, config, components, recreate=False): """ Restore publish from config file """
if "all" in components: components = [] try: self.load() publish = True except NoSuchPublish: publish = False new_publish_snapshots = [] to_publish = [] created_snapshots = [] for saved_component in config.get('components', []): component_name = saved_component.get('component') if not component_name: raise Exception("Corrupted file") if components and component_name not in components: continue saved_packages = [] if not saved_component.get('packages'): raise Exception("Component %s is empty" % component_name) for package in saved_component.get('packages'): package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref')) saved_packages.append(package_ref) to_publish.append(component_name) timestamp = time.strftime("%Y%m%d%H%M%S") snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot')) lg.debug("Creating snapshot %s for component %s of packages: %s" % (snapshot_name, component_name, saved_packages)) try: self.client.do_post( '/snapshots', data={ 'Name': snapshot_name, 'SourceSnapshots': [], 'Description': saved_component.get('description'), 'PackageRefs': saved_packages, } ) created_snapshots.append(snapshot_name) except AptlyException as e: if e.res.status_code == 404: # delete all the previously created # snapshots because the file is corrupted self._remove_snapshots(created_snapshots) raise Exception("Source snapshot or packages don't exist") else: raise new_publish_snapshots.append({ 'Component': component_name, 'Name': snapshot_name }) if components: self.publish_snapshots = [x for x in self.publish_snapshots if x['Component'] not in components and x['Component'] not in to_publish] check_components = [x for x in new_publish_snapshots if x['Component'] in components] if len(check_components) != len(components): self._remove_snapshots(created_snapshots) raise Exception("Not possible to find all the components required in the backup file") self.publish_snapshots += new_publish_snapshots self.do_publish(recreate=recreate, merge_snapshots=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self): """ Load publish info from remote """
publish = self._get_publish()
self.architectures = publish['Architectures']
for source in publish['Sources']:
    component = source['Component']
    snapshot = source['Name']
    self.publish_snapshots.append({
        'Component': component,
        'Name': snapshot
    })

    snapshot_remote = self._find_snapshot(snapshot)
    for source in self._get_source_snapshots(snapshot_remote, fallback_self=True):
        self.add(source, component)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_packages(self, component=None, components=[], packages=None): """ Return package refs for given components """
if component:
    components = [component]

package_refs = []
for snapshot in self.publish_snapshots:
    if component and snapshot['Component'] not in components:
        # We don't want packages for this component
        continue

    component_refs = self._get_packages(self.client, "snapshots", snapshot['Name'])
    if packages:
        # Filter package names
        for ref in component_refs:
            if self.parse_package_ref(ref)[1] in packages:
                package_refs.append(ref)
    else:
        package_refs.extend(component_refs)

return package_refs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_package_ref(self, ref): """ Return tuple of architecture, package_name, version, id """
if not ref:
    return None
# raw string so the literal spaces don't need backslash escapes
parsed = re.match(r'(.*) (.*) (.*) (.*)', ref)
return parsed.groups()
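A usage sketch of the split performed by the regex; the package reference below is illustrative of aptly's '<arch> <name> <version> <key>' format:

import re

ref = 'Pamd64 nginx 1.18.0-0ubuntu1 3b1a9cd2f4e5a6b7'
print(re.match(r'(.*) (.*) (.*) (.*)', ref).groups())
# ('Pamd64', 'nginx', '1.18.0-0ubuntu1', '3b1a9cd2f4e5a6b7')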
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, snapshot, component='main'): """ Add snapshot of component to publish """
try:
    self.components[component].append(snapshot)
except KeyError:
    self.components[component] = [snapshot]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_snapshot(self, name): """ Find snapshot on remote by name or regular expression """
remote_snapshots = self._get_snapshots(self.client)

for remote in reversed(remote_snapshots):
    if remote["Name"] == name or \
            re.match(name, remote["Name"]):
        return remote
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_source_snapshots(self, snapshot, fallback_self=False): """ Get list of source snapshot names of given snapshot TODO: we have to decide by description at the moment """
if not snapshot:
    return []

source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
    source_snapshots = [snapshot['Name']]

source_snapshots.sort()
return source_snapshots
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_snapshots(self): """ Create component snapshots by merging other snapshots of same component """
self.publish_snapshots = [] for component, snapshots in self.components.items(): if len(snapshots) <= 1: # Only one snapshot, no need to merge lg.debug("Component %s has only one snapshot %s, not creating merge snapshot" % (component, snapshots)) self.publish_snapshots.append({ 'Component': component, 'Name': snapshots[0] }) continue # Look if merged snapshot doesn't already exist remote_snapshot = self._find_snapshot(r'^%s%s-%s-\d+' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component)) if remote_snapshot: source_snapshots = self._get_source_snapshots(remote_snapshot) # Check if latest merged snapshot has same source snapshots like us snapshots_want = list(snapshots) snapshots_want.sort() lg.debug("Comparing snapshots: snapshot_name=%s, snapshot_sources=%s, wanted_sources=%s" % (remote_snapshot['Name'], source_snapshots, snapshots_want)) if snapshots_want == source_snapshots: lg.info("Remote merge snapshot already exists: %s (%s)" % (remote_snapshot['Name'], source_snapshots)) self.publish_snapshots.append({ 'Component': component, 'Name': remote_snapshot['Name'] }) continue snapshot_name = '%s%s-%s-%s' % (self.merge_prefix, self.name.replace('./', '').replace('/', '-'), component, self.timestamp) lg.info("Creating merge snapshot %s for component %s of snapshots %s" % (snapshot_name, component, snapshots)) package_refs = [] for snapshot in snapshots: # Get package refs from each snapshot packages = self._get_packages(self.client, "snapshots", snapshot) package_refs.extend(packages) try: self.client.do_post( '/snapshots', data={ 'Name': snapshot_name, 'SourceSnapshots': snapshots, 'Description': "Merged from sources: %s" % ', '.join("'%s'" % snap for snap in snapshots), 'PackageRefs': package_refs, } ) except AptlyException as e: if e.res.status_code == 400: lg.warning("Error creating snapshot %s, assuming it already exists" % snapshot_name) else: raise self.publish_snapshots.append({ 'Component': component, 'Name': snapshot_name })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timing_decorator(func): """Prints the time func takes to execute."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
    """Wrapper for printing execution time.

    Parameters
    ----------
    print_time: bool, optional
        Whether or not to print the time the function takes.
    """
    print_time = kwargs.pop('print_time', False)
    if not print_time:
        return func(*args, **kwargs)
    else:
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(func.__name__ + ' took %.3f seconds' % (end_time - start_time))
        return result
return wrapper
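A usage sketch: timing is opt-in per call via the print_time keyword, which the wrapper pops before calling the wrapped function:

@timing_decorator
def slow_sum(n):
    return sum(range(n))

slow_sum(10 ** 6)                    # runs silently
slow_sum(10 ** 6, print_time=True)   # prints e.g. "slow_sum took 0.031 seconds"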
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pickle_save(data, name, **kwargs): """Saves object with pickle. Parameters data: anything picklable Object to save. name: str Path to save to (includes dir, excludes extension). extension: str, optional File extension. overwrite existing: bool, optional When the save path already contains file: if True, file will be overwritten, if False the data will be saved with the system time appended to the file name. """
extension = kwargs.pop('extension', '.pkl') overwrite_existing = kwargs.pop('overwrite_existing', True) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) filename = name + extension # Check if the target directory exists and if not make it dirname = os.path.dirname(filename) if not os.path.exists(dirname) and dirname != '': os.makedirs(dirname) if os.path.isfile(filename) and not overwrite_existing: print(filename + ' already exists! Saving with time appended') filename = name + '_' + time.asctime().replace(' ', '_') filename += extension # check if permission error is defined (was not before python 3.3) # and otherwise use IOError try: PermissionError except NameError: PermissionError = IOError try: outfile = open(filename, 'wb') pickle.dump(data, outfile) outfile.close() except (MemoryError, PermissionError) as err: warnings.warn((type(err).__name__ + ' in pickle_save: continue without' ' saving.'), UserWarning)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pickle_load(name, extension='.pkl'): """Load data with pickle. Parameters name: str Path to load from (includes dir, excludes extension). extension: str, optional File extension. Returns ------- Contents of file path. """
filename = name + extension infile = open(filename, 'rb') data = pickle.load(infile) infile.close() return data
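A short usage sketch tying pickle_save and pickle_load together (assumes both functions above are in scope; the 'cache/example_data' path is hypothetical):

import numpy as np

data = {'samples': np.random.random(100)}
pickle_save(data, 'cache/example_data', overwrite_existing=True)
loaded = pickle_load('cache/example_data')
assert np.array_equal(loaded['samples'], data['samples'])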
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_thread_values(run, estimator_list): """Helper function for parallelising thread_values_df. Parameters run: dict Nested sampling run dictionary. estimator_list: list of functions Returns ------- vals_array: numpy array Array of estimator values for each thread. Has shape (len(estimator_list), len(threads)). """
threads = nestcheck.ns_run_utils.get_run_threads(run) vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list) for th in threads] vals_array = np.stack(vals_list, axis=1) assert vals_array.shape == (len(estimator_list), len(threads)) return vals_array
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pairwise_distances(dist_list, earth_mover_dist=True, energy_dist=True): """Applies statistical_distances to each unique pair of distribution samples in dist_list. Parameters dist_list: list of 1d arrays earth_mover_dist: bool, optional Passed to statistical_distances. energy_dist: bool, optional Passed to statistical_distances. Returns ------- ser: pandas Series object Values are statistical distances. Index levels are: calculation type: name of statistical distance. run: tuple containing the index in dist_list of the pair of samples arrays from which the statistical distance was computed. """
out = [] index = [] for i, samp_i in enumerate(dist_list): for j, samp_j in enumerate(dist_list): if j < i: index.append(str((i, j))) out.append(statistical_distances( samp_i, samp_j, earth_mover_dist=earth_mover_dist, energy_dist=energy_dist)) columns = ['ks pvalue', 'ks distance'] if earth_mover_dist: columns.append('earth mover distance') if energy_dist: columns.append('energy distance') ser = pd.DataFrame(out, index=index, columns=columns).unstack() ser.index.names = ['calculation type', 'run'] return ser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def statistical_distances(samples1, samples2, earth_mover_dist=True, energy_dist=True): """Compute measures of the statistical distance between samples. Parameters samples1: 1d array samples2: 1d array earth_mover_dist: bool, optional Whether or not to compute the Earth mover's distance between the samples. energy_dist: bool, optional Whether or not to compute the energy distance between the samples. Returns ------- 1d array """
out = [] temp = scipy.stats.ks_2samp(samples1, samples2) out.append(temp.pvalue) out.append(temp.statistic) if earth_mover_dist: out.append(scipy.stats.wasserstein_distance(samples1, samples2)) if energy_dist: out.append(scipy.stats.energy_distance(samples1, samples2)) return np.asarray(out)
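Illustrative sketch comparing two sets of samples (assumes statistical_distances above is in scope; the toy Gaussian samples are an assumption):

import numpy as np

rng = np.random.RandomState(0)
samples_a = rng.normal(size=1000)
samples_b = rng.normal(loc=0.1, size=1000)
# Returned order: [ks pvalue, ks statistic, earth mover distance, energy distance]
dists = statistical_distances(samples_a, samples_b)
print(dists)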
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dummy_thread(nsamples, **kwargs): """Generate dummy data for a single nested sampling thread. Log-likelihood values of points are generated from a uniform distribution in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is not -np.inf). Theta values of each point are each generated from a uniform distribution in (0, 1). Parameters nsamples: int Number of samples in thread. ndim: int, optional Number of dimensions. seed: int, optional If not False, the seed is set with np.random.seed(seed). logl_start: float, optional logl at which thread starts. logl_range: float, optional Scale factor applied to logl values. """
seed = kwargs.pop('seed', False) ndim = kwargs.pop('ndim', 2) logl_start = kwargs.pop('logl_start', -np.inf) logl_range = kwargs.pop('logl_range', 1) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) if seed is not False: np.random.seed(seed) thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range, 'nlive_array': np.full(nsamples, 1.), 'theta': np.random.random((nsamples, ndim)), 'thread_labels': np.zeros(nsamples).astype(int)} if logl_start != -np.inf: thread['logl'] += logl_start thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]]) return thread
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dummy_run(nthread, nsamples, **kwargs): """Generate dummy data for a nested sampling run. Log-likelihood values of points are generated from a uniform distribution in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is not -np.inf). Theta values of each point are each generated from a uniform distribution in (0, 1). Parameters nthread: int Number of threads in the run. nsamples: int Number of samples in thread. ndim: int, optional Number of dimensions. seed: int, optional If not False, the seed is set with np.random.seed(seed). logl_start: float, optional logl at which thread starts. logl_range: float, optional Scale factor applied to logl values. """
seed = kwargs.pop('seed', False) ndim = kwargs.pop('ndim', 2) logl_start = kwargs.pop('logl_start', -np.inf) logl_range = kwargs.pop('logl_range', 1) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) threads = [] # set seed before generating any threads and do not reset for each thread if seed is not False: np.random.seed(seed) threads = [] for _ in range(nthread): threads.append(get_dummy_thread( nsamples, ndim=ndim, seed=False, logl_start=logl_start, logl_range=logl_range)) # Sort threads in order of starting logl so labels match labels that would # have been given processing a dead points array. N.B. this only works when # all threads have same start_logl threads = sorted(threads, key=lambda th: th['logl'][0]) for i, _ in enumerate(threads): threads[i]['thread_labels'] = np.full(nsamples, i) # Use combine_ns_runs rather than combine threads as this relabels the # threads according to their order return nestcheck.ns_run_utils.combine_threads(threads)
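A quick sketch of generating a reproducible dummy run for testing (assumes get_dummy_run above is in scope):

run = get_dummy_run(2, 10, ndim=3, seed=0)
print(run['logl'].shape)        # (20,): nthread * nsamples points in total
print(run['theta'].shape)       # (20, 3)
print(run['thread_min_max'])    # one [logl_start, logl_max] row per thread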
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dummy_dynamic_run(nsamples, **kwargs): """Generate dummy data for a dynamic nested sampling run. Log-likelihood values of points are generated from a uniform distribution in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is not -np.inf). Theta values of each point are each generated from a uniform distribution in (0, 1). Parameters nsamples: int Number of samples in thread. nthread_init: int Number of threads in the initial run (starting at logl=-np.inf). nthread_dyn: int Number of dynamically added threads (each starting at a randomly chosen point in the initial run). ndim: int, optional Number of dimensions. seed: int, optional If not False, the seed is set with np.random.seed(seed). logl_start: float, optional logl at which thread starts. logl_range: float, optional Scale factor applied to logl values. """
seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    nthread_init = kwargs.pop('nthread_init', 2)
    nthread_dyn = kwargs.pop('nthread_dyn', 3)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
                         logl_start=-np.inf, logl_range=logl_range)
    dyn_starts = list(np.random.choice(
        init['logl'], nthread_dyn, replace=True))
    threads = nestcheck.ns_run_utils.get_run_threads(init)
    # Seed must be False here so it is not set again for each thread
    threads += [get_dummy_thread(
        nsamples, ndim=ndim, seed=False, logl_start=start,
        logl_range=logl_range) for start in dyn_starts]
    # make sure the threads have unique labels and combine them
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    run = nestcheck.ns_run_utils.combine_threads(threads)
    # To make sure the thread labelling is the same as it would be when
    # processing a dead points file, transform into dead points
    samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
    return nestcheck.data_processing.process_samples_array(samples)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_long_description(): """Get PyPI long description from the .rst file."""
pkg_dir = get_package_dir() with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file: long_description = readme_file.read() return long_description
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kde_plot_df(df, xlims=None, **kwargs): """Plots kde estimates of distributions of samples in each cell of the input pandas DataFrame. There is one subplot for each dataframe column, and on each subplot there is one kde line. Parameters df: pandas data frame Each cell must contain a 1d numpy array of samples. xlims: dict, optional Dictionary of xlimits - keys are column names and values are lists of length 2. num_xticks: int, optional Number of xticks on each subplot. figsize: tuple, optional Size of figure in inches. nrows: int, optional Number of rows of subplots. ncols: int, optional Number of columns of subplots. normalize: bool, optional If true, kde plots are normalized to have the same area under their curves. If False, their max value is set to 1. legend: bool, optional Should a legend be added? legend_kwargs: dict, optional Additional kwargs for legend. Returns ------- fig: matplotlib figure """
assert xlims is None or isinstance(xlims, dict) figsize = kwargs.pop('figsize', (6.4, 1.5)) num_xticks = kwargs.pop('num_xticks', None) nrows = kwargs.pop('nrows', 1) ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows))) normalize = kwargs.pop('normalize', True) legend = kwargs.pop('legend', False) legend_kwargs = kwargs.pop('legend_kwargs', {}) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) for nax, col in enumerate(df): if nrows == 1: ax = axes[nax] else: ax = axes[nax // ncols, nax % ncols] supmin = df[col].apply(np.min).min() supmax = df[col].apply(np.max).max() support = np.linspace(supmin - 0.1 * (supmax - supmin), supmax + 0.1 * (supmax - supmin), 200) handles = [] labels = [] for name, samps in df[col].iteritems(): pdf = scipy.stats.gaussian_kde(samps)(support) if not normalize: pdf /= pdf.max() handles.append(ax.plot(support, pdf, label=name)[0]) labels.append(name) ax.set_ylim(bottom=0) ax.set_yticks([]) if xlims is not None: try: ax.set_xlim(xlims[col]) except KeyError: pass ax.set_xlabel(col) if num_xticks is not None: ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator( nbins=num_xticks)) if legend: fig.legend(handles, labels, **legend_kwargs) return fig
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def alternate_helper(x, alt_samps, func=None): """Helper function for making fgivenx plots of functions with 2 array arguments of variable lengths."""
alt_samps = alt_samps[~np.isnan(alt_samps)] arg1 = alt_samps[::2] arg2 = alt_samps[1::2] return func(x, arg1, arg2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def average_by_key(dict_in, key): """Helper function for plot_run_nlive. Try returning the average of dict_in[key] and, if this does not work or if key is None, return average of whole dict. Parameters dict_in: dict Values should be arrays. key: str Returns ------- average: float """
if key is None: return np.mean(np.concatenate(list(dict_in.values()))) else: try: return np.mean(dict_in[key]) except KeyError: print('method name "' + key + '" not found, so ' + 'normalise area under the analytic relative posterior ' + 'mass curve using the mean of all methods.') return np.mean(np.concatenate(list(dict_in.values())))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batch_process_data(file_roots, **kwargs): """Process output from many nested sampling runs in parallel with optional error handling and caching. The result can be cached using the 'save_name', 'save' and 'load' kwargs (by default this is not done). See save_load_result docstring for more details. Remaining kwargs passed to parallel_utils.parallel_apply (see its docstring for more details). Parameters file_roots: list of strs file_roots for the runs to load. base_dir: str, optional path to directory containing files. process_func: function, optional function to use to process the data. func_kwargs: dict, optional additional keyword arguments for process_func. errors_to_handle: error or tuple of errors, optional which errors to catch when they occur in processing rather than raising. save_name: str or None, optional See nestcheck.io_utils.save_load_result. save: bool, optional See nestcheck.io_utils.save_load_result. load: bool, optional See nestcheck.io_utils.save_load_result. overwrite_existing: bool, optional See nestcheck.io_utils.save_load_result. Returns ------- list of ns_run dicts List of nested sampling runs in dict format (see the module docstring for more details). """
base_dir = kwargs.pop('base_dir', 'chains') process_func = kwargs.pop('process_func', process_polychord_run) func_kwargs = kwargs.pop('func_kwargs', {}) func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ()) data = nestcheck.parallel_utils.parallel_apply( process_error_helper, file_roots, func_args=(base_dir, process_func), func_kwargs=func_kwargs, **kwargs) # Sort processed runs into the same order as file_roots (as parallel_apply # does not preserve order) data = sorted(data, key=lambda x: file_roots.index(x['output']['file_root'])) # Extract error information and print errors = {} for i, run in enumerate(data): if 'error' in run: try: errors[run['error']].append(i) except KeyError: errors[run['error']] = [i] for error_name, index_list in errors.items(): message = (error_name + ' processing ' + str(len(index_list)) + ' / ' + str(len(file_roots)) + ' files') if len(index_list) != len(file_roots): message += ('. Roots with errors have (zero based) indexes: ' + str(index_list)) print(message) # Return runs which did not have errors return [run for run in data if 'error' not in run]
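A hedged sketch of a typical call (the file roots and base_dir are hypothetical and must point at real PolyChord output; process_polychord_run is the default process_func and is assumed to be in scope along with batch_process_data):

run_list = batch_process_data(
    ['gaussian_1', 'gaussian_2', 'gaussian_3'],
    base_dir='chains',
    process_func=process_polychord_run,
    errors_to_handle=(OSError, AssertionError),
    parallel=True)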
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_error_helper(root, base_dir, process_func, errors_to_handle=(), **func_kwargs): """Wrapper which applies process_func and handles some common errors so one bad run does not spoil the whole batch. Useful errors to handle include: OSError: if you are not sure if all the files exist AssertionError: if some of the many assertions fail for known reasons; for example if there are occasional problems decomposing runs into threads due to limited numerical precision in logls. Parameters root: str File root. base_dir: str Directory containing file. process_func: func Function for processing file. errors_to_handle: error type or tuple of error types Errors to catch without throwing an exception. func_kwargs: dict Kwargs to pass to process_func. Returns ------- run: dict Nested sampling run dict (see the module docstring for more details) or, if an error occurred, a dict containing its type and the file root. """
try: return process_func(root, base_dir, **func_kwargs) except errors_to_handle as err: run = {'error': type(err).__name__, 'output': {'file_root': root}} return run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_polychord_run(file_root, base_dir, process_stats_file=True, **kwargs): """Loads data from a PolyChord run into the nestcheck dictionary format for analysis. N.B. producing the required output file containing information about the iso-likelihood contours within which points were sampled (where they were "born") requires PolyChord version v1.13 or later and the setting write_dead=True. Parameters file_root: str Root for run output file names (PolyChord file_root setting). base_dir: str Directory containing data (PolyChord base_dir setting). process_stats_file: bool, optional Should PolyChord's <root>.stats file be processed? Set to False if you don't have the <root>.stats file (such as if PolyChord was run with write_stats=False). kwargs: dict, optional Options passed to ns_run_utils.check_ns_run. Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). """
# N.B. PolyChord dead points files also contains remaining live points at # termination samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt') ns_run = process_samples_array(samples, **kwargs) ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root} if process_stats_file: try: ns_run['output'] = process_polychord_stats(file_root, base_dir) except (OSError, IOError, ValueError) as err: warnings.warn( ('process_polychord_stats raised {} processing {}.stats file. ' ' Proceeding without stats.').format( type(err).__name__, os.path.join(base_dir, file_root)), UserWarning) return ns_run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_multinest_run(file_root, base_dir, **kwargs): """Loads data from a MultiNest run into the nestcheck dictionary format for analysis. N.B. producing the required output file containing information about the iso-likelihood contours within which points were sampled (where they were "born") requires MultiNest version 3.11 or later. Parameters file_root: str Root name for output files. When running MultiNest, this is determined by the nest_root parameter. base_dir: str Directory containing output files. When running MultiNest, this is determined by the nest_root parameter. kwargs: dict, optional Passed to ns_run_utils.check_ns_run (via process_samples_array) Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). """
# Load dead and live points dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt') live = np.loadtxt(os.path.join(base_dir, file_root) + '-phys_live-birth.txt') # Remove unnecessary final columns dead = dead[:, :-2] live = live[:, :-1] assert dead[:, -2].max() < live[:, -2].min(), ( 'final live points should have greater logls than any dead point!', dead, live) ns_run = process_samples_array(np.vstack((dead, live)), **kwargs) assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), ( 'As MultiNest does not currently perform dynamic nested sampling, all ' 'threads should start by sampling the whole prior.') ns_run['output'] = {} ns_run['output']['file_root'] = file_root ns_run['output']['base_dir'] = base_dir return ns_run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_dynesty_run(results): """Transforms results from a dynesty run into the nestcheck dictionary format for analysis. This function has been tested with dynesty v9.2.0. Note that the nestcheck point weights and evidence will not be exactly the same as the dynesty ones as nestcheck calculates logX volumes more precisely (using the trapezium rule). This function does not require the birth_inds_given_contours and threads_given_birth_inds functions as dynesty results objects already include thread labels via their samples_id property. If the dynesty run is dynamic, the batch_bounds property is needed to determine the threads' starting birth contours. Parameters results: dynesty results object N.B. the remaining live points at termination must be included in the results (dynesty samplers' run_nested method does this if add_live_points=True - its default value). Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). """
samples = np.zeros((results.samples.shape[0], results.samples.shape[1] + 3)) samples[:, 0] = results.logl samples[:, 1] = results.samples_id samples[:, 3:] = results.samples unique_th, first_inds = np.unique(results.samples_id, return_index=True) assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0]))) thread_min_max = np.full((unique_th.shape[0], 2), np.nan) try: # Try processing standard nested sampling results assert unique_th.shape[0] == results.nlive assert np.array_equal( np.unique(results.samples_id[-results.nlive:]), np.asarray(range(results.nlive))), ( 'perhaps the final live points are not included?') thread_min_max[:, 0] = -np.inf except AttributeError: # If results has no nlive attribute, it must be dynamic nested sampling assert unique_th.shape[0] == sum(results.batch_nlive) for th_lab, ind in zip(unique_th, first_inds): thread_min_max[th_lab, 0] = ( results.batch_bounds[results.samples_batch[ind], 0]) for th_lab in unique_th: final_ind = np.where(results.samples_id == th_lab)[0][-1] thread_min_max[th_lab, 1] = results.logl[final_ind] samples[final_ind, 2] = -1 assert np.all(~np.isnan(thread_min_max)) run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max) nestcheck.ns_run_utils.check_ns_run(run) return run
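A hedged end-to-end sketch for dynesty users (the toy likelihood, prior transform and settings are assumptions; the dynesty calls shown are its standard NestedSampler interface, and process_dynesty_run above is assumed to be in scope):

import numpy as np
import dynesty


def loglike(theta):
    """Spherical Gaussian log-likelihood (toy example)."""
    return -0.5 * np.sum(theta ** 2)


def prior_transform(cube):
    """Map the unit cube to a uniform prior on [-5, 5] in each dimension."""
    return 10.0 * (cube - 0.5)


sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=2, nlive=100)
sampler.run_nested()  # remaining live points are added by default
ns_run = process_dynesty_run(sampler.results)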
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_samples_array(samples, **kwargs): """Convert an array of nested sampling dead and live points of the type produced by PolyChord and MultiNest into a nestcheck nested sampling run dictionary. Parameters samples: 2d numpy array Array of dead points and any remaining live points at termination. Has #parameters + 2 columns: the first #parameters columns hold the parameter values, the penultimate column holds the logl values and the final column holds the birth contours (the logl values of the iso-likelihood contours from within which the points were sampled). kwargs: dict, optional Options passed to birth_inds_given_contours Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). Only contains information in samples (not additional optional output key). """
samples = samples[np.argsort(samples[:, -2])] ns_run = {} ns_run['logl'] = samples[:, -2] ns_run['theta'] = samples[:, :-2] birth_contours = samples[:, -1] # birth_contours, ns_run['theta'] = check_logls_unique( # samples[:, -2], samples[:, -1], samples[:, :-2]) birth_inds = birth_inds_given_contours( birth_contours, ns_run['logl'], **kwargs) ns_run['thread_labels'] = threads_given_birth_inds(birth_inds) unique_threads = np.unique(ns_run['thread_labels']) assert np.array_equal(unique_threads, np.asarray(range(unique_threads.shape[0]))) # Work out nlive_array and thread_min_max logls from thread labels and # birth contours thread_min_max = np.zeros((unique_threads.shape[0], 2)) # NB delta_nlive indexes are offset from points' indexes by 1 as we need an # element to represent the initial sampling of live points before any dead # points are created. # I.E. birth on step 1 corresponds to replacing dead point zero delta_nlive = np.zeros(samples.shape[0] + 1) for label in unique_threads: thread_inds = np.where(ns_run['thread_labels'] == label)[0] # Max is final logl in thread thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]] thread_start_birth_ind = birth_inds[thread_inds[0]] # delta nlive indexes are +1 from logl indexes to allow for initial # nlive (before first dead point) delta_nlive[thread_inds[-1] + 1] -= 1 if thread_start_birth_ind == birth_inds[0]: # thread minimum is -inf as it starts by sampling from whole prior thread_min_max[label, 0] = -np.inf delta_nlive[0] += 1 else: assert thread_start_birth_ind >= 0 thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind] delta_nlive[thread_start_birth_ind + 1] += 1 ns_run['thread_min_max'] = thread_min_max ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1] return ns_run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs): """Maps the iso-likelihood contours on which points were born to the index of the dead point on this contour. MultiNest and PolyChord use different values to identify the initial live points which were sampled from the whole prior (PolyChord uses -1e+30 and MultiNest -0.179769313486231571E+309). However in each case the first dead point must have been sampled from the whole prior, so for either package we can use init_birth = birth_logl_arr[0] If there are many points with the same logl value and dup_assert is False, these points are randomly assigned an order (to ensure results are consistent, random seeding is used). Parameters logl_arr: 1d numpy array logl values of each point. birth_logl_arr: 1d numpy array Birth contours - i.e. logl values of the iso-likelihood contour from within which each point was sampled (on which it was born). dup_assert: bool, optional See ns_run_utils.check_ns_run_logls docstring. dup_warn: bool, optional See ns_run_utils.check_ns_run_logls docstring. Returns ------- birth_inds: 1d numpy array of ints Step at which each element of logl_arr was sampled. Points sampled from the whole prior are assigned value -1. """
dup_assert = kwargs.pop('dup_assert', False)
    dup_warn = kwargs.pop('dup_warn', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    assert logl_arr.ndim == 1, logl_arr.ndim
    assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
    # Check for duplicate logl values (if specified by dup_assert or dup_warn)
    nestcheck.ns_run_utils.check_ns_run_logls(
        {'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
    # Random seed so results are consistent if there are duplicate logls
    state = np.random.get_state()  # Save random state before seeding
    np.random.seed(0)
    # Calculate birth inds
    init_birth = birth_logl_arr[0]
    assert np.all(birth_logl_arr <= logl_arr), (
        logl_arr[birth_logl_arr > logl_arr])
    birth_inds = np.full(birth_logl_arr.shape, np.nan)
    birth_inds[birth_logl_arr == init_birth] = -1
    for i, birth_logl in enumerate(birth_logl_arr):
        if not np.isnan(birth_inds[i]):
            # birth ind has already been assigned
            continue
        dup_deaths = np.where(logl_arr == birth_logl)[0]
        if dup_deaths.shape == (1,):
            # death index is unique
            birth_inds[i] = dup_deaths[0]
            continue
        # The remainder of this loop deals with the case that multiple points
        # have the same logl value (=birth_logl). This can occur due to limited
        # precision, or for likelihoods with constant regions. In this case we
        # randomly assign birth steps to the duplicates in a manner
        # that provides a valid division into nested sampling runs
        dup_births = np.where(birth_logl_arr == birth_logl)[0]
        assert dup_deaths.shape[0] > 1, dup_deaths
        if np.all(birth_logl_arr[dup_deaths] != birth_logl):
            # If no points are both born and die on this contour, we can just
            # randomly assign an order
            np.random.shuffle(dup_deaths)
            inds_to_use = dup_deaths
        else:
            # If some points are both born and die on the contour, we need to
            # take care that the assigned birth inds do not result in some
            # points dying before they are born
            try:
                inds_to_use = sample_less_than_condition(
                    dup_deaths, dup_births)
            except ValueError:
                raise ValueError((
                    'There is no way to allocate indexes dup_deaths={} such '
                    'that each is less than dup_births={}.').format(
                        dup_deaths, dup_births))
        try:
            # Add our selected inds_to_use values to the birth_inds array
            # Note that dup_deaths (and hence inds to use) may have more
            # members than dup_births, because one of the duplicates may be
            # the final point in a thread. We therefore include only the first
            # dup_births.shape[0] elements
            birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
        except ValueError:
            warnings.warn((
                'for logl={}, the number of points born (indexes='
                '{}) is bigger than the number of points dying '
                '(indexes={}). This indicates a problem with your '
                'nested sampling software - it may be caused by '
                'a bug in PolyChord which was fixed in PolyChord '
                'v1.14, so try upgrading. I will try to give an '
                'approximate allocation of threads but this may '
                'fail.').format(
                    birth_logl, dup_births, inds_to_use), UserWarning)
            extra_inds = np.random.choice(
                inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
            inds_to_use = np.concatenate((inds_to_use, extra_inds))
            np.random.shuffle(inds_to_use)
            birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
    assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
    np.random.set_state(state)  # Reset random state
    return birth_inds.astype(int)
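A tiny worked sketch (assumes birth_inds_given_contours above is in scope): three points born from the whole prior, identified by the birth contour of the first dead point, plus one point born on the contour of the first dead point:

import numpy as np

logl = np.array([1.0, 2.0, 3.0, 4.0])
birth_logl = np.array([-1e30, -1e30, -1e30, 1.0])
print(birth_inds_given_contours(birth_logl, logl))  # [-1 -1 -1  0]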
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample_less_than_condition(choices_in, condition): """Creates a random sample from choices without replacement, subject to the condition that each element of the output is less than the corresponding element of the condition array. condition should be in ascending order. """
output = np.zeros(min(condition.shape[0], choices_in.shape[0])) choices = copy.deepcopy(choices_in) for i, _ in enumerate(output): # randomly select one of the choices which meets condition avail_inds = np.where(choices < condition[i])[0] selected_ind = np.random.choice(avail_inds) output[i] = choices[selected_ind] # remove the chosen value choices = np.delete(choices, selected_ind) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parallel_map(func, *arg_iterable, **kwargs): """Apply function to iterable with parallel map, and hence returns results in order. functools.partial is used to freeze func_pre_args and func_kwargs, meaning that the iterable argument must be the last positional argument. Roughly equivalent to [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]. Parameters func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. chunksize: int, optional Perform function in batches of this size. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. Returns ------- results_list: list of function outputs """
chunksize = kwargs.pop('chunksize', 1) func_pre_args = kwargs.pop('func_pre_args', ()) func_kwargs = kwargs.pop('func_kwargs', {}) max_workers = kwargs.pop('max_workers', None) parallel = kwargs.pop('parallel', True) parallel_warning = kwargs.pop('parallel_warning', True) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) func_to_map = functools.partial(func, *func_pre_args, **func_kwargs) if parallel: pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize)) else: if parallel_warning: warnings.warn(('parallel_map has parallel=False - turn on ' 'parallelisation for faster processing'), UserWarning) return list(map(func_to_map, *arg_iterable))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parallel_apply(func, arg_iterable, **kwargs): """Apply function to iterable with parallelisation and a tqdm progress bar. Roughly equivalent to [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable] but will **not** necessarily return results in input order. Parameters func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. func_args: tuple, optional Additional positional arguments for func. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. Returns ------- results_list: list of function outputs """
max_workers = kwargs.pop('max_workers', None)
    parallel = kwargs.pop('parallel', True)
    parallel_warning = kwargs.pop('parallel_warning', True)
    func_args = kwargs.pop('func_args', ())
    func_pre_args = kwargs.pop('func_pre_args', ())
    func_kwargs = kwargs.pop('func_kwargs', {})
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if 'leave' not in tqdm_kwargs:  # default to leave=False
        tqdm_kwargs['leave'] = False
    assert isinstance(func_args, tuple), (
        str(func_args) + ' is type ' + str(type(func_args)))
    assert isinstance(func_pre_args, tuple), (
        str(func_pre_args) + ' is type ' + str(type(func_pre_args)))
    progress = select_tqdm()
    if not parallel:
        if parallel_warning:
            warnings.warn(('parallel_apply has parallel=False - turn on '
                           'parallelisation for faster processing'),
                          UserWarning)
        return [func(*(func_pre_args + (x,) + func_args), **func_kwargs)
                for x in progress(arg_iterable, **tqdm_kwargs)]
    else:
        pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
        futures = []
        for element in arg_iterable:
            futures.append(pool.submit(
                func, *(func_pre_args + (element,) + func_args),
                **func_kwargs))
        results = []
        for fut in progress(concurrent.futures.as_completed(futures),
                            total=len(arg_iterable), **tqdm_kwargs):
            results.append(fut.result())
        return results
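Usage sketch (assumes parallel_apply above is in scope; scaled_norm is a hypothetical example function). With parallel=False the call reduces to an ordinary ordered loop, which is handy for debugging:

import numpy as np


def scaled_norm(arr, power, scale=1.0):
    return scale * np.linalg.norm(arr) ** power


arrays = [np.random.random(5) for _ in range(10)]
results = parallel_apply(
    scaled_norm, arrays,
    func_args=(2,),              # positional args appended after each element
    func_kwargs={'scale': 0.5},
    parallel=False,              # no process pool; results keep input order
    parallel_warning=False)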
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_tqdm(): """If running in a jupyter notebook, then returns tqdm_notebook. Otherwise returns a regular tqdm progress bar. Returns ------- progress: function """
try: progress = tqdm.tqdm_notebook assert get_ipython().has_trait('kernel') except (NameError, AssertionError): progress = tqdm.tqdm return progress
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summary_df_from_list(results_list, names, **kwargs): """Make a pandas data frame of the mean and std devs of each element of a list of 1d arrays, including the uncertainties on the values. This just converts the array to a DataFrame and calls summary_df on it. Parameters results_list: list of 1d numpy arrays Each array must have the same length as names. names: list of strs Names for the output df's columns. kwargs: dict, optional Keyword arguments to pass to summary_df. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details. """
for arr in results_list: assert arr.shape == (len(names),) df = pd.DataFrame(np.stack(results_list, axis=0)) df.columns = names return summary_df(df, **kwargs)
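Usage sketch (assumes summary_df_from_list above is in scope); each array in the list is one repeated calculation of two estimators:

import numpy as np

results_list = [np.random.random(2) for _ in range(50)]
df = summary_df_from_list(results_list, ['estimator_a', 'estimator_b'])
print(df)  # MultiIndex rows of means and stds with their numerical uncertainties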
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def summary_df_from_multi(multi_in, inds_to_keep=None, **kwargs): """Apply summary_df to a multiindex while preserving some levels. Parameters multi_in: multiindex pandas DataFrame inds_to_keep: None or list of strs, optional Index levels to preserve. kwargs: dict, optional Keyword arguments to pass to summary_df. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details. """
# Need to pop include true values and add separately at the end as # otherwise we get multiple true values added include_true_values = kwargs.pop('include_true_values', False) true_values = kwargs.get('true_values', None) if inds_to_keep is None: inds_to_keep = list(multi_in.index.names)[:-1] if 'calculation type' not in inds_to_keep: df = multi_in.groupby(inds_to_keep).apply( summary_df, include_true_values=False, **kwargs) else: # If there is already a level called 'calculation type' in multi, # summary_df will try making a second 'calculation type' index and (as # of pandas v0.23.0) throw an error. Avoid this by renaming. inds_to_keep = [lev if lev != 'calculation type' else 'calculation type temp' for lev in inds_to_keep] multi_temp = copy.deepcopy(multi_in) multi_temp.index.set_names( [lev if lev != 'calculation type' else 'calculation type temp' for lev in list(multi_temp.index.names)], inplace=True) df = multi_temp.groupby(inds_to_keep).apply( summary_df, include_true_values=False, **kwargs) # add the 'calculation type' values ('mean' and 'std') produced by # summary_df to the input calculation type names (now in level # 'calculation type temp') ind = (df.index.get_level_values('calculation type temp') + ' ' + df.index.get_level_values('calculation type')) order = list(df.index.names) order.remove('calculation type temp') df.index = df.index.droplevel( ['calculation type', 'calculation type temp']) df['calculation type'] = list(ind) df.set_index('calculation type', append=True, inplace=True) df = df.reorder_levels(order) if include_true_values: assert true_values is not None tv_ind = ['true values' if name == 'calculation type' else '' for name in df.index.names[:-1]] + ['value'] df.loc[tuple(tv_ind), :] = true_values return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def efficiency_gain_df(method_names, method_values, est_names, **kwargs): r"""Calculate a data frame showing .. math:: \mathrm{efficiency\,gain} = \frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}} See the dynamic nested sampling paper (Higson et al. 2019) for more details. The standard method on which to base the gain is assumed to be the first method input. The output DataFrame will contain rows: mean [dynamic goal]: mean calculation result for standard nested sampling and dynamic nested sampling with each input dynamic goal. std [dynamic goal]: standard deviation of results for standard nested sampling and dynamic nested sampling with each input dynamic goal. gain [dynamic goal]: the efficiency gain (computational speedup) from dynamic nested sampling compared to standard nested sampling. This equals (variance of standard results) / (variance of dynamic results); see the dynamic nested sampling paper for more details. Parameters method_names: list of strs method_values: list Each element is a list of 1d arrays of results for the method. Each array must have shape (len(est_names),). est_names: list of strs Provide column titles for output df. true_values: iterable of same length as estimators list True values of the estimators for the given likelihood and prior. Returns ------- results: pandas data frame Results data frame. """
true_values = kwargs.pop('true_values', None) include_true_values = kwargs.pop('include_true_values', False) include_rmse = kwargs.pop('include_rmse', False) adjust_nsamp = kwargs.pop('adjust_nsamp', None) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) if adjust_nsamp is not None: assert adjust_nsamp.shape == (len(method_names),) assert len(method_names) == len(method_values) df_dict = {} for i, method_name in enumerate(method_names): # Set include_true_values=False as we don't want them repeated for # every method df = summary_df_from_list( method_values[i], est_names, true_values=true_values, include_true_values=False, include_rmse=include_rmse) if i != 0: stats = ['std'] if include_rmse: stats.append('rmse') if adjust_nsamp is not None: # Efficiency gain measures performance per number of # samples (proportional to computational work). If the # number of samples is not the same we can adjust this. adjust = (adjust_nsamp[0] / adjust_nsamp[i]) else: adjust = 1 for stat in stats: # Calculate efficiency gain vs standard nested sampling gain, gain_unc = get_eff_gain( df_dict[method_names[0]].loc[(stat, 'value')], df_dict[method_names[0]].loc[(stat, 'uncertainty')], df.loc[(stat, 'value')], df.loc[(stat, 'uncertainty')], adjust=adjust) key = stat + ' efficiency gain' df.loc[(key, 'value'), :] = gain df.loc[(key, 'uncertainty'), :] = gain_unc df_dict[method_name] = df results = pd.concat(df_dict) results.index.rename('dynamic settings', level=0, inplace=True) new_ind = [] new_ind.append(pd.CategoricalIndex( results.index.get_level_values('calculation type'), ordered=True, categories=['true values', 'mean', 'std', 'rmse', 'std efficiency gain', 'rmse efficiency gain'])) new_ind.append(pd.CategoricalIndex( results.index.get_level_values('dynamic settings'), ordered=True, categories=[''] + method_names)) new_ind.append(results.index.get_level_values('result type')) results.set_index(new_ind, inplace=True) if include_true_values: with warnings.catch_warnings(): # Performance not an issue here so suppress annoying warning warnings.filterwarnings('ignore', message=( 'indexing past lexsort depth may impact performance.')) results.loc[('true values', '', 'value'), :] = true_values results.sort_index(inplace=True) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rmse_and_unc(values_array, true_values): r"""Calculate the root mean squared error and its numerical uncertainty. With a reasonably large number of values in values_array the uncertainty on sq_errors should be approximately normal (from the central limit theorem). Uncertainties are calculated via error propagation: if :math:`\sigma` is the error on :math:`X` then the error on :math:`\sqrt{X}` is :math:`\frac{\sigma}{2 \sqrt{X}}`. Parameters values_array: 2d numpy array Array of results: each row corresponds to a different estimate of the quantities considered. true_values: 1d numpy array Correct values for the quantities considered. Returns ------- rmse: 1d numpy array Root-mean-squared-error for each quantity. rmse_unc: 1d numpy array Numerical uncertainties on each element of rmse. """
assert true_values.shape == (values_array.shape[1],) errors = values_array - true_values[np.newaxis, :] sq_errors = errors ** 2 sq_errors_mean = np.mean(sq_errors, axis=0) sq_errors_mean_unc = (np.std(sq_errors, axis=0, ddof=1) / np.sqrt(sq_errors.shape[0])) rmse = np.sqrt(sq_errors_mean) rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc return rmse, rmse_unc
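Usage sketch (assumes rmse_and_unc above is in scope; the noisy estimates are a toy assumption):

import numpy as np

true_values = np.array([0.0, 1.0])
values_array = true_values[np.newaxis, :] + np.random.normal(scale=0.1, size=(100, 2))
rmse, rmse_unc = rmse_and_unc(values_array, true_values)
# rmse should come out close to 0.1 for both quantities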
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array_ratio_std(values_n, sigmas_n, values_d, sigmas_d): r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given their values and uncertainties. This assumes the covariance = 0, and that the input uncertainties are small compared to the corresponding input values. _n and _d denote the numerator and denominator respectively. Parameters values_n: float or numpy array Numerator values. sigmas_n: float or numpy array :math:`1\sigma` uncertainties on values_n. values_d: float or numpy array Denominator values. sigmas_d: float or numpy array :math:`1\sigma` uncertainties on values_d. Returns ------- std: float or numpy array :math:`1\sigma` uncertainty on values_n / values_d. """
std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2) std *= (values_n / values_d) return std
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array_given_run(ns_run): """Converts information on samples in a nested sampling run dictionary into a numpy array representation. This allows fast addition of more samples and recalculation of nlive. Parameters ns_run: dict Nested sampling run dict (see data_processing module docstring for more details). Returns ------- samples: 2d numpy array Array containing columns [logl, thread label, change in nlive at sample, (thetas)] with each row representing a single sample. """
samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1])) samples[:, 0] = ns_run['logl'] samples[:, 1] = ns_run['thread_labels'] # Calculate 'change in nlive' after each step samples[:-1, 2] = np.diff(ns_run['nlive_array']) samples[-1, 2] = -1 # nlive drops to zero after final point samples[:, 3:] = ns_run['theta'] return samples
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_run_threads(ns_run): """ Get the individual threads from a nested sampling run. Parameters ns_run: dict Nested sampling run dict (see data_processing module docstring for more details). Returns ------- threads: list of numpy array Each thread (list element) is a samples array containing columns [logl, thread label, change in nlive at sample, (thetas)] with each row representing a single sample. """
samples = array_given_run(ns_run) unique_threads = np.unique(ns_run['thread_labels']) assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0], ( 'some threads have no points! {0} != {1}'.format( unique_threads.shape[0], ns_run['thread_min_max'].shape[0])) threads = [] for i, th_lab in enumerate(unique_threads): thread_array = samples[np.where(samples[:, 1] == th_lab)] # delete changes in nlive due to other threads in the run thread_array[:, 2] = 0 thread_array[-1, 2] = -1 min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2)) assert min_max[0, 1] == thread_array[-1, 0], ( 'thread max logl should equal logl of its final point!') threads.append(dict_given_run_array(thread_array, min_max)) return threads
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combine_ns_runs(run_list_in, **kwargs): """ Combine a list of complete nested sampling run dictionaries into a single ns run. Input runs must not contain any repeated threads. Parameters run_list_in: list of dicts List of nested sampling runs in dict format (see data_processing module docstring for more details). kwargs: dict, optional Options for check_ns_run. Returns ------- run: dict Nested sampling run dict (see data_processing module docstring for more details). """
run_list = copy.deepcopy(run_list_in) if len(run_list) == 1: run = run_list[0] else: nthread_tot = 0 for i, _ in enumerate(run_list): check_ns_run(run_list[i], **kwargs) run_list[i]['thread_labels'] += nthread_tot nthread_tot += run_list[i]['thread_min_max'].shape[0] thread_min_max = np.vstack([run['thread_min_max'] for run in run_list]) # construct samples array from the threads, including an updated nlive samples_temp = np.vstack([array_given_run(run) for run in run_list]) samples_temp = samples_temp[np.argsort(samples_temp[:, 0])] # Make combined run run = dict_given_run_array(samples_temp, thread_min_max) # Combine only the additive properties stored in run['output'] run['output'] = {} for key in ['nlike', 'ndead']: try: run['output'][key] = sum([temp['output'][key] for temp in run_list_in]) except KeyError: pass check_ns_run(run, **kwargs) return run
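Sketch of combining two independent runs (assumes combine_ns_runs and the get_dummy_run test helper shown earlier are both in scope):

run_1 = get_dummy_run(2, 10, seed=0)
run_2 = get_dummy_run(2, 10, seed=1)
combined = combine_ns_runs([run_1, run_2])
assert combined['logl'].shape == (40,)
assert combined['thread_min_max'].shape == (4, 2)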
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combine_threads(threads, assert_birth_point=False): """ Combine list of threads into a single ns run. This is different to combining runs as repeated threads are allowed, and as some threads can start from log-likelihood contours on which no dead point in the run is present. Note that unless the thread labels are unique and in ascending order, the output will fail check_ns_run. However, provided the thread labels are not used, it will work fine for calculations based on nlive, logl and theta. Parameters threads: list of dicts List of nested sampling run dicts, each representing a single thread. assert_birth_point: bool, optional Whether or not to assert there is exactly one point present in the run with the log-likelihood at which each point was born. This is not true for bootstrap resamples of runs, where birth points may be repeated or not present at all. Returns ------- run: dict Nested sampling run dict (see data_processing module docstring for more details). """
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
    assert len(threads) == thread_min_max.shape[0]
    # construct samples array from the threads, including an updated nlive
    samples_temp = np.vstack([array_given_run(thread) for thread in threads])
    samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
    # update the changes in live points column for threads which start part way
    # through the run. These are only present in dynamic nested sampling.
    logl_starts = thread_min_max[:, 0]
    state = np.random.get_state()  # save random state
    np.random.seed(0)  # seed to make sure any random assignment is reproducible
    for logl_start in logl_starts[logl_starts != -np.inf]:
        ind = np.where(samples_temp[:, 0] == logl_start)[0]
        if assert_birth_point:
            assert ind.shape == (1,), \
                'No unique birth point! ' + str(ind.shape)
        if ind.shape == (1,):
            # If the point at which this thread started is present exactly
            # once in this bootstrap replication:
            samples_temp[ind[0], 2] += 1
        elif ind.shape == (0,):
            # If the point with the likelihood at which the thread started
            # is not present in this particular bootstrap replication,
            # approximate it with the point with the nearest likelihood.
            ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
            samples_temp[ind_closest, 2] += 1
        else:
            # If the point at which this thread started is present multiple
            # times in this bootstrap replication, select one at random to
            # increment nlive on. This avoids any systematic bias from e.g.
            # always choosing the first point.
            samples_temp[np.random.choice(ind), 2] += 1
    np.random.set_state(state)
    # make run
    ns_run = dict_given_run_array(samples_temp, thread_min_max)
    try:
        check_ns_run_threads(ns_run)
    except AssertionError:
        # If the threads are not valid (e.g. for bootstrap resamples) then
        # set them to None so they can't be accidentally used
        ns_run['thread_labels'] = None
        ns_run['thread_min_max'] = None
    return ns_run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_w_rel(ns_run, simulate=False): """Get the relative posterior weights of the samples, normalised so the maximum sample weight is 1. This is calculated from get_logw with protection against numerical overflows. Parameters ns_run: dict Nested sampling run dict (see data_processing module docstring for more details). simulate: bool, optional See the get_logw docstring for more details. Returns ------- w_rel: 1d numpy array Relative posterior masses of points. """
logw = get_logw(ns_run, simulate=simulate) return np.exp(logw - logw.max())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_logx(nlive, simulate=False): r"""Returns a logx vector showing the expected or simulated logx positions of points. The shrinkage factor between two points .. math:: t_i = X_{i-1} / X_{i} is distributed as the largest of :math:`n_i` uniform random variables between 1 and 0, where :math:`n_i` is the local number of live points. We are interested in .. math:: \log(t_i) = \log X_{i-1} - \log X_{i} which has expected value :math:`-1/n_i`. Parameters nlive: 1d numpy array Ordered local number of live points present at each point's iso-likelihood contour. simulate: bool, optional Should log prior volumes logx be simulated from their distribution (if False their expected values are used). Returns ------- logx: 1d numpy array log X values for points. """
assert nlive.min() > 0, ( 'nlive contains zeros or negative values! nlive = ' + str(nlive)) if simulate: logx_steps = np.log(np.random.random(nlive.shape)) / nlive else: logx_steps = -1 * (nlive.astype(float) ** -1) return np.cumsum(logx_steps)
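Usage sketch (assumes get_logx above is in scope): with a constant 100 live points the expected logx values shrink by 1/100 per iteration:

import numpy as np

nlive = np.full(500, 100)
logx_expected = get_logx(nlive)                 # [-0.01, -0.02, ..., -5.0]
logx_sampled = get_logx(nlive, simulate=True)   # one random realisation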
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_ns_run_members(run): """Check nested sampling run member keys and values. Parameters run: dict nested sampling run to check. Raises ------ AssertionError if run does not have expected properties. """
run_keys = list(run.keys()) # Mandatory keys for key in ['logl', 'nlive_array', 'theta', 'thread_labels', 'thread_min_max']: assert key in run_keys run_keys.remove(key) # Optional keys for key in ['output']: try: run_keys.remove(key) except ValueError: pass # Check for unexpected keys assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys) # Check type of mandatory members for key in ['logl', 'nlive_array', 'theta', 'thread_labels', 'thread_min_max']: assert isinstance(run[key], np.ndarray), ( key + ' is type ' + type(run[key]).__name__) # check shapes of keys assert run['logl'].ndim == 1 assert run['logl'].shape == run['nlive_array'].shape assert run['logl'].shape == run['thread_labels'].shape assert run['theta'].ndim == 2 assert run['logl'].shape[0] == run['theta'].shape[0]