<SYSTEM_TASK:> Will test whether the ACS service is up and alive. <END_TASK> <USER_TASK:> Description: def is_alive(self): """ Will test whether the ACS service is up and alive. """
response = self.get_monitoring_heartbeat()
if response.status_code == 200 and response.content == 'alive':
    return True

return False
<SYSTEM_TASK:> Will create or update a policy set for the given path. <END_TASK> <USER_TASK:> Description: def _put_policy_set(self, policy_set_id, body): """ Will create or update a policy set for the given path. """
assert isinstance(body, dict), "PUT requires body to be a dict."

uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._put(uri, body)
<SYSTEM_TASK:> Get a specific policy set by id. <END_TASK> <USER_TASK:> Description: def _get_policy_set(self, policy_set_id): """ Get a specific policy set by id. """
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._get(uri)
<SYSTEM_TASK:> Delete a specific policy set by id. Method is idempotent. <END_TASK> <USER_TASK:> Description: def delete_policy_set(self, policy_set_id): """ Delete a specific policy set by id. Method is idempotent. """
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._delete(uri)
<SYSTEM_TASK:> Will create a new policy set to enforce the given policy details. <END_TASK> <USER_TASK:> Description: def add_policy(self, name, action, resource, subject, condition, policy_set_id=None, effect='PERMIT'): """ Will create a new policy set to enforce the given policy details. The name is just a helpful descriptor for the policy. The action maps to an HTTP verb. Policies are evaluated against resources and subjects. They are identified by matching a uriTemplate or attributes. Examples:: resource = { "uriTemplate": "/asset/{id}" } subject = { "attributes": [{ "issuer": "default", "name": "role" }] } The condition is expected to be a string that defines a Groovy operation that can be evaluated. Examples:: condition = "match.single(subject.attributes('default', 'role'), 'admin')" """
# If not given a policy set id will generate one
if not policy_set_id:
    policy_set_id = str(uuid.uuid4())

# Only a few operations / actions are supported in policy definitions
if action not in ['GET', 'PUT', 'POST', 'DELETE']:
    raise ValueError("Invalid action")

# Defines a single policy to be part of the policy set.
policy = {
    "name": name,
    "target": {
        "resource": resource,
        "subject": subject,
        "action": action,
    },
    "conditions": [{
        "name": "",
        "condition": condition,
    }],
    "effect": effect,
}

# Body of the request is a list of policies
body = {
    "name": policy_set_id,
    "policies": [policy],
}

result = self._put_policy_set(policy_set_id, body)
return result
<SYSTEM_TASK:> Evaluate a policy-set against a subject and resource. <END_TASK> <USER_TASK:> Description: def is_allowed(self, subject_id, action, resource_id, policy_sets=[]): """ Evaluate a policy-set against a subject and resource. Example:: is_allowed('/user/j12y', 'GET', '/asset/12') """
body = { "action": action, "subjectIdentifier": subject_id, "resourceIdentifier": resource_id, } if policy_sets: body['policySetsEvaluationOrder'] = policy_sets # Will return a 200 with decision uri = self.uri + '/v1/policy-evaluation' logging.debug("URI=" + str(uri)) logging.debug("BODY=" + str(body)) response = self.service._post(uri, body) if 'effect' in response: if response['effect'] in ['NOT_APPLICABLE', 'PERMIT']: return True return False
<SYSTEM_TASK:> Fill in the path of the PEM file containing the CA certificate. <END_TASK> <USER_TASK:> Description: def _fill_in_cainfo(self): """Fill in the path of the PEM file containing the CA certificate. The priority is: 1. user provided path, 2. path to the cacert.pem bundle provided by certifi (if installed), 3. let pycurl use the system path where libcurl's cacert bundle is assumed to be stored, as established at libcurl build time. """
if self.cainfo:
    cainfo = self.cainfo
else:
    try:
        cainfo = certifi.where()
    except AttributeError:
        cainfo = None

if cainfo:
    self._pycurl.setopt(pycurl.CAINFO, cainfo)
<SYSTEM_TASK:> Sending a single cURL request to download <END_TASK> <USER_TASK:> Description: def curl(self): """Sending a single cURL request to download"""
c = self._pycurl

# Resume download
if os.path.exists(self.path) and self.resume:
    mode = 'ab'
    self.downloaded = os.path.getsize(self.path)
    c.setopt(pycurl.RESUME_FROM, self.downloaded)
else:
    mode = 'wb'

with open(self.path, mode) as f:
    c.setopt(c.URL, utf8_encode(self.url))
    if self.auth:
        c.setopt(c.USERPWD, '%s:%s' % self.auth)
    c.setopt(c.USERAGENT, self._user_agent)
    c.setopt(c.WRITEDATA, f)
    h = self._get_pycurl_headers()
    if h is not None:
        c.setopt(pycurl.HTTPHEADER, h)
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    c.setopt(c.PROGRESSFUNCTION, self.progress)
    self._fill_in_cainfo()
    if self._pass_through_opts:
        for key, value in self._pass_through_opts.items():
            c.setopt(key, value)
    c.perform()
<SYSTEM_TASK:> Start downloading, handling auto retry, download resume and path <END_TASK> <USER_TASK:> Description: def start(self): """ Start downloading, handling auto retry, download resume and path moving """
if not self.auto_retry:
    self.curl()
    return

while not self.is_finished:
    try:
        self.curl()
    except pycurl.error as e:
        # transfer closed with n bytes remaining to read
        if e.args[0] == pycurl.E_PARTIAL_FILE:
            pass
        # HTTP server doesn't seem to support byte ranges.
        # Cannot resume.
        elif e.args[0] == pycurl.E_HTTP_RANGE_ERROR:
            break
        # Recv failure: Connection reset by peer
        elif e.args[0] == pycurl.E_RECV_ERROR:
            if self._rst_retries < self.max_rst_retries:
                pass
            else:
                raise e
            self._rst_retries += 1
        else:
            raise e

self._move_path()
self._done()
<SYSTEM_TASK:> Create a new instance of the UAA service. Requires a <END_TASK> <USER_TASK:> Description: def create(self, secret, **kwargs): """ Create a new instance of the UAA service. Requires a secret password for the 'admin' user account. """
parameters = {"adminClientSecret": secret} self.service.create(parameters=parameters) # Store URI into environment variable predix.config.set_env_value(self.use_class, 'uri', self._get_uri()) # Once we create it login self.authenticate()
<SYSTEM_TASK:> Authenticate into the UAA instance as the admin user. <END_TASK> <USER_TASK:> Description: def authenticate(self): """ Authenticate into the UAA instance as the admin user. """
# Make sure we've stored uri for use
predix.config.set_env_value(self.use_class, 'uri', self._get_uri())

self.uaac = predix.security.uaa.UserAccountAuthentication()
self.uaac.authenticate('admin', self._get_admin_secret(), use_cache=False)

self.is_admin = True
<SYSTEM_TASK:> Use a cryptographically-secure pseudorandom number generator for picking <END_TASK> <USER_TASK:> Description: def _create_secret(self, length=12): """ Use a cryptographically-secure pseudorandom number generator for picking a combination of letters, digits, and punctuation to be our secret. :param length: how long to make the secret (12 seems ok most of the time) """
# Charset will have 64 +- characters
charset = string.digits + string.ascii_letters + '+-'
return "".join(random.SystemRandom().choice(charset) for _ in range(length))
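A quick sanity check on the comment above: the charset really does contain 64 characters, so each character contributes 6 bits and the default 12-character secret carries roughly 72 bits of entropy.

# Sanity check of the 64-character charset claim and the resulting entropy.
import math
import string

charset = string.digits + string.ascii_letters + '+-'
print(len(charset))                      # 64
print(12 * math.log2(len(charset)))      # 72.0 bits for the default length of 12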
<SYSTEM_TASK:> Create a new client for use by applications. <END_TASK> <USER_TASK:> Description: def create_client(self, client_id, client_secret): """ Create a new client for use by applications. """
assert self.is_admin, "Must authenticate() as admin to create client"
return self.uaac.create_client(client_id, client_secret)
<SYSTEM_TASK:> Add the client credentials to the specified manifest. <END_TASK> <USER_TASK:> Description: def add_client_to_manifest(self, client_id, client_secret, manifest): """ Add the client credentials to the specified manifest. """
assert self.is_admin, "Must authenticate() as admin to create client"
return self.uaac.add_client_to_manifest(client_id, client_secret, manifest)
<SYSTEM_TASK:> Returns the URI endpoint for an instance of a UAA <END_TASK> <USER_TASK:> Description: def _get_uaa_uri(self): """ Returns the URI endpoint for an instance of a UAA service instance from environment inspection. """
if 'VCAP_SERVICES' in os.environ:
    services = json.loads(os.getenv('VCAP_SERVICES'))
    predix_uaa = services['predix-uaa'][0]['credentials']
    return predix_uaa['uri']
else:
    return predix.config.get_env_value(self, 'uri')
<SYSTEM_TASK:> Returns response of authenticating with the given client and <END_TASK> <USER_TASK:> Description: def _authenticate_client(self, client, secret): """ Returns response of authenticating with the given client and secret. """
client_s = str.join(':', [client, secret])
credentials = base64.b64encode(client_s.encode('utf-8')).decode('utf-8')
headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Cache-Control': 'no-cache',
    'Authorization': 'Basic ' + credentials
}
params = {
    'client_id': client,
    'grant_type': 'client_credentials'
}

uri = self.uri + '/oauth/token'
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + str(params))

response = requests.post(uri, headers=headers, params=params)
if response.status_code == 200:
    logging.debug("RESPONSE=" + str(response.json()))
    return response.json()
else:
    logging.warning("Failed to authenticate as %s" % (client))
    response.raise_for_status()
<SYSTEM_TASK:> Returns the response of authenticating with the given <END_TASK> <USER_TASK:> Description: def _authenticate_user(self, user, password): """ Returns the response of authenticating with the given user and password. """
headers = self._get_headers()
params = {
    'username': user,
    'password': password,
    'grant_type': 'password',
}

uri = self.uri + '/oauth/token'
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + str(params))

response = requests.post(uri, headers=headers, params=params)
if response.status_code == 200:
    logging.debug("RESPONSE=" + str(response.json()))
    return response.json()
else:
    logging.warning("Failed to authenticate %s" % (user))
    response.raise_for_status()
<SYSTEM_TASK:> For a given client will test whether or not the token <END_TASK> <USER_TASK:> Description: def is_expired_token(self, client): """ For a given client will test whether or not the token has expired. This is for testing a client object and does not look up from client_id. You can use _get_client_from_cache() to lookup a client from client_id. """
if 'expires' not in client:
    return True

expires = dateutil.parser.parse(client['expires'])
if expires < datetime.datetime.now():
    return True

return False
<SYSTEM_TASK:> If we don't yet have a uaa cache we need to <END_TASK> <USER_TASK:> Description: def _initialize_uaa_cache(self): """ If we don't yet have a uaa cache we need to initialize it. As there may be more than one UAA instance we index by issuer and then store any clients, users, etc. """
try:
    os.makedirs(os.path.dirname(self._cache_path))
except OSError as exc:
    if exc.errno != errno.EEXIST:
        raise

data = {}
data[self.uri] = []
return data
<SYSTEM_TASK:> For the given client_id return what is <END_TASK> <USER_TASK:> Description: def _get_client_from_cache(self, client_id): """ For the given client_id return what is cached. """
data = self._read_uaa_cache()

# Only if we've cached any for this issuer
if self.uri not in data:
    return

for client in data[self.uri]:
    if client['id'] == client_id:
        return client
<SYSTEM_TASK:> Cache the client details into a cached file on disk. <END_TASK> <USER_TASK:> Description: def _write_to_uaa_cache(self, new_item): """ Cache the client details into a cached file on disk. """
data = self._read_uaa_cache()

# Initialize client list if first time
if self.uri not in data:
    data[self.uri] = []

# Remove existing client record and any expired tokens
for client in data[self.uri]:
    if new_item['id'] == client['id']:
        data[self.uri].remove(client)
        continue

    # May have old tokens laying around to be cleaned up
    if 'expires' in client:
        expires = dateutil.parser.parse(client['expires'])
        if expires < datetime.datetime.now():
            data[self.uri].remove(client)
            continue

data[self.uri].append(new_item)

with open(self._cache_path, 'w') as output:
    output.write(json.dumps(data, sort_keys=True, indent=4))
<SYSTEM_TASK:> Authenticate the given client against UAA. The resulting token <END_TASK> <USER_TASK:> Description: def authenticate(self, client_id, client_secret, use_cache=True): """ Authenticate the given client against UAA. The resulting token will be cached for reuse. """
# We will reuse a token for as long as we have one cached
# and it hasn't expired.
if use_cache:
    client = self._get_client_from_cache(client_id)
    if (client) and (not self.is_expired_token(client)):
        self.authenticated = True
        self.client = client
        return

# Let's authenticate the client
client = {
    'id': client_id,
    'secret': client_secret
}
res = self._authenticate_client(client_id, client_secret)
client.update(res)

expires = datetime.datetime.now() + \
    datetime.timedelta(seconds=res['expires_in'])
client['expires'] = expires.isoformat()

# Cache it for repeated use until expired
self._write_to_uaa_cache(client)
self.client = client
self.authenticated = True
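A short sketch of the caching flow above, using the UserAccountAuthentication class referenced earlier in this section; the client id and secret are placeholder values.

# Sketch of the token-caching flow; client id/secret are placeholder values.
import predix.security.uaa

uaac = predix.security.uaa.UserAccountAuthentication()

# First call posts to /oauth/token and caches the result with an 'expires' stamp.
uaac.authenticate('my-client', 'my-client-secret')

# Later calls reuse the cached token until is_expired_token() reports expiry.
headers = {'Authorization': 'Bearer ' + uaac.get_token()}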
<SYSTEM_TASK:> Log currently authenticated user out, invalidating any existing tokens. <END_TASK> <USER_TASK:> Description: def logout(self): """ Log currently authenticated user out, invalidating any existing tokens. """
# Remove token from local cache
# MAINT: need to expire token on server
data = self._read_uaa_cache()
if self.uri in data:
    for client in data[self.uri]:
        if client['id'] == self.client['id']:
            data[self.uri].remove(client)

with open(self._cache_path, 'w') as output:
    output.write(json.dumps(data, sort_keys=True, indent=4))
<SYSTEM_TASK:> Simple POST request for a given uri path. <END_TASK> <USER_TASK:> Description: def _post(self, uri, data, headers=None): """ Simple POST request for a given uri path. """
if not headers:
    headers = self._get_headers()

logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + str(data))

response = self.session.post(uri, headers=headers, data=json.dumps(data))
logging.debug("STATUS=" + str(response.status_code))

if response.status_code in [200, 201]:
    return response.json()
else:
    logging.error(b"ERROR=" + response.content)
    response.raise_for_status()
<SYSTEM_TASK:> Returns the bare access token for the authorized client. <END_TASK> <USER_TASK:> Description: def get_token(self): """ Returns the bare access token for the authorized client. """
if not self.authenticated:
    raise ValueError("Must authenticate() as a client first.")

# If token has expired we'll need to refresh and get a new
# client credential
if self.is_expired_token(self.client):
    logging.info("client token expired, will need to refresh token")
    self.authenticate(self.client['id'], self.client['secret'],
                      use_cache=False)

return self.client['access_token']
<SYSTEM_TASK:> Returns the scopes for the authenticated client. <END_TASK> <USER_TASK:> Description: def get_scopes(self): """ Returns the scopes for the authenticated client. """
if not self.authenticated:
    raise ValueError("Must authenticate() as a client first.")

scope = self.client['scope']
return scope.split()
<SYSTEM_TASK:> Warn that the required scope is not found in the scopes <END_TASK> <USER_TASK:> Description: def assert_has_permission(self, scope_required): """ Warn that the required scope is not found in the scopes granted to the currently authenticated user. :: # The admin user should have client admin permissions uaa.assert_has_permission('clients.admin') """
if not self.authenticated:
    raise ValueError("Must first authenticate()")

if scope_required not in self.get_scopes():
    logging.warning("Authenticated as %s" % (self.client['id']))
    logging.warning("Have scopes: %s" % (str.join(',', self.get_scopes())))
    logging.warning("Insufficient scope %s for operation" % (scope_required))
    raise ValueError("Client does not have permission.")

return True
<SYSTEM_TASK:> Grant the given client_id permissions for managing clients. <END_TASK> <USER_TASK:> Description: def grant_client_permissions(self, client_id, admin=False, write=False, read=False, secret=False): """ Grant the given client_id permissions for managing clients. - clients.admin: super user scope to create, modify, delete - clients.write: scope to create and modify clients - clients.read: scope to read info about clients - clients.secret: scope to change password of a client """
self.assert_has_permission('clients.admin')

perms = []
if admin:
    perms.append('clients.admin')
if write or admin:
    perms.append('clients.write')
if read or admin:
    perms.append('clients.read')
if secret or admin:
    perms.append('clients.secret')

if perms:
    self.update_client_grants(client_id, scope=perms, authorities=perms)
<SYSTEM_TASK:> Returns the clients stored in the instance of UAA. <END_TASK> <USER_TASK:> Description: def get_clients(self): """ Returns the clients stored in the instance of UAA. """
self.assert_has_permission('clients.read')

uri = self.uri + '/oauth/clients'
headers = self.get_authorization_headers()
response = requests.get(uri, headers=headers)
return response.json()['resources']
<SYSTEM_TASK:> Returns details about a specific client by the client_id. <END_TASK> <USER_TASK:> Description: def get_client(self, client_id): """ Returns details about a specific client by the client_id. """
self.assert_has_permission('clients.read')

uri = self.uri + '/oauth/clients/' + client_id
headers = self.get_authorization_headers()
response = requests.get(uri, headers=headers)
if response.status_code == 200:
    return response.json()
else:
    # Not found but don't raise
    return
<SYSTEM_TASK:> Will extend the client with additional scopes or <END_TASK> <USER_TASK:> Description: def update_client_grants(self, client_id, scope=[], authorities=[], grant_types=[], redirect_uri=[], replace=False): """ Will extend the client with additional scopes or authorities. Any existing scopes and authorities will be left as is unless asked to replace entirely. """
self.assert_has_permission('clients.write')

client = self.get_client(client_id)
if not client:
    raise ValueError("Must first create client: '%s'" % (client_id))

if replace:
    changes = {
        'client_id': client_id,
        'scope': scope,
        'authorities': authorities,
    }
else:
    changes = {'client_id': client_id}
    if scope:
        changes['scope'] = client['scope']
        changes['scope'].extend(scope)
    if authorities:
        changes['authorities'] = client['authorities']
        changes['authorities'].extend(authorities)
    if grant_types:
        if 'authorization_code' in grant_types and not redirect_uri:
            logging.warning("A redirect_uri is required for authorization_code.")
        changes['authorized_grant_types'] = client['authorized_grant_types']
        changes['authorized_grant_types'].extend(grant_types)
    if redirect_uri:
        if 'redirect_uri' in client:
            changes['redirect_uri'] = client['redirect_uri']
            changes['redirect_uri'].extend(redirect_uri)
        else:
            changes['redirect_uri'] = redirect_uri

uri = self.uri + '/oauth/clients/' + client_id
headers = {
    "pragma": "no-cache",
    "Cache-Control": "no-cache",
    "Content-Type": "application/json",
    "Accepts": "application/json",
    "Authorization": "Bearer " + self.get_token()
}

logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + json.dumps(changes))

response = requests.put(uri, headers=headers, data=json.dumps(changes))
logging.debug("STATUS=" + str(response.status_code))

if response.status_code == 200:
    return response
else:
    logging.error(response.content)
    response.raise_for_status()
<SYSTEM_TASK:> Will create a new client for your application use. <END_TASK> <USER_TASK:> Description: def create_client(self, client_id, client_secret, manifest=None, client_credentials=True, refresh_token=True, authorization_code=False, redirect_uri=[]): """ Will create a new client for your application use. - client_credentials: allows client to get access token - refresh_token: can be used to get new access token when expired without re-authenticating - authorization_code: redirection-based flow for user authentication More details about Grant types: - https://github.com/cloudfoundry/uaa/blob/master/docs/UAA-Security.md - https://tools.ietf.org/html/rfc6749 A redirect_uri is required when using authorization_code. See: https://www.predix.io/support/article/KB0013026 """
self.assert_has_permission('clients.admin')

if authorization_code and not redirect_uri:
    raise ValueError("Must provide a redirect_uri for clients used with authorization_code")

# Check if client already exists
client = self.get_client(client_id)
if client:
    return client

uri = self.uri + '/oauth/clients'
headers = {
    "pragma": "no-cache",
    "Cache-Control": "no-cache",
    "Content-Type": "application/json",
    "Accepts": "application/json",
    "Authorization": "Bearer " + self.get_token()
}

grant_types = []
if client_credentials:
    grant_types.append('client_credentials')
if refresh_token:
    grant_types.append('refresh_token')
if authorization_code:
    grant_types.append('authorization_code')

params = {
    "client_id": client_id,
    "client_secret": client_secret,
    "scope": ["uaa.none"],
    "authorized_grant_types": grant_types,
    "authorities": ["uaa.none"],
    "autoapprove": []
}
if redirect_uri:
    # params is a dict, not a list, so store the redirect_uri under its key
    params["redirect_uri"] = redirect_uri

response = requests.post(uri, headers=headers, data=json.dumps(params))
if response.status_code == 201:
    if manifest:
        self.add_client_to_manifest(client_id, client_secret, manifest)

    client = {
        'id': client_id,
        'secret': client_secret
    }
    self._write_to_uaa_cache(client)

    return response
else:
    logging.error(response.content)
    response.raise_for_status()
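A hedged example of creating an authorization_code client with the method above and then widening its grants via update_client_grants(); the client credentials and redirect URI are placeholders, and uaa stands for an already-authenticated admin client object.

# Placeholder values; create_client() and update_client_grants() are defined above.
uaa.create_client('my-app-client', 'my-app-secret',
                  client_credentials=True,
                  refresh_token=True,
                  authorization_code=True,
                  redirect_uri=['https://my-app.example.com/callback'])

# New clients start with only 'uaa.none'; grant working scopes afterwards.
uaa.update_client_grants('my-app-client',
                         scope=['openid'],
                         authorities=['uaa.resource'])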
<SYSTEM_TASK:> Creates a new user account with the required details. <END_TASK> <USER_TASK:> Description: def create_user(self, username, password, family_name, given_name, primary_email, details={}): """ Creates a new user account with the required details. :: create_user('j12y', 'my-secret', 'Delancey', 'Jayson', '[email protected]') """
self.assert_has_permission('scim.write')

data = {
    'userName': username,
    'password': password,
    'name': {
        'familyName': family_name,
        'givenName': given_name,
    },
    'emails': [{
        'value': primary_email,
        'primary': True,
    }]
}
if details:
    data.update(details)

return self._post_user(data)
<SYSTEM_TASK:> Delete user with given id. <END_TASK> <USER_TASK:> Description: def delete_user(self, id): """ Delete user with given id. """
self.assert_has_permission('scim.write')

uri = self.uri + '/Users/%s' % id
headers = self._get_headers()
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))

response = self.session.delete(uri, headers=headers)
logging.debug("STATUS=" + str(response.status_code))

if response.status_code == 200:
    return response
else:
    logging.error(response.content)
    response.raise_for_status()
<SYSTEM_TASK:> Returns details for user of the given username. <END_TASK> <USER_TASK:> Description: def get_user_by_username(self, username): """ Returns details for user of the given username. If there is more than one match will only return the first. Use get_users() for full result set. """
results = self.get_users(filter='username eq "%s"' % (username))
if results['totalResults'] == 0:
    logging.warning("Found no matches for given username.")
    return
elif results['totalResults'] > 1:
    logging.warning("Found %s matches for username %s" %
                    (results['totalResults'], username))

return results['resources'][0]
<SYSTEM_TASK:> Returns details for user with the given email address. <END_TASK> <USER_TASK:> Description: def get_user_by_email(self, email): """ Returns details for user with the given email address. If there is more than one match will only return the first. Use get_users() for full result set. """
results = self.get_users(filter='email eq "%s"' % (email))
if results['totalResults'] == 0:
    logging.warning("Found no matches for given email.")
    return
elif results['totalResults'] > 1:
    logging.warning("Found %s matches for email %s" %
                    (results['totalResults'], email))

return results['resources'][0]
<SYSTEM_TASK:> Returns details about the user for the given id. <END_TASK> <USER_TASK:> Description: def get_user(self, id): """ Returns details about the user for the given id. Use get_user_by_email() or get_user_by_username() for help identifying the id. """
self.assert_has_permission('scim.read')
return self._get(self.uri + '/Users/%s' % (id))
<SYSTEM_TASK:> Grant the given client id all the scopes and authorities <END_TASK> <USER_TASK:> Description: def grant_client(self, client_id, read=True, write=True): """ Grant the given client id all the scopes and authorities needed to work with the timeseries service. """
scopes = ['openid']
authorities = ['uaa.resource']

if write:
    for zone in self.service.settings.data['ingest']['zone-token-scopes']:
        scopes.append(zone)
        authorities.append(zone)

if read:
    for zone in self.service.settings.data['query']['zone-token-scopes']:
        scopes.append(zone)
        authorities.append(zone)

self.service.uaa.uaac.update_client_grants(client_id, scope=scopes,
                                            authorities=authorities)

return self.service.uaa.uaac.get_client(client_id)
<SYSTEM_TASK:> Return the uri used for queries on time series data. <END_TASK> <USER_TASK:> Description: def get_query_uri(self): """ Return the uri used for queries on time series data. """
# Query URI has extra path we don't want so strip it off here
query_uri = self.service.settings.data['query']['uri']
query_uri = urlparse(query_uri)
return query_uri.scheme + '://' + query_uri.netloc
<SYSTEM_TASK:> This method tries to determine the requirements of a particular project <END_TASK> <USER_TASK:> Description: def find_requirements(path): """ This method tries to determine the requirements of a particular project by inspecting the possible places that they could be defined. It will attempt, in order: 1) to parse setup.py in the root for an install_requires value 2) to read a requirements.txt file or a requirements.pip in the root 3) to read all .txt files in a folder called 'requirements' in the root 4) to read files matching "*requirements*.txt" and "*reqs*.txt" in the root, excluding any starting or ending with 'test' If one of these succeeds, then a list of pkg_resources.Requirement's will be returned. If none can be found, then a RequirementsNotFound will be raised """
requirements = []

setup_py = os.path.join(path, 'setup.py')
if os.path.exists(setup_py) and os.path.isfile(setup_py):
    try:
        requirements = from_setup_py(setup_py)
        requirements.sort()
        return requirements
    except CouldNotParseRequirements:
        pass

for reqfile_name in ('requirements.txt', 'requirements.pip'):
    reqfile_path = os.path.join(path, reqfile_name)
    if os.path.exists(reqfile_path) and os.path.isfile(reqfile_path):
        try:
            requirements += from_requirements_txt(reqfile_path)
        except CouldNotParseRequirements as e:
            pass

requirements_dir = os.path.join(path, 'requirements')
if os.path.exists(requirements_dir) and os.path.isdir(requirements_dir):
    from_dir = from_requirements_dir(requirements_dir)
    if from_dir is not None:
        requirements += from_dir

from_blob = from_requirements_blob(path)
if from_blob is not None:
    requirements += from_blob

requirements = list(set(requirements))
if len(requirements) > 0:
    requirements.sort()
    return requirements

raise RequirementsNotFound
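A small usage sketch; the project path is a placeholder and the returned objects are pkg_resources.Requirement instances as stated in the docstring.

# Placeholder path; find_requirements() and RequirementsNotFound are defined above.
try:
    for req in find_requirements('/path/to/project'):
        print(req.project_name, req.specs)
except RequirementsNotFound:
    print("No requirements could be located")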
<SYSTEM_TASK:> Returns the GUID for the app instance with <END_TASK> <USER_TASK:> Description: def get_app_guid(self, app_name): """ Returns the GUID for the app instance with the given name. """
summary = self.space.get_space_summary()
for app in summary['apps']:
    if app['name'] == app_name:
        return app['guid']
<SYSTEM_TASK:> Delete the given app. <END_TASK> <USER_TASK:> Description: def delete_app(self, app_name): """ Delete the given app. Will fail intentionally if there are any service bindings. You must delete those first. """
if app_name not in self.space.get_apps():
    logging.warning("App not found so... succeeded?")
    return True

guid = self.get_app_guid(app_name)
self.api.delete("/v2/apps/%s" % (guid))
<SYSTEM_TASK:> Reads in config file of UAA credential information <END_TASK> <USER_TASK:> Description: def _get_service_config(self): """ Reads in config file of UAA credential information or generates one as a side-effect if not yet initialized. """
# Should work for windows, osx, and linux environments
if not os.path.exists(self.config_path):
    try:
        os.makedirs(os.path.dirname(self.config_path))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    return {}

with open(self.config_path, 'r') as data:
    return json.load(data)
<SYSTEM_TASK:> Will write the config out to disk. <END_TASK> <USER_TASK:> Description: def _write_service_config(self): """ Will write the config out to disk. """
with open(self.config_path, 'w') as output:
    output.write(json.dumps(self.data, sort_keys=True, indent=4))
<SYSTEM_TASK:> Create an instance of the Blob Store Service with the typical <END_TASK> <USER_TASK:> Description: def create(self, **kwargs): """ Create an instance of the Blob Store Service with the typical starting settings. """
self.service.create(**kwargs)

predix.config.set_env_value(self.use_class, 'url',
                            self.service.settings.data['url'])
predix.config.set_env_value(self.use_class, 'access_key_id',
                            self.service.settings.data['access_key_id'])
predix.config.set_env_value(self.use_class, 'bucket_name',
                            self.service.settings.data['bucket_name'])
predix.config.set_env_value(self.use_class, 'host',
                            self.service.settings.data['host'])
predix.config.set_env_value(self.use_class, 'secret_access_key',
                            self.service.settings.data['secret_access_key'])
<SYSTEM_TASK:> Returns the raw results of an asset search for a given bounding <END_TASK> <USER_TASK:> Description: def _get_assets(self, bbox, size=None, page=None, asset_type=None, device_type=None, event_type=None, media_type=None): """ Returns the raw results of an asset search for a given bounding box. """
uri = self.uri + '/v1/assets/search' headers = self._get_headers() params = { 'bbox': bbox, } # Query parameters params['q'] = [] if device_type: if isinstance(device_type, str): device_type = [device_type] for device in device_type: if device not in self.DEVICE_TYPES: logging.warning("Invalid device type: %s" % device) params['q'].append("device-type:%s" % device) if asset_type: if isinstance(asset_type, str): asset_type = [asset_type] for asset in asset_type: if asset not in self.ASSET_TYPES: logging.warning("Invalid asset type: %s" % asset) params['q'].append("assetType:%s" % asset) if media_type: if isinstance(media_type, str): media_type = [media_type] for media in media_type: if media not in self.MEDIA_TYPES: logging.warning("Invalid media type: %s" % media) params['q'].append("mediaType:%s" % media) if event_type: if isinstance(event_type, str): event_type = [event_type] for event in event_type: if event not in self.EVENT_TYPES: logging.warning("Invalid event type: %s" % event) params['q'].append("eventTypes:%s" % event) # Pagination parameters if size: params['size'] = size if page: params['page'] = page return self.service._get(uri, params=params, headers=headers)
<SYSTEM_TASK:> Query the assets stored in the intelligent environment for a given <END_TASK> <USER_TASK:> Description: def get_assets(self, bbox, **kwargs): """ Query the assets stored in the intelligent environment for a given bounding box and query. Assets can be filtered by type of asset, event, or media available. - device_type=['DATASIM'] - asset_type=['CAMERA'] - event_type=['PKIN'] - media_type=['IMAGE'] Pagination can be controlled with keyword parameters - page=2 - size=100 Returns a list of assets stored in a dictionary that describe their: - asset-id - device-type - device-id - media-type - coordinates - event-type Additionally there are some _links for additional information. """
response = self._get_assets(bbox, **kwargs)

# Remove broken HATEOAS _links but identify asset uid first
assets = []
for asset in response['_embedded']['assets']:
    asset_url = asset['_links']['self']
    uid = asset_url['href'].split('/')[-1]
    asset['uid'] = uid

    del(asset['_links'])
    assets.append(asset)

return assets
<SYSTEM_TASK:> Returns raw response for a given asset by its unique id. <END_TASK> <USER_TASK:> Description: def _get_asset(self, asset_uid): """ Returns raw response for a given asset by its unique id. """
uri = self.uri + '/v2/assets/' + asset_uid
headers = self._get_headers()
return self.service._get(uri, headers=headers)
<SYSTEM_TASK:> Label input grid with hysteresis method. <END_TASK> <USER_TASK:> Description: def label(self, input_grid): """ Label input grid with hysteresis method. Args: input_grid: 2D array of values. Returns: Labeled output grid. """
unset = 0
high_labels, num_labels = label(input_grid > self.high_thresh)
region_ranking = np.argsort(maximum(input_grid, high_labels,
                                    index=np.arange(1, num_labels + 1)))[::-1]
output_grid = np.zeros(input_grid.shape, dtype=int)
stack = []
for rank in region_ranking:
    label_num = rank + 1
    label_i, label_j = np.where(high_labels == label_num)
    for i in range(label_i.size):
        if output_grid[label_i[i], label_j[i]] == unset:
            stack.append((label_i[i], label_j[i]))
    while len(stack) > 0:
        index = stack.pop()
        output_grid[index] = label_num
        for i in range(index[0] - 1, index[0] + 2):
            for j in range(index[1] - 1, index[1] + 2):
                if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:
                    if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):
                        stack.append((i, j))
return output_grid
<SYSTEM_TASK:> Remove labeled objects that do not meet size threshold criteria. <END_TASK> <USER_TASK:> Description: def size_filter(labeled_grid, min_size): """ Remove labeled objects that do not meet size threshold criteria. Args: labeled_grid: 2D output from label method. min_size: minimum size of object in pixels. Returns: labeled grid with smaller objects removed. """
out_grid = np.zeros(labeled_grid.shape, dtype=int)
slices = find_objects(labeled_grid)
j = 1
for i, s in enumerate(slices):
    box = labeled_grid[s]
    size = np.count_nonzero(box.ravel() == (i + 1))
    if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:
        out_grid[np.where(labeled_grid == i + 1)] = j
        j += 1
return out_grid
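A tiny worked example of the two steps above; the labeler's constructor is an assumption since only label() and size_filter() are shown.

# The labeler's constructor is assumed to take low/high thresholds; only label()
# and size_filter() are defined above.
import numpy as np

grid = np.array([[0., 0., 0., 0., 0.],
                 [0., 5., 9., 5., 0.],
                 [0., 5., 5., 0., 0.],
                 [0., 0., 0., 0., 8.],
                 [0., 0., 0., 0., 0.]])

labeler = Hysteresis(low_thresh=4, high_thresh=7)   # hypothetical constructor
labeled = labeler.label(grid)                 # regions seeded at the 9 and the 8
filtered = size_filter(labeled, min_size=2)   # drops the lone pixel at (3, 4)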
<SYSTEM_TASK:> Return a string value that the user enters. Raises exception for cancel. <END_TASK> <USER_TASK:> Description: def get_string(self, prompt, default_str=None) -> str: """Return a string value that the user enters. Raises exception for cancel."""
accept_event = threading.Event() value_ref = [None] def perform(): def accepted(text): value_ref[0] = text accept_event.set() def rejected(): accept_event.set() self.__message_column.remove_all() pose_get_string_message_box(self.ui, self.__message_column, prompt, str(default_str), accepted, rejected) #self.__message_column.add(self.__make_cancel_row()) with self.__lock: self.__q.append(perform) self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q) accept_event.wait() def update_message_column(): self.__message_column.remove_all() self.__message_column.add(self.__make_cancel_row()) self.document_controller.add_task("ui_" + str(id(self)), update_message_column) if value_ref[0] is None: raise Exception("Cancel") return value_ref[0]
<SYSTEM_TASK:> Parse an arbitrary block of python code to get the value of a named argument <END_TASK> <USER_TASK:> Description: def value_of_named_argument_in_function(argument_name, function_name, search_str, resolve_varname=False): """ Parse an arbitrary block of python code to get the value of a named argument from inside a function call """
try: search_str = unicode(search_str) except NameError: pass readline = StringIO(search_str).readline try: token_generator = tokenize.generate_tokens(readline) tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator] except tokenize.TokenError as e: raise ValueError('search_str is not parse-able python code: ' + str(e)) in_function = False is_var = False for i in range(len(tokens)): if ( not in_function and tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '(' ): in_function = True continue elif ( in_function and tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '=' ): # value is set to another variable which we are going to attempt to resolve if resolve_varname and tokens[i+2].typenum == 1: is_var = True argument_name = tokens[i+2].value break # again, for a very specific usecase -- get the whole value and concatenate it # this will match something like _version.__version__ j = 3 while True: if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58: break j += 1 return ''.join([t.value for t in tokens[i+2:i+j]]).strip() # this is very dumb logic, and only works if the function argument is set to a variable # which is set to a string value if is_var: for i in range(len(tokens)): if ( tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '=' ): return tokens[i+2].value.strip() return None
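An illustrative call; the setup() snippet being parsed is made up, and note that the function returns the raw source token, so string values keep their quotes.

# The parsed snippet is made up for illustration; the return value is the raw
# token text, quotes included.
setup_py_text = "setup(name='example-pkg', version='1.2.3', packages=[])"
print(value_of_named_argument_in_function('version', 'setup', setup_py_text))
# prints '1.2.3' (with the quotes)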
<SYSTEM_TASK:> Search for a regex in a file <END_TASK> <USER_TASK:> Description: def regex_in_file(regex, filepath, return_match=False): """ Search for a regex in a file If return_match is True, return the found object instead of a boolean """
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
<SYSTEM_TASK:> Search for a regex in a file contained within the package directory <END_TASK> <USER_TASK:> Description: def regex_in_package_file(regex, filename, package_name, return_match=False): """ Search for a regex in a file contained within the package directory If return_match is True, return the found object instead of a boolean """
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
<SYSTEM_TASK:> Test to see if a string is a URL or not, defined in this case as a string for which <END_TASK> <USER_TASK:> Description: def string_is_url(test_str): """ Test to see if a string is a URL or not, defined in this case as a string for which urlparse returns a scheme component >>> string_is_url('somestring') False >>> string_is_url('https://some.domain.org/path') True """
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
<SYSTEM_TASK:> Begin transaction state for item. <END_TASK> <USER_TASK:> Description: def item_transaction(self, item) -> Transaction: """Begin transaction state for item. A transaction state exists to prevent writing out to disk, mainly for performance reasons. All changes to the object are delayed until the transaction state exits. This method is thread safe. """
items = self.__build_transaction_items(item)
transaction = Transaction(self, item, items)
self.__transactions.append(transaction)
return transaction
<SYSTEM_TASK:> Insert a new data item into document model. <END_TASK> <USER_TASK:> Description: def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None: """Insert a new data item into document model. This method is NOT threadsafe. """
assert data_item is not None
assert data_item not in self.data_items
assert before_index <= len(self.data_items) and before_index >= 0
assert data_item.uuid not in self.__uuid_to_data_item

# update the session
data_item.session_id = self.session_id

# insert in internal list
self.__insert_data_item(before_index, data_item, do_write=True)

# automatically add a display
if auto_display:
    display_item = DisplayItem.DisplayItem(data_item=data_item)
    self.append_display_item(display_item)
<SYSTEM_TASK:> Begins a live state for the data item. <END_TASK> <USER_TASK:> Description: def begin_data_item_live(self, data_item): """Begins a live state for the data item. The live state is propagated to dependent data items. This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive. """
with self.__live_data_items_lock:
    old_live_count = self.__live_data_items.get(data_item.uuid, 0)
    self.__live_data_items[data_item.uuid] = old_live_count + 1
    if old_live_count == 0:
        data_item._enter_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.begin_data_item_live(dependent_data_item)
<SYSTEM_TASK:> Ends a live state for the data item. <END_TASK> <USER_TASK:> Description: def end_data_item_live(self, data_item): """Ends a live state for the data item. The live-ness property is propagated to dependent data items, similar to the transactions. This method is thread safe. """
with self.__live_data_items_lock:
    live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
    assert live_count >= 0
    self.__live_data_items[data_item.uuid] = live_count
    if live_count == 0:
        data_item._exit_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.end_data_item_live(dependent_data_item)
<SYSTEM_TASK:> Construct a data item reference. <END_TASK> <USER_TASK:> Description: def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel): """Construct a data item reference. Construct a data item reference and assign a data item to it. Update data item session id and session metadata. Also connect the data channel processor. This method is thread safe. """
session_id = self.session_id key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id) data_item_reference = self.get_data_item_reference(key) with data_item_reference.mutex: data_item = data_item_reference.data_item # if we still don't have a data item, create it. if data_item is None: data_item = DataItem.DataItem() data_item.ensure_data_source() data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name data_item.category = "temporary" data_item_reference.data_item = data_item def append_data_item(): self.append_data_item(data_item) self._update_data_item_reference(key, data_item) self.__call_soon(append_data_item) def update_session(): # update the session, but only if necessary (this is an optimization to prevent unnecessary display updates) if data_item.session_id != session_id: data_item.session_id = session_id session_metadata = ApplicationData.get_session_metadata_dict() if data_item.session_metadata != session_metadata: data_item.session_metadata = session_metadata if data_channel.processor: src_data_channel = hardware_source.data_channels[data_channel.src_channel_index] src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id)) data_channel.processor.connect_data_item_reference(src_data_item_reference) self.__call_soon(update_session) return data_item_reference
<SYSTEM_TASK:> Loads time series of 2D data grids from each opened file. The code <END_TASK> <USER_TASK:> Description: def load_data_old(self): """ Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported. """
units = "" if len(self.file_objects) == 1 and self.file_objects[0] is not None: data = self.file_objects[0].variables[self.variable][self.forecast_hours] if hasattr(self.file_objects[0].variables[self.variable], "units"): units = self.file_objects[0].variables[self.variable].units elif len(self.file_objects) > 1: grid_shape = [len(self.file_objects), 1, 1] for file_object in self.file_objects: if file_object is not None: if self.variable in file_object.variables.keys(): grid_shape = file_object.variables[self.variable].shape elif self.variable.ljust(6, "_") in file_object.variables.keys(): grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape else: print("{0} not found".format(self.variable)) raise KeyError break data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2])) for f, file_object in enumerate(self.file_objects): if file_object is not None: if self.variable in file_object.variables.keys(): var_name = self.variable elif self.variable.ljust(6, "_") in file_object.variables.keys(): var_name = self.variable.ljust(6, "_") else: print("{0} not found".format(self.variable)) raise KeyError data[f] = file_object.variables[var_name][0] if units == "" and hasattr(file_object.variables[var_name], "units"): units = file_object.variables[var_name].units else: data = None return data, units
<SYSTEM_TASK:> Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats. <END_TASK> <USER_TASK:> Description: def load_data(self): """ Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats. Returns: Array of data loaded from files in (time, y, x) dimensions, Units """
units = "" if self.file_objects[0] is None: raise IOError() var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys())) ntimes = 0 if 'time' in self.file_objects[0].variables[var_name].dimensions: ntimes = len(self.file_objects[0].dimensions['time']) if ntimes > 1: if z_index is None: data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32) else: data = self.file_objects[0].variables[var_name][self.forecast_hours, z_index].astype(np.float32) else: y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:] data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32) for f, file_object in enumerate(self.file_objects): if file_object is not None: if z_index is None: data[f] = file_object.variables[var_name][0] else: data[f] = file_object.variables[var_name][0, z_index] if hasattr(self.file_objects[0].variables[var_name], "units"): units = self.file_objects[0].variables[var_name].units return data, units
<SYSTEM_TASK:> Searches var list for variable name, checks other variable name format options. <END_TASK> <USER_TASK:> Description: def format_var_name(variable, var_list): """ Searches var list for variable name, checks other variable name format options. Args: variable (str): Variable being loaded var_list (list): List of variables in file. Returns: Name of variable in file containing relevant data, and index of variable z-level if multiple variables contained in same array in file. """
z_index = None
if variable in var_list:
    var_name = variable
elif variable.ljust(6, "_") in var_list:
    var_name = variable.ljust(6, "_")
elif any([variable in v_sub.split("_") for v_sub in var_list]):
    var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
    z_index = var_name.split("_").index(variable)
else:
    raise KeyError("{0} not found in {1}".format(variable, var_list))
return var_name, z_index
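A small illustration of the three lookup paths handled above, using a made-up variable list.

# Made-up variable list; format_var_name() is the function defined above.
var_list = ["TMP", "SHEAR_", "U_V"]

format_var_name("TMP", var_list)    # -> ("TMP", None): exact match
format_var_name("SHEAR", var_list)  # -> ("SHEAR_", None): name padded to 6 characters with '_'
format_var_name("V", var_list)      # -> ("U_V", 1): z-index 1 within the combined variable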
<SYSTEM_TASK:> Output hail forecast values to csv files by run date and ensemble member. <END_TASK> <USER_TASK:> Description: def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"): """ Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns: """
merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"],
                            on=["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"])
all_members = self.data[mode]["combo"]["Ensemble_Member"]
members = np.unique(all_members)
all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
print(run_dates)
for member in members:
    for run_date in run_dates:
        mem_run_index = (all_run_dates == run_date) & (all_members == member)
        member_forecast = merged_forecasts.loc[mem_run_index]
        member_forecast.to_csv(join(csv_path,
                                    "hail_forecasts_{0}_{1}_{2}.csv".format(
                                        self.ensemble_name, member,
                                        run_date.strftime(run_date_format))))
return
<SYSTEM_TASK:> Loads the forecast files and gathers the forecast information into pandas DataFrames. <END_TASK> <USER_TASK:> Description: def load_forecasts(self): """ Loads the forecast files and gathers the forecast information into pandas DataFrames. """
forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"), self.ensemble_member) forecast_files = sorted(glob(forecast_path + "*.json")) for forecast_file in forecast_files: file_obj = open(forecast_file) json_obj = json.load(file_obj) file_obj.close() track_id = json_obj['properties']["id"] obs_track_id = json_obj['properties']["obs_track_id"] forecast_hours = json_obj['properties']['times'] duration = json_obj['properties']['duration'] for f, feature in enumerate(json_obj['features']): area = np.sum(feature["properties"]["masks"]) step_id = track_id + "_{0:02d}".format(f) for model_type in self.model_types: for model_name in self.model_names[model_type]: prediction = feature['properties'][model_type + "_" + model_name.replace(" ", "-")] if model_type == "condition": prediction = [prediction] row = [track_id, obs_track_id, self.ensemble_name, self.ensemble_member, forecast_hours[f], f + 1, duration, area] + prediction self.forecasts[model_type][model_name].loc[step_id] = row
<SYSTEM_TASK:> Loads the track total and step files and merges the information into a single data frame. <END_TASK> <USER_TASK:> Description: def load_obs(self): """ Loads the track total and step files and merges the information into a single data frame. """
track_total_file = self.track_data_csv_path + \
    "track_total_{0}_{1}_{2}.csv".format(self.ensemble_name,
                                         self.ensemble_member,
                                         self.run_date.strftime("%Y%m%d"))
track_step_file = self.track_data_csv_path + \
    "track_step_{0}_{1}_{2}.csv".format(self.ensemble_name,
                                        self.ensemble_member,
                                        self.run_date.strftime("%Y%m%d"))
track_total_cols = ["Track_ID", "Translation_Error_X", "Translation_Error_Y", "Start_Time_Error"]
track_step_cols = ["Step_ID", "Track_ID", "Hail_Size", "Shape", "Location", "Scale"]
track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols)
track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols)
obs_data = pd.merge(track_step_data, track_total_data, on="Track_ID", how="left")
self.obs = obs_data
<SYSTEM_TASK:> Match forecasts and observations. <END_TASK> <USER_TASK:> Description: def merge_obs(self): """ Match forecasts and observations. """
for model_type in self.model_types:
    self.matched_forecasts[model_type] = {}
    for model_name in self.model_names[model_type]:
        self.matched_forecasts[model_type][model_name] = pd.merge(
            self.forecasts[model_type][model_name], self.obs,
            right_on="Step_ID", how="left", left_index=True)
<SYSTEM_TASK:> Calculates a ROC curve at a specified intensity threshold. <END_TASK> <USER_TASK:> Description: def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None): """ Calculates a ROC curve at a specified intensity threshold. Args: model_type: type of model being evaluated (e.g. size). model_name: machine learning model being evaluated intensity_threshold: forecast bin used as the split point for evaluation prob_thresholds: Array of probability thresholds being evaluated. query: str to filter forecasts based on values of forecasts, obs, and metadata. Returns: A DistributedROC object """
roc_obj = DistributedROC(prob_thresholds, 0.5) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] obs_values = np.zeros(sub_forecasts.shape[0]) if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_values = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.forecast_bins[model_type]].values]) obs_probs = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) obs_values[obs_probs >= 0.01] = 1 elif len(self.forecast_bins[model_type]) > 1: fbin = np.argmin(np.abs(self.forecast_bins[model_type] - intensity_threshold)) forecast_values = 1 - sub_forecasts[self.forecast_bins[model_type].astype(str)].values.cumsum(axis=1)[:, fbin] obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 else: forecast_values = sub_forecasts[self.forecast_bins[model_type].astype(str)[0]].values obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 roc_obj.update(forecast_values, obs_values) return roc_obj
<SYSTEM_TASK:> Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. <END_TASK> <USER_TASK:> Description: def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None): """ Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object. """
if query is not None: dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query) dist_forecasts = dist_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: dist_forecasts = self.matched_forecasts["dist"][dist_model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples)) areas = dist_forecasts["Area"].values for f in np.arange(dist_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values, size=(num_samples, areas[f])).max(axis=1)) return max_hail_samples
<SYSTEM_TASK:> Load the forecast files into memory. <END_TASK> <USER_TASK:> Description: def load_forecasts(self): """ Load the forecast files into memory. """
run_date_str = self.run_date.strftime("%Y%m%d")
for model_name in self.model_names:
    self.raw_forecasts[model_name] = {}
    forecast_file = self.forecast_path + run_date_str + "/" + \
        model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member,
                                                                      run_date_str)
    forecast_obj = Dataset(forecast_file)
    forecast_hours = forecast_obj.variables["forecast_hour"][:]
    valid_hour_indices = np.where((self.start_hour <= forecast_hours) &
                                  (forecast_hours <= self.end_hour))[0]
    for size_threshold in self.size_thresholds:
        self.raw_forecasts[model_name][size_threshold] = \
            forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices]
    forecast_obj.close()
<SYSTEM_TASK:> Aggregate the forecasts within the specified time windows. <END_TASK> <USER_TASK:> Description: def get_window_forecasts(self): """ Aggregate the forecasts within the specified time windows. """
for model_name in self.model_names:
    self.window_forecasts[model_name] = {}
    for size_threshold in self.size_thresholds:
        self.window_forecasts[model_name][size_threshold] = \
            np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0)
                      for sl in self.hour_windows])
<SYSTEM_TASK:> Use a dilation filter to grow positive observation areas by a specified number of grid points <END_TASK> <USER_TASK:> Description: def dilate_obs(self, dilation_radius): """ Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return: """
for s in self.size_thresholds:
    self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape)
    for t in range(self.dilated_obs[s].shape[0]):
        self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s,
                                               iterations=dilation_radius)] = 1
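A standalone look at what a single dilation iteration does to one observed pixel; with scipy's default structuring element the positive area grows by one grid point in each cardinal direction.

import numpy as np
from scipy.ndimage import binary_dilation

obs = np.array([[0, 0, 0],
                [0, 1, 0],
                [0, 0, 0]])

# One iteration with the default cross-shaped structuring element.
print(binary_dilation(obs >= 1, iterations=1).astype(int))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]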
<SYSTEM_TASK:> Generate ROC Curve objects for each machine learning model, size threshold, and time window. <END_TASK> <USER_TASK:> Description: def roc_curves(self, prob_thresholds): """ Generate ROC Curve objects for each machine learning model, size threshold, and time window. :param prob_thresholds: Probability thresholds for the ROC Curve :param dilation_radius: Number of times to dilate the observation grid. :return: a dictionary of DistributedROC objects. """
all_roc_curves = {} for model_name in self.model_names: all_roc_curves[model_name] = {} for size_threshold in self.size_thresholds: all_roc_curves[model_name][size_threshold] = {} for h, hour_window in enumerate(self.hour_windows): hour_range = (hour_window.start, hour_window.stop) all_roc_curves[model_name][size_threshold][hour_range] = \ DistributedROC(prob_thresholds, 1) if self.obs_mask: all_roc_curves[model_name][size_threshold][hour_range].update( self.window_forecasts[model_name][size_threshold][h][ self.window_obs[self.mask_variable][h] > 0], self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0] ) else: all_roc_curves[model_name][size_threshold][hour_range].update( self.window_forecasts[model_name][size_threshold][h], self.dilated_obs[size_threshold][h] ) return all_roc_curves
<SYSTEM_TASK:> Output reliability curves for each machine learning model, size threshold, and time window. <END_TASK> <USER_TASK:> Description: def reliability_curves(self, prob_thresholds): """ Output reliability curves for each machine learning model, size threshold, and time window. :param prob_thresholds: Probability thresholds for the reliability curves :return: a dictionary of DistributedReliability objects. """
all_rel_curves = {} for model_name in self.model_names: all_rel_curves[model_name] = {} for size_threshold in self.size_thresholds: all_rel_curves[model_name][size_threshold] = {} for h, hour_window in enumerate(self.hour_windows): hour_range = (hour_window.start, hour_window.stop) all_rel_curves[model_name][size_threshold][hour_range] = \ DistributedReliability(prob_thresholds, 1) if self.obs_mask: all_rel_curves[model_name][size_threshold][hour_range].update( self.window_forecasts[model_name][size_threshold][h][ self.window_obs[self.mask_variable][h] > 0], self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0] ) else: all_rel_curves[model_name][size_threshold][hour_range].update( self.window_forecasts[model_name][size_threshold][h], self.dilated_obs[size_threshold][h] ) return all_rel_curves
<SYSTEM_TASK:> Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. <END_TASK> <USER_TASK:> Description: def load_map_coordinates(map_file): """ Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. Args: map_file: Filename for the file containing coordinate information. Returns: Latitude and longitude grids as numpy arrays. """
if map_file[-4:] == ".pkl": map_data = pickle.load(open(map_file, "rb")) lon = map_data['lon'] lat = map_data['lat'] else: map_data = Dataset(map_file) if "lon" in map_data.variables.keys(): lon = map_data.variables['lon'][:] lat = map_data.variables['lat'][:] else: lon = map_data.variables["XLONG"][0] lat = map_data.variables["XLAT"][0] return lon, lat
<SYSTEM_TASK:> Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. <END_TASK> <USER_TASK:> Description: def load_data(self): """ Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """
data = [] loaded_dates = [] loaded_indices = [] for t, timestamp in enumerate(self.all_dates): date_str = timestamp.date().strftime("%Y%m%d") full_path = self.path_start + date_str + "/" if self.variable in os.listdir(full_path): full_path += self.variable + "/" data_files = sorted(os.listdir(full_path)) file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files]) if timestamp in file_dates: data_file = data_files[np.where(timestamp==file_dates)[0][0]] print(full_path + data_file) if data_file[-2:] == "gz": subprocess.call(["gunzip", full_path + data_file]) file_obj = Nio.open_file(full_path + data_file[:-3]) else: file_obj = Nio.open_file(full_path + data_file) var_name = sorted(file_obj.variables.keys())[0] data.append(file_obj.variables[var_name][:]) if self.lon is None: self.lon = file_obj.variables["lon_0"][:] # Translates longitude values from 0:360 to -180:180 if np.count_nonzero(self.lon > 180) > 0: self.lon -= 360 self.lat = file_obj.variables["lat_0"][:] file_obj.close() if data_file[-2:] == "gz": subprocess.call(["gzip", full_path + data_file[:-3]]) else: subprocess.call(["gzip", full_path + data_file]) loaded_dates.append(timestamp) loaded_indices.append(t) if len(loaded_dates) > 0: self.loaded_dates = pd.DatetimeIndex(loaded_dates) self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999 self.data[loaded_indices] = np.array(data)
<SYSTEM_TASK:> Interpolates MRMS data to a different grid using cubic bivariate splines <END_TASK> <USER_TASK:> Description: def interpolate_grid(self, in_lon, in_lat): """ Interpolates MRMS data to a different grid using cubic bivariate splines """
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) for d in range(self.data.shape[0]): print("Loading ", d, self.variable, self.start_date) if self.data[d].max() > -999: step = self.data[d] step[step < 0] = 0 if self.lat[-1] < self.lat[0]: spline = RectBivariateSpline(self.lat[::-1], self.lon, step[::-1], kx=3, ky=3) else: spline = RectBivariateSpline(self.lat, self.lon, step, kx=3, ky=3) print("Evaluating", d, self.variable, self.start_date) flat_data = spline.ev(in_lat.ravel(), in_lon.ravel()) out_data[d] = flat_data.reshape(in_lon.shape) del spline else: print(d, " is missing") out_data[d] = -9999 return out_data
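The spline step above follows the usual RectBivariateSpline pattern: fit on the regular source grid (latitudes strictly increasing), then evaluate at the flattened target coordinates. A hedged standalone sketch with made-up grids:

import numpy as np
from scipy.interpolate import RectBivariateSpline

src_lat = np.linspace(30.0, 40.0, 50)       # must be strictly increasing
src_lon = np.linspace(-100.0, -90.0, 60)
field = np.random.rand(50, 60)              # placeholder field on the source grid

spline = RectBivariateSpline(src_lat, src_lon, field, kx=3, ky=3)

target_lat = np.array([[31.5, 32.0], [33.2, 34.8]])
target_lon = np.array([[-99.1, -98.4], [-97.7, -96.2]])
out = spline.ev(target_lat.ravel(), target_lon.ravel()).reshape(target_lat.shape)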
<SYSTEM_TASK:> Finds the largest value within a given radius of a point on the interpolated grid. <END_TASK> <USER_TASK:> Description: def max_neighbor(self, in_lon, in_lat, radius=0.05): """ Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data """
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T) out_indices = np.indices(out_data.shape[1:]) out_rows = out_indices[0].ravel() out_cols = out_indices[1].ravel() for d in range(self.data.shape[0]): nz_points = np.where(self.data[d] > 0) if len(nz_points[0]) > 0: nz_vals = self.data[d][nz_points] nz_rank = np.argsort(nz_vals) original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T) all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0) for n, neighbors in enumerate(all_neighbors): if len(neighbors) > 0: out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n] return out_data
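The neighbor search above relies on cKDTree.query_ball_tree to find every target grid point within the radius of each nonzero source point; because the source points are visited in ascending value order, the largest value written last wins. A toy sketch with assumed coordinates:

import numpy as np
from scipy.spatial import cKDTree

src_pts = np.array([[35.0, -97.0], [36.0, -96.0]])     # (lat, lon) of nonzero observations
src_vals = np.array([20.0, 45.0])
target_pts = np.array([[35.01, -97.02], [38.0, -90.0]])

src_tree = cKDTree(src_pts)
target_tree = cKDTree(target_pts)
matches = src_tree.query_ball_tree(target_tree, 0.05, p=2, eps=0)

out = np.zeros(len(target_pts))
for i, neighbor_idx in enumerate(matches):
    out[neighbor_idx] = src_vals[i]
# out -> [20.0, 0.0]: only the first target point lies within 0.05 degrees of a source point.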
<SYSTEM_TASK:> Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create <END_TASK> <USER_TASK:> Description: def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"): """ Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available. """
if interp_type == "spline": out_data = self.interpolate_grid(in_lon, in_lat) else: out_data = self.max_neighbor(in_lon, in_lat) if not os.access(out_path + self.variable, os.R_OK): try: os.mkdir(out_path + self.variable) except OSError: print(out_path + self.variable + " already created") out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable, self.start_date.strftime("%Y%m%d-%H:%M"), self.end_date.strftime("%Y%m%d-%H:%M")) out_obj = Dataset(out_file, "w") out_obj.createDimension("time", out_data.shape[0]) out_obj.createDimension("y", out_data.shape[1]) out_obj.createDimension("x", out_data.shape[2]) data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True, fill_value=-9999.0, least_significant_digit=3) data_var[:] = out_data data_var.long_name = self.variable data_var.coordinates = "latitude longitude" if "MESH" in self.variable or "QPE" in self.variable: data_var.units = "mm" elif "Reflectivity" in self.variable: data_var.units = "dBZ" elif "Rotation" in self.variable: data_var.units = "s-1" else: data_var.units = "" out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True) out_lon[:] = in_lon out_lon.units = "degrees_east" out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True) out_lat[:] = in_lat out_lat.units = "degrees_north" dates = out_obj.createVariable("time", "i8", ("time",), zlib=True) dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64) dates.long_name = "Valid date" dates.units = date_unit out_obj.Conventions="CF-1.6" out_obj.close() return
<SYSTEM_TASK:> Return a generator for data. <END_TASK> <USER_TASK:> Description: def get_data_generator_by_id(hardware_source_id, sync=True): """ Return a generator for data. :param bool sync: whether to wait for current frame to finish then collect next frame NOTE: a new ndarray is created for each call. """
hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id) def get_last_data(): return hardware_source.get_next_xdatas_to_finish()[0].data.copy() yield get_last_data
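Because this function is itself a generator that yields a data-getter, typical usage pulls the getter out with next() and then calls it once per frame. A hedged usage sketch; "example_camera" is a hypothetical hardware source id.

data_generator = get_data_generator_by_id("example_camera")
get_data = next(data_generator)   # the yielded callable
frame = get_data()                # fresh ndarray copy of the next completed frame
data_generator.close()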
<SYSTEM_TASK:> Parse config file for aliases and automatically register them. <END_TASK> <USER_TASK:> Description: def parse_hardware_aliases_config_file(config_path): """ Parse config file for aliases and automatically register them. Returns True if alias file was found and parsed (successfully or unsuccessfully). Returns False if alias file was not found. Config file is a standard .ini file with a section for each alias; each section provides device, hardware_alias, and display_name keys. """
if os.path.exists(config_path): logging.info("Parsing alias file {:s}".format(config_path)) try: config = configparser.ConfigParser() config.read(config_path) for section in config.sections(): device = config.get(section, "device") hardware_alias = config.get(section, "hardware_alias") display_name = config.get(section, "display_name") try: logging.info("Adding alias {:s} for device {:s}, display name: {:s} ".format(hardware_alias, device, display_name)) HardwareSourceManager().make_instrument_alias(device, hardware_alias, _(display_name)) except Exception as e: logging.info("Error creating hardware alias {:s} for device {:s} ".format(hardware_alias, device)) logging.info(traceback.format_exc()) except Exception as e: logging.info("Error reading alias file from: " + config_path) logging.info(traceback.format_exc()) return True return False
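The parser above expects each section of the .ini file to carry device, hardware_alias, and display_name keys; the section name itself is arbitrary. A minimal sketch that writes such a file with configparser, using hypothetical names:

import configparser

config = configparser.ConfigParser()
config["camera_alias"] = {
    "device": "example_camera_device",     # hypothetical device id
    "hardware_alias": "primary_camera",
    "display_name": "Primary Camera",
}
with open("hardware_aliases.ini", "w") as f:
    config.write(f)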
<SYSTEM_TASK:> Configure an alias. <END_TASK> <USER_TASK:> Description: def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name): """ Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display the alias to the user. Neither the original instrument or hardware source id nor the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias """
self.__aliases[alias_instrument_id] = (instrument_id, display_name) for f in self.aliases_updated: f()
<SYSTEM_TASK:> Called from hardware source when data starts streaming. <END_TASK> <USER_TASK:> Description: def start(self): """Called from hardware source when data starts streaming."""
old_start_count = self.__start_count self.__start_count += 1 if old_start_count == 0: self.data_channel_start_event.fire()
<SYSTEM_TASK:> Connect to the data item reference, creating a crop graphic if necessary. <END_TASK> <USER_TASK:> Description: def connect_data_item_reference(self, data_item_reference): """Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect. """
display_item = data_item_reference.display_item data_item = display_item.data_item if display_item else None if data_item and display_item: self.__connect_display(display_item) else: def data_item_reference_changed(): self.__data_item_reference_changed_event_listener.close() self.connect_data_item_reference(data_item_reference) # ugh. recursive mess. self.__data_item_reference_changed_event_listener = data_item_reference.data_item_reference_changed_event.listen(data_item_reference_changed)
<SYSTEM_TASK:> Grab the earliest data from the buffer, blocking until one is available. <END_TASK> <USER_TASK:> Description: def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the earliest data from the buffer, blocking until one is available."""
timeout = timeout if timeout is not None else 10.0 with self.__buffer_lock: if len(self.__buffer) == 0: done_event = threading.Event() self.__done_events.append(done_event) self.__buffer_lock.release() done = done_event.wait(timeout) self.__buffer_lock.acquire() if not done: raise Exception("Could not grab earliest.") return self.__buffer.pop(0)
<SYSTEM_TASK:> Grab the next data to finish from the buffer, blocking until one is available. <END_TASK> <USER_TASK:> Description: def grab_next(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to finish from the buffer, blocking until one is available."""
with self.__buffer_lock: self.__buffer = list() return self.grab_latest(timeout)
<SYSTEM_TASK:> Grab the next data to start from the buffer, blocking until one is available. <END_TASK> <USER_TASK:> Description: def grab_following(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to start from the buffer, blocking until one is available."""
self.grab_next(timeout) return self.grab_next(timeout)
<SYSTEM_TASK:> Resume recording after pause. <END_TASK> <USER_TASK:> Description: def resume(self) -> None: """Resume recording after pause. Thread safe and UI safe."""
with self.__state_lock: if self.__state == DataChannelBuffer.State.paused: self.__state = DataChannelBuffer.State.started
<SYSTEM_TASK:> Takes a mapping and returns the n keys associated with the largest values <END_TASK> <USER_TASK:> Description: def nlargest(n, mapping): """ Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``list(next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1]))))`` Returns ------- list of up to n keys from the mapping """
try: it = mapping.iteritems() except AttributeError: it = iter(mapping.items()) pq = minpq() try: for i in range(n): pq.additem(*next(it)) except StopIteration: pass try: while it: pq.pushpopitem(*next(it)) except StopIteration: pass out = list(pq.popkeys()) out.reverse() return out
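A short usage sketch of the helper above, with assumed counts:

word_counts = {"the": 120, "hail": 45, "storm": 60, "mm": 12}
top_two = nlargest(2, word_counts)
# top_two == ["the", "storm"]: the keys of the two largest values, in descending order.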
<SYSTEM_TASK:> Return a new pqdict mapping keys from an iterable to the same value. <END_TASK> <USER_TASK:> Description: def fromkeys(cls, iterable, value, **kwargs): """ Return a new pqdict mapping keys from an iterable to the same value. """
return cls(((k, value) for k in iterable), **kwargs)
<SYSTEM_TASK:> If ``key`` is in the pqdict, remove it and return its priority value, <END_TASK> <USER_TASK:> Description: def pop(self, key=__marker, default=__marker): """ If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty. """
heap = self._heap position = self._position # pq semantics: remove and return top *key* (value is discarded) if key is self.__marker: if not heap: raise KeyError('pqdict is empty') key = heap[0].key del self[key] return key # dict semantics: remove and return *value* mapped from key try: pos = position.pop(key) # raises KeyError except KeyError: if default is self.__marker: raise return default else: node_to_delete = heap[pos] end = heap.pop() if end is not node_to_delete: heap[pos] = end position[end.key] = pos self._reheapify(pos) value = node_to_delete.value del node_to_delete return value
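A hedged sketch of the two pop behaviors described above, assuming a min-oriented pqdict built from a plain dict (so the top item is the one with the smallest value):

pq = pqdict({"a": 3, "b": 1, "c": 2})
top_key = pq.pop()         # no key: removes and returns the top *key* -> "b"
value = pq.pop("c")        # with a key: removes "c" and returns its *value* -> 2
missing = pq.pop("z", -1)  # a default avoids the KeyError -> -1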
<SYSTEM_TASK:> Remove and return the item with highest priority. Raises ``KeyError`` <END_TASK> <USER_TASK:> Description: def popitem(self): """ Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """
heap = self._heap position = self._position try: end = heap.pop(-1) except IndexError: raise KeyError('pqdict is empty') if heap: node = heap[0] heap[0] = end position[end.key] = 0 self._sink(0) else: node = end del position[node.key] return node.key, node.value
<SYSTEM_TASK:> Return the item with highest priority. Raises ``KeyError`` if pqdict is <END_TASK> <USER_TASK:> Description: def topitem(self): """ Return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """
try: node = self._heap[0] except IndexError: raise KeyError('pqdict is empty') return node.key, node.value
<SYSTEM_TASK:> Add a new item. Raises ``KeyError`` if key is already in the pqdict. <END_TASK> <USER_TASK:> Description: def additem(self, key, value): """ Add a new item. Raises ``KeyError`` if key is already in the pqdict. """
if key in self._position: raise KeyError('%s is already in the queue' % repr(key)) self[key] = value
<SYSTEM_TASK:> Equivalent to inserting a new item followed by removing the top <END_TASK> <USER_TASK:> Description: def pushpopitem(self, key, value, node_factory=_Node): """ Equivalent to inserting a new item followed by removing the top priority item, but faster. Raises ``KeyError`` if the new key is already in the pqdict. """
heap = self._heap position = self._position precedes = self._precedes prio = self._keyfn(value) if self._keyfn else value node = node_factory(key, value, prio) if key in self: raise KeyError('%s is already in the queue' % repr(key)) if heap and precedes(heap[0].prio, node.prio): node, heap[0] = heap[0], node position[key] = 0 del position[node.key] self._sink(0) return node.key, node.value
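A sketch of pushpopitem on a min-oriented queue, assuming the same pqdict constructor as above: the new item goes in and the current top comes out in one pass, unless the new item would itself be the top, in which case it is returned without touching the heap.

pq = pqdict({"a": 5, "b": 1})
key, value = pq.pushpopitem("c", 3)
# key, value == ("b", 1): "c" entered the heap and the old top was removed.
key, value = pq.pushpopitem("d", 0)
# key, value == ("d", 0): the new priority already precedes the top, so the heap is unchanged.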
<SYSTEM_TASK:> Update the priority value of an existing item. Raises ``KeyError`` if <END_TASK> <USER_TASK:> Description: def updateitem(self, key, new_val): """ Update the priority value of an existing item. Raises ``KeyError`` if key is not in the pqdict. """
if key not in self._position: raise KeyError(key) self[key] = new_val
<SYSTEM_TASK:> Replace the key of an existing heap node in place. Raises ``KeyError`` <END_TASK> <USER_TASK:> Description: def replace_key(self, key, new_key): """ Replace the key of an existing heap node in place. Raises ``KeyError`` if the key to replace does not exist or if the new key is already in the pqdict. """
heap = self._heap position = self._position if new_key in self: raise KeyError('%s is already in the queue' % repr(new_key)) pos = position.pop(key) # raises appropriate KeyError position[new_key] = pos heap[pos].key = new_key
<SYSTEM_TASK:> Fast way to swap the priority level of two items in the pqdict. Raises <END_TASK> <USER_TASK:> Description: def swap_priority(self, key1, key2): """ Fast way to swap the priority level of two items in the pqdict. Raises ``KeyError`` if either key does not exist. """
heap = self._heap position = self._position if key1 not in self or key2 not in self: raise KeyError pos1, pos2 = position[key1], position[key2] heap[pos1].key, heap[pos2].key = key2, key1 position[key1], position[key2] = pos2, pos1
<SYSTEM_TASK:> Repair a broken heap. If the state of an item's priority value changes <END_TASK> <USER_TASK:> Description: def heapify(self, key=__marker): """ Repair a broken heap. If the state of an item's priority value changes you can re-sort the relevant item only by providing ``key``. """
if key is self.__marker: n = len(self._heap) for pos in reversed(range(n//2)): self._sink(pos) else: try: pos = self._position[key] except KeyError: raise KeyError(key) self._reheapify(pos)
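A sketch of the repair path above, assuming the constructor accepts a key function (as suggested by the _keyfn attribute used elsewhere in the class) and that a priority was changed behind the pqdict's back by mutating a value in place:

pq = pqdict({"a": [3], "b": [1]}, key=lambda v: v[0])
pq["a"][0] = 0     # the stored list was mutated, so the heap is now stale
pq.heapify("a")    # re-sift only the affected entry
pq.heapify()       # or rebuild the entire heap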
<SYSTEM_TASK:> Check to make sure _version.py is contained in the package <END_TASK> <USER_TASK:> Description: def package_has_version_file(package_name): """ Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name) return os.path.isfile(version_file_path)