Code
stringlengths
103
85.9k
Summary
sequencelengths
0
94
def _get_request(self, auth=None):
    ''' Build an HSRequest object for issuing API calls.

    Args:
        auth: Auth data to use; falls back to ``self.auth`` when not given.

    Returns:
        A HSRequest object
    '''
    # Prefer the explicit auth argument, otherwise reuse stored credentials.
    request = HSRequest(auth or self.auth, self.env)
    request.response_callback = self.response_callback
    self.request = request
    return self.request
[]
def _authenticate(self, email_address=None, password=None, api_key=None, access_token=None, access_token_type=None):
    ''' Create an authentication object for sending requests.

    Credentials are tried in priority order: OAuth access token, API key,
    then email address + password.

    Args:
        email_address (str): Email address of the account to make the requests
        password (str): Password of the account used with email address
        api_key (str): API Key. You can find your API key in
            https://www.hellosign.com/home/myAccount/current_tab/integrations
        access_token (str): OAuth access token
        access_token_type (str): Type of OAuth access token

    Raises:
        NoAuthMethod: If no authentication information found

    Returns:
        A HTTPBasicAuth or HSAccessTokenAuth object
    '''
    # Guard-clause style: first usable credential pair wins.
    if access_token and access_token_type:
        return HSAccessTokenAuth(access_token, access_token_type)
    if api_key:
        return HTTPBasicAuth(api_key, '')
    if email_address and password:
        return HTTPBasicAuth(email_address, password)
    raise NoAuthMethod("No authentication information found!")
[]
Please provide a description of the function:def _check_required_fields(self, fields=None, either_fields=None): ''' Check the values of the fields If no value found in `fields`, an exception will be raised. `either_fields` are the fields that one of them must have a value Raises: HSException: If no value found in at least one item of`fields`, or no value found in one of the items of `either_fields` Returns: None ''' for (key, value) in fields.items(): # If value is a dict, one of the fields in the dict is required -> # exception if all are None if not value: raise HSException("Field '%s' is required." % key) if either_fields is not None: for field in either_fields: if not any(field.values()): raise HSException("One of the following fields is required: %s" % ", ".join(field.keys()))
[]
def _send_signature_request(self, test_mode=False, client_id=None, files=None, file_urls=None, title=None, subject=None, message=None, signing_redirect_url=None, signers=None, cc_email_addresses=None, form_fields_per_document=None, use_text_tags=False, hide_text_tags=False, metadata=None, ux_version=None, allow_decline=False):
    ''' Shared logic for send_signature_request and send_signature_request_embedded.

    Args:
        test_mode (bool, optional): The request will not be legally binding when True. Defaults to False.
        client_id (str): Client id of the embedded app; when given, the embedded endpoint is used.
        files (list of str): The uploaded file(s) to send for signature.
        file_urls (list of str): URLs of files for HelloSign to download. Use either `files` or `file_urls`.
        title (str, optional): Title to assign to the SignatureRequest.
        subject (str, optional): Subject of the email sent to the signers.
        message (str, optional): Custom message of the email sent to the signers.
        signing_redirect_url (str, optional): URL signers are redirected to after signing.
        signers (list of dict): Signers, each with name, email_address, and optional order/pin.
        cc_email_addresses (list, optional): Email addresses to CC.
        form_fields_per_document (str): Serialized JSON describing the form fields per document
            (see https://www.hellosign.com/api/reference#SignatureRequest).
        use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields.
        hide_text_tags (bool, optional): Hide text tag areas.
        metadata (dict, optional): Metadata to associate with the signature request.
        ux_version (int): UX version, either 1 (default) or 2.
        allow_decline (bool, optional): Allows signers to decline to sign.

    Returns:
        A SignatureRequest object
    '''
    payload = {
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "form_fields_per_document": form_fields_per_document,
        "use_text_tags": self._boolean(use_text_tags),
        "hide_text_tags": self._boolean(hide_text_tags),
        "allow_decline": self._boolean(allow_decline),
    }
    if ux_version is not None:
        payload['ux_version'] = ux_version
    # Drop unset attributes, then merge in the indexed multi-value parameters.
    data = HSFormat.strip_none_values(payload)
    data.update(HSFormat.format_dict_list(signers, 'signers'))
    data.update(HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses'))
    data.update(HSFormat.format_file_url_params(file_urls))
    data.update(HSFormat.format_single_dict(metadata, 'metadata'))
    # The embedded endpoint is used whenever a client id is supplied.
    url = self.SIGNATURE_REQUEST_CREATE_EMBEDDED_URL if client_id else self.SIGNATURE_REQUEST_CREATE_URL
    request = self._get_request()
    return request.post(url, data=data, files=HSFormat.format_file_params(files))
[]
def _send_signature_request_with_template(self, test_mode=False, client_id=None, template_id=None, template_ids=None, title=None, subject=None, message=None, signing_redirect_url=None, signers=None, ccs=None, custom_fields=None, metadata=None, ux_version=None, allow_decline=False):
    ''' Shared logic for send_signature_request_with_template and
    send_signature_request_embedded_with_template.

    Args:
        test_mode (bool, optional): The request will not be legally binding when True. Defaults to False.
        client_id (str): Client id of the embedded app; when given, the embedded endpoint is used.
        template_id (str): Id of the Template to use. Mutually exclusive with template_ids.
        template_ids (list): Ids of the Templates to use. Mutually exclusive with template_id.
        title (str, optional): Title to assign to the SignatureRequest.
        subject (str, optional): Subject of the email sent to the signers.
        message (str, optional): Custom message of the email sent to the signers.
        signing_redirect_url (str, optional): URL signers are redirected to after signing.
        signers (list of dict): Signers, each with role_name, name, email_address, optional pin.
        ccs (list of dict, optional): CC entries, each with role_name and email_address.
            Required when a CC role exists for the Template.
        custom_fields (list of dict, optional): Custom fields, each shaped like `{name: value}`.
            Required when a CustomField exists in the Template.
        metadata (dict, optional): Metadata to associate with the signature request.
        ux_version (int): UX version, either 1 (default) or 2.
        allow_decline (bool, optional): Allows signers to decline to sign.

    Returns:
        A SignatureRequest object
    '''
    payload = {
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "template_id": template_id,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "allow_decline": self._boolean(allow_decline),
    }
    if ux_version is not None:
        payload['ux_version'] = ux_version
    data = HSFormat.strip_none_values(payload)
    data.update(HSFormat.format_dict_list(signers, 'signers', 'role_name'))
    data.update(HSFormat.format_dict_list(ccs, 'ccs', 'role_name'))
    data.update(HSFormat.format_custom_fields(custom_fields))
    data.update(HSFormat.format_single_dict(metadata, 'metadata'))
    if template_ids:
        # Multiple templates travel as indexed template_ids[n] parameters.
        data.update({"template_ids[%s]" % i: tid for i, tid in enumerate(template_ids)})
    url = (self.SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL
           if client_id else self.SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL)
    request = self._get_request()
    return request.post(url, data=data)
[]
def _create_unclaimed_draft(self, test_mode=False, client_id=None, is_for_embedded_signing=False, requester_email_address=None, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, requesting_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False):
    ''' Creates a new Draft that can be claimed using the claim URL

    Args:
        test_mode (bool, optional): Signature requests created from this draft
            will not be legally binding when True. Defaults to False.
        client_id (str): Client id of the app used to create the embedded draft.
        is_for_embedded_signing (bool): Whether this is also for embedded signing.
        requester_email_address (str): Email address of the requester when creating
            a draft for embedded requesting.
        files (list of str): The uploaded file(s) to send for signature.
        file_urls (list of str): URLs of files for HelloSign to download. Use either
            `files` or `file_urls`.
        draft_type (str): "send_document" to create a claimable file, or
            "request_signature" for a claimable signature request; in the latter
            case every signer must have both name and email_address.
        subject (str, optional): Subject of the email sent to the signers.
        message (str, optional): Custom message of the email sent to the signers.
        signers (list of dict): Signers, each with name, email_address and optional order.
        cc_email_addresses (list of str, optional): Email addresses to CC.
        signing_redirect_url (str, optional): URL signers are redirected to after signing.
        requesting_redirect_url (str, optional): URL the requester is redirected to
            after the request has been sent.
        form_fields_per_document (str): Serialized JSON describing the form fields
            (see https://www.hellosign.com/api/reference#SignatureRequest).
        metadata (dict, optional): Metadata to associate with the draft.
        use_preexisting_fields (bool): Whether to use preexisting PDF fields.
        allow_decline (bool, optional): Allows signers to decline to sign.

    Raises:
        HSException: If a "request_signature" draft has a signer missing name or email.

    Returns:
        An UnclaimedDraft object
    '''
    signers_payload = {}
    if signers:
        if draft_type == UnclaimedDraft.UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE:
            for signer in signers:
                # BUG FIX: both fields are mandatory for a claimable signature
                # request, so reject a signer when EITHER field is missing
                # (the original used `and`, only rejecting when both were absent).
                if "name" not in signer or "email_address" not in signer:
                    raise HSException("Signer's name and email are required")
        signers_payload = HSFormat.format_dict_list(signers, 'signers')
    # Files
    files_payload = HSFormat.format_file_params(files)
    # File URLs
    file_urls_payload = HSFormat.format_file_url_params(file_urls)
    # CCs
    cc_email_addresses_payload = HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses')
    # Metadata
    metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
    payload = {
        "test_mode": self._boolean(test_mode),
        "type": draft_type,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "form_fields_per_document": form_fields_per_document,
        "use_preexisting_fields": self._boolean(use_preexisting_fields),
        "allow_decline": self._boolean(allow_decline)
    }
    url = self.UNCLAIMED_DRAFT_CREATE_URL
    if client_id is not None:
        # Embedded drafts carry extra routing info and hit a different endpoint.
        payload.update({
            'client_id': client_id,
            'is_for_embedded_signing': '1' if is_for_embedded_signing else '0',
            'requester_email_address': requester_email_address,
            'requesting_redirect_url': requesting_redirect_url
        })
        url = self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL
    # remove attributes with none value
    payload = HSFormat.strip_none_values(payload)
    data = payload.copy()
    data.update(signers_payload)
    data.update(cc_email_addresses_payload)
    data.update(file_urls_payload)
    data.update(metadata_payload)
    request = self._get_request()
    response = request.post(url, data=data, files=files_payload)
    return response
[]
def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None):
    ''' Add or remove a user from a Template.

    Both tasks share this helper because they use the same API call shape.
    account_id takes precedence when both identifiers are supplied.

    Args:
        url (str): Endpoint prefix; template_id is appended to it.
        template_id (str): The id of the template
        account_id (str): ID of the account to add/remove access to/from
        email_address (str): The email_address of the account to add/remove access to/from

    Raises:
        HSException: If no email address or account_id specified

    Returns:
        A Template object
    '''
    if not email_address and not account_id:
        raise HSException("No email address or account_id specified")
    if account_id is not None:
        data = {"account_id": account_id}
    else:
        data = {"email_address": email_address}
    request = self._get_request()
    return request.post(url + template_id, data)
[]
def _add_remove_team_member(self, url, email_address=None, account_id=None):
    ''' Add or remove a team member.

    Both tasks share this helper because they use the same API call shape.
    account_id takes precedence when both identifiers are supplied.

    Args:
        url (str): Endpoint to post to.
        email_address (str): Email address of the Account to add/remove
        account_id (str): ID of the Account to add/remove

    Raises:
        HSException: If no email address or account_id specified

    Returns:
        A Team object
    '''
    if not email_address and not account_id:
        raise HSException("No email address or account_id specified")
    if account_id is not None:
        data = {"account_id": account_id}
    else:
        data = {"email_address": email_address}
    request = self._get_request()
    return request.post(url, data)
[]
def _create_embedded_template_draft(self, client_id, signer_roles, test_mode=False, files=None, file_urls=None, title=None, subject=None, message=None, cc_roles=None, merge_fields=None, use_preexisting_fields=False):
    ''' Helper method for creating embedded template drafts.
    See public function for params.
    '''
    data = {
        'test_mode': self._boolean(test_mode),
        'client_id': client_id,
        'title': title,
        'subject': subject,
        'message': message,
        'use_preexisting_fields': self._boolean(use_preexisting_fields),
    }
    data.update(HSFormat.format_file_url_params(file_urls))
    data.update(HSFormat.format_dict_list(signer_roles, 'signer_roles'))
    data.update(HSFormat.format_param_list(cc_roles, 'cc_roles'))
    if merge_fields is not None:
        # Merge fields travel as a single JSON-encoded blob, not indexed params.
        data['merge_fields'] = json.dumps(merge_fields)
    data = HSFormat.strip_none_values(data)
    request = self._get_request()
    return request.post(self.TEMPLATE_CREATE_EMBEDDED_DRAFT_URL,
                        data=data,
                        files=HSFormat.format_file_params(files))
[]
def _create_embedded_unclaimed_draft_with_template(self, test_mode=False, client_id=None, is_for_embedded_signing=False, template_id=None, template_ids=None, requester_email_address=None, title=None, subject=None, message=None, signers=None, ccs=None, signing_redirect_url=None, requesting_redirect_url=None, metadata=None, custom_fields=None, allow_decline=False):
    ''' Helper method for creating unclaimed drafts from templates.
    See public function for params.
    '''
    # Scalar parameters
    data = {
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "is_for_embedded_signing": self._boolean(is_for_embedded_signing),
        "template_id": template_id,
        "requester_email_address": requester_email_address,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "requesting_redirect_url": requesting_redirect_url,
        "allow_decline": self._boolean(allow_decline),
    }
    # Indexed multi-value parameters
    data.update(HSFormat.format_param_list(template_ids, 'template_ids'))
    data.update(HSFormat.format_dict_list(signers, 'signers', 'role_name'))
    data.update(HSFormat.format_dict_list(ccs, 'ccs', 'role_name'))
    data.update(HSFormat.format_single_dict(metadata, 'metadata'))
    data.update(HSFormat.format_custom_fields(custom_fields))
    data = HSFormat.strip_none_values(data)
    # Send call
    request = self._get_request()
    return request.post(self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL, data=data)
[]
def get_file(self, url, path_or_file=None, headers=None, filename=None):
    ''' Get a file from a url and save it to `path_or_file`

    Args:
        url (str): URL to send the request to
        path_or_file (str or file): A writable file-like object or a path to save the file to.
        headers (dict, optional): custom headers
        filename (str): [DEPRECATED] File name to save the file as; use `path_or_file`.

    Returns:
        True if the file is downloaded and written successfully, False otherwise.
    '''
    path_or_file = path_or_file or filename
    if self.debug:
        print("GET FILE: %s, headers=%s" % (url, headers))
    self.headers = self._get_default_headers()
    if headers is not None:
        self.headers.update(headers)
    response = requests.get(url, headers=self.headers, auth=self.auth, verify=self.verify_ssl)
    self.http_status_code = response.status_code
    try:
        # No need to check for warnings here
        self._check_error(response)
        try:
            # File-like object: write directly.
            path_or_file.write(response.content)
        except AttributeError:
            # Plain path: 'wb' creates the file and truncates any previous
            # contents. BUG FIX: os.open(O_CREAT | O_RDWR) did not truncate,
            # leaving stale trailing bytes when the new payload was shorter
            # than an existing file.
            with open(path_or_file, "wb") as f:
                f.write(response.content)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed; HTTP/write errors
        # still yield False as before.
        return False
    return True
[]
def get(self, url, headers=None, parameters=None, get_json=True):
    ''' Send a GET request with custom headers and parameters

    Args:
        url (str): URL to send the request to
        headers (dict, optional): custom headers
        parameters (dict, optional): optional parameters
        get_json (bool, optional): Return parsed JSON (default) or the raw response.

    Returns:
        A JSON object of the returned response if `get_json` is True,
        Requests' response object otherwise
    '''
    if self.debug:
        print("GET: %s, headers=%s" % (url, headers))
    self.headers = self._get_default_headers()
    # BUG FIX: copy the stored defaults so the per-call `parameters`
    # update below does not mutate self.parameters and leak parameters
    # into every subsequent request on this object.
    get_parameters = dict(self.parameters) if self.parameters else {}
    if headers is not None:
        self.headers.update(headers)
    if parameters is not None:
        get_parameters.update(parameters)
    response = requests.get(url, headers=self.headers, params=get_parameters, auth=self.auth, verify=self.verify_ssl)
    json_response = self._process_json_response(response)
    return json_response if get_json is True else response
[]
def post(self, url, data=None, files=None, headers=None, get_json=True):
    ''' Make a POST request to a url.

    The response is always run through _process_json_response (which
    performs error/warning checking) even when the raw response is returned.

    Args:
        url (str): URL to send the request to
        data (dict, optional): Data to send
        files (dict, optional): Files to send with the request
        headers (dict, optional): custom headers
        get_json (bool, optional): Return parsed JSON (default) or the raw response.

    Returns:
        A JSON object of the returned response if `get_json` is True,
        Requests' response object otherwise
    '''
    if self.debug:
        print("POST: %s, headers=%s" % (url, headers))
    request_headers = self._get_default_headers()
    if headers is not None:
        request_headers.update(headers)
    self.headers = request_headers
    response = requests.post(url, headers=self.headers, data=data,
                             auth=self.auth, files=files, verify=self.verify_ssl)
    json_response = self._process_json_response(response)
    return json_response if get_json is True else response
[]
Please provide a description of the function:def _get_json_response(self, resp): ''' Parse a JSON response ''' if resp is not None and resp.text is not None: try: text = resp.text.strip('\n') if len(text) > 0: return json.loads(text) except ValueError as e: if self.debug: print("Could not decode JSON response: \"%s\"" % resp.text) raise e
[]
Please provide a description of the function:def _process_json_response(self, response): ''' Process a given response ''' json_response = self._get_json_response(response) if self.response_callback is not None: json_response = self.response_callback(json_response) response._content = json.dumps(json_response) self.http_status_code = response.status_code self._check_error(response, json_response) self._check_warnings(json_response) return json_response
[]
Please provide a description of the function:def _check_error(self, response, json_response=None): ''' Check for HTTP error code from the response, raise exception if there's any Args: response (object): Object returned by requests' `get` and `post` methods json_response (dict): JSON response, if applicable Raises: HTTPError: If the status code of response is either 4xx or 5xx Returns: True if status code is not error code ''' # If status code is 4xx or 5xx, that should be an error if response.status_code >= 400: json_response = json_response or self._get_json_response(response) err_cls = self._check_http_error_code(response.status_code) try: raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code) # This is to catch error when we post get oauth data except TypeError: raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code) # Return True if everything is OK return True
[]
Please provide a description of the function:def _check_warnings(self, json_response): ''' Extract warnings from the response to make them accessible Args: json_response (dict): JSON response ''' self.warnings = None if json_response: self.warnings = json_response.get('warnings') if self.debug and self.warnings: for w in self.warnings: print("WARNING: %s - %s" % (w['warning_name'], w['warning_msg']))
[]
def from_response(self, response_data):
    ''' Build a new HSAccessTokenAuth straight from OAuth response data.

    Args:
        response_data (dict): Response data to use

    Returns:
        A HSAccessTokenAuth object
    '''
    token_args = (
        response_data['access_token'],
        response_data['token_type'],
        response_data['refresh_token'],
        response_data['expires_in'],
        response_data.get('state'),  # not always present
    )
    return HSAccessTokenAuth(*token_args)
[]
def find_response_component(self, api_id=None, signature_id=None):
    ''' Find one or many response components.

    Args:
        api_id (str): Api id associated with the component(s) to be retrieved.
        signature_id (str): Signature id associated with the component(s) to be retrieved.

    Raises:
        ValueError: If neither api_id nor signature_id is provided.

    Returns:
        A list of dictionaries containing component data
    '''
    if not api_id and not signature_id:
        raise ValueError('At least one of api_id and signature_id is required')
    components = list()
    if self.response_data:
        for component in self.response_data:
            # BUG FIX: the original tested (api_id and component['api_id']) == api_id,
            # which evaluated to None == None (True) for EVERY component whenever
            # api_id was omitted, returning all components regardless of signature_id.
            if (api_id and component['api_id'] == api_id) or \
                    (signature_id and component['signature_id'] == signature_id):
                components.append(component)
    return components
[]
def find_signature(self, signature_id=None, signer_email_address=None):
    ''' Return the first signature matching either parameter.

    Args:
        signature_id (str): Id of the signature to retrieve.
        signer_email_address (str): Email address of the associated signer
            for the signature to retrieve.

    Returns:
        A Signature object or None
    '''
    for signature in (self.signatures or []):
        found = (signature.signature_id == signature_id
                 or signature.signer_email_address == signer_email_address)
        if found:
            return signature
[]
Please provide a description of the function:def _uncamelize(self, s): ''' Convert a camel-cased string to using underscores ''' res = '' if s: for i in range(len(s)): if i > 0 and s[i].lower() != s[i]: res += '_' res += s[i].lower() return res
[]
def format_file_params(files):
    ''' Utility method for formatting file parameters for transmission.

    Each path in *files* is opened in binary mode and keyed as file[n].
    '''
    if not files:
        return {}
    return {
        "file[%d]" % index: open(path, 'rb')
        for index, path in enumerate(files)
    }
[]
def format_file_url_params(file_urls):
    ''' Utility method for formatting file URL parameters for transmission.

    Each URL is keyed as file_url[n].
    '''
    if not file_urls:
        return {}
    return {
        "file_url[%d]" % index: url
        for index, url in enumerate(file_urls)
    }
[]
def format_param_list(listed_params, output_name):
    ''' Utility method for formatting lists of parameters for api consumption.

    Useful for email address lists, etc.

    Args:
        listed_params (list of values) - the list to format
        output_name (str) - the parameter name to prepend to each key
    '''
    if not listed_params:
        return {}
    return {
        str(output_name) + "[" + str(index) + "]": item
        for index, item in enumerate(listed_params)
    }
[]
def format_dict_list(list_of_dicts, output_name, key=None):
    ''' Utility method for formatting lists of dictionaries for api consumption.

    Turns [{name: v1, email: v2}, ...] into flat parameters such as
    signers[0][name] = v1, signers[0][email] = v2, ...

    Args:
        list_of_dicts (list of dicts) - the list to format
        output_name (str) - the parameter name to prepend to each key
        key (str, optional) - substitute the value at this key for the
            numeric index (e.g. signers[Lawyer][name]); that key is then
            stripped from the emitted parameters.
    '''
    output_payload = {}
    if not list_of_dicts:
        return output_payload
    for index, entry in enumerate(list_of_dicts):
        base_name = output_name + '[' + (entry[key] if key else str(index)) + ']'
        for param, value in entry.items():
            if param == key:
                continue  # the key field is consumed by the index itself
            output_payload[base_name + '[' + param + ']'] = value
    return output_payload
[]
def format_single_dict(dictionary, output_name):
    ''' Flatten a single dict (currently used for metadata) into
    output_name[key] = value parameters. '''
    if not dictionary:
        return {}
    return {
        output_name + '[' + field + ']': value
        for field, value in dictionary.items()
    }
[]
def format_custom_fields(list_of_custom_fields):
    ''' Format custom fields for submission.

    Each entry is a single-pair dict {"name": value}; all pairs are flattened
    into custom_fields[name] = value parameters.
    '''
    output_payload = {}
    if list_of_custom_fields:
        for custom_field in list_of_custom_fields:
            for field_name, field_value in custom_field.items():
                output_payload["custom_fields[" + field_name + "]"] = field_value
    return output_payload
[]
def set_logscale(self, t=True):
    ''' Toggle log10 scaling of the image matrix.

    When enabling, the image M becomes log10(M + 1); when disabling, it is
    restored via 10**M - 1. A no-op when the current flag already equals *t*.

    Args:
        t (bool): True to switch to log scale, False to switch back to linear.
    '''
    if t == self.get_logscale():
        return  # already in the requested scale
    if t:
        self.__image = np.log10(self.__image + 1)
        self.__logscale_flag = True
    else:
        self.__image = 10 ** self.__image - 1.
        self.__logscale_flag = False
[ "\n - set_logscale(): If M is the matrix of the image, it defines the image M as log10(M+1).\n " ]
def histogram(self, axis=None, **kargs):
    ''' Compute and show the histogram of the image's pixel values.

    Useful for choosing a proper output scale or for clipping values.

    Args:
        axis (matplotlib axis, optional): Axis to draw on; defaults to the
            current axis (``plt.gca()``) when None.
        **kargs: Forwarded to ``axis.hist`` (bins, range, histtype, ...).
    '''
    # BUG FIX (idiom): test identity with `is None` — `== None` invokes
    # __eq__, which arbitrary axis-like objects may override.
    if axis is None:
        axis = plt.gca()
    axis.hist(self.__image.ravel(), **kargs)
[ "\n - histogram(axis=None, **kargs): It computes and shows the histogram of the image. This is \n usefull for choosing a proper scale to the output, or for clipping some values. If \n axis is None, it selects the current axis to plot the histogram.\n \n Keyword arguments:\n \n *bins*:\n Either an integer number of bins or a sequence giving the\n bins. If *bins* is an integer, *bins* + 1 bin edges\n will be returned, consistent with :func:`numpy.histogram`\n for numpy version >= 1.3, and with the *new* = True argument\n in earlier versions.\n Unequally spaced bins are supported if *bins* is a sequence.\n \n *range*:\n The lower and upper range of the bins. Lower and upper outliers\n are ignored. If not provided, *range* is (x.min(), x.max()).\n Range has no effect if *bins* is a sequence.\n \n If *bins* is a sequence or *range* is specified, autoscaling\n is based on the specified bin range instead of the\n range of x.\n \n *normed*:\n If *True*, the first element of the return tuple will\n be the counts normalized to form a probability density, i.e.,\n ``n/(len(x)*dbin)``. In a probability density, the integral of\n the histogram should be 1; you can verify that with a\n trapezoidal integration of the probability density function::\n \n pdf, bins, patches = ax.hist(...)\n print(np.sum(pdf * np.diff(bins)))\n \n .. note::\n \n Until numpy release 1.5, the underlying numpy\n histogram function was incorrect with *normed*=*True*\n if bin sizes were unequal. MPL inherited that\n error. It is now corrected within MPL when using\n earlier numpy versions\n \n *weights*:\n An array of weights, of the same shape as *x*. Each value in\n *x* only contributes its associated weight towards the bin\n count (instead of 1). 
If *normed* is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n \n *cumulative*:\n If *True*, then a histogram is computed where each bin\n gives the counts in that bin plus all bins for smaller values.\n The last bin gives the total number of datapoints. If *normed*\n is also *True* then the histogram is normalized such that the\n last bin equals 1. If *cumulative* evaluates to less than 0\n (e.g. -1), the direction of accumulation is reversed. In this\n case, if *normed* is also *True*, then the histogram is normalized\n such that the first bin equals 1.\n \n *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]\n The type of histogram to draw.\n \n - 'bar' is a traditional bar-type histogram. If multiple data\n are given the bars are aranged side by side.\n \n - 'barstacked' is a bar-type histogram where multiple\n data are stacked on top of each other.\n \n - 'step' generates a lineplot that is by default\n unfilled.\n \n - 'stepfilled' generates a lineplot that is by default\n filled.\n \n *align*: ['left' | 'mid' | 'right' ]\n Controls how the histogram is plotted.\n \n - 'left': bars are centered on the left bin edges.\n \n - 'mid': bars are centered between the bin edges.\n \n - 'right': bars are centered on the right bin edges.\n \n *orientation*: [ 'horizontal' | 'vertical' ]\n If 'horizontal', :func:`~matplotlib.pyplot.barh` will be\n used for bar-type histograms and the *bottom* kwarg will be\n the left edges.\n \n *rwidth*:\n The relative width of the bars as a fraction of the bin\n width. If *None*, automatically compute the width. Ignored\n if *histtype* = 'step' or 'stepfilled'.\n \n *log*:\n If *True*, the histogram axis will be set to a log scale.\n If *log* is *True* and *x* is a 1D array, empty bins will\n be filtered out and only the non-empty (*n*, *bins*,\n *patches*) will be returned.\n \n *color*:\n Color spec or sequence of color specs, one per\n dataset. 
Default (*None*) uses the standard line\n color sequence.\n \n *label*:\n String, or sequence of strings to match multiple\n datasets. Bar charts yield multiple patches per\n dataset, but only the first gets the label, so\n that the legend command will work as expected::\n \n ax.hist(10+2*np.random.randn(1000), label='men')\n ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)\n ax.legend()\n \n \n kwargs are used to update the properties of the\n :class:`~matplotlib.patches.Patch` instances returned by *hist*:\n \n agg_filter: unknown\n alpha: float or None \n animated: [True | False] \n antialiased or aa: [True | False] or None for default \n axes: an :class:`~matplotlib.axes.Axes` instance \n clip_box: a :class:`matplotlib.transforms.Bbox` instance \n clip_on: [True | False] \n clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] \n color: matplotlib color spec\n contains: a callable function \n edgecolor or ec: mpl color spec, or None for default, or 'none' for no color \n facecolor or fc: mpl color spec, or None for default, or 'none' for no color \n figure: a :class:`matplotlib.figure.Figure` instance \n fill: [True | False] \n gid: an id string \n hatch: [ '/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ] \n label: any string \n linestyle or ls: ['solid' | 'dashed' | 'dashdot' | 'dotted'] \n linewidth or lw: float or None for default \n lod: [True | False] \n path_effects: unknown\n picker: [None|float|boolean|callable] \n rasterized: [True | False | None] \n snap: unknown\n transform: :class:`~matplotlib.transforms.Transform` instance \n url: a url string \n visible: [True | False] \n zorder: any number \n " ]
# Please provide a description of the function:
def save(self, outputfile, **kargs):
    """
    Save the rendered image to *outputfile* using matplotlib's
    ``pyplot.imsave``. *outputfile* is a path or file-like object; the
    output format is deduced from the file extension unless a *format*
    keyword is given. Any extra keyword arguments (``vmin``/``vmax``,
    ``cmap``, ``origin``, ``dpi``, ...) are forwarded verbatim.
    """
    plt.imsave(outputfile, self.__image, **kargs)
[ "\n - Save the image in some common image formats. It uses the pyplot.save \n method. \n outputfile is a string containing a path to a filename, \n of a Python file-like object. If *format* is *None* and\n *fname* is a string, the output format is deduced from\n the extension of the filename.\n\n Keyword arguments:\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* set the color scaling for the image by fixing the\n values that map to the colormap color limits. If either *vmin* or *vmax*\n is None, that limit is determined from the *arr* min/max value.\n *cmap*:\n cmap is a colors.Colormap instance, eg cm.jet.\n If None, default to the rc image.cmap value.\n *format*:\n One of the file extensions supported by the active\n backend. Most backends support png, pdf, ps, eps and svg.\n *origin*\n [ 'upper' | 'lower' ] Indicates where the [0,0] index of\n the array is in the upper left or lower left corner of\n the axes. Defaults to the rc image.origin value.\n *dpi*\n The DPI to store in the metadata of the file. This does not affect the\n resolution of the output image.\n " ]
# Please provide a description of the function:
def set_autocamera(self, mode='density'):
    """
    Let the Scene's Camera pick its own point of view from the particle
    distribution. *mode* selects the heuristic:
    'minmax' | 'density' | 'median' | 'mean'.
    After the camera is placed, the scene (projected coordinates,
    smoothing lengths, active-particle indices and masses) is recomputed.
    """
    self.Camera.set_autocamera(self._Particles, mode=mode)
    self._camera_params = self.Camera.get_params()
    # Recompute the projection for the freshly chosen camera.
    projected = self.__compute_scene()
    self._x, self._y, self._hsml, self._kview = projected
    self._m = self._Particles._mass[self._kview]
[ "\n - set_autocamera(mode='density'): By default, Scene defines its \n own Camera. However, there is no a general way for doing so. Scene \n uses a density criterion for getting the point of view. If this is \n not a good option for your problem, you can choose among:\n |'minmax'|'density'|'median'|'mean'|. If None of the previous methods\n work well, you may define the camera params by yourself.\n " ]
# Please provide a description of the function:
def get_scene(self):
    """
    Return the current scene as the tuple
    ``(x, y, hsml, mass, kview)``: projected particle coordinates,
    smoothing lengths, masses, and the indices of the particles that
    are active in the scene. Primarily for internal use.
    """
    scene_tuple = (self._x, self._y, self._hsml, self._m, self._kview)
    return scene_tuple
[ "\n - get_scene(): It return the x and y position, the smoothing length \n of the particles and the index of the particles that are active in \n the scene. In principle this is an internal function and you don't \n need this data. \n " ]
# Please provide a description of the function:
def update_camera(self, **kargs):
    """
    Update the Camera with new parameters (see sphviewer.Camera for the
    accepted keywords) and recompute the projected scene accordingly.
    """
    self.Camera.set_params(**kargs)
    # The projection is stale once the camera moves; rebuild it.
    projected = self.__compute_scene()
    self._x, self._y, self._hsml, self._kview = projected
    self._m = self._Particles._mass[self._kview]
[ "\n - update_camera(**kwarg): By using this method you can define all \n the new paramenters of the camera. Read the available **kwarg in \n the sphviewer.Camera documentation. \n " ]
# Please provide a description of the function:
def plot(self,axis=None,**kargs):
    """Plot the particles at their apparent (projected) coordinates, as
    seen by the Scene's camera.

    :param axis: matplotlib axis to draw on; defaults to the current
        axis (``plt.gca()``) when None.
    :param kargs: forwarded to matplotlib's ``axis.plot`` (Line2D
        properties).
    """
    if(axis == None):
        axis = plt.gca()
    # NOTE(review): this reads self.__x/self.__y, while set_autocamera()
    # and update_camera() store the projection in self._x/self._y.
    # Confirm that the class really assigns __x/__y somewhere else;
    # otherwise this plots stale (or missing) coordinates.
    axis.plot(self.__x, self.__y, 'k.', **kargs)
[ "\n - plot(axis=None, **kwarg): Finally, sphviewer.Scene class has its own plotting method. \n It shows the scene as seen by the camera. It is to say, it plots the particles according\n to their aparent coordinates; axis makes a reference to an existing axis. In case axis is None,\n the plot is made on the current axis.\n\n The kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n agg_filter: unknown\n alpha: float (0.0 transparent through 1.0 opaque) \n animated: [True | False] \n antialiased or aa: [True | False] \n axes: an :class:`~matplotlib.axes.Axes` instance \n clip_box: a :class:`matplotlib.transforms.Bbox` instance \n clip_on: [True | False] \n clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] \n color or c: any matplotlib color \n contains: a callable function \n dash_capstyle: ['butt' | 'round' | 'projecting'] \n dash_joinstyle: ['miter' | 'round' | 'bevel'] \n dashes: sequence of on/off ink in points \n data: 2D array (rows are x, y) or two 1D arrays \n drawstyle: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] \n figure: a :class:`matplotlib.figure.Figure` instance \n fillstyle: ['full' | 'left' | 'right' | 'bottom' | 'top'] \n gid: an id string \n label: any string \n linestyle or ls: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ] and any drawstyle in combination with a linestyle, e.g. ``'steps--'``. 
\n linewidth or lw: float value in points \n lod: [True | False] \n marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ]\n markeredgecolor or mec: any matplotlib color \n markeredgewidth or mew: float value in points \n markerfacecolor or mfc: any matplotlib color \n markerfacecoloralt or mfcalt: any matplotlib color \n markersize or ms: float \n markevery: None | integer | (startind, stride)\n picker: float distance in points or callable pick function ``fn(artist, event)`` \n pickradius: float distance in points \n rasterized: [True | False | None] \n snap: unknown\n solid_capstyle: ['butt' | 'round' | 'projecting'] \n solid_joinstyle: ['miter' | 'round' | 'bevel'] \n transform: a :class:`matplotlib.transforms.Transform` instance \n url: a url string \n visible: [True | False] \n xdata: 1D array \n ydata: 1D array \n zorder: any number \n \n kwargs *scalex* and *scaley*, if defined, are passed on to\n :meth:`~matplotlib.axes.Axes.autoscale_view` to determine\n whether the *x* and *y* axes are autoscaled; the default is\n *True*.\n \n Additional kwargs: hold = [True|False] overrides default hold state\n " ]
# Please provide a description of the function:
def plot(self, plane, axis=None, **kargs):
    """Plot an orthogonal projection of the stored particles.

    :param plane: which projection to draw: 'xy' | 'xz' | 'yz'.
    :param axis: matplotlib axis to draw on; defaults to the current
        axis (``plt.gca()``) when None.
    :param kargs: forwarded to matplotlib's ``axis.plot``.

    Bug fix: the column indices were wrong for every plane —
    'xy' plotted x against x, 'xz' used the y column for the abscissa,
    and 'yz' plotted z against z. Columns of ``self._pos`` are assumed
    to be (x, y, z).
    """
    if(axis == None):
        axis = plt.gca()
    if(plane == 'xy'):
        axis.plot(self._pos[:, 0], self._pos[:, 1], 'k.', **kargs)
    elif(plane == 'xz'):
        axis.plot(self._pos[:, 0], self._pos[:, 2], 'k.', **kargs)
    elif(plane == 'yz'):
        axis.plot(self._pos[:, 1], self._pos[:, 2], 'k.', **kargs)
[ "\n Use this method to plot the set of particles stored by the Particles class.\n In order to plot the distribution of Particles, a *plane* parameter must be given.\n \"plane\" is one of the available orthogonal projections of the particles: \n |'xy'|'xz'|'yz'|. If there is multiple axes defined, the active one can be \n selected using the axis parameter. If axis paremeter is None (default), the \n distribution of particles is plotted in the active axis, which is returned by \n the matplotlib.pyplot.gca() method.\n " ]
# Please provide a description of the function:
def __det_hsml_old(self, pos, nb):
    """Compute each particle's smoothing length (hsml) as the distance
    to its *nb*-th nearest neighbour, parallelised over all CPU cores.

    :param pos: particle positions; assumed shape (3, n) — the work is
        split along axis=1 (one chunk per process). TODO confirm shape.
    :param nb: number of neighbours used by the hsml estimate.
    :return: 1-D numpy array of smoothing lengths, restored to the
        original particle order.
    """
    manager = Manager()
    out_hsml = manager.Queue()
    size = multiprocessing.cpu_count()

    if self.__verbose:
        print('Building a KDTree...')
    tree = self.__make_kdtree(pos)

    index = np.arange(np.shape(pos)[1])
    #I split the job among the number of available processors
    pos = np.array_split(pos, size, axis=1)

    procs = []
    #We distribute the tasks among different processes
    if self.__verbose:
        print('Searching the ', nb, 'closer neighbors to each particle...')
    for rank in range(size):
        task = multiprocessing.Process(target=self.__nbsearch,
                                       args=(pos[rank], nb, tree, out_hsml, rank))
        procs.append(task)
        task.start()

    #Wait until all processes finish
    for p in procs:
        p.join()

    # Each queue entry is (worker rank, hsml chunk); entries arrive in
    # arbitrary completion order, so keep the rank for re-sorting below.
    index = []
    hsml = []
    for i in range(size):
        a, b = out_hsml.get()
        index.append(a)
        hsml.append(b)
        # if a == 0: print(b[0])

    #I have to order the data before return it
    k = np.argsort(index)
    hsml1 = np.array([])
    for i in k:
        hsml1 = np.append(hsml1, hsml[i])
    if self.__verbose:
        print('Done...')
    return hsml1
[ "\n Use this function to find the smoothing lengths of the particles.\n hsml = det_hsml(pos, nb)\n " ]
# Please provide a description of the function:
def read(fname, fail_silently=False):
    """
    Read and return the content of *fname*, resolved relative to the
    directory containing this file.

    :param fname: file name relative to this module's directory.
    :param fail_silently: when True, return '' instead of raising on
        any read/decode error.
    """
    try:
        filepath = os.path.join(os.path.dirname(__file__), fname)
        with io.open(filepath, 'rt', encoding='utf8') as f:
            return f.read()
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions.
        if not fail_silently:
            raise
        return ''
[ "\n Read the content of the given file. The path is evaluated from the\n directory containing this file.\n " ]
# Please provide a description of the function:
def pass_verbosity(f):
    """
    Decorator: call *f* with the current Click context's verbosity
    injected as the ``verbosity`` keyword argument.
    """
    def wrapper(*args, **kwargs):
        ctx = click.get_current_context()
        kwargs['verbosity'] = ctx.verbosity
        return f(*args, **kwargs)
    return update_wrapper(wrapper, f)
[ "\n Marks a callback as wanting to receive the verbosity as a keyword argument.\n " ]
# Please provide a description of the function:
def run_from_argv(self, argv):
    """
    Entry point when the command is run from the command line.

    Drops the first two argv entries (program and subcommand name),
    delegates to Click's ``main`` without standalone mode, and converts
    a ClickException into ``sys.exit`` unless its context asked for a
    full traceback.
    """
    try:
        return self.main(args=argv[2:], standalone_mode=False)
    except click.ClickException as exc:
        if getattr(exc.ctx, 'traceback', False):
            raise
        exc.show()
        sys.exit(exc.exit_code)
[ "\n Called when run from the command line.\n " ]
# Please provide a description of the function:
def execute(self, *args, **kwargs):
    """
    Called when run through ``call_command``. *args* are passed through,
    while *kwargs* come from the parser created by
    ``self.create_parser('', name)`` merged with the kwargs given to
    ``call_command``.
    """
    # Remove internal Django command handling machinery
    kwargs.pop('skip_checks', None)
    parent_ctx = click.get_current_context(silent=True)
    with self.make_context('', list(args), parent=parent_ctx) as ctx:
        # Rename kwargs to the appropriate destination argument name.
        # ``kwargs`` is a plain dict, so the six.iteritems() py2 shim
        # was unnecessary; use .items() directly.
        opt_mapping = dict(self.map_names())
        arg_options = {opt_mapping.get(key, key): value
                       for key, value in kwargs.items()}

        # Update the context with the passed (renamed) kwargs
        ctx.params.update(arg_options)

        # Invoke the command
        self.invoke(ctx)
[ "\n Called when run through `call_command`. `args` are passed through,\n while `kwargs` is the __dict__ of the return value of\n `self.create_parser('', name)` updated with the kwargs passed to\n `call_command`.\n " ]
# Please provide a description of the function:
def encrypt(data, key):
    '''Encrypt *data* with *key* using the XXTEA C library.

    Both arguments are coerced to bytes via ``__tobytes``; the raw
    ciphertext is returned as a bytes object.
    '''
    data = __tobytes(data)
    data_len = len(data)
    data = ffi.from_buffer(data)
    key = ffi.from_buffer(__tobytes(key))
    # out_len receives the ciphertext length from the C routine.
    out_len = ffi.new('size_t *')
    result = lib.xxtea_encrypt(data, data_len, key, out_len)
    # Copy the C buffer into a Python bytes object, then release it.
    ret = ffi.buffer(result, out_len[0])[:]
    lib.free(result)
    return ret
[]
# Please provide a description of the function:
def decrypt(data, key):
    '''Decrypt XXTEA ciphertext *data* with *key*.

    Unlike ``encrypt``, *data* is not passed through ``__tobytes`` here
    — callers are expected to supply bytes already; only the key is
    coerced. Returns the plaintext as bytes.
    '''
    data_len = len(data)
    data = ffi.from_buffer(data)
    key = ffi.from_buffer(__tobytes(key))
    # out_len receives the plaintext length from the C routine.
    out_len = ffi.new('size_t *')
    result = lib.xxtea_decrypt(data, data_len, key, out_len)
    # Copy the C buffer into a Python bytes object, then release it.
    ret = ffi.buffer(result, out_len[0])[:]
    lib.free(result)
    return ret
[]
# Please provide a description of the function:
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
    """
    Take a flask.Flask instance and run it, configured from
    command-line flags: -H/--host, -P/--port, plus the hidden
    -d/--debug and -p/--profile switches.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-H", "--host",
        default=default_host,
        help="Hostname of the Flask app " + "[default %s]" % default_host,
    )
    parser.add_option(
        "-P", "--port",
        default=default_port,
        help="Port for the Flask app " + "[default %s]" % default_port,
    )

    # Debugging switches: deliberately excluded from --help output.
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      help=optparse.SUPPRESS_HELP)
    parser.add_option("-p", "--profile", action="store_true", dest="profile",
                      help=optparse.SUPPRESS_HELP)

    opts, _ = parser.parse_args()

    if opts.profile:
        # Profiling wraps the WSGI app and implies debug mode.
        from werkzeug.contrib.profiler import ProfilerMiddleware

        app.config["PROFILE"] = True
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
        opts.debug = True

    app.run(debug=opts.debug, host=opts.host, port=int(opts.port))
[ "\n Takes a flask.Flask instance and runs it. Parses\n command-line flags to configure the app.\n " ]
# Please provide a description of the function:
def get_randomized_guid_sample(self, item_count):
    """Return up to *item_count* GUIDs sampled at random from the
    whitelist.

    Fix: copy the whitelist before shuffling — ``random.shuffle``
    works in place, so shuffling the list returned by
    ``get_whitelist`` reordered the caller's (possibly cached) data
    as a side effect.
    """
    dataset = list(self.get_whitelist())
    random.shuffle(dataset)
    return dataset[:item_count]
[ " Fetch a subset of randomzied GUIDs from the whitelist " ]
# Please provide a description of the function:
def can_recommend(self, client_data, extra_data={}):
    """The Curated recommender can always recommend something, so this
    unconditionally returns True (and logs that fact)."""
    result = True
    self.logger.info("Curated can_recommend: {}".format(result))
    return result
[ "The Curated recommender will always be able to recommend\n something" ]
# Please provide a description of the function:
def recommend(self, client_data, limit, extra_data={}):
    """
    Curated recommendations are just a random selection from the
    curated whitelist; each GUID is returned with a fixed weight of 1.0.
    """
    sampled_guids = self._curated_wl.get_randomized_guid_sample(limit)
    log_data = (client_data["client_id"], str(sampled_guids))
    self.logger.info(
        "Curated recommendations client_id: [%s], guids: [%s]" % log_data
    )
    return [(guid, 1.0) for guid in sampled_guids]
[ "\n Curated recommendations are just random selections\n " ]
# Please provide a description of the function:
def can_recommend(self, client_data, extra_data={}):
    """The hybrid recommender can recommend only when both of its
    components (ensemble and curated) can."""
    from_ensemble = self._ensemble_recommender.can_recommend(
        client_data, extra_data
    )
    from_curated = self._curated_recommender.can_recommend(
        client_data, extra_data
    )
    result = from_ensemble and from_curated
    self.logger.info("Hybrid can_recommend: {}".format(result))
    return result
[ "The ensemble recommender is always going to be\n available if at least one recommender is available" ]
# Please provide a description of the function:
def recommend(self, client_data, limit, extra_data={}):
    """Interleave recommendations from the ensemble and curated
    recommenders, alternating one pick from each so neither is biased,
    and return them rank-ordered by weight (descending).

    Duplicate GUIDs across the two sources are dropped. If fewer than
    *limit* unique results can be assembled, an empty list is returned.
    """
    preinstalled_addon_ids = client_data.get("installed_addons", [])

    # Compute an extended limit by adding the length of
    # the list of any preinstalled addons.
    extended_limit = limit + len(preinstalled_addon_ids)

    ensemble_suggestions = self._ensemble_recommender.recommend(
        client_data, extended_limit, extra_data
    )
    curated_suggestions = self._curated_recommender.recommend(
        client_data, extended_limit, extra_data
    )

    # Alternate one pick from each recommender so neither dominates.
    # NOTE(review): list.pop() takes items from the *end* of each
    # suggestion list — confirm the sub-recommenders' ordering puts the
    # preferred items there; otherwise this favours the lowest-ranked
    # suggestions.
    merged_results = set()

    while (
        len(merged_results) < limit
        and len(ensemble_suggestions) > 0
        and len(curated_suggestions) > 0
    ):
        r1 = ensemble_suggestions.pop()
        if r1[0] not in [temp[0] for temp in merged_results]:
            merged_results.add(r1)

        # Terminate early if we have an odd number for the limit
        if not (
            len(merged_results) < limit
            and len(ensemble_suggestions) > 0
            and len(curated_suggestions) > 0
        ):
            break

        r2 = curated_suggestions.pop()
        if r2[0] not in [temp[0] for temp in merged_results]:
            merged_results.add(r2)

    if len(merged_results) < limit:
        # Not enough unique suggestions: fail closed with no results.
        msg = (
            "Defaulting to empty results. Insufficient recommendations found for client: %s"
            % client_data["client_id"]
        )
        self.logger.info(msg)
        return []

    # Highest weight first; ties fall back to set iteration order.
    sorted_results = sorted(
        list(merged_results), key=op.itemgetter(1), reverse=True
    )

    log_data = (client_data["client_id"], str([r[0] for r in sorted_results]))
    self.logger.info(
        "Hybrid recommendations client_id: [%s], guids: [%s]" % log_data
    )
    return sorted_results
[ "\n Hybrid recommendations simply select half recommendations from\n the ensemble recommender, and half from the curated one.\n\n Duplicate recommendations are accomodated by rank ordering\n by weight.\n " ]
# Please provide a description of the function:
def can_recommend(self, client_data, extra_data={}):
    """Return the number of sub-recommenders able to serve this client;
    the ensemble is usable whenever that count is truthy (>= 1)."""
    availability = [
        self._recommender_map[key].can_recommend(client_data)
        for key in self.RECOMMENDER_KEYS
    ]
    result = sum(availability)
    self.logger.info("Ensemble can_recommend: {}".format(result))
    return result
[ "The ensemble recommender is always going to be\n available if at least one recommender is available" ]
# Please provide a description of the function:
def _recommend(self, client_data, limit, extra_data={}):
    """Aggregate suggestions from every available sub-recommender,
    scaling each recommender's weights by its ensemble weight, summing
    weights for GUIDs suggested by more than one recommender, and
    returning the top *limit* (preinstalled add-ons excluded).
    """
    self.logger.info("Ensemble recommend invoked")
    preinstalled_addon_ids = client_data.get("installed_addons", [])

    # Compute an extended limit by adding the length of
    # the list of any preinstalled addons.
    extended_limit = limit + len(preinstalled_addon_ids)

    flattened_results = []
    ensemble_weights = self._weight_cache.getWeights()

    for rkey in self.RECOMMENDER_KEYS:
        recommender = self._recommender_map[rkey]
        if recommender.can_recommend(client_data):
            raw_results = recommender.recommend(
                client_data, extended_limit, extra_data
            )
            reweighted_results = []
            # Scale each (guid, weight) pair by this recommender's
            # ensemble weight before merging.
            for guid, weight in raw_results:
                item = (guid, weight * ensemble_weights[rkey])
                reweighted_results.append(item)
            flattened_results.extend(reweighted_results)

    # Sort the results by the GUID (groupby requires adjacent keys)
    flattened_results.sort(key=lambda item: item[0])

    # group by the guid, sum up the weights for recurring GUID
    # suggestions across all recommenders
    guid_grouper = itertools.groupby(flattened_results, lambda item: item[0])

    ensemble_suggestions = []
    for (guid, guid_group) in guid_grouper:
        weight_sum = sum([v for (g, v) in guid_group])
        item = (guid, weight_sum)
        ensemble_suggestions.append(item)

    # Sort in reverse order (greatest weight to least)
    ensemble_suggestions.sort(key=lambda x: -x[1])

    # Never suggest add-ons the client already has installed.
    filtered_ensemble_suggestions = [
        (guid, weight)
        for (guid, weight) in ensemble_suggestions
        if guid not in preinstalled_addon_ids
    ]

    results = filtered_ensemble_suggestions[:limit]

    log_data = (
        client_data["client_id"],
        str(ensemble_weights),
        str([r[0] for r in results]),
    )
    self.logger.info(
        "client_id: [%s], ensemble_weight: [%s], guids: [%s]" % log_data
    )
    return results
[ "\n Ensemble recommendations are aggregated from individual\n recommenders. The ensemble recommender applies a weight to\n the recommendation outputs of each recommender to reorder the\n recommendations to be a better fit.\n\n The intuitive understanding is that the total space of\n recommended addons across all recommenders will include the\n 'true' addons that should be recommended better than any\n individual recommender. The ensemble method simply needs to\n weight each recommender appropriate so that the ordering is\n correct.\n " ]
# Please provide a description of the function:
def synchronized(wrapped):
    """Method decorator serializing calls through the instance's
    ``_lock`` attribute (args[0] is assumed to be ``self``)."""
    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        instance = args[0]
        with instance._lock:
            return wrapped(*args, **kwargs)
    return wrapper
[ " Synchronization decorator. " ]
# Please provide a description of the function:
def get(self, transform=None):
    """
    Return ``(json_object, was_refreshed)``.

    The cached copy is served while it is fresh; once the TTL has
    expired (or nothing is cached yet) the object is reloaded from S3
    via ``_refresh_cache`` and the second element is True.
    """
    cached = self._cached_copy
    if self.has_expired() or cached is None:
        return self._refresh_cache(transform), True
    return cached, False
[ "\n Return the JSON defined at the S3 location in the constructor.\n\n The get method will reload the S3 object after the TTL has\n expired.\n Fetch the JSON object from cache or S3 if necessary\n " ]
# Please provide a description of the function:
def hashed_download(url, temp, digest):
    """Download *url* into directory *temp*, verify the payload's
    SHA-256 hexdigest equals *digest*, and return the downloaded file's
    path.

    :raises HashError: when the computed digest does not match.
    """
    # Based on pip 1.4.1's URLOpener but with cert verification removed
    def opener():
        opener = build_opener(HTTPSHandler())
        # Strip out HTTPHandler to prevent MITM spoof:
        # only HTTPS is allowed to actually fetch anything.
        for handler in opener.handlers:
            if isinstance(handler, HTTPHandler):
                opener.handlers.remove(handler)
        return opener

    def read_chunks(response, chunk_size):
        # Stream the response in fixed-size chunks until EOF.
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            yield chunk

    response = opener().open(url)
    # Save under the last path component of the URL.
    path = join(temp, urlparse(url).path.split('/')[-1])
    actual_hash = sha256()
    with open(path, 'wb') as file:
        # Hash while writing so the file is read only once.
        for chunk in read_chunks(response, 4096):
            file.write(chunk)
            actual_hash.update(chunk)

    actual_digest = actual_hash.hexdigest()
    if actual_digest != digest:
        raise HashError(url, path, actual_digest, digest)
    return path
[ "Download ``url`` to ``temp``, make sure it has the SHA-256 ``digest``,\n and return its path." ]
# Please provide a description of the function:
def get_lr(self, score):
    """Map a similarity *score* to an approximate likelihood ratio.

    The precomputed ``self.lr_curves`` holds (distance, (numerator,
    denominator)) entries; the entry whose distance is closest to
    *score* is selected and its numerator/denominator ratio returned
    as a float.
    """
    # Distances of the precomputed curve points, as one array.
    curve_distances = np.array([entry[0] for entry in self.lr_curves])
    # Index of the curve point nearest to the requested score.
    nearest_idx = np.argmin(abs(score - curve_distances))
    numerator = self.lr_curves[nearest_idx][1][0]
    denominator = self.lr_curves[nearest_idx][1][1]
    return float(numerator) / float(denominator)
[ "Compute a :float: likelihood ratio from a provided similarity score when compared\n to two probability density functions which are computed and pre-loaded during init.\n\n The numerator indicates the probability density that a particular similarity score\n corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of\n telemetry variables. The denominator indicates the probability density that a particular\n similarity score corresponds to a 'poor' addon donor\n\n :param score: A similarity score between a pair of objects.\n :returns: The approximate float likelihood ratio corresponding to provided score.\n " ]
# Please provide a description of the function:
def get_similar_donors(self, client_data):
    """Score every candidate donor against *client_data*.

    Computes the client-to-donor distances, converts each to a
    likelihood ratio via ``get_lr``, and returns the LR values sorted
    in descending order together with the index permutation that links
    each LR back to its donor in the donor pool.
    """
    # Distance between this client and every comparable donor.
    distances = self.compute_clients_dist(client_data)

    # Convert each distance into a likelihood ratio.
    lr_values = np.array(
        [self.get_lr(distances[i]) for i in range(self.num_donors)]
    )

    # Descending LR order, keeping the original donor indices.
    order = (-lr_values).argsort()
    return lr_values[order], order
[ "Computes a set of :float: similarity scores between a client and a set of candidate\n donors for which comparable variables have been measured.\n\n A custom similarity metric is defined in this function that combines the Hamming distance\n for categorical variables with the Canberra distance for continuous variables into a\n univariate similarity metric between the client and a set of candidate donors loaded during\n init.\n\n :param client_data: a client data payload including a subset fo telemetry fields.\n :return: the sorted approximate likelihood ratio (np.array) corresponding to the\n internally computed similarity score and a list of indices that link\n each LR score with the related donor in the |self.donors_pool|.\n " ]
# Please provide a description of the function:
def recommend(self, client_id, limit, extra_data={}):
    """Return recommendations for the given client.

    Special-cases the hard-coded test client IDs (random whitelist
    sample / empty list), otherwise fetches the client's profile and
    delegates to the ensemble recommender.

    :param client_id: the client unique id.
    :param limit: the maximum number of recommendations to return.
    :param extra_data: a dictionary with extra client data.
    """
    if client_id in TEST_CLIENT_IDS:
        whitelist = self._whitelist_data.get()[0]
        random.shuffle(whitelist)
        sampled = whitelist[:limit]
        self.logger.info("Test ID detected [{}]".format(client_id))
        return [(guid, 1.1) for guid in sampled]

    if client_id in EMPTY_TEST_CLIENT_IDS:
        self.logger.info("Empty Test ID detected [{}]".format(client_id))
        return []

    client_info = self.profile_fetcher.get(client_id)
    if client_info is None:
        self.logger.info(
            "Defaulting to empty results. No client info fetched from dynamo."
        )
        return []

    return self._ensemble_recommender.recommend(client_info, limit, extra_data)
[ "Return recommendations for the given client.\n\n The recommendation logic will go through each recommender and\n pick the first one that \"can_recommend\".\n\n :param client_id: the client unique id.\n :param limit: the maximum number of recommendations to return.\n :param extra_data: a dictionary with extra client data.\n " ]
# Please provide a description of the function:
def get_client_profile(self, client_id):
    """Fetch a single client record out of DynamoDB.

    The stored payload is zlib-compressed UTF-8 JSON. Returns the
    decoded dict, or None when the client is unknown or any error
    occurs (errors are logged at debug level; the caller handles
    reporting).
    """
    try:
        response = self._table.get_item(Key={'client_id': client_id})
        compressed = response['Item']['json_payload'].value
        decoded = zlib.decompress(compressed).decode('utf8')
        return json.loads(decoded)
    except KeyError:
        # No client ID found - not really an error
        return None
    except Exception as err:
        # Return None on error. The caller in ProfileFetcher will
        # handle error logging
        msg = "Error loading client data for {}. Error: {}"
        self.logger.debug(msg.format(client_id, str(err)))
        return None
[ "This fetches a single client record out of DynamoDB\n " ]
# Please provide a description of the function:
def clean_promoted_guids(raw_promoted_guids):
    """Verify that the promoted GUIDs are formatted correctly
    ([[guid, weight], ...] with a text guid and numeric weight);
    return the input unchanged when valid, otherwise an empty list.

    Fix: the old check referenced the Python-2-only ``unicode`` name,
    which raised NameError on Python 3 whenever the first element was
    not already a str; the accepted text types are now computed up
    front with a py2-compatible guard.
    """
    try:
        text_types = (str, unicode)  # noqa: F821 -- Python 2 compatibility
    except NameError:
        text_types = (str,)

    for row in raw_promoted_guids:
        if len(row) != 2:
            return []
        if not isinstance(row[0], text_types):
            return []
        # bools pass here (bool is an int subclass), matching the old behavior
        if not isinstance(row[1], (int, float)):
            return []
    return raw_promoted_guids
[ " Verify that the promoted GUIDs are formatted correctly,\n otherwise strip it down into an empty list.\n " ]
# Please provide a description of the function:
def configure_plugin(app):  # noqa: C901
    """
    This is a factory function that configures all the routes for
    flask given a particular library.
    """

    @app.route(
        "/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/", methods=["GET"]
    )
    def client_has_addon(hashed_client_id, addon_id):
        """Report whether the given client has the add-on installed."""
        # Use the module global PROXY_MANAGER
        global PROXY_MANAGER

        recommendation_manager = check_proxy_manager(PROXY_MANAGER)
        pf = recommendation_manager._ctx["profile_fetcher"]

        client_meta = pf.get(hashed_client_id)
        if client_meta is None:
            # no valid client metadata was found for the given
            # clientId
            result = {"results": False, 'error': 'No client found'}
            response = app.response_class(
                response=json.dumps(result), status=200, mimetype="application/json"
            )
            return response

        result = {"results": addon_id in client_meta.get("installed_addons", [])}
        response = app.response_class(
            response=json.dumps(result), status=200, mimetype="application/json"
        )
        return response

    @app.route("/v1/api/recommendations/<hashed_client_id>/", methods=["GET", "POST"])
    def recommendations(hashed_client_id):
        """Return a list of recommendations provided a telemetry client_id."""
        # Use the module global PROXY_MANAGER
        global PROXY_MANAGER

        extra_data = {}
        extra_data["options"] = {}
        extra_data["options"]["promoted"] = []

        try:
            if request.method == "POST":
                json_data = request.data
                # At least Python3.5 returns request.data as bytes
                # type instead of a string type.
                # Both Python2.7 and Python3.7 return a string type
                if type(json_data) == bytes:
                    json_data = json_data.decode("utf8")

                if json_data != "":
                    post_data = json.loads(json_data)
                    raw_promoted_guids = post_data.get("options", {}).get(
                        "promoted", []
                    )
                    promoted_guids = clean_promoted_guids(raw_promoted_guids)
                    extra_data["options"]["promoted"] = promoted_guids

        except Exception as e:
            jdata = {}
            jdata["results"] = []
            jdata["error"] = "Invalid JSON in POST: {}".format(e)
            # BUG FIX: status/mimetype were previously passed to
            # json.dumps() (a TypeError at runtime) instead of to
            # app.response_class().
            return app.response_class(
                response=json.dumps(jdata), status=400, mimetype="application/json"
            )

        # Coerce the uuid.UUID type into a string
        client_id = str(hashed_client_id)

        locale = request.args.get("locale", None)
        if locale is not None:
            extra_data["locale"] = locale

        platform = request.args.get("platform", None)
        if platform is not None:
            extra_data["platform"] = platform

        recommendation_manager = check_proxy_manager(PROXY_MANAGER)
        recommendations = recommendation_manager.recommend(
            client_id=client_id, limit=TAAR_MAX_RESULTS, extra_data=extra_data
        )

        promoted_guids = extra_data.get("options", {}).get("promoted", [])
        recommendations = merge_promoted_guids(promoted_guids, recommendations)

        # Strip out weights from TAAR results to maintain compatibility
        # with TAAR 1.0
        jdata = {"results": [x[0] for x in recommendations]}

        response = app.response_class(
            response=json.dumps(jdata), status=200, mimetype="application/json"
        )
        return response

    def check_proxy_manager(PROXY_MANAGER):
        # Lazily build and cache a RecommendationManager on first use.
        if PROXY_MANAGER.getResource() is None:
            ctx = default_context()
            profile_fetcher = ProfileFetcher(ctx)
            ctx["profile_fetcher"] = profile_fetcher

            # Lock the context down after we've got basic bits installed
            root_ctx = ctx.child()
            r_factory = recommenders.RecommenderFactory(root_ctx)
            root_ctx["recommender_factory"] = r_factory
            instance = recommenders.RecommendationManager(root_ctx.child())
            PROXY_MANAGER.setResource(instance)
        return PROXY_MANAGER.getResource()

    class MyPlugin:
        def set(self, config_options):
            """
            This setter is primarily so that we can instrument the
            cached RecommendationManager implementation under test.

            All plugins should implement this set method to enable
            overwriting configuration options with a TAAR library.
            """
            global PROXY_MANAGER
            if "PROXY_RESOURCE" in config_options:
                PROXY_MANAGER._resource = config_options["PROXY_RESOURCE"]

    return MyPlugin()
[ "\n This is a factory function that configures all the routes for\n flask given a particular library.\n ", "Return a list of recommendations provided a telemetry client_id.", "\n This setter is primarily so that we can instrument the\n cached RecommendationManager implementation under test.\n\n All plugins should implement this set method to enable\n overwriting configuration options with a TAAR library.\n " ]
# Please provide a description of the function:
def login(self):
    """Login to Tahoma API and store the session cookie.

    Returns True after a successful (fresh) login; returns None when a
    session is already established. Raises Exception on protocol or
    credential failures.
    """
    if self.__logged_in:
        return
    login = {'userId': self.__username, 'userPassword': self.__password}
    header = BASE_HEADERS.copy()

    request = requests.post(BASE_URL + 'login',
                            data=login,
                            headers=header,
                            timeout=10)

    try:
        result = request.json()
    except ValueError as error:
        # str() the status code: concatenating the int directly raised
        # TypeError and masked the real protocol error.
        raise Exception(
            "Not a valid result for login, " +
            "protocol error: " + str(request.status_code) + ' - ' +
            request.reason + "(" + str(error) + ")")

    if 'error' in result.keys():
        raise Exception("Could not login: " + result['error'])

    if request.status_code != 200:
        raise Exception(
            "Could not login, HTTP code: " +
            str(request.status_code) + ' - ' + request.reason)

    if 'success' not in result.keys() or not result['success']:
        raise Exception("Could not login, no success")

    cookie = request.headers.get("set-cookie")
    if cookie is None:
        raise Exception("Could not login, no cookie set")

    self.__cookie = cookie
    self.__logged_in = True
    return self.__logged_in
[ "Login to Tahoma API." ]
# Please provide a description of the function:
def get_user(self):
    """Get the user information from the server as a dict.

    On a non-200 response the method re-authenticates and retries.
    Fix: the retry's result is now returned — previously
    ``self.get_user()`` was called and its value discarded, so the
    caller received None after any relogin.

    :raises Exception: when the server response is not valid JSON.
    """
    header = BASE_HEADERS.copy()
    header['Cookie'] = self.__cookie
    request = requests.get(BASE_URL + 'getEndUser',
                           headers=header,
                           timeout=10)
    if request.status_code != 200:
        self.__logged_in = False
        self.login()
        return self.get_user()
    try:
        result = request.json()
    except ValueError:
        raise Exception(
            "Not a valid result for getEndUser, protocol error!")
    return result['endUser']
[ "Get the user informations from the server.\n\n :return: a dict with all the informations\n :rtype: dict\n\n raises ValueError in case of protocol issues\n\n :Example:\n\n >>> \"creationTime\": <time>,\n >>> \"lastUpdateTime\": <time>,\n >>> \"userId\": \"<email for login>\",\n >>> \"title\": 0,\n >>> \"firstName\": \"<First>\",\n >>> \"lastName\": \"<Last>\",\n >>> \"email\": \"<contact email>\",\n >>> \"phoneNumber\": \"<phone>\",\n >>> \"mobilePhone\": \"<mobile>\",\n >>> \"locale\": \"<two char country code>\"\n\n :Warning:\n\n The type and amount of values in the dictionary can change any time.\n " ]
# Please provide a description of the function:
def _get_setup(self, result):
    """Internal: process a 'setup' payload from the server, populating
    the device map (keyed by device URL), the location info and the
    gateway list.

    :raises Exception: when the payload lacks a setup/devices section.
    """
    self.__devices = {}

    if ('setup' not in result.keys() or
            'devices' not in result['setup'].keys()):
        raise Exception(
            "Did not find device definition.")

    for device_data in result['setup']['devices']:
        device = Device(self, device_data)
        # Devices are keyed by their unique device URL.
        self.__devices[device.url] = device

    self.__location = result['setup']['location']
    self.__gateway = result['setup']['gateways']
[ "Internal method which process the results from the server." ]
Please provide a description of the function:def apply_actions(self, name_of_action, actions): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie actions_serialized = [] for action in actions: actions_serialized.append(action.serialize()) data = {"label": name_of_action, "actions": actions_serialized} json_data = json.dumps(data, indent=None, sort_keys=True) request = requests.post( BASE_URL + "apply", headers=header, data=json_data, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.apply_actions(name_of_action, actions) return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for applying an " + "action, protocol error: " + request.status_code + ' - ' + request.reason + " (" + error + ")") if 'execId' not in result.keys(): raise Exception("Could not run actions, missing execId.") return result['execId']
[ "Start to execute an action or a group of actions.\n\n This method takes a bunch of actions and runs them on your\n Tahoma box.\n\n :param name_of_action: the label/name for the action\n :param actions: an array of Action objects\n :return: the execution identifier **************\n what if it fails\n :rtype: string\n\n raises ValueError in case of protocol issues\n\n :Seealso:\n\n - get_events\n - get_current_executions\n " ]
Please provide a description of the function:def get_events(self): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.post(BASE_URL + 'getEvents', headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.get_events() return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for getEvent," + " protocol error: " + error) return self._get_events(result)
[ "Return a set of events.\n\n Which have been occured since the last call of this method.\n\n This method should be called regulary to get all occuring\n Events. There are three different Event types/classes\n which can be returned:\n\n - DeviceStateChangedEvent, if any device changed it's state\n due to an applied action or just because of other reasons\n - CommandExecutionStateChangedEvent, a executed command goes\n through several phases which can be followed\n - ExecutionStateChangedEvent, ******** todo\n\n :return: an array of Events or empty array\n :rtype: list\n\n raises ValueError in case of protocol issues\n\n :Seealso:\n\n - apply_actions\n - launch_action_group\n - get_history\n " ]
Please provide a description of the function:def _get_events(self, result): events = [] for event_data in result: event = Event.factory(event_data) if event is not None: events.append(event) if isinstance(event, DeviceStateChangedEvent): # change device state if self.__devices[event.device_url] is None: raise Exception( "Received device change " + "state for unknown device '" + event.device_url + "'") self.__devices[event.device_url].set_active_states( event.states) return events
[ "\"Internal method for being able to run unit tests." ]
Please provide a description of the function:def get_current_executions(self): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get( BASE_URL + 'getCurrentExecutions', headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.get_current_executions() return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for" + "get_current_executions, protocol error: " + error) if 'executions' not in result.keys(): return None executions = [] for execution_data in result['executions']: exe = Execution(execution_data) executions.append(exe) return executions
[ "Get all current running executions.\n\n :return: Returns a set of running Executions or empty list.\n :rtype: list\n\n raises ValueError in case of protocol issues\n\n :Seealso:\n\n - apply_actions\n - launch_action_group\n - get_history\n " ]
Please provide a description of the function:def get_action_groups(self): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get(BASE_URL + "getActionGroups", headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.get_action_groups() return try: result = request.json() except ValueError: raise Exception( "get_action_groups: Not a valid result for ") if 'actionGroups' not in result.keys(): return None groups = [] for group_data in result['actionGroups']: group = ActionGroup(group_data) groups.append(group) return groups
[ "Get all Action Groups.\n\n :return: List of Action Groups\n " ]
Please provide a description of the function:def launch_action_group(self, action_id): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get( BASE_URL + 'launchActionGroup?oid=' + action_id, headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.launch_action_group(action_id) return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for launch" + "action group, protocol error: " + request.status_code + ' - ' + request.reason + " (" + error + ")") if 'actionGroup' not in result.keys(): raise Exception( "Could not launch action" + "group, missing execId.") return result['actionGroup'][0]['execId']
[ "Start action group." ]
Please provide a description of the function:def get_states(self, devices): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie json_data = self._create_get_state_request(devices) request = requests.post( BASE_URL + 'getStates', headers=header, data=json_data, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.get_states(devices) return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for" + "getStates, protocol error:" + error) self._get_states(result)
[ "Get States of Devices." ]
Please provide a description of the function:def _create_get_state_request(self, given_devices): dev_list = [] if isinstance(given_devices, list): devices = given_devices else: devices = [] for dev_name, item in self.__devices.items(): if item: devices.append(self.__devices[dev_name]) for device in devices: states = [] for state_name in sorted(device.active_states.keys()): states.append({'name': state_name}) dev_list.append({'deviceURL': device.url, 'states': states}) return json.dumps( dev_list, indent=None, sort_keys=True, separators=(',', ': '))
[ "Create state request." ]
Please provide a description of the function:def _get_states(self, result): if 'devices' not in result.keys(): return for device_states in result['devices']: device = self.__devices[device_states['deviceURL']] try: device.set_active_states(device_states['states']) except KeyError: pass
[ "Get states of devices." ]
Please provide a description of the function:def refresh_all_states(self): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get( BASE_URL + "refreshAllStates", headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.refresh_all_states() return
[ "Update all states." ]
Please provide a description of the function:def set_active_state(self, name, value): if name not in self.__active_states.keys(): raise ValueError("Can not set unknown state '" + name + "'") if (isinstance(self.__active_states[name], int) and isinstance(value, str)): # we get an update as str but current value is # an int, try to convert self.__active_states[name] = int(value) elif (isinstance(self.__active_states[name], float) and isinstance(value, str)): # we get an update as str but current value is # a float, try to convert self.__active_states[name] = float(value) else: self.__active_states[name] = value
[ "Set active state." ]
Please provide a description of the function:def add_command(self, cmd_name, *args): self.__commands.append(Command(cmd_name, args))
[ "Add command to action." ]
Please provide a description of the function:def serialize(self): commands = [] for cmd in self.commands: commands.append(cmd.serialize()) out = {'commands': commands, 'deviceURL': self.__device_url} return out
[ "Serialize action." ]
Please provide a description of the function:def factory(data): if data['name'] is "DeviceStateChangedEvent": return DeviceStateChangedEvent(data) elif data['name'] is "ExecutionStateChangedEvent": return ExecutionStateChangedEvent(data) elif data['name'] is "CommandExecutionStateChangedEvent": return CommandExecutionStateChangedEvent(data) else: raise ValueError("Unknown event '" + data['name'] + "' occurred.")
[ "Tahoma Event factory." ]
Please provide a description of the function:def parse(date, dayfirst=True): '''Parse a `date` into a `FlexiDate`. @param date: the date to parse - may be a string, datetime.date, datetime.datetime or FlexiDate. TODO: support for quarters e.g. Q4 1980 or 1954 Q3 TODO: support latin stuff like M.DCC.LIII TODO: convert '-' to '?' when used that way e.g. had this date [181-] ''' if not date: return None if isinstance(date, FlexiDate): return date if isinstance(date, int): return FlexiDate(year=date) elif isinstance(date, datetime.datetime): parser = PythonDateTimeParser() return parser.parse(date) elif isinstance(date, datetime.date): parser = PythonDateParser() return parser.parse(date) else: # assuming its a string parser = DateutilDateParser() out = parser.parse(date, **{'dayfirst': dayfirst}) if out is not None: return out # msg = 'Unable to parse %s' % date # raise ValueError(date) val = 'UNPARSED: %s' % date val = val.encode('ascii', 'ignore') return FlexiDate(qualifier=val)
[]
Please provide a description of the function:def isoformat(self, strict=False): '''Return date in isoformat (same as __str__ but without qualifier). WARNING: does not replace '?' in dates unless strict=True. ''' out = self.year # what do we do when no year ... for val in [self.month, self.day]: if not val: break out += u'-' + val if strict: out = out.replace('?', '0') if self.hour: out += u' ' out += self.hour for val in [self.minute, self.second]: if not val: break out += u':' + val if self.microsecond: out += u'.' + self.microsecond return out
[]
Please provide a description of the function:def from_str(self, instr): '''Undo affect of __str__''' if not instr: return FlexiDate() out = self.our_re.match(instr) if out is None: # no match TODO: raise Exception? return None else: return FlexiDate( out.group('year'), out.group('month'), out.group('day'), out.group('hour'), out.group('minute'), out.group('second'), out.group('microsecond'), qualifier=out.group('qualifier') )
[]
Please provide a description of the function:def as_float(self): '''Get as a float (year being the integer part). Replace '?' in year with 9 so as to be conservative (e.g. 19?? becomes 1999) and elsewhere (month, day) with 0 @return: float. ''' if not self.year: return None out = float(self.year.replace('?', '9')) if self.month: # TODO: we are assuming months are of equal length out += float(self.month.replace('?', '0')) / 12.0 if self.day: out += float(self.day.replace('?', '0')) / 365.0 return out
[]
Please provide a description of the function:def as_datetime(self): '''Get as python datetime.datetime. Require year to be a valid datetime year. Default month and day to 1 if do not exist. @return: datetime.datetime object. ''' year = int(self.year) month = int(self.month) if self.month else 1 day = int(self.day) if self.day else 1 hour = int(self.hour) if self.hour else 0 minute = int(self.minute) if self.minute else 0 second = int(self.second) if self.second else 0 microsecond = int(self.microsecond) if self.microsecond else 0 return datetime.datetime(year, month, day, hour, minute, second, microsecond)
[]
Please provide a description of the function:def parse(self, date, **kwargs): ''' :param **kwargs: any kwargs accepted by dateutil.parse function. ''' qualifiers = [] if dateutil_parser is None: return None date = orig_date = date.strip() # various normalizations # TODO: call .lower() first date = date.replace('B.C.E.', 'BC') date = date.replace('BCE', 'BC') date = date.replace('B.C.', 'BC') date = date.replace('A.D.', 'AD') date = date.replace('C.E.', 'AD') date = date.replace('CE', 'AD') # deal with pre 0AD dates if date.startswith('-') or 'BC' in date or 'B.C.' in date: pre0AD = True else: pre0AD = False # BC seems to mess up parser date = date.replace('BC', '') # deal with circa: 'c.1950' or 'c1950' circa_match = re.match('([^a-zA-Z]*)c\.?\s*(\d+.*)', date) if circa_match: # remove circa bit qualifiers.append("Note 'circa'") date = ''.join(circa_match.groups()) # deal with p1980 (what does this mean? it can appear in # field 008 of MARC records p_match = re.match("^p(\d+)", date) if p_match: date = date[1:] # Deal with uncertainty: '1985?' uncertainty_match = re.match('([0-9xX]{4})\?', date) if uncertainty_match: # remove the ? date = date[:-1] qualifiers.append('Uncertainty') # Parse the numbers intelligently # do not use std parser function as creates lots of default data res = dateutil_parser._parse(date, **kwargs) try: res = res[0] except: res = res if res is None: # Couldn't parse it return None # Note: Years of less than 3 digits not interpreted by # dateutil correctly # e.g. 87 -> 1987 # 4 -> day 4 (no year) # Both cases are handled in this routine if res.year is None and res.day: year = res.day # If the whole date is simply two digits then dateutil_parser makes # it '86' -> '1986'. So strip off the '19'. (If the date specified # day/month then a two digit year is more likely to be this century # and so allow the '19' prefix to it.) 
elif self._numeric.match(date) and (len(date) == 2 or date.startswith('00')): year = res.year % 100 else: year = res.year # finally add back in BC stuff if pre0AD: year = -year if not qualifiers: qualifier = '' else: qualifier = ', '.join(qualifiers) + (' : %s' % orig_date) return FlexiDate(year, res.month, res.day, res.hour, res.minute, res.second, res.microsecond, qualifier=qualifier)
[]
Please provide a description of the function:def md5sum( string ): h = hashlib.new( 'md5' ) h.update( string.encode( 'utf-8' ) ) return h.hexdigest()
[ "\n Generate the md5 checksum for a string\n\n Args:\n string (Str): The string to be checksummed.\n\n Returns:\n (Str): The hex checksum.\n " ]
Please provide a description of the function:def file_md5( filename ): with zopen( filename, 'r' ) as f: file_string = f.read() try: # attempt to decode byte object file_string = file_string.decode() except AttributeError: pass return( md5sum( file_string ) )
[ "\n Generate the md5 checksum for a file\n\n Args:\n filename (Str): The file to be checksummed.\n\n Returns:\n (Str): The hex checksum\n\n Notes:\n If the file is gzipped, the md5 checksum returned is\n for the uncompressed ASCII file.\n " ]
Please provide a description of the function:def match_filename( filename ): f = next( ( '{}{}'.format( filename, extension ) for extension in [ '', '.gz' ] if Path( '{}{}'.format( filename, extension ) ).is_file() ), None ) return f
[ "\n Checks whether a file exists, either as named, or as a a gzippped file (filename.gz)\n\n Args:\n (Str): The root filename.\n\n Returns:\n (Str|None): if the file exists (either as the root filename, or gzipped), the return\n value will be the actual filename. If no matching filename is found the return\n value is set to None\n " ]
Please provide a description of the function:def validate_checksum( filename, md5sum ): filename = match_filename( filename ) md5_hash = file_md5( filename=filename ) if md5_hash != md5sum: raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))
[ "\n Compares the md5 checksum of a file with an expected value.\n If the calculated and expected checksum values are not equal, \n ValueError is raised.\n If the filename `foo` is not found, will try to read a gzipped file named\n `foo.gz`. In this case, the checksum is calculated for the unzipped file.\n\n Args:\n filename (str): Path for the file to be checksummed.\n md5sum (str): The expected hex checksum.\n\n Returns:\n None\n " ]
Please provide a description of the function:def to_matrix( xx, yy, zz, xy, yz, xz ): matrix = np.array( [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]] ) return matrix
[ "\n Convert a list of matrix components to a symmetric 3x3 matrix.\n Inputs should be in the order xx, yy, zz, xy, yz, xz.\n\n Args:\n xx (float): xx component of the matrix.\n yy (float): yy component of the matrix.\n zz (float): zz component of the matrix.\n xy (float): xy component of the matrix.\n yz (float): yz component of the matrix.\n xz (float): xz component of the matrix.\n\n Returns:\n (np.array): The matrix, as a 3x3 numpy array.\n " ]
Please provide a description of the function:def absorption_coefficient( dielectric ): energies_in_eV = np.array( dielectric[0] ) real_dielectric = parse_dielectric_data( dielectric[1] ) imag_dielectric = parse_dielectric_data( dielectric[2] ) epsilon_1 = np.mean( real_dielectric, axis=1 ) epsilon_2 = np.mean( imag_dielectric, axis=1 ) return ( 2.0 * np.sqrt(2.0)*pi*eV_to_recip_cm*energies_in_eV * np.sqrt( -epsilon_1 + np.sqrt( epsilon_1**2 + epsilon_2**2 ) ) )
[ "\n Calculate the optical absorption coefficient from an input set of\n pymatgen vasprun dielectric constant data.\n\n Args:\n dielectric (list): A list containing the dielectric response function\n in the pymatgen vasprun format.\n\n | element 0: list of energies\n | element 1: real dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.\n | element 2: imaginary dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.\n \n Returns:\n (np.array): absorption coefficient using eV as frequency units (cm^-1).\n\n Notes:\n The absorption coefficient is calculated as\n\n .. math:: \\\\alpha = \\\\frac{2\\sqrt{2} \\pi}{\\lambda} \\sqrt{-\\epsilon_1+\\sqrt{\\epsilon_1^2+\\epsilon_2^2}}\n\n " ]
Please provide a description of the function:def murnaghan( vol, e0, b0, bp, v0 ): energy = e0 + b0 * vol / bp * (((v0 / vol)**bp) / (bp - 1) + 1) - v0 * b0 / (bp - 1.0) return energy
[ "\n Calculate the energy as a function of volume, using the Murnaghan equation of state\n [Murnaghan, Proc. Nat. Acad. Sci. 30, 244 (1944)]\n https://en.wikipedia.org/wiki/Murnaghan_equation_of_state\n cf. Fu and Ho, Phys. Rev. B 28, 5480 (1983).\n\n Args:\n vol (float): this volume.\n e0 (float): energy at the minimum-energy volume, E0.\n b0 (float): bulk modulus at the minimum-energy volume, B0.\n bp (float): pressure-derivative of the bulk modulus at the minimum-energy volume, B0'.\n v0 (float): volume at the minimum-energy volume, V0.\n \n Returns:\n (float): The energy at this volume. \n " ]
Please provide a description of the function:def add_dr( self, dr ): this_bin = int( dr / self.dr ) if this_bin > self.number_of_bins: raise IndexError( 'dr is larger than rdf max_r' ) self.data[ this_bin ] += 1
[ "\n Add an observed interatomic distance to the g(r) data at dr.\n\n Args:\n dr (Float): the interatomic distance, dr.\n\n Returns:\n None\n " ]
Please provide a description of the function:def dr( self, atom1, atom2 ): return self.cell.dr( atom1.r, atom2.r )
[ "\n Calculate the distance between two atoms.\n\n Args:\n atom1 (vasppy.Atom): Atom 1.\n atom2 (vasppy.Atom): Atom 2.\n\n Returns:\n (float): The distance between Atom 1 and Atom 2.\n " ]
Please provide a description of the function:def area_of_a_triangle_in_cartesian_space( a, b, c ): return 0.5 * np.linalg.norm( np.cross( b-a, c-a ) )
[ "\n Returns the area of a triangle defined by three points in Cartesian space.\n\n Args:\n a (np.array): Cartesian coordinates of point A.\n b (np.array): Cartesian coordinates of point B.\n c (np.array): Cartesian coordinates of point C.\n\n Returns:\n (float): the area of the triangle.\n " ]
Please provide a description of the function:def points_are_in_a_straight_line( points, tolerance=1e-7 ): a = points[0] b = points[1] for c in points[2:]: if area_of_a_triangle_in_cartesian_space( a, b, c ) > tolerance: return False return True
[ "\n Check whether a set of points fall on a straight line.\n Calculates the areas of triangles formed by triplets of the points.\n Returns False is any of these areas are larger than the tolerance.\n\n Args:\n points (list(np.array)): list of Cartesian coordinates for each point.\n tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.\n\n Returns:\n (bool): True if all points fall on a straight line (within the allowed tolerance).\n " ]
Please provide a description of the function:def two_point_effective_mass( cartesian_k_points, eigenvalues ): assert( cartesian_k_points.shape[0] == 2 ) assert( eigenvalues.size == 2 ) dk = cartesian_k_points[ 1 ] - cartesian_k_points[ 0 ] mod_dk = np.sqrt( np.dot( dk, dk ) ) delta_e = ( eigenvalues[ 1 ] - eigenvalues[ 0 ] ) * ev_to_hartree * 2.0 effective_mass = mod_dk * mod_dk / delta_e return effective_mass
[ "\n Calculate the effective mass given eigenvalues at two k-points.\n Reimplemented from Aron Walsh's original effective mass Fortran code.\n\n Args:\n cartesian_k_points (np.array): 2D numpy array containing the k-points in (reciprocal) Cartesian coordinates.\n eigenvalues (np.array): numpy array containing the eigenvalues at each k-point.\n\n Returns:\n (float): The effective mass\n " ]
Please provide a description of the function:def least_squares_effective_mass( cartesian_k_points, eigenvalues ): if not points_are_in_a_straight_line( cartesian_k_points ): raise ValueError( 'k-points are not collinear' ) dk = cartesian_k_points - cartesian_k_points[0] mod_dk = np.linalg.norm( dk, axis = 1 ) delta_e = eigenvalues - eigenvalues[0] effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 ) return effective_mass
[ "\n Calculate the effective mass using a least squares quadratic fit.\n\n Args:\n cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points\n eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.\n\n Returns:\n (float): The fitted effective mass\n\n Notes:\n If the k-points do not sit on a straight line a ValueError will be raised.\n " ]
Please provide a description of the function:def read_from_file( self, filename, negative_occupancies='warn' ): valid_negative_occupancies = [ 'warn', 'raise', 'ignore', 'zero' ] if negative_occupancies not in valid_negative_occupancies: raise ValueError( '"{}" is not a valid value for the keyword `negative_occupancies`.'.format( negative_occupancies ) ) with open( filename, 'r' ) as file_in: file_in.readline() self.number_of_k_points, self.number_of_bands, self.number_of_ions = [ int( f ) for f in get_numbers_from_string( file_in.readline() ) ] self.read_in = file_in.read() self.parse_k_points() self.parse_bands() self.parse_occupancy() if np.any( self.occupancy[:,1] < 0 ): # Handle negative occupancies if negative_occupancies == 'warn': warnings.warn( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'raise': raise ValueError( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'zero': self.occupancy[ self.occupancy < 0 ] = 0.0 self.parse_projections() self.sanity_check() self.read_in = None if self.calculation[ 'spin_polarised' ]: self.data = self.projection_data.reshape( self.spin_channels, self.number_of_k_points, self.number_of_bands, self.number_of_ions + 1, self.number_of_projections )[:,:,:,:,1:].swapaxes( 0, 1).swapaxes( 1, 2 ) else: self.data = self.projection_data.reshape( self.number_of_k_points, self.number_of_bands, self.spin_channels, self.number_of_ions + 1, self.number_of_projections )[:,:,:,:,1:]
[ "\n Reads the projected wavefunction character of each band from a VASP PROCAR file.\n\n Args:\n filename (str): Filename of the PROCAR file.\n negative_occupancies (:obj:Str, optional): Sets the behaviour for handling\n negative occupancies. Default is `warn`. \n\n Returns:\n None\n \n Note:\n Valid options for `negative_occupancies` are:\n `warn` (default): Warn that some partial occupancies are negative,\n but do not alter any values.\n `raise`: Raise an AttributeError.\n `ignore`: Do nothing.\n `zero`: Negative partial occupancies will be set to zero.\n " ]
Please provide a description of the function:def stoichiometry( self ): return Counter( { label: number for label, number in zip( self.atoms, self.atom_numbers ) } )
[ "\n Stoichiometry for this POSCAR, as a Counter.\n e.g. AB_2O_4 -> Counter( { 'A': 1, 'B': 2, O: 4 } )\n \n Args:\n None\n\n Returns:\n None\n " ]
Please provide a description of the function:def load_vasp_summary( filename ): with open( filename, 'r' ) as stream: docs = yaml.load_all( stream, Loader=yaml.SafeLoader ) data = { d['title']: d for d in docs } return data
[ "\n Reads a `vasp_summary.yaml` format YAML file and returns\n a dictionary of dictionaries. Each YAML document in the file\n corresponds to one sub-dictionary, with the corresponding\n top-level key given by the `title` value.\n\n Example:\n The file:\n \n ---\n title: foo\n data: foo_data\n ---\n title: bar\n data: bar_data\n\n is converted to the dictionary\n\n { 'foo': { 'title': 'foo', 'data': 'foo_data' },\n 'bar': { 'title': 'bar', 'data': 'bar_data' } }\n\n Args:\n filename (str): File path for the `vasp_summary.yaml` file.\n\n Returns:\n dict(dict,dict,...): A dictionary of separate YAML documents,\n each as dictionaries.a\n\n " ]
Please provide a description of the function:def potcar_spec( filename ): p_spec = {} with open( filename, 'r' ) as f: potcars = re.split('(End of Dataset\n)', f.read() ) potcar_md5sums = [ md5sum( ''.join( pair ) ) for pair in zip( potcars[::2], potcars[1:-1:2] ) ] for this_md5sum in potcar_md5sums: for ps in potcar_sets: for p, p_md5sum in potcar_md5sum_data[ ps ].items(): if this_md5sum == p_md5sum: p_spec[ p ] = ps if len( p_spec ) != len( potcar_md5sums ): raise ValueError( 'One or more POTCARs did not have matching md5 hashes' ) return p_spec
[ "\n Returns a dictionary specifying the pseudopotentials contained in a POTCAR file.\n\n Args:\n filename (Str): The name of the POTCAR file to process.\n\n Returns:\n (Dict): A dictionary of pseudopotential filename: dataset pairs, e.g.\n { 'Fe_pv': 'PBE_54', 'O', 'PBE_54' }\n " ]
Please provide a description of the function:def find_vasp_calculations(): dir_list = [ './' + re.sub( r'vasprun\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ] gz_dir_list = [ './' + re.sub( r'vasprun\.xml\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ] return dir_list + gz_dir_list
[ "\n Returns a list of all subdirectories that contain either a vasprun.xml file\n or a compressed vasprun.xml.gz file.\n\n Args:\n None\n\n Returns:\n (List): list of all VASP calculation subdirectories.\n " ]