def stream(self, opNames=[], *args, **kwargs):
    """ Yield specific operations (e.g. comments) only

        :param array opNames: List of operations to filter for
        :param int start: Start at this block
        :param int stop: Stop at this block
        :param str mode: We here have the choice between
             * "head": the last block
             * "irreversible": the block that is confirmed by 2/3 of
               all block producers and is thus irreversible!

        The dict output is formatted such that ``type`` carries the
        operation type; timestamp and block_num are taken from the
        block the operation was stored in, and the other keys depend
        on the actual operation.
    """
    for op in self.ops(**kwargs):
        if not opNames or op["op"][0] in opNames:
            r = {
                "type": op["op"][0],
                "timestamp": op.get("timestamp"),
                "block_num": op.get("block_num"),
            }
            r.update(op["op"][1])
            yield r
Yield specific operations (e.g. comments) only

:param array opNames: List of operations to filter for
:param int start: Start at this block
:param int stop: Stop at this block
:param str mode: We here have the choice between
    * "head": the last block
    * "irreversible": the block that is confirmed by 2/3 of all block
      producers and is thus irreversible!

The dict output is formatted such that ``type`` carries the operation
type; timestamp and block_num are taken from the block the operation
was stored in, and the other keys depend on the actual operation.
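A short usage sketch for the streaming filter above (the surrounding object and its construction are assumptions; `Blockchain` is a hypothetical name for whatever class defines stream()):

# Hypothetical setup: any object exposing this stream()/ops() interface works.
chain = Blockchain()
for comment in chain.stream(opNames=["comment"], start=1000000, stop=1000100):
    print(comment["type"], comment["block_num"])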
def apparent_dip_correction(axes):
    """
    Produces a two-dimensional rotation matrix that
    rotates a projected dataset to correct for apparent dip
    """
    a1 = axes[0].copy()
    a1[-1] = 0
    cosa = angle(axes[0], a1, cos=True)
    _ = 1 - cosa ** 2
    if _ > 1e-12:
        sina = N.sqrt(_)
        if cosa < 0:
            sina *= -1
        # Construct rotation matrix
        R = N.array([[cosa, sina], [-sina, cosa]])
    else:
        # Small angle, don't bother
        # (small angles can lead to spurious results)
        R = N.identity(2)
    #if axes[0,0] < 0:
    #    return R.T
    #else:
    return R
Produces a two-dimensional rotation matrix that rotates a projected dataset to correct for apparent dip
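A minimal numeric sketch of the function above, assuming `N` is numpy and `angle` returns the cosine of the angle between two vectors when called with cos=True (the helper below is an assumed stand-in, not the library's own):

import numpy as N

def angle(v1, v2, cos=False):
    # assumed helper: (cosine of the) angle between two vectors
    c = N.dot(v1, v2) / (N.linalg.norm(v1) * N.linalg.norm(v2))
    return c if cos else N.arccos(c)

axes = N.array([[0.8, 0.0, 0.6], [0.0, 1.0, 0.0], [-0.6, 0.0, 0.8]])
R = apparent_dip_correction(axes)
print(R)  # [[0.8, 0.6], [-0.6, 0.8]]: rotation undoing the apparent dip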
def keybd_event(bVk: int, bScan: int, dwFlags: int, dwExtraInfo: int) -> None:
    """keybd_event from Win32."""
    ctypes.windll.user32.keybd_event(bVk, bScan, dwFlags, dwExtraInfo)
keybd_event from Win32.
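A quick usage sketch (Windows only; 0x41 is the virtual-key code for 'A' and 0x0002 is the standard Win32 KEYEVENTF_KEYUP flag):

import ctypes

KEYEVENTF_KEYUP = 0x0002
keybd_event(0x41, 0, 0, 0)                # press 'A'
keybd_event(0x41, 0, KEYEVENTF_KEYUP, 0)  # release 'A'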
def _parse_document(self):
    """Parse system.profile doc, copy all values to member variables."""
    self._reset()
    doc = self._profile_doc

    self._split_tokens_calculated = True
    self._split_tokens = None

    self._duration_calculated = True
    self._duration = doc[u'millis']

    self._datetime_calculated = True
    self._datetime = doc[u'ts']
    if self._datetime.tzinfo is None:
        self._datetime = self._datetime.replace(tzinfo=tzutc())
    self._datetime_format = None
    self._reformat_timestamp('ctime', force=True)

    self._thread_calculated = True
    self._thread = doc['thread']

    self._operation_calculated = True
    self._operation = doc[u'op']
    self._namespace = doc[u'ns']

    self._command_calculated = True
    if self.operation == 'command':
        self._command = doc[u'command'].keys()[0]

    # query pattern for system.profile events, all three cases.
    # See SERVER-13245
    if 'query' in doc:
        if 'query' in doc['query'] and isinstance(doc['query']['query'], dict):
            self._pattern = str(doc['query']['query']).replace("'", '"')
        elif '$query' in doc['query']:
            self._pattern = str(doc['query']['$query']).replace("'", '"')
        else:
            self._pattern = str(doc['query']).replace("'", '"')

        # sort pattern
        if ('orderby' in doc['query'] and
                isinstance(doc['query']['orderby'], dict)):
            self._sort_pattern = str(doc['query']['orderby']).replace("'", '"')
        elif '$orderby' in doc['query']:
            self._sort_pattern = str(doc['query']['$orderby']).replace("'", '"')
        else:
            self._sort_pattern = None

    self._counters_calculated = True
    self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
    self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
    self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
    self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
    self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
    self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
    self._numYields = doc[u'numYield'] if 'numYield' in doc else None

    if u'lockStats' in doc:
        self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
        self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
        self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
        self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
        locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
    elif u'locks' in doc:
        locks = json.dumps(doc[u'locks'])
    else:
        locks = ''

    # build a fake line_str
    payload = ''
    if 'query' in doc:
        payload += ('query: %s' % str(doc[u'query'])
                    .replace("u'", "'").replace("'", '"'))
    if 'command' in doc:
        payload += ('command: %s' % str(doc[u'command'])
                    .replace("u'", "'").replace("'", '"'))
    if 'updateobj' in doc:
        payload += (' update: %s' % str(doc[u'updateobj'])
                    .replace("u'", "'").replace("'", '"'))

    scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
    yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
    duration = '%ims' % self.duration if self.duration is not None else ''

    self._line_str = ("[{thread}] {operation} {namespace} {payload} "
                      "{scanned} {yields} locks(micros) {locks} "
                      "{duration}".format(
                          datetime=self.datetime, thread=self.thread,
                          operation=self.operation, namespace=self.namespace,
                          payload=payload, scanned=scanned, yields=yields,
                          locks=locks, duration=duration))
Parse system.profile doc, copy all values to member variables.
def buildhtmlheader(self):
    """generate HTML header content"""
    # make sure the Highcharts drilldown module is loaded when needed
    if self.drilldown_flag:
        self.add_JSsource('https://code.highcharts.com/maps/modules/drilldown.js')

    self.header_css = [
        '<link href="%s" rel="stylesheet" />' % h for h in self.CSSsource
    ]
    self.header_js = [
        '<script type="text/javascript" src="%s"></script>' % h
        for h in self.JSsource
    ]

    self.htmlheader = ''
    for css in self.header_css:
        self.htmlheader += css
    for js in self.header_js:
        self.htmlheader += js
generate HTML header content
async def open_session(self, request: BaseRequestWebsocket) -> Session:
    """Open and return a Session using the request."""
    return await ensure_coroutine(self.session_interface.open_session)(self, request)
Open and return a Session using the request.
def clean_time(time_string):
    """Return a datetime from the Amazon-provided datetime string"""
    # Get a timezone-aware datetime object from the string
    time = dateutil.parser.parse(time_string)
    if not settings.USE_TZ:
        # If timezone support is not active, convert the time to UTC and
        # remove the timezone field
        time = time.astimezone(timezone.utc).replace(tzinfo=None)
    return time
Return a datetime from the Amazon-provided datetime string
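A quick sketch of the timezone handling, independent of Django (the timestamp is illustrative; in the function above the naive conversion only happens when settings.USE_TZ is False):

import dateutil.parser
from datetime import timezone

aware = dateutil.parser.parse('2019-05-01T12:30:00+02:00')
naive_utc = aware.astimezone(timezone.utc).replace(tzinfo=None)
print(naive_utc)  # 2019-05-01 10:30:00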
def SetFileAttributes(filepath, *attrs):
    """
    Set file attributes. e.g.:

        SetFileAttributes('C:\\foo', 'hidden')

    Each attr must be either a numeric value, a constant defined in
    jaraco.windows.filesystem.api, or one of the nice names
    defined in this function.
    """
    nice_names = dict(
        hidden='FILE_ATTRIBUTE_HIDDEN',
        read_only='FILE_ATTRIBUTE_READONLY',
    )
    # A defaultdict(lambda key: key) was used here originally, but a
    # defaultdict factory takes no arguments; dict.get(attr, attr) gives
    # the intended pass-through for unknown names, and numeric attrs are
    # used directly.
    flags = (
        attr if isinstance(attr, int)
        else getattr(api, nice_names.get(attr, attr), attr)
        for attr in attrs
    )
    flags = functools.reduce(operator.or_, flags)
    handle_nonzero_success(api.SetFileAttributes(filepath, flags))
Set file attributes. e.g.:

    SetFileAttributes('C:\\foo', 'hidden')

Each attr must be either a numeric value, a constant defined in
jaraco.windows.filesystem.api, or one of the nice names defined in this
function.
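A one-line usage sketch (Windows only; the path is illustrative):

SetFileAttributes('C:\\temp\\notes.txt', 'hidden', 'read_only')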
def _current_web_port(self):
    """
    return just the port number for the web container, or
    None if not running
    """
    info = inspect_container(self._get_container_name('web'))
    if info is None:
        return None
    try:
        if not info['State']['Running']:
            return None
        return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
    except TypeError:
        return None
return just the port number for the web container, or None if not running
def get_storer(self, key):
    """ return the storer object for a key, raise if not in the file """
    group = self.get_node(key)
    if group is None:
        raise KeyError('No object named {key} in the file'.format(key=key))
    s = self._create_storer(group)
    s.infer_axes()
    return s
return the storer object for a key, raise if not in the file
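This is the pandas HDFStore accessor; a brief usage sketch (file and frame are illustrative, and format='table' is chosen so the storer reports row counts):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
with pd.HDFStore('example.h5') as store:
    store.put('df', df, format='table')
    storer = store.get_storer('df')
    print(storer.nrows)  # 3
    # store.get_storer('missing') would raise KeyError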
def main():
    """Main function; calls the test functions"""
    print("Python version %s" % sys.version)
    print("Testing compatibility for function defined with *args")
    test_func_args(func_old_args)
    test_func_args(func_new)
    print("Testing compatibility for function defined with **kwargs")
    test_func_kwargs(func_old_kwargs)
    test_func_kwargs(func_new)
    print("All tests successful - we can change *args and **kwargs to"
          " named args.")
    return 0
Main function; calls the test functions
def CrearPlantillaPDF(self, papel="A4", orientacion="portrait"):
    "Start the creation of the PDF file"
    # create the renderer with the PDF properties
    t = Template(
        format=papel, orientation=orientacion,
        title="F 1116 B/C %s" % (self.NroOrden),
        author="CUIT %s" % self.Cuit,
        subject="COE %s" % self.params_out.get('coe'),
        keywords="AFIP Liquidacion Electronica Primaria de Granos",
        creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__,
    )
    self.template = t
    return True
Start the creation of the PDF file
def safestr(value):
    '''Ensure type to string serialization'''
    if not value or isinstance(value, (int, float, bool, long)):
        return value
    elif isinstance(value, (date, datetime)):
        return value.isoformat()
    else:
        return unicode(value)
Ensure type to string serialization
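A few illustrative calls (Python 2, since the function relies on the long and unicode builtins):

from datetime import date

print(safestr(42))                 # 42 (numbers pass through unchanged)
print(safestr(date(2019, 1, 31)))  # 2019-01-31
print(safestr({'a': 1}))           # {'a': 1} (coerced with unicode())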
def get_comment_form_for_create(self, reference_id, comment_record_types):
    """Gets the comment form for creating new comments.

    A new form should be requested for each create transaction.

    arg:    reference_id (osid.id.Id): the ``Id`` for the reference
            object
    arg:    comment_record_types (osid.type.Type[]): array of comment
            record types
    return: (osid.commenting.CommentForm) - the comment form
    raise:  NullArgument - ``reference_id or comment_record_types`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - unable to get form for requested record types
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.relationship.CommentAdminSession.get_comment_form_for_create_template
    # These really need to be in module imports:
    if not isinstance(reference_id, ABCId):
        raise errors.InvalidArgument('argument is not a valid OSID Id')
    for arg in comment_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    if comment_record_types == []:
        # WHY are we passing book_id = self._catalog_id below, seems redundant:
        # Probably don't need to send effective_agent_id, since the form can get that from proxy.
        obj_form = objects.CommentForm(
            book_id=self._catalog_id,
            reference_id=reference_id,
            effective_agent_id=str(self.get_effective_agent_id()),
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    else:
        obj_form = objects.CommentForm(
            book_id=self._catalog_id,
            record_types=comment_record_types,
            reference_id=reference_id,
            effective_agent_id=self.get_effective_agent_id(),
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    obj_form._for_update = False
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
Gets the comment form for creating new comments.

A new form should be requested for each create transaction.

arg:    reference_id (osid.id.Id): the ``Id`` for the reference object
arg:    comment_record_types (osid.type.Type[]): array of comment
        record types
return: (osid.commenting.CommentForm) - the comment form
raise:  NullArgument - ``reference_id or comment_record_types`` is
        ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
def gen_df_forcing(
        path_csv_in='SSss_YYYY_data_tt.csv',
        url_base=url_repo_input) -> pd.DataFrame:
    '''Generate description info of supy forcing data into a dataframe

    Parameters
    ----------
    path_csv_in : str, optional
        path to the input csv file relative to url_base
        (the default is '/input_files/SSss_YYYY_data_tt.csv')
    url_base : urlpath.URL, optional
        URL to the input files of repo base
        (the default is url_repo_input, which is defined at the top of
        this file)

    Returns
    -------
    pd.DataFrame
        Description info of supy forcing data
    '''
    try:
        # load info from SUEWS docs repo
        # this is regarded as the official source
        urlpath_table = url_base / path_csv_in
        df_var_info = pd.read_csv(urlpath_table)
    except Exception:
        print(f'{urlpath_table} not existing!')
    else:
        # clean info dataframe
        df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1)

        # set index with `Column name`
        df_var_forcing = df_var_forcing.set_index('Column Name')
        df_var_forcing.index = df_var_forcing.index\
            .map(lambda x: x.replace('`', ''))\
            .rename('variable')

        # add `Second` info
        df_var_forcing.loc['isec'] = 'Second [S]'

        return df_var_forcing
Generate description info of supy forcing data into a dataframe

Parameters
----------
path_csv_in : str, optional
    path to the input csv file relative to url_base
    (the default is '/input_files/SSss_YYYY_data_tt.csv')
url_base : urlpath.URL, optional
    URL to the input files of repo base
    (the default is url_repo_input, which is defined at the top of this file)

Returns
-------
pd.DataFrame
    Description info of supy forcing data
def window_size(self):
    """
    Returns:
        namedtuple: eg Size(width=320, height=568)
    """
    value = self.http.get('/window/size').value
    w = roundint(value['width'])
    h = roundint(value['height'])
    return namedtuple('Size', ['width', 'height'])(w, h)
Returns:
    namedtuple: eg Size(width=320, height=568)
def p_const_vector_elem_list(p):
    """ const_number_list : expr
    """
    if p[1] is None:
        return

    if not is_static(p[1]):
        if isinstance(p[1], symbols.UNARY):
            tmp = make_constexpr(p.lineno(1), p[1])
        else:
            api.errmsg.syntax_error_not_constant(p.lexer.lineno)
            p[0] = None
            return
    else:
        tmp = p[1]

    p[0] = [tmp]
const_number_list : expr
def get_loss_func(self, C=1.0, k=1):
    """Get loss function of VAE.

    The loss value is equal to ELBO (Evidence Lower Bound)
    multiplied by -1.

    Args:
        C (int): Usually this is 1.0. Can be changed to control the
            second term of ELBO bound, which works as regularization.
        k (int): Number of Monte Carlo samples used in encoded vector.
    """
    def lf(x):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # reconstruction loss
        rec_loss = 0
        for l in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                / (k * batchsize)
        self.rec_loss = rec_loss
        self.loss = self.rec_loss + \
            C * gaussian_kl_divergence(mu, ln_var) / batchsize
        return self.loss
    return lf
Get loss function of VAE.

The loss value is equal to ELBO (Evidence Lower Bound) multiplied
by -1.

Args:
    C (int): Usually this is 1.0. Can be changed to control the second
        term of ELBO bound, which works as regularization.
    k (int): Number of Monte Carlo samples used in encoded vector.
def send_request(self, kind, url_components, **kwargs):
    """
    Send a request for this resource to the API

    Parameters
    ----------
    kind: str, {'get', 'delete', 'put', 'post', 'head'}
    """
    return self.api.send_request(kind, self.resource_path, url_components,
                                 **kwargs)
Send a request for this resource to the API

Parameters
----------
kind: str, {'get', 'delete', 'put', 'post', 'head'}
def well(self, well_x=1, well_y=1):
    """ScanWellData of specific well.

    Parameters
    ----------
    well_x : int
    well_y : int

    Returns
    -------
    lxml.objectify.ObjectifiedElement
    """
    xpath = './ScanWellData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    # assume we find only one
    return self.well_array.find(xpath)
ScanWellData of specific well.

Parameters
----------
well_x : int
well_y : int

Returns
-------
lxml.objectify.ObjectifiedElement
def _low_level_dispatch(pcapdev, devname, pktqueue):
    '''
    Thread entrypoint for doing low-level receive and dispatch
    for a single pcap device.
    '''
    while LLNetReal.running:
        # a non-zero timeout value is ok here; this is an
        # independent thread that handles input for this
        # one pcap device.  it throws any packets received
        # into the shared queue (which is read by the actual
        # user code)
        pktinfo = pcapdev.recv_packet(timeout=0.2)
        if pktinfo is None:
            continue
        pktqueue.put((devname, pcapdev.dlt, pktinfo))

    log_debug("Receiver thread for {} exiting".format(devname))
    stats = pcapdev.stats()
    log_debug("Final device statistics {}: {} received, {} dropped, "
              "{} dropped/if".format(devname, stats.ps_recv,
                                     stats.ps_drop, stats.ps_ifdrop))
Thread entrypoint for doing low-level receive and dispatch for a single pcap device.
def fix_encoding_and_explain(text):
    """
    Re-decodes text that has been decoded incorrectly, and also return a
    "plan" indicating all the steps required to fix it. The resulting plan
    could be used with :func:`ftfy.fixes.apply_plan` to fix additional
    strings that are broken in the same way.
    """
    best_version = text
    best_cost = text_cost(text)
    best_plan = []
    plan_so_far = []
    while True:
        prevtext = text
        text, plan = fix_one_step_and_explain(text)
        plan_so_far.extend(plan)
        cost = text_cost(text)

        for _, _, step_cost in plan_so_far:
            cost += step_cost

        if cost < best_cost:
            best_cost = cost
            best_version = text
            best_plan = list(plan_so_far)
        if text == prevtext:
            return best_version, best_plan
Re-decodes text that has been decoded incorrectly, and also return a "plan" indicating all the steps required to fix it. The resulting plan could be used with :func:`ftfy.fixes.apply_plan` to fix additional strings that are broken in the same way.
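This is ftfy's explain-while-fixing entry point; a usage sketch on a classic UTF-8-read-as-Latin-1 string (the exact plan tuples vary between ftfy versions):

fixed, plan = fix_encoding_and_explain("sÃ©rie")
print(fixed)  # 'série'
print(plan)   # e.g. [('encode', 'latin-1', 0), ('decode', 'utf-8', 0)]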
def washburn(target, surface_tension='pore.surface_tension',
             contact_angle='pore.contact_angle',
             diameter='throat.diameter'):
    r"""
    Computes the capillary entry pressure assuming the throat is a
    cylindrical tube.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    surface_tension : string
        The dictionary key containing the surface tension values to be
        used. If a pore property is given, it is interpolated to a
        throat list.
    contact_angle : string
        The dictionary key containing the contact angle values to be
        used. If a pore property is given, it is interpolated to a
        throat list.
    diameter : string
        The dictionary key containing the throat diameter values to be
        used.

    Notes
    -----
    The Washburn equation is:

    .. math::
        P_c = -\frac{2\sigma\cos(\theta)}{r}

    This is the most basic approach to calculating entry pressure and is
    suitable for highly non-wetting invading phases in most materials.
    """
    network = target.project.network
    phase = target.project.find_phase(target)
    element, sigma, theta = _get_key_props(phase=phase,
                                           diameter=diameter,
                                           surface_tension=surface_tension,
                                           contact_angle=contact_angle)
    r = network[diameter]/2
    value = -2*sigma*_sp.cos(_sp.radians(theta))/r
    if diameter.split('.')[0] == 'throat':
        value = value[phase.throats(target.name)]
    else:
        value = value[phase.pores(target.name)]
    value[_sp.absolute(value) == _sp.inf] = 0
    return value
r""" Computes the capillary entry pressure assuming the throat in a cylindrical tube. Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. surface_tension : string The dictionary key containing the surface tension values to be used. If a pore property is given, it is interpolated to a throat list. contact_angle : string The dictionary key containing the contact angle values to be used. If a pore property is given, it is interpolated to a throat list. diameter : string The dictionary key containing the throat diameter values to be used. Notes ----- The Washburn equation is: .. math:: P_c = -\frac{2\sigma(cos(\theta))}{r} This is the most basic approach to calculating entry pressure and is suitable for highly non-wetting invading phases in most materials.
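A quick numeric sanity check of the Washburn equation itself, outside OpenPNM (water-like surface tension of 0.072 N/m, a non-wetting contact angle of 110 degrees, and a 1 micron throat radius; all values illustrative):

import math

sigma = 0.072             # N/m
theta = math.radians(110)
r = 1e-6                  # m
Pc = -2 * sigma * math.cos(theta) / r
print(round(Pc))          # 49251, i.e. roughly 49 kPa entry pressure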
def _group_get_hostnames(self, group_name):
    """
    Recursively fetch a list of each unique hostname that belongs in or
    under the group. This includes hosts in children groups.
    """
    hostnames = []

    hosts_section = self._get_section(group_name, 'hosts')
    if hosts_section:
        for entry in hosts_section['entries']:
            hostnames.extend(self.expand_hostdef(entry['name']))

    children_section = self._get_section(group_name, 'children')
    if children_section:
        for entry in children_section['entries']:
            hostnames.extend(self._group_get_hostnames(entry['name']))

    return hostnames
Recursively fetch a list of each unique hostname that belongs in or under the group. This includes hosts in children groups.
def preprocess_belscript(lines):
    """ Convert any multi-line SET statements into single line SET statements"""
    set_flag = False
    for line in lines:
        if set_flag is False and re.match("SET", line):
            set_flag = True
            set_line = [line.rstrip()]
        # SET following SET
        elif set_flag and re.match("SET", line):
            yield f"{' '.join(set_line)}\n"
            set_line = [line.rstrip()]
        # Blank line following SET yields single line SET
        elif set_flag and re.match(r"\s+$", line):
            yield f"{' '.join(set_line)}\n"
            yield line
            set_flag = False
        # Append second, third, ... lines to SET
        elif set_flag:
            set_line.append(line.rstrip())
        else:
            yield line
Convert any multi-line SET statements into single line SET statements
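A small usage sketch (the BEL lines are illustrative; the multi-line SET is folded onto one line while other lines pass through):

lines = [
    'SET Citation = {"PubMed",\n',
    '    "12345"}\n',
    '   \n',
    'other statement\n',
]
print(''.join(preprocess_belscript(lines)))
# SET Citation = {"PubMed", "12345"}
# (the blank line is preserved, then: other statement)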
def get_jamo_class(jamo):
    """Determine if a jamo character is a lead, vowel, or tail.
    Integers and U+11xx characters are valid arguments. HCJ consonants are
    not valid here.

    get_jamo_class should return the class ["lead" | "vowel" | "tail"] of
    a given character or integer.

    Note: jamo class directly corresponds to the Unicode 7.0 specification,
    thus includes filler characters as having a class.
    """
    # TODO: Perhaps raise a separate error for U+3xxx jamo.
    if jamo in JAMO_LEADS or jamo == chr(0x115F):
        return "lead"
    if jamo in JAMO_VOWELS or jamo == chr(0x1160) or\
            0x314F <= ord(jamo) <= 0x3163:
        return "vowel"
    if jamo in JAMO_TAILS:
        return "tail"
    else:
        raise InvalidJamoError("Invalid or classless jamo argument.", jamo)
Determine if a jamo character is a lead, vowel, or tail.
Integers and U+11xx characters are valid arguments. HCJ consonants are
not valid here.

get_jamo_class should return the class ["lead" | "vowel" | "tail"] of
a given character or integer.

Note: jamo class directly corresponds to the Unicode 7.0 specification,
thus includes filler characters as having a class.
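A usage sketch with one character from each modern-jamo range (U+1100 is a lead consonant, U+1161 a vowel, U+11A8 a tail consonant), assuming the module's JAMO_* constants are defined as usual:

print(get_jamo_class('\u1100'))  # 'lead'
print(get_jamo_class('\u1161'))  # 'vowel'
print(get_jamo_class('\u11a8'))  # 'tail'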
def upload_file(self, simple_upload_url, chunked_upload_url, file_obj,
                chunk_size=CHUNK_SIZE, force_chunked=False, extra_data=None):
    """
    Generic method to upload files to AmigoCloud. Can be used for
    different API endpoints.

    `file_obj` could be a file-like object or a filepath.

    If the size of the file is greater than MAX_SIZE_SIMPLE_UPLOAD (8MB)
    `chunked_upload_url` will be used, otherwise `simple_upload_url`
    will be.

    If `simple_upload_url` evaluates to False, or `force_chunked` is
    True, the `chunked_upload_url` will always be used.
    """
    if isinstance(file_obj, string_types):
        # file_obj is a filepath: open file and close it at the end
        file_obj = open(file_obj, 'rb')
        close_file = True
    else:
        # assume file_obj is a file-like object
        close_file = False

    # Get file size
    file_obj.seek(0, os.SEEK_END)
    file_size = file_obj.tell()
    file_obj.seek(0)

    try:
        # Simple upload?
        if (simple_upload_url and not force_chunked
                and file_size < MAX_SIZE_SIMPLE_UPLOAD):
            return self.post(simple_upload_url, data=extra_data,
                             files={'datafile': file_obj})

        # Chunked upload
        data = {}
        md5_hash = hashlib.md5()
        start_byte = 0
        while True:
            chunk = file_obj.read(chunk_size)
            md5_hash.update(chunk)
            end_byte = start_byte + len(chunk) - 1
            content_range = 'bytes %d-%d/%d' % (start_byte, end_byte,
                                                file_size)
            ret = self.post(chunked_upload_url, data=data,
                            files={'datafile': chunk},
                            headers={'Content-Range': content_range})
            data.setdefault('upload_id', ret['upload_id'])
            start_byte = end_byte + 1
            if start_byte == file_size:
                break

        # Complete request
        if chunked_upload_url.endswith('/'):
            chunked_upload_complete_url = chunked_upload_url + 'complete'
        else:
            chunked_upload_complete_url = chunked_upload_url + '/complete'
        data['md5'] = md5_hash.hexdigest()
        if extra_data:
            data.update(extra_data)
        return self.post(chunked_upload_complete_url, data=data)
    finally:
        if close_file:
            file_obj.close()
Generic method to upload files to AmigoCloud. Can be used for
different API endpoints.

`file_obj` could be a file-like object or a filepath.

If the size of the file is greater than MAX_SIZE_SIMPLE_UPLOAD (8MB)
`chunked_upload_url` will be used, otherwise `simple_upload_url` will be.

If `simple_upload_url` evaluates to False, or `force_chunked` is True,
the `chunked_upload_url` will always be used.
def daily_forecast_at_coords(self, lat, lon, limit=None):
    """
    Queries the OWM Weather API for daily weather forecast for the
    specified geographic coordinate (eg: latitude: 51.5073509,
    longitude: -0.1277583).
    A *Forecaster* object is returned, containing a *Forecast* instance
    covering a global streak of fourteen days by default: this instance
    encapsulates *Weather* objects, with a time interval of one day
    from each other

    :param lat: location's latitude, must be between -90.0 and 90.0
    :type lat: int/float
    :param lon: location's longitude, must be between -180.0 and 180.0
    :type lon: int/float
    :param limit: the maximum number of daily *Weather* items to be
        retrieved (default is ``None``, which stands for any number of
        items)
    :type limit: int or ``None``
    :returns: a *Forecaster* instance or ``None`` if forecast data is
        not available for the specified location
    :raises: *ParseResponseException* when OWM Weather API responses'
        data cannot be parsed, *APICallException* when OWM Weather API
        can not be reached, *ValueError* if negative values are supplied
        for limit
    """
    geo.assert_is_lon(lon)
    geo.assert_is_lat(lat)
    if limit is not None:
        assert isinstance(limit, int), "'limit' must be an int or None"
        if limit < 1:
            raise ValueError("'limit' must be None or greater than zero")
    params = {'lon': lon, 'lat': lat, 'lang': self._language}
    if limit is not None:
        params['cnt'] = limit
    uri = http_client.HttpClient.to_url(DAILY_FORECAST_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    forecast = self._parsers['forecast'].parse_JSON(json_data)
    if forecast is not None:
        forecast.set_interval("daily")
        return forecaster.Forecaster(forecast)
    else:
        return None
Queries the OWM Weather API for daily weather forecast for the specified
geographic coordinate (eg: latitude: 51.5073509, longitude: -0.1277583).
A *Forecaster* object is returned, containing a *Forecast* instance
covering a global streak of fourteen days by default: this instance
encapsulates *Weather* objects, with a time interval of one day from
each other

:param lat: location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param limit: the maximum number of daily *Weather* items to be
    retrieved (default is ``None``, which stands for any number of items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
    available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
    cannot be parsed, *APICallException* when OWM Weather API can not be
    reached, *ValueError* if negative values are supplied for limit
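A usage sketch in the style of pyowm 2.x, where this method lives (the API key is a placeholder and the wiring around OWM() is an assumption):

owm = OWM('your-api-key')  # assumed client factory exposing this method
fc = owm.daily_forecast_at_coords(51.5073509, -0.1277583, limit=5)
if fc is not None:
    for weather in fc.get_forecast():
        print(weather.get_reference_time('iso'), weather.get_status())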
def perform_extended_selection(self, event=None): """ Performs extended word selection. :param event: QMouseEvent """ TextHelper(self.editor).select_extended_word( continuation_chars=self.continuation_characters) if event: event.accept()
def p_delays_floatnumber(self, p): 'delays : DELAY floatnumber' p[0] = DelayStatement(FloatConst( p[2], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def _is_builtin_module(module):
    """Is builtin or part of standard library
    """
    if (not hasattr(module, '__file__')) or module.__name__ in sys.builtin_module_names:
        return True
    if module.__name__ in _stdlib._STD_LIB_MODULES:
        return True
    amp = os.path.abspath(module.__file__)
    if 'site-packages' in amp:
        return False
    if amp.startswith(_STD_MODULE_DIR):
        return True
    if '.' not in module.__name__:
        return False
    mn_top = module.__name__.split('.')[0]
    return mn_top in _stdlib._STD_LIB_MODULES
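A self-contained approximation of the same classification using only the standard library (a sketch — _stdlib._STD_LIB_MODULES and _STD_MODULE_DIR above are module-private and not reproduced here):

import json
import os
import sys
import sysconfig

def looks_like_stdlib(module):
    # Builtins have no __file__; everything else is classified by path.
    if not hasattr(module, '__file__') or module.__name__ in sys.builtin_module_names:
        return True
    path = os.path.abspath(module.__file__)
    if 'site-packages' in path:
        return False
    return path.startswith(sysconfig.get_paths()['stdlib'])

assert looks_like_stdlib(sys)   # true builtin
assert looks_like_stdlib(json)  # standard library, file-backed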
def services(doc): """View for getting services""" for service_id, service in doc.get('services', {}).items(): service_type = service.get('service_type') org = doc['_id'] service['id'] = service_id service['organisation_id'] = org yield service_id, service yield [service_type, org], service yield [service_type, None], service yield [None, org], service yield [None, None], service
def get_checksum32(oqparam, hazard=False):
    """
    Build an unsigned 32 bit integer from the input files of a calculation.

    :param oqparam: an OqParam instance
    :param hazard: if True, consider only the hazard files
    :returns: the checksum
    """
    # NB: using adler32 & 0xffffffff is the documented way to get a checksum
    # which is the same between Python 2 and Python 3
    checksum = 0
    for fname in get_input_files(oqparam, hazard):
        checksum = _checksum(fname, checksum)
    if hazard:
        hazard_params = []
        for key, val in vars(oqparam).items():
            if key in ('rupture_mesh_spacing', 'complex_fault_mesh_spacing',
                       'width_of_mfd_bin', 'area_source_discretization',
                       'random_seed', 'ses_seed', 'truncation_level',
                       'maximum_distance', 'investigation_time',
                       'number_of_logic_tree_samples', 'imtls',
                       'ses_per_logic_tree_path', 'minimum_magnitude',
                       'prefilter_sources', 'sites',
                       'pointsource_distance', 'filter_distance'):
                hazard_params.append('%s = %s' % (key, val))
        data = '\n'.join(hazard_params).encode('utf8')
        checksum = zlib.adler32(data, checksum) & 0xffffffff
    return checksum
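The chaining trick is worth seeing in isolation: passing the previous value back into zlib.adler32 makes incremental checksumming identical to checksumming the concatenation, and the 0xffffffff mask keeps the result an unsigned 32-bit integer across Python versions.

import zlib

def chained_adler32(chunks, checksum=0):
    # Carry the running value so the result equals a one-shot checksum
    # of the concatenated bytes.
    for chunk in chunks:
        checksum = zlib.adler32(chunk, checksum) & 0xffffffff
    return checksum

assert chained_adler32([b'hello ', b'world']) == zlib.adler32(b'hello world') & 0xffffffff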
def traverse(self, node):
    """Traverse the document tree rooted at node.

    node : docutils node
        current root node to traverse
    """
    old_level = self.current_level
    if isinstance(node, nodes.section):
        if 'level' in node:
            self.current_level = node['level']
    to_visit = []
    to_replace = []
    for c in node.children[:]:
        newnode = self.find_replace(c)
        if newnode is not None:
            to_replace.append((c, newnode))
        else:
            to_visit.append(c)

    for oldnode, newnodes in to_replace:
        node.replace(oldnode, newnodes)

    for child in to_visit:
        self.traverse(child)
    self.current_level = old_level
def get_closest(cls, *locale_codes: str) -> "Locale": """Returns the closest match for the given locale code.""" for code in locale_codes: if not code: continue code = code.replace("-", "_") parts = code.split("_") if len(parts) > 2: continue elif len(parts) == 2: code = parts[0].lower() + "_" + parts[1].upper() if code in _supported_locales: return cls.get(code) if parts[0].lower() in _supported_locales: return cls.get(parts[0].lower()) return cls.get(_default_locale)
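The normalization step above, extracted as a runnable helper: dashes become underscores and two-part codes are canonicalized to lowercase language plus uppercase country; codes with more than two parts are rejected (here by returning None).

def normalize_locale_code(code):
    code = code.replace("-", "_")
    parts = code.split("_")
    if len(parts) > 2:
        return None  # e.g. 'zh_Hant_TW' is skipped by get_closest
    if len(parts) == 2:
        return parts[0].lower() + "_" + parts[1].upper()
    return parts[0].lower()

assert normalize_locale_code("en-us") == "en_US"
assert normalize_locale_code("PT") == "pt"
assert normalize_locale_code("zh_Hant_TW") is None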
def ParseFileSystemsStruct(struct_class, fs_count, data): """Take the struct type and parse it into a list of structs.""" results = [] cstr = lambda x: x.split(b"\x00", 1)[0] for count in range(0, fs_count): struct_size = struct_class.GetSize() s_data = data[count * struct_size:(count + 1) * struct_size] s = struct_class(s_data) s.f_fstypename = cstr(s.f_fstypename) s.f_mntonname = cstr(s.f_mntonname) s.f_mntfromname = cstr(s.f_mntfromname) results.append(s) return results
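The cstr lambda above truncates C-style NUL-terminated byte strings, which is what fixed-width struct fields contain; a standalone equivalent:

def cstr(raw):
    # Keep everything before the first NUL byte; a missing NUL is harmless.
    return raw.split(b"\x00", 1)[0]

assert cstr(b"ext4\x00\x00garbage") == b"ext4"
assert cstr(b"no-terminator") == b"no-terminator"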
def dumps_content(self): """Return a string representing the matrix in LaTeX syntax. Returns ------- str """ import numpy as np string = '' shape = self.matrix.shape for (y, x), value in np.ndenumerate(self.matrix): if x: string += '&' string += str(value) if x == shape[1] - 1 and y != shape[0] - 1: string += r'\\' + '%\n' super().dumps_content() return string
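The same traversal as a free function, to make the output shape concrete: cells within a row are joined by '&', and every row except the last is terminated by '\\' plus a '%'-escaped newline so LaTeX ignores the literal line break.

import numpy as np

def latex_matrix_body(matrix):
    string = ''
    rows, cols = matrix.shape
    for (y, x), value in np.ndenumerate(matrix):
        if x:
            string += '&'
        string += str(value)
        if x == cols - 1 and y != rows - 1:
            string += r'\\' + '%\n'
    return string

assert latex_matrix_body(np.array([[1, 2], [3, 4]])) == '1&2\\\\%\n3&4'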
def diff(left, right): """ Take two VCALENDAR components, compare VEVENTs and VTODOs in them, return a list of object pairs containing just UID and the bits that didn't match, using None for objects that weren't present in one version or the other. When there are multiple ContentLines in one VEVENT, for instance many DESCRIPTION lines, such lines original order is assumed to be meaningful. Order is also preserved when comparing (the unlikely case of) multiple parameters of the same type in a ContentLine """ def processComponentLists(leftList, rightList): output = [] rightIndex = 0 rightListSize = len(rightList) for comp in leftList: if rightIndex >= rightListSize: output.append((comp, None)) else: leftKey = getSortKey(comp) rightComp = rightList[rightIndex] rightKey = getSortKey(rightComp) while leftKey > rightKey: output.append((None, rightComp)) rightIndex += 1 if rightIndex >= rightListSize: output.append((comp, None)) break else: rightComp = rightList[rightIndex] rightKey = getSortKey(rightComp) if leftKey < rightKey: output.append((comp, None)) elif leftKey == rightKey: rightIndex += 1 matchResult = processComponentPair(comp, rightComp) if matchResult is not None: output.append(matchResult) return output def newComponent(name, body): if body is None: return None else: c = Component(name) c.behavior = getBehavior(name) c.isNative = True return c def processComponentPair(leftComp, rightComp): """ Return None if a match, or a pair of components including UIDs and any differing children. """ leftChildKeys = leftComp.contents.keys() rightChildKeys = rightComp.contents.keys() differentContentLines = [] differentComponents = {} for key in leftChildKeys: rightList = rightComp.contents.get(key, []) if isinstance(leftComp.contents[key][0], Component): compDifference = processComponentLists(leftComp.contents[key], rightList) if len(compDifference) > 0: differentComponents[key] = compDifference elif leftComp.contents[key] != rightList: differentContentLines.append((leftComp.contents[key], rightList)) for key in rightChildKeys: if key not in leftChildKeys: if isinstance(rightComp.contents[key][0], Component): differentComponents[key] = ([], rightComp.contents[key]) else: differentContentLines.append(([], rightComp.contents[key])) if len(differentContentLines) == 0 and len(differentComponents) == 0: return None else: left = newFromBehavior(leftComp.name) right = newFromBehavior(leftComp.name) # add a UID, if one existed, despite the fact that they'll always be # the same uid = leftComp.getChildValue('uid') if uid is not None: left.add( 'uid').value = uid right.add('uid').value = uid for name, childPairList in differentComponents.items(): leftComponents, rightComponents = zip(*childPairList) if len(leftComponents) > 0: # filter out None left.contents[name] = filter(None, leftComponents) if len(rightComponents) > 0: # filter out None right.contents[name] = filter(None, rightComponents) for leftChildLine, rightChildLine in differentContentLines: nonEmpty = leftChildLine or rightChildLine name = nonEmpty[0].name if leftChildLine is not None: left.contents[name] = leftChildLine if rightChildLine is not None: right.contents[name] = rightChildLine return left, right vevents = processComponentLists(sortByUID(getattr(left, 'vevent_list', [])), sortByUID(getattr(right, 'vevent_list', []))) vtodos = processComponentLists(sortByUID(getattr(left, 'vtodo_list', [])), sortByUID(getattr(right, 'vtodo_list', []))) return vevents + vtodos
def get_platform_info():
    """Gets platform info

    :return: platform info
    """
    try:
        system_name = platform.system()
        release_name = platform.release()
    except Exception:  # narrowed from a bare except, which would also swallow SystemExit
        system_name = "Unknown"
        release_name = "Unknown"

    return {
        'system': system_name,
        'release': release_name,
    }
def merge(self, other_rel):
    """
    Ingest another DistributedReliability and add its contents to the current object.

    Args:
        other_rel: a DistributedReliability object.
    """
    if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):
        self.frequencies += other_rel.frequencies
    else:
        print("Input table thresholds do not match.")
def emailComment(comment, obj, request): """Send an email to the author about a new comment""" if not obj.author.frog_prefs.get().json()['emailComments']: return if obj.author == request.user: return html = render_to_string('frog/comment_email.html', { 'user': comment.user, 'comment': comment.comment, 'object': obj, 'action_type': 'commented on', 'image': isinstance(obj, Image), 'SITE_URL': FROG_SITE_URL, }) subject = '{}: Comment from {}'.format(getSiteConfig()['name'], comment.user_name) fromemail = comment.user_email to = obj.author.email text_content = 'This is an important message.' html_content = html send_mail(subject, text_content, fromemail, [to], html_message=html_content)
def _hole_end(self, position, ignore=None): """ Retrieves the end of hole index from position. :param position: :type position: :param ignore: :type ignore: :return: :rtype: """ for rindex in range(position, self.max_end): for starting in self.starting(rindex): if not ignore or not ignore(starting): return rindex return self.max_end
def run(self, input): """Runs :attr:`executable` with ``input`` as stdin. :class:`AssetHandlerError` exception is raised, if execution is failed, otherwise stdout is returned. """ p = self.get_process() output, errors = p.communicate(input=input.encode('utf-8')) if p.returncode != 0: raise AssetHandlerError(errors) return output.decode('utf-8')
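The communicate() round-trip used above, with cat standing in for self.get_process() (a POSIX-only stand-in; in the handler the process is built from :attr:`executable`):

import subprocess

p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate(input='héllo'.encode('utf-8'))
assert p.returncode == 0
assert output.decode('utf-8') == 'héllo'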
def check( state_engine, nameop, block_id, checked_ops ):
    """
    Given a NAMESPACE_PREORDER nameop, see if we can preorder it.
    It must be unique.

    Return True if accepted.
    Return False if not.
    """

    namespace_id_hash = nameop['preorder_hash']
    consensus_hash = nameop['consensus_hash']
    token_fee = nameop['token_fee']

    # cannot be preordered already
    if not state_engine.is_new_namespace_preorder( namespace_id_hash ):
        log.warning("Namespace preorder '%s' already in use" % namespace_id_hash)
        return False

    # has to have a reasonable consensus hash
    if not state_engine.is_consensus_hash_valid( block_id, consensus_hash ):
        valid_consensus_hashes = state_engine.get_valid_consensus_hashes( block_id )
        log.warning("Invalid consensus hash '%s': expected any of %s" % (consensus_hash, ",".join( valid_consensus_hashes )) )
        return False

    # has to have paid a fee
    if 'op_fee' not in nameop:
        log.warning("Missing namespace preorder fee")
        return False

    # paid to the right burn address
    if nameop['burn_address'] != BLOCKSTACK_BURN_ADDRESS:
        log.warning("Invalid burn address: expected {}, got {}".format(BLOCKSTACK_BURN_ADDRESS, nameop['burn_address']))
        return False

    # token burn fee must be present, if we're in the right epoch for it
    epoch_features = get_epoch_features(block_id)
    if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features:
        # must pay in STACKs
        if 'token_fee' not in nameop:
            log.warning("Missing token fee")
            return False

        token_fee = nameop['token_fee']
        token_address = nameop['address']
        token_type = TOKEN_TYPE_STACKS

        # was a token fee paid?
        if token_fee is None:
            log.warning("No tokens paid by this NAMESPACE_PREORDER")
            return False

        # does this account have enough balance?
        account_info = state_engine.get_account(token_address, token_type)
        if account_info is None:
            log.warning("No account for {} ({})".format(token_address, token_type))
            return False

        account_balance = state_engine.get_account_balance(account_info)

        assert isinstance(account_balance, (int,long)), 'BUG: account_balance of {} is {} (type {})'.format(token_address, account_balance, type(account_balance))
        assert isinstance(token_fee, (int,long)), 'BUG: token_fee is {} (type {})'.format(token_fee, type(token_fee))

        if account_balance < token_fee:
            # can't afford
            log.warning("Account {} has balance {} {}, but needs to pay {} {}".format(token_address, account_balance, token_type, token_fee, token_type))
            return False

        # debit this account when we commit
        state_preorder_put_account_payment_info(nameop, token_address, token_type, token_fee)

        # NOTE: must be a string, to avoid overflow
        nameop['token_fee'] = '{}'.format(token_fee)
        nameop['token_units'] = TOKEN_TYPE_STACKS

    else:
        # must pay in BTC
        # not paying in tokens, but say so!
        state_preorder_put_account_payment_info(nameop, None, None, None)
        nameop['token_fee'] = '0'
        nameop['token_units'] = 'BTC'

    return True
def opt_pagesize(self, pagesize): """ Get or set the page size of the query output """ if pagesize != "auto": pagesize = int(pagesize) self.conf["pagesize"] = pagesize
def swarm(self, predictedField=None, swarmParams=None): """ Runs a swarm on data and swarm description found within the given working directory. If no predictedField is provided, it is assumed that the first stream listed in the streamIds provided to the Menorah constructor is the predicted field. :param predictedField: (string) :param swarmParams: (dict) overrides any swarm params :return: """ self.prepareSwarm( predictedField=predictedField, swarmParams=swarmParams ) self.runSwarm(self._workingDir)
def create_button_label(icon, font_size=constants.FONT_SIZE_NORMAL): """Create a button label with a chosen icon. :param icon: The icon :param font_size: The size of the icon :return: The created label """ label = Gtk.Label() set_label_markup(label, '&#x' + icon + ';', constants.ICON_FONT, font_size) label.show() return label
def draw(self, label, expire): """ Return a Serial number for this resource queue, after bootstrapping. """ # get next number with self.client.pipeline() as pipe: pipe.msetnx({self.keys.dispenser: 0, self.keys.indicator: 1}) pipe.incr(self.keys.dispenser) number = pipe.execute()[-1] # publish for humans self.message('{} assigned to "{}"'.format(number, label)) # launch keeper kwargs = {'client': self.client, 'key': self.keys.key(number)} keeper = Keeper(label=label, expire=expire, **kwargs) try: yield number except: self.message('{} crashed!'.format(number)) raise finally: keeper.close() self.message('{} completed by "{}"'.format(number, label)) number += 1 self.client.set(self.keys.indicator, number) self.announce(number)
def highlight(self, rect, color="red", seconds=None): """ Simulates a transparent rectangle over the specified ``rect`` on the screen. Actually takes a screenshot of the region and displays with a rectangle border in a borderless window (due to Tkinter limitations) If a Tkinter root window has already been created somewhere else, uses that instead of creating a new one. """ if tk._default_root is None: Debug.log(3, "Creating new temporary Tkinter root") temporary_root = True root = tk.Tk() root.withdraw() else: Debug.log(3, "Borrowing existing Tkinter root") temporary_root = False root = tk._default_root image_to_show = self.getBitmapFromRect(*rect) app = highlightWindow(root, rect, color, image_to_show) if seconds == 0: t = threading.Thread(target=app.do_until_timeout) t.start() return app app.do_until_timeout(seconds)
def img(self): '''return a cv image for the icon''' SlipThumbnail.img(self) if self.rotation: # rotate the image mat = cv2.getRotationMatrix2D((self.height//2, self.width//2), -self.rotation, 1.0) self._rotated = cv2.warpAffine(self._img, mat, (self.height, self.width)) else: self._rotated = self._img return self._rotated
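A standalone version of the rotation above. Note the OpenCV conventions in play: cv2.getRotationMatrix2D takes the centre as (x, y) and cv2.warpAffine takes dsize as (width, height), so the (height, width) ordering in the method above is only safe for square icons.

import cv2
import numpy as np

img = np.zeros((48, 48, 3), dtype=np.uint8)
angle = 30  # degrees; negated below so positive values rotate clockwise
h, w = img.shape[:2]
mat = cv2.getRotationMatrix2D((w // 2, h // 2), -angle, 1.0)
rotated = cv2.warpAffine(img, mat, (w, h))
assert rotated.shape == img.shape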
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: TerminatingSipDomainContext for this TerminatingSipDomainInstance :rtype: twilio.rest.trunking.v1.trunk.terminating_sip_domain.TerminatingSipDomainContext """ if self._context is None: self._context = TerminatingSipDomainContext( self._version, trunk_sid=self._solution['trunk_sid'], sid=self._solution['sid'], ) return self._context
def fft(a, n=None, axis=-1, norm=None): """ Compute the one-dimensional discrete Fourier Transform. This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) algorithm [CT]. Parameters ---------- a : array_like Input array, can be complex. n : int, optional Length of the transformed axis of the output. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. Raises ------ IndexError if `axes` is larger than the last axis of `a`. See Also -------- numpy.fft : for definition of the DFT and conventions used. ifft : The inverse of `fft`. fft2 : The two-dimensional FFT. fftn : The *n*-dimensional FFT. rfftn : The *n*-dimensional FFT of real input. fftfreq : Frequency bins for given FFT parameters. Notes ----- FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform (DFT) can be calculated efficiently, by using symmetries in the calculated terms. The symmetry is highest when `n` is a power of 2, and the transform is therefore most efficient for these sizes. The DFT is defined, with the conventions used in this implementation, in the documentation for the `numpy.fft` module. References ---------- .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the machine calculation of complex Fourier series," *Math. Comput.* 19: 297-301. Examples -------- >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([ -3.44505240e-16 +1.14383329e-17j, 8.00000000e+00 -5.71092652e-15j, 2.33482938e-16 +1.22460635e-16j, 1.64863782e-15 +1.77635684e-15j, 9.95839695e-17 +2.33482938e-16j, 0.00000000e+00 +1.66837030e-15j, 1.14383329e-17 +1.22460635e-16j, -1.64863782e-15 +1.77635684e-15j]) >>> import matplotlib.pyplot as plt >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) >>> plt.plot(freq, sp.real, freq, sp.imag) [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] >>> plt.show() In this example, real input has an FFT which is Hermitian, i.e., symmetric in the real part and anti-symmetric in the imaginary part, as described in the `numpy.fft` documentation. """ output = mkl_fft.fft(a, n, axis) if _unitary(norm): output *= 1 / sqrt(output.shape[axis]) return output
def render_cheetah_tmpl(tmplstr, context, tmplpath=None): ''' Render a Cheetah template. ''' from Cheetah.Template import Template return salt.utils.data.decode(Template(tmplstr, searchList=[context]))
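A minimal round-trip through Cheetah itself (requires the Cheetah3 package); the salt.utils.data.decode wrapper above is salt-specific normalization and is omitted here:

from Cheetah.Template import Template

rendered = str(Template('Hello $name', searchList=[{'name': 'world'}]))
assert rendered == 'Hello world'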
def appendData(self, content): """ Add characters to the element's pcdata. """ if self.pcdata is not None: self.pcdata += content else: self.pcdata = content
def _zforce(self,R,z,phi=0.,t=0.): """ NAME: _zforce PURPOSE: evaluate the vertical force for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the vertical force HISTORY: 2011-04-10 - Written - Bovy (NYU) 2018-10-18 - Updated for general object potential - James Lane (UofT) """ #Cylindrical distance Rdist = _cylR(R,phi,self._orb.R(t),self._orb.phi(t)) # Difference vector (xd,yd,zd) = _cyldiff(self._orb.R(t), self._orb.phi(t), self._orb.z(t), R, phi, z) #Evaluate and return z force return -evaluatezforces(self._pot,Rdist,zd, use_physical=False)
def header_body_from_content(content):
    """\
    Tries to extract the header and the message from the cable content.

    The header is something like

        UNCLASSIFIED ...
        SUBJECT ...
        REF ...

    while the message usually begins with a summary

        1. SUMMARY ...
        ...
        10. ...

    Returns (header, msg) or (None, None) if the header/message cannot
    be detected.

    `content`
        The "content" part of a cable.
    """
    m = _CLASSIFIED_BY_PATTERN.search(content)
    idx = m.end() if m else 0
    m = _SUMMARY_PATTERN.search(content)
    # `m.start() if m else None` (rather than `m and m.start() or None`)
    # keeps a match at offset 0 from being misread as "no match".
    summary_idx = m.start() if m else None
    m = _FIRST_PARAGRAPH_PATTERN.search(content)
    para_idx = m.start() if m else None
    if summary_idx is not None and para_idx is not None:
        idx = max(idx, min(summary_idx, para_idx))
    elif summary_idx is not None:
        idx = max(summary_idx, idx)
    elif para_idx is not None:
        idx = max(para_idx, idx)
    if idx > 0:
        return content[:idx], content[idx:]
    return None, None
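The slicing idiom reduced to a runnable sketch with stand-in patterns (the real module-level regexes are not shown in this snippet); using `m.start() if m else None` sidesteps the `m and m.start() or None` pitfall noted above, where a match at offset 0 reads as no match:

import re

_SUMMARY = re.compile(r'\d+\.\s*SUMMARY', re.IGNORECASE)
_PARA = re.compile(r'(?m)^\s*1\.\s')

def split_header_body(text):
    starts = [m.start() for m in (_SUMMARY.search(text), _PARA.search(text)) if m]
    if not starts:
        return None, None
    idx = min(starts)
    return text[:idx], text[idx:]

header, body = split_header_body('UNCLASSIFIED\nSUBJECT: X\n1. SUMMARY: ...\n')
assert header.startswith('UNCLASSIFIED') and body.startswith('1. SUMMARY')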
def parallel_apply(func, arg_iterable, **kwargs): """Apply function to iterable with parallelisation and a tqdm progress bar. Roughly equivalent to >>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable] but will **not** necessarily return results in input order. Parameters ---------- func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. func_args: tuple, optional Additional positional arguments for func. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. Returns ------- results_list: list of function outputs """ max_workers = kwargs.pop('max_workers', None) parallel = kwargs.pop('parallel', True) parallel_warning = kwargs.pop('parallel_warning', True) func_args = kwargs.pop('func_args', ()) func_pre_args = kwargs.pop('func_pre_args', ()) func_kwargs = kwargs.pop('func_kwargs', {}) tqdm_kwargs = kwargs.pop('tqdm_kwargs', {}) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) if 'leave' not in tqdm_kwargs: # default to leave=False tqdm_kwargs['leave'] = False assert isinstance(func_args, tuple), ( str(func_args) + ' is type ' + str(type(func_args))) assert isinstance(func_pre_args, tuple), ( str(func_pre_args) + ' is type ' + str(type(func_pre_args))) progress = select_tqdm() if not parallel: if parallel_warning: warnings.warn(('parallel_map has parallel=False - turn on ' 'parallelisation for faster processing'), UserWarning) return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for x in progress(arg_iterable, **tqdm_kwargs)] else: pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) futures = [] for element in arg_iterable: futures.append(pool.submit( func, *(func_pre_args + (element,) + func_args), **func_kwargs)) results = [] for fut in progress(concurrent.futures.as_completed(futures), total=len(arg_iterable), **tqdm_kwargs): results.append(fut.result()) return results
Apply function to iterable with parallelisation and a tqdm progress bar. Roughly equivalent to >>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable] but will **not** necessarily return results in input order. Parameters ---------- func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. func_args: tuple, optional Additional positional arguments for func. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. Returns ------- results_list: list of function outputs
Below is the the instruction that describes the task: ### Input: Apply function to iterable with parallelisation and a tqdm progress bar. Roughly equivalent to >>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable] but will **not** necessarily return results in input order. Parameters ---------- func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. func_args: tuple, optional Additional positional arguments for func. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. Returns ------- results_list: list of function outputs ### Response: def parallel_apply(func, arg_iterable, **kwargs): """Apply function to iterable with parallelisation and a tqdm progress bar. Roughly equivalent to >>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in arg_iterable] but will **not** necessarily return results in input order. Parameters ---------- func: function Function to apply to list of args. arg_iterable: iterable argument to iterate over. func_args: tuple, optional Additional positional arguments for func. func_pre_args: tuple, optional Positional arguments to place before the iterable argument in func. func_kwargs: dict, optional Additional keyword arguments for func. parallel: bool, optional To turn off parallelisation if needed. parallel_warning: bool, optional To turn off warning for no parallelisation if needed. max_workers: int or None, optional Number of processes. If max_workers is None then concurrent.futures.ProcessPoolExecutor defaults to using the number of processors of the machine. N.B. If max_workers=None and running on supercomputer clusters with multiple nodes, this may default to the number of processors on a single node. 
Returns ------- results_list: list of function outputs """ max_workers = kwargs.pop('max_workers', None) parallel = kwargs.pop('parallel', True) parallel_warning = kwargs.pop('parallel_warning', True) func_args = kwargs.pop('func_args', ()) func_pre_args = kwargs.pop('func_pre_args', ()) func_kwargs = kwargs.pop('func_kwargs', {}) tqdm_kwargs = kwargs.pop('tqdm_kwargs', {}) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) if 'leave' not in tqdm_kwargs: # default to leave=False tqdm_kwargs['leave'] = False assert isinstance(func_args, tuple), ( str(func_args) + ' is type ' + str(type(func_args))) assert isinstance(func_pre_args, tuple), ( str(func_pre_args) + ' is type ' + str(type(func_pre_args))) progress = select_tqdm() if not parallel: if parallel_warning: warnings.warn(('parallel_map has parallel=False - turn on ' 'parallelisation for faster processing'), UserWarning) return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for x in progress(arg_iterable, **tqdm_kwargs)] else: pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) futures = [] for element in arg_iterable: futures.append(pool.submit( func, *(func_pre_args + (element,) + func_args), **func_kwargs)) results = [] for fut in progress(concurrent.futures.as_completed(futures), total=len(arg_iterable), **tqdm_kwargs): results.append(fut.result()) return results
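A minimal usage sketch for the parallel_apply record above (editorial addition; it assumes the function and its select_tqdm helper are importable from the defining module, and the worker must be a picklable top-level function):

def square(x):
    return x * x

if __name__ == '__main__':
    results = parallel_apply(square, range(8), max_workers=2)
    # as_completed yields results in completion order, so sort before comparing
    print(sorted(results))  # [0, 1, 4, 9, 16, 25, 36, 49]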
def remove_masquerade(zone=None, permanent=True): ''' Remove masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_masquerade To remove masquerade on a specific zone .. code-block:: bash salt '*' firewalld.remove_masquerade dmz ''' if zone: cmd = '--zone={0} --remove-masquerade'.format(zone) else: cmd = '--remove-masquerade' if permanent: cmd += ' --permanent' return __firewall_cmd(cmd)
Remove masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_masquerade To remove masquerade on a specific zone .. code-block:: bash salt '*' firewalld.remove_masquerade dmz
Below is the the instruction that describes the task: ### Input: Remove masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_masquerade To remove masquerade on a specific zone .. code-block:: bash salt '*' firewalld.remove_masquerade dmz ### Response: def remove_masquerade(zone=None, permanent=True): ''' Remove masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_masquerade To remove masquerade on a specific zone .. code-block:: bash salt '*' firewalld.remove_masquerade dmz ''' if zone: cmd = '--zone={0} --remove-masquerade'.format(zone) else: cmd = '--remove-masquerade' if permanent: cmd += ' --permanent' return __firewall_cmd(cmd)
def get_page_full_export(self, page_id): """ Get full page info for export and body html code """ try: result = self._request('/getpagefullexport/', {'pageid': page_id}) return TildaPage(**result) except NetworkError: return []
Get full page info for export and body html code
Below is the the instruction that describes the task: ### Input: Get full page info for export and body html code ### Response: def get_page_full_export(self, page_id): """ Get full page info for export and body html code """ try: result = self._request('/getpagefullexport/', {'pageid': page_id}) return TildaPage(**result) except NetworkError: return []
def elemc(item, inset): """ Determine whether an item is an element of a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemc_c.html :param item: Item to be tested. :type item: str :param inset: Set to be tested. :type inset: spiceypy.utils.support_types.SpiceCell :return: True if item is an element of set. :rtype: bool """ assert isinstance(inset, stypes.SpiceCell) item = stypes.stringToCharP(item) return bool(libspice.elemc_c(item, ctypes.byref(inset)))
Determine whether an item is an element of a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemc_c.html :param item: Item to be tested. :type item: str :param inset: Set to be tested. :type inset: spiceypy.utils.support_types.SpiceCell :return: True if item is an element of set. :rtype: bool
Below is the the instruction that describes the task: ### Input: Determine whether an item is an element of a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemc_c.html :param item: Item to be tested. :type item: str :param inset: Set to be tested. :type inset: spiceypy.utils.support_types.SpiceCell :return: True if item is an element of set. :rtype: bool ### Response: def elemc(item, inset): """ Determine whether an item is an element of a character set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemc_c.html :param item: Item to be tested. :type item: str :param inset: Set to be tested. :type inset: spiceypy.utils.support_types.SpiceCell :return: True if item is an element of set. :rtype: bool """ assert isinstance(inset, stypes.SpiceCell) item = stypes.stringToCharP(item) return bool(libspice.elemc_c(item, ctypes.byref(inset)))
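A hedged usage sketch for the elemc record above (editorial addition; it assumes the public spiceypy API names SPICECHAR_CELL and insrtc, which are not shown in the record):

import spiceypy as spice
import spiceypy.utils.support_types as stypes

cell = stypes.SPICECHAR_CELL(10, 10)  # room for 10 strings of length 10
spice.insrtc('MARS', cell)            # insert an item into the character set
print(spice.elemc('MARS', cell))      # True
print(spice.elemc('VENUS', cell))     # False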
def override_root_main_ref(config, remotes, banner): """Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool """ log = logging.getLogger(__name__) greatest_tag = config.banner_greatest_tag if banner else config.greatest_tag recent_tag = config.banner_recent_tag if banner else config.recent_tag if greatest_tag or recent_tag: candidates = [r for r in remotes if r['kind'] == 'tags'] if candidates: multi_sort(candidates, ['semver' if greatest_tag else 'time']) config.update({'banner_main_ref' if banner else 'root_ref': candidates[0]['name']}, overwrite=True) else: flag = '--banner-main-ref' if banner else '--root-ref' log.warning('No git tags with docs found in remote. Falling back to %s value.', flag) ref = config.banner_main_ref if banner else config.root_ref return ref in [r['name'] for r in remotes]
Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool
Below is the the instruction that describes the task: ### Input: Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool ### Response: def override_root_main_ref(config, remotes, banner): """Override root_ref or banner_main_ref with tags in config if user requested. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param iter remotes: List of dicts from Versions.remotes. :param bool banner: Evaluate banner main ref instead of root ref. :return: If root/main ref exists. :rtype: bool """ log = logging.getLogger(__name__) greatest_tag = config.banner_greatest_tag if banner else config.greatest_tag recent_tag = config.banner_recent_tag if banner else config.recent_tag if greatest_tag or recent_tag: candidates = [r for r in remotes if r['kind'] == 'tags'] if candidates: multi_sort(candidates, ['semver' if greatest_tag else 'time']) config.update({'banner_main_ref' if banner else 'root_ref': candidates[0]['name']}, overwrite=True) else: flag = '--banner-main-ref' if banner else '--root-ref' log.warning('No git tags with docs found in remote. Falling back to %s value.', flag) ref = config.banner_main_ref if banner else config.root_ref return ref in [r['name'] for r in remotes]
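For illustration, a sketch of the remotes structure the function above appears to expect, inferred only from the lookups r['kind'] and r['name'] in its body (field names are assumptions, not confirmed API):

remotes = [
    {'kind': 'heads', 'name': 'master'},
    # tag entries presumably also carry whatever 'semver'/'time' keys
    # multi_sort uses to pick the greatest or most recent tag:
    {'kind': 'tags', 'name': 'v1.0.0'},
    {'kind': 'tags', 'name': 'v2.0.0'},
]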
def get_object(model, meteor_id, *args, **kwargs): """Return an object for the given meteor_id.""" # Django model._meta is now public API -> pylint: disable=W0212 meta = model._meta if isinstance(meta.pk, AleaIdField): # meteor_id is the primary key return model.objects.filter(*args, **kwargs).get(pk=meteor_id) alea_unique_fields = [ field for field in meta.local_fields if isinstance(field, AleaIdField) and field.unique and not field.null ] if len(alea_unique_fields) == 1: return model.objects.filter(*args, **kwargs).get(**{ alea_unique_fields[0].name: meteor_id, }) return model.objects.filter(*args, **kwargs).get( pk=get_object_id(model, meteor_id), )
Return an object for the given meteor_id.
Below is the the instruction that describes the task: ### Input: Return an object for the given meteor_id. ### Response: def get_object(model, meteor_id, *args, **kwargs): """Return an object for the given meteor_id.""" # Django model._meta is now public API -> pylint: disable=W0212 meta = model._meta if isinstance(meta.pk, AleaIdField): # meteor_id is the primary key return model.objects.filter(*args, **kwargs).get(pk=meteor_id) alea_unique_fields = [ field for field in meta.local_fields if isinstance(field, AleaIdField) and field.unique and not field.null ] if len(alea_unique_fields) == 1: return model.objects.filter(*args, **kwargs).get(**{ alea_unique_fields[0].name: meteor_id, }) return model.objects.filter(*args, **kwargs).get( pk=get_object_id(model, meteor_id), )
def GetTransactionResults(self): """ Get the execution results of the transaction. Returns: None: if the transaction has no references. list: of TransactionResult objects. """ if self.References is None: return None results = [] realresults = [] for ref_output in self.References.values(): results.append(TransactionResult(ref_output.AssetId, ref_output.Value)) for output in self.outputs: results.append(TransactionResult(output.AssetId, output.Value * Fixed8(-1))) for key, group in groupby(results, lambda x: x.AssetId): sum = Fixed8(0) for item in group: sum = sum + item.Amount if sum != Fixed8.Zero(): realresults.append(TransactionResult(key, sum)) return realresults
Get the execution results of the transaction. Returns: None: if the transaction has no references. list: of TransactionResult objects.
Below is the the instruction that describes the task: ### Input: Get the execution results of the transaction. Returns: None: if the transaction has no references. list: of TransactionResult objects. ### Response: def GetTransactionResults(self): """ Get the execution results of the transaction. Returns: None: if the transaction has no references. list: of TransactionResult objects. """ if self.References is None: return None results = [] realresults = [] for ref_output in self.References.values(): results.append(TransactionResult(ref_output.AssetId, ref_output.Value)) for output in self.outputs: results.append(TransactionResult(output.AssetId, output.Value * Fixed8(-1))) for key, group in groupby(results, lambda x: x.AssetId): sum = Fixed8(0) for item in group: sum = sum + item.Amount if sum != Fixed8.Zero(): realresults.append(TransactionResult(key, sum)) return realresults
def to_pydatetime(self): """ Converts datetime2 object into Python's datetime.datetime object @return: naive datetime.datetime """ return datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
Converts datetime2 object into Python's datetime.datetime object @return: naive datetime.datetime
Below is the the instruction that describes the task: ### Input: Converts datetime2 object into Python's datetime.datetime object @return: naive datetime.datetime ### Response: def to_pydatetime(self): """ Converts datetime2 object into Python's datetime.datetime object @return: naive datetime.datetime """ return datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
def schemaValidateDoc(self, ctxt): """Validate a document tree in memory. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o) return ret
Validate a document tree in memory.
Below is the the instruction that describes the task: ### Input: Validate a document tree in memory. ### Response: def schemaValidateDoc(self, ctxt): """Validate a document tree in memory. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o) return ret
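A hedged sketch of a typical call site for the schemaValidateDoc record above, assuming the usual libxml2 Python binding names for XSD validation (file names are placeholders):

import libxml2

parser_ctxt = libxml2.schemaNewParserCtxt('schema.xsd')  # placeholder path
schema = parser_ctxt.schemaParse()
valid_ctxt = schema.schemaNewValidCtxt()
doc = libxml2.parseFile('instance.xml')                  # placeholder path
ret = doc.schemaValidateDoc(valid_ctxt)  # 0 means the document validates
doc.freeDoc()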
def _apply_axis_properties(self, axis, rot=None, fontsize=None): """ Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick. """ if rot is not None or fontsize is not None: # rot=0 is a valid setting, hence the explicit None check labels = axis.get_majorticklabels() + axis.get_minorticklabels() for label in labels: if rot is not None: label.set_rotation(rot) if fontsize is not None: label.set_fontsize(fontsize)
Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick.
Below is the the instruction that describes the task: ### Input: Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick. ### Response: def _apply_axis_properties(self, axis, rot=None, fontsize=None): """ Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed multiple times per draw. It's therefore beneficial for us to avoid accessing unless we will act on the Tick. """ if rot is not None or fontsize is not None: # rot=0 is a valid setting, hence the explicit None check labels = axis.get_majorticklabels() + axis.get_minorticklabels() for label in labels: if rot is not None: label.set_rotation(rot) if fontsize is not None: label.set_fontsize(fontsize)
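The method above is equivalent to this standalone matplotlib snippet, which shows why tick-label access is deferred until there is actually something to set (a sketch, not the pandas-internal call site):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(range(10))
rot, fontsize = 45, 8
if rot is not None or fontsize is not None:
    # only now are the (expensive) Tick objects materialized
    for label in ax.xaxis.get_majorticklabels() + ax.xaxis.get_minorticklabels():
        label.set_rotation(rot)
        label.set_fontsize(fontsize)
plt.close(fig)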
def convolutional_layer_series(initial_size, layer_sequence): """ Execute a series of convolutional layer transformations to the size number """ size = initial_size for filter_size, padding, stride in layer_sequence: size = convolution_size_equation(size, filter_size, padding, stride) return size
Execute a series of convolutional layer transformations to the size number
Below is the the instruction that describes the task: ### Input: Execute a series of convolutional layer transformations to the size number ### Response: def convolutional_layer_series(initial_size, layer_sequence): """ Execute a series of convolutional layer transformations to the size number """ size = initial_size for filter_size, padding, stride in layer_sequence: size = convolution_size_equation(size, filter_size, padding, stride) return size
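convolution_size_equation is not shown in the record above; assuming it is the standard output-size formula floor((size - filter + 2*padding) / stride) + 1, the helper and a worked call would look like:

def convolution_size_equation(size, filter_size, padding, stride):
    # assumed definition of the helper referenced above
    return (size - filter_size + 2 * padding) // stride + 1

# 32x32 input -> two 3x3 convs (stride 1, padding 1) -> one 2x2 pool (stride 2)
layers = [(3, 1, 1), (3, 1, 1), (2, 0, 2)]
print(convolutional_layer_series(32, layers))  # -> 16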
def error(self, message): """Prints error message, then help.""" sys.stderr.write('error: %s\n\n' % message) self.print_help() sys.exit(2)
Prints error message, then help.
Below is the the instruction that describes the task: ### Input: Prints error message, then help. ### Response: def error(self, message): """Prints error message, then help.""" sys.stderr.write('error: %s\n\n' % message) self.print_help() sys.exit(2)
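The error override above only makes sense on an argparse.ArgumentParser subclass (an assumption; the enclosing class is not shown in the record). A self-contained sketch:

import argparse
import sys

class HelpfulParser(argparse.ArgumentParser):
    def error(self, message):
        """Prints error message, then help."""
        sys.stderr.write('error: %s\n\n' % message)
        self.print_help()
        sys.exit(2)

parser = HelpfulParser(prog='demo')
parser.add_argument('--count', type=int, required=True)
# parser.parse_args([])  # would print the error plus the full help, then exit 2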
def mb_handler(self, args): '''Handler for mb command''' if len(args) == 1: raise InvalidArgument('No s3 bucketname provided') self.validate('cmd|s3', args) self.s3handler().create_bucket(args[1])
Handler for mb command
Below is the the instruction that describes the task: ### Input: Handler for mb command ### Response: def mb_handler(self, args): '''Handler for mb command''' if len(args) == 1: raise InvalidArgument('No s3 bucketname provided') self.validate('cmd|s3', args) self.s3handler().create_bucket(args[1])
def _periodicfeatures_worker(task): ''' This is a parallel worker for the drivers below. ''' pfpickle, lcbasedir, outdir, starfeatures, kwargs = task try: return get_periodicfeatures(pfpickle, lcbasedir, outdir, starfeatures=starfeatures, **kwargs) except Exception as e: LOGEXCEPTION('failed to get periodicfeatures for %s' % pfpickle)
This is a parallel worker for the drivers below.
Below is the the instruction that describes the task: ### Input: This is a parallel worker for the drivers below. ### Response: def _periodicfeatures_worker(task): ''' This is a parallel worker for the drivers below. ''' pfpickle, lcbasedir, outdir, starfeatures, kwargs = task try: return get_periodicfeatures(pfpickle, lcbasedir, outdir, starfeatures=starfeatures, **kwargs) except Exception as e: LOGEXCEPTION('failed to get periodicfeatures for %s' % pfpickle)
def from_object(self, instance: Union[object, str]) -> None: """Load the configuration from a Python object. This can be used to reference modules or objects within modules for example, .. code-block:: python app.config.from_object('module') app.config.from_object('module.instance') from module import instance app.config.from_object(instance) are valid. Arguments: instance: Either a str referencing a python object or the object itself. """ if isinstance(instance, str): try: path, config = instance.rsplit('.', 1) except ValueError: path = instance instance = importlib.import_module(path) else: module = importlib.import_module(path) instance = getattr(module, config) for key in dir(instance): if key.isupper(): self[key] = getattr(instance, key)
Load the configuration from a Python object. This can be used to reference modules or objects within modules for example, .. code-block:: python app.config.from_object('module') app.config.from_object('module.instance') from module import instance app.config.from_object(instance) are valid. Arguments: instance: Either a str referencing a python object or the object itself.
Below is the the instruction that describes the task: ### Input: Load the configuration from a Python object. This can be used to reference modules or objects within modules for example, .. code-block:: python app.config.from_object('module') app.config.from_object('module.instance') from module import instance app.config.from_object(instance) are valid. Arguments: instance: Either a str referencing a python object or the object itself. ### Response: def from_object(self, instance: Union[object, str]) -> None: """Load the configuration from a Python object. This can be used to reference modules or objects within modules for example, .. code-block:: python app.config.from_object('module') app.config.from_object('module.instance') from module import instance app.config.from_object(instance) are valid. Arguments: instance: Either a str referencing a python object or the object itself. """ if isinstance(instance, str): try: path, config = instance.rsplit('.', 1) except ValueError: path = instance instance = importlib.import_module(path) else: module = importlib.import_module(path) instance = getattr(module, config) for key in dir(instance): if key.isupper(): self[key] = getattr(instance, key)
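A small sketch of the from_object behaviour above, assuming a Quart application; note that only UPPERCASE attributes are copied:

from quart import Quart

app = Quart(__name__)

class Settings:
    DEBUG = True
    SECRET_KEY = 'dev'          # copied: uppercase
    internal_flag = 'ignored'   # skipped: lowercase

app.config.from_object(Settings)   # or a dotted string such as 'myapp.settings'
assert app.config['SECRET_KEY'] == 'dev'
assert 'internal_flag' not in app.config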
def registry_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'query_uri': obj.query_uri, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'applications': [{ 'title': a.title, 'uri': a.uri, 'service_url': a.service_url, 'success': a.success, 'has_references': a.has_references, 'count': a.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in a.items] if a.items is not None else None } for a in obj.applications] if obj.applications is not None else None }
Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict`
Below is the the instruction that describes the task: ### Input: Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict` ### Response: def registry_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'query_uri': obj.query_uri, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'applications': [{ 'title': a.title, 'uri': a.uri, 'service_url': a.service_url, 'success': a.success, 'has_references': a.has_references, 'count': a.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in a.items] if a.items is not None else None } for a in obj.applications] if obj.applications is not None else None }
def IPID_count(lst, funcID=lambda x: x[1].id, funcpres=lambda x: x[1].summary()): # noqa: E501 """Identify IP id values classes in a list of packets lst: a list of packets funcID: a function that returns IP id values funcpres: a function used to summarize packets""" idlst = [funcID(e) for e in lst] idlst.sort() classes = [idlst[0]] classes += [t[1] for t in zip(idlst[:-1], idlst[1:]) if abs(t[0] - t[1]) > 50] # noqa: E501 lst = [(funcID(x), funcpres(x)) for x in lst] lst.sort() print("Probably %i classes:" % len(classes), classes) for id, pr in lst: print("%5i" % id, pr)
Identify IP id values classes in a list of packets lst: a list of packets funcID: a function that returns IP id values funcpres: a function used to summarize packets
Below is the the instruction that describes the task: ### Input: Identify IP id values classes in a list of packets lst: a list of packets funcID: a function that returns IP id values funcpres: a function used to summarize packets ### Response: def IPID_count(lst, funcID=lambda x: x[1].id, funcpres=lambda x: x[1].summary()): # noqa: E501 """Identify IP id values classes in a list of packets lst: a list of packets funcID: a function that returns IP id values funcpres: a function used to summarize packets""" idlst = [funcID(e) for e in lst] idlst.sort() classes = [idlst[0]] classes += [t[1] for t in zip(idlst[:-1], idlst[1:]) if abs(t[0] - t[1]) > 50] # noqa: E501 lst = [(funcID(x), funcpres(x)) for x in lst] lst.sort() print("Probably %i classes:" % len(classes), classes) for id, pr in lst: print("%5i" % id, pr)
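A hedged demo of IPID_count with synthetic scapy packets; the default funcID indexes x[1], which relies on scapy's integer layer indexing, so each packet needs a layer below the top one (here Ether/IP):

from scapy.all import Ether, IP

pkts = [Ether() / IP(id=i) for i in (1, 2, 3, 1000, 1001)]
IPID_count(pkts)
# prints: Probably 2 classes: [1, 1000]  (a gap larger than 50 starts a new class)
# then one summary line per packet id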
def filter(self, extractions, case_sensitive=False) -> List[Extraction]:
        """filters out the extraction if the extracted value is in the blacklist"""
        filtered_extractions = []
        if not isinstance(extractions, list):
            extractions = [extractions]
        for extraction in extractions:
            if case_sensitive:
                try:
                    if extraction.value.lower() not in self.black_list:
                        filtered_extractions.append(extraction)
                except Exception as e:
                    print('Error in BlackListFilter: {} while filtering out extraction: {}'.format(e, extraction.value))
                    # most likely it's a unicode character which is messing things up, return it
                    filtered_extractions.append(extraction)
            else:
                if extraction.value not in self.black_list:
                    filtered_extractions.append(extraction)
        return filtered_extractions
filters out the extraction if the extracted value is in the blacklist
Below is the the instruction that describes the task:
### Input:
filters out the extraction if the extracted value is in the blacklist
### Response:
def filter(self, extractions, case_sensitive=False) -> List[Extraction]:
        """filters out the extraction if the extracted value is in the blacklist"""
        filtered_extractions = []
        if not isinstance(extractions, list):
            extractions = [extractions]
        for extraction in extractions:
            if case_sensitive:
                try:
                    if extraction.value.lower() not in self.black_list:
                        filtered_extractions.append(extraction)
                except Exception as e:
                    print('Error in BlackListFilter: {} while filtering out extraction: {}'.format(e, extraction.value))
                    # most likely it's a unicode character which is messing things up, return it
                    filtered_extractions.append(extraction)
            else:
                if extraction.value not in self.black_list:
                    filtered_extractions.append(extraction)
        return filtered_extractions
def mixed_list_file(cls, filename, values, bits):
        """
        Write a list of mixed values to a file.
        If a file of the same name exists, its contents are replaced.

        See L{HexInput.mixed_list_file} for a description of the file format.

        @type filename: str
        @param filename: Name of the file to write.

        @type values: list( int )
        @param values: List of mixed values to write to the file.

        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.integer_size}
        """
        fd = open(filename, 'w')
        for original in values:
            try:
                parsed = cls.integer(original, bits)
            except TypeError:
                parsed = repr(original)
            print >> fd, parsed
        fd.close()
Write a list of mixed values to a file.
If a file of the same name exists, its contents are replaced.

See L{HexInput.mixed_list_file} for a description of the file format.

@type filename: str
@param filename: Name of the file to write.

@type values: list( int )
@param values: List of mixed values to write to the file.

@type bits: int
@param bits:
    (Optional) Number of bits of the target architecture.
    The default is platform dependent. See: L{HexOutput.integer_size}
Below is the the instruction that describes the task:
### Input:
Write a list of mixed values to a file.
If a file of the same name exists, its contents are replaced.

See L{HexInput.mixed_list_file} for a description of the file format.

@type filename: str
@param filename: Name of the file to write.

@type values: list( int )
@param values: List of mixed values to write to the file.

@type bits: int
@param bits:
    (Optional) Number of bits of the target architecture.
    The default is platform dependent. See: L{HexOutput.integer_size}
### Response:
def mixed_list_file(cls, filename, values, bits):
        """
        Write a list of mixed values to a file.
        If a file of the same name exists, its contents are replaced.

        See L{HexInput.mixed_list_file} for a description of the file format.

        @type filename: str
        @param filename: Name of the file to write.

        @type values: list( int )
        @param values: List of mixed values to write to the file.

        @type bits: int
        @param bits:
            (Optional) Number of bits of the target architecture.
            The default is platform dependent. See: L{HexOutput.integer_size}
        """
        fd = open(filename, 'w')
        for original in values:
            try:
                parsed = cls.integer(original, bits)
            except TypeError:
                parsed = repr(original)
            print >> fd, parsed
        fd.close()
def setup(self): """Initialize filter just before it will be used.""" super(CleanCSSFilter, self).setup() self.root = current_app.config.get('COLLECT_STATIC_ROOT')
Initialize filter just before it will be used.
Below is the the instruction that describes the task: ### Input: Initialize filter just before it will be used. ### Response: def setup(self): """Initialize filter just before it will be used.""" super(CleanCSSFilter, self).setup() self.root = current_app.config.get('COLLECT_STATIC_ROOT')
def get_default_home_dir(): """ Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path """ ding0_dir = str(cfg_ding0.get('config', 'config_dir')) return os.path.join(os.path.expanduser('~'), ding0_dir)
Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path
Below is the the instruction that describes the task: ### Input: Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path ### Response: def get_default_home_dir(): """ Return default home directory of Ding0 Returns ------- :any:`str` Default home directory including its path """ ding0_dir = str(cfg_ding0.get('config', 'config_dir')) return os.path.join(os.path.expanduser('~'), ding0_dir)
def _checkCanIndex(self): ''' _checkCanIndex - Check if we CAN index (if all fields are indexable). Also checks the right-most field for "hashIndex" - if it needs to hash we will hash. ''' # NOTE: We can't just check the right-most field. For types like pickle that don't support indexing, they don't # support it because python2 and python3 have different results for pickle.dumps on the same object. # So if we have a field chain like Pickle, Compressed then we will have two different results. if not self.chainedFields: return (False, False) for chainedField in self.chainedFields: if chainedField.CAN_INDEX is False: return (False, False) return (True, self.chainedFields[-1].hashIndex)
_checkCanIndex - Check if we CAN index (if all fields are indexable). Also checks the right-most field for "hashIndex" - if it needs to hash we will hash.
Below is the the instruction that describes the task: ### Input: _checkCanIndex - Check if we CAN index (if all fields are indexable). Also checks the right-most field for "hashIndex" - if it needs to hash we will hash. ### Response: def _checkCanIndex(self): ''' _checkCanIndex - Check if we CAN index (if all fields are indexable). Also checks the right-most field for "hashIndex" - if it needs to hash we will hash. ''' # NOTE: We can't just check the right-most field. For types like pickle that don't support indexing, they don't # support it because python2 and python3 have different results for pickle.dumps on the same object. # So if we have a field chain like Pickle, Compressed then we will have two different results. if not self.chainedFields: return (False, False) for chainedField in self.chainedFields: if chainedField.CAN_INDEX is False: return (False, False) return (True, self.chainedFields[-1].hashIndex)
def Beta(alpha, beta, low=0, high=1, tag=None): """ A Beta random variate Parameters ---------- alpha : scalar The first shape parameter beta : scalar The second shape parameter Optional -------- low : scalar Lower bound of the distribution support (default=0) high : scalar Upper bound of the distribution support (default=1) """ assert ( alpha > 0 and beta > 0 ), 'Beta "alpha" and "beta" parameters must be greater than zero' assert low < high, 'Beta "low" must be less than "high"' return uv(ss.beta(alpha, beta, loc=low, scale=high - low), tag=tag)
A Beta random variate Parameters ---------- alpha : scalar The first shape parameter beta : scalar The second shape parameter Optional -------- low : scalar Lower bound of the distribution support (default=0) high : scalar Upper bound of the distribution support (default=1)
Below is the the instruction that describes the task: ### Input: A Beta random variate Parameters ---------- alpha : scalar The first shape parameter beta : scalar The second shape parameter Optional -------- low : scalar Lower bound of the distribution support (default=0) high : scalar Upper bound of the distribution support (default=1) ### Response: def Beta(alpha, beta, low=0, high=1, tag=None): """ A Beta random variate Parameters ---------- alpha : scalar The first shape parameter beta : scalar The second shape parameter Optional -------- low : scalar Lower bound of the distribution support (default=0) high : scalar Upper bound of the distribution support (default=1) """ assert ( alpha > 0 and beta > 0 ), 'Beta "alpha" and "beta" parameters must be greater than zero' assert low < high, 'Beta "low" must be less than "high"' return uv(ss.beta(alpha, beta, loc=low, scale=high - low), tag=tag)
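A usage sketch for the Beta record above, assuming uv and ss come from the enclosing module (this looks like an mcerp/soerp-style wrapper over scipy.stats, but that is an inference):

x = Beta(2, 5)                   # standard Beta(2, 5) on [0, 1]
y = Beta(2, 5, low=10, high=20)  # same shape parameters rescaled to [10, 20]
# mean of the rescaled variate: low + (high - low) * alpha / (alpha + beta)
#                             = 10 + 10 * 2 / 7 ~= 12.857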
def __balance(self, account_id, **kwargs):
        """Call documentation: `/account/balance
        <https://www.wepay.com/developer/reference/account-2011-01-15#balance>`_, plus
        extra keyword parameters:

        :keyword str access_token: will be used instead of instance's
           ``access_token``, with ``batch_mode=True`` will set `authorization`
           param to its value.

        :keyword bool batch_mode: turn on/off the batch_mode, see
           :class:`wepay.api.WePay`

        :keyword str batch_reference_id: `reference_id` param for batch call,
           see :class:`wepay.api.WePay`

        :keyword str api_version: WePay API version, see
           :class:`wepay.api.WePay`

        .. warning ::

            This call is deprecated as of API version '2014-01-08'.

        """
        params = {
            'account_id': account_id
        }
        return self.make_call(self.__balance, params, kwargs)
Call documentation: `/account/balance
<https://www.wepay.com/developer/reference/account-2011-01-15#balance>`_, plus
extra keyword parameters:

:keyword str access_token: will be used instead of instance's
   ``access_token``, with ``batch_mode=True`` will set `authorization`
   param to its value.

:keyword bool batch_mode: turn on/off the batch_mode, see
   :class:`wepay.api.WePay`

:keyword str batch_reference_id: `reference_id` param for batch call,
   see :class:`wepay.api.WePay`

:keyword str api_version: WePay API version, see
   :class:`wepay.api.WePay`

.. warning ::

    This call is deprecated as of API version '2014-01-08'.
Below is the the instruction that describes the task:
### Input:
Call documentation: `/account/balance
<https://www.wepay.com/developer/reference/account-2011-01-15#balance>`_, plus
extra keyword parameters:

:keyword str access_token: will be used instead of instance's
   ``access_token``, with ``batch_mode=True`` will set `authorization`
   param to its value.

:keyword bool batch_mode: turn on/off the batch_mode, see
   :class:`wepay.api.WePay`

:keyword str batch_reference_id: `reference_id` param for batch call,
   see :class:`wepay.api.WePay`

:keyword str api_version: WePay API version, see
   :class:`wepay.api.WePay`

.. warning ::

    This call is deprecated as of API version '2014-01-08'.
### Response:
def __balance(self, account_id, **kwargs):
        """Call documentation: `/account/balance
        <https://www.wepay.com/developer/reference/account-2011-01-15#balance>`_, plus
        extra keyword parameters:

        :keyword str access_token: will be used instead of instance's
           ``access_token``, with ``batch_mode=True`` will set `authorization`
           param to its value.

        :keyword bool batch_mode: turn on/off the batch_mode, see
           :class:`wepay.api.WePay`

        :keyword str batch_reference_id: `reference_id` param for batch call,
           see :class:`wepay.api.WePay`

        :keyword str api_version: WePay API version, see
           :class:`wepay.api.WePay`

        .. warning ::

            This call is deprecated as of API version '2014-01-08'.

        """
        params = {
            'account_id': account_id
        }
        return self.make_call(self.__balance, params, kwargs)
def get_checklists( self ): """ Get the checklists for this board. Returns a list of Checklist objects. """ checklists = self.getChecklistsJson( self.base_uri ) checklists_list = [] for checklist_json in checklists: checklists_list.append( self.createChecklist( checklist_json ) ) return checklists_list
Get the checklists for this board. Returns a list of Checklist objects.
Below is the the instruction that describes the task: ### Input: Get the checklists for this board. Returns a list of Checklist objects. ### Response: def get_checklists( self ): """ Get the checklists for this board. Returns a list of Checklist objects. """ checklists = self.getChecklistsJson( self.base_uri ) checklists_list = [] for checklist_json in checklists: checklists_list.append( self.createChecklist( checklist_json ) ) return checklists_list
def sort_data(x_vals, y_vals): """Sort the data so that x is monotonically increasing and contains no duplicates. """ # Sort data idxs = np.argsort(x_vals) x_vals = x_vals[idxs] y_vals = y_vals[idxs] # De-duplicate data mask = np.r_[True, (np.diff(x_vals) > 0)] if not mask.all(): # what is this for? numof_duplicates = np.repeat(mask, np.equal(mask, False)).shape[0] del numof_duplicates x_vals = x_vals[mask] y_vals = y_vals[mask] return x_vals, y_vals
Sort the data so that x is monotonically increasing and contains no duplicates.
Below is the the instruction that describes the task: ### Input: Sort the data so that x is monotonically increasing and contains no duplicates. ### Response: def sort_data(x_vals, y_vals): """Sort the data so that x is monotonically increasing and contains no duplicates. """ # Sort data idxs = np.argsort(x_vals) x_vals = x_vals[idxs] y_vals = y_vals[idxs] # De-duplicate data mask = np.r_[True, (np.diff(x_vals) > 0)] if not mask.all(): # what is this for? numof_duplicates = np.repeat(mask, np.equal(mask, False)).shape[0] del numof_duplicates x_vals = x_vals[mask] y_vals = y_vals[mask] return x_vals, y_vals
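A worked example for sort_data; note that when an x value is duplicated, only the first entry after sorting survives:

import numpy as np

x = np.array([3.0, 1.0, 2.0, 2.0])
y = np.array([30.0, 10.0, 20.0, 21.0])
xs, ys = sort_data(x, y)
print(xs)  # [1. 2. 3.]
print(ys)  # [10. 20. 30.]  -- the second x=2.0 sample (y=21.0) is dropped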
def resolve_object_property(obj, path: str): """Resolves the value of a property on an object. Is able to resolve nested properties. For example, a path can be specified: 'other.beer.name' Raises: AttributeError: In case the property could not be resolved. Returns: The value of the specified property. """ value = obj for path_part in path.split('.'): value = getattr(value, path_part) return value
Resolves the value of a property on an object. Is able to resolve nested properties. For example, a path can be specified: 'other.beer.name' Raises: AttributeError: In case the property could not be resolved. Returns: The value of the specified property.
Below is the the instruction that describes the task: ### Input: Resolves the value of a property on an object. Is able to resolve nested properties. For example, a path can be specified: 'other.beer.name' Raises: AttributeError: In case the property could not be resolved. Returns: The value of the specified property. ### Response: def resolve_object_property(obj, path: str): """Resolves the value of a property on an object. Is able to resolve nested properties. For example, a path can be specified: 'other.beer.name' Raises: AttributeError: In case the property could not be resolved. Returns: The value of the specified property. """ value = obj for path_part in path.split('.'): value = getattr(value, path_part) return value
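A self-contained sketch mirroring the 'other.beer.name' example from the docstring above:

class Beer:
    name = 'Pilsner'

class Other:
    beer = Beer()

print(resolve_object_property(Other(), 'beer.name'))  # Pilsner
# resolve_object_property(Other(), 'beer.vintage') would raise AttributeError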
def computeEntropyAndEnthalpy(self, uncertainty_method=None, verbose=False, warning_cutoff=1.0e-10): """Decompose free energy differences into enthalpy and entropy differences. Compute the decomposition of the free energy difference between states 1 and N into reduced free energy differences, reduced potential (enthalpy) differences, and reduced entropy (S/k) differences. Parameters ---------- uncertainty_method : string , optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Delta_f_ij : np.ndarray, float, shape=(K, K) Delta_f_ij[i,j] is the dimensionless free energy difference f_j - f_i dDelta_f_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_u_ij : np.ndarray, float, shape=(K, K) Delta_u_ij[i,j] is the reduced potential energy difference u_j - u_i dDelta_u_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_s_ij : np.ndarray, float, shape=(K, K) Delta_s_ij[i,j] is the reduced entropy difference S/k between states i and j (s_j - s_i) dDelta_s_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_s_ij Notes ----- This method is EXPERIMENTAL and should be used at your own risk. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy() """ if verbose: print("Computing average energy and entropy by MBAR.") # Retrieve N and K for convenience. N = self.N K = self.K # Augment W_nk, N_k, and c_k for q_A(x) for the potential energies, # with one extra row/column for each state. # weight matrix Log_W_nk = np.zeros([N, K * 2], dtype=np.float64) N_k = np.zeros([K * 2], dtype=np.int32) # counts # "free energies" of average states f_k = np.zeros(K, dtype=np.float64) # Fill in first half of matrix with existing q_k(x) from states. Log_W_nk[:, 0:K] = self.Log_W_nk N_k[0:K] = self.N_k # Compute the remaining rows/columns of W_nk and c_k for the potential # energy observable. u_min = self.u_kn.min() u_i = np.zeros([K], dtype=np.float64) for l in range(0, K): u_kn = self.u_kn[l, :] - (u_min-1) # all positive now! Subtracting off arbitrary constants doesn't affect results # since they are all differences. # Compute unnormalized weights. # A(x_n) exp[f_{k} - q_{k}(x_n)] / \sum_{k'=1}^K N_{k'} exp[f_{k'} - q_{k'}(x_n)] # harden for over/underflow with logarithms Log_W_nk[:, K + l] = np.log(u_kn) + self.Log_W_nk[:, l] f_k[l] = -_logsum(Log_W_nk[:, K + l]) Log_W_nk[:, K + l] += f_k[l] # normalize the row u_i[l] = np.exp(-f_k[l]) # print "MBAR u_i[%d]: %10.5f,%10.5f" % (l,u_i[l]+u_min, u_i[l]) # Compute augmented asymptotic covariance matrix. W_nk = np.exp(Log_W_nk) Theta_ij = self._computeAsymptoticCovarianceMatrix( W_nk, N_k, method=uncertainty_method) # Compute estimators and uncertainties. dDelta_f_ij = np.zeros([K, K], dtype=np.float64) dDelta_u_ij = np.zeros([K, K], dtype=np.float64) dDelta_s_ij = np.zeros([K, K], dtype=np.float64) # Compute reduced free energy difference. f_k = np.matrix(self.f_k) Delta_f_ij = f_k - f_k.transpose() # Compute reduced enthalpy difference. 
u_k = np.matrix(u_i) Delta_u_ij = u_k - u_k.transpose() # Compute reduced entropy difference s_k = u_k - f_k Delta_s_ij = s_k - s_k.transpose() # compute uncertainty matrix in free energies: # d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j] diag = Theta_ij.diagonal() dii = diag[0:K, 0:K] d2DeltaF = dii + dii.transpose() - 2 * Theta_ij[0:K, 0:K] # check for any numbers below zero. if (np.any(d2DeltaF < 0.0)): if(np.any(d2DeltaF) < warning_cutoff): # Hmm. Will this print correctly? print("A squared uncertainty is negative. d2DeltaF = %e" % d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)]) else: d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)] = 0.0 # take the square root of the entries of matrix dDelta_f_ij = np.sqrt(d2DeltaF) # TODO -- vectorize this calculation for entropy and enthalpy! for i in range(0, K): for j in range(0, K): try: dDelta_u_ij[i, j] = math.sqrt( + u_i[i] * Theta_ij[i, i] * u_i[i] - u_i[i] * Theta_ij[i, j] * u_i[j] - u_i[ i] * Theta_ij[i, K + i] * u_i[i] + u_i[i] * Theta_ij[i, K + j] * u_i[j] - u_i[j] * Theta_ij[j, i] * u_i[i] + u_i[j] * Theta_ij[j, j] * u_i[j] + u_i[ j] * Theta_ij[j, K + i] * u_i[i] - u_i[j] * Theta_ij[j, K + j] * u_i[j] - u_i[i] * Theta_ij[K + i, i] * u_i[i] + u_i[i] * Theta_ij[K + i, j] * u_i[ j] + u_i[i] * Theta_ij[K + i, K + i] * u_i[i] - u_i[i] * Theta_ij[K + i, K + j] * u_i[j] + u_i[j] * Theta_ij[K + j, i] * u_i[i] - u_i[j] * Theta_ij[K + j, j] * u_i[ j] - u_i[j] * Theta_ij[K + j, K + i] * u_i[i] + u_i[j] * Theta_ij[K + j, K + j] * u_i[j] ) except: dDelta_u_ij[i, j] = 0.0 # Compute reduced entropy difference. try: dDelta_s_ij[i, j] = math.sqrt( + (u_i[i] - 1) * Theta_ij[i, i] * (u_i[i] - 1) + (u_i[i] - 1) * Theta_ij[i, j] * (-u_i[j] + 1) + ( u_i[i] - 1) * Theta_ij[i, K + i] * (-u_i[i]) + (u_i[i] - 1) * Theta_ij[i, K + j] * u_i[j] + (-u_i[j] + 1) * Theta_ij[j, i] * (u_i[i] - 1) + (-u_i[j] + 1) * Theta_ij[j, j] * (-u_i[j] + 1) + (-u_i[j] + 1) * Theta_ij[j, K + i] * (-u_i[i]) + (-u_i[j] + 1) * Theta_ij[j, K + j] * u_i[j] + (-u_i[i]) * Theta_ij[K + i, i] * (u_i[i] - 1) + (-u_i[i]) * Theta_ij[K + i, j] * (-u_i[j] + 1) + (-u_i[i]) * Theta_ij[K + i, K + i] * (-u_i[i]) + (-u_i[i]) * Theta_ij[K + i, K + j] * u_i[j] + u_i[j] * Theta_ij[K + j, i] * (u_i[i] - 1) + u_i[j] * Theta_ij[K + j, j] * (-u_i[j] + 1) + u_i[ j] * Theta_ij[K + j, K + i] * (-u_i[i]) + u_i[j] * Theta_ij[K + j, K + j] * u_i[j] ) except: dDelta_s_ij[i, j] = 0.0 # Return expectations and uncertainties. return (Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij)
Decompose free energy differences into enthalpy and entropy differences. Compute the decomposition of the free energy difference between states 1 and N into reduced free energy differences, reduced potential (enthalpy) differences, and reduced entropy (S/k) differences. Parameters ---------- uncertainty_method : string , optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Delta_f_ij : np.ndarray, float, shape=(K, K) Delta_f_ij[i,j] is the dimensionless free energy difference f_j - f_i dDelta_f_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_u_ij : np.ndarray, float, shape=(K, K) Delta_u_ij[i,j] is the reduced potential energy difference u_j - u_i dDelta_u_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_s_ij : np.ndarray, float, shape=(K, K) Delta_s_ij[i,j] is the reduced entropy difference S/k between states i and j (s_j - s_i) dDelta_s_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_s_ij Notes ----- This method is EXPERIMENTAL and should be used at your own risk. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy()
Below is the the instruction that describes the task: ### Input: Decompose free energy differences into enthalpy and entropy differences. Compute the decomposition of the free energy difference between states 1 and N into reduced free energy differences, reduced potential (enthalpy) differences, and reduced entropy (S/k) differences. Parameters ---------- uncertainty_method : string , optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Delta_f_ij : np.ndarray, float, shape=(K, K) Delta_f_ij[i,j] is the dimensionless free energy difference f_j - f_i dDelta_f_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_u_ij : np.ndarray, float, shape=(K, K) Delta_u_ij[i,j] is the reduced potential energy difference u_j - u_i dDelta_u_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_s_ij : np.ndarray, float, shape=(K, K) Delta_s_ij[i,j] is the reduced entropy difference S/k between states i and j (s_j - s_i) dDelta_s_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_s_ij Notes ----- This method is EXPERIMENTAL and should be used at your own risk. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy() ### Response: def computeEntropyAndEnthalpy(self, uncertainty_method=None, verbose=False, warning_cutoff=1.0e-10): """Decompose free energy differences into enthalpy and entropy differences. Compute the decomposition of the free energy difference between states 1 and N into reduced free energy differences, reduced potential (enthalpy) differences, and reduced entropy (S/k) differences. Parameters ---------- uncertainty_method : string , optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Delta_f_ij : np.ndarray, float, shape=(K, K) Delta_f_ij[i,j] is the dimensionless free energy difference f_j - f_i dDelta_f_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_u_ij : np.ndarray, float, shape=(K, K) Delta_u_ij[i,j] is the reduced potential energy difference u_j - u_i dDelta_u_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_f_ij Delta_s_ij : np.ndarray, float, shape=(K, K) Delta_s_ij[i,j] is the reduced entropy difference S/k between states i and j (s_j - s_i) dDelta_s_ij : np.ndarray, float, shape=(K, K) uncertainty in Delta_s_ij Notes ----- This method is EXPERIMENTAL and should be used at your own risk. Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy() """ if verbose: print("Computing average energy and entropy by MBAR.") # Retrieve N and K for convenience. 
N = self.N K = self.K # Augment W_nk, N_k, and c_k for q_A(x) for the potential energies, # with one extra row/column for each state. # weight matrix Log_W_nk = np.zeros([N, K * 2], dtype=np.float64) N_k = np.zeros([K * 2], dtype=np.int32) # counts # "free energies" of average states f_k = np.zeros(K, dtype=np.float64) # Fill in first half of matrix with existing q_k(x) from states. Log_W_nk[:, 0:K] = self.Log_W_nk N_k[0:K] = self.N_k # Compute the remaining rows/columns of W_nk and c_k for the potential # energy observable. u_min = self.u_kn.min() u_i = np.zeros([K], dtype=np.float64) for l in range(0, K): u_kn = self.u_kn[l, :] - (u_min-1) # all positive now! Subtracting off arbitrary constants doesn't affect results # since they are all differences. # Compute unnormalized weights. # A(x_n) exp[f_{k} - q_{k}(x_n)] / \sum_{k'=1}^K N_{k'} exp[f_{k'} - q_{k'}(x_n)] # harden for over/underflow with logarithms Log_W_nk[:, K + l] = np.log(u_kn) + self.Log_W_nk[:, l] f_k[l] = -_logsum(Log_W_nk[:, K + l]) Log_W_nk[:, K + l] += f_k[l] # normalize the row u_i[l] = np.exp(-f_k[l]) # print "MBAR u_i[%d]: %10.5f,%10.5f" % (l,u_i[l]+u_min, u_i[l]) # Compute augmented asymptotic covariance matrix. W_nk = np.exp(Log_W_nk) Theta_ij = self._computeAsymptoticCovarianceMatrix( W_nk, N_k, method=uncertainty_method) # Compute estimators and uncertainties. dDelta_f_ij = np.zeros([K, K], dtype=np.float64) dDelta_u_ij = np.zeros([K, K], dtype=np.float64) dDelta_s_ij = np.zeros([K, K], dtype=np.float64) # Compute reduced free energy difference. f_k = np.matrix(self.f_k) Delta_f_ij = f_k - f_k.transpose() # Compute reduced enthalpy difference. u_k = np.matrix(u_i) Delta_u_ij = u_k - u_k.transpose() # Compute reduced entropy difference s_k = u_k - f_k Delta_s_ij = s_k - s_k.transpose() # compute uncertainty matrix in free energies: # d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j] diag = Theta_ij.diagonal() dii = diag[0:K, 0:K] d2DeltaF = dii + dii.transpose() - 2 * Theta_ij[0:K, 0:K] # check for any numbers below zero. if (np.any(d2DeltaF < 0.0)): if(np.any(d2DeltaF) < warning_cutoff): # Hmm. Will this print correctly? print("A squared uncertainty is negative. d2DeltaF = %e" % d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)]) else: d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)] = 0.0 # take the square root of the entries of matrix dDelta_f_ij = np.sqrt(d2DeltaF) # TODO -- vectorize this calculation for entropy and enthalpy! for i in range(0, K): for j in range(0, K): try: dDelta_u_ij[i, j] = math.sqrt( + u_i[i] * Theta_ij[i, i] * u_i[i] - u_i[i] * Theta_ij[i, j] * u_i[j] - u_i[ i] * Theta_ij[i, K + i] * u_i[i] + u_i[i] * Theta_ij[i, K + j] * u_i[j] - u_i[j] * Theta_ij[j, i] * u_i[i] + u_i[j] * Theta_ij[j, j] * u_i[j] + u_i[ j] * Theta_ij[j, K + i] * u_i[i] - u_i[j] * Theta_ij[j, K + j] * u_i[j] - u_i[i] * Theta_ij[K + i, i] * u_i[i] + u_i[i] * Theta_ij[K + i, j] * u_i[ j] + u_i[i] * Theta_ij[K + i, K + i] * u_i[i] - u_i[i] * Theta_ij[K + i, K + j] * u_i[j] + u_i[j] * Theta_ij[K + j, i] * u_i[i] - u_i[j] * Theta_ij[K + j, j] * u_i[ j] - u_i[j] * Theta_ij[K + j, K + i] * u_i[i] + u_i[j] * Theta_ij[K + j, K + j] * u_i[j] ) except: dDelta_u_ij[i, j] = 0.0 # Compute reduced entropy difference. 
try: dDelta_s_ij[i, j] = math.sqrt( + (u_i[i] - 1) * Theta_ij[i, i] * (u_i[i] - 1) + (u_i[i] - 1) * Theta_ij[i, j] * (-u_i[j] + 1) + ( u_i[i] - 1) * Theta_ij[i, K + i] * (-u_i[i]) + (u_i[i] - 1) * Theta_ij[i, K + j] * u_i[j] + (-u_i[j] + 1) * Theta_ij[j, i] * (u_i[i] - 1) + (-u_i[j] + 1) * Theta_ij[j, j] * (-u_i[j] + 1) + (-u_i[j] + 1) * Theta_ij[j, K + i] * (-u_i[i]) + (-u_i[j] + 1) * Theta_ij[j, K + j] * u_i[j] + (-u_i[i]) * Theta_ij[K + i, i] * (u_i[i] - 1) + (-u_i[i]) * Theta_ij[K + i, j] * (-u_i[j] + 1) + (-u_i[i]) * Theta_ij[K + i, K + i] * (-u_i[i]) + (-u_i[i]) * Theta_ij[K + i, K + j] * u_i[j] + u_i[j] * Theta_ij[K + j, i] * (u_i[i] - 1) + u_i[j] * Theta_ij[K + j, j] * (-u_i[j] + 1) + u_i[ j] * Theta_ij[K + j, K + i] * (-u_i[i]) + u_i[j] * Theta_ij[K + j, K + j] * u_i[j] ) except: dDelta_s_ij[i, j] = 0.0 # Return expectations and uncertainties. return (Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij)
def savefits(cube, fitsname, **kwargs): """Save a cube to a 3D-cube FITS file. Args: cube (xarray.DataArray): Cube to be saved. fitsname (str): Name of output FITS file. kwargs (optional): Other arguments common with astropy.io.fits.writeto(). """ ### pick up kwargs dropdeg = kwargs.pop('dropdeg', False) ndim = len(cube.dims) ### load yaml FITSINFO = get_data('decode', 'data/fitsinfo.yaml') hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader) ### default header if ndim == 2: header = fits.Header(hdrdata['dcube_2d']) data = cube.values.T elif ndim == 3: if dropdeg: header = fits.Header(hdrdata['dcube_2d']) data = cube.values[:, :, 0].T else: header = fits.Header(hdrdata['dcube_3d']) kidfq = cube.kidfq.values freqrange = ~np.isnan(kidfq) orderedfq = np.argsort(kidfq[freqrange]) newcube = cube[:, :, orderedfq] data = newcube.values.T else: raise TypeError(ndim) ### update Header if cube.coordsys == 'AZEL': header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'}) elif cube.coordsys == 'RADEC': header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)}) else: pass header.update({'CRVAL1': float(cube.x[0]), 'CDELT1': float(cube.x[1] - cube.x[0]), 'CRVAL2': float(cube.y[0]), 'CDELT2': float(cube.y[1] - cube.y[0]), 'DATE': datetime.now(timezone('UTC')).isoformat()}) if (ndim == 3) and (not dropdeg): header.update({'CRVAL3': float(newcube.kidfq[0]), 'CDELT3': float(newcube.kidfq[1] - newcube.kidfq[0])}) fitsname = str(Path(fitsname).expanduser()) fits.writeto(fitsname, data, header, **kwargs) logger.info('{} has been created.'.format(fitsname))
Save a cube to a 3D-cube FITS file.

Args:
    cube (xarray.DataArray): Cube to be saved.
    fitsname (str): Name of output FITS file.
    kwargs (optional): Other arguments common with astropy.io.fits.writeto().
Below is the instruction that describes the task:
### Input:
Save a cube to a 3D-cube FITS file.

Args:
    cube (xarray.DataArray): Cube to be saved.
    fitsname (str): Name of output FITS file.
    kwargs (optional): Other arguments common with astropy.io.fits.writeto().
### Response:
def savefits(cube, fitsname, **kwargs):
    """Save a cube to a 3D-cube FITS file.

    Args:
        cube (xarray.DataArray): Cube to be saved.
        fitsname (str): Name of output FITS file.
        kwargs (optional): Other arguments common with astropy.io.fits.writeto().
    """
    ### pick up kwargs
    dropdeg = kwargs.pop('dropdeg', False)
    ndim = len(cube.dims)

    ### load yaml
    FITSINFO = get_data('decode', 'data/fitsinfo.yaml')
    hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader)

    ### default header
    if ndim == 2:
        header = fits.Header(hdrdata['dcube_2d'])
        data = cube.values.T
    elif ndim == 3:
        if dropdeg:
            header = fits.Header(hdrdata['dcube_2d'])
            data = cube.values[:, :, 0].T
        else:
            header = fits.Header(hdrdata['dcube_3d'])
            kidfq = cube.kidfq.values
            freqrange = ~np.isnan(kidfq)
            orderedfq = np.argsort(kidfq[freqrange])
            newcube = cube[:, :, orderedfq]
            data = newcube.values.T
    else:
        raise TypeError(ndim)

    ### update Header
    if cube.coordsys == 'AZEL':
        header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})
    elif cube.coordsys == 'RADEC':
        header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})
    else:
        pass
    header.update({'CRVAL1': float(cube.x[0]),
                   'CDELT1': float(cube.x[1] - cube.x[0]),
                   'CRVAL2': float(cube.y[0]),
                   'CDELT2': float(cube.y[1] - cube.y[0]),
                   'DATE': datetime.now(timezone('UTC')).isoformat()})
    if (ndim == 3) and (not dropdeg):
        header.update({'CRVAL3': float(newcube.kidfq[0]),
                       'CDELT3': float(newcube.kidfq[1] - newcube.kidfq[0])})

    fitsname = str(Path(fitsname).expanduser())
    fits.writeto(fitsname, data, header, **kwargs)
    logger.info('{} has been created.'.format(fitsname))
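The function is ultimately a thin wrapper around astropy.io.fits.writeto, so the forwarded **kwargs are astropy's, not decode's. A minimal sketch of that underlying call, with a made-up array and header (the real header comes from fitsinfo.yaml plus the cube coordinates):

import numpy as np
from astropy.io import fits

# Hypothetical 2D image; savefits would build this from cube.values.T.
data = np.zeros((10, 10))

# Minimal header standing in for the one built from fitsinfo.yaml.
header = fits.Header()
header['CTYPE1'] = 'dAZ'
header['CTYPE2'] = 'dEL'

# Extra keyword arguments to savefits are forwarded here, so e.g.
# overwrite=True allows replacing an existing file.
fits.writeto('example.fits', data, header, overwrite=True)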
def kibana_install(self):
    """
    kibana install
    :return:
    """
    with cd('/tmp'):
        if not exists('kibana.deb'):
            sudo('wget {0} -O kibana.deb'.format(
                bigdata_conf.kibana_download_url
            ))
        sudo('dpkg -i kibana.deb')
        sudo('apt-get install -y')
kibana install
:return:
Below is the instruction that describes the task:
### Input:
kibana install
:return:
### Response:
def kibana_install(self):
    """
    kibana install
    :return:
    """
    with cd('/tmp'):
        if not exists('kibana.deb'):
            sudo('wget {0} -O kibana.deb'.format(
                bigdata_conf.kibana_download_url
            ))
        sudo('dpkg -i kibana.deb')
        sudo('apt-get install -y')
def feed_data(self, data: bytes) -> None:
    """
    Proxy feed_data to the wrapped parser
    """
    if self._parser is not None:
        self._parser.feed_data(data)
Proxy feed_data to the wrapped parser
Below is the instruction that describes the task:
### Input:
Proxy feed_data to the wrapped parser
### Response:
def feed_data(self, data: bytes) -> None:
    """
    Proxy feed_data to the wrapped parser
    """
    if self._parser is not None:
        self._parser.feed_data(data)
def make_pose(translation, rotation):
    """
    Makes a homogeneous pose matrix from a translation vector
    and a rotation matrix.

    Args:
        translation: a 3-dim iterable
        rotation: a 3x3 matrix

    Returns:
        pose: a 4x4 homogeneous matrix
    """
    pose = np.zeros((4, 4))
    pose[:3, :3] = rotation
    pose[:3, 3] = translation
    pose[3, 3] = 1.0
    return pose
Makes a homogeneous pose matrix from a translation vector
and a rotation matrix.

Args:
    translation: a 3-dim iterable
    rotation: a 3x3 matrix

Returns:
    pose: a 4x4 homogeneous matrix
Below is the instruction that describes the task:
### Input:
Makes a homogeneous pose matrix from a translation vector
and a rotation matrix.

Args:
    translation: a 3-dim iterable
    rotation: a 3x3 matrix

Returns:
    pose: a 4x4 homogeneous matrix
### Response:
def make_pose(translation, rotation):
    """
    Makes a homogeneous pose matrix from a translation vector
    and a rotation matrix.

    Args:
        translation: a 3-dim iterable
        rotation: a 3x3 matrix

    Returns:
        pose: a 4x4 homogeneous matrix
    """
    pose = np.zeros((4, 4))
    pose[:3, :3] = rotation
    pose[:3, 3] = translation
    pose[3, 3] = 1.0
    return pose
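A quick usage sketch, assuming the make_pose above is in scope: the point of a homogeneous pose is that rotation and translation become a single matrix product.

import numpy as np

# 90-degree rotation about the z-axis and a unit translation along x.
R = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])
t = [1.0, 0.0, 0.0]

pose = make_pose(t, R)

# A homogeneous point [x, y, z, 1] is rotated and translated in one product.
p = pose @ np.array([1.0, 0.0, 0.0, 1.0])
print(p[:3])  # [1. 1. 0.]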
def _solve(self, A=None, b=None):
    r"""
    Sends the A and b matrices to the specified solver, and solves for *x*
    given the boundary conditions, and source terms based on the present
    value of *x*. This method does NOT iterate to solve for non-linear
    source terms or march time steps.

    Parameters
    ----------
    A : sparse matrix
        The coefficient matrix in sparse format. If not specified, then
        it uses the ``A`` matrix attached to the object.
    b : ND-array
        The RHS matrix in any format. If not specified, then it uses
        the ``b`` matrix attached to the object.

    Notes
    -----
    The solver used here is specified in the ``settings`` attribute of
    the algorithm.
    """
    # Fetch A and b from self if not given, and throw error if they've not
    # been calculated
    if A is None:
        A = self.A
        if A is None:
            raise Exception('The A matrix has not been built yet')
    if b is None:
        b = self.b
        if b is None:
            raise Exception('The b matrix has not been built yet')
    A = A.tocsr()

    # Default behavior -> use Scipy's default solver (spsolve)
    if self.settings['solver'] == 'pyamg':
        self.settings['solver_family'] = 'pyamg'
    if self.settings['solver'] == 'petsc':
        self.settings['solver_family'] = 'petsc'

    # Set tolerance for iterative solvers
    rtol = self.settings['solver_rtol']
    min_A = np.abs(A.data).min()
    min_b = np.abs(b).min() or 1e100
    atol = min(min_A, min_b) * rtol

    # SciPy
    if self.settings['solver_family'] == 'scipy':
        if importlib.util.find_spec('scikit-umfpack'):
            A.indices = A.indices.astype(np.int64)
            A.indptr = A.indptr.astype(np.int64)
        iterative = ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'lgmres',
                     'minres', 'gcrotmk', 'qmr']
        solver = getattr(sprs.linalg, self.settings['solver_type'])
        if self.settings['solver_type'] in iterative:
            x, exit_code = solver(A=A, b=b, atol=atol, tol=rtol,
                                  maxiter=self.settings['solver_maxiter'])
            if exit_code > 0:
                raise Exception('SciPy solver did not converge! '
                                + 'Exit code: ' + str(exit_code))
        else:
            x = solver(A=A, b=b)
        return x

    # PETSc
    if self.settings['solver_family'] == 'petsc':
        # Check if petsc is available
        if importlib.util.find_spec('petsc4py'):
            from openpnm.utils.petsc import PETScSparseLinearSolver as SLS
        else:
            raise Exception('PETSc is not installed.')
        # Define the petsc linear system converting the scipy objects
        ls = SLS(A=A, b=b)
        sets = self.settings
        sets = {k: v for k, v in sets.items() if k.startswith('solver_')}
        sets = {k.split('solver_')[1]: v for k, v in sets.items()}
        ls.settings.update(sets)
        x = SLS.solve(ls)
        del ls
        return x

    # PyAMG
    if self.settings['solver_family'] == 'pyamg':
        if importlib.util.find_spec('pyamg'):
            import pyamg
        else:
            raise Exception('pyamg is not installed.')
        ml = pyamg.ruge_stuben_solver(A)
        x = ml.solve(b=b, tol=1e-6)
        return x
r""" Sends the A and b matrices to the specified solver, and solves for *x* given the boundary conditions, and source terms based on the present value of *x*. This method does NOT iterate to solve for non-linear source terms or march time steps. Parameters ---------- A : sparse matrix The coefficient matrix in sparse format. If not specified, then it uses the ``A`` matrix attached to the object. b : ND-array The RHS matrix in any format. If not specified, then it uses the ``b`` matrix attached to the object. Notes ----- The solver used here is specified in the ``settings`` attribute of the algorithm.
Below is the instruction that describes the task:
### Input:
r"""
Sends the A and b matrices to the specified solver, and solves for *x*
given the boundary conditions, and source terms based on the present
value of *x*. This method does NOT iterate to solve for non-linear
source terms or march time steps.

Parameters
----------
A : sparse matrix
    The coefficient matrix in sparse format. If not specified, then
    it uses the ``A`` matrix attached to the object.
b : ND-array
    The RHS matrix in any format. If not specified, then it uses
    the ``b`` matrix attached to the object.

Notes
-----
The solver used here is specified in the ``settings`` attribute of
the algorithm.
### Response:
def _solve(self, A=None, b=None):
    r"""
    Sends the A and b matrices to the specified solver, and solves for *x*
    given the boundary conditions, and source terms based on the present
    value of *x*. This method does NOT iterate to solve for non-linear
    source terms or march time steps.

    Parameters
    ----------
    A : sparse matrix
        The coefficient matrix in sparse format. If not specified, then
        it uses the ``A`` matrix attached to the object.
    b : ND-array
        The RHS matrix in any format. If not specified, then it uses
        the ``b`` matrix attached to the object.

    Notes
    -----
    The solver used here is specified in the ``settings`` attribute of
    the algorithm.
    """
    # Fetch A and b from self if not given, and throw error if they've not
    # been calculated
    if A is None:
        A = self.A
        if A is None:
            raise Exception('The A matrix has not been built yet')
    if b is None:
        b = self.b
        if b is None:
            raise Exception('The b matrix has not been built yet')
    A = A.tocsr()

    # Default behavior -> use Scipy's default solver (spsolve)
    if self.settings['solver'] == 'pyamg':
        self.settings['solver_family'] = 'pyamg'
    if self.settings['solver'] == 'petsc':
        self.settings['solver_family'] = 'petsc'

    # Set tolerance for iterative solvers
    rtol = self.settings['solver_rtol']
    min_A = np.abs(A.data).min()
    min_b = np.abs(b).min() or 1e100
    atol = min(min_A, min_b) * rtol

    # SciPy
    if self.settings['solver_family'] == 'scipy':
        if importlib.util.find_spec('scikit-umfpack'):
            A.indices = A.indices.astype(np.int64)
            A.indptr = A.indptr.astype(np.int64)
        iterative = ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'lgmres',
                     'minres', 'gcrotmk', 'qmr']
        solver = getattr(sprs.linalg, self.settings['solver_type'])
        if self.settings['solver_type'] in iterative:
            x, exit_code = solver(A=A, b=b, atol=atol, tol=rtol,
                                  maxiter=self.settings['solver_maxiter'])
            if exit_code > 0:
                raise Exception('SciPy solver did not converge! '
                                + 'Exit code: ' + str(exit_code))
        else:
            x = solver(A=A, b=b)
        return x

    # PETSc
    if self.settings['solver_family'] == 'petsc':
        # Check if petsc is available
        if importlib.util.find_spec('petsc4py'):
            from openpnm.utils.petsc import PETScSparseLinearSolver as SLS
        else:
            raise Exception('PETSc is not installed.')
        # Define the petsc linear system converting the scipy objects
        ls = SLS(A=A, b=b)
        sets = self.settings
        sets = {k: v for k, v in sets.items() if k.startswith('solver_')}
        sets = {k.split('solver_')[1]: v for k, v in sets.items()}
        ls.settings.update(sets)
        x = SLS.solve(ls)
        del ls
        return x

    # PyAMG
    if self.settings['solver_family'] == 'pyamg':
        if importlib.util.find_spec('pyamg'):
            import pyamg
        else:
            raise Exception('pyamg is not installed.')
        ml = pyamg.ruge_stuben_solver(A)
        x = ml.solve(b=b, tol=1e-6)
        return x
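For the SciPy branch, the call pattern is plain scipy.sparse.linalg. A self-contained sketch on a toy symmetric positive-definite system, mirroring the atol scaling above (keyword names for the relative tolerance vary across SciPy versions, so only atol is passed here):

import numpy as np
import scipy.sparse as sprs
import scipy.sparse.linalg

# Small SPD tridiagonal system standing in for A and b.
A = sprs.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(50, 50)).tocsr()
b = np.ones(50)

# Mirror the tolerance logic above: scale the absolute tolerance off the
# smallest magnitudes present so the relative tolerance behaves
# consistently across differently scaled systems.
rtol = 1e-6
atol = min(np.abs(A.data).min(), np.abs(b).min() or 1e100) * rtol

x, exit_code = scipy.sparse.linalg.cg(A, b, atol=atol, maxiter=5000)
assert exit_code == 0  # 0 means the solver converged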
def _get_base_command(self):
    """Returns the base command plus command-line options.

    Handles everything up to and including the classpath. The positional
    training parameters are added by the _input_handler_decorator method.
    """
    cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
    jvm_command = "java"
    jvm_args = self._commandline_join(
        [self.Parameters[k] for k in self._jvm_parameters])
    cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass)

    command_parts = [cd_command, jvm_command, jvm_args, cp_args]
    return self._commandline_join(command_parts).strip()
Returns the base command plus command-line options. Handles everything up to and including the classpath. The positional training parameters are added by the _input_handler_decorator method.
Below is the instruction that describes the task:
### Input:
Returns the base command plus command-line options. Handles everything up to and including the classpath. The positional training parameters are added by the _input_handler_decorator method.
### Response:
def _get_base_command(self):
    """Returns the base command plus command-line options.

    Handles everything up to and including the classpath. The positional
    training parameters are added by the _input_handler_decorator method.
    """
    cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
    jvm_command = "java"
    jvm_args = self._commandline_join(
        [self.Parameters[k] for k in self._jvm_parameters])
    cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass)

    command_parts = [cd_command, jvm_command, jvm_args, cp_args]
    return self._commandline_join(command_parts).strip()
def exhandler(function, parser):
    """If -examples was specified in 'args', the specified function
    is called and the application exits.

    :arg function: the function that prints the examples.
    :arg parser: the initialized instance of the parser that has the
      additional, script-specific parameters.
    """
    args = vars(bparser.parse_known_args()[0])
    if args["examples"]:
        function()
        exit(0)
    if args["verbose"]:
        from msg import set_verbosity
        set_verbosity(args["verbose"])

    args.update(vars(parser.parse_known_args()[0]))
    return args
If -examples was specified in 'args', the specified function
is called and the application exits.

:arg function: the function that prints the examples.
:arg parser: the initialized instance of the parser that has the
  additional, script-specific parameters.
Below is the instruction that describes the task:
### Input:
If -examples was specified in 'args', the specified function
is called and the application exits.

:arg function: the function that prints the examples.
:arg parser: the initialized instance of the parser that has the
  additional, script-specific parameters.
### Response:
def exhandler(function, parser):
    """If -examples was specified in 'args', the specified function
    is called and the application exits.

    :arg function: the function that prints the examples.
    :arg parser: the initialized instance of the parser that has the
      additional, script-specific parameters.
    """
    args = vars(bparser.parse_known_args()[0])
    if args["examples"]:
        function()
        exit(0)
    if args["verbose"]:
        from msg import set_verbosity
        set_verbosity(args["verbose"])

    args.update(vars(parser.parse_known_args()[0]))
    return args
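The trick here is argparse's parse_known_args, which tolerates flags meant for the other parser instead of erroring out. A standalone sketch of that two-parser pattern; the -outfile flag is made up for illustration, only -examples and -verbose are taken from the code above:

import argparse

# Stand-in for the module-level `bparser` holding the shared flags.
bparser = argparse.ArgumentParser(add_help=False)
bparser.add_argument("-examples", action="store_true")
bparser.add_argument("-verbose", type=int, default=0)

# Script-specific parser; inherits the shared flags via parents.
parser = argparse.ArgumentParser(parents=[bparser])
parser.add_argument("-outfile")

argv = ["-verbose", "1", "-outfile", "x"]
# parse_known_args ignores options it does not recognize, so each parser
# can consume its own flags from the same argv.
args = vars(bparser.parse_known_args(argv)[0])
args.update(vars(parser.parse_known_args(argv)[0]))
print(args)  # {'examples': False, 'verbose': 1, 'outfile': 'x'}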
def check(self, return_code=0):
    """Run command with arguments. Wait for command to complete. If the
    exit code was as expected and there is no exception then return,
    otherwise raise EasyProcessError.

    :param return_code: int, expected return code
    :rtype: self
    """
    ret = self.call().return_code
    ok = ret == return_code
    if not ok:
        raise EasyProcessError(
            self, 'check error, return code is not {0}!'.format(return_code))
    return self
Run command with arguments. Wait for command to complete. If the
exit code was as expected and there is no exception then return,
otherwise raise EasyProcessError.

:param return_code: int, expected return code
:rtype: self
Below is the instruction that describes the task:
### Input:
Run command with arguments. Wait for command to complete. If the
exit code was as expected and there is no exception then return,
otherwise raise EasyProcessError.

:param return_code: int, expected return code
:rtype: self
### Response:
def check(self, return_code=0):
    """Run command with arguments. Wait for command to complete. If the
    exit code was as expected and there is no exception then return,
    otherwise raise EasyProcessError.

    :param return_code: int, expected return code
    :rtype: self
    """
    ret = self.call().return_code
    ok = ret == return_code
    if not ok:
        raise EasyProcessError(
            self, 'check error, return code is not {0}!'.format(return_code))
    return self
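Typical usage, assuming this method comes from the easyprocess package (where check() is the fail-fast counterpart of call()):

import sys
from easyprocess import EasyProcess, EasyProcessError

# Exit code 0: check() returns the EasyProcess instance.
EasyProcess([sys.executable, "--version"]).check()

# Non-zero exit code: check() raises EasyProcessError.
try:
    EasyProcess([sys.executable, "-c", "raise SystemExit(3)"]).check()
except EasyProcessError as e:
    print(e)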
def weld_str_lower(array):
    """Convert values to lowercase.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)

    weld_template = """map(
    {array},
    |e: vec[i8]|
        result(
            for(e,
                appender[i8],
                |c: appender[i8], j: i64, f: i8|
                    if(f > 64c && f < 91c,
                        merge(c, f + 32c),
                        merge(c, f))
            )
        )
)"""

    weld_obj.weld_code = weld_template.format(array=obj_id)

    return weld_obj
Convert values to lowercase.

Parameters
----------
array : numpy.ndarray or WeldObject
    Input data.

Returns
-------
WeldObject
    Representation of this computation.
Below is the instruction that describes the task:
### Input:
Convert values to lowercase.

Parameters
----------
array : numpy.ndarray or WeldObject
    Input data.

Returns
-------
WeldObject
    Representation of this computation.
### Response:
def weld_str_lower(array):
    """Convert values to lowercase.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)

    weld_template = """map(
    {array},
    |e: vec[i8]|
        result(
            for(e,
                appender[i8],
                |c: appender[i8], j: i64, f: i8|
                    if(f > 64c && f < 91c,
                        merge(c, f + 32c),
                        merge(c, f))
            )
        )
)"""

    weld_obj.weld_code = weld_template.format(array=obj_id)

    return weld_obj
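The Weld template is the classic ASCII trick: code points 65..90 ('A'..'Z', i.e. > 64 and < 91) sit exactly 32 below their lowercase counterparts. The same branch in plain Python, for reference:

def ascii_lower(buf: bytes) -> bytes:
    # Same condition as the Weld template: bytes strictly between 64 and
    # 91 get 32 added; everything else passes through unchanged.
    return bytes(b + 32 if 64 < b < 91 else b for b in buf)

print(ascii_lower(b"Hello, WORLD!"))  # b'hello, world!'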
def groupby(xs, key_fn):
    """
    Group elements of the list `xs` by keys generated from calling `key_fn`.
    Returns a dictionary which maps keys to sub-lists of `xs`.
    """
    result = defaultdict(list)
    for x in xs:
        key = key_fn(x)
        result[key].append(x)
    return result
Group elements of the list `xs` by keys generated from calling `key_fn`. Returns a dictionary which maps keys to sub-lists of `xs`.
Below is the instruction that describes the task:
### Input:
Group elements of the list `xs` by keys generated from calling `key_fn`.
Returns a dictionary which maps keys to sub-lists of `xs`.
### Response:
def groupby(xs, key_fn):
    """
    Group elements of the list `xs` by keys generated from calling `key_fn`.
    Returns a dictionary which maps keys to sub-lists of `xs`.
    """
    result = defaultdict(list)
    for x in xs:
        key = key_fn(x)
        result[key].append(x)
    return result
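Usage sketch, assuming the groupby above is in scope (it also needs collections.defaultdict imported, which the snippet omits):

from collections import defaultdict

words = ["apple", "avocado", "banana", "cherry", "blueberry"]
by_initial = groupby(words, key_fn=lambda w: w[0])
print(dict(by_initial))
# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}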
def get_artist_by_mbid(self, mbid):
    """Looks up an artist by its MusicBrainz ID"""

    params = {"mbid": mbid}

    doc = _Request(self, "artist.getInfo", params).execute(True)

    return Artist(_extract(doc, "name"), self)
Looks up an artist by its MusicBrainz ID
Below is the instruction that describes the task:
### Input:
Looks up an artist by its MusicBrainz ID
### Response:
def get_artist_by_mbid(self, mbid):
    """Looks up an artist by its MusicBrainz ID"""

    params = {"mbid": mbid}

    doc = _Request(self, "artist.getInfo", params).execute(True)

    return Artist(_extract(doc, "name"), self)
def _handle_upsert(self, parts, unwritten_lobs=()):
    """Handle reply messages from INSERT or UPDATE statements"""
    self.description = None
    self._received_last_resultset_part = True  # set to 'True' so that cursor.fetch*() returns just an empty list
    for part in parts:
        if part.kind == part_kinds.ROWSAFFECTED:
            self.rowcount = part.values[0]
        elif part.kind in (part_kinds.TRANSACTIONFLAGS,
                           part_kinds.STATEMENTCONTEXT,
                           part_kinds.PARAMETERMETADATA):
            pass
        elif part.kind == part_kinds.WRITELOBREPLY:
            # This part occurs when LOBs have been submitted only partially,
            # or not at all, during an insert.
            # In this case the parameter part of the Request message contains
            # a list called 'unwritten_lobs' with LobBuffer instances.
            # Those instances are in the same order as 'locator_ids' received
            # in the reply message. These IDs are then used to deliver the
            # missing LOB data to the server via WRITE_LOB_REQUESTs.
            for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
                # store locator_id in every lob buffer instance for later reference:
                lob_buffer.locator_id = lob_locator_id
            self._perform_lob_write_requests(unwritten_lobs)
        else:
            raise InterfaceError(
                "Prepared insert statement response, unexpected part kind %d." % part.kind)
    self._executed = True
Handle reply messages from INSERT or UPDATE statements
Below is the instruction that describes the task:
### Input:
Handle reply messages from INSERT or UPDATE statements
### Response:
def _handle_upsert(self, parts, unwritten_lobs=()):
    """Handle reply messages from INSERT or UPDATE statements"""
    self.description = None
    self._received_last_resultset_part = True  # set to 'True' so that cursor.fetch*() returns just an empty list
    for part in parts:
        if part.kind == part_kinds.ROWSAFFECTED:
            self.rowcount = part.values[0]
        elif part.kind in (part_kinds.TRANSACTIONFLAGS,
                           part_kinds.STATEMENTCONTEXT,
                           part_kinds.PARAMETERMETADATA):
            pass
        elif part.kind == part_kinds.WRITELOBREPLY:
            # This part occurs when LOBs have been submitted only partially,
            # or not at all, during an insert.
            # In this case the parameter part of the Request message contains
            # a list called 'unwritten_lobs' with LobBuffer instances.
            # Those instances are in the same order as 'locator_ids' received
            # in the reply message. These IDs are then used to deliver the
            # missing LOB data to the server via WRITE_LOB_REQUESTs.
            for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
                # store locator_id in every lob buffer instance for later reference:
                lob_buffer.locator_id = lob_locator_id
            self._perform_lob_write_requests(unwritten_lobs)
        else:
            raise InterfaceError(
                "Prepared insert statement response, unexpected part kind %d." % part.kind)
    self._executed = True
def validate_metadata(self, handler):
    """ validate that kind=category does not change the categories """
    if self.meta == 'category':
        new_metadata = self.metadata
        cur_metadata = handler.read_metadata(self.cname)
        if (new_metadata is not None and cur_metadata is not None
                and not array_equivalent(new_metadata, cur_metadata)):
            raise ValueError("cannot append a categorical with "
                             "different categories to the existing")
validate that kind=category does not change the categories
Below is the instruction that describes the task:
### Input:
validate that kind=category does not change the categories
### Response:
def validate_metadata(self, handler):
    """ validate that kind=category does not change the categories """
    if self.meta == 'category':
        new_metadata = self.metadata
        cur_metadata = handler.read_metadata(self.cname)
        if (new_metadata is not None and cur_metadata is not None
                and not array_equivalent(new_metadata, cur_metadata)):
            raise ValueError("cannot append a categorical with "
                             "different categories to the existing")
def placeholder(type_):
    """Returns the EmptyVal instance for the given type"""
    typetuple = type_ if isinstance(type_, tuple) else (type_,)
    if any in typetuple:
        typetuple = any
    if typetuple not in EMPTY_VALS:
        EMPTY_VALS[typetuple] = EmptyVal(typetuple)
    return EMPTY_VALS[typetuple]
Returns the EmptyVal instance for the given type
Below is the instruction that describes the task:
### Input:
Returns the EmptyVal instance for the given type
### Response:
def placeholder(type_):
    """Returns the EmptyVal instance for the given type"""
    typetuple = type_ if isinstance(type_, tuple) else (type_,)
    if any in typetuple:
        typetuple = any
    if typetuple not in EMPTY_VALS:
        EMPTY_VALS[typetuple] = EmptyVal(typetuple)
    return EMPTY_VALS[typetuple]
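A sketch of the caching behavior, with stand-in definitions for the module-level EMPTY_VALS and EmptyVal, which are not shown in the snippet and may differ in the real module (paste the placeholder above below these stubs to run it):

# Hypothetical stand-ins for the module-level cache and sentinel class.
EMPTY_VALS = {}

class EmptyVal(object):
    def __init__(self, typetuple):
        self.typetuple = typetuple

p1 = placeholder(int)
p2 = placeholder((int,))
assert p1 is p2  # a bare type is normalized to the tuple (int,)

# The builtin `any` absorbs the whole tuple, so both calls share one key.
assert placeholder((any, str)) is placeholder(any)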
def join(self, joiner, formatter=lambda s, t: t.format(s), template="{}"):
    """Join values and convert to string

    Example:

        >>> from ww import l
        >>> lst = l('012')
        >>> lst.join(',')
        u'0,1,2'
        >>> lst.join(',', template="{}#")
        u'0#,1#,2#'
        >>> string = lst.join(',',\
        formatter = lambda x, y: str(int(x) ** 2))
        >>> string
        u'0,1,4'
    """
    return ww.s(joiner).join(self, formatter, template)
Join values and convert to string

Example:

    >>> from ww import l
    >>> lst = l('012')
    >>> lst.join(',')
    u'0,1,2'
    >>> lst.join(',', template="{}#")
    u'0#,1#,2#'
    >>> string = lst.join(',',\
    formatter = lambda x, y: str(int(x) ** 2))
    >>> string
    u'0,1,4'
Below is the instruction that describes the task:
### Input:
Join values and convert to string

Example:

    >>> from ww import l
    >>> lst = l('012')
    >>> lst.join(',')
    u'0,1,2'
    >>> lst.join(',', template="{}#")
    u'0#,1#,2#'
    >>> string = lst.join(',',\
    formatter = lambda x, y: str(int(x) ** 2))
    >>> string
    u'0,1,4'
### Response:
def join(self, joiner, formatter=lambda s, t: t.format(s), template="{}"):
    """Join values and convert to string

    Example:

        >>> from ww import l
        >>> lst = l('012')
        >>> lst.join(',')
        u'0,1,2'
        >>> lst.join(',', template="{}#")
        u'0#,1#,2#'
        >>> string = lst.join(',',\
        formatter = lambda x, y: str(int(x) ** 2))
        >>> string
        u'0,1,4'
    """
    return ww.s(joiner).join(self, formatter, template)
def filter(self, model=None, context=None):
    """
    Perform filtering on the model. Will change model in place.

    :param model: object or dict
    :param context: object, dict or None
    :return: None
    """
    if model is None:
        return

    # properties
    self.filter_properties(model, context=context)

    # entities
    self.filter_entities(model, context=context)

    # collections
    self.filter_collections(model, context=context)
Perform filtering on the model. Will change model in place.

:param model: object or dict
:param context: object, dict or None
:return: None
Below is the instruction that describes the task:
### Input:
Perform filtering on the model. Will change model in place.

:param model: object or dict
:param context: object, dict or None
:return: None
### Response:
def filter(self, model=None, context=None):
    """
    Perform filtering on the model. Will change model in place.

    :param model: object or dict
    :param context: object, dict or None
    :return: None
    """
    if model is None:
        return

    # properties
    self.filter_properties(model, context=context)

    # entities
    self.filter_entities(model, context=context)

    # collections
    self.filter_collections(model, context=context)