Columns:
repository_name: stringlengths 5 to 67
func_path_in_repository: stringlengths 4 to 234
func_name: stringlengths 0 to 314
whole_func_string: stringlengths 52 to 3.87M
language: stringclasses, 6 values
func_code_string: stringlengths 52 to 3.87M
func_documentation_string: stringlengths 1 to 47.2k
func_code_url: stringlengths 85 to 339
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
LinePlot.do_execute
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None data = self.input.payload pltdataset.line_plot( data, atts=self.resolve_option("attributes"), percent=float(self.resolve_option("percent")), seed=int(self.resolve_option("seed")), title=self.resolve_option("title"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
python
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None data = self.input.payload pltdataset.line_plot( data, atts=self.resolve_option("attributes"), percent=float(self.resolve_option("percent")), seed=int(self.resolve_option("seed")), title=self.resolve_option("title"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L547-L564
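For readers who want to reproduce the LinePlot sink's behaviour outside the flow framework, here is a minimal sketch that calls weka.plot.dataset.line_plot directly. The keyword names mirror the resolve_option() calls above; the ARFF file name is a placeholder and matplotlib must be installed for the plot module to work.

import weka.core.jvm as jvm
import weka.plot.dataset as pltdataset
from weka.core.converters import Loader

jvm.start()
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("iris.arff")  # placeholder dataset
    data.class_is_last()
    pltdataset.line_plot(
        data,
        atts=None,            # None -> plot all attributes
        percent=100.0,
        seed=1,
        title="Iris line plot",
        outfile=None,         # set a path to also write the figure to disk
        wait=True)            # block until the plot window is closed
finally:
    jvm.stop()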
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
ClassifierErrors.fix_config
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(ClassifierErrors, self).fix_config(options) opt = "absolute" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to use absolute errors as size or relative ones (bool)." opt = "max_relative_size" if opt not in options: options[opt] = 50 if opt not in self.help: self.help[opt] = "The maximum size in point in case of relative mode (int)." opt = "absolute_size" if opt not in options: options[opt] = 50 if opt not in self.help: self.help[opt] = "The size in point in case of absolute mode (int)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
python
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(ClassifierErrors, self).fix_config(options) opt = "absolute" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to use absolute errors as size or relative ones (bool)." opt = "max_relative_size" if opt not in options: options[opt] = 50 if opt not in self.help: self.help[opt] = "The maximum size in point in case of relative mode (int)." opt = "absolute_size" if opt not in options: options[opt] = 50 if opt not in self.help: self.help[opt] = "The size in point in case of absolute mode (int)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L592-L639
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
ClassifierErrors.check_input
def check_input(self, token): """ Performs checks on the input token. Raises an exception if unsupported. :param token: the token to check :type token: Token """ if not isinstance(token.payload, Evaluation): raise Exception(self.full_name + ": Input token is not an Evaluation object!")
python
def check_input(self, token): """ Performs checks on the input token. Raises an exception if unsupported. :param token: the token to check :type token: Token """ if not isinstance(token.payload, Evaluation): raise Exception(self.full_name + ": Input token is not an Evaluation object!")
Performs checks on the input token. Raises an exception if unsupported. :param token: the token to check :type token: Token
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L654-L662
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
ClassifierErrors.do_execute
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None evl = self.input.payload pltclassifier.plot_classifier_errors( evl.predictions, absolute=bool(self.resolve_option("absolute")), max_relative_size=int(self.resolve_option("max_relative_size")), absolute_size=int(self.resolve_option("absolute_size")), title=self.resolve_option("title"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
python
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None evl = self.input.payload pltclassifier.plot_classifier_errors( evl.predictions, absolute=bool(self.resolve_option("absolute")), max_relative_size=int(self.resolve_option("max_relative_size")), absolute_size=int(self.resolve_option("absolute_size")), title=self.resolve_option("title"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L664-L681
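The ClassifierErrors sink ultimately just forwards an Evaluation's predictions to weka.plot.classifiers.plot_classifier_errors. A standalone sketch of the same call, assuming a local regression dataset (the file name is a placeholder) and matplotlib:

import weka.core.jvm as jvm
import weka.plot.classifiers as pltclassifier
from weka.core.classes import Random
from weka.core.converters import Loader
from weka.classifiers import Classifier, Evaluation

jvm.start()
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("bodyfat.arff")  # placeholder regression dataset
    data.class_is_last()
    evl = Evaluation(data)
    evl.crossvalidate_model(
        Classifier(classname="weka.classifiers.functions.LinearRegression"),
        data, 10, Random(1))
    pltclassifier.plot_classifier_errors(
        evl.predictions,
        absolute=True,          # dot size from absolute errors
        max_relative_size=50,   # only used when absolute=False
        absolute_size=50,
        title="Classifier errors",
        outfile=None,
        wait=True)
finally:
    jvm.stop()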
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
ROC.fix_config
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(ROC, self).fix_config(options) opt = "class_index" if opt not in options: options[opt] = [0] if opt not in self.help: self.help[opt] = "The list of 0-based class-label indices to display (list)." opt = "key_loc" if opt not in options: options[opt] = "lower right" if opt not in self.help: self.help[opt] = "The location of the key in the plot (str)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
python
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(ROC, self).fix_config(options) opt = "class_index" if opt not in options: options[opt] = [0] if opt not in self.help: self.help[opt] = "The list of 0-based class-label indices to display (list)." opt = "key_loc" if opt not in options: options[opt] = "lower right" if opt not in self.help: self.help[opt] = "The location of the key in the plot (str)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L709-L750
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
PRC.fix_config
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(PRC, self).fix_config(options) opt = "class_index" if opt not in options: options[opt] = [0] if opt not in self.help: self.help[opt] = "The list of 0-based class-label indices to display (list)." opt = "key_loc" if opt not in options: options[opt] = "lower center" if opt not in self.help: self.help[opt] = "The location of the key in the plot (str)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
python
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(PRC, self).fix_config(options) opt = "class_index" if opt not in options: options[opt] = [0] if opt not in self.help: self.help[opt] = "The list of 0-based class-label indices to display (list)." opt = "key_loc" if opt not in options: options[opt] = "lower center" if opt not in self.help: self.help[opt] = "The location of the key in the plot (str)." opt = "title" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The title for the plot (str)." opt = "outfile" if opt not in options: options[opt] = None if opt not in self.help: self.help[opt] = "The file to store the plot in (str)." opt = "wait" if opt not in options: options[opt] = True if opt not in self.help: self.help[opt] = "Whether to wait for user to close the plot window (bool)." return options
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L819-L860
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
PRC.do_execute
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None evl = self.input.payload pltclassifier.plot_prc( evl, class_index=self.resolve_option("class_index"), title=self.resolve_option("title"), key_loc=self.resolve_option("key_loc"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
python
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None evl = self.input.payload pltclassifier.plot_prc( evl, class_index=self.resolve_option("class_index"), title=self.resolve_option("title"), key_loc=self.resolve_option("key_loc"), outfile=self.resolve_option("outfile"), wait=bool(self.resolve_option("wait"))) return result
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L885-L901
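PRC.do_execute reduces to a single weka.plot.classifiers.plot_prc call on the incoming Evaluation. A self-contained sketch with a nominal-class dataset (file name is a placeholder); class_index takes a list of 0-based label indices and key_loc a matplotlib legend location, matching the defaults set in fix_config above.

import weka.core.jvm as jvm
import weka.plot.classifiers as pltclassifier
from weka.core.classes import Random
from weka.core.converters import Loader
from weka.classifiers import Classifier, Evaluation

jvm.start()
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("iris.arff")  # placeholder nominal-class dataset
    data.class_is_last()
    evl = Evaluation(data)
    evl.crossvalidate_model(
        Classifier(classname="weka.classifiers.trees.J48"), data, 10, Random(1))
    pltclassifier.plot_prc(
        evl,
        class_index=[0, 1],        # 0-based class-label indices to display
        title="Precision-recall",
        key_loc="lower center",    # legend location, as in PRC.fix_config
        outfile=None,
        wait=True)
finally:
    jvm.stop()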
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
InstanceDumper.do_execute
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None data = self.input.payload if isinstance(self._input.payload, Instance): inst = self.input.payload data = inst.dataset elif isinstance(self.input.payload, Instances): data = self.input.payload inst = None append = True if self._header is None or (self._header.equal_headers(data) is not None): self._header = Instances.template_instances(data, 0) outstr = str(data) append = False elif inst is not None: outstr = str(inst) else: outstr = str(data) f = None try: if append: f = open(str(self.resolve_option("output")), "a") else: f = open(str(self.resolve_option("output")), "w") f.write(outstr) f.write("\n") except Exception as e: result = self.full_name + "\n" + traceback.format_exc() finally: if f is not None: f.close() return result
python
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ result = None data = self.input.payload if isinstance(self._input.payload, Instance): inst = self.input.payload data = inst.dataset elif isinstance(self.input.payload, Instances): data = self.input.payload inst = None append = True if self._header is None or (self._header.equal_headers(data) is not None): self._header = Instances.template_instances(data, 0) outstr = str(data) append = False elif inst is not None: outstr = str(inst) else: outstr = str(data) f = None try: if append: f = open(str(self.resolve_option("output")), "a") else: f = open(str(self.resolve_option("output")), "w") f.write(outstr) f.write("\n") except Exception as e: result = self.full_name + "\n" + traceback.format_exc() finally: if f is not None: f.close() return result
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L943-L983
fracpete/python-weka-wrapper3
python/weka/experiments.py
SimpleExperiment.configure_splitevaluator
def configure_splitevaluator(self): """ Configures and returns the SplitEvaluator and Classifier instance as tuple. :return: evaluator and classifier :rtype: tuple """ if self.classification: speval = javabridge.make_instance("weka/experiment/ClassifierSplitEvaluator", "()V") else: speval = javabridge.make_instance("weka/experiment/RegressionSplitEvaluator", "()V") classifier = javabridge.call(speval, "getClassifier", "()Lweka/classifiers/Classifier;") return speval, classifier
python
def configure_splitevaluator(self): """ Configures and returns the SplitEvaluator and Classifier instance as tuple. :return: evaluator and classifier :rtype: tuple """ if self.classification: speval = javabridge.make_instance("weka/experiment/ClassifierSplitEvaluator", "()V") else: speval = javabridge.make_instance("weka/experiment/RegressionSplitEvaluator", "()V") classifier = javabridge.call(speval, "getClassifier", "()Lweka/classifiers/Classifier;") return speval, classifier
Configures and returns the SplitEvaluator and Classifier instance as tuple. :return: evaluator and classifier :rtype: tuple
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L86-L98
fracpete/python-weka-wrapper3
python/weka/experiments.py
SimpleExperiment.setup
def setup(self): """ Initializes the experiment. """ # basic options javabridge.call( self.jobject, "setPropertyArray", "(Ljava/lang/Object;)V", javabridge.get_env().make_object_array(0, javabridge.get_env().find_class("weka/classifiers/Classifier"))) javabridge.call( self.jobject, "setUsePropertyIterator", "(Z)V", True) javabridge.call( self.jobject, "setRunLower", "(I)V", 1) javabridge.call( self.jobject, "setRunUpper", "(I)V", self.runs) # setup result producer rproducer, prop_path = self.configure_resultproducer() javabridge.call( self.jobject, "setResultProducer", "(Lweka/experiment/ResultProducer;)V", rproducer) javabridge.call( self.jobject, "setPropertyPath", "([Lweka/experiment/PropertyNode;)V", prop_path) # classifiers classifiers = javabridge.get_env().make_object_array( len(self.classifiers), javabridge.get_env().find_class("weka/classifiers/Classifier")) for i, classifier in enumerate(self.classifiers): if type(classifier) is Classifier: javabridge.get_env().set_object_array_element( classifiers, i, classifier.jobject) else: javabridge.get_env().set_object_array_element( classifiers, i, from_commandline(classifier).jobject) javabridge.call( self.jobject, "setPropertyArray", "(Ljava/lang/Object;)V", classifiers) # datasets datasets = javabridge.make_instance("javax/swing/DefaultListModel", "()V") for dataset in self.datasets: f = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", dataset) javabridge.call(datasets, "addElement", "(Ljava/lang/Object;)V", f) javabridge.call( self.jobject, "setDatasets", "(Ljavax/swing/DefaultListModel;)V", datasets) # output file if str(self.result).lower().endswith(".arff"): rlistener = javabridge.make_instance("weka/experiment/InstancesResultListener", "()V") elif str(self.result).lower().endswith(".csv"): rlistener = javabridge.make_instance("weka/experiment/CSVResultListener", "()V") else: raise Exception("Unhandled output format for results: " + self.result) rfile = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", self.result) javabridge.call( rlistener, "setOutputFile", "(Ljava/io/File;)V", rfile) javabridge.call( self.jobject, "setResultListener", "(Lweka/experiment/ResultListener;)V", rlistener)
python
def setup(self): """ Initializes the experiment. """ # basic options javabridge.call( self.jobject, "setPropertyArray", "(Ljava/lang/Object;)V", javabridge.get_env().make_object_array(0, javabridge.get_env().find_class("weka/classifiers/Classifier"))) javabridge.call( self.jobject, "setUsePropertyIterator", "(Z)V", True) javabridge.call( self.jobject, "setRunLower", "(I)V", 1) javabridge.call( self.jobject, "setRunUpper", "(I)V", self.runs) # setup result producer rproducer, prop_path = self.configure_resultproducer() javabridge.call( self.jobject, "setResultProducer", "(Lweka/experiment/ResultProducer;)V", rproducer) javabridge.call( self.jobject, "setPropertyPath", "([Lweka/experiment/PropertyNode;)V", prop_path) # classifiers classifiers = javabridge.get_env().make_object_array( len(self.classifiers), javabridge.get_env().find_class("weka/classifiers/Classifier")) for i, classifier in enumerate(self.classifiers): if type(classifier) is Classifier: javabridge.get_env().set_object_array_element( classifiers, i, classifier.jobject) else: javabridge.get_env().set_object_array_element( classifiers, i, from_commandline(classifier).jobject) javabridge.call( self.jobject, "setPropertyArray", "(Ljava/lang/Object;)V", classifiers) # datasets datasets = javabridge.make_instance("javax/swing/DefaultListModel", "()V") for dataset in self.datasets: f = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", dataset) javabridge.call(datasets, "addElement", "(Ljava/lang/Object;)V", f) javabridge.call( self.jobject, "setDatasets", "(Ljavax/swing/DefaultListModel;)V", datasets) # output file if str(self.result).lower().endswith(".arff"): rlistener = javabridge.make_instance("weka/experiment/InstancesResultListener", "()V") elif str(self.result).lower().endswith(".csv"): rlistener = javabridge.make_instance("weka/experiment/CSVResultListener", "()V") else: raise Exception("Unhandled output format for results: " + self.result) rfile = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", self.result) javabridge.call( rlistener, "setOutputFile", "(Ljava/io/File;)V", rfile) javabridge.call( self.jobject, "setResultListener", "(Lweka/experiment/ResultListener;)V", rlistener)
Initializes the experiment.
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L109-L164
fracpete/python-weka-wrapper3
python/weka/experiments.py
SimpleExperiment.run
def run(self): """ Executes the experiment. """ logger.info("Initializing...") javabridge.call(self.jobject, "initialize", "()V") logger.info("Running...") javabridge.call(self.jobject, "runExperiment", "()V") logger.info("Finished...") javabridge.call(self.jobject, "postProcess", "()V")
python
def run(self): """ Executes the experiment. """ logger.info("Initializing...") javabridge.call(self.jobject, "initialize", "()V") logger.info("Running...") javabridge.call(self.jobject, "runExperiment", "()V") logger.info("Finished...") javabridge.call(self.jobject, "postProcess", "()V")
Executes the experiment.
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L166-L175
fracpete/python-weka-wrapper3
python/weka/experiments.py
SimpleExperiment.load
def load(cls, filename): """ Loads the experiment from disk. :param filename: the filename of the experiment to load :type filename: str :return: the experiment :rtype: Experiment """ jobject = javabridge.static_call( "weka/experiment/Experiment", "read", "(Ljava/lang/String;)Lweka/experiment/Experiment;", filename) return Experiment(jobject=jobject)
python
def load(cls, filename): """ Loads the experiment from disk. :param filename: the filename of the experiment to load :type filename: str :return: the experiment :rtype: Experiment """ jobject = javabridge.static_call( "weka/experiment/Experiment", "read", "(Ljava/lang/String;)Lweka/experiment/Experiment;", filename) return Experiment(jobject=jobject)
Loads the experiment from disk. :param filename: the filename of the experiment to load :type filename: str :return: the experiment :rtype: Experiment
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L190-L202
fracpete/python-weka-wrapper3
python/weka/experiments.py
SimpleRandomSplitExperiment.configure_resultproducer
def configure_resultproducer(self): """ Configures and returns the ResultProducer and PropertyPath as tuple. :return: producer and property path :rtype: tuple """ rproducer = javabridge.make_instance("weka/experiment/RandomSplitResultProducer", "()V") javabridge.call(rproducer, "setRandomizeData", "(Z)V", not self.preserve_order) javabridge.call(rproducer, "setTrainPercent", "(D)V", self.percentage) speval, classifier = self.configure_splitevaluator() javabridge.call(rproducer, "setSplitEvaluator", "(Lweka/experiment/SplitEvaluator;)V", speval) prop_path = javabridge.get_env().make_object_array( 2, javabridge.get_env().find_class("weka/experiment/PropertyNode")) cls = javabridge.get_env().find_class("weka/experiment/RandomSplitResultProducer") desc = javabridge.make_instance( "java/beans/PropertyDescriptor", "(Ljava/lang/String;Ljava/lang/Class;)V", "splitEvaluator", cls) node = javabridge.make_instance( "weka/experiment/PropertyNode", "(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V", speval, desc, cls) javabridge.get_env().set_object_array_element(prop_path, 0, node) cls = javabridge.get_env().get_object_class(speval) desc = javabridge.make_instance( "java/beans/PropertyDescriptor", "(Ljava/lang/String;Ljava/lang/Class;)V", "classifier", cls) node = javabridge.make_instance( "weka/experiment/PropertyNode", "(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V", javabridge.call(speval, "getClass", "()Ljava/lang/Class;"), desc, cls) javabridge.get_env().set_object_array_element(prop_path, 1, node) return rproducer, prop_path
python
def configure_resultproducer(self): """ Configures and returns the ResultProducer and PropertyPath as tuple. :return: producer and property path :rtype: tuple """ rproducer = javabridge.make_instance("weka/experiment/RandomSplitResultProducer", "()V") javabridge.call(rproducer, "setRandomizeData", "(Z)V", not self.preserve_order) javabridge.call(rproducer, "setTrainPercent", "(D)V", self.percentage) speval, classifier = self.configure_splitevaluator() javabridge.call(rproducer, "setSplitEvaluator", "(Lweka/experiment/SplitEvaluator;)V", speval) prop_path = javabridge.get_env().make_object_array( 2, javabridge.get_env().find_class("weka/experiment/PropertyNode")) cls = javabridge.get_env().find_class("weka/experiment/RandomSplitResultProducer") desc = javabridge.make_instance( "java/beans/PropertyDescriptor", "(Ljava/lang/String;Ljava/lang/Class;)V", "splitEvaluator", cls) node = javabridge.make_instance( "weka/experiment/PropertyNode", "(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V", speval, desc, cls) javabridge.get_env().set_object_array_element(prop_path, 0, node) cls = javabridge.get_env().get_object_class(speval) desc = javabridge.make_instance( "java/beans/PropertyDescriptor", "(Ljava/lang/String;Ljava/lang/Class;)V", "classifier", cls) node = javabridge.make_instance( "weka/experiment/PropertyNode", "(Ljava/lang/Object;Ljava/beans/PropertyDescriptor;Ljava/lang/Class;)V", javabridge.call(speval, "getClass", "()Ljava/lang/Class;"), desc, cls) javabridge.get_env().set_object_array_element(prop_path, 1, node) return rproducer, prop_path
Configures and returns the ResultProducer and PropertyPath as tuple. :return: producer and property path :rtype: tuple
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L336-L365
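Putting the experiment pieces together: a sketch of a complete random-split experiment. The constructor keywords are an assumption inferred from the attributes referenced in setup() and configure_resultproducer() above (classification, runs, percentage, preserve_order, datasets, classifiers, result); the ARFF file names are placeholders.

import weka.core.jvm as jvm
from weka.classifiers import Classifier
from weka.experiments import SimpleRandomSplitExperiment

jvm.start()
try:
    exp = SimpleRandomSplitExperiment(
        classification=True,        # ClassifierSplitEvaluator rather than RegressionSplitEvaluator
        runs=10,                    # setRunUpper
        percentage=66.6,            # setTrainPercent
        preserve_order=False,       # negated into setRandomizeData
        datasets=["iris.arff", "anneal.arff"],   # placeholder ARFF files
        classifiers=[Classifier(classname="weka.classifiers.trees.J48"),
                     Classifier(classname="weka.classifiers.rules.ZeroR")],
        result="results.arff")      # .arff -> InstancesResultListener, .csv -> CSVResultListener
    exp.setup()
    exp.run()
finally:
    jvm.stop()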
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.set_row_name
def set_row_name(self, index, name): """ Sets the row name. :param index: the 0-based row index :type index: int :param name: the name of the row :type name: str """ javabridge.call(self.jobject, "setRowName", "(ILjava/lang/String;)V", index, name)
python
def set_row_name(self, index, name): """ Sets the row name. :param index: the 0-based row index :type index: int :param name: the name of the row :type name: str """ javabridge.call(self.jobject, "setRowName", "(ILjava/lang/String;)V", index, name)
Sets the row name. :param index: the 0-based row index :type index: int :param name: the name of the row :type name: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L478-L487
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.set_col_name
def set_col_name(self, index, name): """ Sets the column name. :param index: the 0-based row index :type index: int :param name: the name of the column :type name: str """ javabridge.call(self.jobject, "setColName", "(ILjava/lang/String;)V", index, name)
python
def set_col_name(self, index, name): """ Sets the column name. :param index: the 0-based row index :type index: int :param name: the name of the column :type name: str """ javabridge.call(self.jobject, "setColName", "(ILjava/lang/String;)V", index, name)
Sets the column name. :param index: the 0-based row index :type index: int :param name: the name of the column :type name: str
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L500-L509
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.get_mean
def get_mean(self, col, row): """ Returns the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the mean :rtype: float """ return javabridge.call(self.jobject, "getMean", "(II)D", col, row)
python
def get_mean(self, col, row): """ Returns the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the mean :rtype: float """ return javabridge.call(self.jobject, "getMean", "(II)D", col, row)
Returns the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the mean :rtype: float
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L511-L522
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.set_mean
def set_mean(self, col, row, mean): """ Sets the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param mean: the mean to set :type mean: float """ javabridge.call(self.jobject, "setMean", "(IID)V", col, row, mean)
python
def set_mean(self, col, row, mean): """ Sets the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param mean: the mean to set :type mean: float """ javabridge.call(self.jobject, "setMean", "(IID)V", col, row, mean)
Sets the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param mean: the mean to set :type mean: float
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L524-L535
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.get_stdev
def get_stdev(self, col, row): """ Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float """ return javabridge.call(self.jobject, "getStdDev", "(II)D", col, row)
python
def get_stdev(self, col, row): """ Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float """ return javabridge.call(self.jobject, "getStdDev", "(II)D", col, row)
Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L537-L548
fracpete/python-weka-wrapper3
python/weka/experiments.py
ResultMatrix.set_stdev
def set_stdev(self, col, row, stdev): """ Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float """ javabridge.call(self.jobject, "setStdDev", "(IID)V", col, row, stdev)
python
def set_stdev(self, col, row, stdev): """ Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float """ javabridge.call(self.jobject, "setStdDev", "(IID)V", col, row, stdev)
Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/experiments.py#L550-L561
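A small sketch exercising the ResultMatrix accessors above. The constructor call (wrapping weka.experiment.ResultMatrixPlainText via the classname= pattern) and the assumption that a fresh matrix starts out 1x1 are not shown in this section, so treat them as assumptions; the get/set calls and their (col, row) argument order come straight from the methods above.

import weka.core.jvm as jvm
from weka.experiments import ResultMatrix

jvm.start()
try:
    # assumed: wrapper follows the usual classname= pattern and a new matrix is 1x1
    matrix = ResultMatrix(classname="weka.experiment.ResultMatrixPlainText")
    matrix.set_col_name(0, "J48")       # 0-based column index
    matrix.set_row_name(0, "iris")      # 0-based row index
    matrix.set_mean(0, 0, 94.7)         # (col, row, mean)
    matrix.set_stdev(0, 0, 1.3)         # (col, row, stdev)
    print(matrix.get_mean(0, 0), matrix.get_stdev(0, 0))
finally:
    jvm.stop()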
redcap-tools/PyCap
redcap/request.py
RCRequest.validate
def validate(self): """Checks that at least required params exist""" required = ['token', 'content'] valid_data = { 'exp_record': (['type', 'format'], 'record', 'Exporting record but content is not record'), 'imp_record': (['type', 'overwriteBehavior', 'data', 'format'], 'record', 'Importing record but content is not record'), 'metadata': (['format'], 'metadata', 'Requesting metadata but content != metadata'), 'exp_file': (['action', 'record', 'field'], 'file', 'Exporting file but content is not file'), 'imp_file': (['action', 'record', 'field'], 'file', 'Importing file but content is not file'), 'del_file': (['action', 'record', 'field'], 'file', 'Deleteing file but content is not file'), 'exp_event': (['format'], 'event', 'Exporting events but content is not event'), 'exp_arm': (['format'], 'arm', 'Exporting arms but content is not arm'), 'exp_fem': (['format'], 'formEventMapping', 'Exporting form-event mappings but content != formEventMapping'), 'exp_user': (['format'], 'user', 'Exporting users but content is not user'), 'exp_survey_participant_list': (['instrument'], 'participantList', 'Exporting Survey Participant List but content != participantList'), 'version': (['format'], 'version', 'Requesting version but content != version') } extra, req_content, err_msg = valid_data[self.type] required.extend(extra) required = set(required) pl_keys = set(self.payload.keys()) # if req is not subset of payload keys, this call is wrong if not set(required) <= pl_keys: # what is not in pl_keys? not_pre = required - pl_keys raise RCAPIError("Required keys: %s" % ', '.join(not_pre)) # Check content, raise with err_msg if not good try: if self.payload['content'] != req_content: raise RCAPIError(err_msg) except KeyError: raise RCAPIError('content not in payload')
python
def validate(self): """Checks that at least required params exist""" required = ['token', 'content'] valid_data = { 'exp_record': (['type', 'format'], 'record', 'Exporting record but content is not record'), 'imp_record': (['type', 'overwriteBehavior', 'data', 'format'], 'record', 'Importing record but content is not record'), 'metadata': (['format'], 'metadata', 'Requesting metadata but content != metadata'), 'exp_file': (['action', 'record', 'field'], 'file', 'Exporting file but content is not file'), 'imp_file': (['action', 'record', 'field'], 'file', 'Importing file but content is not file'), 'del_file': (['action', 'record', 'field'], 'file', 'Deleteing file but content is not file'), 'exp_event': (['format'], 'event', 'Exporting events but content is not event'), 'exp_arm': (['format'], 'arm', 'Exporting arms but content is not arm'), 'exp_fem': (['format'], 'formEventMapping', 'Exporting form-event mappings but content != formEventMapping'), 'exp_user': (['format'], 'user', 'Exporting users but content is not user'), 'exp_survey_participant_list': (['instrument'], 'participantList', 'Exporting Survey Participant List but content != participantList'), 'version': (['format'], 'version', 'Requesting version but content != version') } extra, req_content, err_msg = valid_data[self.type] required.extend(extra) required = set(required) pl_keys = set(self.payload.keys()) # if req is not subset of payload keys, this call is wrong if not set(required) <= pl_keys: # what is not in pl_keys? not_pre = required - pl_keys raise RCAPIError("Required keys: %s" % ', '.join(not_pre)) # Check content, raise with err_msg if not good try: if self.payload['content'] != req_content: raise RCAPIError(err_msg) except KeyError: raise RCAPIError('content not in payload')
Checks that at least required params exist
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L64-L107
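To make the required-key check concrete, here is the minimal payload shape that validate() accepts for an 'exp_record' request. The key names come straight from the valid_data table above; the token value is a placeholder.

payload = {
    'token': 'ABCDEF0123456789',  # placeholder API token
    'content': 'record',          # must equal req_content for 'exp_record'
    'type': 'flat',               # extra key required for 'exp_record'
    'format': 'json',             # extra key required for 'exp_record'
}

# the same subset test validate() performs
required = {'token', 'content', 'type', 'format'}
missing = required - set(payload)
assert not missing, "Required keys: %s" % ', '.join(missing)
assert payload['content'] == 'record'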
redcap-tools/PyCap
redcap/request.py
RCRequest.execute
def execute(self, **kwargs): """Execute the API request and return data Parameters ---------- kwargs : passed to requests.post() Returns ------- response : list, str data object from JSON decoding process if format=='json', else return raw string (ie format=='csv'|'xml') """ r = post(self.url, data=self.payload, **kwargs) # Raise if we need to self.raise_for_status(r) content = self.get_content(r) return content, r.headers
python
def execute(self, **kwargs): """Execute the API request and return data Parameters ---------- kwargs : passed to requests.post() Returns ------- response : list, str data object from JSON decoding process if format=='json', else return raw string (ie format=='csv'|'xml') """ r = post(self.url, data=self.payload, **kwargs) # Raise if we need to self.raise_for_status(r) content = self.get_content(r) return content, r.headers
Execute the API request and return data Parameters ---------- kwargs : passed to requests.post() Returns ------- response : list, str data object from JSON decoding process if format=='json', else return raw string (ie format=='csv'|'xml')
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L109-L127
redcap-tools/PyCap
redcap/request.py
RCRequest.get_content
def get_content(self, r): """Abstraction for grabbing content from a returned response""" if self.type == 'exp_file': # don't use the decoded r.text return r.content elif self.type == 'version': return r.content else: if self.fmt == 'json': content = {} # Decode try: # Watch out for bad/empty json content = json.loads(r.text, strict=False) except ValueError as e: if not self.expect_empty_json(): # reraise for requests that shouldn't send empty json raise ValueError(e) finally: return content else: return r.text
python
def get_content(self, r): """Abstraction for grabbing content from a returned response""" if self.type == 'exp_file': # don't use the decoded r.text return r.content elif self.type == 'version': return r.content else: if self.fmt == 'json': content = {} # Decode try: # Watch out for bad/empty json content = json.loads(r.text, strict=False) except ValueError as e: if not self.expect_empty_json(): # reraise for requests that shouldn't send empty json raise ValueError(e) finally: return content else: return r.text
Abstraction for grabbing content from a returned response
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L129-L150
redcap-tools/PyCap
redcap/request.py
RCRequest.raise_for_status
def raise_for_status(self, r): """Given a response, raise for bad status for certain actions Some redcap api methods don't return error messages that the user could test for or otherwise use. Therefore, we need to do the testing ourself Raising for everything wouldn't let the user see the (hopefully helpful) error message""" if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'): r.raise_for_status() # see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html # specifically 10.5 if 500 <= r.status_code < 600: raise RedcapError(r.content)
python
def raise_for_status(self, r): """Given a response, raise for bad status for certain actions Some redcap api methods don't return error messages that the user could test for or otherwise use. Therefore, we need to do the testing ourself Raising for everything wouldn't let the user see the (hopefully helpful) error message""" if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'): r.raise_for_status() # see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html # specifically 10.5 if 500 <= r.status_code < 600: raise RedcapError(r.content)
Given a response, raise for bad status for certain actions Some redcap api methods don't return error messages that the user could test for or otherwise use. Therefore, we need to do the testing ourself Raising for everything wouldn't let the user see the (hopefully helpful) error message
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/request.py#L156-L170
redcap-tools/PyCap
redcap/project.py
Project.__basepl
def __basepl(self, content, rec_type='flat', format='json'): """Return a dictionary which can be used as is or added to for payloads""" d = {'token': self.token, 'content': content, 'format': format} if content not in ['metadata', 'file']: d['type'] = rec_type return d
python
def __basepl(self, content, rec_type='flat', format='json'): """Return a dictionary which can be used as is or added to for payloads""" d = {'token': self.token, 'content': content, 'format': format} if content not in ['metadata', 'file']: d['type'] = rec_type return d
Return a dictionary which can be used as is or added to for payloads
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L95-L101
redcap-tools/PyCap
redcap/project.py
Project.is_longitudinal
def is_longitudinal(self): """ Returns ------- boolean : longitudinal status of this project """ return len(self.events) > 0 and \ len(self.arm_nums) > 0 and \ len(self.arm_names) > 0
python
def is_longitudinal(self): """ Returns ------- boolean : longitudinal status of this project """ return len(self.events) > 0 and \ len(self.arm_nums) > 0 and \ len(self.arm_names) > 0
Returns ------- boolean : longitudinal status of this project
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L114-L123
redcap-tools/PyCap
redcap/project.py
Project.filter_metadata
def filter_metadata(self, key): """ Return a list of values for the metadata key from each field of the project's metadata. Parameters ---------- key: str A known key in the metadata structure Returns ------- filtered : attribute list from each field """ filtered = [field[key] for field in self.metadata if key in field] if len(filtered) == 0: raise KeyError("Key not found in metadata") return filtered
python
def filter_metadata(self, key): """ Return a list of values for the metadata key from each field of the project's metadata. Parameters ---------- key: str A known key in the metadata structure Returns ------- filtered : attribute list from each field """ filtered = [field[key] for field in self.metadata if key in field] if len(filtered) == 0: raise KeyError("Key not found in metadata") return filtered
Return a list of values for the metadata key from each field of the project's metadata. Parameters ---------- key: str A known key in the metadata structure Returns ------- filtered : attribute list from each field
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L125-L143
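A short sketch of filter_metadata in use; the API URL and token are placeholders, and the metadata keys queried ('field_label', 'form_name') are standard REDCap data-dictionary columns.

from redcap import Project

project = Project('https://redcap.example.org/api/', '0123456789ABCDEF')  # placeholders
field_labels = project.filter_metadata('field_label')   # one value per field
form_names = set(project.filter_metadata('form_name'))  # forms defined in the project
print(len(field_labels), sorted(form_names))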
redcap-tools/PyCap
redcap/project.py
Project.export_fem
def export_fem(self, arms=None, format='json', df_kwargs=None): """ Export the project's form to event mapping Parameters ---------- arms : list Limit exported form event mappings to these arm numbers format : (``'json'``), ``'csv'``, ``'xml'`` Return the form event mappings in native objects, csv or xml, ``'df''`` will return a ``pandas.DataFrame`` df_kwargs : dict Passed to pandas.read_csv to control construction of returned DataFrame Returns ------- fem : list, str, ``pandas.DataFrame`` form-event mapping for the project """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('formEventMapping', format=ret_format) to_add = [arms] str_add = ['arms'] for key, data in zip(str_add, to_add): if data: pl[key] = ','.join(data) response, _ = self._call_api(pl, 'exp_fem') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: return read_csv(StringIO(response)) else: return read_csv(StringIO(response), **df_kwargs)
python
def export_fem(self, arms=None, format='json', df_kwargs=None): """ Export the project's form to event mapping Parameters ---------- arms : list Limit exported form event mappings to these arm numbers format : (``'json'``), ``'csv'``, ``'xml'`` Return the form event mappings in native objects, csv or xml, ``'df''`` will return a ``pandas.DataFrame`` df_kwargs : dict Passed to pandas.read_csv to control construction of returned DataFrame Returns ------- fem : list, str, ``pandas.DataFrame`` form-event mapping for the project """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('formEventMapping', format=ret_format) to_add = [arms] str_add = ['arms'] for key, data in zip(str_add, to_add): if data: pl[key] = ','.join(data) response, _ = self._call_api(pl, 'exp_fem') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: return read_csv(StringIO(response)) else: return read_csv(StringIO(response), **df_kwargs)
Export the project's form to event mapping Parameters ---------- arms : list Limit exported form event mappings to these arm numbers format : (``'json'``), ``'csv'``, ``'xml'`` Return the form event mappings in native objects, csv or xml, ``'df''`` will return a ``pandas.DataFrame`` df_kwargs : dict Passed to pandas.read_csv to control construction of returned DataFrame Returns ------- fem : list, str, ``pandas.DataFrame`` form-event mapping for the project
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L157-L194
redcap-tools/PyCap
redcap/project.py
Project.export_metadata
def export_metadata(self, fields=None, forms=None, format='json', df_kwargs=None): """ Export the project's metadata Parameters ---------- fields : list Limit exported metadata to these fields forms : list Limit exported metadata to these forms format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Return the metadata in native objects, csv or xml. ``'df'`` will return a ``pandas.DataFrame``. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default ``{'index_col': 'field_name'}`` Returns ------- metadata : list, str, ``pandas.DataFrame`` metadata sttructure for the project. """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('metadata', format=ret_format) to_add = [fields, forms] str_add = ['fields', 'forms'] for key, data in zip(str_add, to_add): if data: pl[key] = ','.join(data) response, _ = self._call_api(pl, 'metadata') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: df_kwargs = {'index_col': 'field_name'} return read_csv(StringIO(response), **df_kwargs)
python
def export_metadata(self, fields=None, forms=None, format='json', df_kwargs=None): """ Export the project's metadata Parameters ---------- fields : list Limit exported metadata to these fields forms : list Limit exported metadata to these forms format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Return the metadata in native objects, csv or xml. ``'df'`` will return a ``pandas.DataFrame``. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default ``{'index_col': 'field_name'}`` Returns ------- metadata : list, str, ``pandas.DataFrame`` metadata sttructure for the project. """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('metadata', format=ret_format) to_add = [fields, forms] str_add = ['fields', 'forms'] for key, data in zip(str_add, to_add): if data: pl[key] = ','.join(data) response, _ = self._call_api(pl, 'metadata') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: df_kwargs = {'index_col': 'field_name'} return read_csv(StringIO(response), **df_kwargs)
Export the project's metadata Parameters ---------- fields : list Limit exported metadata to these fields forms : list Limit exported metadata to these forms format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Return the metadata in native objects, csv or xml. ``'df'`` will return a ``pandas.DataFrame``. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default ``{'index_col': 'field_name'}`` Returns ------- metadata : list, str, ``pandas.DataFrame`` metadata sttructure for the project.
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L196-L236
redcap-tools/PyCap
redcap/project.py
Project.export_records
def export_records(self, records=None, fields=None, forms=None, events=None, raw_or_label='raw', event_name='label', format='json', export_survey_fields=False, export_data_access_groups=False, df_kwargs=None, export_checkbox_labels=False, filter_logic=None): """ Export data from the REDCap project. Parameters ---------- records : list array of record names specifying specific records to export. by default, all records are exported fields : list array of field names specifying specific fields to pull by default, all fields are exported forms : list array of form names to export. If in the web UI, the form name has a space in it, replace the space with an underscore by default, all forms are exported events : list an array of unique event names from which to export records :note: this only applies to longitudinal projects raw_or_label : (``'raw'``), ``'label'``, ``'both'`` export the raw coded values or labels for the options of multiple choice fields, or both event_name : (``'label'``), ``'unique'`` export the unique event name or the event label format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Format of returned data. ``'json'`` returns json-decoded objects while ``'csv'`` and ``'xml'`` return other formats. ``'df'`` will attempt to return a ``pandas.DataFrame``. export_survey_fields : (``False``), True specifies whether or not to export the survey identifier field (e.g., "redcap_survey_identifier") or survey timestamp fields (e.g., form_name+"_timestamp") when surveys are utilized in the project. export_data_access_groups : (``False``), ``True`` specifies whether or not to export the ``"redcap_data_access_group"`` field when data access groups are utilized in the project. :note: This flag is only viable if the user whose token is being used to make the API request is *not* in a data access group. If the user is in a group, then this flag will revert to its default value. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default, ``{'index_col': self.def_field}`` export_checkbox_labels : (``False``), ``True`` specify whether to export checkbox values as their label on export. filter_logic : string specify the filterLogic to be sent to the API. Returns ------- data : list, str, ``pandas.DataFrame`` exported data """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('record', format=ret_format) fields = self.backfill_fields(fields, forms) keys_to_add = (records, fields, forms, events, raw_or_label, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels) str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel') for key, data in zip(str_keys, keys_to_add): if data: # Make a url-ok string if key in ('fields', 'records', 'forms', 'events'): pl[key] = ','.join(data) else: pl[key] = data if filter_logic: pl["filterLogic"] = filter_logic response, _ = self._call_api(pl, 'exp_record') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: if self.is_longitudinal(): df_kwargs = {'index_col': [self.def_field, 'redcap_event_name']} else: df_kwargs = {'index_col': self.def_field} buf = StringIO(response) df = read_csv(buf, **df_kwargs) buf.close() return df
python
def export_records(self, records=None, fields=None, forms=None, events=None, raw_or_label='raw', event_name='label', format='json', export_survey_fields=False, export_data_access_groups=False, df_kwargs=None, export_checkbox_labels=False, filter_logic=None): """ Export data from the REDCap project. Parameters ---------- records : list array of record names specifying specific records to export. by default, all records are exported fields : list array of field names specifying specific fields to pull by default, all fields are exported forms : list array of form names to export. If in the web UI, the form name has a space in it, replace the space with an underscore by default, all forms are exported events : list an array of unique event names from which to export records :note: this only applies to longitudinal projects raw_or_label : (``'raw'``), ``'label'``, ``'both'`` export the raw coded values or labels for the options of multiple choice fields, or both event_name : (``'label'``), ``'unique'`` export the unique event name or the event label format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Format of returned data. ``'json'`` returns json-decoded objects while ``'csv'`` and ``'xml'`` return other formats. ``'df'`` will attempt to return a ``pandas.DataFrame``. export_survey_fields : (``False``), True specifies whether or not to export the survey identifier field (e.g., "redcap_survey_identifier") or survey timestamp fields (e.g., form_name+"_timestamp") when surveys are utilized in the project. export_data_access_groups : (``False``), ``True`` specifies whether or not to export the ``"redcap_data_access_group"`` field when data access groups are utilized in the project. :note: This flag is only viable if the user whose token is being used to make the API request is *not* in a data access group. If the user is in a group, then this flag will revert to its default value. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default, ``{'index_col': self.def_field}`` export_checkbox_labels : (``False``), ``True`` specify whether to export checkbox values as their label on export. filter_logic : string specify the filterLogic to be sent to the API. Returns ------- data : list, str, ``pandas.DataFrame`` exported data """ ret_format = format if format == 'df': from pandas import read_csv ret_format = 'csv' pl = self.__basepl('record', format=ret_format) fields = self.backfill_fields(fields, forms) keys_to_add = (records, fields, forms, events, raw_or_label, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels) str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel') for key, data in zip(str_keys, keys_to_add): if data: # Make a url-ok string if key in ('fields', 'records', 'forms', 'events'): pl[key] = ','.join(data) else: pl[key] = data if filter_logic: pl["filterLogic"] = filter_logic response, _ = self._call_api(pl, 'exp_record') if format in ('json', 'csv', 'xml'): return response elif format == 'df': if not df_kwargs: if self.is_longitudinal(): df_kwargs = {'index_col': [self.def_field, 'redcap_event_name']} else: df_kwargs = {'index_col': self.def_field} buf = StringIO(response) df = read_csv(buf, **df_kwargs) buf.close() return df
Export data from the REDCap project. Parameters ---------- records : list array of record names specifying specific records to export. by default, all records are exported fields : list array of field names specifying specific fields to pull by default, all fields are exported forms : list array of form names to export. If in the web UI, the form name has a space in it, replace the space with an underscore by default, all forms are exported events : list an array of unique event names from which to export records :note: this only applies to longitudinal projects raw_or_label : (``'raw'``), ``'label'``, ``'both'`` export the raw coded values or labels for the options of multiple choice fields, or both event_name : (``'label'``), ``'unique'`` export the unique event name or the event label format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'`` Format of returned data. ``'json'`` returns json-decoded objects while ``'csv'`` and ``'xml'`` return other formats. ``'df'`` will attempt to return a ``pandas.DataFrame``. export_survey_fields : (``False``), True specifies whether or not to export the survey identifier field (e.g., "redcap_survey_identifier") or survey timestamp fields (e.g., form_name+"_timestamp") when surveys are utilized in the project. export_data_access_groups : (``False``), ``True`` specifies whether or not to export the ``"redcap_data_access_group"`` field when data access groups are utilized in the project. :note: This flag is only viable if the user whose token is being used to make the API request is *not* in a data access group. If the user is in a group, then this flag will revert to its default value. df_kwargs : dict Passed to ``pandas.read_csv`` to control construction of returned DataFrame. by default, ``{'index_col': self.def_field}`` export_checkbox_labels : (``False``), ``True`` specify whether to export checkbox values as their label on export. filter_logic : string specify the filterLogic to be sent to the API. Returns ------- data : list, str, ``pandas.DataFrame`` exported data
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L238-L335
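A minimal usage sketch for Project.export_records above (not part of the dataset record). The Project(api_url, api_token) constructor arguments, the URL, the token, and the 'demographics' form name are illustrative assumptions:

from redcap import Project

project = Project('https://redcap.example.org/api/', 'MY_API_TOKEN')  # constructor args assumed

# JSON-decoded list of dicts, one per record, with labels instead of raw codes
records = project.export_records(raw_or_label='label')

# A pandas DataFrame limited to a single (hypothetical) form
df = project.export_records(format='df', forms=['demographics'])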
redcap-tools/PyCap
redcap/project.py
Project.__meta_metadata
def __meta_metadata(self, field, key): """Return the value for key for the field in the metadata""" mf = '' try: mf = str([f[key] for f in self.metadata if f['field_name'] == field][0]) except IndexError: print("%s not in metadata field:%s" % (key, field)) return mf else: return mf
python
def __meta_metadata(self, field, key): """Return the value for key for the field in the metadata""" mf = '' try: mf = str([f[key] for f in self.metadata if f['field_name'] == field][0]) except IndexError: print("%s not in metadata field:%s" % (key, field)) return mf else: return mf
Return the value for key for the field in the metadata
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L342-L352
redcap-tools/PyCap
redcap/project.py
Project.backfill_fields
def backfill_fields(self, fields, forms): """ Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatibility for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields """ if forms and not fields: new_fields = [self.def_field] elif fields and self.def_field not in fields: new_fields = list(fields) if self.def_field not in fields: new_fields.append(self.def_field) elif not fields: new_fields = self.field_names else: new_fields = list(fields) return new_fields
python
def backfill_fields(self, fields, forms): """ Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatibility for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields """ if forms and not fields: new_fields = [self.def_field] elif fields and self.def_field not in fields: new_fields = list(fields) if self.def_field not in fields: new_fields.append(self.def_field) elif not fields: new_fields = self.field_names else: new_fields = list(fields) return new_fields
Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatibility for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L354-L382
redcap-tools/PyCap
redcap/project.py
Project.filter
def filter(self, query, output_fields=None): """Query the database and return subject information for those who match the query logic Parameters ---------- query: Query or QueryGroup Query(Group) object to process output_fields: list The fields desired for matching subjects Returns ------- A list of dictionaries whose keys contain at least the default field and at most each key passed in with output_fields, each dictionary representing a surviving row in the database. """ query_keys = query.fields() if not set(query_keys).issubset(set(self.field_names)): raise ValueError("One or more query keys not in project keys") query_keys.append(self.def_field) data = self.export_records(fields=query_keys) matches = query.filter(data, self.def_field) if matches: # if output_fields is empty, we'll download all fields, which is # not desired, so we limit download to def_field if not output_fields: output_fields = [self.def_field] # But if caller passed a string and not list, we need to listify if isinstance(output_fields, basestring): output_fields = [output_fields] return self.export_records(records=matches, fields=output_fields) else: # If there are no matches, then sending an empty list to # export_records will actually return all rows, which is not # what we want return []
python
def filter(self, query, output_fields=None): """Query the database and return subject information for those who match the query logic Parameters ---------- query: Query or QueryGroup Query(Group) object to process output_fields: list The fields desired for matching subjects Returns ------- A list of dictionaries whose keys contain at least the default field and at most each key passed in with output_fields, each dictionary representing a surviving row in the database. """ query_keys = query.fields() if not set(query_keys).issubset(set(self.field_names)): raise ValueError("One or more query keys not in project keys") query_keys.append(self.def_field) data = self.export_records(fields=query_keys) matches = query.filter(data, self.def_field) if matches: # if output_fields is empty, we'll download all fields, which is # not desired, so we limit download to def_field if not output_fields: output_fields = [self.def_field] # But if caller passed a string and not list, we need to listify if isinstance(output_fields, basestring): output_fields = [output_fields] return self.export_records(records=matches, fields=output_fields) else: # If there are no matches, then sending an empty list to # export_records will actually return all rows, which is not # what we want return []
Query the database and return subject information for those who match the query logic Parameters ---------- query: Query or QueryGroup Query(Group) object to process output_fields: list The fields desired for matching subjects Returns ------- A list of dictionaries whose keys contain at least the default field and at most each key passed in with output_fields, each dictionary representing a surviving row in the database.
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L384-L420
redcap-tools/PyCap
redcap/project.py
Project.names_labels
def names_labels(self, do_print=False): """Simple helper function to get all field names and labels """ if do_print: for name, label in zip(self.field_names, self.field_labels): print('%s --> %s' % (str(name), str(label))) return self.field_names, self.field_labels
python
def names_labels(self, do_print=False): """Simple helper function to get all field names and labels """ if do_print: for name, label in zip(self.field_names, self.field_labels): print('%s --> %s' % (str(name), str(label))) return self.field_names, self.field_labels
Simple helper function to get all field names and labels
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L422-L427
redcap-tools/PyCap
redcap/project.py
Project.import_records
def import_records(self, to_import, overwrite='normal', format='json', return_format='json', return_content='count', date_format='YMD', force_auto_number=False): """ Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'`` """ pl = self.__basepl('record') if hasattr(to_import, 'to_csv'): # We'll assume it's a df buf = StringIO() if self.is_longitudinal(): csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']} else: csv_kwargs = {'index_label': self.def_field} to_import.to_csv(buf, **csv_kwargs) pl['data'] = buf.getvalue() buf.close() format = 'csv' elif format == 'json': pl['data'] = json.dumps(to_import, separators=(',', ':')) else: # don't do anything to csv/xml pl['data'] = to_import pl['overwriteBehavior'] = overwrite pl['format'] = format pl['returnFormat'] = return_format pl['returnContent'] = return_content pl['dateFormat'] = date_format pl['forceAutoNumber'] = force_auto_number response = self._call_api(pl, 'imp_record')[0] if 'error' in response: raise RedcapError(str(response)) return response
python
def import_records(self, to_import, overwrite='normal', format='json', return_format='json', return_content='count', date_format='YMD', force_auto_number=False): """ Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'`` """ pl = self.__basepl('record') if hasattr(to_import, 'to_csv'): # We'll assume it's a df buf = StringIO() if self.is_longitudinal(): csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']} else: csv_kwargs = {'index_label': self.def_field} to_import.to_csv(buf, **csv_kwargs) pl['data'] = buf.getvalue() buf.close() format = 'csv' elif format == 'json': pl['data'] = json.dumps(to_import, separators=(',', ':')) else: # don't do anything to csv/xml pl['data'] = to_import pl['overwriteBehavior'] = overwrite pl['format'] = format pl['returnFormat'] = return_format pl['returnContent'] = return_content pl['dateFormat'] = date_format pl['forceAutoNumber'] = force_auto_number response = self._call_api(pl, 'imp_record')[0] if 'error' in response: raise RedcapError(str(response)) return response
Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'``
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L429-L501
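A hedged sketch of Project.import_records above. The project object and the 'record_id' field name are assumptions for illustration; 'record_id' stands in for whatever the project's defined identifier field is:

from redcap import Project

project = Project('https://redcap.example.org/api/', 'MY_API_TOKEN')  # constructor args assumed

to_import = [{'record_id': '1', 'age': '42'}]
response = project.import_records(to_import, return_content='count')
print(response)  # e.g. {'count': 1} when return_format='json'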
redcap-tools/PyCap
redcap/project.py
Project.export_file
def export_file(self, record, field, event=None, return_format='json'): """ Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary """ self._check_file_field(field) # load up payload pl = self.__basepl(content='file', format=return_format) # there's no format field in this call del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'export' pl['field'] = field pl['record'] = record if event: pl['event'] = event content, headers = self._call_api(pl, 'exp_file') #REDCap adds some useful things in content-type if 'content-type' in headers: splat = [kv.strip() for kv in headers['content-type'].split(';')] kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv in splat if '=' in kv] content_map = dict(kv) else: content_map = {} return content, content_map
python
def export_file(self, record, field, event=None, return_format='json'): """ Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary """ self._check_file_field(field) # load up payload pl = self.__basepl(content='file', format=return_format) # there's no format field in this call del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'export' pl['field'] = field pl['record'] = record if event: pl['event'] = event content, headers = self._call_api(pl, 'exp_file') #REDCap adds some useful things in content-type if 'content-type' in headers: splat = [kv.strip() for kv in headers['content-type'].split(';')] kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv in splat if '=' in kv] content_map = dict(kv) else: content_map = {} return content, content_map
Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L503-L549
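A sketch of Project.export_file above, writing the returned bytes to disk. It reuses the hypothetical project object from the earlier sketches; the 'consent_form' field name and the 'name' key in content_map are assumptions (the keys depend on the server's content-type header):

content, content_map = project.export_file(record='1', field='consent_form')
filename = content_map.get('name', 'exported_file')  # 'name' key is an assumption
with open(filename, 'wb') as out:
    out.write(content)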
redcap-tools/PyCap
redcap/project.py
Project.import_file
def import_file(self, record, field, fname, fobj, event=None, return_format='json'): """ Import the contents of a file represented by fobj to a particular record's field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format`` """ self._check_file_field(field) # load up payload pl = self.__basepl(content='file', format=return_format) # no format in this call del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'import' pl['field'] = field pl['record'] = record if event: pl['event'] = event file_kwargs = {'files': {'file': (fname, fobj)}} return self._call_api(pl, 'imp_file', **file_kwargs)[0]
python
def import_file(self, record, field, fname, fobj, event=None, return_format='json'): """ Import the contents of a file represented by fobj to a particular record's field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format`` """ self._check_file_field(field) # load up payload pl = self.__basepl(content='file', format=return_format) # no format in this call del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'import' pl['field'] = field pl['record'] = record if event: pl['event'] = event file_kwargs = {'files': {'file': (fname, fobj)}} return self._call_api(pl, 'imp_file', **file_kwargs)[0]
Import the contents of a file represented by fobj to a particular record's field Parameters ---------- record : str record ID field : str field name where the file will go fname : str file name visible in REDCap UI fobj : file object file object as returned by `open` event : str for longitudinal projects, specify the unique event here return_format : ('json'), 'csv', 'xml' format of error message Returns ------- response : response from server as specified by ``return_format``
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L551-L589
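The corresponding import sketch for Project.import_file above, again reusing the hypothetical project object and a hypothetical 'consent_form' file field:

with open('consent_signed.pdf', 'rb') as fobj:
    response = project.import_file(record='1', field='consent_form',
                                   fname='consent_signed.pdf', fobj=fobj)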
redcap-tools/PyCap
redcap/project.py
Project.delete_file
def delete_file(self, record, field, return_format='json', event=None): """ Delete a file from REDCap Notes ----- There is no undo button to this. Parameters ---------- record : str record ID field : str field name return_format : (``'json'``), ``'csv'``, ``'xml'`` return format for error message event : str If longitudinal project, event to delete file from Returns ------- response : dict, str response from REDCap after deleting file """ self._check_file_field(field) # Load up payload pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'delete' pl['record'] = record pl['field'] = field if event: pl['event'] = event return self._call_api(pl, 'del_file')[0]
python
def delete_file(self, record, field, return_format='json', event=None): """ Delete a file from REDCap Notes ----- There is no undo button to this. Parameters ---------- record : str record ID field : str field name return_format : (``'json'``), ``'csv'``, ``'xml'`` return format for error message event : str If longitudinal project, event to delete file from Returns ------- response : dict, str response from REDCap after deleting file """ self._check_file_field(field) # Load up payload pl = self.__basepl(content='file', format=return_format) del pl['format'] pl['returnFormat'] = return_format pl['action'] = 'delete' pl['record'] = record pl['field'] = field if event: pl['event'] = event return self._call_api(pl, 'del_file')[0]
Delete a file from REDCap Notes ----- There is no undo button to this. Parameters ---------- record : str record ID field : str field name return_format : (``'json'``), ``'csv'``, ``'xml'`` return format for error message event : str If longitudinal project, event to delete file from Returns ------- response : dict, str response from REDCap after deleting file
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L591-L625
redcap-tools/PyCap
redcap/project.py
Project._check_file_field
def _check_file_field(self, field): """Check that field exists and is a file field""" is_field = field in self.field_names is_file = self.__meta_metadata(field, 'field_type') == 'file' if not (is_field and is_file): msg = "'%s' is not a field or not a 'file' field" % field raise ValueError(msg) else: return True
python
def _check_file_field(self, field): """Check that field exists and is a file field""" is_field = field in self.field_names is_file = self.__meta_metadata(field, 'field_type') == 'file' if not (is_field and is_file): msg = "'%s' is not a field or not a 'file' field" % field raise ValueError(msg) else: return True
Check that field exists and is a file field
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L627-L635
redcap-tools/PyCap
redcap/project.py
Project.export_users
def export_users(self, format='json'): """ Export the users of the Project Notes ----- Each user will have the following keys: * ``'firstname'`` : User's first name * ``'lastname'`` : User's last name * ``'email'`` : Email address * ``'username'`` : User's username * ``'expiration'`` : Project access expiration date * ``'data_access_group'`` : data access group ID * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set) * ``'forms'`` : a list of dicts with a single key as the form name and value is an integer describing that user's form rights, where: 0=no access, 1=view records/responses and edit records (survey responses are read-only), 2=read only, and 3=edit survey responses, Parameters ---------- format : (``'json'``), ``'csv'``, ``'xml'`` response return format Returns ------- users: list, str list of users dicts when ``'format'='json'``, otherwise a string """ pl = self.__basepl(content='user', format=format) return self._call_api(pl, 'exp_user')[0]
python
def export_users(self, format='json'): """ Export the users of the Project Notes ----- Each user will have the following keys: * ``'firstname'`` : User's first name * ``'lastname'`` : User's last name * ``'email'`` : Email address * ``'username'`` : User's username * ``'expiration'`` : Project access expiration date * ``'data_access_group'`` : data access group ID * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set) * ``'forms'`` : a list of dicts with a single key as the form name and value is an integer describing that user's form rights, where: 0=no access, 1=view records/responses and edit records (survey responses are read-only), 2=read only, and 3=edit survey responses, Parameters ---------- format : (``'json'``), ``'csv'``, ``'xml'`` response return format Returns ------- users: list, str list of users dicts when ``'format'='json'``, otherwise a string """ pl = self.__basepl(content='user', format=format) return self._call_api(pl, 'exp_user')[0]
Export the users of the Project Notes ----- Each user will have the following keys: * ``'firstname'`` : User's first name * ``'lastname'`` : User's last name * ``'email'`` : Email address * ``'username'`` : User's username * ``'expiration'`` : Project access expiration date * ``'data_access_group'`` : data access group ID * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set) * ``'forms'`` : a list of dicts with a single key as the form name and value is an integer describing that user's form rights, where: 0=no access, 1=view records/responses and edit records (survey responses are read-only), 2=read only, and 3=edit survey responses, Parameters ---------- format : (``'json'``), ``'csv'``, ``'xml'`` response return format Returns ------- users: list, str list of users dicts when ``'format'='json'``, otherwise a string
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L637-L671
redcap-tools/PyCap
redcap/project.py
Project.export_survey_participant_list
def export_survey_participant_list(self, instrument, event=None, format='json'): """ Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data """ pl = self.__basepl(content='participantList', format=format) pl['instrument'] = instrument if event: pl['event'] = event return self._call_api(pl, 'exp_survey_participant_list')
python
def export_survey_participant_list(self, instrument, event=None, format='json'): """ Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data """ pl = self.__basepl(content='participantList', format=format) pl['instrument'] = instrument if event: pl['event'] = event return self._call_api(pl, 'exp_survey_participant_list')
Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L673-L694
quentinsf/qhue
qhue/qhue.py
create_new_username
def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT): """Interactive helper function to generate a new anonymous username. Args: ip: ip address of the bridge devicetype (optional): devicetype to register with the bridge. If unprovided, generates a device type based on the local hostname. timeout (optional, default=5): request timeout in seconds Raises: QhueException if something went wrong with username generation (for example, if the bridge button wasn't pressed). """ res = Resource(_api_url(ip), timeout) prompt = "Press the Bridge button, then press Return: " # Deal with one of the sillier python3 changes if sys.version_info.major == 2: _ = raw_input(prompt) else: _ = input(prompt) if devicetype is None: devicetype = "qhue#{}".format(getfqdn()) # raises QhueException if something went wrong response = res(devicetype=devicetype, http_method="post") return response[0]["success"]["username"]
python
def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT): """Interactive helper function to generate a new anonymous username. Args: ip: ip address of the bridge devicetype (optional): devicetype to register with the bridge. If unprovided, generates a device type based on the local hostname. timeout (optional, default=5): request timeout in seconds Raises: QhueException if something went wrong with username generation (for example, if the bridge button wasn't pressed). """ res = Resource(_api_url(ip), timeout) prompt = "Press the Bridge button, then press Return: " # Deal with one of the sillier python3 changes if sys.version_info.major == 2: _ = raw_input(prompt) else: _ = input(prompt) if devicetype is None: devicetype = "qhue#{}".format(getfqdn()) # raises QhueException if something went wrong response = res(devicetype=devicetype, http_method="post") return response[0]["success"]["username"]
Interactive helper function to generate a new anonymous username. Args: ip: ip address of the bridge devicetype (optional): devicetype to register with the bridge. If unprovided, generates a device type based on the local hostname. timeout (optional, default=5): request timeout in seconds Raises: QhueException if something went wrong with username generation (for example, if the bridge button wasn't pressed).
https://github.com/quentinsf/qhue/blob/faddc49de844134784f4742f4783066976d76c08/qhue/qhue.py#L73-L99
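A small sketch of create_new_username above; the package-level import is an assumption (the function lives in qhue/qhue.py), and the bridge IP is illustrative:

from qhue import create_new_username  # import path assumed

username = create_new_username('192.168.0.45')  # blocks until you press Return after pressing the bridge button
print('Store this username for later sessions:', username)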
spotify/gordon
gordon/router.py
GordonRouter.run
async def run(self): """Entrypoint to route messages between plugins.""" logging.info('Starting message router...') coroutines = set() while True: coro = self._poll_channel() coroutines.add(coro) _, coroutines = await asyncio.wait(coroutines, timeout=0.1)
python
async def run(self): """Entrypoint to route messages between plugins.""" logging.info('Starting message router...') coroutines = set() while True: coro = self._poll_channel() coroutines.add(coro) _, coroutines = await asyncio.wait(coroutines, timeout=0.1)
Entrypoint to route messages between plugins.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/router.py#L193-L201
spotify/gordon
gordon/main.py
shutdown
async def shutdown(sig, loop): """Gracefully cancel current tasks when app receives a shutdown signal.""" logging.info(f'Received exit signal {sig.name}...') tasks = [task for task in asyncio.Task.all_tasks() if task is not asyncio.tasks.Task.current_task()] for task in tasks: logging.debug(f'Cancelling task: {task}') task.cancel() results = await asyncio.gather(*tasks, return_exceptions=True) logging.debug(f'Done awaiting cancelled tasks, results: {results}') loop.stop() logging.info('Shutdown complete.')
python
async def shutdown(sig, loop): """Gracefully cancel current tasks when app receives a shutdown signal.""" logging.info(f'Received exit signal {sig.name}...') tasks = [task for task in asyncio.Task.all_tasks() if task is not asyncio.tasks.Task.current_task()] for task in tasks: logging.debug(f'Cancelling task: {task}') task.cancel() results = await asyncio.gather(*tasks, return_exceptions=True) logging.debug(f'Done awaiting cancelled tasks, results: {results}') loop.stop() logging.info('Shutdown complete.')
Gracefully cancel current tasks when app receives a shutdown signal.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/main.py#L51-L65
spotify/gordon
gordon/main.py
_deep_merge_dict
def _deep_merge_dict(a, b): """Additively merge right side dict into left side dict.""" for k, v in b.items(): if k in a and isinstance(a[k], dict) and isinstance(v, dict): _deep_merge_dict(a[k], v) else: a[k] = v
python
def _deep_merge_dict(a, b): """Additively merge right side dict into left side dict.""" for k, v in b.items(): if k in a and isinstance(a[k], dict) and isinstance(v, dict): _deep_merge_dict(a[k], v) else: a[k] = v
Additively merge right side dict into left side dict.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/main.py#L68-L74
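A quick demonstration of _deep_merge_dict above (a private helper in gordon/main.py); it mutates the left-hand dict in place and returns None:

from gordon.main import _deep_merge_dict  # private helper shown above

base = {'core': {'debug': False, 'plugins': ['a']}}
override = {'core': {'debug': True}, 'extra': 1}
_deep_merge_dict(base, override)
print(base)  # {'core': {'debug': True, 'plugins': ['a']}, 'extra': 1}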
spotify/gordon
gordon/plugins_loader.py
load_plugins
def load_plugins(config, plugin_kwargs): """ Discover and instantiate plugins. Args: config (dict): loaded configuration for the Gordon service. plugin_kwargs (dict): keyword arguments to give to plugins during instantiation. Returns: Tuple of 4 items: list of names of plugins, list of instantiated plugin objects, any errors encountered while loading/instantiating plugins, and the plugin keyword arguments (including the metrics plugin, if configured). Three empty lists and None are returned if there are no plugins found or activated in gordon config. """ installed_plugins = _gather_installed_plugins() metrics_plugin = _get_metrics_plugin(config, installed_plugins) if metrics_plugin: plugin_kwargs['metrics'] = metrics_plugin active_plugins = _get_activated_plugins(config, installed_plugins) if not active_plugins: return [], [], [], None plugin_namespaces = _get_plugin_config_keys(active_plugins) plugin_configs = _load_plugin_configs(plugin_namespaces, config) plugin_names, plugins, errors = _init_plugins( active_plugins, installed_plugins, plugin_configs, plugin_kwargs) return plugin_names, plugins, errors, plugin_kwargs
python
def load_plugins(config, plugin_kwargs): """ Discover and instantiate plugins. Args: config (dict): loaded configuration for the Gordon service. plugin_kwargs (dict): keyword arguments to give to plugins during instantiation. Returns: Tuple of 4 items: list of names of plugins, list of instantiated plugin objects, any errors encountered while loading/instantiating plugins, and the plugin keyword arguments (including the metrics plugin, if configured). Three empty lists and None are returned if there are no plugins found or activated in gordon config. """ installed_plugins = _gather_installed_plugins() metrics_plugin = _get_metrics_plugin(config, installed_plugins) if metrics_plugin: plugin_kwargs['metrics'] = metrics_plugin active_plugins = _get_activated_plugins(config, installed_plugins) if not active_plugins: return [], [], [], None plugin_namespaces = _get_plugin_config_keys(active_plugins) plugin_configs = _load_plugin_configs(plugin_namespaces, config) plugin_names, plugins, errors = _init_plugins( active_plugins, installed_plugins, plugin_configs, plugin_kwargs) return plugin_names, plugins, errors, plugin_kwargs
Discover and instantiate plugins. Args: config (dict): loaded configuration for the Gordon service. plugin_kwargs (dict): keyword arguments to give to plugins during instantiation. Returns: Tuple of 4 items: list of names of plugins, list of instantiated plugin objects, any errors encountered while loading/instantiating plugins, and the plugin keyword arguments (including the metrics plugin, if configured). Three empty lists and None are returned if there are no plugins found or activated in gordon config.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/plugins_loader.py#L210-L237
spotify/gordon
gordon/metrics/ffwd.py
UDPClientProtocol.connection_made
def connection_made(self, transport): """Create connection, use to send message and close. Args: transport (asyncio.DatagramTransport): Transport used for sending. """ self.transport = transport self.transport.sendto(self.message) self.transport.close()
python
def connection_made(self, transport): """Create connection, use to send message and close. Args: transport (asyncio.DatagramTransport): Transport used for sending. """ self.transport = transport self.transport.sendto(self.message) self.transport.close()
Create connection, use to send message and close. Args: transport (asyncio.DatagramTransport): Transport used for sending.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/metrics/ffwd.py#L59-L67
spotify/gordon
gordon/metrics/ffwd.py
UDPClient.send
async def send(self, metric): """Transform metric to JSON bytestring and send to server. Args: metric (dict): Complete metric to send as JSON. """ message = json.dumps(metric).encode('utf-8') await self.loop.create_datagram_endpoint( lambda: UDPClientProtocol(message), remote_addr=(self.ip, self.port))
python
async def send(self, metric): """Transform metric to JSON bytestring and send to server. Args: metric (dict): Complete metric to send as JSON. """ message = json.dumps(metric).encode('utf-8') await self.loop.create_datagram_endpoint( lambda: UDPClientProtocol(message), remote_addr=(self.ip, self.port))
Transform metric to JSON bytestring and send to server. Args: metric (dict): Complete metric to send as JSON.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/metrics/ffwd.py#L87-L96
spotify/gordon
gordon/record_checker.py
RecordChecker.check_record
async def check_record(self, record, timeout=60): """Measures the time for a DNS record to become available. Query a provided DNS server multiple times until the reply matches the information in the record or until timeout is reached. Args: record (dict): DNS record as a dict with record properties. timeout (int): Time threshold to query the DNS server. """ start_time = time.time() name, rr_data, r_type, ttl = self._extract_record_data(record) r_type_code = async_dns.types.get_code(r_type) resolvable_record = False retries = 0 sleep_time = 5 while not resolvable_record and \ timeout > retries * sleep_time: retries += 1 resolver_res = await self._resolver.query(name, r_type_code) possible_ans = resolver_res.an resolvable_record = \ await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code) if not resolvable_record: await asyncio.sleep(sleep_time) if not resolvable_record: logging.info( f'Sending metric record-checker-failed: {record}.') else: final_time = float(time.time() - start_time) success_msg = (f'This record: {record} took {final_time} to ' 'register.') logging.info(success_msg)
python
async def check_record(self, record, timeout=60): """Measures the time for a DNS record to become available. Query a provided DNS server multiple times until the reply matches the information in the record or until timeout is reached. Args: record (dict): DNS record as a dict with record properties. timeout (int): Time threshold to query the DNS server. """ start_time = time.time() name, rr_data, r_type, ttl = self._extract_record_data(record) r_type_code = async_dns.types.get_code(r_type) resolvable_record = False retries = 0 sleep_time = 5 while not resolvable_record and \ timeout > retries * sleep_time: retries += 1 resolver_res = await self._resolver.query(name, r_type_code) possible_ans = resolver_res.an resolvable_record = \ await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code) if not resolvable_record: await asyncio.sleep(sleep_time) if not resolvable_record: logging.info( f'Sending metric record-checker-failed: {record}.') else: final_time = float(time.time() - start_time) success_msg = (f'This record: {record} took {final_time} to ' 'register.') logging.info(success_msg)
Measures the time for a DNS record to become available. Query a provided DNS server multiple times until the reply matches the information in the record or until timeout is reached. Args: record (dict): DNS record as a dict with record properties. timeout (int): Time threshold to query the DNS server.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/record_checker.py#L54-L94
spotify/gordon
gordon/record_checker.py
RecordChecker._check_resolver_ans
async def _check_resolver_ans( self, dns_answer_list, record_name, record_data_list, record_ttl, record_type_code): """Check if resolver answer is equal to record data. Args: dns_answer_list (list): DNS answer list contains record objects. record_name (str): Record name. record_data_list (list): List of data values for the record. record_ttl (int): Record time-to-live info. record_type_code (int): Record type code. Returns: boolean indicating if DNS answer data is equal to record data. """ type_filtered_list = [ ans for ans in dns_answer_list if ans.qtype == record_type_code ] # check to see that type_filtered_lst has # the same number of records as record_data_list if len(type_filtered_list) != len(record_data_list): return False # check each record data is equal to the given data for rec in type_filtered_list: conditions = [rec.name == record_name, rec.ttl == record_ttl, rec.data in record_data_list] # if ans record data is not equal # to the given data return False if not all(conditions): return False return True
python
async def _check_resolver_ans( self, dns_answer_list, record_name, record_data_list, record_ttl, record_type_code): """Check if resolver answer is equal to record data. Args: dns_answer_list (list): DNS answer list contains record objects. record_name (str): Record name. record_data_list (list): List of data values for the record. record_ttl (int): Record time-to-live info. record_type_code (int): Record type code. Returns: boolean indicating if DNS answer data is equal to record data. """ type_filtered_list = [ ans for ans in dns_answer_list if ans.qtype == record_type_code ] # check to see that type_filtered_lst has # the same number of records as record_data_list if len(type_filtered_list) != len(record_data_list): return False # check each record data is equal to the given data for rec in type_filtered_list: conditions = [rec.name == record_name, rec.ttl == record_ttl, rec.data in record_data_list] # if ans record data is not equal # to the given data return False if not all(conditions): return False return True
Check if resolver answer is equal to record data. Args: dns_answer_list (list): DNS answer list contains record objects. record_name (str): Record name. record_data_list (list): List of data values for the record. record_ttl (int): Record time-to-live info. record_type_code (int): Record type code. Returns: boolean indicating if DNS answer data is equal to record data.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/record_checker.py#L96-L131
spotify/gordon
setup.py
read
def read(*filenames, **kwargs): """ Build an absolute path from ``*filenames``, and return contents of resulting file. Defaults to UTF-8 encoding. """ encoding = kwargs.get('encoding', 'utf-8') sep = kwargs.get('sep', '\n') buf = [] for fl in filenames: with codecs.open(os.path.join(HERE, fl), 'rb', encoding) as f: buf.append(f.read()) return sep.join(buf)
python
def read(*filenames, **kwargs): """ Build an absolute path from ``*filenames``, and return contents of resulting file. Defaults to UTF-8 encoding. """ encoding = kwargs.get('encoding', 'utf-8') sep = kwargs.get('sep', '\n') buf = [] for fl in filenames: with codecs.open(os.path.join(HERE, fl), 'rb', encoding) as f: buf.append(f.read()) return sep.join(buf)
Build an absolute path from ``*filenames``, and return contents of resulting file. Defaults to UTF-8 encoding.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/setup.py#L32-L43
spotify/gordon
gordon/metrics/log.py
LoggerAdapter.log
def log(self, metric): """Format and output metric. Args: metric (dict): Complete metric. """ message = self.LOGFMT.format(**metric) if metric['context']: message += ' context: {context}'.format(context=metric['context']) self._logger.log(self.level, message)
python
def log(self, metric): """Format and output metric. Args: metric (dict): Complete metric. """ message = self.LOGFMT.format(**metric) if metric['context']: message += ' context: {context}'.format(context=metric['context']) self._logger.log(self.level, message)
Format and output metric. Args: metric (dict): Complete metric.
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/metrics/log.py#L89-L98
jameslyons/pycipher
pycipher/atbash.py
Atbash.encipher
def encipher(self,string,keep_punct=False): """Encipher string using Atbash cipher. Example:: ciphertext = Atbash().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.key[self.a2i(c)] else: ret += c return ret
python
def encipher(self,string,keep_punct=False): """Encipher string using Atbash cipher. Example:: ciphertext = Atbash().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.key[self.a2i(c)] else: ret += c return ret
Encipher string using Atbash cipher. Example:: ciphertext = Atbash().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/atbash.py#L16-L32
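A usage sketch for Atbash.encipher above; the package-level import is an assumption (the class lives in pycipher/atbash.py):

from pycipher import Atbash  # import path assumed

Atbash().encipher('defend the east wall', keep_punct=True)
# -> 'WVUVMW GSV VZHG DZOO' (A<->Z, B<->Y, ...; spaces kept because keep_punct=True)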
jameslyons/pycipher
pycipher/polybius.py
PolybiusSquare.encipher
def encipher(self,string): """Encipher string using Polybius square cipher according to initialised key. Example:: ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be twice the length of the plaintext. """ string = self.remove_punctuation(string)#,filter='[^'+self.key+']') ret = '' for c in range(0,len(string)): ret += self.encipher_char(string[c]) return ret
python
def encipher(self,string): """Encipher string using Polybius square cipher according to initialised key. Example:: ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be twice the length of the plaintext. """ string = self.remove_punctuation(string)#,filter='[^'+self.key+']') ret = '' for c in range(0,len(string)): ret += self.encipher_char(string[c]) return ret
Encipher string using Polybius square cipher according to initialised key. Example:: ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be twice the length of the plaintext.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/polybius.py#L36-L50
jameslyons/pycipher
pycipher/polybius.py
PolybiusSquare.decipher
def decipher(self,string): """Decipher string using Polybius square cipher according to initialised key. Example:: plaintext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be half the length of the ciphertext. """ string = self.remove_punctuation(string)#,filter='[^'+self.chars+']') ret = '' for i in range(0,len(string),2): ret += self.decipher_pair(string[i:i+2]) return ret
python
def decipher(self,string): """Decipher string using Polybius square cipher according to initialised key. Example:: plaintext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be half the length of the ciphertext. """ string = self.remove_punctuation(string)#,filter='[^'+self.chars+']') ret = '' for i in range(0,len(string),2): ret += self.decipher_pair(string[i:i+2]) return ret
Decipher string using Polybius square cipher according to initialised key. Example:: plaintext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be half the length of the ciphertext.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/polybius.py#L52-L66
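A round-trip sketch for PolybiusSquare above, using the key from the docstring example; the module import path follows the file path in the record:

from pycipher.polybius import PolybiusSquare

ps = PolybiusSquare('APCZWRLFBDKOTYUQGENHXMIVS', 5, 'MKSBU')
ct = ps.encipher('HELLOWORLD')  # twice the length of the plaintext
pt = ps.decipher(ct)            # 'HELLOWORLD'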
jameslyons/pycipher
pycipher/adfgvx.py
ADFGVX.decipher
def decipher(self,string): """Decipher string using ADFGVX cipher according to initialised key information. Punctuation and whitespace are removed from the input. Example:: plaintext = ADFGVX('ph0qg64mea1yl2nofdxkr3cvs5zw7bj9uti8','HELLO').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ step2 = ColTrans(self.keyword).decipher(string) step1 = PolybiusSquare(self.key,size=6,chars='ADFGVX').decipher(step2) return step1
python
def decipher(self,string): """Decipher string using ADFGVX cipher according to initialised key information. Punctuation and whitespace are removed from the input. Example:: plaintext = ADFGVX('ph0qg64mea1yl2nofdxkr3cvs5zw7bj9uti8','HELLO').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ step2 = ColTrans(self.keyword).decipher(string) step1 = PolybiusSquare(self.key,size=6,chars='ADFGVX').decipher(step2) return step1
Decipher string using ADFGVX cipher according to initialised key information. Punctuation and whitespace are removed from the input. Example:: plaintext = ADFGVX('ph0qg64mea1yl2nofdxkr3cvs5zw7bj9uti8','HELLO').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/adfgvx.py#L40-L53
jameslyons/pycipher
pycipher/enigma.py
Enigma.encipher
def encipher(self,string): """Encipher string using Enigma M3 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Enigma(settings=('A','A','A'),rotors=(1,2,3),reflector='B', ringstellung=('F','V','N'),steckers=[('P','O'),('M','L'), ('I','U'),('K','J'),('N','H'),('Y','T'),('G','B'),('V','F'), ('R','E'),('D','C')]).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.encipher_char(c) else: ret += c return ret
python
def encipher(self,string): """Encipher string using Enigma M3 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Enigma(settings=('A','A','A'),rotors=(1,2,3),reflector='B', ringstellung=('F','V','N'),steckers=[('P','O'),('M','L'), ('I','U'),('K','J'),('N','H'),('Y','T'),('G','B'),('V','F'), ('R','E'),('D','C')]).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.encipher_char(c) else: ret += c return ret
Encipher string using Enigma M3 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Enigma(settings=('A','A','A'),rotors=(1,2,3),reflector='B', ringstellung=('F','V','N'),steckers=[('P','O'),('M','L'), ('I','U'),('K','J'),('N','H'),('Y','T'),('G','B'),('V','F'), ('R','E'),('D','C')]).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/enigma.py#L128-L147
jameslyons/pycipher
pycipher/util.py
ic
def ic(ctext): ''' takes ciphertext, calculates index of coincidence.''' counts = ngram_count(ctext,N=1) icval = 0 for k in counts.keys(): icval += counts[k]*(counts[k]-1) icval /= (len(ctext)*(len(ctext)-1)) return icval
python
def ic(ctext): ''' takes ciphertext, calculates index of coincidence.''' counts = ngram_count(ctext,N=1) icval = 0 for k in counts.keys(): icval += counts[k]*(counts[k]-1) icval /= (len(ctext)*(len(ctext)-1)) return icval
takes ciphertext, calculates index of coincidence.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/util.py#L7-L14
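A short sketch for ic above; the import path follows pycipher/util.py:

from pycipher.util import ic

ic('ABCDEFGHIJKLMNOPQRSTUVWXYZ')      # 0.0, since every letter occurs exactly once
ic('DEFENDTHEEASTWALLOFTHECASTLE')    # noticeably higher for English-like text (~0.066 in the long run)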
jameslyons/pycipher
pycipher/util.py
ngram_count
def ngram_count(text,N=1,keep_punct=False): ''' if N=1, return a dict containing each letter along with how many times the letter occurred. if N=2, returns a dict containing counts of each bigram (pair of letters) etc. There is an option to remove all spaces and punctuation prior to processing ''' if not keep_punct: text = re.sub('[^A-Z]','',text.upper()) count = {} for i in range(len(text)-N+1): c = text[i:i+N] if c in count: count[c] += 1 else: count[c] = 1.0 return count
python
def ngram_count(text,N=1,keep_punct=False): ''' if N=1, return a dict containing each letter along with how many times the letter occurred. if N=2, returns a dict containing counts of each bigram (pair of letters) etc. There is an option to remove all spaces and punctuation prior to processing ''' if not keep_punct: text = re.sub('[^A-Z]','',text.upper()) count = {} for i in range(len(text)-N+1): c = text[i:i+N] if c in count: count[c] += 1 else: count[c] = 1.0 return count
if N=1, return a dict containing each letter along with how many times the letter occurred. if N=2, returns a dict containing counts of each bigram (pair of letters) etc. There is an option to remove all spaces and punctuation prior to processing
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/util.py#L16-L27
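A sketch for ngram_count above (same import path as before); punctuation and case are normalised away by default:

from pycipher.util import ngram_count

ngram_count('hello world', N=1)
# {'H': 1.0, 'E': 1.0, 'L': 3.0, 'O': 2.0, 'W': 1.0, 'R': 1.0, 'D': 1.0}
ngram_count('hello world', N=2)  # bigram counts over 'HELLOWORLD', e.g. 'LL', 'LO', ...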
jameslyons/pycipher
pycipher/util.py
ngram_freq
def ngram_freq(text,N=1,log=False,floor=0.01): ''' returns the n-gram frequencies of all n-grams encountered in text. Option to return log probabilities or standard probabilities. Note that only n-grams occurring in 'text' will have probabilities. For the probability of not-occurring n-grams, use freq['floor']. This is set to floor/len(text) ''' freq = ngram_count(text,N) L = 1.0*(len(text)-N+1) for c in freq.keys(): if log: freq[c] = math.log10(freq[c]/L) else: freq[c] = freq[c]/L if log: freq['floor'] = math.log10(floor/L) else: freq['floor'] = floor/L return freq
python
def ngram_freq(text,N=1,log=False,floor=0.01): ''' returns the n-gram frequencies of all n-grams encountered in text. Option to return log probabilities or standard probabilities. Note that only n-grams occurring in 'text' will have probabilities. For the probability of not-occurring n-grams, use freq['floor']. This is set to floor/len(text) ''' freq = ngram_count(text,N) L = 1.0*(len(text)-N+1) for c in freq.keys(): if log: freq[c] = math.log10(freq[c]/L) else: freq[c] = freq[c]/L if log: freq['floor'] = math.log10(floor/L) else: freq['floor'] = floor/L return freq
returns the n-gram frequencies of all n-grams encountered in text. Option to return log probabilities or standard probabilities. Note that only n-grams occurring in 'text' will have probabilities. For the probability of not-occurring n-grams, use freq['floor']. This is set to floor/len(text)
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/util.py#L29-L42
jameslyons/pycipher
pycipher/util.py
restore_punctuation
def restore_punctuation(original,modified): ''' If punctuation was accidentally removed, use this function to restore it. Requires the original string with punctuation. ''' ret = '' count = 0 try: for c in original: if c.isalpha(): ret+=modified[count] count+=1 else: ret+=c except IndexError: print('restore_punctuation: strings must have same number of alphabetic chars') raise return ret
python
def restore_punctuation(original,modified): ''' If punctuation was accidentally removed, use this function to restore it. Requires the original string with punctuation. ''' ret = '' count = 0 try: for c in original: if c.isalpha(): ret+=modified[count] count+=1 else: ret+=c except IndexError: print('restore_punctuation: strings must have same number of alphabetic chars') raise return ret
If punctuation was accidentally removed, use this function to restore it. Requires the original string with punctuation.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/util.py#L44-L58
jameslyons/pycipher
pycipher/util.py
keyword_to_key
def keyword_to_key(word,alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ'): ''' convert a key word to a key by appending on the other letters of the alphabet. e.g. MONARCHY -> MONARCHYBDEFGIJKLPQSTUVWXZ ''' ret = '' word = (word + alphabet).upper() for i in word: if i in ret: continue ret += i return ret
python
def keyword_to_key(word,alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ'): ''' convert a key word to a key by appending on the other letters of the alphabet. e.g. MONARCHY -> MONARCHYBDEFGIJKLPQSTUVWXZ ''' ret = '' word = (word + alphabet).upper() for i in word: if i in ret: continue ret += i return ret
convert a key word to a key by appending on the other letters of the alphabet. e.g. MONARCHY -> MONARCHYBDEFGIJKLPQSTUVWXZ
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/util.py#L61-L70
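keyword_to_key above is easiest to see with the docstring's own example:

from pycipher.util import keyword_to_key

keyword_to_key('MONARCHY')  # 'MONARCHYBDEFGIJKLPQSTUVWXZ'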
jameslyons/pycipher
pycipher/playfair.py
Playfair.encipher
def encipher(self, string): """Encipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) string = re.sub(r'[J]', 'I', string) if len(string) % 2 == 1: string += 'X' ret = '' for c in range(0, len(string), 2): ret += self.encipher_pair(string[c], string[c + 1]) return ret
python
def encipher(self, string): """Encipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) string = re.sub(r'[J]', 'I', string) if len(string) % 2 == 1: string += 'X' ret = '' for c in range(0, len(string), 2): ret += self.encipher_pair(string[c], string[c + 1]) return ret
Encipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/playfair.py#L44-L62
jameslyons/pycipher
pycipher/playfair.py
Playfair.decipher
def decipher(self, string): """Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ string = self.remove_punctuation(string) if len(string) % 2 == 1: string += 'X' ret = '' for c in range(0, len(string), 2): ret += self.decipher_pair(string[c], string[c + 1]) return ret
python
def decipher(self, string): """Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ string = self.remove_punctuation(string) if len(string) % 2 == 1: string += 'X' ret = '' for c in range(0, len(string), 2): ret += self.decipher_pair(string[c], string[c + 1]) return ret
Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/playfair.py#L64-L81
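A round-trip sketch for the two Playfair methods above, using the key from their docstrings and assuming Playfair is importable from the top-level pycipher package. Remember that J is folded into I and odd-length input gets a trailing X, so the recovered text may differ slightly from the raw input:

    from pycipher import Playfair

    pf = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl')
    ct = pf.encipher('attack at dawn')   # punctuation and spaces are stripped before enciphering
    print(ct)
    print(pf.decipher(ct))               # should print ATTACKATDAWN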
jameslyons/pycipher
pycipher/delastelle.py
Delastelle.encipher
def encipher(self,string): """Encipher string using Delastelle cipher according to initialised key. Example:: ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext. """ string = self.remove_punctuation(string,filter='[^'+self.key+']') ctext = "" for c in string: ctext += ''.join([str(i) for i in L2IND[c]]) return ctext
python
def encipher(self,string): """Encipher string using Delastelle cipher according to initialised key. Example:: ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext. """ string = self.remove_punctuation(string,filter='[^'+self.key+']') ctext = "" for c in string: ctext += ''.join([str(i) for i in L2IND[c]]) return ctext
Encipher string using Delastelle cipher according to initialised key. Example:: ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/delastelle.py#L26-L40
jameslyons/pycipher
pycipher/delastelle.py
Delastelle.decipher
def decipher(self,string): """Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext. """ string = self.remove_punctuation(string,filter='[^'+self.chars+']') ret = '' for i in range(0,len(string),3): ind = tuple([int(string[i+k]) for k in [0,1,2]]) ret += IND2L[ind] return ret
python
def decipher(self,string): """Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext. """ string = self.remove_punctuation(string,filter='[^'+self.chars+']') ret = '' for i in range(0,len(string),3): ind = tuple([int(string[i+k]) for k in [0,1,2]]) ret += IND2L[ind] return ret
Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/delastelle.py#L42-L57
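A sketch pairing the two Delastelle methods above with the key string from their docstrings (note that it contains a space); assumes Delastelle is importable from the top-level package:

    from pycipher import Delastelle

    d = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS')
    ct = d.encipher('ATTACKATDAWN')
    print(len(ct))          # three digits per enciphered character, as the docstring states
    print(d.decipher(ct))   # should recover ATTACKATDAWN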
jameslyons/pycipher
pycipher/foursquare.py
Foursquare.encipher
def encipher(self,string): """Encipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) if len(string)%2 == 1: string = string + 'X' ret = '' for c in range(0,len(string.upper()),2): a,b = self.encipher_pair(string[c],string[c+1]) ret += a + b return ret
python
def encipher(self,string): """Encipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) if len(string)%2 == 1: string = string + 'X' ret = '' for c in range(0,len(string.upper()),2): a,b = self.encipher_pair(string[c],string[c+1]) ret += a + b return ret
Encipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended. Example:: ciphertext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/foursquare.py#L34-L51
jameslyons/pycipher
pycipher/foursquare.py
Foursquare.decipher
def decipher(self,string): """Decipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ string = self.remove_punctuation(string) if len(string)%2 == 1: string = string + 'X' ret = '' for c in range(0,len(string.upper()),2): a,b = self.decipher_pair(string[c],string[c+1]) ret += a + b return ret
python
def decipher(self,string): """Decipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ string = self.remove_punctuation(string) if len(string)%2 == 1: string = string + 'X' ret = '' for c in range(0,len(string.upper()),2): a,b = self.decipher_pair(string[c],string[c+1]) ret += a + b return ret
Decipher string using Foursquare cipher according to initialised key. Punctuation and whitespace are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended. Example:: plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/foursquare.py#L53-L70
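A round-trip sketch for Foursquare using the two keys from the docstrings above, assuming the class is importable from the top-level package:

    from pycipher import Foursquare

    fs = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',
                    key2='mfnbdcrhsaxyogvituewlqzkp')
    ct = fs.encipher('attack at dawn')
    print(fs.decipher(ct))   # should print ATTACKATDAWN (an X is appended only for odd-length input)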
jameslyons/pycipher
pycipher/rot13.py
Rot13.encipher
def encipher(self,string,keep_punct=False): r"""Encipher string using rot13 cipher. Example:: ciphertext = Rot13().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string: if c.isalpha(): ret += self.i2a( self.a2i(c) + 13 ) else: ret += c return ret
python
def encipher(self,string,keep_punct=False): r"""Encipher string using rot13 cipher. Example:: ciphertext = Rot13().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string: if c.isalpha(): ret += self.i2a( self.a2i(c) + 13 ) else: ret += c return ret
r"""Encipher string using rot13 cipher. Example:: ciphertext = Rot13().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/rot13.py#L20-L36
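ROT13 is its own inverse (two shifts of 13 add up to a full 26-letter shift), so enciphering twice should restore the letters; a sketch assuming Rot13 is importable from the top-level package:

    from pycipher import Rot13

    ct = Rot13().encipher('HELLO, WORLD!', keep_punct=True)
    print(ct)                                      # expected: URYYB, JBEYQ!
    print(Rot13().encipher(ct, keep_punct=True))   # applying ROT13 again restores the original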
jameslyons/pycipher
pycipher/porta.py
Porta.encipher
def encipher(self,string): """Encipher string using Porta cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Porta('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for (i,c) in enumerate(string): i = i%len(self.key) if self.key[i] in 'AB': ret += 'NOPQRSTUVWXYZABCDEFGHIJKLM'[self.a2i(c)] elif self.key[i] in 'YZ': ret += 'ZNOPQRSTUVWXYBCDEFGHIJKLMA'[self.a2i(c)] elif self.key[i] in 'WX': ret += 'YZNOPQRSTUVWXCDEFGHIJKLMAB'[self.a2i(c)] elif self.key[i] in 'UV': ret += 'XYZNOPQRSTUVWDEFGHIJKLMABC'[self.a2i(c)] elif self.key[i] in 'ST': ret += 'WXYZNOPQRSTUVEFGHIJKLMABCD'[self.a2i(c)] elif self.key[i] in 'QR': ret += 'VWXYZNOPQRSTUFGHIJKLMABCDE'[self.a2i(c)] elif self.key[i] in 'OP': ret += 'UVWXYZNOPQRSTGHIJKLMABCDEF'[self.a2i(c)] elif self.key[i] in 'MN': ret += 'TUVWXYZNOPQRSHIJKLMABCDEFG'[self.a2i(c)] elif self.key[i] in 'KL': ret += 'STUVWXYZNOPQRIJKLMABCDEFGH'[self.a2i(c)] elif self.key[i] in 'IJ': ret += 'RSTUVWXYZNOPQJKLMABCDEFGHI'[self.a2i(c)] elif self.key[i] in 'GH': ret += 'QRSTUVWXYZNOPKLMABCDEFGHIJ'[self.a2i(c)] elif self.key[i] in 'EF': ret += 'PQRSTUVWXYZNOLMABCDEFGHIJK'[self.a2i(c)] elif self.key[i] in 'CD': ret += 'OPQRSTUVWXYZNMABCDEFGHIJKL'[self.a2i(c)] return ret
python
def encipher(self,string): """Encipher string using Porta cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Porta('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for (i,c) in enumerate(string): i = i%len(self.key) if self.key[i] in 'AB': ret += 'NOPQRSTUVWXYZABCDEFGHIJKLM'[self.a2i(c)] elif self.key[i] in 'YZ': ret += 'ZNOPQRSTUVWXYBCDEFGHIJKLMA'[self.a2i(c)] elif self.key[i] in 'WX': ret += 'YZNOPQRSTUVWXCDEFGHIJKLMAB'[self.a2i(c)] elif self.key[i] in 'UV': ret += 'XYZNOPQRSTUVWDEFGHIJKLMABC'[self.a2i(c)] elif self.key[i] in 'ST': ret += 'WXYZNOPQRSTUVEFGHIJKLMABCD'[self.a2i(c)] elif self.key[i] in 'QR': ret += 'VWXYZNOPQRSTUFGHIJKLMABCDE'[self.a2i(c)] elif self.key[i] in 'OP': ret += 'UVWXYZNOPQRSTGHIJKLMABCDEF'[self.a2i(c)] elif self.key[i] in 'MN': ret += 'TUVWXYZNOPQRSHIJKLMABCDEFG'[self.a2i(c)] elif self.key[i] in 'KL': ret += 'STUVWXYZNOPQRIJKLMABCDEFGH'[self.a2i(c)] elif self.key[i] in 'IJ': ret += 'RSTUVWXYZNOPQJKLMABCDEFGHI'[self.a2i(c)] elif self.key[i] in 'GH': ret += 'QRSTUVWXYZNOPKLMABCDEFGHIJ'[self.a2i(c)] elif self.key[i] in 'EF': ret += 'PQRSTUVWXYZNOLMABCDEFGHIJK'[self.a2i(c)] elif self.key[i] in 'CD': ret += 'OPQRSTUVWXYZNMABCDEFGHIJKL'[self.a2i(c)] return ret
Encipher string using Porta cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Porta('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/porta.py#L17-L45
jameslyons/pycipher
pycipher/m209.py
M209.encipher
def encipher(self,message): """Encipher string using M209 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example (continuing from the example above):: ciphertext = m.encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ message = self.remove_punctuation(message) effective_ch = [0,0,0,0,0,0,0] # these are the wheels which are effective currently, 1 for yes, 0 no # -the zero at the beginning is extra, indicates lug was in pos 0 ret = '' # from now we no longer need the wheel starts, we can just increment the actual key for j in range(len(message)): shift = 0 effective_ch[0] = 0; effective_ch[1] = self.wheel_1_settings[self.actual_key[0]] effective_ch[2] = self.wheel_2_settings[self.actual_key[1]] effective_ch[3] = self.wheel_3_settings[self.actual_key[2]] effective_ch[4] = self.wheel_4_settings[self.actual_key[3]] effective_ch[5] = self.wheel_5_settings[self.actual_key[4]] effective_ch[6] = self.wheel_6_settings[self.actual_key[5]] for i in range(0,27): # implements the cylindrical drum with lugs on it if effective_ch[self.lug_positions[i][0]] or effective_ch[self.lug_positions[i][1]]: shift+=1 # shift has been found, now actually encrypt letter ret += self.subst(message[j],key='ZYXWVUTSRQPONMLKJIHGFEDCBA',offset=-shift); # encrypt letter self.advance_key(); # advance the key wheels return ret
python
def encipher(self,message): """Encipher string using M209 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example (continuing from the example above):: ciphertext = m.encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ message = self.remove_punctuation(message) effective_ch = [0,0,0,0,0,0,0] # these are the wheels which are effective currently, 1 for yes, 0 no # -the zero at the beginning is extra, indicates lug was in pos 0 ret = '' # from now we no longer need the wheel starts, we can just increment the actual key for j in range(len(message)): shift = 0 effective_ch[0] = 0; effective_ch[1] = self.wheel_1_settings[self.actual_key[0]] effective_ch[2] = self.wheel_2_settings[self.actual_key[1]] effective_ch[3] = self.wheel_3_settings[self.actual_key[2]] effective_ch[4] = self.wheel_4_settings[self.actual_key[3]] effective_ch[5] = self.wheel_5_settings[self.actual_key[4]] effective_ch[6] = self.wheel_6_settings[self.actual_key[5]] for i in range(0,27): # implements the cylindrical drum with lugs on it if effective_ch[self.lug_positions[i][0]] or effective_ch[self.lug_positions[i][1]]: shift+=1 # shift has been found, now actually encrypt letter ret += self.subst(message[j],key='ZYXWVUTSRQPONMLKJIHGFEDCBA',offset=-shift); # encrypt letter self.advance_key(); # advance the key wheels return ret
Encipher string using M209 cipher according to initialised key. Punctuation and whitespace are removed from the input. Example (continuing from the example above):: ciphertext = m.encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/m209.py#L59-L90
jameslyons/pycipher
pycipher/fracmorse.py
FracMorse.encipher
def encipher(self,string): """Encipher string using FracMorse cipher according to initialised key. Example:: ciphertext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = string.upper() #print string morsestr = self.enmorse(string) # make sure the morse string is a multiple of 3 in length if len(morsestr) % 3 == 1: morsestr = morsestr[0:-1] elif len(morsestr) % 3 == 2: morsestr = morsestr + 'x' #print morsestr mapping = dict(zip(self.table,self.key)) ctext = "" for i in range(0,len(morsestr),3): ctext += mapping[morsestr[i:i+3]] return ctext
python
def encipher(self,string): """Encipher string using FracMorse cipher according to initialised key. Example:: ciphertext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = string.upper() #print string morsestr = self.enmorse(string) # make sure the morse string is a multiple of 3 in length if len(morsestr) % 3 == 1: morsestr = morsestr[0:-1] elif len(morsestr) % 3 == 2: morsestr = morsestr + 'x' #print morsestr mapping = dict(zip(self.table,self.key)) ctext = "" for i in range(0,len(morsestr),3): ctext += mapping[morsestr[i:i+3]] return ctext
Encipher string using FracMorse cipher according to initialised key. Example:: ciphertext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/fracmorse.py#L21-L44
jameslyons/pycipher
pycipher/fracmorse.py
FracMorse.decipher
def decipher(self,string): """Decipher string using FracMorse cipher according to initialised key. Example:: plaintext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').decipher(ciphertext) :param string: The string to decipher. :returns: The enciphered string. """ string = string.upper() mapping = dict(zip(self.key,self.table)) ptext = "" for i in string: ptext += mapping[i] return self.demorse(ptext)
python
def decipher(self,string): """Decipher string using FracMorse cipher according to initialised key. Example:: plaintext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').decipher(ciphertext) :param string: The string to decipher. :returns: The enciphered string. """ string = string.upper() mapping = dict(zip(self.key,self.table)) ptext = "" for i in string: ptext += mapping[i] return self.demorse(ptext)
Decipher string using FracMorse cipher according to initialised key. Example:: plaintext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/fracmorse.py#L46-L61
jameslyons/pycipher
pycipher/columnartransposition.py
ColTrans.encipher
def encipher(self,string): """Encipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = ColTrans('GERMAN').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' ind = self.sortind(self.keyword) for i in range(len(self.keyword)): ret += string[ind.index(i)::len(self.keyword)] return ret
python
def encipher(self,string): """Encipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = ColTrans('GERMAN').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' ind = self.sortind(self.keyword) for i in range(len(self.keyword)): ret += string[ind.index(i)::len(self.keyword)] return ret
Encipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = ColTrans('GERMAN').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/columnartransposition.py#L32-L48
jameslyons/pycipher
pycipher/columnartransposition.py
ColTrans.decipher
def decipher(self,string): '''Decipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = ColTrans('GERMAN').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. ''' string = self.remove_punctuation(string) ret = ['_']*len(string) L,M = len(string),len(self.keyword) ind = self.unsortind(self.keyword) upto = 0 for i in range(len(self.keyword)): thiscollen = (int)(L/M) if ind[i]< L%M: thiscollen += 1 ret[ind[i]::M] = string[upto:upto+thiscollen] upto += thiscollen return ''.join(ret)
python
def decipher(self,string): '''Decipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = ColTrans('GERMAN').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. ''' string = self.remove_punctuation(string) ret = ['_']*len(string) L,M = len(string),len(self.keyword) ind = self.unsortind(self.keyword) upto = 0 for i in range(len(self.keyword)): thiscollen = (int)(L/M) if ind[i]< L%M: thiscollen += 1 ret[ind[i]::M] = string[upto:upto+thiscollen] upto += thiscollen return ''.join(ret)
Decipher string using Columnar Transposition cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = ColTrans('GERMAN').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/columnartransposition.py#L51-L72
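A round-trip sketch for the columnar transposition methods above, assuming ColTrans is importable from the top-level package; the keyword is the one from the docstrings:

    from pycipher import ColTrans

    ct = ColTrans('GERMAN').encipher('defend the east wall of the castle')
    print(ct)
    print(ColTrans('GERMAN').decipher(ct))   # should recover DEFENDTHEEASTWALLOFTHECASTLE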
jameslyons/pycipher
pycipher/railfence.py
Railfence.encipher
def encipher(self,string,keep_punct=False): """Encipher string using Railfence cipher according to initialised key. Example:: ciphertext = Railfence(3).encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) return ''.join(self.buildfence(string, self.key))
python
def encipher(self,string,keep_punct=False): """Encipher string using Railfence cipher according to initialised key. Example:: ciphertext = Railfence(3).encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) return ''.join(self.buildfence(string, self.key))
Encipher string using Railfence cipher according to initialised key. Example:: ciphertext = Railfence(3).encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/railfence.py#L20-L32
jameslyons/pycipher
pycipher/railfence.py
Railfence.decipher
def decipher(self,string,keep_punct=False): """Decipher string using Railfence cipher according to initialised key. Example:: plaintext = Railfence(3).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ind = range(len(string)) pos = self.buildfence(ind, self.key) return ''.join(string[pos.index(i)] for i in ind)
python
def decipher(self,string,keep_punct=False): """Decipher string using Railfence cipher according to initialised key. Example:: plaintext = Railfence(3).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ind = range(len(string)) pos = self.buildfence(ind, self.key) return ''.join(string[pos.index(i)] for i in ind)
Decipher string using Railfence cipher according to initialised key. Example:: plaintext = Railfence(3).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/railfence.py#L34-L48
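A round-trip sketch for Railfence with three rails, as in the docstrings above, assuming the class is importable from the top-level package:

    from pycipher import Railfence

    rf = Railfence(3)
    ct = rf.encipher('we are discovered flee at once')
    print(ct)
    print(rf.decipher(ct))   # should recover WEAREDISCOVEREDFLEEATONCE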
jameslyons/pycipher
pycipher/affine.py
Affine.decipher
def decipher(self,string,keep_punct=False): """Decipher string using affine cipher according to initialised key. Example:: plaintext = Affine(a,b).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string: if c.isalpha(): ret += self.i2a(self.inva*(self.a2i(c) - self.b)) else: ret += c return ret
python
def decipher(self,string,keep_punct=False): """Decipher string using affine cipher according to initialised key. Example:: plaintext = Affine(a,b).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string: if c.isalpha(): ret += self.i2a(self.inva*(self.a2i(c) - self.b)) else: ret += c return ret
Decipher string using affine cipher according to initialised key. Example:: plaintext = Affine(a,b).decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/affine.py#L50-L66
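A sketch for Affine with the illustrative parameters a=5, b=8 (a must be coprime with 26 so that the modular inverse self.inva used above exists). Only decipher is listed here, so the matching encipher call is an assumption about the rest of the class:

    from pycipher import Affine

    af = Affine(5, 8)
    ct = af.encipher('attack at dawn')   # assumed counterpart of the decipher method shown above
    print(af.decipher(ct))               # should recover ATTACKATDAWN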
jameslyons/pycipher
pycipher/autokey.py
Autokey.encipher
def encipher(self,string): """Encipher string using Autokey cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Autokey('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for (i,c) in enumerate(string): if i<len(self.key): offset = self.a2i(self.key[i]) else: offset = self.a2i(string[i-len(self.key)]) ret += self.i2a(self.a2i(c)+offset) return ret
python
def encipher(self,string): """Encipher string using Autokey cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Autokey('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) ret = '' for (i,c) in enumerate(string): if i<len(self.key): offset = self.a2i(self.key[i]) else: offset = self.a2i(string[i-len(self.key)]) ret += self.i2a(self.a2i(c)+offset) return ret
Encipher string using Autokey cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Autokey('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/autokey.py#L19-L36
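A sketch of the autokey behaviour described above: the keyword supplies the first len(key) shifts, after which the plaintext itself becomes the running key. Assumes Autokey is importable from the top-level package:

    from pycipher import Autokey

    print(Autokey('HELLO').encipher('defend the east wall'))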
jameslyons/pycipher
pycipher/bifid.py
Bifid.encipher
def encipher(self,string): """Encipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) step1 = self.pb.encipher(string) evens = step1[::2] odds = step1[1::2] step2 = [] for i in range(0,len(string),self.period): step2 += evens[i:int(i+self.period)] step2 += odds[i:int(i+self.period)] return self.pb.decipher(''.join(step2))
python
def encipher(self,string): """Encipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. """ string = self.remove_punctuation(string) step1 = self.pb.encipher(string) evens = step1[::2] odds = step1[1::2] step2 = [] for i in range(0,len(string),self.period): step2 += evens[i:int(i+self.period)] step2 += odds[i:int(i+self.period)] return self.pb.decipher(''.join(step2))
Encipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/bifid.py#L25-L44
jameslyons/pycipher
pycipher/bifid.py
Bifid.decipher
def decipher(self,string): """Decipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ ret = '' string = string.upper() rowseq,colseq = [],[] # take blocks of length period, reform rowseq,colseq from them for i in range(0,len(string),self.period): tempseq = [] for j in range(0,self.period): if i+j >= len(string): continue tempseq.append(int(self.key.index(string[i + j]) / 5)) tempseq.append(int(self.key.index(string[i + j]) % 5)) rowseq.extend(tempseq[0:int(len(tempseq)/2)]) colseq.extend(tempseq[int(len(tempseq)/2):]) for i in range(len(rowseq)): ret += self.key[rowseq[i]*5 + colseq[i]] return ret
python
def decipher(self,string): """Decipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. """ ret = '' string = string.upper() rowseq,colseq = [],[] # take blocks of length period, reform rowseq,colseq from them for i in range(0,len(string),self.period): tempseq = [] for j in range(0,self.period): if i+j >= len(string): continue tempseq.append(int(self.key.index(string[i + j]) / 5)) tempseq.append(int(self.key.index(string[i + j]) % 5)) rowseq.extend(tempseq[0:int(len(tempseq)/2)]) colseq.extend(tempseq[int(len(tempseq)/2):]) for i in range(len(rowseq)): ret += self.key[rowseq[i]*5 + colseq[i]] return ret
Decipher string using Bifid cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: plaintext = Bifid('phqgmeaylnofdxkrcvszwbuti',5).decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/bifid.py#L46-L71
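A round-trip sketch for Bifid with the key and period from the docstrings above, assuming the class is importable from the top-level package and that the constructor (not shown here) normalises the key case so that the upper-cased lookups in decipher succeed:

    from pycipher import Bifid

    bf = Bifid('phqgmeaylnofdxkrcvszwbuti', 5)   # 25-letter key (no J) and period 5
    ct = bf.encipher('attack at dawn')
    print(bf.decipher(ct))   # should recover ATTACKATDAWN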
jameslyons/pycipher
pycipher/simplesubstitution.py
SimpleSubstitution.decipher
def decipher(self,string,keep_punct=False): """Decipher string using Simple Substitution cipher according to initialised key. Example:: plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ # if we have not yet calculated the inverse key, calculate it now if self.invkey == '': for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': self.invkey += self.i2a(self.key.index(i)) if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.invkey[self.a2i(c)] else: ret += c return ret
python
def decipher(self,string,keep_punct=False): """Decipher string using Simple Substitution cipher according to initialised key. Example:: plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string. """ # if we have not yet calculated the inverse key, calculate it now if self.invkey == '': for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': self.invkey += self.i2a(self.key.index(i)) if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.invkey[self.a2i(c)] else: ret += c return ret
Decipher string using Simple Substitution cipher according to initialised key. Example:: plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string.
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/simplesubstitution.py#L45-L65
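A sketch combining keyword_to_key from pycipher/util.py (listed earlier) with SimpleSubstitution; only decipher appears above, so the encipher call is an assumption about the rest of the class:

    from pycipher import SimpleSubstitution
    from pycipher.util import keyword_to_key

    ss = SimpleSubstitution(keyword_to_key('MONARCHY'))
    ct = ss.encipher('attack at dawn')   # assumed counterpart of the decipher method shown above
    print(ss.decipher(ct))               # should recover ATTACKATDAWN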
oseledets/ttpy
tt/core/tools.py
matvec
def matvec(a, b, compression=False): """Matrix-vector product in TT format.""" acrs = _vector.vector.to_list(a.tt) bcrs = _vector.vector.to_list(b) ccrs = [] d = b.d def get_core(i): acr = _np.reshape( acrs[i], (a.tt.r[i], a.n[i], a.m[i], a.tt.r[ i + 1]), order='F') acr = acr.transpose([3, 0, 1, 2]) # a(R_{i+1}, R_i, n_i, m_i) bcr = bcrs[i].transpose([1, 0, 2]) # b(m_i, r_i, r_{i+1}) # c(R_{i+1}, R_i, n_i, r_i, r_{i+1}) ccr = _np.tensordot(acr, bcr, axes=(3, 0)) ccr = ccr.transpose([1, 3, 2, 0, 4]).reshape( (a.tt.r[i] * b.r[i], a.n[i], a.tt.r[i + 1] * b.r[i + 1]), order='F') return ccr if compression: # the compression is laaaaazy and one-directioned # calculate norm of resulting _vector first nrm = _np.array([[1.0]]) # 1 x 1 v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) # print(str(ccr.shape) + " -> "), # minimal loss compression ccr = _np.tensordot(v, ccr, (1, 0)) rl, n, rr = ccr.shape if i < d - 1: u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) newr = min(rl * n, rr) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) # print(ccr.shape) # r x r . r x n x R -> r x n x R nrm = _np.tensordot(nrm, ccr, (0, 0)) # r x n x R . r x n x R -> n x R x n x R nrm = _np.tensordot(nrm, _np.conj(ccr), (0, 0)) nrm = nrm.diagonal(axis1=0, axis2=2) # n x R x n x R -> R x R x n nrm = nrm.sum(axis=2) # R x R x n -> R x R if nrm.size > 1: raise Exception('too many numbers in norm') # print("Norm calculated:", nrm) nrm = _np.sqrt(_np.linalg.norm(nrm)) # print("Norm predicted:", nrm) compression = compression * nrm / _np.sqrt(d - 1) v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) rl, n, rr = ccr.shape if compression: ccr = _np.tensordot(v, ccr, (1, 0)) # c(s_i, n_i, r_i, r_{i+1}) if i < d - 1: rl = v.shape[0] u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) ss = _np.cumsum(s[::-1])[::-1] newr = max(min([r for r in range(ss.size) if ss[ r] <= compression] + [min(rl * n, rr)]), 1) # print("Rank % 4d replaced by % 4d" % (rr, newr)) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) ccrs.append(ccr) result = _vector.vector.from_list(ccrs) if compression: # print(result) print("Norm actual:", result.norm(), " mean rank:", result.rmean()) # print("Norm very actual:", matvec(a,b).norm()) return result
python
def matvec(a, b, compression=False): """Matrix-vector product in TT format.""" acrs = _vector.vector.to_list(a.tt) bcrs = _vector.vector.to_list(b) ccrs = [] d = b.d def get_core(i): acr = _np.reshape( acrs[i], (a.tt.r[i], a.n[i], a.m[i], a.tt.r[ i + 1]), order='F') acr = acr.transpose([3, 0, 1, 2]) # a(R_{i+1}, R_i, n_i, m_i) bcr = bcrs[i].transpose([1, 0, 2]) # b(m_i, r_i, r_{i+1}) # c(R_{i+1}, R_i, n_i, r_i, r_{i+1}) ccr = _np.tensordot(acr, bcr, axes=(3, 0)) ccr = ccr.transpose([1, 3, 2, 0, 4]).reshape( (a.tt.r[i] * b.r[i], a.n[i], a.tt.r[i + 1] * b.r[i + 1]), order='F') return ccr if compression: # the compression is laaaaazy and one-directioned # calculate norm of resulting _vector first nrm = _np.array([[1.0]]) # 1 x 1 v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) # print(str(ccr.shape) + " -> "), # minimal loss compression ccr = _np.tensordot(v, ccr, (1, 0)) rl, n, rr = ccr.shape if i < d - 1: u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) newr = min(rl * n, rr) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) # print(ccr.shape) # r x r . r x n x R -> r x n x R nrm = _np.tensordot(nrm, ccr, (0, 0)) # r x n x R . r x n x R -> n x R x n x R nrm = _np.tensordot(nrm, _np.conj(ccr), (0, 0)) nrm = nrm.diagonal(axis1=0, axis2=2) # n x R x n x R -> R x R x n nrm = nrm.sum(axis=2) # R x R x n -> R x R if nrm.size > 1: raise Exception('too many numbers in norm') # print("Norm calculated:", nrm) nrm = _np.sqrt(_np.linalg.norm(nrm)) # print("Norm predicted:", nrm) compression = compression * nrm / _np.sqrt(d - 1) v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) rl, n, rr = ccr.shape if compression: ccr = _np.tensordot(v, ccr, (1, 0)) # c(s_i, n_i, r_i, r_{i+1}) if i < d - 1: rl = v.shape[0] u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) ss = _np.cumsum(s[::-1])[::-1] newr = max(min([r for r in range(ss.size) if ss[ r] <= compression] + [min(rl * n, rr)]), 1) # print("Rank % 4d replaced by % 4d" % (rr, newr)) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) ccrs.append(ccr) result = _vector.vector.from_list(ccrs) if compression: # print(result) print("Norm actual:", result.norm(), " mean rank:", result.rmean()) # print("Norm very actual:", matvec(a,b).norm()) return result
Matrix-vector product in TT format.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L25-L103
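A small check of matvec, assuming (as is conventional for ttpy) that these tools are re-exported from the top-level tt package: applying an identity TT-matrix should reproduce the vector.

    import tt

    A = tt.eye(2, 10)        # identity TT-matrix with ten mode sizes of 2
    x = tt.rand(2, 10, 2)    # random TT-vector of the same shape, ranks 2
    y = tt.matvec(A, x)
    print((y - x).norm())    # should be close to zero up to floating-point error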
oseledets/ttpy
tt/core/tools.py
kron
def kron(a, b): """Kronecker product of two TT-matrices or two TT-vectors""" if hasattr(a, '__kron__'): return a.__kron__(b) if a is None: return b else: raise ValueError( 'Kron is waiting for two TT-vectors or two TT-matrices')
python
def kron(a, b): """Kronecker product of two TT-matrices or two TT-vectors""" if hasattr(a, '__kron__'): return a.__kron__(b) if a is None: return b else: raise ValueError( 'Kron is waiting for two TT-vectors or two TT-matrices')
Kronecker product of two TT-matrices or two TT-vectors
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L127-L135
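A sketch of kron on two TT-vectors under the same top-level re-export assumption; the result simply stacks the cores of the operands:

    import tt

    a = tt.ones(2, 3)
    b = tt.ones(2, 4)
    c = tt.kron(a, b)
    print(c.d, c.n)   # 7 cores in total, every mode size equal to 2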
oseledets/ttpy
tt/core/tools.py
dot
def dot(a, b): """Dot product of two TT-matrices or two TT-vectors""" if hasattr(a, '__dot__'): return a.__dot__(b) if a is None: return b else: raise ValueError( 'Dot is waiting for two TT-vectors or two TT- matrices')
python
def dot(a, b): """Dot product of two TT-matrices or two TT-vectors""" if hasattr(a, '__dot__'): return a.__dot__(b) if a is None: return b else: raise ValueError( 'Dot is waiting for two TT-vectors or two TT- matrices')
Dot product of two TT-matrices or two TT-vectors
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L138-L146
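A sketch of dot under the same assumption; for TT-vectors it returns the scalar inner product:

    import tt

    a = tt.ones(2, 5)
    print(tt.dot(a, a))   # inner product of the all-ones tensor with itself: 2**5 = 32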
oseledets/ttpy
tt/core/tools.py
mkron
def mkron(a, *args): """Kronecker product of all the arguments""" if not isinstance(a, list): a = [a] a = list(a) # copy list for i in args: if isinstance(i, list): a.extend(i) else: a.append(i) c = _vector.vector() c.d = 0 c.n = _np.array([], dtype=_np.int32) c.r = _np.array([], dtype=_np.int32) c.core = [] for t in a: thetensor = t.tt if isinstance(t, _matrix.matrix) else t c.d += thetensor.d c.n = _np.concatenate((c.n, thetensor.n)) c.r = _np.concatenate((c.r[:-1], thetensor.r)) c.core = _np.concatenate((c.core, thetensor.core)) c.get_ps() return c
python
def mkron(a, *args): """Kronecker product of all the arguments""" if not isinstance(a, list): a = [a] a = list(a) # copy list for i in args: if isinstance(i, list): a.extend(i) else: a.append(i) c = _vector.vector() c.d = 0 c.n = _np.array([], dtype=_np.int32) c.r = _np.array([], dtype=_np.int32) c.core = [] for t in a: thetensor = t.tt if isinstance(t, _matrix.matrix) else t c.d += thetensor.d c.n = _np.concatenate((c.n, thetensor.n)) c.r = _np.concatenate((c.r[:-1], thetensor.r)) c.core = _np.concatenate((c.core, thetensor.core)) c.get_ps() return c
Kronecker product of all the arguments
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L157-L182
oseledets/ttpy
tt/core/tools.py
zkron
def zkron(ttA, ttB): """ Do kronecker product between cores of two matrices ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-matrix; :param ttB: second TT-matrix; :return: TT-matrix in z-order """ Al = _matrix.matrix.to_list(ttA) Bl = _matrix.matrix.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _matrix.matrix.from_list(Hl)
python
def zkron(ttA, ttB): """ Do kronecker product between cores of two matrices ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-matrix; :param ttB: second TT-matrix; :return: TT-matrix in z-order """ Al = _matrix.matrix.to_list(ttA) Bl = _matrix.matrix.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _matrix.matrix.from_list(Hl)
Do kronecker product between cores of two matrices ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-matrix; :param ttB: second TT-matrix; :return: TT-matrix in z-order
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L185-L197
oseledets/ttpy
tt/core/tools.py
zkronv
def zkronv(ttA, ttB): """ Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order """ Al = _vector.vector.to_list(ttA) Bl = _vector.vector.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _vector.vector.from_list(Hl)
python
def zkronv(ttA, ttB): """ Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order """ Al = _vector.vector.to_list(ttA) Bl = _vector.vector.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _vector.vector.from_list(Hl)
Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L200-L212
oseledets/ttpy
tt/core/tools.py
zmeshgrid
def zmeshgrid(d): """ Returns a meshgrid like np.meshgrid but in z-order :param d: you'll get 4**d nodes in meshgrid :return: xx, yy in z-order """ lin = xfun(2, d) one = ones(2, d) xx = zkronv(lin, one) yy = zkronv(one, lin) return xx, yy
python
def zmeshgrid(d): """ Returns a meshgrid like np.meshgrid but in z-order :param d: you'll get 4**d nodes in meshgrid :return: xx, yy in z-order """ lin = xfun(2, d) one = ones(2, d) xx = zkronv(lin, one) yy = zkronv(one, lin) return xx, yy
Returns a meshgrid like np.meshgrid but in z-order :param d: you'll get 4**d nodes in meshgrid :return: xx, yy in z-order
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L215-L227
oseledets/ttpy
tt/core/tools.py
zaffine
def zaffine(c0, c1, c2, d): """ Generate linear function c0 + c1 ex + c2 ey in z ordering with d cores in QTT :param c0: :param c1: :param c2: :param d: :return: """ xx, yy = zmeshgrid(d) Hx, Hy = _vector.vector.to_list(xx), _vector.vector.to_list(yy) Hs = _cp.deepcopy(Hx) Hs[0][:, :, 0] = c1 * Hx[0][:, :, 0] + c2 * Hy[0][:, :, 0] Hs[-1][1, :, :] = c1 * Hx[-1][1, :, :] + (c0 + c2 * Hy[-1][1, :, :]) d = len(Hs) for k in range(1, d - 1): Hs[k][1, :, 0] = c1 * Hx[k][1, :, 0] + c2 * Hy[k][1, :, 0] return _vector.vector.from_list(Hs)
python
def zaffine(c0, c1, c2, d): """ Generate linear function c0 + c1 ex + c2 ey in z ordering with d cores in QTT :param c0: :param c1: :param c2: :param d: :return: """ xx, yy = zmeshgrid(d) Hx, Hy = _vector.vector.to_list(xx), _vector.vector.to_list(yy) Hs = _cp.deepcopy(Hx) Hs[0][:, :, 0] = c1 * Hx[0][:, :, 0] + c2 * Hy[0][:, :, 0] Hs[-1][1, :, :] = c1 * Hx[-1][1, :, :] + (c0 + c2 * Hy[-1][1, :, :]) d = len(Hs) for k in range(1, d - 1): Hs[k][1, :, 0] = c1 * Hx[k][1, :, 0] + c2 * Hy[k][1, :, 0] return _vector.vector.from_list(Hs)
Generate linear function c0 + c1 ex + c2 ey in z ordering with d cores in QTT :param c0: :param c1: :param c2: :param d: :return:
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L230-L251
oseledets/ttpy
tt/core/tools.py
concatenate
def concatenate(*args): """Concatenates given TT-vectors. For two tensors :math:`X(i_1,\\ldots,i_d),Y(i_1,\\ldots,i_d)` returns :math:`(d+1)`-dimensional tensor :math:`Z(i_0,i_1,\\ldots,i_d)`, :math:`i_0=\\overline{0,1}`, such that .. math:: Z(0, i_1, \\ldots, i_d) = X(i_1, \\ldots, i_d), Z(1, i_1, \\ldots, i_d) = Y(i_1, \\ldots, i_d). """ tmp = _np.array([[1] + [0] * (len(args) - 1)]) result = kron(_vector.vector(tmp), args[0]) for i in range(1, len(args)): result += kron(_vector.vector(_np.array([[0] * i + [1] + [0] * (len(args) - i - 1)])), args[i]) return result
python
def concatenate(*args): """Concatenates given TT-vectors. For two tensors :math:`X(i_1,\\ldots,i_d),Y(i_1,\\ldots,i_d)` returns :math:`(d+1)`-dimensional tensor :math:`Z(i_0,i_1,\\ldots,i_d)`, :math:`i_0=\\overline{0,1}`, such that .. math:: Z(0, i_1, \\ldots, i_d) = X(i_1, \\ldots, i_d), Z(1, i_1, \\ldots, i_d) = Y(i_1, \\ldots, i_d). """ tmp = _np.array([[1] + [0] * (len(args) - 1)]) result = kron(_vector.vector(tmp), args[0]) for i in range(1, len(args)): result += kron(_vector.vector(_np.array([[0] * i + [1] + [0] * (len(args) - i - 1)])), args[i]) return result
Concatenates given TT-vectors. For two tensors :math:`X(i_1,\\ldots,i_d),Y(i_1,\\ldots,i_d)` returns :math:`(d+1)`-dimensional tensor :math:`Z(i_0,i_1,\\ldots,i_d)`, :math:`i_0=\\overline{0,1}`, such that .. math:: Z(0, i_1, \\ldots, i_d) = X(i_1, \\ldots, i_d), Z(1, i_1, \\ldots, i_d) = Y(i_1, \\ldots, i_d).
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L254-L271
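A sketch of concatenate under the same top-level re-export assumption; per the docstring the result gains one extra leading dimension (of size 2 for two operands) that indexes the stacked inputs:

    import tt

    x = tt.ones(2, 3)
    y = tt.rand(2, 3, 2)
    z = tt.concatenate(x, y)
    print(z.d)   # the docstring promises x.d + 1 dimensions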
oseledets/ttpy
tt/core/tools.py
sum
def sum(a, axis=-1): """Sum TT-vector over specified axes""" d = a.d crs = _vector.vector.to_list(a.tt if isinstance(a, _matrix.matrix) else a) if axis < 0: axis = range(a.d) elif isinstance(axis, int): axis = [axis] axis = list(axis)[::-1] for ax in axis: crs[ax] = _np.sum(crs[ax], axis=1) rleft, rright = crs[ax].shape if (rleft >= rright or rleft < rright and ax + 1 >= d) and ax > 0: crs[ax - 1] = _np.tensordot(crs[ax - 1], crs[ax], axes=(2, 0)) elif ax + 1 < d: crs[ax + 1] = _np.tensordot(crs[ax], crs[ax + 1], axes=(1, 0)) else: return _np.sum(crs[ax]) crs.pop(ax) d -= 1 return _vector.vector.from_list(crs)
python
def sum(a, axis=-1): """Sum TT-vector over specified axes""" d = a.d crs = _vector.vector.to_list(a.tt if isinstance(a, _matrix.matrix) else a) if axis < 0: axis = range(a.d) elif isinstance(axis, int): axis = [axis] axis = list(axis)[::-1] for ax in axis: crs[ax] = _np.sum(crs[ax], axis=1) rleft, rright = crs[ax].shape if (rleft >= rright or rleft < rright and ax + 1 >= d) and ax > 0: crs[ax - 1] = _np.tensordot(crs[ax - 1], crs[ax], axes=(2, 0)) elif ax + 1 < d: crs[ax + 1] = _np.tensordot(crs[ax], crs[ax + 1], axes=(1, 0)) else: return _np.sum(crs[ax]) crs.pop(ax) d -= 1 return _vector.vector.from_list(crs)
Sum TT-vector over specified axes
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L274-L294
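A quick check of sum under the same assumption: with the default axis=-1 every axis is summed and a plain scalar comes back.

    import tt

    x = tt.ones(2, 5)
    print(tt.sum(x))        # 2**5 = 32.0: summing an all-ones tensor counts its entries
    y = tt.sum(x, axis=0)   # summing a single axis returns a smaller TT-vector instead
    print(y.d)              # 4 remaining dimensions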
oseledets/ttpy
tt/core/tools.py
ones
def ones(n, d=None): """ Creates a TT-vector of all ones""" c = _vector.vector() if d is None: c.n = _np.array(n, dtype=_np.int32) c.d = c.n.size else: c.n = _np.array([n] * d, dtype=_np.int32) c.d = d c.r = _np.ones((c.d + 1,), dtype=_np.int32) c.get_ps() c.core = _np.ones(c.ps[c.d] - 1) return c
python
def ones(n, d=None): """ Creates a TT-vector of all ones""" c = _vector.vector() if d is None: c.n = _np.array(n, dtype=_np.int32) c.d = c.n.size else: c.n = _np.array([n] * d, dtype=_np.int32) c.d = d c.r = _np.ones((c.d + 1,), dtype=_np.int32) c.get_ps() c.core = _np.ones(c.ps[c.d] - 1) return c
Creates a TT-vector of all ones
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L300-L312
oseledets/ttpy
tt/core/tools.py
rand
def rand(n, d=None, r=2, samplefunc=_np.random.randn): """Generate a random d-dimensional TT-vector with ranks ``r``. Distribution to sample cores is provided by the samplefunc. Default is to sample from normal distribution. """ n0 = _np.asanyarray(n, dtype=_np.int32) r0 = _np.asanyarray(r, dtype=_np.int32) if d is None: d = n0.size if n0.size == 1: n0 = _np.ones((d,), dtype=_np.int32) * n0 if r0.size == 1: r0 = _np.ones((d + 1,), dtype=_np.int32) * r0 r0[0] = 1 r0[d] = 1 c = _vector.vector() c.d = d c.n = n0 c.r = r0 c.get_ps() c.core = samplefunc(c.ps[d] - 1) return c
python
def rand(n, d=None, r=2, samplefunc=_np.random.randn): """Generate a random d-dimensional TT-vector with ranks ``r``. Distribution to sample cores is provided by the samplefunc. Default is to sample from normal distribution. """ n0 = _np.asanyarray(n, dtype=_np.int32) r0 = _np.asanyarray(r, dtype=_np.int32) if d is None: d = n0.size if n0.size == 1: n0 = _np.ones((d,), dtype=_np.int32) * n0 if r0.size == 1: r0 = _np.ones((d + 1,), dtype=_np.int32) * r0 r0[0] = 1 r0[d] = 1 c = _vector.vector() c.d = d c.n = n0 c.r = r0 c.get_ps() c.core = samplefunc(c.ps[d] - 1) return c
Generate a random d-dimensional TT-vector with ranks ``r``. Distribution to sample cores is provided by the samplefunc. Default is to sample from normal distribution.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L315-L336
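A sketch of rand under the same assumption; d is passed explicitly here because n is given as a scalar:

    import tt

    x = tt.rand(4, d=6, r=3)
    print(x.n)   # six mode sizes of 4
    print(x.r)   # boundary TT-ranks are 1, interior ranks are 3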
oseledets/ttpy
tt/core/tools.py
eye
def eye(n, d=None): """ Creates an identity TT-matrix""" c = _matrix.matrix() c.tt = _vector.vector() if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) c.tt.d = n0.size else: n0 = _np.asanyarray([n] * d, dtype=_np.int32) c.tt.d = d c.n = n0.copy() c.m = n0.copy() c.tt.n = (c.n) * (c.m) c.tt.r = _np.ones((c.tt.d + 1,), dtype=_np.int32) c.tt.get_ps() c.tt.alloc_core() for i in xrange(c.tt.d): c.tt.core[ c.tt.ps[i] - 1:c.tt.ps[ i + 1] - 1] = _np.eye( c.n[i]).flatten() return c
python
def eye(n, d=None): """ Creates an identity TT-matrix""" c = _matrix.matrix() c.tt = _vector.vector() if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) c.tt.d = n0.size else: n0 = _np.asanyarray([n] * d, dtype=_np.int32) c.tt.d = d c.n = n0.copy() c.m = n0.copy() c.tt.n = (c.n) * (c.m) c.tt.r = _np.ones((c.tt.d + 1,), dtype=_np.int32) c.tt.get_ps() c.tt.alloc_core() for i in xrange(c.tt.d): c.tt.core[ c.tt.ps[i] - 1:c.tt.ps[ i + 1] - 1] = _np.eye( c.n[i]).flatten() return c
Creates an identity TT-matrix
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L340-L364
oseledets/ttpy
tt/core/tools.py
Toeplitz
def Toeplitz(x, d=None, D=None, kind='F'):
    """ Creates multilevel Toeplitz TT-matrix with ``D`` levels.

        Possible matrix types:

        * 'F' - full Toeplitz matrix,             size(x) = 2^{d+1}
        * 'C' - circulant matrix,                 size(x) = 2^d
        * 'L' - lower triangular Toeplitz matrix, size(x) = 2^d
        * 'U' - upper triangular Toeplitz matrix, size(x) = 2^d

        Sample calls:

        >>> # one-level Toeplitz matrix:
        >>> T = tt.Toeplitz(x)
        >>> # one-level circulant matrix:
        >>> T = tt.Toeplitz(x, kind='C')
        >>> # three-level upper-triangular Toeplitz matrix:
        >>> T = tt.Toeplitz(x, D=3, kind='U')
        >>> # two-level mixed-type Toeplitz matrix:
        >>> T = tt.Toeplitz(x, kind=['L', 'U'])
        >>> # two-level mixed-size Toeplitz matrix:
        >>> T = tt.Toeplitz(x, [3, 4], kind='C')
    """
    # checking for arguments consistency
    def check_kinds(D, kind):
        if D % len(kind) == 0:
            kind.extend(kind * (D // len(kind) - 1))
        if len(kind) != D:
            raise ValueError(
                "Must give proper amount of matrix kinds (one or D, for example)")

    kind = list(kind)
    if not set(kind).issubset(['F', 'C', 'L', 'U']):
        raise ValueError("Toeplitz matrix kind must be one of F, C, L, U.")
    if d is None:
        if D is None:
            D = len(kind)
        if x.d % D:
            raise ValueError(
                "x.d must be divisible by D when d is not specified!")
        if len(kind) == 1:
            d = _np.array([x.d // D - (1 if kind[0] == 'F' else 0)] * D,
                          dtype=_np.int32)
            kind = kind * D
        else:
            check_kinds(D, kind)
            if set(kind).issubset(['F']):
                d = _np.array([x.d // D - 1] * D, dtype=_np.int32)
            elif set(kind).issubset(['C', 'L', 'U']):
                d = _np.array([x.d // D] * D, dtype=_np.int32)
            else:
                raise ValueError(
                    "Only similar matrix kinds (only F or only C, L and U) are accepted when d is not specified!")
    elif d is not None:
        d = _np.asarray(d, dtype=_np.int32).flatten()
        if D is None:
            D = d.size
        elif d.size == 1:
            d = _np.array([d[0]] * D, dtype=_np.int32)
        if D != d.size:
            raise ValueError("D must be equal to len(d)")
        check_kinds(D, kind)
        if _np.sum(d) + _np.sum([(1 if knd == 'F' else 0)
                                 for knd in kind]) != x.d:
            raise ValueError(
                "Dimensions inconsistency: x.d != d_1 + d_2 + ... + d_D")
    # predefined matrices and tensors:
    I = [[1, 0], [0, 1]]
    J = [[0, 1], [0, 0]]
    JT = [[0, 0], [1, 0]]
    H = [[0, 1], [1, 0]]
    S = _np.array([[[0], [1]], [[1], [0]]]).transpose()  # 2 x 2 x 1
    P = _np.zeros((2, 2, 2, 2))
    P[:, :, 0, 0] = I
    P[:, :, 1, 0] = H
    P[:, :, 0, 1] = H
    P[:, :, 1, 1] = I
    P = _np.transpose(P)  # 2 x 2! x 2 x 2 x '1'
    Q = _np.zeros((2, 2, 2, 2))
    Q[:, :, 0, 0] = I
    Q[:, :, 1, 0] = JT
    Q[:, :, 0, 1] = JT
    Q = _np.transpose(Q)  # 2 x 2! x 2 x 2 x '1'
    R = _np.zeros((2, 2, 2, 2))
    R[:, :, 1, 0] = J
    R[:, :, 0, 1] = J
    R[:, :, 1, 1] = I
    R = _np.transpose(R)  # 2 x 2! x 2 x 2 x '1'
    W = _np.zeros([2] * 5)  # 2 x 2! x 2 x 2 x 2
    W[0, :, :, 0, 0] = W[1, :, :, 1, 1] = I
    W[0, :, :, 1, 0] = W[0, :, :, 0, 1] = JT
    W[1, :, :, 1, 0] = W[1, :, :, 0, 1] = J
    W = _np.transpose(W)  # 2 x 2! x 2 x 2 x 2
    V = _np.zeros((2, 2, 2, 2))
    V[0, :, :, 0] = I
    V[0, :, :, 1] = JT
    V[1, :, :, 1] = J
    V = _np.transpose(V)  # '1' x 2! x 2 x 2 x 2

    crs = []
    xcrs = _vector.vector.to_list(x)
    dp = 0  # dimensions passed
    for j in xrange(D):
        currd = d[j]
        xcr = xcrs[dp]
        cr = _np.tensordot(V, xcr, (0, 1))
        cr = cr.transpose(3, 0, 1, 2, 4)  # <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
        cr = cr.reshape((x.r[dp], 2, 2, 2 * x.r[dp + 1]),
                        order='F')  # <r_dp| x 2 x 2 x |2r_{dp+1}>
        dp += 1
        crs.append(cr)
        for i in xrange(1, currd - 1):
            xcr = xcrs[dp]
            # (<2| x 2 x 2 x |2>) x <r_dp| x |r_{dp+1}>
            cr = _np.tensordot(W, xcr, (1, 1))
            # <2| x <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
            cr = cr.transpose([0, 4, 1, 2, 3, 5])
            # <2r_dp| x 2 x 2 x |2r_{dp+1}>
            cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
            dp += 1
            crs.append(cr)
        if kind[j] == 'F':
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            cr = _np.tensordot(W, xcr, (1, 1)).transpose([0, 4, 1, 2, 3, 5])
            # <2r_dp| x 2 x 2 x |2r_{dp+1}>
            cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
            dp += 1
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            # <2| x |1> x <r_dp| x |r_{dp+1}>
            tmp = _np.tensordot(S, xcr, (1, 1))
            # tmp = tmp.transpose([0, 2, 1, 3])  # TODO: figure out WHY THE HELL
            # this spoils everything
            # <2r_dp| x |r_{dp+1}>
            tmp = tmp.reshape((2 * x.r[dp], x.r[dp + 1]), order='F')
            # <2r_{dp-1}| x 2 x 2 x |r_{dp+1}>
            cr = _np.tensordot(cr, tmp, (3, 0))
            dp += 1
            crs.append(cr)
        else:
            dotcore = None
            if kind[j] == 'C':
                dotcore = P
            elif kind[j] == 'L':
                dotcore = Q
            elif kind[j] == 'U':
                dotcore = R
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            # <2| x 2 x 2 x |'1'> x <r_dp| x |r_{dp+1}>
            cr = _np.tensordot(dotcore, xcr, (1, 1))
            # <2| x <r_dp| x 2 x 2 x |r_{dp+1}>
            cr = cr.transpose([0, 3, 1, 2, 4])
            cr = cr.reshape((2 * x.r[dp], 2, 2, x.r[dp + 1]), order='F')
            dp += 1
            crs.append(cr)
    return _matrix.matrix.from_list(crs)
python
def Toeplitz(x, d=None, D=None, kind='F'):
    """ Creates multilevel Toeplitz TT-matrix with ``D`` levels.

        Possible matrix types:

        * 'F' - full Toeplitz matrix,             size(x) = 2^{d+1}
        * 'C' - circulant matrix,                 size(x) = 2^d
        * 'L' - lower triangular Toeplitz matrix, size(x) = 2^d
        * 'U' - upper triangular Toeplitz matrix, size(x) = 2^d

        Sample calls:

        >>> # one-level Toeplitz matrix:
        >>> T = tt.Toeplitz(x)
        >>> # one-level circulant matrix:
        >>> T = tt.Toeplitz(x, kind='C')
        >>> # three-level upper-triangular Toeplitz matrix:
        >>> T = tt.Toeplitz(x, D=3, kind='U')
        >>> # two-level mixed-type Toeplitz matrix:
        >>> T = tt.Toeplitz(x, kind=['L', 'U'])
        >>> # two-level mixed-size Toeplitz matrix:
        >>> T = tt.Toeplitz(x, [3, 4], kind='C')
    """
    # checking for arguments consistency
    def check_kinds(D, kind):
        if D % len(kind) == 0:
            kind.extend(kind * (D // len(kind) - 1))
        if len(kind) != D:
            raise ValueError(
                "Must give proper amount of matrix kinds (one or D, for example)")

    kind = list(kind)
    if not set(kind).issubset(['F', 'C', 'L', 'U']):
        raise ValueError("Toeplitz matrix kind must be one of F, C, L, U.")
    if d is None:
        if D is None:
            D = len(kind)
        if x.d % D:
            raise ValueError(
                "x.d must be divisible by D when d is not specified!")
        if len(kind) == 1:
            d = _np.array([x.d // D - (1 if kind[0] == 'F' else 0)] * D,
                          dtype=_np.int32)
            kind = kind * D
        else:
            check_kinds(D, kind)
            if set(kind).issubset(['F']):
                d = _np.array([x.d // D - 1] * D, dtype=_np.int32)
            elif set(kind).issubset(['C', 'L', 'U']):
                d = _np.array([x.d // D] * D, dtype=_np.int32)
            else:
                raise ValueError(
                    "Only similar matrix kinds (only F or only C, L and U) are accepted when d is not specified!")
    elif d is not None:
        d = _np.asarray(d, dtype=_np.int32).flatten()
        if D is None:
            D = d.size
        elif d.size == 1:
            d = _np.array([d[0]] * D, dtype=_np.int32)
        if D != d.size:
            raise ValueError("D must be equal to len(d)")
        check_kinds(D, kind)
        if _np.sum(d) + _np.sum([(1 if knd == 'F' else 0)
                                 for knd in kind]) != x.d:
            raise ValueError(
                "Dimensions inconsistency: x.d != d_1 + d_2 + ... + d_D")
    # predefined matrices and tensors:
    I = [[1, 0], [0, 1]]
    J = [[0, 1], [0, 0]]
    JT = [[0, 0], [1, 0]]
    H = [[0, 1], [1, 0]]
    S = _np.array([[[0], [1]], [[1], [0]]]).transpose()  # 2 x 2 x 1
    P = _np.zeros((2, 2, 2, 2))
    P[:, :, 0, 0] = I
    P[:, :, 1, 0] = H
    P[:, :, 0, 1] = H
    P[:, :, 1, 1] = I
    P = _np.transpose(P)  # 2 x 2! x 2 x 2 x '1'
    Q = _np.zeros((2, 2, 2, 2))
    Q[:, :, 0, 0] = I
    Q[:, :, 1, 0] = JT
    Q[:, :, 0, 1] = JT
    Q = _np.transpose(Q)  # 2 x 2! x 2 x 2 x '1'
    R = _np.zeros((2, 2, 2, 2))
    R[:, :, 1, 0] = J
    R[:, :, 0, 1] = J
    R[:, :, 1, 1] = I
    R = _np.transpose(R)  # 2 x 2! x 2 x 2 x '1'
    W = _np.zeros([2] * 5)  # 2 x 2! x 2 x 2 x 2
    W[0, :, :, 0, 0] = W[1, :, :, 1, 1] = I
    W[0, :, :, 1, 0] = W[0, :, :, 0, 1] = JT
    W[1, :, :, 1, 0] = W[1, :, :, 0, 1] = J
    W = _np.transpose(W)  # 2 x 2! x 2 x 2 x 2
    V = _np.zeros((2, 2, 2, 2))
    V[0, :, :, 0] = I
    V[0, :, :, 1] = JT
    V[1, :, :, 1] = J
    V = _np.transpose(V)  # '1' x 2! x 2 x 2 x 2

    crs = []
    xcrs = _vector.vector.to_list(x)
    dp = 0  # dimensions passed
    for j in xrange(D):
        currd = d[j]
        xcr = xcrs[dp]
        cr = _np.tensordot(V, xcr, (0, 1))
        cr = cr.transpose(3, 0, 1, 2, 4)  # <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
        cr = cr.reshape((x.r[dp], 2, 2, 2 * x.r[dp + 1]),
                        order='F')  # <r_dp| x 2 x 2 x |2r_{dp+1}>
        dp += 1
        crs.append(cr)
        for i in xrange(1, currd - 1):
            xcr = xcrs[dp]
            # (<2| x 2 x 2 x |2>) x <r_dp| x |r_{dp+1}>
            cr = _np.tensordot(W, xcr, (1, 1))
            # <2| x <r_dp| x 2 x 2 x |2> x |r_{dp+1}>
            cr = cr.transpose([0, 4, 1, 2, 3, 5])
            # <2r_dp| x 2 x 2 x |2r_{dp+1}>
            cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
            dp += 1
            crs.append(cr)
        if kind[j] == 'F':
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            cr = _np.tensordot(W, xcr, (1, 1)).transpose([0, 4, 1, 2, 3, 5])
            # <2r_dp| x 2 x 2 x |2r_{dp+1}>
            cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F')
            dp += 1
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            # <2| x |1> x <r_dp| x |r_{dp+1}>
            tmp = _np.tensordot(S, xcr, (1, 1))
            # tmp = tmp.transpose([0, 2, 1, 3])  # TODO: figure out WHY THE HELL
            # this spoils everything
            # <2r_dp| x |r_{dp+1}>
            tmp = tmp.reshape((2 * x.r[dp], x.r[dp + 1]), order='F')
            # <2r_{dp-1}| x 2 x 2 x |r_{dp+1}>
            cr = _np.tensordot(cr, tmp, (3, 0))
            dp += 1
            crs.append(cr)
        else:
            dotcore = None
            if kind[j] == 'C':
                dotcore = P
            elif kind[j] == 'L':
                dotcore = Q
            elif kind[j] == 'U':
                dotcore = R
            xcr = xcrs[dp]  # r_dp x 2 x r_{dp+1}
            # <2| x 2 x 2 x |'1'> x <r_dp| x |r_{dp+1}>
            cr = _np.tensordot(dotcore, xcr, (1, 1))
            # <2| x <r_dp| x 2 x 2 x |r_{dp+1}>
            cr = cr.transpose([0, 3, 1, 2, 4])
            cr = cr.reshape((2 * x.r[dp], 2, 2, x.r[dp + 1]), order='F')
            dp += 1
            crs.append(cr)
    return _matrix.matrix.from_list(crs)
Creates multilevel Toeplitz TT-matrix with ``D`` levels.

Possible matrix types:

* 'F' - full Toeplitz matrix,             size(x) = 2^{d+1}
* 'C' - circulant matrix,                 size(x) = 2^d
* 'L' - lower triangular Toeplitz matrix, size(x) = 2^d
* 'U' - upper triangular Toeplitz matrix, size(x) = 2^d

Sample calls:

>>> # one-level Toeplitz matrix:
>>> T = tt.Toeplitz(x)
>>> # one-level circulant matrix:
>>> T = tt.Toeplitz(x, kind='C')
>>> # three-level upper-triangular Toeplitz matrix:
>>> T = tt.Toeplitz(x, D=3, kind='U')
>>> # two-level mixed-type Toeplitz matrix:
>>> T = tt.Toeplitz(x, kind=['L', 'U'])
>>> # two-level mixed-size Toeplitz matrix:
>>> T = tt.Toeplitz(x, [3, 4], kind='C')
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L368-L525
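A small usage sketch for the constructor above, assuming package-level access as tt.Toeplitz and tt.xfun; it only inspects sizes and ranks rather than asserting a particular dense layout:

import tt

d = 4
x = tt.xfun(2, d)                    # generating vector 0, 1, ..., 2^d - 1 in QTT form
C = tt.Toeplitz(x, kind='C')         # one-level circulant matrix, 2^d x 2^d
L = tt.Toeplitz(x, kind='L')         # lower triangular Toeplitz of the same size
T = tt.Toeplitz(tt.xfun(2, d + 1))   # full Toeplitz needs a generating vector of size 2^{d+1}
print(C.n, L.n, T.n)                 # mode sizes of the three operators
print(T.tt.r)                        # TT-ranks stay small for this generating vector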
oseledets/ttpy
tt/core/tools.py
qlaplace_dd
def qlaplace_dd(d): """Creates a QTT representation of the Laplace operator"""
    res = _matrix.matrix()
    d0 = d[::-1]
    D = len(d0)
    I = _np.eye(2)
    J = _np.array([[0, 1], [0, 0]])
    cr = []
    # note: the original used 'is' for integer comparisons, which only works by
    # accident for small ints; '==' is the correct comparison
    if D == 1:
        for k in xrange(1, d0[0] + 1):
            if k == 1:
                cur_core = _np.zeros((1, 2, 2, 3))
                cur_core[:, :, :, 0] = 2 * I - J - J.T
                cur_core[:, :, :, 1] = -J
                cur_core[:, :, :, 2] = -J.T
            elif k == d0[0]:
                cur_core = _np.zeros((3, 2, 2, 1))
                cur_core[0, :, :, 0] = I
                cur_core[1, :, :, 0] = J.T
                cur_core[2, :, :, 0] = J
            else:
                cur_core = _np.zeros((3, 2, 2, 3))
                cur_core[0, :, :, 0] = I
                cur_core[1, :, :, 1] = J
                cur_core[2, :, :, 2] = J.T
                cur_core[1, :, :, 0] = J.T
                cur_core[2, :, :, 0] = J
            cr.append(cur_core)
    else:
        for k in xrange(D):
            for kappa in xrange(1, d0[k] + 1):
                if kappa == 1:
                    if k == 0:
                        cur_core = _np.zeros((1, 2, 2, 4))
                        cur_core[:, :, :, 0] = 2 * I - J - J.T
                        cur_core[:, :, :, 1] = -J
                        cur_core[:, :, :, 2] = -J.T
                        cur_core[:, :, :, 3] = I
                    elif k == D - 1:
                        cur_core = _np.zeros((2, 2, 2, 3))
                        cur_core[0, :, :, 0] = 2 * I - J - J.T
                        cur_core[0, :, :, 1] = -J
                        cur_core[0, :, :, 2] = -J.T
                        cur_core[1, :, :, 0] = I
                    else:
                        cur_core = _np.zeros((2, 2, 2, 4))
                        cur_core[0, :, :, 0] = 2 * I - J - J.T
                        cur_core[0, :, :, 1] = -J
                        cur_core[0, :, :, 2] = -J.T
                        cur_core[0, :, :, 3] = I
                        cur_core[1, :, :, 0] = I
                elif kappa == d0[k]:
                    if k == D - 1:
                        cur_core = _np.zeros((3, 2, 2, 1))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                    else:
                        cur_core = _np.zeros((4, 2, 2, 2))
                        cur_core[3, :, :, 0] = I
                        cur_core[0, :, :, 1] = I
                        cur_core[1, :, :, 1] = J.T
                        cur_core[2, :, :, 1] = J
                else:
                    if k == D - 1:
                        cur_core = _np.zeros((3, 2, 2, 3))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 1] = J
                        cur_core[2, :, :, 2] = J.T
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                    else:
                        cur_core = _np.zeros((4, 2, 2, 4))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 1] = J
                        cur_core[2, :, :, 2] = J.T
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                        cur_core[3, :, :, 3] = I
                cr.append(cur_core)
    return _matrix.matrix.from_list(cr)
python
def qlaplace_dd(d): """Creates a QTT representation of the Laplace operator"""
    res = _matrix.matrix()
    d0 = d[::-1]
    D = len(d0)
    I = _np.eye(2)
    J = _np.array([[0, 1], [0, 0]])
    cr = []
    # note: the original used 'is' for integer comparisons, which only works by
    # accident for small ints; '==' is the correct comparison
    if D == 1:
        for k in xrange(1, d0[0] + 1):
            if k == 1:
                cur_core = _np.zeros((1, 2, 2, 3))
                cur_core[:, :, :, 0] = 2 * I - J - J.T
                cur_core[:, :, :, 1] = -J
                cur_core[:, :, :, 2] = -J.T
            elif k == d0[0]:
                cur_core = _np.zeros((3, 2, 2, 1))
                cur_core[0, :, :, 0] = I
                cur_core[1, :, :, 0] = J.T
                cur_core[2, :, :, 0] = J
            else:
                cur_core = _np.zeros((3, 2, 2, 3))
                cur_core[0, :, :, 0] = I
                cur_core[1, :, :, 1] = J
                cur_core[2, :, :, 2] = J.T
                cur_core[1, :, :, 0] = J.T
                cur_core[2, :, :, 0] = J
            cr.append(cur_core)
    else:
        for k in xrange(D):
            for kappa in xrange(1, d0[k] + 1):
                if kappa == 1:
                    if k == 0:
                        cur_core = _np.zeros((1, 2, 2, 4))
                        cur_core[:, :, :, 0] = 2 * I - J - J.T
                        cur_core[:, :, :, 1] = -J
                        cur_core[:, :, :, 2] = -J.T
                        cur_core[:, :, :, 3] = I
                    elif k == D - 1:
                        cur_core = _np.zeros((2, 2, 2, 3))
                        cur_core[0, :, :, 0] = 2 * I - J - J.T
                        cur_core[0, :, :, 1] = -J
                        cur_core[0, :, :, 2] = -J.T
                        cur_core[1, :, :, 0] = I
                    else:
                        cur_core = _np.zeros((2, 2, 2, 4))
                        cur_core[0, :, :, 0] = 2 * I - J - J.T
                        cur_core[0, :, :, 1] = -J
                        cur_core[0, :, :, 2] = -J.T
                        cur_core[0, :, :, 3] = I
                        cur_core[1, :, :, 0] = I
                elif kappa == d0[k]:
                    if k == D - 1:
                        cur_core = _np.zeros((3, 2, 2, 1))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                    else:
                        cur_core = _np.zeros((4, 2, 2, 2))
                        cur_core[3, :, :, 0] = I
                        cur_core[0, :, :, 1] = I
                        cur_core[1, :, :, 1] = J.T
                        cur_core[2, :, :, 1] = J
                else:
                    if k == D - 1:
                        cur_core = _np.zeros((3, 2, 2, 3))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 1] = J
                        cur_core[2, :, :, 2] = J.T
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                    else:
                        cur_core = _np.zeros((4, 2, 2, 4))
                        cur_core[0, :, :, 0] = I
                        cur_core[1, :, :, 1] = J
                        cur_core[2, :, :, 2] = J.T
                        cur_core[1, :, :, 0] = J.T
                        cur_core[2, :, :, 0] = J
                        cur_core[3, :, :, 3] = I
                cr.append(cur_core)
    return _matrix.matrix.from_list(cr)
Creates a QTT representation of the Laplace operator
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L529-L609
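A short sanity-check sketch for the QTT Laplacian above, assuming tt.qlaplace_dd is available at package level; with the usual mode ordering the densified operator should be the tridiagonal [-1, 2, -1] (Dirichlet) Laplacian:

import numpy as np
import tt

A = tt.qlaplace_dd([3])              # 1D Laplacian on a 2^3-point grid, in QTT form
n = 2 ** 3
dense = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
print(np.allclose(A.full(), dense))  # compare against the dense tridiagonal matrix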
oseledets/ttpy
tt/core/tools.py
xfun
def xfun(n, d=None):
    """ Create a QTT-representation of the 0:prod(n) vector

        call examples:
        tt.xfun(2, 5)         # create 2 x 2 x 2 x 2 x 2 TT-vector
        tt.xfun(3)            # create [0, 1, 2] one-dimensional TT-vector
        tt.xfun([3, 5, 7], 2) # create 3 x 5 x 7 x 3 x 5 x 7 TT-vector
    """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    if d == 1:
        return _vector.vector.from_list(
            [_np.reshape(_np.arange(n0[0]), (1, n0[0], 1))])
    cr = []
    cur_core = _np.ones((1, n0[0], 2))
    cur_core[0, :, 0] = _np.arange(n0[0])
    cr.append(cur_core)
    ni = float(n0[0])
    for i in xrange(1, d - 1):
        cur_core = _np.zeros((2, n0[i], 2))
        for j in xrange(n0[i]):
            cur_core[:, j, :] = _np.eye(2)
        cur_core[1, :, 0] = ni * _np.arange(n0[i])
        ni *= n0[i]
        cr.append(cur_core)
    cur_core = _np.ones((2, n0[d - 1], 1))
    cur_core[1, :, 0] = ni * _np.arange(n0[d - 1])
    cr.append(cur_core)
    return _vector.vector.from_list(cr)
python
def xfun(n, d=None):
    """ Create a QTT-representation of the 0:prod(n) vector

        call examples:
        tt.xfun(2, 5)         # create 2 x 2 x 2 x 2 x 2 TT-vector
        tt.xfun(3)            # create [0, 1, 2] one-dimensional TT-vector
        tt.xfun([3, 5, 7], 2) # create 3 x 5 x 7 x 3 x 5 x 7 TT-vector
    """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    if d == 1:
        return _vector.vector.from_list(
            [_np.reshape(_np.arange(n0[0]), (1, n0[0], 1))])
    cr = []
    cur_core = _np.ones((1, n0[0], 2))
    cur_core[0, :, 0] = _np.arange(n0[0])
    cr.append(cur_core)
    ni = float(n0[0])
    for i in xrange(1, d - 1):
        cur_core = _np.zeros((2, n0[i], 2))
        for j in xrange(n0[i]):
            cur_core[:, j, :] = _np.eye(2)
        cur_core[1, :, 0] = ni * _np.arange(n0[i])
        ni *= n0[i]
        cr.append(cur_core)
    cur_core = _np.ones((2, n0[d - 1], 1))
    cur_core[1, :, 0] = ni * _np.arange(n0[d - 1])
    cr.append(cur_core)
    return _vector.vector.from_list(cr)
Create a QTT-representation of the 0:prod(n) vector

call examples:
tt.xfun(2, 5)         # create 2 x 2 x 2 x 2 x 2 TT-vector
tt.xfun(3)            # create [0, 1, 2] one-dimensional TT-vector
tt.xfun([3, 5, 7], 2) # create 3 x 5 x 7 x 3 x 5 x 7 TT-vector
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L612-L644
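A brief usage sketch for xfun, assuming tt.xfun at package level and that .full() returns the d-dimensional array whose Fortran-order flattening recovers 0, 1, ..., prod(n) - 1:

import numpy as np
import tt

x = tt.xfun(2, 5)                      # 2 x 2 x 2 x 2 x 2 QTT-vector
print(x.r)                             # interior TT-ranks are 2: [1 2 2 2 2 1]
print(np.allclose(x.full().flatten('F'), np.arange(2 ** 5)))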
oseledets/ttpy
tt/core/tools.py
linspace
def linspace(n, d=None, a=0.0, b=1.0, right=True, left=True):
    """ Create a QTT-representation of a uniform grid on an interval [a, b] """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    t = xfun(n0)
    e = ones(n0)
    N = _np.prod(n0)  # Size
    if left and right:
        h = (b - a) * 1.0 / (N - 1)
        res = a * e + t * h
    elif left and not right:
        h = (b - a) * 1.0 / N
        res = a * e + t * h
    elif right and not left:
        h = (b - a) * 1.0 / N
        res = a * e + (t + e) * h
    else:
        h = (b - a) * 1.0 / (N - 1)
        res = a * e + (t + e) * h
    return res.round(1e-13)
python
def linspace(n, d=None, a=0.0, b=1.0, right=True, left=True):
    """ Create a QTT-representation of a uniform grid on an interval [a, b] """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    t = xfun(n0)
    e = ones(n0)
    N = _np.prod(n0)  # Size
    if left and right:
        h = (b - a) * 1.0 / (N - 1)
        res = a * e + t * h
    elif left and not right:
        h = (b - a) * 1.0 / N
        res = a * e + t * h
    elif right and not left:
        h = (b - a) * 1.0 / N
        res = a * e + (t + e) * h
    else:
        h = (b - a) * 1.0 / (N - 1)
        res = a * e + (t + e) * h
    return res.round(1e-13)
Create a QTT-representation of a uniform grid on an interval [a, b]
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L647-L671
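A usage sketch for the grid constructor, assuming tt.linspace at package level and the same Fortran-order flattening convention as for xfun above:

import numpy as np
import tt

# 2^10 equispaced points on [0, 1], both endpoints included by default
g = tt.linspace(2, 10, a=0.0, b=1.0)
print(np.allclose(g.full().flatten('F'), np.linspace(0.0, 1.0, 2 ** 10)))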
oseledets/ttpy
tt/core/tools.py
sin
def sin(d, alpha=1.0, phase=0.0):
    """ Create TT-vector for :math:`\\sin(\\alpha n + \\varphi)`."""
    cr = []
    cur_core = _np.zeros([1, 2, 2], dtype=_np.float)
    cur_core[0, 0, :] = [_math.cos(phase), _math.sin(phase)]
    cur_core[0, 1, :] = [_math.cos(alpha + phase), _math.sin(alpha + phase)]
    cr.append(cur_core)
    for i in xrange(1, d - 1):
        cur_core = _np.zeros([2, 2, 2], dtype=_np.float)
        cur_core[0, 0, :] = [1.0, 0.0]
        cur_core[1, 0, :] = [0.0, 1.0]
        cur_core[0, 1, :] = [_math.cos(alpha * 2 ** i), _math.sin(alpha * 2 ** i)]
        cur_core[1, 1, :] = [-_math.sin(alpha * 2 ** i), _math.cos(alpha * 2 ** i)]
        cr.append(cur_core)
    cur_core = _np.zeros([2, 2, 1], dtype=_np.float)
    cur_core[0, :, 0] = [0.0, _math.sin(alpha * 2 ** (d - 1))]
    cur_core[1, :, 0] = [1.0, _math.cos(alpha * 2 ** (d - 1))]
    cr.append(cur_core)
    return _vector.vector.from_list(cr)
python
def sin(d, alpha=1.0, phase=0.0):
    """ Create TT-vector for :math:`\\sin(\\alpha n + \\varphi)`."""
    cr = []
    cur_core = _np.zeros([1, 2, 2], dtype=_np.float)
    cur_core[0, 0, :] = [_math.cos(phase), _math.sin(phase)]
    cur_core[0, 1, :] = [_math.cos(alpha + phase), _math.sin(alpha + phase)]
    cr.append(cur_core)
    for i in xrange(1, d - 1):
        cur_core = _np.zeros([2, 2, 2], dtype=_np.float)
        cur_core[0, 0, :] = [1.0, 0.0]
        cur_core[1, 0, :] = [0.0, 1.0]
        cur_core[0, 1, :] = [_math.cos(alpha * 2 ** i), _math.sin(alpha * 2 ** i)]
        cur_core[1, 1, :] = [-_math.sin(alpha * 2 ** i), _math.cos(alpha * 2 ** i)]
        cr.append(cur_core)
    cur_core = _np.zeros([2, 2, 1], dtype=_np.float)
    cur_core[0, :, 0] = [0.0, _math.sin(alpha * 2 ** (d - 1))]
    cur_core[1, :, 0] = [1.0, _math.cos(alpha * 2 ** (d - 1))]
    cr.append(cur_core)
    return _vector.vector.from_list(cr)
Create TT-vector for :math:`\\sin(\\alpha n + \\varphi)`.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L674-L704
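A usage sketch, assuming tt.sin at package level; it checks the rank-2 structure and the value convention sin(alpha * n + phase) for n = 0, ..., 2^d - 1:

import numpy as np
import tt

d, alpha, phase = 8, 0.01, 0.3
s = tt.sin(d, alpha, phase)
n = np.arange(2 ** d)
print(s.r)                             # interior TT-ranks equal 2
print(np.allclose(s.full().flatten('F'), np.sin(alpha * n + phase)))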
oseledets/ttpy
tt/core/tools.py
cos
def cos(d, alpha=1.0, phase=0.0):
    """ Create TT-vector for :math:`\\cos(\\alpha n + \\varphi)`."""
    return sin(d, alpha, phase + _math.pi * 0.5)
python
def cos(d, alpha=1.0, phase=0.0):
    """ Create TT-vector for :math:`\\cos(\\alpha n + \\varphi)`."""
    return sin(d, alpha, phase + _math.pi * 0.5)
Create TT-vector for :math:`\\cos(\\alpha n + \\varphi)`.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L707-L709
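Since cos simply shifts the phase of sin by pi/2, a one-line check (same assumptions as the sin sketch above) is enough:

import numpy as np
import tt

d = 6
c = tt.cos(d, alpha=0.1)
print(np.allclose(c.full().flatten('F'), np.cos(0.1 * np.arange(2 ** d))))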