Dataset columns: id — string (length 1 to 265); text — string (length 6 to 5.19M); dataset_id — string (7 classes).
/ModularTorch-0.1.4.tar.gz/ModularTorch-0.1.4/modular_torch/DataSetup.py
import os
import zipfile
from pathlib import Path

import requests
import torch
import torchvision
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder


def get_data(data_link: str = None,
             name: str = "food_data",
             data_path: str = None,
             remove_zip: bool = True) -> Path:
    """Fetches data from a URL and saves it in the given directory.

    args:
        data_link: str: URL to fetch data from.
            ex: 'https://github.com/mrdbourke/pytorch-deep-learning/raw/main/data/pizza_steak_sushi.zip'
        name: str: folder name to save the data to. Created if it doesn't exist.
        data_path: str: parent directory of name (present working directory by default).
        remove_zip: bool: remove the downloaded zip archive after extraction.

    return:
        image_path: path where the data is saved.

    Creates a folder at the given location with all data downloaded.
    """
    if data_link is None:
        data_link = "https://github.com/mrdbourke/pytorch-deep-learning/raw/main/data/pizza_steak_sushi.zip"

    data_path = Path("data") if data_path is None else Path(data_path)
    image_path = data_path / name
    print(f"[INFO] Data will be saved at '{image_path}'")

    if image_path.is_dir():
        print("[INFO] Path exists, Skipping Download!")
    else:
        image_path.mkdir(parents=True, exist_ok=True)

        zip_path = data_path / f"{name}.zip"
        with open(zip_path, "wb") as f:
            request = requests.get(data_link)
            print("[INFO] Data downloaded")
            f.write(request.content)

        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            print(f"[INFO] Unzipping '{name}' data")
            zip_ref.extractall(image_path)

        if remove_zip:
            os.remove(zip_path)

    print("[SUCCESS] Done")
    return image_path


def get_data_loaders(train_path: str,
                     test_path: str,
                     batch_size: int,
                     train_transform: torchvision.transforms.Compose,
                     test_transform: torchvision.transforms.Compose = None,
                     shuffle: bool = True,
                     **kwargs) -> tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, list]:
    """Reads data and makes dataloaders given train and test paths.

    args:
        train_path: str: train directory path
        test_path: str: test directory path
        batch_size: int: number of samples per batch
        train_transform: torch transform to perform operations on images
        test_transform: torch transform to perform operations on images (defaults to train_transform)
        shuffle: bool: shuffle the train set

    returns:
        train_loader: DataLoader: train data generator
        test_loader: DataLoader: test data generator
        class_names: list: list of all unique class names
    """
    if test_transform is None:
        test_transform = train_transform

    train_data = ImageFolder(train_path, train_transform, target_transform=None)
    train_loader = DataLoader(train_data, shuffle=shuffle, pin_memory=True,
                              batch_size=batch_size, **kwargs)

    test_data = ImageFolder(test_path, test_transform, target_transform=None)
    test_loader = DataLoader(test_data, shuffle=False, pin_memory=True,
                             batch_size=batch_size, **kwargs)

    return train_loader, test_loader, train_data.classes
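A minimal usage sketch of the two helpers above. The transform pipeline and the train/test sub-directories are assumptions about the downloaded dataset, not guarantees made by this module:

from torchvision import transforms

from modular_torch.DataSetup import get_data, get_data_loaders

# Downloads the default pizza/steak/sushi zip into ./data/food_data
image_path = get_data(name="food_data")

# Assumed layout: image_path/train and image_path/test ImageFolder directories
transform = transforms.Compose([transforms.Resize((64, 64)), transforms.ToTensor()])
train_loader, test_loader, class_names = get_data_loaders(
    train_path=image_path / "train",
    test_path=image_path / "test",
    batch_size=32,
    train_transform=transform,
)
print(class_names)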
PypiClean
/ConGen-0.0.5.tar.gz/ConGen-0.0.5/src/congen/exporters/GeoTIFFExporter.py
import os
import re
import unicodedata

import numpy as np
from exporters.Exporter import Exporter
from osgeo import gdal, gdal_array


class GeoTIFFExporter(Exporter):

    def export(self):
        self.export_raster_layers()

    def export_raster_layers(self, no_data_value=65535):
        """
        Useful links:
        https://here.isnew.info/how-to-save-a-numpy-array-as-a-geotiff-file-using-gdal.html
        https://borealperspectives.org/2014/01/16/data-type-mapping-when-using-pythongdal-to-write-numpy-arrays-to-geotiff/
        https://gis.stackexchange.com/questions/164853/reading-modifying-and-writing-a-geotiff-with-gdal-in-python
        https://gis.stackexchange.com/questions/198013/numpy-to-geotiff-for-use-with-gdal
        """
        driver = gdal.GetDriverByName("GTiff")
        n_bands = 1
        driver_specific_options = []
        for raster_layer in self.raster_layers:
            pixels = np.copy(raster_layer.pixels)
            pixels[pixels == None] = no_data_value
            filename = os.path.join(self.dest_path, f"{slugify(raster_layer.name)}.tif")
            typecode = gdal_array.NumericTypeCodeToGDALTypeCode(pixels.dtype)
            shape = pixels.shape
            out_ds = driver.Create(filename, shape[1], shape[0], n_bands,
                                   typecode, driver_specific_options)
            band = out_ds.GetRasterBand(1)
            band.WriteArray(pixels)
            band.SetNoDataValue(no_data_value)
            band.ComputeStatistics(False)
            out_ds.FlushCache()


def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py

    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    value = str(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')
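A quick illustration of how the slugify helper above turns a raster layer name into a filename-safe slug; the layer name is a made-up example:

# slugify("Habitat Suitability (2020)!") -> "habitat-suitability-2020"
print(slugify("Habitat Suitability (2020)!"))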
PypiClean
/GT_521F52-1.0.0.tar.gz/GT_521F52-1.0.0/README.md
# GT_521F52

This is a library for working with the ADH-Tech GT521F52 fingerprint scanner.

## GETTING STARTED

Clone this repository into your projects folder and import the GT-521F52 module into your application. The project is well documented, but if there are any doubts feel free to get in touch. [examples.py](examples.py) shows a few examples of how to use the API.

### PRE-REQUISITES AND INSTALLING:

Your system must be running Linux with stty installed and have the Python 3 interpreter. To install GT_521F52 simply run:

    pip3 install GT_521F52

## CONTRIBUTING

Feel free to contribute to our project, but please read our CONTRIBUTING.md for details of our code of conduct.

## AUTHORS

- Lucas Costa Cabral

## LICENSE

This project is under the Apache License 2.0. For information about the [license read our LICENSE doc](LICENSE).

## ACKNOWLEDGEMENTS

- Eronides Neto
PypiClean
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/docs/how-to/contributing/strategy/writing_the_new_strategy.rst
Writing the new strategy
========================

Identify a new strategy
-----------------------

If you're not sure if you have a strategy that has already been implemented,
you can search the :ref:`strategies-index` to see if they are implemented.  If
you are still unsure please get in touch: `via the gitter room
<https://gitter.im/Axelrod-Python/Axelrod>`_ or `open an issue
<https://github.com/Axelrod-Python/Axelrod/issues>`_.

Several strategies are special cases of other strategies. For example, both
:code:`Cooperator` and :code:`Defector` are special cases of :code:`Random`,
:code:`Random(1)` and :code:`Random(0)` respectively. While we could eliminate
:code:`Cooperator` in its current form, these strategies are intentionally left
as is as simple examples for new users and contributors. Nevertheless, please
feel free to update the docstrings of strategies like :code:`Random` to point
out such cases.

The code
--------

There are a couple of things that need to be created in a strategy.py file.
Let us take a look at the
:class:`TitForTat <axelrod.strategies.titfortat.TitForTat>` class (located in
the :code:`axelrod/strategies/titfortat.py` file)::

    class TitForTat(Player):
        """
        A player starts by cooperating and then mimics previous move by
        opponent.

        Note that the code for this strategy is written in a fairly verbose
        way. This is done so that it can serve as an example strategy for
        those who might be new to Python.

        Names

        - Rapoport's strategy: [Axelrod1980]_
        - TitForTat: [Axelrod1980]_
        """

        # These are various properties for the strategy
        name = 'Tit For Tat'
        classifier = {
            'memory_depth': 1,  # Four-Vector = (1.,0.,1.,0.)
            'stochastic': False,
            'inspects_source': False,
            'manipulates_source': False,
            'manipulates_state': False
        }

        def strategy(self, opponent):
            """This is the actual strategy"""
            # First move
            if len(self.history) == 0:
                return C
            # React to the opponent's last move
            if opponent.history[-1] == D:
                return D
            return C

The first thing that is needed is a docstring that explains what the strategy
does::

    """A player starts by cooperating and then mimics previous move by opponent."""

Secondly, any alternate names should be included and if possible references
provided (this helps when trying to identify if a strategy has already been
implemented or not)::

    - Rapoport's strategy: [Axelrod1980]_
    - TitForTat: [Axelrod1980]_

These references can be found in the :ref:`bibliography`. If a required
reference is not there please feel free to add it or just get in touch and
we'd be happy to help.

After that simply add in the string that will appear as the name of the
strategy::

    name = 'Tit For Tat'

Note that this is mainly used in plots by :code:`matplotlib` so you can use
LaTeX if you want to. For example there is a strategy with :math:`\pi` as a
name::

    name = '$\pi$'

Following that you can add in the :code:`classifier` dictionary::

    classifier = {
        'memory_depth': 1,  # Four-Vector = (1.,0.,1.,0.)
        'stochastic': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

This helps classify the strategy as described in
:ref:`classification-of-strategies`.

After that the only thing required is to write the :code:`strategy` method
which takes an opponent as an argument. In the case of
:class:`TitForTat <axelrod.strategies.titfortat.TitForTat>` the strategy
checks if it has any history (:code:`if len(self.history) == 0`). If it does
not (i.e. this is the first play of the match) then it returns :code:`C`. If
not, the strategy simply repeats the opponent's last move
(:code:`return opponent.history[-1]`)::

    def strategy(self, opponent):
        """This is the actual strategy"""
        # First move
        if len(self.history) == 0:
            return C
        # Repeat the opponent's last move
        return opponent.history[-1]

The variables :code:`C` and :code:`D` represent the cooperate and defect
actions respectively.

Some strategies make specific use of the variables of a match to create their
own attributes. In principle these attributes could change throughout a match
or tournament if the match properties (like the game matrix) change, so we
require that this logic live in the :code:`receive_match_attributes` method
for correct dynamic updating. Here is how this is done for
:class:`Stalker <axelrod.strategies.stalker.Stalker>`::

    def receive_match_attributes(self):
        R, P, S, T = self.match_attributes["game"].RPST()
        self.very_good_score = R
        self.very_bad_score = P
        self.wish_score = (R + P) / 2

There are various examples of helpful functions and properties that make
writing strategies easier. Do not hesitate to get in touch with the
Axelrod-Python team for guidance.
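Putting the pieces above together, a complete new strategy could look roughly
as follows. This is only an illustrative sketch: the :code:`GrimExample` name,
its behaviour and classifier values are made up, and the import paths should be
checked against the library layout::

    from axelrod.action import Action
    from axelrod.player import Player

    C, D = Action.C, Action.D


    class GrimExample(Player):
        """Cooperates until the opponent defects once, then defects forever.

        Names

        - GrimExample: (illustrative only)
        """

        name = 'Grim Example'
        classifier = {
            'memory_depth': float('inf'),
            'stochastic': False,
            'inspects_source': False,
            'manipulates_source': False,
            'manipulates_state': False
        }

        def strategy(self, opponent):
            # Defect forever once the opponent has defected at least once
            if D in opponent.history:
                return D
            return C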
PypiClean
/ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/format_ascii.rst
.. _format_ascii:

============
ASCII tables
============

.. note:: There are probably as many ASCII table formats as astronomers (if
          not more). These generally store a single table, and can sometimes
          include meta-data.

Overview
--------

Reading ASCII tables is supported thanks to the `asciitable
<http://cxc.harvard.edu/contrib/asciitable/>`_ module, which makes it easy to
read in arbitrary ASCII files.

By default, several pre-defined formats are available. These include `CDS
<http://vizier.u-strasbg.fr/doc/catstd.htx>`_ tables (also called
Machine-Readable tables), DAOPhot tables, and RDB tables. To read these
formats, simply use::

    >>> t = atpy.Table('table.mrt', type='mrt')
    >>> t = atpy.Table('table.cds', type='cds')
    >>> t = atpy.Table('table.phot', type='daophot')
    >>> t = atpy.Table('table.rdb', type='rdb')

The ``type=`` argument is optional for these formats, if they have appropriate
file extensions, but due to the large number of ASCII file formats, it is
safer to include it.

ATpy also allows full access to asciitable. If the ``type='ascii'`` argument
is specified in ``Table()``, all arguments are passed to ``asciitable.read``,
and the result is automatically stored in the ATpy ``Table`` instance. For
more information on the arguments available in ``asciitable.read``, see `here
<http://cxc.harvard.edu/contrib/asciitable/#basic-usage-with-read>`_.

.. note:: As for all file formats, the ``verbose`` argument can be specified
          to control whether warning messages are shown when reading (the
          default is ``verbose=True``), and the ``overwrite`` argument can be
          used when writing to overwrite a file (the default is
          ``overwrite=False``).

Full API for advanced users
---------------------------

.. note :: The following functions should not be called directly - the
           arguments should be passed to ``Table()/Table.read()``.

.. autofunction:: atpy.asciitables.read_cds

.. autofunction:: atpy.asciitables.read_daophot

.. autofunction:: atpy.asciitables.read_rdb

.. autofunction:: atpy.asciitables.read_ascii

.. autofunction:: atpy.asciitables.write_ascii
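As a hedged example of the asciitable pass-through described above, an
arbitrary ASCII file can be read with ``type='ascii'`` and written back out
with ``overwrite``; the file names below are made up::

    >>> import atpy
    >>> t = atpy.Table('photometry.txt', type='ascii')
    >>> t.write('photometry_copy.tbl', type='ascii', overwrite=True)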
PypiClean
/EISeg-1.1.1-py3-none-any.whl/eiseg/util/voc.py
import xml.etree.ElementTree as ET


class VocAnnotations:
    def __init__(self, filename, witdh, height, depth=3,
                 foldername="VOC2007", sourcename="Unknown"):
        self.root = ET.Element("annotation")
        self.foleder = ET.SubElement(self.root, "folder")
        self.filename = ET.SubElement(self.root, "filename")
        self.source = ET.SubElement(self.root, "source")
        self.size = ET.SubElement(self.root, "size")
        self.width = ET.SubElement(self.size, "width")
        self.height = ET.SubElement(self.size, "height")
        self.depth = ET.SubElement(self.size, "depth")
        self.foleder.text = foldername
        self.filename.text = filename
        self.source.text = sourcename
        self.width.text = str(witdh)
        self.height.text = str(height)
        self.depth.text = str(depth)

    def savefile(self, filename):
        tree = ET.ElementTree(self.root)
        tree.write(filename, xml_declaration=False, encoding='utf-8')

    def add_object(self, label_name, xmin, ymin, xmax, ymax,
                   tpose=0, ttruncated=0, tdifficult=0):
        object = ET.SubElement(self.root, "object")
        namen = ET.SubElement(object, "name")
        namen.text = label_name
        pose = ET.SubElement(object, "pose")
        pose.text = str(tpose)
        truncated = ET.SubElement(object, "truncated")
        truncated.text = str(ttruncated)
        difficult = ET.SubElement(object, "difficult")
        difficult.text = str(tdifficult)
        bndbox = ET.SubElement(object, "bndbox")
        xminn = ET.SubElement(bndbox, "xmin")
        xminn.text = str(xmin)
        yminn = ET.SubElement(bndbox, "ymin")
        yminn.text = str(ymin)
        xmaxn = ET.SubElement(bndbox, "xmax")
        xmaxn.text = str(xmax)
        ymaxn = ET.SubElement(bndbox, "ymax")
        ymaxn.text = str(ymax)
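A brief usage sketch of the VocAnnotations class above; the image name, size, label and box coordinates are made-up values (note the constructor's width parameter is spelled "witdh"):

ann = VocAnnotations("street_001.jpg", witdh=1920, height=1080)
ann.add_object("person", xmin=100, ymin=200, xmax=300, ymax=600)
ann.savefile("street_001.xml")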
PypiClean
/Autoneuro-0.0.1.tar.gz/Autoneuro-0.0.1/application/modelTraining/classificationModels.py
import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import RandomizedSearchCV from sklearn.metrics import accuracy_score, roc_auc_score, classification_report from sklearn.model_selection import cross_val_score import xgboost as xgb from xgboost import XGBClassifier from sklearn.neighbors import KNeighborsClassifier class ClassificationModelTuner(): """ This class shall be used to get the best suited classification model Written By: iNeuron Version: 1.0 Revisions: None """ def __init__(self): self.file_object = open('logs/classificationModelsLogs.txt', 'a+') self.logger_object = AppLogger() self.file_operation = FileOperation() def get_tuned_random_forest_classifier(self, x_train, y_train): """ Method Name: get_tuned_random_forest_classifier Description: This method will be used to build RandomForestClassifier model Input Description: It takes x_train and y_train data for training the model. Output: It return Optimized RandomForestClassifier model. On Failure: Raise Exception Written By: Akhil Sagar Version: 1.0 Revisions: None """ try: self.rf_parameters = { 'max_depth': [5, 10, 15, 20, 25, None], 'n_estimators': range(10, 500, 50), 'criterion': ['gini', 'entropy'], 'bootstrap': [True, False], 'min_samples_split': range(2, 10, 1), 'max_features': ['auto', 'log2'], 'min_samples_leaf': range(1, 10), } """ self.rmdsearch = RandomizedSearchCV(RandomForestClassifier(), self.rf_parameters, n_iter=10, cv=10, random_state=22, n_jobs=-1) self.rmdsearch.fit(x_train, y_train) hyperparameters = self.rmdsearch.best_params_ max_depth, n_estimators, criterion, bootstrap, min_samples_split, max_features, min_samples_leaf = \ hyperparameters['max_depth'], hyperparameters['n_estimators'], hyperparameters['criterion'], \ hyperparameters['bootstrap'], hyperparameters['min_samples_split'], hyperparameters['max_features'], \ hyperparameters['min_samples_leaf'] self.model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, criterion=criterion, min_samples_leaf=min_samples_leaf, max_features=max_features, min_samples_split=min_samples_split, bootstrap=bootstrap, random_state=25, n_jobs=-1) """ self.model = RandomForestClassifier(n_jobs=-1) self.model.fit(x_train, y_train) return self.model except Exception as e: self.logger_object.log(self.file_object, 'Exception occured in performing Model Building and Tuning. Exception message: ' + str( e)) raise Exception() def get_tuned_xgboost_classifier(self, x_train, y_train): """ Method Name: get_tuned_xgboost_classifier Description: This method will be used to build XGBoost Classifier model Input Description: It takes x_train and y_train data for training the model. Output: It return Optimized XGBoost model. 
On Failure: Raise Exception Written By: Akhil Sagar Version: 1.0 Revisions: None """ try: self.xg_parameters = {"n_estimators": [10, 50, 100, 200], "learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30], "max_depth": [3, 4, 5, 6, 8, 10, 12, 15, 20], "min_child_weight": [1, 3, 5, 7], "gamma": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5], "colsample_bytree": [0.3, 0.4, 0.5, 0.7] } """ self.rmdsearch = RandomizedSearchCV(XGBClassifier(objective='binary:logistic'), param_distributions=self.xg_parameters, n_iter=10, cv=10, n_jobs=-1) self.rmdsearch.fit(x_train, y_train) hyperparameters = self.rmdsearch.best_params_ n_estimators, min_child_weight, max_depth, learning_rate, gamma, colsample_bytree = hyperparameters[ 'n_estimators'], \ hyperparameters[ 'min_child_weight'], \ hyperparameters[ 'max_depth'], \ hyperparameters[ 'learning_rate'], \ hyperparameters[ 'gamma'], \ hyperparameters[ 'colsample_bytree'] self.xgboost_model = XGBClassifier(n_estimators=n_estimators, learning_rate=learning_rate, gamma=gamma, min_child_weight=min_child_weight, max_depth=max_depth, colsample_bytree=colsample_bytree) """ self.xgboost_model = XGBClassifier(n_jobs=-1) self.xgboost_model.fit(x_train, y_train) return self.xgboost_model except Exception as e: self.logger_object.log(self.file_object, 'Exception occured in performing Model Building and Tuning. Exception message: ' + str( e)) raise Exception() def get_tuned_knn_classifier(self, x_train, y_train): """ Method Name: get_tuned_knn_classifier Description: This method will be used to build KNearestNeighbour Classifier model Input Description: It takes x_train and y_train data for training the model. Output: It return Optimized KNearestNeighbourClassifier model. On Failure: Raise Exception Written By: Akshay Anvekar Version: 1.0 Revisions: None """ try: knn_parameters = {'n_neighbors': [50, 100, 200, 250, 300, 350], 'weights': ['uniform', 'distance'], 'algorithm': ['ball_tree', 'kd_tree'], 'leaf_size': [20, 25, 30, 35, 40, 45, 50], } rmdsearch = RandomizedSearchCV(KNeighborsClassifier(), knn_parameters, n_iter=10, cv=10, random_state=22, n_jobs=-1) rmdsearch.fit(x_train, y_train) hyperparameters = rmdsearch.best_params_ n_neighbors, weights, algorithm, leaf_size = hyperparameters['n_neighbors'], hyperparameters['weights'], \ hyperparameters['algorithm'], hyperparameters['leaf_size'] model = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm, leaf_size=leaf_size, n_jobs=-1) model.fit(x_train, y_train) return model except Exception as e: self.logger_object.log(self.file_object, 'Exception occured in performing Model Building and Tuning. Exception message: ' + str( e)) raise Exception() def get_best_model(self, x, y): """ Method Name: get_best_model Description: Find out the Model which has the best AUC score. Output: The best model name and the model object On Failure: Raise Exception Written By: iNeuron Intelligence Version: 1.0 Revisions: None """ self.logger_object.log(self.file_object, 'Entered the get_best_model method of the Model_Finder class') # create best model for XGBoost try: train_x, test_x, train_y, test_y = train_test_split(x, y) self.xgboost = self.get_tuned_xgboost_classifier(train_x, train_y) self.prediction_xgboost = self.xgboost.predict(test_x) # Predictions using the XGBoost Model if len( test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We will use accuracy in that case self.xgboost_score = accuracy_score(test_y, self.prediction_xgboost) self.logger_object.log(self.file_object, 'Accuracy for XGBoost:' + str(self.xgboost_score)) # Log AUC else: self.xgboost_score = roc_auc_score(test_y, self.prediction_xgboost) # AUC for XGBoost self.logger_object.log(self.file_object, 'AUC for XGBoost:' + str(self.xgboost_score)) # Log AUC # create best model for Random Forest self.random_forest = self.get_tuned_random_forest_classifier(train_x, train_y) self.prediction_random_forest = self.random_forest.predict( test_x) # prediction using the Random Forest Algorithm if len( test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We will use accuracy in that case self.random_forest_score = accuracy_score(test_y, self.prediction_random_forest) self.logger_object.log(self.file_object, 'Accuracy for RF:' + str(self.random_forest_score)) else: self.random_forest_score = roc_auc_score(test_y, self.prediction_random_forest) # AUC for Random Forest self.logger_object.log(self.file_object, 'AUC for RF:' + str(self.random_forest_score)) if (self.random_forest_score < self.xgboost_score): self.file_operation.save_model(self.xgboost, 'XGBoost') self.train_classification_report, self.test_classification_report = self.generate_model_report( self.xgboost, train_x, train_y, test_x, test_y, y.nunique()) return 'XGBoost', self.train_classification_report, self.test_classification_report else: self.file_operation.save_model(self.random_forest, 'RandomForest') self.train_classification_report, self.test_classification_report = self.generate_model_report( self.random_forest, train_x, train_y, test_x, test_y, y.nunique()) return 'RandomForest', \ self.train_classification_report, self.test_classification_report except Exception as e: self.logger_object.log(self.file_object, 'Exception occured in get_best_model method of the Model_Finder class. Exception message: ' + str( e)) self.logger_object.log(self.file_object, 'Model Selection Failed. Exited the get_best_model method of the Model_Finder class') raise Exception() def generate_model_report(self, model_object, train_x, train_y, test_x, test_y, num_classes): """ Method Name: generate_model_report Description: Find out the Model which has the best AUC score. 
Output: The best model name and the model object On Failure: Raise Exception Written By: iNeuron Intelligence Version: 1.0 Revisions: None """ try: self.train_classification_report = [] self.test_classification_report = [] train_predictions = model_object.predict(train_x) test_predictions = model_object.predict(test_x) train_report = classification_report(train_y, train_predictions).split()[4:] test_report = classification_report(test_y, test_predictions).split()[4:] counter = 0 while len(self.train_classification_report) < num_classes + 1: temp_dict = {} temp_dict['class'] = train_report[counter] temp_dict['precision'] = train_report[counter + 1] temp_dict['recall'] = train_report[counter + 2] temp_dict['f1-score'] = train_report[counter + 3] temp_dict['support'] = train_report[counter + 4] counter = counter + 5 self.train_classification_report.append(temp_dict) counter = 0 while len(self.test_classification_report) < num_classes + 1: temp_dict = {} temp_dict['class'] = test_report[counter] temp_dict['precision'] = test_report[counter + 1] temp_dict['recall'] = test_report[counter + 2] temp_dict['f1-score'] = test_report[counter + 3] temp_dict['support'] = test_report[counter + 4] counter = counter + 5 self.test_classification_report.append(temp_dict) return self.train_classification_report, self.test_classification_report except Exception as e: self.logger_object.log(self.file_object, 'Exception occured in generate_model_report method of the ClassificationModelTuner class. Exception message: ' + str( e)) self.logger_object.log(self.file_object, 'Model Selection Failed. Exited the generate_model_report method of the ClassificationModelTuner class') raise Exception() from logger.appLogger import AppLogger from fileOperations.file_methods import FileOperation
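A hedged usage sketch of the ClassificationModelTuner defined in this file. It assumes the logs/ directory exists and that the AppLogger and FileOperation helpers imported at the bottom of the module are importable; the CSV path and target column are made up:

import pandas as pd

from application.modelTraining.classificationModels import ClassificationModelTuner

df = pd.read_csv("training_data.csv")          # hypothetical training dataset
x, y = df.drop(columns=["target"]), df["target"]

tuner = ClassificationModelTuner()
best_name, train_report, test_report = tuner.get_best_model(x, y)
print(best_name)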
PypiClean
/Flask-OpenTracing-1.1.0.tar.gz/Flask-OpenTracing-1.1.0/README.rst
#################
Flask-OpenTracing
#################

This package enables distributed tracing in Flask applications via `The OpenTracing Project`_.
Once a production system contends with real concurrency or splits into many
services, crucial (and formerly easy) tasks become difficult: user-facing
latency optimization, root-cause analysis of backend errors, communication
about distinct pieces of a now-distributed system, etc. Distributed tracing
follows a request on its journey from inception to completion from
mobile/browser all the way to the microservices.

As core services and libraries adopt OpenTracing, the application builder is
no longer burdened with the task of adding basic tracing instrumentation to
their own code. In this way, developers can build their applications with the
tools they prefer and benefit from built-in tracing instrumentation.
OpenTracing implementations exist for major distributed tracing systems and
can be bound or swapped with a one-line configuration change.

If you want to learn more about the underlying python API, visit the python
`source code`_.

If you are migrating from the 0.x series, you may want to read the list of
`breaking changes`_.

.. _The OpenTracing Project: http://opentracing.io/
.. _source code: https://github.com/opentracing/opentracing-python
.. _breaking changes: #breaking-changes-from-0-x

Installation
============

Run the following command:

.. code-block::

    $ pip install Flask-Opentracing

Usage
=====

This Flask extension allows for tracing of Flask apps using the OpenTracing
API. All that it requires is for a ``FlaskTracing`` tracer to be initialized
using an instance of an OpenTracing tracer. You can either trace all requests
to your site, or use function decorators to trace certain individual requests.

**Note:** `optional_args` in both cases are any number of attributes (as
strings) of `flask.Request` that you wish to set as tags on the created span.

Initialize
----------

`FlaskTracing` wraps the tracer instance that's supported by opentracing. To
create a `FlaskTracing` object, you can either pass in a tracer object directly
or a callable that returns the tracer object. For example:

.. code-block:: python

    import opentracing
    from flask_opentracing import FlaskTracing

    opentracing_tracer = ## some OpenTracing tracer implementation
    tracing = FlaskTracing(opentracing_tracer, ...)

or

.. code-block:: python

    import opentracing
    from flask_opentracing import FlaskTracing

    def initialize_tracer():
        ...
        return opentracing_tracer

    tracing = FlaskTracing(initialize_tracer, ...)

Trace All Requests
------------------

.. code-block:: python

    import opentracing
    from flask_opentracing import FlaskTracing

    app = Flask(__name__)

    opentracing_tracer = ## some OpenTracing tracer implementation
    tracing = FlaskTracing(opentracing_tracer, True, app, [optional_args])

Trace Individual Requests
-------------------------

.. code-block:: python

    import opentracing
    from flask_opentracing import FlaskTracing

    app = Flask(__name__)

    opentracing_tracer = ## some OpenTracing tracer implementation
    tracing = FlaskTracing(opentracing_tracer)

    @app.route('/some_url')
    @tracing.trace(optional_args)
    def some_view_func():
        ...
        return some_view

Accessing Spans Manually
------------------------

In order to access the span for a request, we've provided a method
`FlaskTracing.get_span(request)` that returns the span for the request, if it
exists and is not finished. This can be used to log important events to the
span, set tags, or create child spans to trace non-RPC events. If no request
is passed in, the current request will be used.

Tracing an RPC
--------------

If you want to make an RPC and continue an existing trace, you can inject the
current span into the RPC. For example, if making an http request, the
following code will continue your trace across the wire:

.. code-block:: python

    @tracing.trace()
    def some_view_func(request):
        new_request = some_http_request
        current_span = tracing.get_span(request)
        text_carrier = {}
        opentracing_tracer.inject(current_span, opentracing.Format.TEXT_MAP, text_carrier)
        for k, v in text_carrier.items():
            new_request.add_header(k, v)
        ... # make request

Examples
========

See `examples`_ to view and run an example of two Flask applications with
integrated OpenTracing tracers.

.. _examples: https://github.com/opentracing-contrib/python-flask/tree/master/example

`This tutorial <http://blog.scoutapp.com/articles/2018/01/15/tutorial-tracing-python-flask-requests-with-opentracing>`_
has a step-by-step guide for using `Flask-Opentracing` with
`Jaeger <https://github.com/jaegertracing/jaeger>`_.

Breaking changes from 0.x
=========================

Starting with the 1.0 version, a few changes have taken place from previous
versions:

* ``FlaskTracer`` has been renamed to ``FlaskTracing``, although ``FlaskTracer``
  can still be used as a deprecated name.
* When passing an ``Application`` object at ``FlaskTracing`` creation time,
  ``trace_all_requests`` defaults to ``True``.
* When no ``opentracing.Tracer`` is provided, ``FlaskTracing`` will rely on the
  global tracer.

Further Information
===================

If you're interested in learning more about the OpenTracing standard, please
visit `opentracing.io`_ or `join the mailing list`_. If you would like to
implement OpenTracing in your project and need help, feel free to send us a
note at `[email protected]`_.

.. _opentracing.io: http://opentracing.io/
.. _join the mailing list: http://opentracing.us13.list-manage.com/subscribe?u=180afe03860541dae59e84153&id=19117aa6cd
.. _[email protected]: [email protected]
PypiClean
/Flaskel-3.1.0rc2-py3-none-any.whl/flaskel/views/resource.py
import typing as t from sqlalchemy.exc import SQLAlchemyError from vbcore.db.exceptions import DBError from vbcore.db.support import SQLASupport from vbcore.http import httpcode, HttpMethod from flaskel import abort, cap, db_session, PayloadValidator, Response, webargs from flaskel.ext.default import builder from ..utils.datastruct import Pagination from .base import BaseView, Resource, UrlsType class CatalogResource(Resource): pagination_enabled: bool = True methods_collection = [ HttpMethod.GET, ] methods_resource = [ HttpMethod.GET, ] methods_subresource = [ HttpMethod.GET, ] def __init__(self, model): self._model = model def on_get(self, res_id, *_, model=None, **kwargs): """ Get resource info :param res_id: resource identifier (primary key value) :param model: alternative SQLAlchemy model class :param kwargs: extra query filters :return: """ model = model or self._model return model.get_one(id=res_id, **kwargs) def on_collection(self, *_, params=None, model=None, **kwargs): """ Resource collection paginated and sorted :param params: parameters (usually from query string) :param model: alternative SqlAlchemy model class :param kwargs: extra query filters :return: """ page = size = None model = model or self._model max_size = cap.config.PAGINATION_MAX_PAGE_SIZE order_by = getattr(model, "order_by", None) if params is None: params = webargs.paginate() if self.pagination_enabled is True: page = params.get("page") size = params.get("page_size") size = max(size, max_size or 0) if size else max_size response = model.get_list( to_dict=not self.pagination_enabled, order_by=order_by, page=page, page_size=size, max_per_page=max_size, params=params, **kwargs, ) if self.pagination_enabled is True: return self.response_paginated( response, restricted=not params.get("related", False), ) return response @classmethod def response_paginated(cls, res, **kwargs): """ Prepare the paginated response for resource collection :param res: list of sqlalchemy models :return: """ if isinstance(res, list): return [r.to_dict(**kwargs) for r in res] headers = Response.pagination_headers( res.total, Pagination(page=res.page, page_size=res.per_page) ) return ( [r.to_dict(**kwargs) for r in res.items], (httpcode.PARTIAL_CONTENT if res.has_next else httpcode.SUCCESS), headers, ) class Restful(CatalogResource): post_schema: t.Any = None put_schema: t.Any = None support_class: t.Type[SQLASupport] = SQLASupport validator: t.Type[PayloadValidator] = PayloadValidator methods_subresource = [ HttpMethod.GET, HttpMethod.POST, ] methods_collection = [ HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, ] methods_resource = [ HttpMethod.GET, HttpMethod.PUT, HttpMethod.DELETE, ] def __init__(self, model, session=db_session): """ :param session: sqlalchemy session instance :param model: sqlalchemy model """ super().__init__(model) self._session = session self.support = self.support_class(self._model, self._session) def validate(self, schema): """ :param schema: schema compatible with self validator :return: """ schema = schema() if callable(schema) else schema return self.validator.validate(schema) def create_resource(self, data): """ :param data: dictionary data that represents the resource :return: sqlalchemy model instance """ return self._model(**data) # noinspection PyMethodMayBeStatic def update_resource(self, resource, data): """ :param resource: sqlalchemy model instance :param data: dictionary data that represents the resource :return: """ resource.update(data) return resource def _session_exception_handler(self, exception: 
SQLAlchemyError) -> t.NoReturn: cap.logger.exception(exception) self._session.rollback() if isinstance(exception, DBError): abort(httpcode.CONFLICT, response={"cause": exception.as_dict()}) abort(httpcode.INTERNAL_SERVER_ERROR) def _create(self, res): try: self._session.add(res) self._session.commit() except SQLAlchemyError as exc: self._session_exception_handler(exc) def _update(self, res): try: self._session.merge(res) self._session.commit() except SQLAlchemyError as exc: self._session_exception_handler(exc) @classmethod def _prepare_upsert_filters(cls, *_, **__) -> t.Dict[str, t.Any]: return {} # pylint: disable=inconsistent-return-statements def _upsert(self, data) -> t.Tuple[t.Any, int]: try: res, created = self.support.update_or_create( data, **self._prepare_upsert_filters(data) ) self._session.commit() return res, httpcode.CREATED if created else httpcode.SUCCESS except SQLAlchemyError as exc: self._session_exception_handler(exc) def on_post(self, *_, **__) -> t.Tuple[t.Dict[str, t.Any], int]: payload = self.validate(self.post_schema) res = self.create_resource(payload) self._create(res) return res.to_dict(), httpcode.CREATED def _delete(self, res_id, *_, **__) -> t.Dict[str, t.Any]: """ :param res_id: resource identifier (primary key value) """ res = self._model.query.get_or_404(res_id) data = res.to_dict() try: self._session.delete(res) self._session.commit() except SQLAlchemyError as exc: self._session_exception_handler(exc) return data def on_delete(self, res_id, *args, **kwargs): self._delete(res_id, *args, **kwargs) def on_put(self, *_, res_id=None, **__) -> t.Tuple[t.Dict[str, t.Any], int]: """ :param res_id: resource identifier (primary key value) it is optional in order to implement upsert :return: """ payload = self.validate(self.put_schema or self.post_schema) if res_id is not None: res = self._model.query.get_or_404(res_id) res = self.update_resource(res, payload) self._update(res) return res.to_dict(), httpcode.SUCCESS res, status = self._upsert(payload) return res.to_dict(), status class PatchApiView(Restful): methods_subresource = None methods_collection = None methods_resource = None methods: t.ClassVar[t.Optional[t.Collection[str]]] = [ HttpMethod.PATCH, ] @classmethod def register( cls, app, name: t.Optional[str] = None, urls: UrlsType = (), view: t.Optional[t.Type[BaseView]] = None, **kwargs, ) -> t.Callable: _view = t.cast(t.Type[BaseView], view or cls) return BaseView.register(app, name, urls, _view, **kwargs) @builder.no_content def patch(self, *args, **kwargs): return self.on_patch(*args, **kwargs) def on_patch(self, *_, **__): return self.not_implemented()
PypiClean
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/printer/printer.py
from __future__ import annotations import dataclasses from itertools import chain from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, TypeVar, Union, cast, overload, ) from graphql import is_union_type from graphql.language.printer import print_ast from graphql.type import ( is_enum_type, is_input_type, is_interface_type, is_object_type, is_scalar_type, is_specified_directive, ) from graphql.utilities.print_schema import ( is_defined_type, print_block, print_deprecated, print_description, print_implemented_interfaces, print_specified_by_url, ) from graphql.utilities.print_schema import print_type as original_print_type from strawberry.custom_scalar import ScalarWrapper from strawberry.enum import EnumDefinition from strawberry.schema_directive import Location, StrawberrySchemaDirective from strawberry.type import StrawberryContainer from strawberry.unset import UNSET from .ast_from_value import ast_from_value if TYPE_CHECKING: from graphql import ( GraphQLArgument, GraphQLEnumType, GraphQLEnumValue, GraphQLScalarType, GraphQLUnionType, ) from graphql.type.directives import GraphQLDirective from strawberry.field import StrawberryField from strawberry.schema import BaseSchema _T = TypeVar("_T") @dataclasses.dataclass class PrintExtras: directives: Set[str] = dataclasses.field(default_factory=set) types: Set[type] = dataclasses.field(default_factory=set) @overload def _serialize_dataclasses(value: Dict[_T, object]) -> Dict[_T, object]: ... @overload def _serialize_dataclasses(value: Union[List[object], Tuple[object]]) -> List[object]: ... @overload def _serialize_dataclasses(value: object) -> object: ... def _serialize_dataclasses(value): if dataclasses.is_dataclass(value): return dataclasses.asdict(value) if isinstance(value, (list, tuple)): return [_serialize_dataclasses(v) for v in value] if isinstance(value, dict): return {k: _serialize_dataclasses(v) for k, v in value.items()} return value def print_schema_directive_params( directive: GraphQLDirective, values: Dict[str, Any] ) -> str: params = [] for name, arg in directive.args.items(): value = values.get(name, arg.default_value) if value is UNSET: value = None else: ast = ast_from_value(_serialize_dataclasses(value), arg.type) value = ast and f"{name}: {print_ast(ast)}" if value: params.append(value) if not params: return "" return "(" + ", ".join(params) + ")" def print_schema_directive( directive: Any, schema: BaseSchema, *, extras: PrintExtras ) -> str: strawberry_directive = cast( StrawberrySchemaDirective, directive.__class__.__strawberry_directive__ ) schema_converter = schema.schema_converter gql_directive = schema_converter.from_schema_directive(directive.__class__) params = print_schema_directive_params( gql_directive, { schema.config.name_converter.get_graphql_name(f): getattr( directive, f.python_name or f.name, UNSET ) for f in strawberry_directive.fields }, ) printed_directive = print_directive(gql_directive, schema=schema) if printed_directive is not None: extras.directives.add(printed_directive) for field in strawberry_directive.fields: f_type = field.type while isinstance(f_type, StrawberryContainer): f_type = f_type.of_type if hasattr(f_type, "_type_definition"): extras.types.add(cast(type, f_type)) if hasattr(f_type, "_scalar_definition"): extras.types.add(cast(type, f_type)) if isinstance(f_type, EnumDefinition): extras.types.add(cast(type, f_type)) return f" @{gql_directive.name}{params}" def print_field_directives( field: Optional[StrawberryField], schema: BaseSchema, *, extras: PrintExtras ) 
-> str: if not field: return "" directives = ( directive for directive in field.directives if any( location in [Location.FIELD_DEFINITION, Location.INPUT_FIELD_DEFINITION] for location in directive.__strawberry_directive__.locations # type: ignore ) ) return "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) def print_argument_directives( argument: GraphQLArgument, *, schema: BaseSchema, extras: PrintExtras ) -> str: strawberry_type = argument.extensions.get("strawberry-definition") directives = strawberry_type.directives if strawberry_type else [] return "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) def print_args( args: Dict[str, GraphQLArgument], indentation: str = "", *, schema: BaseSchema, extras: PrintExtras, ) -> str: if not args: return "" # If every arg does not have a description, print them on one line. if not any(arg.description for arg in args.values()): return ( "(" + ", ".join( ( f"{print_input_value(name, arg)}" f"{print_argument_directives(arg, schema=schema, extras=extras)}" ) for name, arg in args.items() ) + ")" ) return ( "(\n" + "\n".join( print_description(arg, f" {indentation}", not i) + f" {indentation}" + print_input_value(name, arg) + print_argument_directives(arg, schema=schema, extras=extras) for i, (name, arg) in enumerate(args.items()) ) + f"\n{indentation})" ) def print_fields(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: from strawberry.schema.schema_converter import GraphQLCoreConverter fields = [] for i, (name, field) in enumerate(type_.fields.items()): strawberry_field = field.extensions and field.extensions.get( GraphQLCoreConverter.DEFINITION_BACKREF ) args = ( print_args(field.args, " ", schema=schema, extras=extras) if hasattr(field, "args") else "" ) fields.append( print_description(field, " ", not i) + f" {name}" + args + f": {field.type}" + print_field_directives(strawberry_field, schema=schema, extras=extras) + print_deprecated(field.deprecation_reason) ) return print_block(fields) def print_scalar( type_: GraphQLScalarType, *, schema: BaseSchema, extras: PrintExtras ) -> str: # TODO: refactor this strawberry_type = type_.extensions.get("strawberry-definition") directives = strawberry_type.directives if strawberry_type else [] printed_directives = "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) return ( print_description(type_) + f"scalar {type_.name}" + print_specified_by_url(type_) + printed_directives ).strip() def print_enum_value( name: str, value: GraphQLEnumValue, first_in_block, *, schema: BaseSchema, extras: PrintExtras, ) -> str: strawberry_type = value.extensions.get("strawberry-definition") directives = strawberry_type.directives if strawberry_type else [] printed_directives = "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) return ( print_description(value, " ", first_in_block) + f" {name}" + print_deprecated(value.deprecation_reason) + printed_directives ) def print_enum( type_: GraphQLEnumType, *, schema: BaseSchema, extras: PrintExtras ) -> str: strawberry_type = type_.extensions.get("strawberry-definition") directives = strawberry_type.directives if strawberry_type else [] printed_directives = "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) values = [ print_enum_value(name, value, not i, schema=schema, extras=extras) for i, (name, value) in 
enumerate(type_.values.items()) ] return ( print_description(type_) + f"enum {type_.name}" + printed_directives + print_block(values) ) def print_extends(type_, schema: BaseSchema) -> str: from strawberry.schema.schema_converter import GraphQLCoreConverter strawberry_type = type_.extensions and type_.extensions.get( GraphQLCoreConverter.DEFINITION_BACKREF ) if strawberry_type and strawberry_type.extend: return "extend " return "" def print_type_directives(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: from strawberry.schema.schema_converter import GraphQLCoreConverter strawberry_type = type_.extensions and type_.extensions.get( GraphQLCoreConverter.DEFINITION_BACKREF ) if not strawberry_type: return "" allowed_locations = ( [Location.INPUT_OBJECT] if strawberry_type.is_input else [Location.OBJECT] ) directives = ( directive for directive in strawberry_type.directives or [] if any( location in allowed_locations for location in directive.__strawberry_directive__.locations ) ) return "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) def _print_object(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: return ( print_description(type_) + print_extends(type_, schema) + f"type {type_.name}" + print_implemented_interfaces(type_) + print_type_directives(type_, schema, extras=extras) + print_fields(type_, schema, extras=extras) ) def _print_interface(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: return ( print_description(type_) + print_extends(type_, schema) + f"interface {type_.name}" + print_implemented_interfaces(type_) + print_type_directives(type_, schema, extras=extras) + print_fields(type_, schema, extras=extras) ) def print_input_value(name: str, arg: GraphQLArgument) -> str: default_ast = ast_from_value(arg.default_value, arg.type) arg_decl = f"{name}: {arg.type}" if default_ast: arg_decl += f" = {print_ast(default_ast)}" return arg_decl + print_deprecated(arg.deprecation_reason) def _print_input_object(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: from strawberry.schema.schema_converter import GraphQLCoreConverter fields = [] for i, (name, field) in enumerate(type_.fields.items()): strawberry_field = field.extensions and field.extensions.get( GraphQLCoreConverter.DEFINITION_BACKREF ) fields.append( print_description(field, " ", not i) + " " + print_input_value(name, field) + print_field_directives(strawberry_field, schema=schema, extras=extras) ) return ( print_description(type_) + f"input {type_.name}" + print_type_directives(type_, schema, extras=extras) + print_block(fields) ) def print_union( type_: GraphQLUnionType, *, schema: BaseSchema, extras: PrintExtras ) -> str: strawberry_type = type_.extensions.get("strawberry-definition") directives = strawberry_type.directives if strawberry_type else [] printed_directives = "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) types = type_.types possible_types = " = " + " | ".join(t.name for t in types) if types else "" return ( print_description(type_) + f"union {type_.name}{printed_directives}" + possible_types ) def _print_type(type_, schema: BaseSchema, *, extras: PrintExtras) -> str: # prevents us from trying to print a scalar as an input type if is_scalar_type(type_): return print_scalar(type_, schema=schema, extras=extras) if is_enum_type(type_): return print_enum(type_, schema=schema, extras=extras) if is_object_type(type_): return _print_object(type_, schema, extras=extras) if 
is_input_type(type_): return _print_input_object(type_, schema, extras=extras) if is_interface_type(type_): return _print_interface(type_, schema, extras=extras) if is_union_type(type_): return print_union(type_, schema=schema, extras=extras) return original_print_type(type_) def print_schema_directives(schema: BaseSchema, *, extras: PrintExtras) -> str: directives = ( directive for directive in schema.schema_directives if any( location in [Location.SCHEMA] for location in directive.__strawberry_directive__.locations # type: ignore ) ) return "".join( print_schema_directive(directive, schema=schema, extras=extras) for directive in directives ) def _all_root_names_are_common_names(schema: BaseSchema) -> bool: query = schema.query._type_definition mutation = schema.mutation._type_definition if schema.mutation else None subscription = schema.subscription._type_definition if schema.subscription else None return ( query.name == "Query" and (mutation is None or mutation.name == "Mutation") and (subscription is None or subscription.name == "Subscription") ) def print_schema_definition( schema: BaseSchema, *, extras: PrintExtras ) -> Optional[str]: # TODO: add support for description if _all_root_names_are_common_names(schema) and not schema.schema_directives: return None query_type = schema.query._type_definition operation_types = [f" query: {query_type.name}"] if schema.mutation: mutation_type = schema.mutation._type_definition operation_types.append(f" mutation: {mutation_type.name}") if schema.subscription: subscription_type = schema.subscription._type_definition operation_types.append(f" subscription: {subscription_type.name}") directives = print_schema_directives(schema, extras=extras) return f"schema{directives} {{\n" + "\n".join(operation_types) + "\n}" def print_directive( directive: GraphQLDirective, *, schema: BaseSchema ) -> Optional[str]: strawberry_directive = directive.extensions["strawberry-definition"] if ( isinstance(strawberry_directive, StrawberrySchemaDirective) and not strawberry_directive.print_definition ): return None return ( print_description(directive) + f"directive @{directive.name}" # TODO: add support for directives on arguments directives + print_args(directive.args, schema=schema, extras=PrintExtras()) + (" repeatable" if directive.is_repeatable else "") + " on " + " | ".join(location.name for location in directive.locations) ) def is_builtin_directive(directive: GraphQLDirective) -> bool: # this allows to force print the builtin directives if there's a # directive that was implemented using the schema_directive if is_specified_directive(directive): strawberry_definition = directive.extensions.get("strawberry-definition") return strawberry_definition is None return False def print_schema(schema: BaseSchema) -> str: graphql_core_schema = schema._schema # type: ignore extras = PrintExtras() directives = filter( lambda n: not is_builtin_directive(n), graphql_core_schema.directives ) type_map = graphql_core_schema.type_map types = filter(is_defined_type, map(type_map.get, sorted(type_map))) types_printed = [_print_type(type_, schema, extras=extras) for type_ in types] schema_definition = print_schema_definition(schema, extras=extras) directives = filter( None, [print_directive(directive, schema=schema) for directive in directives] ) def _name_getter(type_: Any): if hasattr(type_, "name"): return type_.name if isinstance(type_, ScalarWrapper): return type_._scalar_definition.name return type_.__name__ return "\n\n".join( chain( sorted(extras.directives), filter(None, 
[schema_definition]), directives, types_printed, ( _print_type( schema.schema_converter.from_type(type_), schema, extras=extras ) # Make sure extra types are ordered for predictive printing for type_ in sorted(extras.types, key=_name_getter) ), ) )
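A hedged usage sketch for the print_schema function defined in this module, using a minimal Strawberry schema; the Query type below is illustrative and not part of this file:

import strawberry

from strawberry.printer import print_schema


@strawberry.type
class Query:
    @strawberry.field
    def hello(self) -> str:
        return "world"


schema = strawberry.Schema(query=Query)
# Prints the SDL for the schema, including any schema directives and extra types
print(print_schema(schema))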
PypiClean
/ApiDoc-1.4.0.tar.gz/ApiDoc-1.4.0/apidoc/factory/source/element.py
import collections

from apidoc.object.source_raw import Sampleable, Displayable
from apidoc.lib.util.cast import to_boolean


class Element():
    """
    Populate Helper Factory
    """

    def set_common_datas(self, element, name, datas):
        """Populate common data for an element from dictionary datas
        """
        element.name = str(name)
        if "description" in datas:
            element.description = str(datas["description"]).strip()

        if isinstance(element, Sampleable) and element.sample is None and "sample" in datas:
            element.sample = str(datas["sample"]).strip()

        if isinstance(element, Displayable):
            if "display" in datas:
                element.display = to_boolean(datas["display"])

            if "label" in datas:
                element.label = datas["label"]
            else:
                element.label = element.name

    def create_dictionary_of_element_from_dictionary(self, property_name, datas):
        """Populate a dictionary of elements
        """
        response = {}
        if property_name in datas and datas[property_name] is not None and isinstance(datas[property_name], collections.Iterable):
            for key, value in datas[property_name].items():
                response[key] = self.create_from_name_and_dictionary(key, value)

        return response

    def create_list_of_element_from_dictionary(self, property_name, datas):
        """Populate a list of elements
        """
        response = []
        if property_name in datas and datas[property_name] is not None and isinstance(datas[property_name], list):
            for value in datas[property_name]:
                response.append(self.create_from_dictionary(value))

        return response

    def get_enum(self, property, enum, datas):
        """Factory enum type
        """
        str_property = str(datas[property]).lower()
        if str_property not in enum:
            raise ValueError("Unknown enum \"%s\" for \"%s\"." % (str_property, property))
        return enum(str_property)
PypiClean
/Lokai-0.3.tar.gz/Lokai-0.3/lokai/tool_box/tb_common/dates.py
#----------------------------------------------------------------------- """ Provides a set of date and datetime object generators that take string or date/datetime inputs and produce a date or datetime object. Particularly useful for interpreting a range of date/datetime string formats. """ #----------------------------------------------------------------------- # Purpose : Provides common date/time manipulation helper functions # ## %a Locale's abbreviated weekday name. ## %A Locale's full weekday name. ## %b Locale's abbreviated month name. ## %B Locale's full month name. ## %c Locale's appropriate date and time representation. ## %d Day of the month as a decimal number [01, 31]. ## %H Hour (24-hour clock) as a decimal number [00, 23]. ## %I Hour (12-hour clock) as a decimal number [01, 12]. ## %j Day of the year as a decimal number [001, 366]. ## %m Month as a decimal number [01, 12]. ## %M Minute as a decimal number [00, 59]. ## %p Locale's equivalent of either AM or PM. ## %S Second as a decimal number [00, 61]. ## %U Week number of the year (Sunday as the first day of the week). ## Given as a decimal number [00, 53]. All days in a new year ## preceding the first Sunday are considered to be in week 0. ## %w Weekday as a decimal number [0(Sunday), 6]. ## %W Week number of the year (Monday as the first day of the week). ## Given as a decimal number [00, 53]. All days in a new year ## preceding the first Monday are considered to be in week 0. ## %x Locale's appropriate date representation. ## %X Locale's appropriate time representation. ## %y Year without century as a decimal number [00, 99]. ## %Y Year with century as a decimal number. ## %Z Time zone name (no characters if no time zone exists). ## %% A literal "%" character. #----------------------------------------------------------------------- import re import datetime #----------------------------------------------------------------------- class DateError(Exception): """ General top level exception for this module""" pass class ErrorInDateString(DateError): """ Specific error converting from a string""" pass class InvalidDateObject(DateError): """ Specific error converting from a date object""" pass #----------------------------------------------------------------------- __all__ = [] # don't allow wildcards #----------------------------------------------------------------------- # (This is probably why date processing doesn't go public - so much # depends on translation and localisation) mnths = { 'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5, 'June':6, 'Jun':6, 'July':7, 'Jul':7, 'Aug':8, 'Sept':9, 'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12} output_formats = { 'default' : "%Y-%m-%d %H:%M", 'iso' : "%Y-%m-%dT%H:%M:%SZ", 'iso-compact' : "%Y%m%d%H%M%SZ", 'isodate' : "%Y-%m-%d", 'compact' : "%Y%m%d%H%M%S", 'compact-date' : "%Y%m%d", 'dmy' : "%d/%m/%y", 'dmyhm' : "%d/%m/%y %H:%M", 'long' : "%Y-%m-%d %H:%M:%S", 'jsdate' : "%a %d %b %Y", 'jsmins' : "%a %d %b %Y %H:%M", 'jsnoyear' : "%a %d %b", 'jsshort' : "%a %d", 'excel_csv' : "%d-%b-%y", } DATE_FORM_ISO = 'iso' DATE_FORM_ISO_COMPACT = 'iso-compact' DATE_FORM_ISODATE = 'isodate' DATE_FORM_COMPACT = 'compact' DATE_FORM_COMPACT_DATE = 'compact-date' DATE_FORM_DMY = 'dmy' DATE_FORM_DMYHM = 'dmyhm' DATE_FORM_LONG = 'long' DATE_FORM_JSDATE = 'jsdate' DATE_FORM_JSMINS = 'jsmins' DATE_FORM_JSNOYEAR = 'jsnoyear' DATE_FORM_JSSHORT = 'jsshort' DATE_FORM_EXCEL_CSV = 'excel_csv' ## YYYY-MM-DDTHH:MM:SSZ ## YYYY-M[M]-D[D] H[H]:M[M] ## YYYYMMDDHHMM[SS] ## YYYY-M[M]-D[D] ## YYYYMMDD ## Mon DD Jul[y] YY[YY] 
## Jul[y] D[D] HH:MM:SS YY[YY] GMT ## YYYY/MM/DD ## YYYY/MM/DD HH:MM:SS ## D[D]/M[M]/YY[YY] ## DDMMYY ## DD-Jul[y]-YY ## DD-Jul[y]-YYYY ## YYYY-M[M]-D[D] H[H]:M[M]:S[S] # Excel default output formats ## D[D]/M[M]/YY[YY] H[H]:M[M]:S[S] date_patterns = ( '^(\d{4,4})-(\d{2,2})-(\d{2,2})[T ](\d{2,2}):(\d{2,2}):(\d{2,2})Z?$' '|' '^(\d{4,4})-(\d{1,2})-(\d{1,2}) (\d{1,2}):(\d{1,2})$' '|' '^(\d{4,4})(\d{2,2})(\d{2,2})(\d{2,2})(\d{2,4})$' # fix - should have above format with and without seconds '|' '^(\d{4,4})-(\d{1,2})-(\d{1,2})$' '|' '^(\d{4,4})(\d{2,2})(\d{2,2})$' '|' '^([A-Za-z]{3}) (\d{2}) ([A-Za-z]{3,4}) (\d{2,4})$' '|' '^([A-Za-z]{3,4}) (\d{1,2}) (\d{2}:\d{2}:\d{2}) (\d{2,4}) GMT$' '|' '^(\d{4,4})/(\d{2,2})/(\d{2,2})$' '|' '^(\d{4,4})/(\d{2,2})/(\d{2,2}) (\d{2,2}):(\d{2,2}):(\d{2,2})$' '|' '^(\d{1,2})/(\d{1,2})/(\d{2,4})$' '|' '^(\d{2,2})(\d{2,2})(\d{2,2})$' '|' '^(\d{2,2})-([A-Za-z]{3,4})-(\d{2,4})$' '|' '^(\d{4,4})-(\d{1,2})-(\d{1,2}) (\d{1,2}):(\d{1,2}):(\d{1,2})$' '|' '^(\d{1,2})/(\d{1,2})/(\d{2,4}) (\d{1,2}):(\d{1,2}):(\d{1,2})$' '|' '^(\d{2,2})/(\d{2,2})/(\d{4,4}) (\d{2,2}):(\d{2,2})$' ) match_pattern = re.compile(date_patterns) #----------------------------------------------------------------------- def strtotime(string, force_time=None) : ''' Return a datetime.datetime or datetime.date object from the passed string force_time can toggle datetime object type: True = datetime.datetime False = datetime.date (explicit False) (hours, mins, seconds set to 0 where applicable) None = allow the string to define return object ''' # # if string == None: return None if isinstance(string, (datetime.date, datetime.datetime, datetime.timedelta)): # assume no conversion needed return string #dst = 0 # assume UTC hh = 0 # Defaults for non HHMMSS formats mm = 0 ss = 0 # generally we ignore seconds # # now convert the string source = string.strip() if len(source) == 0 : return None op = match_pattern.search(source) has_time = False if op : px = op.groups() if px[0] != None: ## YYYY-MM-DDTHH:MM:SSZ yy = int(px[0]) mt = int(px[1]) dd = int(px[2]) hh = int(px[3]) mm = int(px[4]) ss = int(px[5]) has_time = True elif px[6] != None: ## YYYY-M[M]-D[D] H[H]:M[M] yy = int(px[6]) mt = int(px[7]) dd = int(px[8]) hh = int(px[9]) mm = int(px[10]) has_time = True elif px[11] != None: ## YYYYMMDDHHMM[SS] yy = int(px[11]) mt = int(px[12]) dd = int(px[13]) hh = int(px[14]) if len(px[15]) == 2: mm = int(px[15]) else: mm = int(px[15][:2]) ss = int(px[15][2:]) has_time = True elif px[16] != None: ## YYYY-M[M]-D[D] yy = int(px[16]) mt = int(px[17]) dd = int(px[18]) elif px[19] != None: ## YYYYMMDD yy = int(px[19]) mt = int(px[20]) dd = int(px[21]) elif px[22] != None: ## Mon DD Jul[y] YY[YY] yy = int(px[25]) mt = mnths[px[24]] dd = int(px[23]) elif px[26] != None: ## Jul[y] D[D] HH:MM:SS YY[YY] GMT yy = int(px[29]) mt = mnths[px[26]] dd = int(px[27]) hh = int(px[28][0:2]) mm = int(px[28][3:5]) ss = int(px[28][6:8]) has_time = True elif px[30] != None or px[33] != None: ## YYYY/MM/DD [HH24:MM:SS] pxnum = 30 if px[33] != None: pxnum = 33 hh = int(px[36]) mm = int(px[37]) ss = int(px[38]) has_time = True yy = int(px[pxnum]) mt = int(px[pxnum+1]) dd = int(px[pxnum+2]) elif px[39] != None: ## D[D]/M[M]/YY[YY] yy = int(px[41]) mt = int(px[40]) dd = int(px[39]) elif px[42] != None: ## DDMMYY (must be 6 digits or would be seen as YYYYMMDD dd = int(px[42]) mt = int(px[43]) yy = int(px[44]) elif px[45] != None: ## DD-MMM[M]-YY dd = int(px[45]) mt = mnths[px[46]] yy = int(px[47]) elif px[48] != None: yy = int(px[48]) mt = 
int(px[49]) dd = int(px[50]) hh = int(px[51]) mm = int(px[52]) ss = int(px[53]) has_time = True elif px[54] != None: yy = int(px[56]) mt = int(px[55]) dd = int(px[54]) hh = int(px[57]) mm = int(px[58]) ss = int(px[59]) has_time = True elif px[60] != None: dd = int(px[60]) mt = int(px[61]) yy = int(px[62]) hh = int(px[63]) mm = int(px[64]) has_time = True #----------------------------------------- if yy < 100: ## 2 digit year passed yy += 2000 if yy < 1900: raise ValueError("Year must not be less than 1900") if force_time == True: tx = datetime.datetime(yy, mt, dd, hh, mm, ss) elif force_time == False: # Explicit False tx = datetime.date(yy, mt, dd) elif has_time: tx = datetime.datetime(yy, mt, dd, hh, mm, ss) else: tx = datetime.date(yy, mt, dd) return tx else : raise ErrorInDateString, string def reform(tx, i=None): """ convert any date string to a standard format """ return timetostr(strtotime(tx), i) def timetostr(var, i=None): """ Generate a string represetation of a date, datetime or float """ if var != None and var != '': if isinstance(var, (str, unicode)): # still a string. reconvert anyway t = strtotime(var) elif isinstance(var, (int, float)): t = datetime.datetime.utcfromtimestamp(var) elif isinstance(var, (datetime.date, datetime.datetime)): t = var else: print var, type(var) raise InvalidDateObject, "%s: %s" % (str(var), type(var)) if i in output_formats.keys(): return t.strftime(output_formats[i]) elif str(i).find('%') > -1: return t.strftime(i) else: return t.strftime(output_formats['default']) else: return "" def now(tz_info=None): """ Datetime now. See datetime module for use of tz_info """ return datetime.datetime.now(tz_info) def utcnow(): """ Return UTC time as a datetime object with empty tz data """ return datetime.datetime.utcnow() def yesterday(): """ Datetime yesterday """ return plus_days(datetime.datetime.now(), -1) def plus_days(curdate, numdays): """ Add a number of days to the given date/datetime """ dd = strtotime(curdate) result = dd + datetime.timedelta(days = numdays) return result def first_of_month(curdate): """ Set day = 1 for the given date/datetime """ date_source = strtotime(curdate) return date_source.replace(day = 1) def plus_months(curdate, num): """ Add a number of months to the given date/datetime """ tgt = strtotime(curdate) end_month_from_zero = tgt.month + num -1 year = tgt.year + int(end_month_from_zero/12) month = (end_month_from_zero)%12 + 1 return tgt.replace(year=year, month=month) def is_today(date): """ return result of comparing date with today """ date_obj = strtotime(date) today_obj = now() return (date_obj.year == today_obj.year and date_obj.month == today_obj.month and date_obj.day == today_obj.day) def end_of_day(date): """ return datetime with time set to 23:59:59 """ date_source = strtotime(date) return datetime.datetime.combine(date_source, datetime.time(23, 59, 59)) def force_date(date): """ Ensure that the given item is a date and not a date time. This done by truncating the value given and does not involve time zone conversion. 
""" date_source = strtotime(date) if isinstance(date_source, datetime.datetime): return datetime.date(date_source.year, date_source.month, date_source.day) return date_source #----------------------------------------------------------------------- COMMAND_LINE_DATE_STRING = ( "^(\d{4,4}|[Yy]{1,4})[/-]?(\d{1,2}|[Mm]{1,2})[/-]?(\d{2,4}|[Dd]{1,2})" "(?:([+-])(\d*)([YyMmDd]))?$" ) COMMAND_LINE_DATE_MATCH = re.compile(COMMAND_LINE_DATE_STRING) def date_parse(given_string): """ Parse the given string to produce a date object. Dates only. String is in two parts: a date specification and an optional offset. Date specification can contain replacement characters. These are replaced by the equivalent value from the current date. Dates are year month day, delimted by '/', '-' or nothing at all. Years are 4 digits, months and days one or two digits. Thus: 2006/03/19, 20060319, 2006-03-19 are all the same. Warning: 2006319 gives you 2006-31-09, so the format without a delimite can lead to problems. Offsets are + or - followed by a number and a unit. The units are: d = days m = months y = years Thus: 2006/3/1-1d takes 1 day from the 1st March to give 28 (29?) February 2006/1/1+3m takes you to the 2nd quarter of 2006 Substitution characters are: Y,y in the year position (1 to 4 allowed) M,m in the month position (1 or 2 allowed) D,d in the day position (1 or 2 allowed) Thus: ymd is today ym1 is the first of the current month y1225-1y is Christmas last year y/1/1 is the start of the current year """ if given_string is None: return None if isinstance(given_string, (datetime.date, datetime.datetime, datetime.timedelta)): # assume no conversion needed return given_string # now convert the string source = given_string.strip() if len(source) == 0 : return None op = COMMAND_LINE_DATE_MATCH.search(source) if op: now = datetime.date.today() this_year = now.year this_month = now.month this_day = now.day group_set = op.groups() # # Work out the basic date from the first three group items year_txt = group_set[0] month_txt = group_set[1] day_txt = group_set[2] if year_txt.lower()[0] == 'y': target_year = this_year else: try: target_year = int(year_txt) except ValueError: raise ErrorInDateString, given_string if month_txt.lower()[0] == 'm': target_month = this_month else: try: target_month = int(month_txt) except ValueError: raise ErrorInDateString, given_string if day_txt.lower()[0] == 'd': target_day = this_day else: try: target_day = int(day_txt) except ValueError: raise ErrorInDateString, given_string # # Set up a possible return result = datetime.date(target_year, target_month, target_day) # # Check if offset exists, and then process if group_set[3]: operation = group_set[3] try: offset = int(group_set[4]) except ValueError: raise ErrorInDateString, given_string if operation == '-': offset = -offset step_size = group_set[5].lower() if step_size == 'y': result = result.replace(year=(target_year + offset)) elif step_size == 'm': result = plus_months(result, offset) else: result = plus_days(result, offset) return result else: raise ErrorInDateString, given_string #-----------------------------------------------------------------------
PypiClean
/CDF-0.32.tar.gz/CDF-0.32/cdf/istp/interface.py
# Stock Python modules. import os import os.path import shutil import sys import tempfile # cdf extension modules. from .. import interface as cdf from .. import internal from .. import typing ### # TODO: The complex interactions of the various strategies are not well # documented and are in fact quite fragile. The system evolved as I # tried to encode all the information about when various ISTP attributes # are actually required and what they are required to be. A more studied # approach to encoding this data might add clarity. ### # This error indicates that user input is required to fill in some # missing data. class InferenceError(Exception): pass # This error indicates that user input is required to resolve # ambiguous contradictory data. class RedundancyError(Exception): pass # This error is for internal use, and indicates to the autofilling # function that there is at this moment insufficient data to # guess the right value. There may be sufficient data if we retry # later. class _MissingPrerequisite(Exception): pass # This error is for internal use, and indicates to the autofilling # function that although this var is listed as being potentially # required, we have determined that it is not. class _NotRequired(Exception): pass # This error is for internal use, and is not an error at all. It # indicates to the autofulling function that this var has been # inferred correctly and that its requirement is met. class _InferenceSuccessful(Exception): pass class fillStrategy: def __call__(self, archive, attr, var = None): return NotImplemented class userInput(fillStrategy): def __call__(self, archive, attr, var = None): if var is not None: if attr not in archive[var].attributes: raise InferenceError else: if attr not in archive.attributes: raise InferenceError class defaultValue(fillStrategy): def __init__(self, value): self._value = value def __call__(self, archive, attr, var = None): if var is not None: if attr not in archive[var].attributes: archive[var].attributes[attr] = self._value else: if attr not in archive.attributes: archive.attributes[attr] = self._value class autoIncrement(fillStrategy): def __init__(self, value, step = 1): self._value = value self._step = step def __call__(self, archive, attr, var = None): if var is not None: if attr in archive[var].attributes: archive[var].attributes[attr] += self._step else: archive[var].attributes[attr] = self._value else: if attr in archive.attributes: archive.attributes[attr] += self._step else: archive.attributes[attr] = self._value class selectFromList(fillStrategy): def __init__(self, choices, default = None): self._choices = choices self._default = default def __call__(self, archive, attr, var = None): if var is not None: if attr in archive[var].attributes: archive[var].attributes[attr] += self._step else: archive[var].attributes[attr] = self._value else: if attr in archive.attributes: archive.attributes[attr] += self._step else: archive.attributes[attr] = self._value class archiveName(fillStrategy): def __call__(self, archive, attr, var = None): filename = archive._filenames[-1] if var is not None: if attr not in archive[var].attributes: archive[var].attributes[attr] = filename else: if attr not in archive.attributes: archive.attributes[attr] = filename class varName(fillStrategy): def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] = var class primaryDataOnly(fillStrategy): def __call__(self, archive, attr, var): if attr not in archive[var].attributes: var_type = 
archive[var].attributes.get('VAR_TYPE', None) if var_type == 'data': raise InferenceError elif var_type is None: raise _MissingPrerequisite else: raise _NotRequired class fillValStrategy(fillStrategy): fillvals = { internal.CDF_CHAR: '.', internal.CDF_BYTE: -128, internal.CDF_UINT1: 255, internal.CDF_UINT2: 65535, internal.CDF_UINT4: 4294967295, internal.CDF_INT1: -128, internal.CDF_INT2: -32768, internal.CDF_INT4: -2147483648, internal.CDF_REAL4: -1.0*10**31, internal.CDF_REAL8: -1.0*10**31, internal.CDF_EPOCH: '31-Dec-9999 23:59:59.999', internal.CDF_EPOCH16: '31-Dec-9999 23:59:59.999', } def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] \ = self.fillvals[typing._typeConversions[archive[var]._dtype.type]] class formatStrategy(fillStrategy): formats = { internal.CDF_CHAR: '%s', internal.CDF_BYTE: '%c', internal.CDF_UINT1: '%u', internal.CDF_UINT2: '%u', internal.CDF_UINT4: '%lu', internal.CDF_INT1: '%d', internal.CDF_INT2: '%d', internal.CDF_INT4: '%ld', internal.CDF_REAL4: '%f', internal.CDF_REAL8: '%Lf', internal.CDF_EPOCH: '%s', internal.CDF_EPOCH16: '%s', } def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] \ = self.formats[typing._typeConversions[archive[var]._dtype.type]] raise _InferenceSuccessful class validminStrategy(fillStrategy): fillvals = { internal.CDF_CHAR: '.', internal.CDF_BYTE: -128, internal.CDF_UINT1: 0, internal.CDF_UINT2: 0, internal.CDF_UINT4: 0, internal.CDF_INT1: -128, internal.CDF_INT2: -32768, internal.CDF_INT4: -2147483648, internal.CDF_REAL4: -1.0*10**31, internal.CDF_REAL8: -1.0*10**31, internal.CDF_EPOCH: '01-Jan-0000 00:00:00.000', internal.CDF_EPOCH16: '01-Jan-0000 00:00:00.000', } def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] \ = self.fillvals[typing._typeConversions[archive[var]._dtype.type]] raise _InferenceSuccessful class validmaxStrategy(fillStrategy): fillvals = { internal.CDF_CHAR: '.', internal.CDF_BYTE: 127, internal.CDF_UINT1: 255, internal.CDF_UINT2: 65535, internal.CDF_UINT4: 4294967295, internal.CDF_INT1: 127, internal.CDF_INT2: 32767, internal.CDF_INT4: 2147483647, internal.CDF_REAL4: 1.0*10**31, internal.CDF_REAL8: 1.0*10**31, internal.CDF_EPOCH: '31-Dec-9999 23:59:59.999', internal.CDF_EPOCH16: '31-Dec-9999 23:59:59.999', } def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] \ = self.fillvals[typing._typeConversions[archive[var]._dtype.type]] raise _InferenceSuccessful class varTypeStrategy(fillStrategy): def __call__(self, archive, attr, var): if attr not in archive[var].attributes: archive[var].attributes[attr] = 'support_data' class notRequired(fillStrategy): def __call__(self, *args, **kwargs): raise _NotRequired class required(fillStrategy): def __init__(self, attr = None): self._attr = attr def __call__(self, archive, attr, var = None): if self._attr is not None: attr = self._attr if var is not None: if attr not in archive[var].attributes: raise _MissingPrerequisite else: if attr not in archive.attributes: raise _MissingPrerequisite raise _NotRequired # The contents of this attr must refer to an existing var # in the archive. class refersToVariable(fillStrategy): def __init__(self, attr = None): self._attr = attr def __call__(self, archive, attr, var): if attr not in archive[var].attributes: # The attr does not exist. # If we have a default value, assign it. 
if self._attr is not None: # Remember, it must be a valid variable. if self._attr in archive: archive[var].attributes[attr] = self._attr # Otherwise, this is an error. else: raise cdf.CoherenceError # Otherwise, this is an error. else: raise InferenceError elif archive[var].attributes[attr] not in archive: # The attr exists but is not valid. raise cdf.CoherenceError return True class timeSeriesStrategy(fillStrategy): def __call__(self, archive, attr, var): var_type = archive[var].attributes.get('VAR_TYPE', None) if var_type is None: raise _MissingPrerequisite elif var_type == 'ignore_data' or var_type == 'support_data': raise _NotRequired else: display_type = archive[var].attributes.get('DISPLAY_TYPE', None) if display_type == 'time_series': refersToVariable()(archive, attr, var) else: raise _NotRequired class dimensionStrategy(fillStrategy): def __init__(self, dim, strategy): self._dim = dim self._strategy = strategy def __call__(self, archive, attr, var): # Determine if this strategy applies. if len(archive[var]._dimSizes) >= self._dim: # Call secondary strategy. return self._strategy(archive, attr, var) else: raise _NotRequired class one_of(fillStrategy): def __init__(self, *args): self._strategies = args[:] def __call__(self, archive, attr, var): # Call strategies one by one until something succeeds. # If nothing succeeds, return the least traumatic exception we saw. exceptions = [] for strategy in self._strategies: try: return strategy(archive, attr, var) except _MissingPrerequisite as e: # Pretty mild, really. exceptions.insert(0, e) except InferenceError as e: exceptions.append(e) except RedundancyError as e: exceptions.append(e) except cdf.CoherenceError as e: exceptions.append(e) # Do not trap _NotRequired or _InferenceSucceeded, as # these will be used at a higher level and are close # enough to success that we need not try any other cases. 
if len(exceptions) > 0: raise exceptions[0] else: raise InferenceError attributes = { 'global':{ 'required':{ 'Project': userInput(), 'Source_name': userInput(), 'Discipline': userInput(), 'Data_type': userInput(), 'Descriptor': userInput(), 'Data_version': autoIncrement(1), 'Logical_file_id': archiveName(), 'PI_name': userInput(), 'PI_affiliation': userInput(), 'TEXT': userInput(), 'Instrument_type': userInput(), 'Mission_group': userInput(), 'Logical_source': userInput(), 'Logical_source_description': userInput(), }, 'recommended':[ 'Acknowledgement', 'ADID_ref', 'Generated_by', 'Generation_date', 'HTTP_LINK', 'LINK_TEXT', 'LINK_TITLE', 'MODS', 'Rules_of_use', 'Time_resolution', ], 'optional':[ 'Parents', 'Skeleton_version', 'Software_version', 'TITLE', 'Validate', ], }, 'var':{ 'required':{ 'CATDESC': varName(), 'DEPEND_0': timeSeriesStrategy(), 'DEPEND_1': dimensionStrategy(1, refersToVariable()), 'DEPEND_2': dimensionStrategy(2, refersToVariable()), 'DEPEND_3': dimensionStrategy(3, refersToVariable()), 'DISPLAY_TYPE': notRequired(), 'FIELDNAM': varName(), 'FILLVAL': fillValStrategy(), 'FORMAT': one_of( refersToVariable('FORM_PTR'), formatStrategy()), 'FORM_PTR': one_of( refersToVariable(), required('FORMAT')), 'LABLAXIS': varName(), 'LABL_PTR_1': one_of( required('LABLAXIS'), refersToVariable()), 'LABL_PTR_2': notRequired(), 'LABL_PTR_3': notRequired(), 'UNITS': defaultValue(' '), 'UNIT_PTR': one_of( required('UNITS'), refersToVariable()), 'VALIDMIN': validminStrategy(), 'VALIDMAX': validmaxStrategy(), 'VAR_TYPE': varTypeStrategy(), }, 'recommended':[ 'SCALETYP', 'SCAL_PTR', 'VAR_NOTES', ], 'optional':[ 'AVG_TYPE', 'DELTA_PLUS_VAR', 'DELTA_MINUS_VAR', 'DICT_KEY', 'MONOTON', 'SCALEMIN', 'SCALEMAX', 'V_PARENT', 'DERIVN', 'sig_digits', 'SI_conv', ] }, } # Attempt to fill in attributes/variables of the target archive based # on the contents of the skeleton file. def autofill(arc, skt): try: if skt is not None: sys.path.append(os.path.dirname(skt)) dir = tempfile.mkdtemp() sktfile = os.path.join(dir, 'sktfile.py') shutil.copy(skt, sktfile) sys.path.append(dir) import sktfile skeleton = sktfile.skeleton else: skeleton = {'variables':{}, 'attributes':{'global':{}, 'variable':{}}} # Fill in static variables for var in skeleton['variables']: if var not in arc: arc[var] = skeleton['variables'][var] # Fill in global attributes required = attributes['global']['required'].keys() retry = [] while len(required) > 0: for attr in required: try: if attr not in arc.attributes: if attr in skeleton['attributes']['global']: arc.attributes[attr] = skeleton['attributes']['global'][attr] else: attributes['global']['required'][attr](arc, attr) if attr not in arc.attributes: raise InferenceError except InferenceError: raise InferenceError('Unable to infer value of ' + 'global attr "' + str(attr) + '"') except _MissingPrerequisite: retry.append(attr) except _NotRequired: # Good enough. pass except _InferenceSuccessful: # Perfect! pass if len(required) == len(retry): # This pass has resolved nothing, abort. 
raise InferenceError('Unable to infer value of ' + 'global attr "' + str(retry[0]) + '"') required = retry retry = [] # Fill in per-variable attributes for var in arc: required = attributes['var']['required'].keys() retry = [] while len(required) > 0: for attr in required: try: if attr not in arc[var].attributes: if var in skeleton['attributes']['variable'] \ and attr in skeleton['attributes']['variable'][var]: arc[var].attributes[attr] \ = skeleton['attributes']['variable'][var][attr] else: attributes['var']['required'][attr]( arc, attr, var) if attr not in arc[var].attributes: raise InferenceError except InferenceError: raise InferenceError('Unable to infer value of "' + str(attr) + '" for var "' + str(var) + '"') except _MissingPrerequisite: retry.append(attr) except _NotRequired: # Good enough. pass except _InferenceSuccessful: # Perfect! pass if len(required) == len(retry): # This pass has resolved nothing, abort. raise InferenceError('Unable to infer value of "' + str(attr) + '" for var "' + str(var) + '"') required = retry retry = [] finally: sys.path.pop() sys.path.pop() # Attempt to ensure that certain expectations of a well-formed ISTP CDFs # are met. def verify(arc): for var in arc.keys(): dimensions = {} for attr in arc[var].attributes.keys(): if attr.startswith('DEPEND_'): # If the variable has DEPEND_x set, it had better have at # least x dimensions. Furthermore, in the x dimension, # it had better have the same number of values as the # series it depends on. dimension = max(1, int(attr.split('_')[1])) dimensions[dimension] = arc[var].attributes[attr] for dimension in dimensions: depends = dimensions[dimension] if depends in arc: data = arc[var] for i in xrange(1, dimension): data = data[i] if len(data) <= len(arc[depends]): continue else: raise cdf.CoherenceError('Variable "' + var + '", length ' + str(len(arc[var])) + ' depends on "' + depends + '", length ' + str(len(arc[depends]))) else: raise cdf.CoherenceError if len(dimensions.keys()) != max(dimensions.keys() + [0]): raise cdf.CoherenceError(str(dimensions)) class archive(cdf.archive): def __init__(self, *args, **kwargs): if 'skeleton' in kwargs: self._skeleton = kwargs['skeleton'] del kwargs['skeleton'] else: self._skeleton = None cdf.archive.__init__(self, *args, **kwargs) def _save(self): autofill(self, self._skeleton) cdf.archive._save(self) verify(self)
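#-----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the simple
# fill strategies behave.  A tiny stand-in object with an ``attributes``
# dict is used instead of a real cdf.archive, purely to show the calling
# convention that autofill() relies on for global attributes.
if __name__ == '__main__':
    class _FakeArchive(object):
        def __init__(self):
            self.attributes = {}

    fake = _FakeArchive()

    # defaultValue() only fills the attribute when it is missing.
    defaultValue('My project')(fake, 'Project')
    assert fake.attributes['Project'] == 'My project'

    # autoIncrement() seeds the value on first use and steps it afterwards.
    autoIncrement(1)(fake, 'Data_version')
    autoIncrement(1)(fake, 'Data_version')
    assert fake.attributes['Data_version'] == 2

    # userInput() raises InferenceError while the attribute is absent,
    # signalling that autofill() cannot guess the value on its own.
    try:
        userInput()(fake, 'PI_name')
    except InferenceError:
        pass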
PypiClean
/EdaSpiffWorkflow-0.0.2.tar.gz/EdaSpiffWorkflow-0.0.2/EdaSpiffWorkflow_Aadesh_G/util/weakmethod.py
from builtins import object # # DO NOT EDIT THIS FILE. # THIS CODE IS TAKE FROM Exscript.util: # https://github.com/knipknap/exscript/tree/master/src/Exscript/util # # Copyright (C) 2007-2010 Samuel Abels. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Weak references to bound and unbound methods. """ import weakref class DeadMethodCalled(Exception): """ Raised by :class:`WeakMethod` if it is called when the referenced object is already dead. """ pass class WeakMethod(object): """ Do not create this class directly; use :class:`ref()` instead. """ __slots__ = 'name', 'callback' def __init__(self, name, callback): """ Constructor. Do not use directly, use :class:`ref()` instead. """ self.name = name self.callback = callback def _dead(self, ref): if self.callback is not None: self.callback(self) def get_function(self): """ Returns the referenced method/function if it is still alive. Returns None otherwise. :rtype: callable|None :returns: The referenced function if it is still alive. """ raise NotImplementedError() def isalive(self): """ Returns True if the referenced function is still alive, False otherwise. :rtype: bool :returns: Whether the referenced function is still alive. """ return self.get_function() is not None def __call__(self, *args, **kwargs): """ Proxied to the underlying function or method. Raises :class:`DeadMethodCalled` if the referenced function is dead. :rtype: object :returns: Whatever the referenced function returned. """ method = self.get_function() if method is None: raise DeadMethodCalled('method called on dead object ' + self.name) method(*args, **kwargs) class _WeakMethodBound(WeakMethod): __slots__ = 'name', 'callback', 'f', 'c' def __init__(self, f, callback): name = f.__self__.__class__.__name__ + '.' + f.__func__.__name__ WeakMethod.__init__(self, name, callback) self.f = f.__func__ self.c = weakref.ref(f.__self__, self._dead) def get_function(self): cls = self.c() if cls is None: return None return getattr(cls, self.f.__name__) class _WeakMethodFree(WeakMethod): __slots__ = 'name', 'callback', 'f' def __init__(self, f, callback): WeakMethod.__init__(self, f.__class__.__name__, callback) self.f = weakref.ref(f, self._dead) def get_function(self): return self.f() def ref(function, callback=None): """ Returns a weak reference to the given method or function. If the callback argument is not None, it is called as soon as the referenced function is garbage deleted. :type function: callable :param function: The function to reference. :type callback: callable :param callback: Called when the function dies. """ try: function.__func__ except AttributeError: return _WeakMethodFree(function, callback) return _WeakMethodBound(function, callback)
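# Illustrative usage sketch (not part of the original module).  ref()
# hands back a WeakMethod wrapper; once the owning object is garbage
# collected, the optional callback fires and further calls raise
# DeadMethodCalled.  The prompt collection below relies on CPython's
# reference counting.
if __name__ == '__main__':
    class _Greeter(object):
        def greet(self):
            self.called = True

    def _on_dead(weak):
        # 'weak' is the WeakMethod whose target has just died.
        _on_dead.fired = True
    _on_dead.fired = False

    greeter = _Greeter()
    weak_greet = ref(greeter.greet, _on_dead)

    assert weak_greet.isalive()
    weak_greet()                      # proxied through to greeter.greet()
    assert greeter.called

    del greeter                       # drop the only strong reference
    assert _on_dead.fired
    assert not weak_greet.isalive()
    try:
        weak_greet()
    except DeadMethodCalled:
        pass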
PypiClean
/cowsuper-0.0.2-py3-none-any.whl/cowsuper/examples/datasets/generate_weak_signals.ipynb
``` import numpy as np import pandas as pd import nltk import re import matplotlib.pyplot as plt from json import JSONDecoder from functools import partial import json from pprint import pprint from bs4 import BeautifulSoup from nltk.tokenize import WordPunctTokenizer from nltk.corpus import stopwords from nltk.stem import PorterStemmer, LancasterStemmer import mxnet as mx ``` ## DATA & PRE-PROCESSING ``` # Preprocessing steps stemmer = LancasterStemmer() def decodeHTMLencoding(tweets): decoded_tweets = tweets.applymap(lambda tweet: BeautifulSoup(tweet, 'lxml').get_text()) return decoded_tweets def removeStopWords(text): stopw = stopwords.words('english') words = [word for word in text.split() if len(word) > 3 and not word in stopw] # get stems from words for i in range(len(words)): words[i] = stemmer.stem(words[i]) return (" ".join(words)).strip() def cleanTweets(tweets): # decode tweets from html tags cleaned_tweets = decodeHTMLencoding(tweets) # remove URLs that starts with http cleaned_tweets = cleaned_tweets.applymap(lambda tweet: re.sub( r'https?:\/\/(www\.)?[-a-zA-Z0–9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0–9@:%_\+.~#?&//=]*)', '', tweet, flags=re.MULTILINE) ) # remove URLs that does not start with http cleaned_tweets = cleaned_tweets.applymap(lambda tweet: re.sub( r'[-a-zA-Z0–9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0–9@:%_\+.~#?&//=]*)', '', tweet, flags=re.MULTILINE)) # remove @ cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'@[A-Za-z0-9_]+', '', tweet, flags=re.MULTILINE) ) # remove # cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'#[A-Za-z0-9_]+', '', tweet, flags=re.MULTILINE) ) # remove RT cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub('RT ', '', tweet, flags=re.MULTILINE) ) # remove symbols and numbers (i.e keep letters only) cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub("[^a-zA-Z]"," ",tweet, flags=re.MULTILINE) ) #replace consecutive non-ASCII characters with a space cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'[^\x00-\x7F]+'," ",tweet.lower(), flags=re.MULTILINE) ) cleaned_tweets.drop_duplicates(inplace=True) cleaned_tweets.replace('', np.nan, inplace=True) cleaned_tweets.dropna(inplace=True) return cleaned_tweets def get_text_vectors(tweets, model): # dataset should be a pandas dataframe dimension = 300 data_array = np.empty(shape=[0, dimension]) indexes = [] for i, tweet in enumerate(tweets): words = tweet.split() if len(words) !=0: feature = 0 for word in words: try: feature += model[word] except: pass feature /= len(words) try: if feature.size == dimension: data_array = np.append(data_array, [feature], axis=0) indexes.append(i) except: continue indexes = np.asarray(indexes) assert indexes.size == data_array.shape[0] return data_array, indexes def remove_indices(weak_signals): # remove indexes of weak_signals that do not have coverage indices = np.where(np.sum(weak_signals, axis=1) == -1*weak_signals.shape[1])[0] weak_signals = np.delete(weak_signals, indices, axis=0) return weak_signals, indices df = pd.read_csv('../datasets/glove.42B.300d.txt', sep=" ", quoting=3, header=None, index_col=0) glove_model = {key: val.values for key, val in df.T.items()} # test word vectors from scipy import spatial result = 1 - spatial.distance.cosine(glove_model['horrible'], glove_model['terrible']) result def keyword_labeling(data, keywords, sentiment='pos'): mask = 1 if sentiment == 'pos' else 0 weak_signals = [] for terms in keywords: weak_signal = [] for text in data: label=-1 for word in 
terms: if word in text.lower(): label = mask weak_signal.append(label) weak_signals.append(weak_signal) return np.asarray(weak_signals).T POSITIVE_LABELS = [['good','great','nice','delight','wonderful'], ['love', 'best', 'genuine','well', 'thriller'], ['clever','enjoy','fine','deliver','fascinating'], ['super','excellent','charming','pleasure','strong'], ['fresh','comedy', 'interesting','fun','entertain', 'charm', 'clever'], ['amazing','romantic','intelligent','classic','stunning'], ['rich','compelling','delicious', 'intriguing','smart']] NEGATIVE_LABELS = [['bad','better','leave','never','disaster'], ['nothing','action','fail','suck','difficult'], ['mess','dull','dumb', 'bland','outrageous'], ['slow', 'terrible', 'boring', 'insult','weird','damn'], ['drag','awful','waste', 'flat','worse'], #['drag','no','not','awful','waste', 'flat'], ['horrible','ridiculous','stupid', 'annoying','painful'], ['poor','pathetic','pointless','offensive','silly']] ``` # YELP ``` datapath = '../datasets/yelp/' size = 10000 review = pd.read_json(datapath+'yelp_review.json', lines=True, dtype={'review_id':str,'user_id':str, 'business_id':str,'stars':int, 'date':str,'text':str,'useful':int, 'funny':int,'cool':int}, chunksize=size) # There are multiple chunks to be read count=0 chunk_list = [] for chunk_review in review: # Drop columns that aren't needed chunk_review = chunk_review.drop(['review_id','user_id','useful','funny','cool','business_id','date'], axis=1) chunk_list.append(chunk_review) count +=1 if count==6: break # After trimming down the review file, concatenate all relevant data back to one dataframe df = pd.concat(chunk_list, ignore_index=True, join='outer', axis=0) csv_name = datapath+"yelp_reviews.csv" df.to_csv(csv_name, index=False) df.head() positive_labels = keyword_labeling(df.text.values, POSITIVE_LABELS, sentiment='pos') negative_labels = keyword_labeling(df.text.values, NEGATIVE_LABELS, sentiment='neg') weak_signals = np.hstack([positive_labels, negative_labels]) weak_signals, indices = remove_indices(weak_signals) weak_signals.shape df = df.drop(df.index[indices]) df.reset_index(drop=True, inplace=True) train_data = df.text.values train_labels = np.zeros(df.shape[0]) train_labels[df.stars.values >3]=1 train_data = cleanTweets(df.drop(columns=['stars'])) train_labels = train_labels[train_data.index] weak_signals = weak_signals[train_data.index] train_data.shape, train_labels.shape train_features, train_index = get_text_vectors(train_data.values.ravel(), glove_model) train_features.shape, train_index.shape # get test data np.random.seed(5000) test_indexes = np.random.choice(train_index.size, 10000, replace=False) test_labels = train_labels[test_indexes] test_data = train_features[test_indexes] train_data = np.delete(train_features, test_indexes, axis=0) weak_signals = np.delete(weak_signals, test_indexes, axis=0) train_labels = np.delete(train_labels, test_indexes) train_data.shape,train_labels.shape,weak_signals.shape,test_labels.shape # save the weak_signals signals np.save(datapath+'weak_signals.npy', weak_signals) # save yelp data np.save(datapath+'data_features.npy', train_data) np.save(datapath+'test_features.npy', test_data) # save yelp labels np.save(datapath+'data_labels.npy', train_labels) np.save(datapath+'test_labels.npy', test_labels) train_data.shape,train_labels.shape,weak_signals.shape,test_labels.shape ``` # SST-2 ``` datapath = '../datasets/sst-2/' train_data = pd.read_csv(datapath+'sst2-train.csv') test_data = pd.read_csv(datapath+'sst2-test.csv') train_data.head() 
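# Quick sanity check (illustrative; not part of the original notebook).
# keyword_labeling() emits one column per keyword group: 1 for a positive
# hit, 0 for a negative hit, and -1 when the signal abstains on that text.
_toy_texts = ["a wonderful and charming movie", "a dull and boring mess"]
print(keyword_labeling(_toy_texts, [['wonderful'], ['charming']], sentiment='pos'))
print(keyword_labeling(_toy_texts, [['dull', 'boring']], sentiment='neg'))
# rows = texts, columns = keyword groups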
NEGATIVE_LABELS = [['bad','better','leave','never','disaster'], ['nothing','action','fail','suck','difficult'], ['mess','dull','dumb', 'bland','outrageous'], ['slow', 'terrible', 'boring', 'insult','weird','damn'], # ['drag','awful','waste', 'flat','worse'], ['drag','no','not','awful','waste', 'flat'], ['horrible','ridiculous','stupid', 'annoying','painful'], ['poor','pathetic','pointless','offensive','silly']] positive_labels = keyword_labeling(train_data.sentence.values, POSITIVE_LABELS) negative_labels = keyword_labeling(train_data.sentesnce.values, NEGATIVE_LABELS, sentiment='neg') weak_signals = np.hstack([positive_labels, negative_labels]) weak_signals.shape weak_signals, indices = remove_indices(train_data, weak_signals) weak_signals.shape train_labels = train_data.label.values test_labels = test_data.label.values n,m = weak_signals.shape weak_signal_probabilities = weak_signals.T.reshape(m,n,1) weak_signals_mask = weak_signal_probabilities >=0 from model_utilities import get_error_bounds true_error_rates = get_error_bounds(train_labels, weak_signal_probabilities, weak_signals_mask) print("error: ", np.asarray(true_error_rates)) # Clean data and reset index train_data.reset_index(drop=True, inplace=True) # apply on train data train_data = cleanTweets(train_data.drop(columns=['label'])) train_data = post_process_tweets(train_data) # apply on test data test_data = cleanTweets(test_data.drop(columns=['label'])) test_data = post_process_tweets(test_data) print(train_data[0].shape, train_labels.shape) print(test_data[0].shape, test_labels.shape) train_features, train_index = get_text_vectors(train_data[0].values.ravel(), glove_model) test_features, test_index = get_text_vectors(test_data[0].values.ravel(), glove_model) # save sst-2 data np.save(datapath+'data_features.npy', train_features) np.save(datapath+'test_features.npy', test_features) indexes = train_data[1] indexes = indexes[train_index] # save sst-2 labels np.save(datapath+'data_labels.npy', train_labels[indexes]) np.save(datapath+'test_labels.npy', test_labels[test_data[1]]) # save the one-hot signals np.save(datapath+'weak_signals.npy', weak_signals[indexes]) ``` # IMDB Dataset ``` datapath = '../datasets/imdb/' df = pd.read_csv(datapath+'IMDB Dataset.csv') # apply on train data cleaned_data = cleanTweets(df.drop(columns=['sentiment'])) indexes = cleaned_data.index.values df.shape, indexes.size n = indexes.size # get test data np.random.seed(50) test_indexes = np.random.choice(indexes, int(n*0.2), replace=False) test_labels = np.zeros(test_indexes.size) test_labels[df.sentiment.values[test_indexes]=='positive'] = 1 test_data = df.review.values[test_indexes] train_indexes = np.delete(indexes, [np.where(indexes == i)[0][0] for i in test_indexes]) train_labels = np.zeros(train_indexes.size) train_labels[df.sentiment.values[train_indexes]=='positive'] = 1 train_data = df.review.values[train_indexes] print(train_data.shape, train_labels.shape) print(test_data.shape, test_labels.shape) positive_labels = keyword_labeling(train_data, [['good'],['wonderful'],['great'],['amazing'],['excellent']], sentiment='pos') negative_labels = keyword_labeling(train_data, [['bad'],['horrible'],['sucks'],['awful'],['terrible']], sentiment='neg') weak_signals = np.hstack([positive_labels, negative_labels]) weak_signals, indices = remove_indices(weak_signals) weak_signals.shape # add signals not covered to test data test_data = np.append(test_data, train_data[indices]) test_labels = np.append(test_labels, train_labels[indices]) # delete train data not 
covered by weak signals train_data = np.delete(train_data, indices, axis=0) train_labels = np.delete(train_labels, indices) # get data features train_features, train_index = get_text_vectors(train_data, glove_model) test_features, test_index = get_text_vectors(test_data, glove_model) print(train_index.size, train_data.shape[0]) test_index.size, test_labels.size # save imdb data np.save(datapath+'data_features.npy', train_features) np.save(datapath+'test_features.npy', test_features) # save imdb labels np.save(datapath+'data_labels.npy', train_labels[train_index]) np.save(datapath+'test_labels.npy', test_labels[test_index]) # save the weak_signals np.save(datapath+'weak_signals.npy', weak_signals[train_index]) ```
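A quick way to sanity-check the exported arrays is to reload them and look at per-signal coverage and accuracy. The snippet below is an illustrative sketch, not part of the original pipeline: the file names follow the `np.save` calls above (IMDB shown), and the coverage/accuracy definitions are assumptions added here (a signal is taken to abstain when it outputs -1).

```
datapath = '../datasets/imdb/'
weak_signals = np.load(datapath + 'weak_signals.npy')
labels = np.load(datapath + 'data_labels.npy')

# A signal abstains on an example when it outputs -1.
covered = weak_signals != -1
coverage = covered.mean(axis=0)

# Accuracy of each signal, measured only on the examples it covers.
accuracy = [(weak_signals[covered[:, j], j] == labels[covered[:, j]]).mean()
            for j in range(weak_signals.shape[1])]
print(coverage)
print(accuracy)
```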
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/charting/widget/Chart.js.uncompressed.js
define("dojox/charting/widget/Chart", ["dojo/_base/kernel", "dojo/_base/lang", "dojo/_base/array","dojo/_base/html","dojo/_base/declare", "dojo/query", "dijit/_Widget", "../Chart", "dojox/lang/utils", "dojox/lang/functional","dojox/lang/functional/lambda", "dijit/_base/manager"], function(kernel, lang, arr, html, declare, query, Widget, Chart, du, df, dfl){ /*===== var Widget = dijit._Widget; =====*/ var collectParams, collectAxisParams, collectPlotParams, collectActionParams, collectDataParams, notNull = function(o){ return o; }, dc = lang.getObject("dojox.charting"); var ChartWidget = declare("dojox.charting.widget.Chart", Widget, { // parameters for the markup // theme for the chart theme: null, // margins for the chart: {l: 10, r: 10, t: 10, b: 10} margins: null, // chart area, define them as undefined to: // allow the parser to take them into account // but make sure they have no defined value to not override theme stroke: undefined, fill: undefined, // methods buildRendering: function(){ this.inherited(arguments); n = this.domNode; // collect chart parameters var axes = query("> .axis", n).map(collectAxisParams).filter(notNull), plots = query("> .plot", n).map(collectPlotParams).filter(notNull), actions = query("> .action", n).map(collectActionParams).filter(notNull), series = query("> .series", n).map(collectDataParams).filter(notNull); // build the chart n.innerHTML = ""; var c = this.chart = new Chart(n, { margins: this.margins, stroke: this.stroke, fill: this.fill, textDir: this.textDir }); // add collected parameters if(this.theme){ c.setTheme(this.theme); } axes.forEach(function(axis){ c.addAxis(axis.name, axis.kwArgs); }); plots.forEach(function(plot){ c.addPlot(plot.name, plot.kwArgs); }); this.actions = actions.map(function(action){ return new action.action(c, action.plot, action.kwArgs); }); var render = df.foldl(series, function(render, series){ if(series.type == "data"){ c.addSeries(series.name, series.data, series.kwArgs); render = true; }else{ c.addSeries(series.name, [0], series.kwArgs); var kw = {}; du.updateWithPattern( kw, series.kwArgs, { "query": "", "queryOptions": null, "start": 0, "count": 1 //, // "sort": [] }, true ); if(series.kwArgs.sort){ // sort is a complex object type and doesn't survive coercian kw.sort = lang.clone(series.kwArgs.sort); } lang.mixin(kw, { onComplete: function(data){ var values; if("valueFn" in series.kwArgs){ var fn = series.kwArgs.valueFn; values = arr.map(data, function(x){ return fn(series.data.getValue(x, series.field, 0)); }); }else{ values = arr.map(data, function(x){ return series.data.getValue(x, series.field, 0); }); } c.addSeries(series.name, values, series.kwArgs).render(); } }); series.data.fetch(kw); } return render; }, false); if(render){ c.render(); } }, destroy: function(){ // summary: properly destroy the widget this.chart.destroy(); this.inherited(arguments); }, resize: function(box){ // summary: // Resize the widget. // description: // Resize the domNode and the widget surface to the dimensions of a box of the following form: // `{ l: 50, t: 200, w: 300: h: 150 }` // If no box is provided, resize the surface to the marginBox of the domNode. // box: // If passed, denotes the new size of the widget. this.chart.resize(box); } }); collectParams = function(node, type, kw){ var dp = eval("(" + type + ".prototype.defaultParams)"); var x, attr; for(x in dp){ if(x in kw){ continue; } attr = node.getAttribute(x); kw[x] = du.coerceType(dp[x], attr == null || typeof attr == "undefined" ? 
dp[x] : attr); } var op = eval("(" + type + ".prototype.optionalParams)"); for(x in op){ if(x in kw){ continue; } attr = node.getAttribute(x); if(attr != null){ kw[x] = du.coerceType(op[x], attr); } } }; collectAxisParams = function(node){ var name = node.getAttribute("name"), type = node.getAttribute("type"); if(!name){ return null; } var o = {name: name, kwArgs: {}}, kw = o.kwArgs; if(type){ if(dc.axis2d[type]){ type = dojo._scopeName + "x.charting.axis2d." + type; } var axis = eval("(" + type + ")"); if(axis){ kw.type = axis; } }else{ type = dojo._scopeName + "x.charting.axis2d.Default"; } collectParams(node, type, kw); // compatibility conversions if(kw.font || kw.fontColor){ if(!kw.tick){ kw.tick = {}; } if(kw.font){ kw.tick.font = kw.font; } if(kw.fontColor){ kw.tick.fontColor = kw.fontColor; } } return o; }; collectPlotParams = function(node){ // var name = d.attr(node, "name"), type = d.attr(node, "type"); var name = node.getAttribute("name"), type = node.getAttribute("type"); if(!name){ return null; } var o = {name: name, kwArgs: {}}, kw = o.kwArgs; if(type){ if(dc.plot2d && dc.plot2d[type]){ type = dojo._scopeName + "x.charting.plot2d." + type; } var plot = eval("(" + type + ")"); if(plot){ kw.type = plot; } }else{ type = dojo._scopeName + "x.charting.plot2d.Default"; } collectParams(node, type, kw); return o; }; collectActionParams = function(node){ // var plot = d.attr(node, "plot"), type = d.attr(node, "type"); var plot = node.getAttribute("plot"), type = node.getAttribute("type"); if(!plot){ plot = "default"; } var o = {plot: plot, kwArgs: {}}, kw = o.kwArgs; if(type){ if(dc.action2d[type]){ type = dojo._scopeName + "x.charting.action2d." + type; } var action = eval("(" + type + ")"); if(!action){ return null; } o.action = action; }else{ return null; } collectParams(node, type, kw); return o; }; collectDataParams = function(node){ var ga = lang.partial(html.attr, node); var name = ga("name"); if(!name){ return null; } var o = { name: name, kwArgs: {} }, kw = o.kwArgs, t; t = ga("plot"); if(t != null){ kw.plot = t; } t = ga("marker"); if(t != null){ kw.marker = t; } t = ga("stroke"); if(t != null){ kw.stroke = eval("(" + t + ")"); } t = ga("outline"); if(t != null){ kw.outline = eval("(" + t + ")"); } t = ga("shadow"); if(t != null){ kw.shadow = eval("(" + t + ")"); } t = ga("fill"); if(t != null){ kw.fill = eval("(" + t + ")"); } t = ga("font"); if(t != null){ kw.font = t; } t = ga("fontColor"); if(t != null){ kw.fontColor = eval("(" + t + ")"); } t = ga("legend"); if(t != null){ kw.legend = t; } t = ga("data"); if(t != null){ o.type = "data"; o.data = t ? arr.map(String(t).split(','), Number) : []; return o; } t = ga("array"); if(t != null){ o.type = "data"; o.data = eval("(" + t + ")"); return o; } t = ga("store"); if(t != null){ o.type = "store"; o.data = eval("(" + t + ")"); t = ga("field"); o.field = t != null ? t : "value"; t = ga("query"); if(!!t){ kw.query = t; } t = ga("queryOptions"); if(!!t){ kw.queryOptions = eval("(" + t + ")"); } t = ga("start"); if(!!t){ kw.start = Number(t); } t = ga("count"); if(!!t){ kw.count = Number(t); } t = ga("sort"); if(!!t){ kw.sort = eval("("+t+")"); } t = ga("valueFn"); if(!!t){ kw.valueFn = dfl.lambda(t); } return o; } return null; }; return ChartWidget; });
PypiClean
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/tools/analysis_tools/optimize_anchors.py
import argparse import os.path as osp import mmcv import numpy as np import torch from mmcv import Config from scipy.optimize import differential_evolution from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh from mmdet.datasets import build_dataset from mmdet.utils import get_root_logger, replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Optimize anchor parameters.') parser.add_argument('config', help='Train config file path.') parser.add_argument( '--device', default='cuda:0', help='Device used for calculating.') parser.add_argument( '--input-shape', type=int, nargs='+', default=[608, 608], help='input image size') parser.add_argument( '--algorithm', default='differential_evolution', help='Algorithm used for anchor optimizing.' 'Support k-means and differential_evolution for YOLO.') parser.add_argument( '--iters', default=1000, type=int, help='Maximum iterations for optimizer.') parser.add_argument( '--output-dir', default=None, type=str, help='Path to save anchor optimize result.') args = parser.parse_args() return args class BaseAnchorOptimizer: """Base class for anchor optimizer. Args: dataset (obj:`Dataset`): Dataset object. input_shape (list[int]): Input image shape of the model. Format in [width, height]. logger (obj:`logging.Logger`): The logger for logging. device (str, optional): Device used for calculating. Default: 'cuda:0' out_dir (str, optional): Path to save anchor optimize result. Default: None """ def __init__(self, dataset, input_shape, logger, device='cuda:0', out_dir=None): self.dataset = dataset self.input_shape = input_shape self.logger = logger self.device = device self.out_dir = out_dir bbox_whs, img_shapes = self.get_whs_and_shapes() ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) # resize to input shape self.bbox_whs = bbox_whs / ratios def get_whs_and_shapes(self): """Get widths and heights of bboxes and shapes of images. Returns: tuple[np.ndarray]: Array of bbox shapes and array of image shapes with shape (num_bboxes, 2) in [width, height] format. """ self.logger.info('Collecting bboxes from annotation...') bbox_whs = [] img_shapes = [] prog_bar = mmcv.ProgressBar(len(self.dataset)) for idx in range(len(self.dataset)): ann = self.dataset.get_ann_info(idx) data_info = self.dataset.data_infos[idx] img_shape = np.array([data_info['width'], data_info['height']]) gt_bboxes = ann['bboxes'] for bbox in gt_bboxes: wh = bbox[2:4] - bbox[0:2] img_shapes.append(img_shape) bbox_whs.append(wh) prog_bar.update() print('\n') bbox_whs = np.array(bbox_whs) img_shapes = np.array(img_shapes) self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') return bbox_whs, img_shapes def get_zero_center_bbox_tensor(self): """Get a tensor of bboxes centered at (0, 0). Returns: Tensor: Tensor of bboxes with shape (num_bboxes, 4) in [xmin, ymin, xmax, ymax] format. 
""" whs = torch.from_numpy(self.bbox_whs).to( self.device, dtype=torch.float32) bboxes = bbox_cxcywh_to_xyxy( torch.cat([torch.zeros_like(whs), whs], dim=1)) return bboxes def optimize(self): raise NotImplementedError def save_result(self, anchors, path=None): anchor_results = [] for w, h in anchors: anchor_results.append([round(w), round(h)]) self.logger.info(f'Anchor optimize result:{anchor_results}') if path: json_path = osp.join(path, 'anchor_optimize_result.json') mmcv.dump(anchor_results, json_path) self.logger.info(f'Result saved in {json_path}') class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_. Args: num_anchors (int) : Number of anchors. iters (int): Maximum iterations for k-means. """ def __init__(self, num_anchors, iters, **kwargs): super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs) self.num_anchors = num_anchors self.iters = iters def optimize(self): anchors = self.kmeans_anchors() self.save_result(anchors, self.out_dir) def kmeans_anchors(self): self.logger.info( f'Start cluster {self.num_anchors} YOLO anchors with K-means...') bboxes = self.get_zero_center_bbox_tensor() cluster_center_idx = torch.randint( 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) assignments = torch.zeros((bboxes.shape[0], )).to(self.device) cluster_centers = bboxes[cluster_center_idx] if self.num_anchors == 1: cluster_centers = self.kmeans_maximization(bboxes, assignments, cluster_centers) anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() anchors = sorted(anchors, key=lambda x: x[0] * x[1]) return anchors prog_bar = mmcv.ProgressBar(self.iters) for i in range(self.iters): converged, assignments = self.kmeans_expectation( bboxes, assignments, cluster_centers) if converged: self.logger.info(f'K-means process has converged at iter {i}.') break cluster_centers = self.kmeans_maximization(bboxes, assignments, cluster_centers) prog_bar.update() print('\n') avg_iou = bbox_overlaps(bboxes, cluster_centers).max(1)[0].mean().item() anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() anchors = sorted(anchors, key=lambda x: x[0] * x[1]) self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') return anchors def kmeans_maximization(self, bboxes, assignments, centers): """Maximization part of EM algorithm(Expectation-Maximization)""" new_centers = torch.zeros_like(centers) for i in range(centers.shape[0]): mask = (assignments == i) if mask.sum(): new_centers[i, :] = bboxes[mask].mean(0) return new_centers def kmeans_expectation(self, bboxes, assignments, centers): """Expectation part of EM algorithm(Expectation-Maximization)""" ious = bbox_overlaps(bboxes, centers) closest = ious.argmax(1) converged = (closest == assignments).all() return converged, closest class YOLODEAnchorOptimizer(BaseAnchorOptimizer): """YOLO anchor optimizer using differential evolution algorithm. Args: num_anchors (int) : Number of anchors. iters (int): Maximum iterations for k-means. strategy (str): The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' Default: 'best1bin'. population_size (int): Total population size of evolution algorithm. Default: 15. 
convergence_thr (float): Tolerance for convergence, the optimizing stops when ``np.std(pop) <= abs(convergence_thr) + convergence_thr * np.abs(np.mean(population_energies))``, respectively. Default: 0.0001. mutation (tuple[float]): Range of dithering randomly changes the mutation constant. Default: (0.5, 1). recombination (float): Recombination constant of crossover probability. Default: 0.7. """ def __init__(self, num_anchors, iters, strategy='best1bin', population_size=15, convergence_thr=0.0001, mutation=(0.5, 1), recombination=0.7, **kwargs): super(YOLODEAnchorOptimizer, self).__init__(**kwargs) self.num_anchors = num_anchors self.iters = iters self.strategy = strategy self.population_size = population_size self.convergence_thr = convergence_thr self.mutation = mutation self.recombination = recombination def optimize(self): anchors = self.differential_evolution() self.save_result(anchors, self.out_dir) def differential_evolution(self): bboxes = self.get_zero_center_bbox_tensor() bounds = [] for i in range(self.num_anchors): bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) result = differential_evolution( func=self.avg_iou_cost, bounds=bounds, args=(bboxes, ), strategy=self.strategy, maxiter=self.iters, popsize=self.population_size, tol=self.convergence_thr, mutation=self.mutation, recombination=self.recombination, updating='immediate', disp=True) self.logger.info( f'Anchor evolution finish. Average IOU: {1 - result.fun}') anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] anchors = sorted(anchors, key=lambda x: x[0] * x[1]) return anchors @staticmethod def avg_iou_cost(anchor_params, bboxes): assert len(anchor_params) % 2 == 0 anchor_whs = torch.tensor( [[w, h] for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( bboxes.device, dtype=bboxes.dtype) anchor_boxes = bbox_cxcywh_to_xyxy( torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) ious = bbox_overlaps(bboxes, anchor_boxes) max_ious, _ = ious.max(1) cost = 1 - max_ious.mean().item() return cost def main(): logger = get_root_logger() args = parse_args() cfg = args.config cfg = Config.fromfile(cfg) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) input_shape = args.input_shape assert len(input_shape) == 2 anchor_type = cfg.model.bbox_head.anchor_generator.type assert anchor_type == 'YOLOAnchorGenerator', \ f'Only support optimize YOLOAnchor, but get {anchor_type}.' base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes num_anchors = sum([len(sizes) for sizes in base_sizes]) train_data_cfg = cfg.data.train while 'dataset' in train_data_cfg: train_data_cfg = train_data_cfg['dataset'] dataset = build_dataset(train_data_cfg) if args.algorithm == 'k-means': optimizer = YOLOKMeansAnchorOptimizer( dataset=dataset, input_shape=input_shape, device=args.device, num_anchors=num_anchors, iters=args.iters, logger=logger, out_dir=args.output_dir) elif args.algorithm == 'differential_evolution': optimizer = YOLODEAnchorOptimizer( dataset=dataset, input_shape=input_shape, device=args.device, num_anchors=num_anchors, iters=args.iters, logger=logger, out_dir=args.output_dir) else: raise NotImplementedError( f'Only support k-means and differential_evolution, ' f'but get {args.algorithm}') optimizer.optimize() if __name__ == '__main__': main()
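# Example invocation (illustrative; the config path is a placeholder, the
# flags mirror the argparse definitions above):
#
#   python tools/analysis_tools/optimize_anchors.py \
#       ${CONFIG_FILE} \
#       --algorithm k-means --input-shape 608 608 \
#       --device cuda:0 --output-dir work_dirs
#
# The optimised anchors are written to the log and, when --output-dir is
# given, saved as <output-dir>/anchor_optimize_result.json.
#
# For the differential-evolution path, the objective being minimised is
# avg_iou_cost(): the flat [w1, h1, w2, h2, ...] vector is turned into
# zero-centred anchor boxes and the cost is 1 - mean best IoU against the
# (also zero-centred) ground-truth boxes, e.g.:
#
#   whs = torch.tensor([[30., 60.], [120., 90.]])
#   boxes = bbox_cxcywh_to_xyxy(torch.cat([torch.zeros_like(whs), whs], dim=1))
#   cost = YOLODEAnchorOptimizer.avg_iou_cost([32, 64, 128, 96], boxes)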
PypiClean
/AppiumExtended-0.5.49b0-py3-none-any.whl/appium_extended_terminal/terminal.py
import logging import base64 import re import sys import time import traceback from typing import Dict, Any, Union, Tuple # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath( __file__)))) # The sys.path.append line adds the # parent directory of the tests directory to the Python module search path, allowing you to import modules from the # root folder. from appium_extended_helpers.helpers_decorators import log_debug class Terminal: def __init__(self, driver, logger: logging.Logger = None, log_level: int = logging.INFO, log_path: str = ''): self.driver = driver self.logger = logger if logger is None: self.logger = logging.getLogger(__name__) self.logger.setLevel(log_level) if bool(log_path): if not log_path.endswith('.log'): log_path = log_path + '.log' file_handler = logging.FileHandler(log_path) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') file_handler.setFormatter(formatter) self.logger.addHandler(file_handler) @log_debug() def adb_shell(self, command: str, args: str = "") -> Any: try: return self.driver.execute_script("mobile: shell", {'command': command, 'args': [args]}) except KeyError as e: self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) @log_debug() def push(self, source: str, destination: str) -> bool: """ Копирует файл или директорию на подключенное устройство через Appium сервер. Аргументы: driver: Appium WebDriver объект. source (str): Путь к копируемому файлу или директории на локальной машине. destination (str): Путь назначения на устройстве. Возвращает: bool: True, если файл или директория были успешно скопированы, False в противном случае. """ try: self.driver.push_file( destination_path=destination, source_path=source ) return True except IOError as e: self.logger.error("appium_extended_terminal.push()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def pull(self, source: str, destination: str) -> bool: """Извлекает файл с устройства по пути `source` и сохраняет его по пути `destination` на компьютере. Аргументы: source (str): Путь к файлу на устройстве. destination (str): Путь, по которому файл должен быть сохранен на компьютере. Возвращает: bool: True, если файл успешно извлечен и сохранен, False в противном случае. """ file_contents_base64 = self.driver.assert_extension_exists('mobile: pullFile'). \ execute_script('mobile: pullFile', {'remotePath': source}) if not file_contents_base64: return False try: decoded_contents = base64.b64decode(file_contents_base64) with open(destination, 'wb') as file: file.write(decoded_contents) return True except IOError as e: self.logger.error("appium_extended_terminal.pull") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def start_activity(self, package: str, activity: str) -> bool: """ Запускает активити на подключенном устройстве. Аргументы: package (str): Название пакета. activity (str): Название запускаемой активити. Возвращает: bool: True, если активность была успешно запущена, False в противном случае. 
""" try: self.adb_shell(command="am", args=f"start -n {package}/{activity}") return True except KeyError as e: self.logger.error("appium_extended_terminal.start_activity()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def get_current_app_package(self) -> Union[str, None]: """ Получает пакет текущего запущенного приложения на устройстве с помощью ADB. Возвращает: str: Название пакета текущего запущенного приложения, либо None, если произошла ошибка. """ try: result = self.adb_shell(command="dumpsys", args="window windows") lines = result.split('\n') for line in lines: if 'mCurrentFocus' in line or 'mFocusedApp' in line: matches = re.search(r'(([A-Za-z]{1}[A-Za-z\d_]*\.)+([A-Za-z][A-Za-z\d_]*)/)', line) if matches: return matches.group(1)[:-1] # removing trailing slash return None except KeyError as e: # Логируем ошибку, если возникло исключение self.logger.error("appium_extended_terminal.get_current_app_package()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return None @log_debug() def close_app(self, package: str) -> bool: """ Принудительно останавливает указанный пакет с помощью ADB. Аргументы: package (str): Название пакета приложения для закрытия. Возвращает: bool: True, если приложение успешно закрыто, False в противном случае. """ try: self.adb_shell(command="am", args=f"force-stop {package}") return True except KeyError as e: self.logger.error("appium_extended_terminal.close_app()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def reboot_app(self, package: str, activity: str) -> bool: """ Перезапускает приложение, закрывая его и затем запуская указанную активность. Аргументы: package (str): Название пакета приложения. activity (str): Название активности для запуска. Возвращает: bool: True, если перезапуск приложения выполнен успешно, False в противном случае. """ # Закрытие приложения if not self.close_app(package=package): return False # Запуск указанной активности if not self.start_activity(package=package, activity=activity): return False return True @log_debug() def install_app(self, app_path: str) -> bool: """ Устанавливает указанный пакет с помощью Appium. Дублирует команду драйвера. Добавлено для интуитивности. Аргументы: package (str): Название пакета приложения для установки. Возвращает: bool: True, если приложение успешно удалено, False в противном случае. """ try: self.driver.install_app(app_path=app_path) return True except KeyError as e: self.logger.error("appium_extended_terminal.install_app()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def is_app_installed(self, package) -> bool: """ Проверяет, установлен ли пакет. 
""" self.logger.debug(f"is_app_installed() < {package=}") try: result = self.adb_shell(command="pm", args="list packages") # Фильтруем пакеты if any([line.strip().endswith(package) for line in result.splitlines()]): self.logger.debug("is_app_installed() > True") return True self.logger.debug("is_app_installed() > False") return False except KeyError as e: self.logger.error("appium_extended_terminal.is_app_installed() > False") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def uninstall_app(self, package: str) -> bool: """ Удаляет указанный пакет с помощью ADB. Аргументы: package (str): Название пакета приложения для удаления. Возвращает: bool: True, если приложение успешно удалено, False в противном случае. """ try: self.driver.remove_app(app_id=package) return True except KeyError as e: self.logger.error("appium_extended_terminal.uninstall_app()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def press_home(self) -> bool: """ Отправляет событие нажатия кнопки Home на устройство с помощью ADB. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.input_keycode(keycode="KEYCODE_HOME") return True except KeyError as e: self.logger.error("appium_extended_terminal.press_home()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def press_back(self) -> bool: """ Отправляет событие нажатия кнопки Back на устройство с помощью ADB. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.input_keycode(keycode="KEYCODE_BACK") return True except KeyError as e: self.logger.error("appium_extended_terminal.press_back()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def press_menu(self) -> bool: """ Отправляет событие нажатия кнопки Menu на устройство с помощью ADB. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.input_keycode(keycode="KEYCODE_MENU") return True except KeyError as e: self.logger.error("appium_extended_terminal.press_menu()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def input_keycode_num_(self, num: int) -> bool: """ Отправляет событие нажатия клавиши с числовым значением на устройство с помощью ADB. Допустимые значения: 0-9, ADD, COMMA, DIVIDE, DOT, ENTER, EQUALS Аргументы: num (int): Числовое значение клавиши для нажатия. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.adb_shell(command="input", args=f"keyevent KEYCODE_NUMPAD_{num}") return True except KeyError as e: self.logger.error("appium_extended_terminal.input_keycode_num_()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def input_keycode(self, keycode: str) -> bool: """ Вводит указанный код клавиши на устройстве с помощью ADB. Аргументы: keycode (str): Код клавиши для ввода. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. 
""" try: self.adb_shell(command="input", args=f"keyevent {keycode}") return True except KeyError as e: self.logger.error("appium_extended_terminal.input_keycode()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def input_by_virtual_keyboard(self, key: str, keyboard: Dict[str, tuple]) -> bool: """ Вводит строку символов с помощью виртуальной клавиатуры. Аргументы: key (str): Строка символов для ввода. keyboard (dict): Словарь с маппингом символов на координаты нажатий. Возвращает: bool: True, если ввод выполнен успешно, False в противном случае. """ try: for char in key: # Вызываем функцию tap с координатами, соответствующими символу char self.tap(x=keyboard[str(char)][0], y=keyboard[str(char)][1]) return True except KeyError as e: # Логируем ошибку и возвращаем False в случае возникновения исключения self.logger.error("appium_extended_terminal.input_by_virtual_keyboard") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def input_text(self, text: str) -> bool: """ Вводит указанный текст на устройстве с помощью ADB. Аргументы: text (str): Текст для ввода. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.adb_shell(command="input", args=f"text {text}") return True except KeyError as e: # Логируем ошибку, если возникло исключение self.logger.error("appium_extended_terminal.input_text()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def tap(self, x: int, y: int) -> bool: """ Выполняет нажатие на указанные координаты на устройстве с помощью ADB. Аргументы: x: Координата X для нажатия. y: Координата Y для нажатия. Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. """ try: self.adb_shell(command="input", args=f"tap {str(x)} {str(y)}") return True except KeyError as e: # Логируем ошибку, если возникло исключение self.logger.error("appium_extended_terminal.tap()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def swipe(self, start_x: Union[str, int], start_y: Union[str, int], end_x: Union[str, int], end_y: Union[str, int], duration: int = 300) -> bool: """ Выполняет свайп (перетаскивание) с одной точки на экране в другую на устройстве с помощью ADB. Аргументы: start_x: Координата X начальной точки свайпа. start_y: Координата Y начальной точки свайпа. end_x: Координата X конечной точки свайпа. end_y: Координата Y конечной точки свайпа. duration (int): Длительность свайпа в миллисекундах (по умолчанию 300). Возвращает: bool: True, если команда была успешно выполнена, False в противном случае. 
""" try: self.adb_shell(command="input", args=f"swipe {str(start_x)} {str(start_y)} {str(end_x)} {str(end_y)} {str(duration)}") return True except KeyError as e: # Логируем ошибку, если возникло исключение self.logger.error("appium_extended_terminal.swipe()") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def swipe_right_to_left(self, duration: int = 300) -> bool: window_size = self.get_screen_resolution() width = window_size[0] height = window_size[1] left = int(width * 0.1) right = int(width * 0.9) return self.swipe(start_x=right, start_y=height // 2, end_x=left, end_y=height // 2, duration=duration) @log_debug() def swipe_left_to_right(self, duration: int = 300) -> bool: window_size = self.get_screen_resolution() width = window_size[0] height = window_size[1] left = int(width * 0.1) right = int(width * 0.9) return self.swipe(start_x=left, start_y=height // 2, end_x=right, end_y=height // 2, duration=duration) @log_debug() def swipe_top_to_bottom(self, duration: int = 300) -> bool: window_size = self.get_screen_resolution() height = window_size[1] top = int(height * 0.1) bottom = int(height * 0.9) return self.swipe(start_x=top, start_y=height // 2, end_x=bottom, end_y=height // 2, duration=duration) @log_debug() def swipe_bottom_to_top(self, duration: int = 300) -> bool: window_size = self.get_screen_resolution() height = window_size[1] top = int(height * 0.1) bottom = int(height * 0.9) return self.swipe(start_x=bottom, start_y=height // 2, end_x=top, end_y=height // 2, duration=duration) @log_debug() def check_vpn(self, ip_address: str = '') -> bool: """ Проверяет, активно ли VPN-соединение на устройстве с помощью ADB. Аргументы: ip (str): IP-адрес для проверки VPN-соединения. Если не указан, используется значение из конфигурации. Возвращает: bool: True, если VPN-соединение активно, False в противном случае. """ try: output = self.adb_shell(command="netstat", args="") lines = output.split('\n') for line in lines: if ip_address in line and "ESTABLISHED" in line: self.logger.debug("check_VPN() True") return True self.logger.debug("check_VPN() False") return False except KeyError as e: # Логируем ошибку, если возникло исключение self.logger.error("appium_extended_terminal.check_VPN") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def stop_logcat(self) -> bool: """ Останавливает выполнение logcat на устройстве с помощью ADB. Возвращает: bool: True, если выполнение logcat остановлено успешно, False в противном случае. 
""" # Получаем список выполняющихся процессов logcat try: process_list = self.adb_shell(command="ps", args="") except KeyError as e: self.logger.error("appium_extended_terminal.stop_logcat") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False # Проходим по списку процессов и отправляем каждому сигнал SIGINT for process in process_list.splitlines(): if "logcat" in process: pid = process.split()[1] try: self.adb_shell(command="kill", args=f"-SIGINT {str(pid)}") except KeyError as e: self.logger.error("appium_extended_terminal.stop_logcat") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def know_pid(self, name: str) -> Union[int, None]: """ Находит Process ID (PID) процесса по его имени, используя adb shell ps. Параметры: name (str): Имя процесса, PID которого нужно найти. Возвращает: Union[int, None]: PID процесса, если он найден, или None, если процесс не найден. """ # Получение списка всех процессов с помощью adb shell ps processes = self.adb_shell(command="ps") if name not in processes: self.logger.error("know_pid() [Процесс не обнаружен]") return None # Разделение вывода на строки и удаление пустых строк lines = processes.strip().split('\n') # Проход по каждой строке вывода, начиная с 2-й строки, игнорируя заголовки for line in lines[1:]: # Разделение строки на столбцы по пробелам columns = line.split() # Проверка, что строка имеет не менее 9 столбцов if len(columns) >= 9: # Извлечение PID и имени процесса из соответствующих столбцов pid, process_name = columns[1], columns[8] # Сравнение имени процесса с искомым именем if name == process_name: self.logger.debug(f"know_pid() > {str(pid)}") return int(pid) self.logger.error("know_pid() [Процесс не обнаружен]") # Возврат None, если процесс с заданным именем не найден return None @log_debug() def is_process_exist(self, name) -> bool: """ Проверяет, запущен ли процесс, используя adb shell ps. Параметры: name (str): Имя процесса. Возвращает: bool: True если процесс с указанным именем существует, False в ином случае. """ # Получение списка всех процессов с помощью adb shell ps processes = self.adb_shell(command="ps") if name not in processes: self.logger.debug("is_process_exist() > False") return False # Разделение вывода на строки и удаление пустых строк lines = processes.strip().split('\n') # Проход по каждой строке вывода, начиная с 2-й строки, игнорируя заголовки for line in lines[1:]: # Разделение строки на столбцы по пробелам columns = line.split() # Проверка, что строка имеет не менее 9 столбцов if len(columns) >= 9: # Извлечение PID и имени процесса из соответствующих столбцов _, process_name = columns[1], columns[8] # Сравнение имени процесса с искомым именем if name == process_name: self.logger.debug("is_process_exist() > True") return True self.logger.debug("is_process_exist() > False") # Возврат None, если процесс с заданным именем не найден return False @log_debug() def run_background_process(self, command: str, args: str = "", process: str = "") -> bool: """ Запускает процесс в фоновом режиме на устройстве Android. Аргументы: command (str): Команда для выполнения на устройстве. process (str): Название процесса, который будет запущен. По умолчанию "". Если process == "", то не будет проверяться его запуск в системе. Возвращает: bool: True, если процесс был успешно запущен, False в противном случае. 
""" self.logger.debug(f"run_background_process() < {command=}") try: self.adb_shell(command=command, args=args + " nohup > /dev/null 2>&1 &") if process != "": time.sleep(1) if not self.is_process_exist(name=process): return False return True except KeyError as e: self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False @log_debug() def kill_by_pid(self, pid: int) -> bool: """ Отправляет сигнал SIGINT для остановки процесса по указанному идентификатору PID с помощью ADB. Аргументы: pid (str): Идентификатор PID процесса для остановки. Возвращает: bool: True, если процесс успешно остановлен, False в противном случае. """ try: self.adb_shell(command="kill", args=f"-s SIGINT {str(pid)}") except KeyError as e: self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def kill_by_name(self, name: str) -> bool: """ Останавливает все процессы с указанным именем на устройстве с помощью ADB. Аргументы: name (str): Имя процесса для остановки. Возвращает: bool: True, если все процессы успешно остановлены, False в противном случае. """ self.logger.debug(f"kill_by_name() < {name=}") try: self.adb_shell(command="pkill", args=f"-l SIGINT {str(name)}") except KeyError as e: self.logger.error("kill_by_name() > False") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False self.logger.debug("kill_by_name() > True") return True @log_debug() def kill_all(self, name: str) -> bool: """ Останавливает все процессы, соответствующие указанному имени, на устройстве с помощью ADB. Аргументы: name (str): Имя процесса или шаблон имени для остановки. Возвращает: bool: True, если все процессы успешно остановлены, False в противном случае. """ try: self.adb_shell(command="pkill", args=f"-f {str(name)}") except KeyError as e: self.logger.error("appium_extended_terminal.kill_all") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def delete_files_from_internal_storage(self, path) -> bool: """ Удаляет файлы из внутреннего хранилища устройства с помощью ADB. Аргументы: path (str): Путь к папке с файлами для удаления. Возвращает: bool: True, если файлы успешно удалены, False в противном случае. """ try: self.adb_shell(command="rm", args=f"-rf {path}*") except KeyError as e: self.logger.error("appium_extended_terminal.delete_files_from_internal_storage") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def delete_file_from_internal_storage(self, path: str, filename: str) -> bool: """ Удаляет файл из внутреннего хранилища устройства с помощью ADB. Аргументы: path (str): Путь к папке с файлами для удаления. filename (str): Наименование файла. Возвращает: bool: True, если файл успешно удален, False в противном случае. """ try: if path.endswith('/'): path = path[:-1] self.adb_shell(command="rm", args=f"-rf {path}/{filename}") except KeyError as e: self.logger.error("appium_extended_terminal.delete_file_from_internal_storage") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def record_video(self, **options: Any) -> bool: """ Начинает запись видео. 
3 минуты максимум. Аргументы: filename (str): Имя файла для сохранения видео. Возвращает: bool: True, если запись видео успешно начата, False в противном случае. """ try: self.driver.start_recording_screen(**options) except KeyError as e: self.logger.error("appium_extended_terminal.record_video") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def stop_video(self, **options: Any) -> Union[bytes, None]: """ Останавливает запись видео. Возвращает Base64 bytes Возвращает: bool: True, если запись видео успешно остановлена, False в противном случае. """ try: str_based64_video = self.driver.stop_recording_screen(**options) # Декодируем base64-кодированную строку в бинарные данные видео return base64.b64decode(str_based64_video) except KeyError as e: self.logger.error("appium_extended_terminal.stop_video") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return None @log_debug() def reboot(self) -> bool: """ Перезагружает устройство с помощью ADB. Возвращает: bool: True, если перезагрузка успешно запущена, False в противном случае. """ try: self.adb_shell(command='reboot') except KeyError as e: self.logger.error("appium_extended_terminal.reboot") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info) return False return True @log_debug() def get_screen_resolution(self) -> Union[Tuple[int, int], None]: """ Возвращает разрешение экрана устройства с помощью ADB. Возвращает: tuple[int, int] or None: Кортеж с шириной и высотой экрана в пикселях, или None в случае ошибки. """ try: output = self.adb_shell(command='wm', args='size') if "Physical size" in output: resolution_str = output.split(":")[1].strip() width, height = resolution_str.split("x") return int(width), int(height) except KeyError as e: self.logger.error("appium_extended_terminal.get_screen_resolution") self.logger.error(e) traceback_info = "".join(traceback.format_tb(sys.exc_info()[2])) self.logger.error(traceback_info)
PypiClean
/Chronoclust-0.2.1.2.tar.gz/Chronoclust-0.2.1.2/chronoclust/objects/microcluster.py
import numpy as np import chronoclust.utilities.mc_functions as nmba __author__ = "Givanna Putri, Deeksha Singh, Mark Read, and Tao Tang" __copyright__ = "Copyright 2017, Cytoclust Project" __credits__ = ["Givanna Putri", "Deeksha Singh", "Mark Read", "Tao Tang"] __version__ = "0.0.1" __maintainer__ = "Givanna Putri" __email__ = "[email protected]" __status__ = "Development" class Microcluster(object): def __init__(self, cf1, cf2, id=set(), cumulative_weight=0, preferred_dimension_vector=None, cluster_centroids=None, creation_time_in_hrs=0): """ Class representing Microcluster (MC) in Chronoclust Parameters ---------- cf1 : np.ndarray (Cluster Feature 1) weighted linear sum of all points in this Microcluster for each dimension. cf2 : np.ndarray (Cluster Feature 2) weighted linear sum of square of all points in this Microcluster for each dimension. id : np.ndarray, optional id of the Microcluster. Array containing single int for potential and outlier, multiple ints for core(consists of the ids of all constituting potential Microcluster). cumulative_weight : float, optional Sum of the weight of the datapoints in the cluster over time. For our usage, each datapoint worth weight of 1. preferred_dimension_vector : np.ndarray, optional An array which each index indicates whether the dimension is preferred by the Microcluster. cluster_centroids : np.ndarray, optional Cluster centroid. creation_time_in_hrs : int, optional Time when the cluster is created in hours. Attributes ---------- CF1 : np.ndarray (Cluster Feature 1) weighted linear sum of all points in this Microcluster for each dimension. Don't get confused with the paper definition of f(t) * pij where f(t) is weight of the datapoint at time t when adding new data point. The way we do it is fine because rather than compunding the decay rate based on the arrival time, we just apply it again to the new weight at every time interval. So for instance, CF1 at t0 is 7, at t1 it's decayed to 3.5 by multiplying 7 with 2^-1 assuming lambda is 1. Then at t2, rather than multiplying 7 with 2^-2, we multiply 3.5 by 2^-1 again, yielding same value 1.75. CF2 : np.ndarray (Cluster Feature 2) weighted linear sum of square of all points in this Microcluster for each dimension. id : np.ndarray id of the Microcluster. Array containing single int for potential and outlier, multiple ints for core(consists of the ids of all constituting potential Microcluster). cumulative_weight : float Sum of the weight of the datapoints in the cluster over time. For our usage, each datapoint worth weight of 1. preferred_dimension_vector : np.ndarray An array which each index indicates whether the dimension is preferred by the Microcluster. cluster_centroids : np.ndarray Cluster centroid. creation_time_in_hrs : int Time when the cluster is created in hours. points : dict All the datapoints within the Microcluster. Key is the points id (assigned when adding new points). Value is the numerical value of the data point. 
""" self.id = id self.CF1 = cf1 self.CF2 = cf2 self.cumulative_weight = cumulative_weight self.preferred_dimension_vector = preferred_dimension_vector self.cluster_centroids = cluster_centroids self.creation_time_in_hrs = creation_time_in_hrs self.points = {} self.prev_pcore_id = None self.prev_outlier_id = None def update_prev_outlier_id(self, outlier_id): self.prev_outlier_id = outlier_id def update_prev_pcore_id(self, pcore_id): self.prev_pcore_id = pcore_id def update_preferred_dimensions(self, variance_threshold_squared, k_constant): """ Calculate the preferred dimensions of the cluster. When calculating the preferred dimensions, the method compares squared variance along each dimension with squared variance threshold. Args: variance_threshold_squared (float): The squared variance threshold used to determine whether a dimension can be considered preferred. For a dimension to be preferred, the variance of the data points along that dimension must be less than or equal to the the threshold. k_constant (int): Default constant assigned to a dimension whose variance is smaller than variance_threshold. Returns: None. """ cf1 = self.CF1 cf2 = self.CF2 cum_weight = self.cumulative_weight squared_variance = nmba.calculate_squared_variance(cf1, cf2, cum_weight) updated_pref_dim_vector = [] for s in squared_variance: if s <= variance_threshold_squared: updated_pref_dim_vector.append(k_constant) else: updated_pref_dim_vector.append(1.0) self.preferred_dimension_vector = np.array(updated_pref_dim_vector) def add_new_point(self, new_point_values, new_point_timestamp, new_point_idx, new_point_weight=1, update_centroid=True): """ Add new point to the microcluster. In our usage, each point is initially of weight 1. This makes sum of weight to be the same as number of points. Parameters ---------- new_point_values : np.ndarray The datapoint represented as an array of value. new_point_timestamp : int The timepoint the datapoint is meant for. new_point_idx : int The index (or so called id) of the data point. new_point_weight : int, optional Weight of the datapoint to be added. Default to 1. update_centroid : bool, optional Whether to update the MC's centroid after adding new point. Default to True. This parameter is mainly used for testing for now. Returns ------- None """ cf1 = self.CF1 cf2 = self.CF2 self.CF1, self.CF2 = nmba.update_cf(cf1, cf2, new_point_values) self.cumulative_weight += new_point_weight self.points[new_point_idx] = new_point_values.tolist() # update the cluster centroid as it may have moved with the introduction of new data point. if update_centroid: self.set_centroid() def set_centroid(self): """ Calculate and set the microcluster's centroid. Returns ------- None """ cf1 = self.CF1 cum_weight = self.cumulative_weight self.cluster_centroids = nmba.calculate_centroid(cf1, cum_weight) def get_projected_dist_to_point(self, other_point): """ Calculate the projected distance between this cluster and a datapoint p. See Definition 8 of paper[1] Args: other_point (numpy.ndarray): The datapoint represented as an array of value of each of its dimension. Returns: Float: Projected distance between this point and point given as argument. """ centroid = np.array(self.cluster_centroids) pref_dim = np.array(self.preferred_dimension_vector) other_pt_np = np.array(other_point) distance = nmba.calculate_projected_distance(centroid, pref_dim, other_pt_np) return distance def calculate_projected_radius_squared(self): """ Calculate projected radius. See definition 5 in paper[1]. 
Note that this calculate SQUARED radius. Returns: Float: Squared Projected radius. """ cf1 = self.CF1 cf2 = self.CF2 pref_dim = self.preferred_dimension_vector cum_weight = self.cumulative_weight radius_squared = nmba.calculate_projected_radius_squared(cf1, cf2, pref_dim, cum_weight) return radius_squared def get_copy(self): """ Return a clone of itself. Cannot use copy or deepcopy as they don't work for class object. Returns: Microcluster: A clone of itself. """ cf1 = self.CF1 cf2 = self.CF2 new_cf1, new_cf2 = nmba.clone_cf(cf1, cf2) return Microcluster(cf1=new_cf1, cf2=new_cf2, cumulative_weight=self.cumulative_weight) def get_copy_with_new_point(self, datapoint, variance_threshold_squared, k_constant): """ Return a clone of itself with a datapoint added in it. It will create a clone of itself (note it'll be a standalone clone as CF1 and CF2 will not be copied over. Beware of Python assignment is passing pointers!), then add a new datapoint to it, then finally recalculate the preferred_dimension_vector, and return it. This is used to simulate adding a point to a cluster to see if the cluster can fit another point. Args: datapoint (numpy.array): A datapoint represented as array of values in each dimension, to be added to the clone of this cluster. variance_threshold_squared (float): Variance_threshold used to calculate preferred_dimension_vector. k_constant (int): k_constant used to calculate preferred_dimension_vector. Returns: Microcluster: A clone of itself with new datapoint added in it. """ temp_pmc = self.get_copy() temp_pmc.add_new_point(datapoint, -1, -1) temp_pmc.update_preferred_dimensions(variance_threshold_squared, k_constant) return temp_pmc def is_core(self, radius_threshold_squared, density_threshold, max_subspace_dimensionality): """ Check if this cluster is a core cluster. See definition 4 in paper [1]. Args: radius_threshold_squared (float): Squared minimum projected radius. density_threshold (float): Squared maximum density threshold. max_subspace_dimensionality (int): Minimum number of dimensions in pdim with value k_constant. Returns: bool: True if cluster is a core. False otherwise. """ # TODO: improve the core radius calculation based on decimal point? cf1 = self.CF1 cf2 = self.CF2 cum_weight = self.cumulative_weight pref_dim = self.preferred_dimension_vector core_status = nmba.is_core(cf1, cf2, pref_dim, cum_weight, radius_threshold_squared, density_threshold, max_subspace_dimensionality) return core_status def reset_points(self): # TODO revisit this self.points = {}
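

if __name__ == "__main__":
    # Illustrative usage sketch only; it is not part of the original module.
    # It assumes that chronoclust.utilities.mc_functions (imported above as
    # ``nmba``) is available at runtime and that CF1/CF2 are per-dimension
    # numpy arrays, as described in the class docstring. The numeric values
    # below are arbitrary examples.
    example_mc = Microcluster(cf1=np.zeros(3), cf2=np.zeros(3), id={0})
    example_mc.add_new_point(np.array([1.0, 2.0, 3.0]), new_point_timestamp=0, new_point_idx=0)
    example_mc.add_new_point(np.array([1.2, 2.1, 2.8]), new_point_timestamp=1, new_point_idx=1)
    example_mc.update_preferred_dimensions(variance_threshold_squared=0.5, k_constant=10)
    print("centroid:", example_mc.cluster_centroids)
    print("preferred dimensions:", example_mc.preferred_dimension_vector)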
PypiClean
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/necks/yolo_neck.py
import torch import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS class DetectionBlock(BaseModule): """Detection block in YOLO neck. Let out_channels = n, the DetectionBlock contains: Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. The first 6 ConvLayers are formed the following way: 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. The Conv2D layer is 1x1x255. Some block will have branch after the fifth ConvLayer. The input channel is arbitrary (in_channels) Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(DetectionBlock, self).__init__(init_cfg) double_out_channels = out_channels * 2 # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) self.conv2 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) self.conv4 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) def forward(self, x): tmp = self.conv1(x) tmp = self.conv2(tmp) tmp = self.conv3(tmp) tmp = self.conv4(tmp) out = self.conv5(tmp) return out @NECKS.register_module() class YOLOV3Neck(BaseModule): """The neck of YOLOV3. It can be treated as a simplified version of FPN. It will take the result from Darknet backbone and do some upsampling and concatenation. It will finally output the detection result. Note: The input feats should be from top to bottom. i.e., from high-lvl to low-lvl But YOLOV3Neck will process them in reversed order. i.e., from bottom (high-lvl) to top (low-lvl) Args: num_scales (int): The number of scales / stages. in_channels (List[int]): The number of input channels per scale. out_channels (List[int]): The number of output channels per scale. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict, optional): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_scales, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(YOLOV3Neck, self).__init__(init_cfg) assert (num_scales == len(in_channels) == len(out_channels)) self.num_scales = num_scales self.in_channels = in_channels self.out_channels = out_channels # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) # To support arbitrary scales, the code looks awful, but it works. # Better solution is welcomed. 
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) for i in range(1, self.num_scales): in_c, out_c = self.in_channels[i], self.out_channels[i] inter_c = out_channels[i - 1] self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg)) # in_c + out_c : High-lvl feats will be cat with low-lvl feats self.add_module(f'detect{i+1}', DetectionBlock(in_c + out_c, out_c, **cfg)) def forward(self, feats): assert len(feats) == self.num_scales # processed from bottom (high-lvl) to top (low-lvl) outs = [] out = self.detect1(feats[-1]) outs.append(out) for i, x in enumerate(reversed(feats[:-1])): conv = getattr(self, f'conv{i+1}') tmp = conv(out) # Cat with low-lvl feats tmp = F.interpolate(tmp, scale_factor=2) tmp = torch.cat((tmp, x), 1) detect = getattr(self, f'detect{i+2}') out = detect(tmp) outs.append(out) return tuple(outs)
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/chardet/langturkishmodel.py
# 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin5_TurkishCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42, 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255, 255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15, 26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255, 180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165, 164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106, 150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136, 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125, 124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119, 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86, 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96, 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107, ) TurkishLangModel = ( 3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3, 3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, 3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3, 3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1, 3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3, 3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1, 3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2, 2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1, 3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2, 2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0, 1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1, 2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2, 3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1, 3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2, 2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1, 3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2, 2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, 3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2, 3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, 3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3, 0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1, 3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1, 0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0, 3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1, 1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1, 3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3, 2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3, 2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, 3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0, 0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0, 1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2, 3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1, 
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0, 3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2, 2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0, 0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0, 3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1, 0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1, 1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0, 1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3, 2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1, 2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1, 2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, 3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0, 0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, 3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1, 1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2, 0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1, 3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1, 0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0, 3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0, 3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0, 1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2, 2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1, 0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0, 3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0, 0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0, 0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0, 3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0, 0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0, 0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0, 3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0, 0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1, 3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0, 0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1, 0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0, 3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, 0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0, 0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0, 3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0, 
0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0, 0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0, 3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0, 0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0, 3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0, 0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, 0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0, 0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0, 0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0, 1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0, 0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1, 0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0, 3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0, 0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0, 2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0, 2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0, 0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, 1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1, 0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) Latin5TurkishModel = { 'char_to_order_map': Latin5_TurkishCharToOrderMap, 'precedence_matrix': TurkishLangModel, 'typical_positive_ratio': 0.970290, 'keep_english_letter': True, 'charset_name': "ISO-8859-9", 'language': 'Turkish', }
PypiClean
/KITTI%20Iterator-1.4.0.tar.gz/KITTI Iterator-1.4.0/kitti_iterator/plotter.py
import numpy as np
import os

###
# Plotter Code
###

# QtWidgets is needed for QApplication below, so it is imported from pyqtgraph's Qt shim
# (the direct PyQt5 import is kept only as a commented reference).
# from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import pyqtgraph.opengl as gl
import sys
from pyqtgraph import Vector
import math
import matplotlib as mpl

cmap = mpl.colormaps['viridis']
# cmap = mpl.colormaps['magma']

global POINTS
global point_cloud_array
POINTS = []
MESHES = {
    'vertexes': [],
    'faces': [],
    # 'vertexColors': [],
    'faceColors': []
}

def dist(a, b):
    return ((a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2)**0.5

global min_height, max_height, colors, calculated, max_dist, min_dist, dist_range
calculated = False

colors_enabled = True

# Precompute a small lookup table of colormap values so per-point colouring is cheap.
colors_hash = []
colors_hash_res = 10
for i in range(0, colors_hash_res):
    colors_hash.append(cmap(float(i)/(colors_hash_res-1)))

def update_graph():
    global graph_region, POINTS, MESHES, point_cloud_array, mesh_region
    global min_height, max_height, colors, calculated, max_dist, min_dist, dist_range
    global app
    try:
        if not point_cloud_array.empty():
            DATA = point_cloud_array.get()
            POINTS = DATA['POINTS']
            MESHES = DATA['MESHES']

        def col_map(index):
            return colors_hash[round(index*(colors_hash_res-1))]

        #POINTS = [(0,0,1), ]
        COLORS = np.ones(shape=(len(POINTS), 3), dtype=np.uint8)
        if len(POINTS) > 0:
            POINTS = np.array(POINTS)
            if colors_enabled:
                # colour each point by its height (z value), normalised over the current frame
                heights = POINTS[:, 2]
                max_height = np.max(heights)
                min_height = np.min(heights)
                # print('min_height, max_height', min_height, max_height)
                # max_height = 1.1
                # min_height = -3.5
                heights = (heights - min_height)/(max_height-min_height)
                heights_color_index = np.rint(heights*(colors_hash_res-1)).astype(np.uint8)
                COLORS = np.array([colors_hash[xi] for xi in heights_color_index])

            #POINTS_scaled = POINTS / 10000.0
            POINTS_scaled = POINTS.copy()
            # POINTS_scaled[:,2] = POINTS_scaled[:,2]*10.0
            # POINTS_scaled[:,2] += 10.0
            #print(POINTS)
            graph_region.setData(pos=POINTS_scaled, color=COLORS)
            #graph_region.setData(pos=POINTS)

        if 'vertexes' in MESHES and len(MESHES['vertexes']) > 0:
            mesh_region.setMeshData(
                vertexes=MESHES['vertexes'],
                faces=MESHES['faces'],
                #vertexColors=MESHES['vertexColors'],
                faceColors=MESHES['faceColors']
            )
    except KeyboardInterrupt:
        app.closeAllWindows()

def start_graph(points_q):
    global POINTS, point_cloud_array
    point_cloud_array = points_q
    print("Setting up graph")
    global app, graph_region, mesh_region, w, g, d3, t
    # app = QtGui.QApplication([])
    app = QtWidgets.QApplication([])
    w = gl.GLViewWidget()
    w.resize(800, 600)
    w.opts['distance'] = 20
    w.show()
    w.setWindowTitle('LIDAR Point Cloud')
    w.cameraPosition()
    w.setCameraPosition(pos=QtGui.QVector3D(0, 0, 0), )

    g = gl.GLGridItem()
    w.addItem(g)

    graph_region = gl.GLScatterPlotItem(pos=np.zeros((1, 3), dtype=np.float32), color=(0, 1, 0, 0.5), size=0.3, pxMode=False)
    # graph_region.translate(0, 0, 1.7)
    # graph_region.rotate(-90, 1, 0, 0)

    vertexes = np.array([[1, 0, 0], #0
                         [0, 0, 0], #1
                         [0, 1, 0], #2
                         [0, 0, 1], #3
                         [1, 1, 0], #4
                         [1, 1, 1], #5
                         [0, 1, 1], #6
                         [1, 0, 1]])#7

    faces = np.array([[1,0,7], [1,3,7], [1,2,4], [1,0,4], [1,2,6], [1,3,6], [0,4,5], [0,7,5], [2,4,5], [2,6,5], [3,6,5], [3,7,5]])

    colors = np.array([[1,0,0,1] for i in range(12)])

    mesh_region = gl.GLMeshItem(
        vertexes=vertexes,
        faces=faces,
        faceColors=colors,
        drawEdges=True,
        edgeColor=(0, 0, 0, 1),
    )
    # mesh_region.translate(0, 0, 1.7)
    # mesh_region.rotate(180, 1, 0, 0)

    w.addItem(mesh_region)
    #graph_region.rotate(90 + 135, 1, 0, 0)
    w.addItem(graph_region)

    t = QtCore.QTimer()
    t.timeout.connect(update_graph)
    t.start(500)

    # start the Qt event loop; returns when the window is closed
    app.exec_()
print("\n[STOP]\tGraph Window closed. Stopping...") def lidar_measurement_to_np_array(lidar_measurement): data = list() for location in lidar_measurement: data.append([location.x, location.y, location.z]) return np.array(data).reshape((-1, 3)) def plot_points(data): #try: global POINTS POINTS = np.array(data) if __name__ == '__main__': if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): from multiprocessing import Queue point_cloud_array = Queue() start_graph(point_cloud_array)
PypiClean
/DensityClust-1.4.9.tar.gz/DensityClust-1.4.9/LDC_MGM_main/LDC_MGM_main.py
import os
import shutil
import pandas as pd

from DensityClust.localDenClust2 import LocalDensityCluster as LDC
from DensityClust import split_cube
from tools import show_clumps
from DensityClust.localDenClust2 import Data, Param
from DensityClust.LocalDensityClustering_main import localDenCluster
from fit_clump_function import main_fit_gauss_3d as mgm


def LDC_MGM_split_mode(data_name, para, save_folder_all, save_mgm_png):
    """
    LDC + MGM detection in split (block-by-block) mode.

    :param data_name: path of the data to be detected (str), a FITS file
    :param para: algorithm parameters, dict
        para.rho_min: Minimum density [5*rms]
        para.delta_min: Minimum delta [4]
        para.v_min: Minimum volume [27]
        para.noise: The noise level of the data, used for data truncation calculation [2*rms]
        para.dc: auto
    :param save_folder_all: folder where the merged results are saved (str)
    :param save_mgm_png: whether to save the intermediate MGM figures (bool)
    :return: None. The merged clump catalogues (pixel-based and WCS-based, *.csv), the copied
        detection logs (*.txt) and the overview figures are written to save_folder_all;
        per-block masks (*.fits) and catalogues are written to each sub-cube's own folder.
    """
    # data = Data(data_name)
    sub_cube_path_list = split_cube.split_cube_lxy(data_name, save_folder_all)
    outcat_wcs_all = pd.DataFrame([])
    outcat_fit_wcs_all = pd.DataFrame([])
    outcat_wcs_all_name = os.path.join(save_folder_all, 'LDC_auto_outcat_wcs.csv')
    outcat_all_name = os.path.join(save_folder_all, 'LDC_auto_outcat.csv')
    outcat_fit_wcs_all_name = os.path.join(save_folder_all, 'MWISP_outcat_wcs.csv')
    outcat_fit_all_name = os.path.join(save_folder_all, 'MWISP_outcat.csv')
    # separate file names for the LDC overview figure and the fitted (MGM) overview figure
    fig_name = os.path.join(save_folder_all, 'LDC_auto_detect_result.png')
    fig_name_fit_all = fig_name.replace('.png', '_fit.png')

    for ii, sub_data_name in enumerate(sub_cube_path_list):
        sub_save_folder = sub_data_name.replace('.fits', '')
        os.makedirs(sub_save_folder, exist_ok=True)

        ldc_cfg = localDenCluster(sub_data_name, para, sub_save_folder)

        # handle the clump catalogue of this local block
        outcat_name = ldc_cfg['outcat_name']
        detect_log = ldc_cfg['detect_log']
        outcat_i = pd.read_csv(outcat_name, sep='\t')
        # take the local catalogue of the sub-cube; the kept region depends on the block position
        loc_outcat_i = split_cube.get_outcat_i(outcat_i, ii)

        data = Data(data_path=sub_data_name)
        ldc = LDC(data=data, para=None)
        outcat_wcs = ldc.change_pix2world(loc_outcat_i)
        shutil.copy(detect_log, os.path.join(save_folder_all, 'LDC_auto_detect_log_%02d.txt' % ii))
        outcat_wcs_all = pd.concat([outcat_wcs_all, outcat_wcs], axis=0)

        loc_outcat_i_name = outcat_name.replace('.csv', '_loc.csv')
        loc_outcat_i.to_csv(loc_outcat_i_name, sep='\t', index=False)

        # run the MGM (multi-Gaussian model) fitting on this block
        mask_name = ldc_cfg['mask_name']
        mgm.MGM_main(loc_outcat_i_name, sub_data_name, mask_name, sub_save_folder, save_mgm_png)

        outcat_fit_wcs_path = os.path.join(sub_save_folder, 'MWISP_outcat.csv')
        outcat_fit_wcs = pd.read_csv(outcat_fit_wcs_path, sep='\t')

        fig_name_fit = os.path.join(sub_save_folder, 'LDC_auto_detect_result_fit.png')
        data = Data(data_path=sub_data_name)
        data_wcs = data.wcs
        show_clumps.make_plot_wcs_1(outcat_fit_wcs, data_wcs, data.data_cube, fig_name=fig_name_fit)

        outcat_fit_wcs_all = pd.concat([outcat_fit_wcs_all, outcat_fit_wcs], axis=0)

    # save the merged catalogues in Galactic (WCS) coordinates (LDC and MGM catalogues)
    outcat_wcs_all.to_csv(outcat_wcs_all_name, sep='\t', index=False)
    outcat_fit_wcs_all.to_csv(outcat_fit_wcs_all_name, sep='\t', index=False)

    # save the merged pixel-based catalogues and plot the positions of the detected clumps
    data = Data(data_path=data_name)
    ldc = LDC(data=data, para=None)
    data_wcs = ldc.data.wcs

    outcat_wcs_all = pd.read_csv(outcat_wcs_all_name, sep='\t')
    outcat_all = split_cube.change_world2pix(outcat_wcs_all, data_wcs)
    outcat_all.to_csv(outcat_all_name, sep='\t', index=False)
    show_clumps.make_plot_wcs_1(outcat_wcs_all,
                                data_wcs, data.data_cube, fig_name=fig_name)

    outcat_fit_wcs_all = pd.read_csv(outcat_fit_wcs_all_name, sep='\t')
    outcat_fit_all = split_cube.change_world2pix_fit(outcat_fit_wcs_all, data_wcs)
    outcat_fit_all.to_csv(outcat_fit_all_name, sep='\t', index=False)
    show_clumps.make_plot_wcs_1(outcat_fit_wcs_all, data_wcs, data.data_cube, fig_name=fig_name_fit_all)


def LDC_MGM_main(data_name, para, save_folder=None, split=False, save_mgm_png=False, thresh_num=1):
    """
    Entry point of the LDC_MGM algorithm: runs the detection on the given data and saves the
    results to the given location.

    :param data_name: path of the data to be detected (str), a FITS file
    :param para: algorithm parameters, dict
        para.rho_min: Minimum density [5*rms]
        para.delta_min: Minimum delta [4]
        para.v_min: Minimum volume [25, 5]
        para.noise: The noise level of the data, used for data truncation calculation [2*rms]
        para.dc: auto
    :param save_folder: path where the detection results are saved; in split mode the
        intermediate results are saved there as well
    :param split: whether to use the split-into-blocks detection and re-stitching strategy
    :param save_mgm_png: whether to save the intermediate MGM figures, default False (not saved)
    :param thresh_num: passed through to mgm.MGM_main (see that function for details)

    Usage:
        from DensityClust.localDenClust2 import Param
        from LDC_MGM_main import LDC_MGM_main as ldc_mgm

        data_name = r'*******.fits'
        para = Param(delta_min=4, gradmin=0.01, v_min=[25, 5], noise_times=5, rms_times=2, rms_key='RMS')
        para.rm_touch_edge = False

        save_folder = r'########'
        save_mgm_png = False
        ldc_mgm.LDC_MGM_main(data_name, para, save_folder, split=False, save_mgm_png=save_mgm_png)
    """
    if save_folder is None:
        save_folder = data_name.replace('.fits', '')
    os.makedirs(save_folder, exist_ok=True)

    if split:
        LDC_MGM_split_mode(data_name, para, save_folder_all=save_folder, save_mgm_png=save_mgm_png)
    else:
        ldc_cfg = localDenCluster(data_name, para, save_folder)
        outcat_name = ldc_cfg['outcat_name']
        mask_name = ldc_cfg['mask_name']
        data_name = ldc_cfg['data_path']
        mgm.MGM_main(outcat_name, data_name, mask_name, save_folder, para=para, thresh_num=thresh_num, save_png=save_mgm_png)


if __name__ == '__main__':
    num = 2
    data_name = r'D:\LDC\test_data\synthetic\synthetic_model_000%d.fits' % num
    para = Param(delta_min=4, gradmin=0.01, v_min=[25, 5], noise_times=5, rms_times=2, rms_key='RMS')
    para.rm_touch_edge = False

    save_folder = r'D:\LDC\test_data\R2_R16_region\0145-005_L13_noise_2_rho_5_128_deb1'
    save_mgm_png = False
    LDC_MGM_main(data_name, para, save_folder, split=False, save_mgm_png=save_mgm_png)
PypiClean
/Droplet_Detector-0.1.3.tar.gz/Droplet_Detector-0.1.3/README.md
# Droplet-Detector

![droplet](https://github.com/kvasik3000/Droplet-Detector/assets/124969658/c1928e58-414d-49ef-97dd-20d10441bfd5)

---

![Build Status](https://github.com/kvasik3000/Droplet-Detector/actions/workflows/python-app.yml/badge.svg?branch=main)
<img src = "https://img.shields.io/badge/Code%20Coverage-80%25-success?">
<img src = 'https://img.shields.io/github/contributors/kvasik3000/Droplet-Detector?'>
<img src ='https://img.shields.io/github/repo-size/kvasik3000/Droplet-Detector?'>

---
## Introduction
Our team was tasked with creating an easy-to-use project that draws the contours of droplets. We also needed to write code to count the pixels of each droplet and add the ability to save the data as a table. This work let us study computer vision in more depth with the **OpenCV2** library in Python, taught us to work as a team, and gave us the chance to build our own Telegram bot.

### The work was done by students of group 21932:
- [Andrey Kvas](https://github.com/kvasik3000)
- [Anna Nekrasova](https://github.com/NekrasovaAnn)
- [Ksenia Belkova](https://github.com/didilovu)

## Contour detection
To detect the droplet contours correctly we used the **Canny edge** detector. It is an algorithm consisting of 4 main steps:
1. Noise reduction with Gaussian smoothing.
2. Computation of the image gradient with a Sobel filter.
3. Non-maximum suppression to discard responses that are not local maxima.
4. Hysteresis thresholding, which uses two threshold values, T_upper and T_lower, passed to the Canny() function.

To illustrate, here is the algorithm at work on the following image of droplets:

![разг](https://github.com/kvasik3000/Droplet-Detector/assets/124969658/32e768ae-bb56-4cb3-b32c-61f6f3158c25)

## Area calculation
For the area to be computed correctly, the droplet contours had to be drawn as closed curves. That was the next problem we ran into. To solve it, we applied several functions one after another:
1. Noise reduction with median smoothing.
2. Sharpening of the image with a 2D filter.
3. A Sobel mask to extract the edges.
4. Gaussian blur to reduce noise.
5. Edge detection with Canny.
6. Extraction of the array of external contours with findContours.
7. Filling of the contours with fillPoly.
8. Repetition from step 1 on the resulting mask.
9. Summation of the two resulting masks.
10. Drawing of the contours on the picture.

To count a droplet's area in pixels we used the contourArea function, which counts the pixels inside a closed contour. In the end we arrived at the following results:

![разг2](https://github.com/kvasik3000/Droplet-Detector/assets/124969658/7904d768-380c-4ff6-be3d-87fbaf159be0)

## Drawing the contours on the input image
At the end of the program we have the input image and a mask with the contours of that image. Using the bitwise_and(img, img, mask=mask) function from the **OpenCV2** library, we draw the mask directly on the input image and output this as the final result.

## Telegram bot
For the interaction between the user and the Telegram bot we used the **Telebot** library. The bot is started with the /start command, after which it greets you and asks you to enter the Help command. After that you can see the list of commands, in the specific order required for the bot to work correctly. Our bot is not very talkative, so if you try to chat with it about unrelated topics, you will not get anywhere.
![Без имени](https://github.com/kvasik3000/Droplet-Detector/assets/124969658/ff7c0fff-05f0-4ddb-8bc3-4225ed99166e)

## DockerHub
- https://hub.docker.com/repository/docker/nekrasovaanna/droplet-detector/general

Command to start working with the project
```
docker run nekrasovaanna/droplet-detector
```

## Installation from PyPI and launch
Install the Droplet_Detector library into your workspace
```
pip install Droplet_Detector
```
Before launching, create the folders "exel", "new_file" and "save_docs" in your workspace
```
mkdir exel,new_file,save_docs
```
Import the Bot.py module in your .py file
```
from Droplet_Detector import Bot
```
Start the bot by writing
```
Bot.start()
```
Link to the bot
```
https://t.me/Droplet_Detector_bot
```
## Sources
- https://www.geeksforgeeks.org/python-opencv-canny-function/
- https://opencvguide.readthedocs.io/_/downloads/en/latest/pdf/
- https://pytba.readthedocs.io/ru/latest/index.html
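
## Example pipeline sketch
To make the processing steps described above more concrete, here is a minimal, hypothetical sketch of the contour-and-area pipeline, built from the same OpenCV calls (medianBlur, filter2D, Sobel, GaussianBlur, Canny, findContours, fillPoly, contourArea, bitwise_and). This is **not** the package's actual implementation: the input file name, kernel sizes and Canny thresholds below are placeholder values chosen only for illustration.
```python
import cv2
import numpy as np

def droplet_mask_sketch(img_gray):
    """Rough illustration of the mask-building steps described above."""
    # 1. median smoothing to suppress noise
    smoothed = cv2.medianBlur(img_gray, 5)
    # 2. sharpen with a simple 2D kernel
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
    sharp = cv2.filter2D(smoothed, -1, kernel)
    # 3-4. Sobel edges followed by a Gaussian blur
    sobel = cv2.Sobel(sharp, cv2.CV_8U, 1, 1, ksize=3)
    blurred = cv2.GaussianBlur(sobel, (3, 3), 0)
    # 5. Canny edge detection
    edges = cv2.Canny(blurred, 50, 150)
    # 6-7. external contours, filled to get a solid mask
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    mask = np.zeros_like(img_gray)
    cv2.fillPoly(mask, contours, 255)
    return mask

img = cv2.imread("droplets.png", cv2.IMREAD_GRAYSCALE)  # placeholder input image
mask = droplet_mask_sketch(img)

# area of every droplet in pixels, counted inside each closed contour
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    print("droplet area in pixels:", cv2.contourArea(c))

# draw the mask directly on the input image
result = cv2.bitwise_and(img, img, mask=mask)
```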
PypiClean
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/doc/extdev/parserapi.rst
.. _parser-api: Parser API ========== `The docutils documentation describes`__ parsers as follows: The Parser analyzes the input document and creates a node tree representation. __ http://docutils.sourceforge.net/docs/dev/hacking.html#parsing-the-document In Sphinx, the parser modules works as same as docutils. The parsers are registered to Sphinx by extensions using Application APIs; :meth:`.Sphinx.add_source_suffix()` and :meth:`.Sphinx.add_source_parser()`. The *source suffix* is a mapping from file suffix to file type. For example, ``.rst`` file is mapped to ``'restructuredtext'`` type. Sphinx uses the file type to looking for parsers from registered list. On searching, Sphinx refers to the ``Parser.supported`` attribute and picks up a parser which contains the file type in the attribute. The users can override the source suffix mappings using :confval:`source_suffix` like following:: # a mapping from file suffix to file types source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } You should indicate file types your parser supports. This will allow users to configure their settings appropriately. .. module:: sphinx.parsers .. autoclass:: Parser
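
For illustration only, a minimal extension that registers a custom parser could
look roughly like the following sketch.  The class name and the ``plaintext``
file type used here are made up for the example; only
:meth:`.Sphinx.add_source_suffix()` and :meth:`.Sphinx.add_source_parser()` are
actual Sphinx APIs::

    from docutils import nodes

    from sphinx.parsers import Parser


    class PlainTextParser(Parser):
        """Parse the whole source file into a single paragraph."""

        supported = ('plaintext',)

        def parse(self, inputstring, document):
            document += nodes.paragraph(text=inputstring)


    def setup(app):
        app.add_source_suffix('.txt', 'plaintext')
        app.add_source_parser(PlainTextParser)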
PypiClean
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/examples/meshing/gmshRefinement.py
from __future__ import division from __future__ import unicode_literals from builtins import range from fipy import input from fipy import CellVariable, Gmsh2D, DiffusionTerm, Viewer from fipy.tools import numerix from matplotlib import cm monitor = None geo = """\ c1_x = 6; c1_y = 6; c2_x = 4; c2_y = 4; r = 0.2; Point(1) = {0, 0, 0, 1.0}; Point(2) = {10, 0, 0, 1.0}; Point(3) = {10, 10, 0, 1.0}; Point(4) = {1, 10, 0, 1.0}; Point(5) = {0, 10, 0, 1.0}; Point(6) = {0, 9, 0, 1.0}; Point(7) = {c1_x, c1_y, 0, 1.0}; Point(8) = {c1_x, c1_y + r, 0, 1.0}; Point(9) = {c1_x + r, c1_y, 0, 1.0}; Point(10) = {c1_x, c1_y - r, 0, 1.0}; Point(11) = {c1_x - r, c1_y, 0, 1.0}; Point(12) = {c2_x, c2_y, 0, 1.0}; Point(13) = {c2_x, c2_y + r, 0, 1.0}; Point(14) = {c2_x + r, c2_y, 0, 1.0}; Point(15) = {c2_x, c2_y - r, 0, 1.0}; Point(16) = {c2_x - r, c2_y, 0, 1.0}; Line(1) = {1, 2}; Line(2) = {2, 3}; Line(3) = {3, 4}; Line(4) = {4, 5}; Line(5) = {5, 6}; Line(6) = {6, 1}; Circle(7) = {8, 7, 11}; Circle(8) = {11, 7, 10}; Circle(9) = {10, 7, 9}; Circle(10) = {9, 7, 8}; Circle(11) = {13, 12, 16}; Circle(12) = {16, 12, 15}; Circle(13) = {15, 12, 14}; Circle(14) = {14, 12, 13}; Line Loop(1) = {1, 2, 3, 4, 5, 6}; Line Loop(2) = {7, 8, 9, 10}; Line Loop(3) = {11, 12, 13, 14}; Plane Surface(1) = {1, 2, 3}; Plane Surface(2) = {2}; Plane Surface(3) = {3}; Physical Line("Ground") = {4, 5, 6}; Physical Surface("Field") = {1}; Physical Surface("Anode") = {2}; Physical Surface("Cathode") = {3}; """ for refinement in range(10): mesh = Gmsh2D(geo, background=monitor) charge = CellVariable(mesh=mesh, name=r"$\rho$", value=0.) charge.setValue(+1, where=mesh.physicalCells["Anode"]) charge.setValue(-1, where=mesh.physicalCells["Cathode"]) potential = CellVariable(mesh=mesh, name=r"$\psi$") potential.constrain(0., where=mesh.physicalFaces["Ground"]) eq = DiffusionTerm(coeff=1.) == -charge res0 = eq.sweep(var=potential) res = eq.justResidualVector(var=potential) res1 = numerix.L2norm(res) res1a = CellVariable(mesh=mesh, value=abs(res)) res = CellVariable(mesh=mesh, name="residual", value=abs(res) / mesh.cellVolumes**(1./mesh.dim) / 1e-3) # want cells no bigger than 1 and no smaller than 0.001 maxSize = 1. minSize = 0.001 monitor = CellVariable(mesh=mesh, name="monitor", value= 1. / (res + maxSize) + minSize) viewer = Viewer(vars=potential, xmin=3.5, xmax=4.5, ymin=3.5, ymax=4.5) # viewer = Viewer(vars=(potential, charge)) viewer.plot() # resviewer = Viewer(vars=res1a, log=True, datamin=1e-6, datamax=1e-2, cmap=cm.gray) # monviewer = Viewer(vars=monitor, log=True, datamin=1e-3, datamax=1) input("refinement %d, res0: %g, res: %g:%g, N: %d, min: %g, max: %g, avg: %g. Press <return> to proceed..." \ % (refinement, res0, res1, res1a.cellVolumeAverage, mesh.numberOfCells, numerix.sqrt(min(mesh.cellVolumes)), numerix.sqrt(max(mesh.cellVolumes)), numerix.mean(numerix.sqrt(mesh.cellVolumes))))
PypiClean
/2dwavesim-1.0.0.tar.gz/2dwavesim-1.0.0/README.md
# 2dwavesim

This is a project that simulates waves on 2D plates/rooms. Given boundaries (or walls) and points where oscillation will be forced, this will simulate the resulting wavemodes!

Currently it supports setting the attenuation properties of individual boundaries, multiple forcing points based on either data or a function, and any wall shape you want. It also supports variable time and space steps and spans (as long as you keep things numerically stable!), as well as custom wavespeed and attenuation on the material.

![example](https://github.com/cooperhatfield/2dwavesim/blob/main/exampleimages/example.webp)

TODO:
- add tests
- frequency-dependent wall transmission values
- 3D??

## Usage

There are two main Classes:

`Room(ds, width, height,*, walls=[], physics_params={})`
<ul>

This creates an instance of a `Room` class, which contains any walls or sources of the system.

`ds` is a float which defines the unit of distance between two grid points.

`width` and `height` are floats which define the dimensions of the grid. If they are not exact multiples of `ds`, then they are upper bounds on the number of points above the nearest multiple.

`walls` is a list of `Wall` objects. This can be optionally set after construction as well.

`physics_params` is a dict with structure `{'wavespeed':float, 'attenuation':float}`. Wavespeed represents the speed of the propagating wave on the room's medium, and attenuation represents the attenuation factor of waves on the medium. By default, wavespeed is assumed to be 343 units/s and attenuation is assumed to be $2^{-2}$ units $^{-1}$.

**`Room.add_source_func(loc, func)`**
<ul>

Add a source based on a function.

`loc` is the room-specific coordinate of the source. Note: unless `ds=1`, this is not the same as the list indices of the point in the room.

`func` is a function taking a float (the time) and outputting a float (the value of the wave at that time). This should be something like `lambda t: np.sin(t)`, for example.
</ul>

**`Room.add_source_data(loc, data)`**
<ul>

Add a source based on a list of values. Careful! Make sure you use a `dt` value that matches the table data, as an entry of the data list will be used on every time tick. For example, if you make the data table represent the value of the wave every 200ms, then be sure to set `dt` to 200ms as well when you run the simulation. If there are fewer points in the list of values than there are time steps, then a value of 0 is used for all time steps past the last data point.

`loc` is the room-specific coordinate of the source. Note: unless `ds=1`, this is not the same as the list indices of the point in the room.

`data` is a list of floats (the value of the wave at that time). This should be something like `np.sin(np.arange(0, 10, 0.2))`, for example.
</ul>

**`Room.add_walls(walls)`**
<ul>

Add walls to the system after constructing the Room object.

`walls` is a list of `Wall` objects to add to the system.
</ul>

**`Room.create_mask()`**
<ul>

Create a mask for the values of the room based on the currently set walls. This is automatically done when running the simulation, but it can be run beforehand if you want to plot the mask for visualization.
</ul>

**`Room.get_mask()`**
<ul>

Returns a 2D array of the wall mask as currently calculated.
</ul>

**`Room.run(dt, t_final)`**
<ul>

Calculate the wave propagation from the set sources and using the set walls. This will simulate from `t=0` to `t_final` at `dt` time steps. If `t_final` isn't an exact multiple of `dt`, then it acts like an upper bound.
`dt` is a float giving the time step for the simulation. A smaller value means more time resolution. WARNING: Numerical stability will almost certainly be lost if this is not set to satisfy the [CFL Condition](https://en.wikipedia.org/wiki/Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition), namely $\frac{u \, dt}{ds} \leq C_{max}$ where $u$ is the `wavespeed` and $C_{max}$ is approximately 1 for the numerical method being used.

`t_final` is a float giving an upper limit for the amount of time to be simulated. A higher value will take more time to simulate, and will likely just repeat the steady state after a certain point in time...
</ul>
</ul>

`Wall(endpoint1, endpoint2, transmission)`
<ul>

This creates an instance of a `Wall` class, which contains the wall's endpoints and transmission factor.

`endpoint1` and `endpoint2` are 2-tuples or 2-lists of floats giving the position of each end of the wall in the room-specific coordinates. Note: unless `ds=1`, this is not the same as the list indices of the point in the room.

`transmission` is a float in [0,1] which defines the proportion of wave amplitude able to penetrate the wall. If 0, then all energy is reflected back inwards, and if 1 then the wall "isn't there".
</ul>

## Visualization

The `visualization` module contains a few functions for visualizing results, or processing results into an easily displayed format.

**`animate(data, *, filepath='', frame_space=10, walls=[])`**
<ul>

Automatically animate the given data using `matplotlib.animation.ArtistAnimation`. The animation can optionally be saved to a file.

`data` is a 3D array of waveform over time, which is the output from running the simulation.

`filepath` is the name and path of the output file. Leave this blank to not save. Output formats are those supported by `matplotlib.animation.ArtistAnimation`, which is at least ".gif" and ".webp".

`frame_space` is the temporal resolution of the resulting animation. Make sure this isn't too small!

`walls` is to optionally include the walls in the animation. They won't be visible if this isn't included.
</ul>

**`get_steady_state_index(data, *, sample_points, rms_tolerance=0.1, window_size=0.1)`**
<ul>

This function calculates the windowed RMS of the given points over time. This data is compared to the RMS value at the end of the simulation. Then the latest time index where all point RMSs are within a tolerance of the final RMS is taken as the time index where steady-state is reached.

`data` is a 3D array of waveform over time, which is the output from running the simulation.

`sample_points` is a list of points in the room which will be checked for RMS.

`rms_tolerance` is a float in [0, 1] defining the limit on the amount the RMS is allowed to change from the final value and still be considered steady-state.

`window_size` is a float in [0, 1] defining the percent of total points to consider in the window.
</ul>

**`get_standing_waves(data, *, steady_state_kwargs=None)`**
<ul>

This function calculates when the steady state begins, and returns a 2D array which is the average of the absolute value of all of the room's points across all steady-state times.

`data` is a 3D array of waveform over time, which is the output from running the simulation.

`steady_state_kwargs` is a dict of the keyword arguments to pass to `get_steady_state_index`. If `None`, then the default parameters and a sample point at the middle of the room are used.
</ul>
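Putting the API together, a minimal end-to-end run might look something like the sketch below. The import path (`wavesim`) and the assumption that `Room.run()` returns the 3D waveform array used by the visualization functions are guesses, since the README does not show them; the constructor and function signatures follow the documentation above.

```python
import numpy as np
# "wavesim" is an assumed import name -- adjust to the package's real module name.
from wavesim import Room, Wall, visualization

# a 10x10 room sampled every 0.1 units, with explicit (default-like) physics
room = Room(ds=0.1, width=10, height=10,
            physics_params={'wavespeed': 343, 'attenuation': 2**-2})

# one partially transmissive wall across part of the room
walls = [Wall((0, 5), (6, 5), transmission=0.3)]
room.add_walls(walls)

# drive a single point with a 20 Hz sine
room.add_source_func((2, 2), lambda t: np.sin(2 * np.pi * 20 * t))

# keep the CFL condition satisfied: wavespeed * dt / ds <= ~1
dt = 0.5 * 0.1 / 343
data = room.run(dt, t_final=0.05)   # assumes run() returns the waveform history

# animate, drawing the walls as well
visualization.animate(data, filepath='example.webp', frame_space=10, walls=walls)
```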
PypiClean
/Kr0nOs_Bot-3.3.11-py3-none-any.whl/redbot/core/core_commands.py
import asyncio import contextlib import datetime import importlib import itertools import logging import os import re import sys import platform import getpass import pip import traceback from collections import namedtuple from pathlib import Path from random import SystemRandom from string import ascii_letters, digits from typing import TYPE_CHECKING, Union, Tuple, List, Optional, Iterable, Sequence, Dict, Set import aiohttp import discord import pkg_resources from babel import Locale as BabelLocale, UnknownLocaleError from redbot.core.data_manager import storage_type from . import ( __version__, version_info as red_version_info, VersionInfo, checks, commands, drivers, errors, i18n, config, ) from .utils import AsyncIter from .utils.predicates import MessagePredicate from .utils.chat_formatting import ( box, escape, humanize_list, humanize_number, humanize_timedelta, inline, pagify, ) from .commands.requires import PrivilegeLevel if TYPE_CHECKING: from redbot.core.bot import Red __all__ = ["Core"] log = logging.getLogger("red") _ = i18n.Translator("Core", __file__) TokenConverter = commands.get_dict_converter(delims=[" ", ",", ";"]) class CoreLogic: def __init__(self, bot: "Red"): self.bot = bot self.bot.register_rpc_handler(self._load) self.bot.register_rpc_handler(self._unload) self.bot.register_rpc_handler(self._reload) self.bot.register_rpc_handler(self._name) self.bot.register_rpc_handler(self._prefixes) self.bot.register_rpc_handler(self._version_info) self.bot.register_rpc_handler(self._invite_url) async def _load( self, cog_names: Iterable[str] ) -> Tuple[List[str], List[str], List[str], List[str], List[Tuple[str, str]], Set[str]]: """ Loads cogs by name. Parameters ---------- cog_names : list of str Returns ------- tuple 4-tuple of loaded, failed, not found and already loaded cogs. """ failed_packages = [] loaded_packages = [] notfound_packages = [] alreadyloaded_packages = [] failed_with_reason_packages = [] repos_with_shared_libs = set() bot = self.bot cogspecs = [] for name in cog_names: try: spec = await bot._cog_mgr.find_cog(name) if spec: cogspecs.append((spec, name)) else: notfound_packages.append(name) except Exception as e: log.exception("Package import failed", exc_info=e) exception_log = "Exception during import of cog\n" exception_log += "".join(traceback.format_exception(type(e), e, e.__traceback__)) bot._last_exception = exception_log failed_packages.append(name) async for spec, name in AsyncIter(cogspecs, steps=10): try: self._cleanup_and_refresh_modules(spec.name) await bot.load_extension(spec) except errors.PackageAlreadyLoaded: alreadyloaded_packages.append(name) except errors.CogLoadError as e: failed_with_reason_packages.append((name, str(e))) except Exception as e: log.exception("Package loading failed", exc_info=e) exception_log = "Exception during loading of cog\n" exception_log += "".join(traceback.format_exception(type(e), e, e.__traceback__)) bot._last_exception = exception_log failed_packages.append(name) else: await bot.add_loaded_package(name) loaded_packages.append(name) # remove in Red 3.4 downloader = bot.get_cog("Downloader") if downloader is None: continue try: maybe_repo = await downloader._shared_lib_load_check(name) except Exception: log.exception( "Shared library check failed," " if you're not using modified Downloader, report this issue." 
) maybe_repo = None if maybe_repo is not None: repos_with_shared_libs.add(maybe_repo.name) return ( loaded_packages, failed_packages, notfound_packages, alreadyloaded_packages, failed_with_reason_packages, repos_with_shared_libs, ) @staticmethod def _cleanup_and_refresh_modules(module_name: str) -> None: """Interally reloads modules so that changes are detected""" splitted = module_name.split(".") def maybe_reload(new_name): try: lib = sys.modules[new_name] except KeyError: pass else: importlib._bootstrap._exec(lib.__spec__, lib) # noinspection PyTypeChecker modules = itertools.accumulate(splitted, "{}.{}".format) for m in modules: maybe_reload(m) children = {name: lib for name, lib in sys.modules.items() if name.startswith(module_name)} for child_name, lib in children.items(): importlib._bootstrap._exec(lib.__spec__, lib) async def _unload(self, cog_names: Iterable[str]) -> Tuple[List[str], List[str]]: """ Unloads cogs with the given names. Parameters ---------- cog_names : list of str Returns ------- tuple 2 element tuple of successful unloads and failed unloads. """ failed_packages = [] unloaded_packages = [] bot = self.bot for name in cog_names: if name in bot.extensions: bot.unload_extension(name) await bot.remove_loaded_package(name) unloaded_packages.append(name) else: failed_packages.append(name) return unloaded_packages, failed_packages async def _reload( self, cog_names: Sequence[str] ) -> Tuple[List[str], List[str], List[str], List[str], List[Tuple[str, str]], Set[str]]: await self._unload(cog_names) ( loaded, load_failed, not_found, already_loaded, load_failed_with_reason, repos_with_shared_libs, ) = await self._load(cog_names) return ( loaded, load_failed, not_found, already_loaded, load_failed_with_reason, repos_with_shared_libs, ) async def _name(self, name: Optional[str] = None) -> str: """ Gets or sets the bot's username. Parameters ---------- name : str If passed, the bot will change it's username. Returns ------- str The current (or new) username of the bot. """ if name is not None: await self.bot.user.edit(username=name) return self.bot.user.name async def _prefixes(self, prefixes: Optional[Sequence[str]] = None) -> List[str]: """ Gets or sets the bot's global prefixes. Parameters ---------- prefixes : list of str If passed, the bot will set it's global prefixes. Returns ------- list of str The current (or new) list of prefixes. """ if prefixes: await self.bot._prefix_cache.set_prefixes(guild=None, prefixes=prefixes) return prefixes return await self.bot._prefix_cache.get_prefixes(guild=None) @classmethod async def _version_info(cls) -> Dict[str, str]: """ Version information for Red and discord.py Returns ------- dict `redbot` and `discordpy` keys containing version information for both. """ return {"redbot": __version__, "discordpy": discord.__version__} async def _invite_url(self) -> str: """ Generates the invite URL for the bot. Returns ------- str Invite URL. 
""" app_info = await self.bot.application_info() perms_int = await self.bot._config.invite_perm() permissions = discord.Permissions(perms_int) return discord.utils.oauth_url(app_info.id, permissions) @staticmethod async def _can_get_invite_url(ctx): is_owner = await ctx.bot.is_owner(ctx.author) is_invite_public = await ctx.bot._config.invite_public() return is_owner or is_invite_public @i18n.cog_i18n(_) class Core(commands.Cog, CoreLogic): """Commands related to core functions""" # @commands.command(hidden=True) # async def ping(self, ctx: commands.Context): # """Pong.""" # await ctx.send("Pong.") @commands.command() async def info(self, ctx: commands.Context): """Shows info about Kr0nOs""" author_repo = "https://github.com/Twentysix26" org_repo = "https://github.com/Cog-Creators" red_repo = org_repo + "/Red-DiscordBot" red_pypi = "https://pypi.org/project/Red-DiscordBot" red_pypi_json = "https://pypi.org/pypi/Red-DiscordBot/json" support_server_url = "https://kable.lol/discord" dpy_repo = "https://github.com/Rapptz/discord.py" python_url = "https://www.python.org/" since = datetime.datetime(2016, 1, 2, 0, 0) days_since = (datetime.datetime.utcnow() - since).days dpy_version = "[{}]({})".format(discord.__version__, dpy_repo) python_version = "[{}.{}.{}]({})".format(*sys.version_info[:3], python_url) red_version = "[{}]({})".format(__version__, red_pypi) app_info = await self.bot.application_info() if app_info.team: owner = app_info.team.name else: owner = app_info.owner custom_info = await self.bot._config.custom_info() try: async with aiohttp.ClientSession() as session: async with session.get(red_pypi_json) as r: data = await r.json() except (aiohttp.ClientError, asyncio.TimeoutError): outdated = None else: outdated = VersionInfo.from_str(data["info"]["version"]) > red_version_info about = _( "Let's get toxic af. Kr0nOs is built using Red-Discord's framework," "facing d.py. Join the [support server](https://kable.lol/discord) " "if you run into any issues" ).format(red_repo, author_repo, org_repo, support_server_url) embed = discord.Embed(color=(await ctx.embed_colour())) embed.add_field(name=_("Instance owned by"), value=str(owner)) embed.add_field(name="Python", value=python_version) embed.add_field(name="discord.py", value=dpy_version) embed.add_field(name=_("Kr0nOs version"), value=red_version) if outdated in (True, None): if outdated is True: outdated_value = _("Yes, {version} is available.").format( version=data["info"]["version"] ) else: outdated_value = _("No") embed.add_field(name=_("Outdated"), value=outdated_value) if custom_info: embed.add_field(name=_("About this instance"), value=custom_info, inline=False) embed.add_field(name=_("About Kr0nOs"), value=about, inline=False) embed.set_footer(text=_("Kr0nOs Framework by KableKompany#0001").format(days_since)) try: await ctx.send(embed=embed) except discord.HTTPException: await ctx.send(_("I need the `Embed links` permission to send this")) @commands.command() async def uptime(self, ctx: commands.Context): """Shows [botname]'s uptime""" since = ctx.bot.uptime.strftime("%Y-%m-%d %H:%M:%S") delta = datetime.datetime.utcnow() - self.bot.uptime uptime_str = humanize_timedelta(timedelta=delta) or _("Less than one second") await ctx.send( _("Been up for: **{time_quantity}** (since {timestamp} UTC)").format( time_quantity=uptime_str, timestamp=since ) ) @commands.group() async def embedset(self, ctx: commands.Context): """ Commands for toggling embeds on or off. 
This setting determines whether or not to use embeds as a response to a command (for commands that support it). The default is to use embeds. """ if ctx.invoked_subcommand is None: text = _("Embed settings:\n\n") global_default = await self.bot._config.embeds() text += _("Global default: {}\n").format(global_default) if ctx.guild: guild_setting = await self.bot._config.guild(ctx.guild).embeds() text += _("Guild setting: {}\n").format(guild_setting) if ctx.channel: channel_setting = await self.bot._config.channel(ctx.channel).embeds() text += _("Channel setting: {}\n").format(channel_setting) user_setting = await self.bot._config.user(ctx.author).embeds() text += _("User setting: {}").format(user_setting) await ctx.send(box(text)) @embedset.command(name="global") @checks.is_owner() async def embedset_global(self, ctx: commands.Context): """ Toggle the global embed setting. This is used as a fallback if the user or guild hasn't set a preference. The default is to use embeds. """ current = await self.bot._config.embeds() await self.bot._config.embeds.set(not current) await ctx.send( _("Embeds are now {} by default.").format(_("disabled") if current else _("enabled")) ) @embedset.command(name="guild") @checks.guildowner_or_permissions(administrator=True) @commands.guild_only() async def embedset_guild(self, ctx: commands.Context, enabled: bool = None): """ Toggle the guild's embed setting. If enabled is None, the setting will be unset and the global default will be used instead. If set, this is used instead of the global default to determine whether or not to use embeds. This is used for all commands done in a guild channel except for help commands. """ await self.bot._config.guild(ctx.guild).embeds.set(enabled) if enabled is None: await ctx.send(_("Embeds will now fall back to the global setting.")) else: await ctx.send( _("Embeds are now {} for this guild.").format( _("enabled") if enabled else _("disabled") ) ) @embedset.command(name="channel") @checks.guildowner_or_permissions(administrator=True) @commands.guild_only() async def embedset_channel(self, ctx: commands.Context, enabled: bool = None): """ Toggle the channel's embed setting. If enabled is None, the setting will be unset and the guild default will be used instead. If set, this is used instead of the guild default to determine whether or not to use embeds. This is used for all commands done in a channel except for help commands. """ await self.bot._config.channel(ctx.channel).embeds.set(enabled) if enabled is None: await ctx.send(_("Embeds will now fall back to the global setting.")) else: await ctx.send( _("Embeds are now {} for this channel.").format( _("enabled") if enabled else _("disabled") ) ) @embedset.command(name="user") async def embedset_user(self, ctx: commands.Context, enabled: bool = None): """ Toggle the user's embed setting. If enabled is None, the setting will be unset and the global default will be used instead. If set, this is used instead of the global default to determine whether or not to use embeds. This is used for all commands done in a DM with the bot, as well as all help commands everywhere. 
""" await self.bot._config.user(ctx.author).embeds.set(enabled) if enabled is None: await ctx.send(_("Embeds will now fall back to the global setting.")) else: await ctx.send( _("Embeds are now {} for you.").format(_("enabled") if enabled else _("disabled")) ) @commands.command() @checks.is_owner() async def traceback(self, ctx: commands.Context, public: bool = False): """Sends to the owner the last command exception that has occurred If public (yes is specified), it will be sent to the chat instead""" if not public: destination = ctx.author else: destination = ctx.channel if self.bot._last_exception: for page in pagify(self.bot._last_exception, shorten_by=10): await destination.send(box(page, lang="py")) else: await ctx.send(_("No exception has occurred yet")) # @commands.command() # @commands.check(CoreLogic._can_get_invite_url) # async def invite(self, ctx): # """Show's [botname]'s invite url""" # try: # await ctx.author.send(await self._invite_url()) # except discord.errors.Forbidden: # await ctx.send( # "I couldn't send the invite message to you in DM. " # "Either you blocked me or you disabled DMs in this server." # ) @commands.group() @checks.is_owner() async def inviteset(self, ctx): """Setup the bot's invite""" pass @inviteset.command() async def public(self, ctx, confirm: bool = False): """ Define if the command should be accessible for the average user. """ if await self.bot._config.invite_public(): await self.bot._config.invite_public.set(False) await ctx.send("The invite is now private.") return app_info = await self.bot.application_info() if not app_info.bot_public: await ctx.send( "I am not a public bot. That means that nobody except " "you can invite me on new servers.\n\n" "You can change this by ticking `Public bot` in " "your token settings: " "https://discordapp.com/developers/applications/me/{0}".format(self.bot.user.id) ) return if not confirm: await ctx.send( "You're about to make the `{0}invite` command public. " "All users will be able to invite me on their server.\n\n" "If you agree, you can type `{0}inviteset public yes`.".format(ctx.clean_prefix) ) else: await self.bot._config.invite_public.set(True) await ctx.send("The invite command is now public.") @inviteset.command() async def perms(self, ctx, level: int): """ Make the bot create its own role with permissions on join. The bot will create its own role with the desired permissions\ when it joins a new server. This is a special role that can't be\ deleted or removed from the bot. For that, you need to provide a valid permissions level. You can generate one here: https://discordapi.com/permissions.html Please note that you might need two factor authentification for\ some permissions. """ await self.bot._config.invite_perm.set(level) await ctx.send("The new permissions level has been set.") @commands.command() @commands.guild_only() @checks.is_owner() async def leave(self, ctx: commands.Context): """Leaves server""" await ctx.send(_("Are you sure you want me to leave this server? (y/n)")) pred = MessagePredicate.yes_or_no(ctx) try: await self.bot.wait_for("message", check=pred) except asyncio.TimeoutError: await ctx.send(_("Response timed out.")) return else: if pred.result is True: await ctx.send(_("Alright. 
Bye :wave:")) log.debug(_("Leaving guild '{}'").format(ctx.guild.name)) await ctx.guild.leave() else: await ctx.send(_("Alright, I'll stay then :)")) @commands.command() @checks.is_owner() async def servers(self, ctx: commands.Context): """Lists and allows to leave servers""" guilds = sorted(list(self.bot.guilds), key=lambda s: s.name.lower()) msg = "" responses = [] for i, server in enumerate(guilds, 1): msg += "{}: {} (`{}`)\n".format(i, server.name, server.id) responses.append(str(i)) for page in pagify(msg, ["\n"]): await ctx.send(page) query = await ctx.send(_("To leave a server, just type its number.")) pred = MessagePredicate.contained_in(responses, ctx) try: await self.bot.wait_for("message", check=pred, timeout=15) except asyncio.TimeoutError: try: await query.delete() except discord.errors.NotFound: pass else: await self.leave_confirmation(guilds[pred.result], ctx) async def leave_confirmation(self, guild, ctx): if guild.owner.id == ctx.bot.user.id: await ctx.send(_("I cannot leave a guild I am the owner of.")) return await ctx.send(_("Are you sure you want me to leave {}? (yes/no)").format(guild.name)) pred = MessagePredicate.yes_or_no(ctx) try: await self.bot.wait_for("message", check=pred, timeout=15) if pred.result is True: await guild.leave() if guild != ctx.guild: await ctx.send(_("Done.")) else: await ctx.send(_("Alright then.")) except asyncio.TimeoutError: await ctx.send(_("Response timed out.")) @commands.command() @checks.is_owner() async def load(self, ctx: commands.Context, *cogs: str): """Loads packages""" if not cogs: return await ctx.send_help() cogs = tuple(map(lambda cog: cog.rstrip(","), cogs)) async with ctx.typing(): ( loaded, failed, not_found, already_loaded, failed_with_reason, repos_with_shared_libs, ) = await self._load(cogs) output = [] if loaded: loaded_packages = humanize_list([inline(package) for package in loaded]) formed = _("Loaded {packs}.").format(packs=loaded_packages) output.append(formed) if already_loaded: if len(already_loaded) == 1: formed = _("The following package is already loaded: {pack}").format( pack=inline(already_loaded[0]) ) else: formed = _("The following packages are already loaded: {packs}").format( packs=humanize_list([inline(package) for package in already_loaded]) ) output.append(formed) if failed: if len(failed) == 1: formed = _( "Failed to load the following package: {pack}." "\nCheck your console or logs for details." ).format(pack=inline(failed[0])) else: formed = _( "Failed to load the following packages: {packs}" "\nCheck your console or logs for details." 
).format(packs=humanize_list([inline(package) for package in failed])) output.append(formed) if not_found: if len(not_found) == 1: formed = _("The following package was not found in any cog path: {pack}.").format( pack=inline(not_found[0]) ) else: formed = _( "The following packages were not found in any cog path: {packs}" ).format(packs=humanize_list([inline(package) for package in not_found])) output.append(formed) if failed_with_reason: reasons = "\n".join([f"`{x}`: {y}" for x, y in failed_with_reason]) if len(failed_with_reason) == 1: formed = _( "This package could not be loaded for the following reason:\n\n{reason}" ).format(reason=reasons) else: formed = _( "These packages could not be loaded for the following reasons:\n\n{reasons}" ).format(reasons=reasons) output.append(formed) if repos_with_shared_libs: if len(repos_with_shared_libs) == 1: formed = _( "**WARNING**: The following repo is using shared libs" " which are marked for removal in Red 3.4: {repo}.\n" "You should inform maintainer of the repo about this message." ).format(repo=inline(repos_with_shared_libs.pop())) else: formed = _( "**WARNING**: The following repos are using shared libs" " which are marked for removal in Red 3.4: {repos}.\n" "You should inform maintainers of these repos about this message." ).format(repos=humanize_list([inline(repo) for repo in repos_with_shared_libs])) output.append(formed) if output: total_message = "\n\n".join(output) for page in pagify(total_message): await ctx.send(page) @commands.command() @checks.is_owner() async def unload(self, ctx: commands.Context, *cogs: str): """Unloads packages""" if not cogs: return await ctx.send_help() cogs = tuple(map(lambda cog: cog.rstrip(","), cogs)) unloaded, failed = await self._unload(cogs) output = [] if unloaded: if len(unloaded) == 1: formed = _("The following package was unloaded: {pack}.").format( pack=inline(unloaded[0]) ) else: formed = _("The following packages were unloaded: {packs}.").format( packs=humanize_list([inline(package) for package in unloaded]) ) output.append(formed) if failed: if len(failed) == 1: formed = _("The following package was not loaded: {pack}.").format( pack=inline(failed[0]) ) else: formed = _("The following packages were not loaded: {packs}.").format( packs=humanize_list([inline(package) for package in failed]) ) output.append(formed) if output: total_message = "\n\n".join(output) for page in pagify(total_message): await ctx.send(page) @commands.command(name="reload") @checks.is_owner() async def reload(self, ctx: commands.Context, *cogs: str): """Reloads packages""" if not cogs: return await ctx.send_help() cogs = tuple(map(lambda cog: cog.rstrip(","), cogs)) async with ctx.typing(): ( loaded, failed, not_found, already_loaded, failed_with_reason, repos_with_shared_libs, ) = await self._reload(cogs) output = [] if loaded: loaded_packages = humanize_list([inline(package) for package in loaded]) formed = _("Reloaded {packs}.").format(packs=loaded_packages) output.append(formed) if failed: if len(failed) == 1: formed = _( "Failed to reload the following package: {pack}." "\nCheck your console or logs for details." ).format(pack=inline(failed[0])) else: formed = _( "Failed to reload the following packages: {packs}" "\nCheck your console or logs for details." 
).format(packs=humanize_list([inline(package) for package in failed])) output.append(formed) if not_found: if len(not_found) == 1: formed = _("The following package was not found in any cog path: {pack}.").format( pack=inline(not_found[0]) ) else: formed = _( "The following packages were not found in any cog path: {packs}" ).format(packs=humanize_list([inline(package) for package in not_found])) output.append(formed) if failed_with_reason: reasons = "\n".join([f"`{x}`: {y}" for x, y in failed_with_reason]) if len(failed_with_reason) == 1: formed = _( "This package could not be reloaded for the following reason:\n\n{reason}" ).format(reason=reasons) else: formed = _( "These packages could not be reloaded for the following reasons:\n\n{reasons}" ).format(reasons=reasons) output.append(formed) if repos_with_shared_libs: if len(repos_with_shared_libs) == 1: formed = _( "**WARNING**: The following repo is using shared libs" " which are marked for removal in Red 3.4: {repo}.\n" "You should inform maintainers of these repos about this message." ).format(repo=inline(repos_with_shared_libs.pop())) else: formed = _( "**WARNING**: The following repos are using shared libs" " which are marked for removal in Red 3.4: {repos}.\n" "You should inform maintainers of these repos about this message." ).format(repos=humanize_list([inline(repo) for repo in repos_with_shared_libs])) output.append(formed) if output: total_message = "\n\n".join(output) for page in pagify(total_message): await ctx.send(page) @commands.command(name="shutdown") @checks.is_owner() async def _shutdown(self, ctx: commands.Context, silently: bool = False): """Shuts down the bot""" wave = "\N{WAVING HAND SIGN}" skin = "\N{EMOJI MODIFIER FITZPATRICK TYPE-3}" with contextlib.suppress(discord.HTTPException): if not silently: await ctx.send(_("Shutting down... ") + wave + skin) await ctx.bot.shutdown() @commands.command(name="restart") @checks.is_owner() async def _restart(self, ctx: commands.Context, silently: bool = False): """Attempts to restart Red Makes Red quit with exit code 26 The restart is not guaranteed: it must be dealt with by the process manager in use""" with contextlib.suppress(discord.HTTPException): if not silently: await ctx.send(_("Restarting...")) await ctx.bot.shutdown(restart=True) @commands.group(name="set") async def _set(self, ctx: commands.Context): """Changes [botname]'s settings""" if ctx.invoked_subcommand is None: if ctx.guild: guild_data = await ctx.bot._config.guild(ctx.guild).all() guild = ctx.guild admin_role_ids = guild_data["admin_role"] admin_role_names = [r.name for r in guild.roles if r.id in admin_role_ids] admin_roles_str = ( humanize_list(admin_role_names) if admin_role_names else "Not Set." ) mod_role_ids = guild_data["mod_role"] mod_role_names = [r.name for r in guild.roles if r.id in mod_role_ids] mod_roles_str = humanize_list(mod_role_names) if mod_role_names else "Not Set." 
guild_settings = _("Admin roles: {admin}\nMod roles: {mod}\n").format( admin=admin_roles_str, mod=mod_roles_str ) else: guild_settings = "" prefixes = await ctx.bot._prefix_cache.get_prefixes(ctx.guild) global_data = await ctx.bot._config.all() locale = global_data["locale"] regional_format = global_data["regional_format"] or _("Same as bot's locale") prefix_string = " ".join(prefixes) settings = _( "{bot_name} Settings:\n\n" "Prefixes: {prefixes}\n" "{guild_settings}" "Locale: {locale}\n" "Regional format: {regional_format}" ).format( bot_name=ctx.bot.user.name, prefixes=prefix_string, guild_settings=guild_settings, locale=locale, regional_format=regional_format, ) for page in pagify(settings): await ctx.send(box(page)) @checks.guildowner_or_permissions(administrator=True) @_set.command(name="deletedelay") @commands.guild_only() async def deletedelay(self, ctx: commands.Context, time: int = None): """Set the delay until the bot removes the command message. Must be between -1 and 60. Set to -1 to disable this feature. """ guild = ctx.guild if time is not None: time = min(max(time, -1), 60) # Enforces the time limits await ctx.bot._config.guild(guild).delete_delay.set(time) if time == -1: await ctx.send(_("Command deleting disabled.")) else: await ctx.send(_("Delete delay set to {num} seconds.").format(num=time)) else: delay = await ctx.bot._config.guild(guild).delete_delay() if delay != -1: await ctx.send( _( "Bot will delete command messages after" " {num} seconds. Set this value to -1 to" " stop deleting messages" ).format(num=delay) ) else: await ctx.send(_("I will not delete command messages.")) @checks.is_owner() @_set.command(name="description") async def setdescription(self, ctx: commands.Context, *, description: str = ""): """ Sets the bot's description. Use without a description to reset. This is shown in a few locations, including the help menu. The default is "Red V3" """ if not description: await ctx.bot._config.description.clear() ctx.bot.description = "Red V3" await ctx.send(_("Description reset.")) elif len(description) > 250: # While the limit is 256, we bold it adding characters. await ctx.send( _( "This description is too long to properly display. " "Please try again with below 250 characters" ) ) else: await ctx.bot._config.description.set(description) ctx.bot.description = description await ctx.tick() @_set.command() @checks.guildowner() @commands.guild_only() async def addadminrole(self, ctx: commands.Context, *, role: discord.Role): """ Adds an admin role for this guild. """ async with ctx.bot._config.guild(ctx.guild).admin_role() as roles: if role.id in roles: return await ctx.send(_("This role is already an admin role.")) roles.append(role.id) await ctx.send(_("That role is now considered an admin role.")) @_set.command() @checks.guildowner() @commands.guild_only() async def addmodrole(self, ctx: commands.Context, *, role: discord.Role): """ Adds a mod role for this guild. """ async with ctx.bot._config.guild(ctx.guild).mod_role() as roles: if role.id in roles: return await ctx.send(_("This role is already a mod role.")) roles.append(role.id) await ctx.send(_("That role is now considered a mod role.")) @_set.command(aliases=["remadmindrole", "deladminrole", "deleteadminrole"]) @checks.guildowner() @commands.guild_only() async def removeadminrole(self, ctx: commands.Context, *, role: discord.Role): """ Removes an admin role for this guild. 
""" async with ctx.bot._config.guild(ctx.guild).admin_role() as roles: if role.id not in roles: return await ctx.send(_("That role was not an admin role to begin with.")) roles.remove(role.id) await ctx.send(_("That role is no longer considered an admin role.")) @_set.command(aliases=["remmodrole", "delmodrole", "deletemodrole"]) @checks.guildowner() @commands.guild_only() async def removemodrole(self, ctx: commands.Context, *, role: discord.Role): """ Removes a mod role for this guild. """ async with ctx.bot._config.guild(ctx.guild).mod_role() as roles: if role.id not in roles: return await ctx.send(_("That role was not a mod role to begin with.")) roles.remove(role.id) await ctx.send(_("That role is no longer considered a mod role.")) @_set.command(aliases=["usebotcolor"]) @checks.guildowner() @commands.guild_only() async def usebotcolour(self, ctx: commands.Context): """ Toggle whether to use the bot owner-configured colour for embeds. Default is to use the bot's configured colour. Otherwise, the colour used will be the colour of the bot's top role. """ current_setting = await ctx.bot._config.guild(ctx.guild).use_bot_color() await ctx.bot._config.guild(ctx.guild).use_bot_color.set(not current_setting) await ctx.send( _("The bot {} use its configured color for embeds.").format( _("will not") if not current_setting else _("will") ) ) @_set.command() @checks.guildowner() @commands.guild_only() async def serverfuzzy(self, ctx: commands.Context): """ Toggle whether to enable fuzzy command search for the server. Default is for fuzzy command search to be disabled. """ current_setting = await ctx.bot._config.guild(ctx.guild).fuzzy() await ctx.bot._config.guild(ctx.guild).fuzzy.set(not current_setting) await ctx.send( _("Fuzzy command search has been {} for this server.").format( _("disabled") if current_setting else _("enabled") ) ) @_set.command() @checks.is_owner() async def fuzzy(self, ctx: commands.Context): """ Toggle whether to enable fuzzy command search in DMs. Default is for fuzzy command search to be disabled. """ current_setting = await ctx.bot._config.fuzzy() await ctx.bot._config.fuzzy.set(not current_setting) await ctx.send( _("Fuzzy command search has been {} in DMs.").format( _("disabled") if current_setting else _("enabled") ) ) @_set.command(aliases=["color"]) @checks.is_owner() async def colour(self, ctx: commands.Context, *, colour: discord.Colour = None): """ Sets a default colour to be used for the bot's embeds. 
Acceptable values for the colour parameter can be found at: https://discordpy.readthedocs.io/en/stable/ext/commands/api.html#discord.ext.commands.ColourConverter """ if colour is None: ctx.bot._color = discord.Color.red() await ctx.bot._config.color.set(discord.Color.red().value) return await ctx.send(_("The color has been reset.")) ctx.bot._color = colour await ctx.bot._config.color.set(colour.value) await ctx.send(_("The color has been set.")) @_set.group(invoke_without_command=True) @checks.is_owner() async def avatar(self, ctx: commands.Context, url: str = None): """Sets [botname]'s avatar Supports either an attachment or an image URL.""" if len(ctx.message.attachments) > 0: # Attachments take priority data = await ctx.message.attachments[0].read() elif url is not None: if url.startswith("<") and url.endswith(">"): url = url[1:-1] async with aiohttp.ClientSession() as session: async with session.get(url) as r: data = await r.read() else: await ctx.send_help() return try: async with ctx.typing(): await ctx.bot.user.edit(avatar=data) except discord.HTTPException: await ctx.send( _( "Failed. Remember that you can edit my avatar " "up to two times a hour. The URL or attachment " "must be a valid image in either JPG or PNG format." ) ) except discord.InvalidArgument: await ctx.send(_("JPG / PNG format only.")) else: await ctx.send(_("Done.")) @avatar.command(name="remove", aliases=["clear"]) @checks.is_owner() async def avatar_remove(self, ctx: commands.Context): """Removes [botname]'s avatar""" async with ctx.typing(): await ctx.bot.user.edit(avatar=None) await ctx.send(_("Avatar removed.")) @_set.command(name="playing", aliases=["game"]) @checks.bot_in_a_guild() @checks.is_owner() async def _game(self, ctx: commands.Context, *, game: str = None): """Sets [botname]'s playing status""" if game: if len(game) > 128: await ctx.send("The maximum length of game descriptions is 128 characters.") return game = discord.Game(name=game) else: game = None status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online await ctx.bot.change_presence(status=status, activity=game) if game: await ctx.send(_("Status set to ``Playing {game.name}``.").format(game=game)) else: await ctx.send(_("Game cleared.")) @_set.command(name="listening") @checks.bot_in_a_guild() @checks.is_owner() async def _listening(self, ctx: commands.Context, *, listening: str = None): """Sets [botname]'s listening status""" status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online if listening: activity = discord.Activity(name=listening, type=discord.ActivityType.listening) else: activity = None await ctx.bot.change_presence(status=status, activity=activity) if activity: await ctx.send( _("Status set to ``Listening to {listening}``.").format(listening=listening) ) else: await ctx.send(_("Listening cleared.")) @_set.command(name="watching") @checks.bot_in_a_guild() @checks.is_owner() async def _watching(self, ctx: commands.Context, *, watching: str = None): """Sets [botname]'s watching status""" status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online if watching: activity = discord.Activity(name=watching, type=discord.ActivityType.watching) else: activity = None await ctx.bot.change_presence(status=status, activity=activity) if activity: await ctx.send(_("Status set to ``Watching {watching}``.").format(watching=watching)) else: await ctx.send(_("Watching cleared.")) @_set.command() @checks.bot_in_a_guild() @checks.is_owner() async def 
status(self, ctx: commands.Context, *, status: str): """Sets [botname]'s status Available statuses: online idle dnd invisible """ statuses = { "online": discord.Status.online, "idle": discord.Status.idle, "dnd": discord.Status.dnd, "invisible": discord.Status.invisible, } game = ctx.bot.guilds[0].me.activity if len(ctx.bot.guilds) > 0 else None try: status = statuses[status.lower()] except KeyError: await ctx.send_help() else: await ctx.bot.change_presence(status=status, activity=game) await ctx.send(_("Status changed to {}.").format(status)) @_set.command(name="streaming", aliases=["stream"]) @checks.bot_in_a_guild() @checks.is_owner() async def stream(self, ctx: commands.Context, streamer=None, *, stream_title=None): """Sets [botname]'s streaming status Leaving both streamer and stream_title empty will clear it.""" status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else None if stream_title: stream_title = stream_title.strip() if "twitch.tv/" not in streamer: streamer = "https://www.twitch.tv/" + streamer activity = discord.Streaming(url=streamer, name=stream_title) await ctx.bot.change_presence(status=status, activity=activity) elif streamer is not None: await ctx.send_help() return else: await ctx.bot.change_presence(activity=None, status=status) await ctx.send(_("Done.")) @_set.command(name="username", aliases=["name"]) @checks.is_owner() async def _username(self, ctx: commands.Context, *, username: str): """Sets [botname]'s username""" try: await self._name(name=username) except discord.HTTPException: await ctx.send( _( "Failed to change name. Remember that you can " "only do it up to 2 times an hour. Use " "nicknames if you need frequent changes. " "`{}set nickname`" ).format(ctx.clean_prefix) ) else: await ctx.send(_("Done.")) @_set.command(name="nickname") @checks.admin() @commands.guild_only() async def _nickname(self, ctx: commands.Context, *, nickname: str = None): """Sets [botname]'s nickname""" try: await ctx.guild.me.edit(nick=nickname) except discord.Forbidden: await ctx.send(_("I lack the permissions to change my own nickname.")) else: await ctx.send(_("Done.")) @_set.command(aliases=["prefixes"]) @checks.is_owner() async def prefix(self, ctx: commands.Context, *prefixes: str): """Sets [botname]'s global prefix(es)""" if not prefixes: await ctx.send_help() return await self._prefixes(prefixes) await ctx.send(_("Prefix set.")) @_set.command(aliases=["serverprefixes"]) @checks.admin() @commands.guild_only() async def serverprefix(self, ctx: commands.Context, *prefixes: str): """Sets [botname]'s server prefix(es)""" if not prefixes: await ctx.bot._prefix_cache.set_prefixes(guild=ctx.guild, prefixes=[]) await ctx.send(_("Guild prefixes have been reset.")) return prefixes = sorted(prefixes, reverse=True) await ctx.bot._prefix_cache.set_prefixes(guild=ctx.guild, prefixes=prefixes) await ctx.send(_("Prefix set.")) @_set.command() @checks.is_owner() async def locale(self, ctx: commands.Context, language_code: str): """ Changes bot's locale. `<language_code>` can be any language code with country code included, e.g. `en-US`, `de-DE`, `fr-FR`, `pl-PL`, etc. Go to Red's Crowdin page to see locales that are available with translations: https://translate.discord.red To reset to English, use "en-US". """ try: locale = BabelLocale.parse(language_code, sep="-") except (ValueError, UnknownLocaleError): await ctx.send(_("Invalid language code. 
Use format: `en-US`")) return if locale.territory is None: await ctx.send( _("Invalid format - language code has to include country code, e.g. `en-US`") ) return standardized_locale_name = f"{locale.language}-{locale.territory}" i18n.set_locale(standardized_locale_name) await ctx.bot._config.locale.set(standardized_locale_name) await ctx.send(_("Locale has been set.")) @_set.command(aliases=["region"]) @checks.is_owner() async def regionalformat(self, ctx: commands.Context, language_code: str = None): """ Changes bot's regional format. This is used for formatting date, time and numbers. `<language_code>` can be any language code with country code included, e.g. `en-US`, `de-DE`, `fr-FR`, `pl-PL`, etc. Leave `<language_code>` empty to base regional formatting on bot's locale. """ if language_code is None: i18n.set_regional_format(None) await ctx.bot._config.regional_format.set(None) await ctx.send(_("Regional formatting will now be based on bot's locale.")) return try: locale = BabelLocale.parse(language_code, sep="-") except (ValueError, UnknownLocaleError): await ctx.send(_("Invalid language code. Use format: `en-US`")) return if locale.territory is None: await ctx.send( _("Invalid format - language code has to include country code, e.g. `en-US`") ) return standardized_locale_name = f"{locale.language}-{locale.territory}" i18n.set_regional_format(standardized_locale_name) await ctx.bot._config.regional_format.set(standardized_locale_name) await ctx.send( _("Regional formatting will now be based on `{language_code}` locale.").format( language_code=standardized_locale_name ) ) @_set.command() @checks.is_owner() async def custominfo(self, ctx: commands.Context, *, text: str = None): """Customizes a section of [p]info The maximum amount of allowed characters is 1024. Supports markdown, links and "mentions". Link example: `[My link](https://example.com)` """ if not text: await ctx.bot._config.custom_info.clear() await ctx.send(_("The custom text has been cleared.")) return if len(text) <= 1024: await ctx.bot._config.custom_info.set(text) await ctx.send(_("The custom text has been set.")) await ctx.invoke(self.info) else: await ctx.bot.send(_("Characters must be fewer than 1024.")) @_set.command() @checks.is_owner() async def api(self, ctx: commands.Context, service: str, *, tokens: TokenConverter): """Set various external API tokens. This setting will be asked for by some 3rd party cogs and some core cogs. To add the keys provide the service name and the tokens as a comma separated list of key,values as described by the cog requesting this command. Note: API tokens are sensitive and should only be used in a private channel or in DM with the bot. """ if ctx.channel.permissions_for(ctx.me).manage_messages: await ctx.message.delete() await ctx.bot.set_shared_api_tokens(service, **tokens) await ctx.send(_("`{service}` API tokens have been set.").format(service=service)) @commands.group() @checks.is_owner() async def helpset(self, ctx: commands.Context): """Manage settings for the help command.""" pass @helpset.command(name="usemenus") async def helpset_usemenus(self, ctx: commands.Context, use_menus: bool = None): """ Allows the help command to be sent as a paginated menu instead of seperate messages. This defaults to False. Using this without a setting will toggle. 
""" if use_menus is None: use_menus = not await ctx.bot._config.help.use_menus() await ctx.bot._config.help.use_menus.set(use_menus) if use_menus: await ctx.send(_("Help will use menus.")) else: await ctx.send(_("Help will not use menus.")) @helpset.command(name="showhidden") async def helpset_showhidden(self, ctx: commands.Context, show_hidden: bool = None): """ This allows the help command to show hidden commands This defaults to False. Using this without a setting will toggle. """ if show_hidden is None: show_hidden = not await ctx.bot._config.help.show_hidden() await ctx.bot._config.help.show_hidden.set(show_hidden) if show_hidden: await ctx.send(_("Help will not filter hidden commands")) else: await ctx.send(_("Help will filter hidden commands.")) @helpset.command(name="verifychecks") async def helpset_permfilter(self, ctx: commands.Context, verify: bool = None): """ Sets if commands which can't be run in the current context should be filtered from help Defaults to True. Using this without a setting will toggle. """ if verify is None: verify = not await ctx.bot._config.help.verify_checks() await ctx.bot._config.help.verify_checks.set(verify) if verify: await ctx.send(_("Help will only show for commands which can be run.")) else: await ctx.send(_("Help will show up without checking if the commands can be run.")) @helpset.command(name="verifyexists") async def helpset_verifyexists(self, ctx: commands.Context, verify: bool = None): """ This allows the bot to respond indicating the existence of a specific help topic even if the user can't use it. Note: This setting on it's own does not fully prevent command enumeration. Defaults to False. Using this without a setting will toggle. """ if verify is None: verify = not await ctx.bot._config.help.verify_exists() await ctx.bot._config.help.verify_exists.set(verify) if verify: await ctx.send(_("Help will verify the existence of help topics.")) else: await ctx.send( _( "Help will only verify the existence of " "help topics via fuzzy help (if enabled)." ) ) @helpset.command(name="pagecharlimit") async def helpset_pagecharlimt(self, ctx: commands.Context, limit: int): """Set the character limit for each page in the help message. This setting only applies to embedded help. The default value is 1000 characters. The minimum value is 500. The maximum is based on the lower of what you provide and what discord allows. Please note that setting a relatively small character limit may mean some pages will exceed this limit. """ if limit < 500: await ctx.send(_("You must give a value of at least 500 characters.")) return await ctx.bot._config.help.page_char_limit.set(limit) await ctx.send(_("Done. The character limit per page has been set to {}.").format(limit)) @helpset.command(name="maxpages") async def helpset_maxpages(self, ctx: commands.Context, pages: int): """Set the maximum number of help pages sent in a server channel. This setting only applies to embedded help. If a help message contains more pages than this value, the help message will be sent to the command author via DM. This is to help reduce spam in server text channels. The default value is 2 pages. """ if pages < 0: await ctx.send(_("You must give a value of zero or greater!")) return await ctx.bot._config.help.max_pages_in_guild.set(pages) await ctx.send(_("Done. 
The page limit has been set to {}.").format(pages)) @helpset.command(name="deletedelay") @commands.bot_has_permissions(manage_messages=True) async def helpset_deletedelay(self, ctx: commands.Context, seconds: int): """Set the delay after which help pages will be deleted. The setting is disabled by default, and only applies to non-menu help, sent in server text channels. Setting the delay to 0 disables this feature. The bot has to have MANAGE_MESSAGES permission for this to work. """ if seconds < 0: await ctx.send(_("You must give a value of zero or greater!")) return if seconds > 60 * 60 * 24 * 14: # 14 days await ctx.send(_("The delay cannot be longer than 14 days!")) return await ctx.bot._config.help.delete_delay.set(seconds) if seconds == 0: await ctx.send(_("Done. Help messages will not be deleted now.")) else: await ctx.send(_("Done. The delete delay has been set to {} seconds.").format(seconds)) @helpset.command(name="tagline") async def helpset_tagline(self, ctx: commands.Context, *, tagline: str = None): """ Set the tagline to be used. This setting only applies to embedded help. If no tagline is specified, the default will be used instead. """ if tagline is None: await ctx.bot._config.help.tagline.set("") return await ctx.send(_("The tagline has been reset.")) if len(tagline) > 2048: await ctx.send( _( "Your tagline is too long! Please shorten it to be " "no more than 2048 characters long." ) ) return await ctx.bot._config.help.tagline.set(tagline) await ctx.send(_("The tagline has been set.")) @commands.command() @commands.cooldown(1, 60, commands.BucketType.user) async def contact(self, ctx: commands.Context, *, message: str): """Sends a message to the owner""" guild = ctx.message.guild author = ctx.message.author footer = _("User ID: {}").format(author.id) if ctx.guild is None: source = _("through DM") else: source = _("from {}").format(guild) footer += _(" | Server ID: {}").format(guild.id) prefixes = await ctx.bot.get_valid_prefixes() prefix = re.sub(rf"<@!?{ctx.me.id}>", f"@{ctx.me.name}", prefixes[0]) content = _("Use `{}dm {} <text>` to reply to this user").format(prefix, author.id) description = _("Sent by {} {}").format(author, source) destinations = await ctx.bot.get_owner_notification_destinations() if not destinations: await ctx.send(_("I've been configured not to send this anywhere.")) return successful = False for destination in destinations: is_dm = isinstance(destination, discord.User) send_embed = None if is_dm: send_embed = await ctx.bot._config.user(destination).embeds() else: if not destination.permissions_for(destination.guild.me).send_messages: continue if destination.permissions_for(destination.guild.me).embed_links: send_embed = await ctx.bot._config.channel(destination).embeds() if send_embed is None: send_embed = await ctx.bot._config.guild(destination.guild).embeds() else: send_embed = False if send_embed is None: send_embed = await ctx.bot._config.embeds() if send_embed: if not is_dm: color = await ctx.bot.get_embed_color(destination) else: color = ctx.bot._color e = discord.Embed(colour=color, description=message) if author.avatar_url: e.set_author(name=description, icon_url=author.avatar_url) else: e.set_author(name=description) e.set_footer(text=footer) try: await destination.send(embed=e) except discord.Forbidden: log.exception(f"Contact failed to {destination}({destination.id})") # Should this automatically opt them out? 
except discord.HTTPException: log.exception( f"An unexpected error happened while attempting to" f" send contact to {destination}({destination.id})" ) else: successful = True else: msg_text = "{}\nMessage:\n\n{}\n{}".format(description, message, footer) try: await destination.send("{}\n{}".format(content, box(msg_text))) except discord.Forbidden: log.exception(f"Contact failed to {destination}({destination.id})") # Should this automatically opt them out? except discord.HTTPException: log.exception( f"An unexpected error happened while attempting to" f" send contact to {destination}({destination.id})" ) else: successful = True if successful: await ctx.send(_("Your message has been sent.")) else: await ctx.send(_("I'm unable to deliver your message. Sorry.")) @commands.command() @checks.is_owner() async def dm(self, ctx: commands.Context, user_id: int, *, message: str): """Sends a DM to a user This command needs a user id to work. To get a user id enable 'developer mode' in Discord's settings, 'appearance' tab. Then right click a user and copy their id""" destination = discord.utils.get(ctx.bot.get_all_members(), id=user_id) if destination is None or destination.bot: await ctx.send( _( "Invalid ID, user not found, or user is a bot. " "You can only send messages to people I share " "a server with." ) ) return prefixes = await ctx.bot.get_valid_prefixes() prefix = re.sub(rf"<@!?{ctx.me.id}>", f"@{ctx.me.name}", prefixes[0]) description = _("Owner of {}").format(ctx.bot.user) content = _("You can reply to this message with {}contact").format(prefix) if await ctx.embed_requested(): e = discord.Embed(colour=discord.Colour.red(), description=message) e.set_footer(text=content) if ctx.bot.user.avatar_url: e.set_author(name=description, icon_url=ctx.bot.user.avatar_url) else: e.set_author(name=description) try: await destination.send(embed=e) except discord.HTTPException: await ctx.send( _("Sorry, I couldn't deliver your message to {}").format(destination) ) else: await ctx.send(_("Message delivered to {}").format(destination)) else: response = "{}\nMessage:\n\n{}".format(description, message) try: await destination.send("{}\n{}".format(box(response), content)) except discord.HTTPException: await ctx.send( _("Sorry, I couldn't deliver your message to {}").format(destination) ) else: await ctx.send(_("Message delivered to {}").format(destination)) @commands.command(hidden=True) @checks.is_owner() async def datapath(self, ctx: commands.Context): """Prints the bot's data path.""" from redbot.core.data_manager import basic_config data_dir = Path(basic_config["DATA_PATH"]) msg = _("Data path: {path}").format(path=data_dir) await ctx.send(box(msg)) @commands.command(hidden=True) @checks.is_owner() async def debuginfo(self, ctx: commands.Context): """Shows debug information useful for debugging..""" if sys.platform == "linux": import distro # pylint: disable=import-error IS_WINDOWS = os.name == "nt" IS_MAC = sys.platform == "darwin" IS_LINUX = sys.platform == "linux" pyver = "{}.{}.{} ({})".format(*sys.version_info[:3], platform.architecture()[0]) pipver = pip.__version__ redver = red_version_info dpy_version = discord.__version__ if IS_WINDOWS: os_info = platform.uname() osver = "{} {} (version {})".format(os_info.system, os_info.release, os_info.version) elif IS_MAC: os_info = platform.mac_ver() osver = "Mac OSX {} {}".format(os_info[0], os_info[2]) elif IS_LINUX: os_info = distro.linux_distribution() osver = "{} {}".format(os_info[0], os_info[1]).strip() else: osver = "Could not parse OS, report this on 
Github." user_who_ran = getpass.getuser() driver = storage_type() if await ctx.embed_requested(): e = discord.Embed(color=await ctx.embed_colour()) e.title = "Debug Info for Red" e.add_field(name="Kr0nOs version", value=redver, inline=True) e.add_field(name="Python version", value=pyver, inline=True) e.add_field(name="Discord.py version", value=dpy_version, inline=True) e.add_field(name="Pip version", value=pipver, inline=True) e.add_field(name="System arch", value=platform.machine(), inline=True) e.add_field(name="User", value=user_who_ran, inline=True) e.add_field(name="OS version", value=osver, inline=False) e.add_field( name="Python executable", value=escape(sys.executable, formatting=True), inline=False, ) e.add_field(name="Storage type", value=driver, inline=False) await ctx.send(embed=e) else: info = ( "Debug Info for Red\n\n" + "Red version: {}\n".format(redver) + "Python version: {}\n".format(pyver) + "Python executable: {}\n".format(sys.executable) + "Discord.py version: {}\n".format(dpy_version) + "Pip version: {}\n".format(pipver) + "System arch: {}\n".format(platform.machine()) + "User: {}\n".format(user_who_ran) + "OS version: {}\n".format(osver) + "Storage type: {}\n".format(driver) ) await ctx.send(box(info)) @commands.group() @checks.is_owner() async def whitelist(self, ctx: commands.Context): """ Whitelist management commands. """ pass @whitelist.command(name="add", usage="<user>...") async def whitelist_add(self, ctx: commands.Context, *users: Union[discord.Member, int]): """ Adds a user to the whitelist. """ if not users: await ctx.send_help() return uids = [getattr(user, "id", user) for user in users] await self.bot._whiteblacklist_cache.add_to_whitelist(None, uids) await ctx.send(_("Users added to whitelist.")) @whitelist.command(name="list") async def whitelist_list(self, ctx: commands.Context): """ Lists whitelisted users. """ curr_list = await ctx.bot._config.whitelist() if not curr_list: await ctx.send("Whitelist is empty.") return msg = _("Whitelisted Users:") for user in curr_list: msg += "\n\t- {}".format(user) for page in pagify(msg): await ctx.send(box(page)) @whitelist.command(name="remove", usage="<user>...") async def whitelist_remove(self, ctx: commands.Context, *users: Union[discord.Member, int]): """ Removes user from whitelist. """ if not users: await ctx.send_help() return uids = [getattr(user, "id", user) for user in users] await self.bot._whiteblacklist_cache.remove_from_whitelist(None, uids) await ctx.send(_("Users have been removed from whitelist.")) @whitelist.command(name="clear") async def whitelist_clear(self, ctx: commands.Context): """ Clears the whitelist. """ await self.bot._whiteblacklist_cache.clear_whitelist() await ctx.send(_("Whitelist has been cleared.")) @commands.group() @checks.is_owner() async def blacklist(self, ctx: commands.Context): """ Blacklist management commands. """ pass @blacklist.command(name="add", usage="<user>...") async def blacklist_add(self, ctx: commands.Context, *users: Union[discord.Member, int]): """ Adds a user to the blacklist. 
""" if not users: await ctx.send_help() return for user in users: if isinstance(user, int): user_obj = discord.Object(id=user) else: user_obj = user if await ctx.bot.is_owner(user_obj): await ctx.send(_("You cannot blacklist an owner!")) return uids = [getattr(user, "id", user) for user in users] await self.bot._whiteblacklist_cache.add_to_blacklist(None, uids) await ctx.send(_("User added to blacklist.")) @blacklist.command(name="list") async def blacklist_list(self, ctx: commands.Context): """ Lists blacklisted users. """ curr_list = await self.bot._whiteblacklist_cache.get_blacklist(None) if not curr_list: await ctx.send("Blacklist is empty.") return msg = _("Blacklisted Users:") for user in curr_list: msg += "\n\t- {}".format(user) for page in pagify(msg): await ctx.send(box(page)) @blacklist.command(name="remove", usage="<user>...") async def blacklist_remove(self, ctx: commands.Context, *users: Union[discord.Member, int]): """ Removes user from blacklist. """ if not users: await ctx.send_help() return uids = [getattr(user, "id", user) for user in users] await self.bot._whiteblacklist_cache.remove_from_blacklist(None, uids) await ctx.send(_("Users have been removed from blacklist.")) @blacklist.command(name="clear") async def blacklist_clear(self, ctx: commands.Context): """ Clears the blacklist. """ await self.bot._whiteblacklist_cache.clear_blacklist() await ctx.send(_("Blacklist has been cleared.")) @commands.group() @commands.guild_only() @checks.admin_or_permissions(administrator=True) async def localwhitelist(self, ctx: commands.Context): """ Whitelist management commands. """ pass @localwhitelist.command(name="add", usage="<user_or_role>...") async def localwhitelist_add( self, ctx: commands.Context, *users_or_roles: Union[discord.Member, discord.Role, int] ): """ Adds a user or role to the whitelist. """ if not users_or_roles: await ctx.send_help() return names = [getattr(u_or_r, "name", u_or_r) for u_or_r in users_or_roles] uids = [getattr(u_or_r, "id", u_or_r) for u_or_r in users_or_roles] await self.bot._whiteblacklist_cache.add_to_whitelist(ctx.guild, uids) await ctx.send(_("{names} added to whitelist.").format(names=humanize_list(names))) @localwhitelist.command(name="list") async def localwhitelist_list(self, ctx: commands.Context): """ Lists whitelisted users and roles. """ curr_list = await self.bot._whiteblacklist_cache.get_whitelist(ctx.guild) if not curr_list: await ctx.send("Local whitelist is empty.") return msg = _("Whitelisted Users and roles:") for obj in curr_list: msg += "\n\t- {}".format(obj) for page in pagify(msg): await ctx.send(box(page)) @localwhitelist.command(name="remove", usage="<user_or_role>...") async def localwhitelist_remove( self, ctx: commands.Context, *users_or_roles: Union[discord.Member, discord.Role, int] ): """ Removes user or role from whitelist. """ if not users_or_roles: await ctx.send_help() return names = [getattr(u_or_r, "name", u_or_r) for u_or_r in users_or_roles] uids = [getattr(u_or_r, "id", u_or_r) for u_or_r in users_or_roles] await self.bot._whiteblacklist_cache.remove_from_whitelist(ctx.guild, uids) await ctx.send( _("{names} removed from the local whitelist.").format(names=humanize_list(names)) ) @localwhitelist.command(name="clear") async def localwhitelist_clear(self, ctx: commands.Context): """ Clears the whitelist. 
""" await self.bot._whiteblacklist_cache.clear_whitelist(ctx.guild) await ctx.send(_("Local whitelist has been cleared.")) @commands.group() @commands.guild_only() @checks.admin_or_permissions(administrator=True) async def localblacklist(self, ctx: commands.Context): """ blacklist management commands. """ pass @localblacklist.command(name="add", usage="<user_or_role>...") async def localblacklist_add( self, ctx: commands.Context, *users_or_roles: Union[discord.Member, discord.Role, int] ): """ Adds a user or role to the blacklist. """ if not users_or_roles: await ctx.send_help() return for user_or_role in users_or_roles: uid = discord.Object(id=getattr(user_or_role, "id", user_or_role)) if uid.id == ctx.author.id: await ctx.send(_("You cannot blacklist yourself!")) return if uid.id == ctx.guild.owner_id and not await ctx.bot.is_owner(ctx.author): await ctx.send(_("You cannot blacklist the guild owner!")) return if await ctx.bot.is_owner(uid): await ctx.send(_("You cannot blacklist a bot owner!")) return names = [getattr(u_or_r, "name", u_or_r) for u_or_r in users_or_roles] uids = [getattr(u_or_r, "id", u_or_r) for u_or_r in users_or_roles] await self.bot._whiteblacklist_cache.add_to_blacklist(ctx.guild, uids) await ctx.send( _("{names} added to the local blacklist.").format(names=humanize_list(names)) ) @localblacklist.command(name="list") async def localblacklist_list(self, ctx: commands.Context): """ Lists blacklisted users and roles. """ curr_list = await self.bot._whiteblacklist_cache.get_blacklist(ctx.guild) if not curr_list: await ctx.send("Local blacklist is empty.") return msg = _("Blacklisted Users and Roles:") for obj in curr_list: msg += "\n\t- {}".format(obj) for page in pagify(msg): await ctx.send(box(page)) @localblacklist.command(name="remove", usage="<user_or_role>...") async def localblacklist_remove( self, ctx: commands.Context, *users_or_roles: Union[discord.Member, discord.Role, int] ): """ Removes user or role from blacklist. """ if not users_or_roles: await ctx.send_help() return names = [getattr(u_or_r, "name", u_or_r) for u_or_r in users_or_roles] uids = [getattr(u_or_r, "id", u_or_r) for u_or_r in users_or_roles] await self.bot._whiteblacklist_cache.remove_from_blacklist(ctx.guild, uids) await ctx.send( _("{names} removed from the local blacklist.").format(names=humanize_list(names)) ) @localblacklist.command(name="clear") async def localblacklist_clear(self, ctx: commands.Context): """ Clears the blacklist. """ await self.bot._whiteblacklist_cache.clear_blacklist(ctx.guild) await ctx.send(_("Local blacklist has been cleared.")) @checks.guildowner_or_permissions(administrator=True) @commands.group(name="command") async def command_manager(self, ctx: commands.Context): """Manage the bot's commands.""" pass @command_manager.group(name="listdisabled", invoke_without_command=True) async def list_disabled(self, ctx: commands.Context): """ List disabled commands. If you're the bot owner, this will show global disabled commands by default. 
""" # Select the scope based on the author's privileges if await ctx.bot.is_owner(ctx.author): await ctx.invoke(self.list_disabled_global) else: await ctx.invoke(self.list_disabled_guild) @list_disabled.command(name="global") async def list_disabled_global(self, ctx: commands.Context): """List disabled commands globally.""" disabled_list = await self.bot._config.disabled_commands() if not disabled_list: return await ctx.send(_("There aren't any globally disabled commands.")) if len(disabled_list) > 1: header = _("{} commands are disabled globally.\n").format( humanize_number(len(disabled_list)) ) else: header = _("1 command is disabled globally.\n") paged = [box(x) for x in pagify(humanize_list(disabled_list), page_length=1000)] paged[0] = header + paged[0] await ctx.send_interactive(paged) @list_disabled.command(name="guild") async def list_disabled_guild(self, ctx: commands.Context): """List disabled commands in this server.""" disabled_list = await self.bot._config.guild(ctx.guild).disabled_commands() if not disabled_list: return await ctx.send(_("There aren't any disabled commands in {}.").format(ctx.guild)) if len(disabled_list) > 1: header = _("{} commands are disabled in {}.\n").format( humanize_number(len(disabled_list)), ctx.guild ) else: header = _("1 command is disabled in {}.\n").format(ctx.guild) paged = [box(x) for x in pagify(humanize_list(disabled_list), page_length=1000)] paged[0] = header + paged[0] await ctx.send_interactive(paged) @command_manager.group(name="disable", invoke_without_command=True) async def command_disable(self, ctx: commands.Context, *, command: str): """Disable a command. If you're the bot owner, this will disable commands globally by default. """ # Select the scope based on the author's privileges if await ctx.bot.is_owner(ctx.author): await ctx.invoke(self.command_disable_global, command=command) else: await ctx.invoke(self.command_disable_guild, command=command) @checks.is_owner() @command_disable.command(name="global") async def command_disable_global(self, ctx: commands.Context, *, command: str): """Disable a command globally.""" command_obj: commands.Command = ctx.bot.get_command(command) if command_obj is None: await ctx.send( _("I couldn't find that command. Please note that it is case sensitive.") ) return if self.command_manager in command_obj.parents or self.command_manager == command_obj: await ctx.send( _("The command to disable cannot be `command` or any of its subcommands.") ) return if isinstance(command_obj, commands.commands._AlwaysAvailableCommand): await ctx.send( _("This command is designated as being always available and cannot be disabled.") ) return async with ctx.bot._config.disabled_commands() as disabled_commands: if command not in disabled_commands: disabled_commands.append(command_obj.qualified_name) if not command_obj.enabled: await ctx.send(_("That command is already disabled globally.")) return command_obj.enabled = False await ctx.tick() @commands.guild_only() @command_disable.command(name="server", aliases=["guild"]) async def command_disable_guild(self, ctx: commands.Context, *, command: str): """Disable a command in this server only.""" command_obj: commands.Command = ctx.bot.get_command(command) if command_obj is None: await ctx.send( _("I couldn't find that command. 
Please note that it is case sensitive.") ) return if self.command_manager in command_obj.parents or self.command_manager == command_obj: await ctx.send( _("The command to disable cannot be `command` or any of its subcommands.") ) return if isinstance(command_obj, commands.commands._AlwaysAvailableCommand): await ctx.send( _("This command is designated as being always available and cannot be disabled.") ) return if command_obj.requires.privilege_level > await PrivilegeLevel.from_ctx(ctx): await ctx.send(_("You are not allowed to disable that command.")) return async with ctx.bot._config.guild(ctx.guild).disabled_commands() as disabled_commands: if command not in disabled_commands: disabled_commands.append(command_obj.qualified_name) done = command_obj.disable_in(ctx.guild) if not done: await ctx.send(_("That command is already disabled in this server.")) else: await ctx.tick() @command_manager.group(name="enable", invoke_without_command=True) async def command_enable(self, ctx: commands.Context, *, command: str): """Enable a command. If you're a bot owner, this will try to enable a globally disabled command by default. """ if await ctx.bot.is_owner(ctx.author): await ctx.invoke(self.command_enable_global, command=command) else: await ctx.invoke(self.command_enable_guild, command=command) @commands.is_owner() @command_enable.command(name="global") async def command_enable_global(self, ctx: commands.Context, *, command: str): """Enable a command globally.""" command_obj: commands.Command = ctx.bot.get_command(command) if command_obj is None: await ctx.send( _("I couldn't find that command. Please note that it is case sensitive.") ) return async with ctx.bot._config.disabled_commands() as disabled_commands: with contextlib.suppress(ValueError): disabled_commands.remove(command_obj.qualified_name) if command_obj.enabled: await ctx.send(_("That command is already enabled globally.")) return command_obj.enabled = True await ctx.tick() @commands.guild_only() @command_enable.command(name="server", aliases=["guild"]) async def command_enable_guild(self, ctx: commands.Context, *, command: str): """Enable a command in this server.""" command_obj: commands.Command = ctx.bot.get_command(command) if command_obj is None: await ctx.send( _("I couldn't find that command. Please note that it is case sensitive.") ) return if command_obj.requires.privilege_level > await PrivilegeLevel.from_ctx(ctx): await ctx.send(_("You are not allowed to enable that command.")) return async with ctx.bot._config.guild(ctx.guild).disabled_commands() as disabled_commands: with contextlib.suppress(ValueError): disabled_commands.remove(command_obj.qualified_name) done = command_obj.enable_in(ctx.guild) if not done: await ctx.send(_("That command is already enabled in this server.")) else: await ctx.tick() @checks.is_owner() @command_manager.command(name="disabledmsg") async def command_disabledmsg(self, ctx: commands.Context, *, message: str = ""): """Set the bot's response to disabled commands. Leave blank to send nothing. To include the command name in the message, include the `{command}` placeholder. 
""" await ctx.bot._config.disabled_command_msg.set(message) await ctx.tick() @commands.guild_only() @checks.guildowner_or_permissions(manage_guild=True) @commands.group(name="autoimmune") async def autoimmune_group(self, ctx: commands.Context): """ Server settings for immunity from automated actions """ pass @autoimmune_group.command(name="list") async def autoimmune_list(self, ctx: commands.Context): """ Get's the current members and roles configured for automatic moderation action immunity """ ai_ids = await ctx.bot._config.guild(ctx.guild).autoimmune_ids() roles = {r.name for r in ctx.guild.roles if r.id in ai_ids} members = {str(m) for m in ctx.guild.members if m.id in ai_ids} output = "" if roles: output += _("Roles immune from automated moderation actions:\n") output += ", ".join(roles) if members: if roles: output += "\n" output += _("Members immune from automated moderation actions:\n") output += ", ".join(members) if not output: output = _("No immunty settings here.") for page in pagify(output): await ctx.send(page) @autoimmune_group.command(name="add") async def autoimmune_add( self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role] ): """ Makes a user or roles immune from automated moderation actions """ async with ctx.bot._config.guild(ctx.guild).autoimmune_ids() as ai_ids: if user_or_role.id in ai_ids: return await ctx.send(_("Already added.")) ai_ids.append(user_or_role.id) await ctx.tick() @autoimmune_group.command(name="remove") async def autoimmune_remove( self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role] ): """ Makes a user or roles immune from automated moderation actions """ async with ctx.bot._config.guild(ctx.guild).autoimmune_ids() as ai_ids: if user_or_role.id not in ai_ids: return await ctx.send(_("Not in list.")) ai_ids.remove(user_or_role.id) await ctx.tick() @autoimmune_group.command(name="isimmune") async def autoimmune_checkimmune( self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role] ): """ Checks if a user or role would be considered immune from automated actions """ if await ctx.bot.is_automod_immune(user_or_role): await ctx.send(_("They are immune")) else: await ctx.send(_("They are not Immune")) @checks.is_owner() @_set.group() async def ownernotifications(self, ctx: commands.Context): """ Commands for configuring owner notifications. """ pass @ownernotifications.command() async def optin(self, ctx: commands.Context): """ Opt-in on recieving owner notifications. This is the default state. """ async with ctx.bot._config.owner_opt_out_list() as opt_outs: if ctx.author.id in opt_outs: opt_outs.remove(ctx.author.id) await ctx.tick() @ownernotifications.command() async def optout(self, ctx: commands.Context): """ Opt-out of recieving owner notifications. 
""" async with ctx.bot._config.owner_opt_out_list() as opt_outs: if ctx.author.id not in opt_outs: opt_outs.append(ctx.author.id) await ctx.tick() @ownernotifications.command() async def adddestination( self, ctx: commands.Context, *, channel: Union[discord.TextChannel, int] ): """ Adds a destination text channel to recieve owner notifications """ try: channel_id = channel.id except AttributeError: channel_id = channel async with ctx.bot._config.extra_owner_destinations() as extras: if channel_id not in extras: extras.append(channel_id) await ctx.tick() @ownernotifications.command(aliases=["remdestination", "deletedestination", "deldestination"]) async def removedestination( self, ctx: commands.Context, *, channel: Union[discord.TextChannel, int] ): """ Removes a destination text channel from recieving owner notifications. """ try: channel_id = channel.id except AttributeError: channel_id = channel async with ctx.bot._config.extra_owner_destinations() as extras: if channel_id in extras: extras.remove(channel_id) await ctx.tick() @ownernotifications.command() async def listdestinations(self, ctx: commands.Context): """ Lists the configured extra destinations for owner notifications """ channel_ids = await ctx.bot._config.extra_owner_destinations() if not channel_ids: await ctx.send(_("There are no extra channels being sent to.")) return data = [] for channel_id in channel_ids: channel = ctx.bot.get_channel(channel_id) if channel: # This includes the channel name in case the user can't see the channel. data.append(f"{channel.mention} {channel} ({channel.id})") else: data.append(_("Unknown channel with id: {id}").format(id=channel_id)) output = "\n".join(data) for page in pagify(output): await ctx.send(page) # RPC handlers async def rpc_load(self, request): cog_name = request.params[0] spec = await self.bot._cog_mgr.find_cog(cog_name) if spec is None: raise LookupError("No such cog found.") self._cleanup_and_refresh_modules(spec.name) await self.bot.load_extension(spec) async def rpc_unload(self, request): cog_name = request.params[0] self.bot.unload_extension(cog_name) async def rpc_reload(self, request): await self.rpc_unload(request) await self.rpc_load(request) @commands.group() @commands.guild_only() @checks.admin_or_permissions(manage_channels=True) async def ignore(self, ctx: commands.Context): """Add servers or channels to the ignore list.""" if ctx.invoked_subcommand is None: for page in pagify(await self.count_ignored(ctx)): await ctx.maybe_send_embed(page) @ignore.command(name="channel") async def ignore_channel( self, ctx: commands.Context, channel: Optional[Union[discord.TextChannel, discord.CategoryChannel]] = None, ): """Ignore commands in the channel or category. Defaults to the current channel. 
""" if not channel: channel = ctx.channel if not await self.bot._ignored_cache.get_ignored_channel(channel): await self.bot._ignored_cache.set_ignored_channel(channel, True) await ctx.send(_("Channel added to ignore list.")) else: await ctx.send(_("Channel already in ignore list.")) @ignore.command(name="server", aliases=["guild"]) @checks.admin_or_permissions(manage_guild=True) async def ignore_guild(self, ctx: commands.Context): """Ignore commands in this server.""" guild = ctx.guild if not await self.bot._ignored_cache.get_ignored_guild(guild): await self.bot._ignored_cache.set_ignored_guild(guild, True) await ctx.send(_("This server has been added to the ignore list.")) else: await ctx.send(_("This server is already being ignored.")) @commands.group() @commands.guild_only() @checks.admin_or_permissions(manage_channels=True) async def unignore(self, ctx: commands.Context): """Remove servers or channels from the ignore list.""" if ctx.invoked_subcommand is None: for page in pagify(await self.count_ignored(ctx)): await ctx.maybe_send_embed(page) @unignore.command(name="channel") async def unignore_channel( self, ctx: commands.Context, channel: Optional[Union[discord.TextChannel, discord.CategoryChannel]] = None, ): """Remove a channel or category from ignore the list. Defaults to the current channel. """ if not channel: channel = ctx.channel if await self.bot._ignored_cache.get_ignored_channel(channel): await self.bot._ignored_cache.set_ignored_channel(channel, False) await ctx.send(_("Channel removed from ignore list.")) else: await ctx.send(_("That channel is not in the ignore list.")) @unignore.command(name="server", aliases=["guild"]) @checks.admin_or_permissions(manage_guild=True) async def unignore_guild(self, ctx: commands.Context): """Remove this server from the ignore list.""" guild = ctx.message.guild if await self.bot._ignored_cache.get_ignored_guild(guild): await self.bot._ignored_cache.set_ignored_guild(guild, False) await ctx.send(_("This server has been removed from the ignore list.")) else: await ctx.send(_("This server is not in the ignore list.")) async def count_ignored(self, ctx: commands.Context): category_channels: List[discord.CategoryChannel] = [] text_channels: List[discord.TextChannel] = [] if await self.bot._ignored_cache.get_ignored_guild(ctx.guild): return _("This server is currently being ignored.") for channel in ctx.guild.text_channels: if channel.category and channel.category not in category_channels: if await self.bot._ignored_cache.get_ignored_channel(channel.category): category_channels.append(channel.category) if await self.bot._ignored_cache.get_ignored_channel(channel, check_category=False): text_channels.append(channel) cat_str = ( humanize_list([c.name for c in category_channels]) if category_channels else "None" ) chan_str = humanize_list([c.mention for c in text_channels]) if text_channels else "None" msg = _("Currently ignored categories: {categories}\nChannels: {channels}").format( categories=cat_str, channels=chan_str ) return msg # Removing this command from forks is a violation of the GPLv3 under which it is licensed. # Otherwise interfering with the ability for this command to be accessible is also a violation. 
@commands.command(
    cls=commands.commands._AlwaysAvailableCommand,
    name="licenseinfo",
    aliases=["licenceinfo"],
    i18n=_,
)
async def license_info_command(ctx):
    """
    Get info about Kr0nOs's licenses
    """
    message = (
        "This bot is an instance of Red-DiscordBot (hereafter referred to as Red)\n"
        # "Red is a free and open source application made available to the public and "
        # "licensed under the GNU GPLv3. The full text of this license is available to you at "
        "<https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/LICENSE>"
    )
    await ctx.send(message)
    # We need a link which contains a thank you to other projects which we use at some point.
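
# --- Illustrative sketch (added note, not part of the original cog) ---
# The [p]contact and [p]dm commands above sanitize a mention-style prefix such
# as "<@!123>" into a readable "@BotName" before showing it to users. A
# self-contained demo of that substitution; the ID, name, and prefix values
# below are made up for illustration only.
if __name__ == "__main__":
    import re

    bot_id, bot_name, prefix = 123, "Kr0nOs", "<@!123>"
    # Same pattern as: re.sub(rf"<@!?{ctx.me.id}>", f"@{ctx.me.name}", prefixes[0])
    print(re.sub(rf"<@!?{bot_id}>", f"@{bot_name}", prefix))  # -> "@Kr0nOs"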
PypiClean
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/mode/gas/gas.js
(function(mod) { if (typeof exports == "object" && typeof module == "object") // CommonJS mod(require("../../lib/codemirror")); else if (typeof define == "function" && define.amd) // AMD define(["../../lib/codemirror"], mod); else // Plain browser env mod(CodeMirror); })(function(CodeMirror) { "use strict"; CodeMirror.defineMode("gas", function(_config, parserConfig) { 'use strict'; // If an architecture is specified, its initialization function may // populate this array with custom parsing functions which will be // tried in the event that the standard functions do not find a match. var custom = []; // The symbol used to start a line comment changes based on the target // architecture. // If no architecture is pased in "parserConfig" then only multiline // comments will have syntax support. var lineCommentStartSymbol = ""; // These directives are architecture independent. // Machine specific directives should go in their respective // architecture initialization function. // Reference: // http://sourceware.org/binutils/docs/as/Pseudo-Ops.html#Pseudo-Ops var directives = { ".abort" : "builtin", ".align" : "builtin", ".altmacro" : "builtin", ".ascii" : "builtin", ".asciz" : "builtin", ".balign" : "builtin", ".balignw" : "builtin", ".balignl" : "builtin", ".bundle_align_mode" : "builtin", ".bundle_lock" : "builtin", ".bundle_unlock" : "builtin", ".byte" : "builtin", ".cfi_startproc" : "builtin", ".comm" : "builtin", ".data" : "builtin", ".def" : "builtin", ".desc" : "builtin", ".dim" : "builtin", ".double" : "builtin", ".eject" : "builtin", ".else" : "builtin", ".elseif" : "builtin", ".end" : "builtin", ".endef" : "builtin", ".endfunc" : "builtin", ".endif" : "builtin", ".equ" : "builtin", ".equiv" : "builtin", ".eqv" : "builtin", ".err" : "builtin", ".error" : "builtin", ".exitm" : "builtin", ".extern" : "builtin", ".fail" : "builtin", ".file" : "builtin", ".fill" : "builtin", ".float" : "builtin", ".func" : "builtin", ".global" : "builtin", ".gnu_attribute" : "builtin", ".hidden" : "builtin", ".hword" : "builtin", ".ident" : "builtin", ".if" : "builtin", ".incbin" : "builtin", ".include" : "builtin", ".int" : "builtin", ".internal" : "builtin", ".irp" : "builtin", ".irpc" : "builtin", ".lcomm" : "builtin", ".lflags" : "builtin", ".line" : "builtin", ".linkonce" : "builtin", ".list" : "builtin", ".ln" : "builtin", ".loc" : "builtin", ".loc_mark_labels" : "builtin", ".local" : "builtin", ".long" : "builtin", ".macro" : "builtin", ".mri" : "builtin", ".noaltmacro" : "builtin", ".nolist" : "builtin", ".octa" : "builtin", ".offset" : "builtin", ".org" : "builtin", ".p2align" : "builtin", ".popsection" : "builtin", ".previous" : "builtin", ".print" : "builtin", ".protected" : "builtin", ".psize" : "builtin", ".purgem" : "builtin", ".pushsection" : "builtin", ".quad" : "builtin", ".reloc" : "builtin", ".rept" : "builtin", ".sbttl" : "builtin", ".scl" : "builtin", ".section" : "builtin", ".set" : "builtin", ".short" : "builtin", ".single" : "builtin", ".size" : "builtin", ".skip" : "builtin", ".sleb128" : "builtin", ".space" : "builtin", ".stab" : "builtin", ".string" : "builtin", ".struct" : "builtin", ".subsection" : "builtin", ".symver" : "builtin", ".tag" : "builtin", ".text" : "builtin", ".title" : "builtin", ".type" : "builtin", ".uleb128" : "builtin", ".val" : "builtin", ".version" : "builtin", ".vtable_entry" : "builtin", ".vtable_inherit" : "builtin", ".warning" : "builtin", ".weak" : "builtin", ".weakref" : "builtin", ".word" : "builtin" }; var registers = {}; function 
x86(_parserConfig) { lineCommentStartSymbol = "#"; registers.ax = "variable"; registers.eax = "variable-2"; registers.rax = "variable-3"; registers.bx = "variable"; registers.ebx = "variable-2"; registers.rbx = "variable-3"; registers.cx = "variable"; registers.ecx = "variable-2"; registers.rcx = "variable-3"; registers.dx = "variable"; registers.edx = "variable-2"; registers.rdx = "variable-3"; registers.si = "variable"; registers.esi = "variable-2"; registers.rsi = "variable-3"; registers.di = "variable"; registers.edi = "variable-2"; registers.rdi = "variable-3"; registers.sp = "variable"; registers.esp = "variable-2"; registers.rsp = "variable-3"; registers.bp = "variable"; registers.ebp = "variable-2"; registers.rbp = "variable-3"; registers.ip = "variable"; registers.eip = "variable-2"; registers.rip = "variable-3"; registers.cs = "keyword"; registers.ds = "keyword"; registers.ss = "keyword"; registers.es = "keyword"; registers.fs = "keyword"; registers.gs = "keyword"; } function armv6(_parserConfig) { // Reference: // http://infocenter.arm.com/help/topic/com.arm.doc.qrc0001l/QRC0001_UAL.pdf // http://infocenter.arm.com/help/topic/com.arm.doc.ddi0301h/DDI0301H_arm1176jzfs_r0p7_trm.pdf lineCommentStartSymbol = "@"; directives.syntax = "builtin"; registers.r0 = "variable"; registers.r1 = "variable"; registers.r2 = "variable"; registers.r3 = "variable"; registers.r4 = "variable"; registers.r5 = "variable"; registers.r6 = "variable"; registers.r7 = "variable"; registers.r8 = "variable"; registers.r9 = "variable"; registers.r10 = "variable"; registers.r11 = "variable"; registers.r12 = "variable"; registers.sp = "variable-2"; registers.lr = "variable-2"; registers.pc = "variable-2"; registers.r13 = registers.sp; registers.r14 = registers.lr; registers.r15 = registers.pc; custom.push(function(ch, stream) { if (ch === '#') { stream.eatWhile(/\w/); return "number"; } }); } var arch = (parserConfig.architecture || "x86").toLowerCase(); if (arch === "x86") { x86(parserConfig); } else if (arch === "arm" || arch === "armv6") { armv6(parserConfig); } function nextUntilUnescaped(stream, end) { var escaped = false, next; while ((next = stream.next()) != null) { if (next === end && !escaped) { return false; } escaped = !escaped && next === "\\"; } return escaped; } function clikeComment(stream, state) { var maybeEnd = false, ch; while ((ch = stream.next()) != null) { if (ch === "/" && maybeEnd) { state.tokenize = null; break; } maybeEnd = (ch === "*"); } return "comment"; } return { startState: function() { return { tokenize: null }; }, token: function(stream, state) { if (state.tokenize) { return state.tokenize(stream, state); } if (stream.eatSpace()) { return null; } var style, cur, ch = stream.next(); if (ch === "/") { if (stream.eat("*")) { state.tokenize = clikeComment; return clikeComment(stream, state); } } if (ch === lineCommentStartSymbol) { stream.skipToEnd(); return "comment"; } if (ch === '"') { nextUntilUnescaped(stream, '"'); return "string"; } if (ch === '.') { stream.eatWhile(/\w/); cur = stream.current().toLowerCase(); style = directives[cur]; return style || null; } if (ch === '=') { stream.eatWhile(/\w/); return "tag"; } if (ch === '{') { return "braket"; } if (ch === '}') { return "braket"; } if (/\d/.test(ch)) { if (ch === "0" && stream.eat("x")) { stream.eatWhile(/[0-9a-fA-F]/); return "number"; } stream.eatWhile(/\d/); return "number"; } if (/\w/.test(ch)) { stream.eatWhile(/\w/); if (stream.eat(":")) { return 'tag'; } cur = stream.current().toLowerCase(); style = 
registers[cur]; return style || null; } for (var i = 0; i < custom.length; i++) { style = custom[i](ch, stream, state); if (style) { return style; } } }, lineComment: lineCommentStartSymbol, blockCommentStart: "/*", blockCommentEnd: "*/" }; }); });
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/xmpp/UserService.js.uncompressed.js
define("dojox/xmpp/UserService", ["dijit","dojo","dojox"], function(dijit,dojo,dojox){ dojo.provide("dojox.xmpp.UserService"); dojo.declare("dojox.xmpp.UserService", null, { constructor: function(xmppService){ this.session= xmppService; }, getPersonalProfile: function(){ var req={ id: this.session.getNextIqId(), type: 'get' } var request = new dojox.string.Builder(dojox.xmpp.util.createElement("iq",req,false)); request.append(dojox.xmpp.util.createElement("query",{xmlns:"jabber:iq:private"},false)); request.append(dojox.xmpp.util.createElement("sunmsgr",{xmlsns:'sun:xmpp:properties'},true)); request.append("</query></iq>"); var def = this.session.dispatchPacket(request.toString(),"iq",req.id); def.addCallback(this, "_onGetPersonalProfile"); }, setPersonalProfile: function(props){ var req={ id: this.session.getNextIqId(), type: 'set' } var request = new dojox.string.Builder(dojox.xmpp.util.createElement("iq",req,false)); request.append(dojox.xmpp.util.createElement("query",{xmlns:"jabber:iq:private"},false)); request.append(dojox.xmpp.util.createElement("sunmsgr",{xmlsns:'sun:xmpp:properties'},false)); for (var key in props){ request.append(dojox.xmpp.util.createElement("property",{name: key},false)); request.append(dojox.xmpp.util.createElement("value",{},false)); request.append(props[key]); request.append("</value></props>"); } request.append("</sunmsgr></query></iq>"); var def = this.session.dispatchPacket(request.toString(), "iq", req.id); def.addCallback(this, "_onSetPersonalProfile"); }, _onSetPersonalProfile: function(response){ if(response.getAttribute('type')=='result'){ this.onSetPersonalProfile(response.getAttribute('id')); }else if(response.getAttribute('type')=='error'){ var err = this.session.processXmppError(response); this.onSetPersonalProfileFailure(err); } }, onSetPersonalProfile: function(id){}, onSetPersonalProfileFailure: function(err){}, _onGetPersonalProfile: function(profile){ if (profile.getAttribute('type')=='result'){ var props = {}; if (profile.hasChildNodes()){ var queryNode = profile.firstChild; if ((queryNode.nodeName=="query")&&(queryNode.getAttribute('xmlns')=='jabber:iq:private')){ var sunNode = queryNode.firstChild; if ((sunNode.nodeName=='query')&&(sunNode.getAttributes('xmlns')=='sun:xmpp:properties')){ for (var i=0; i<sunNode.childNodes.length;i++){ var n = sunNode.childNodes[i]; if(n.nodeName == 'property'){ var name = n.getAttribute('name'); var val = n.firstChild || ""; props[name]=val; } } } } this.onGetPersonalProfile(props); } }else if (profile.getAttribute('type')=='error'){ var err = this.session.processXmppError(profile); this.onGetPersonalProfileFailure(err); } return profile; }, onGetPersonalProfile: function(profile){ //console.log("UserService::onGetPersonalProfile() ", profile); }, onGetPersonalProfileFailure: function(err){ //console.log("UserService::onGetPersonalProfileFailure() ", err); } }); });
PypiClean
/Flask-Security-Utils-1.0.2.tar.gz/Flask-Security-Utils-1.0.2/security_utils/CountryFirewall.py
import logging
import os

from IP2Location import IP2Location, IP2LocationIPTools
from urllib.request import urlopen
from os.path import exists as file_exists

traza = logging.getLogger(__name__)


class CountryFirewall():
    """ Utility class. """

    __instance = None
    __ipCountryFileDB = None
    __ipV6CountryFileDB = None
    __databaseIpV4 = None
    __databaseIpV6 = None
    __inMemoryDatabase = None
    __IP2LocationIPTools = None

    def __new__(cls, ip_country_file_db : str = None,
                ip_v6_country_file_db : str = None,
                blocked_countries : list = None,
                allowed_countries : list = None,
                in_memory_database : bool = False):
        """
        Constructor of the CountryFirewall class.
        """
        if (cls.__instance == None):

            # Register file paths
            cls.__ipCountryFileDB = ip_country_file_db if ip_country_file_db is not None else ""
            cls.__ipV6CountryFileDB = ip_v6_country_file_db if ip_v6_country_file_db is not None else ""
            cls.__inMemoryDatabase = in_memory_database

            # Register country names
            cls.__blockedCountries = blocked_countries
            cls.__allowedCountries = allowed_countries

            # Verify before initializing the class
            cls.__verifyCountriesLists(cls)

            # Load or download files
            cls.__loadOrDownloadDatabase(cls)

            # Create the ip tools object
            cls.__IP2LocationIPTools = IP2LocationIPTools()

            # Create the class
            cls.__instance = object.__new__(cls)

        return cls.__instance

    def __verifyCountriesLists(cls):
        """
        This function raises an exception when one or more countries are
        allowed and blocked at the same time.
        """
        if None not in [cls.__blockedCountries, cls.__allowedCountries]:
            setBlockedList = set(cls.__blockedCountries)
            setAllowedList = set(cls.__allowedCountries)
            intersec = setBlockedList.intersection(setAllowedList)
            if len(intersec) > 0:
                raise Exception("The 'BLOCKED_COUNTRIES' and 'ALLOWED_COUNTRIES' contain these repeated countries {}.".format(intersec))

    def __loadOrDownloadDatabase(cls):
        """
        This function downloads the database from the git repository if the path doesn't exist.
        """

        # Create the database folder if it doesn't exist.
        def createFolder():
            if not os.path.exists("ip_database"):
                os.mkdir("ip_database")

        # Download the IPv4 database if needed
        if file_exists(cls.__ipCountryFileDB) == False:
            urlIpV4 = "https://github.com/alejivo/Flask-Security-Utils/raw/main/IP2LocationDB/IP2LOCATION-LITE-DB1.BIN"
            createFolder()
            #cls.__downloadFile(cls,url=urlIpV4,filePath=os.path.join("ip_database","IP2LOCATION-LITE-DB1.BIN"))
            cls.__downloadFile(cls, url=urlIpV4, filePath=os.path.join("ip_database", "IP2LOCATION-LITE-DB1.BIN"))
            cls.__ipCountryFileDB = os.path.join("ip_database", "IP2LOCATION-LITE-DB1.BIN")

        # Download the IPv6 database if needed
        if file_exists(cls.__ipV6CountryFileDB) == False:
            urlIpV6 = "https://github.com/alejivo/Flask-Security-Utils/raw/main/IP2LocationDB/IP2LOCATION-LITE-DB1.IPV6.BIN"
            createFolder()
            cls.__downloadFile(cls, url=urlIpV6, filePath=os.path.join("ip_database", "IP2LOCATION-LITE-DB1.IPV6.BIN"))
            cls.__ipV6CountryFileDB = os.path.join("ip_database", "IP2LOCATION-LITE-DB1.IPV6.BIN")

        # Load database files
        if cls.__inMemoryDatabase == True:
            cls.__databaseIpV4 : IP2Location = IP2Location(os.path.join("ip_database", "IP2LOCATION-LITE-DB1.BIN"), "SHARED_MEMORY")
            cls.__databaseIpV6 : IP2Location = IP2Location(os.path.join("ip_database", "IP2LOCATION-LITE-DB1.IPV6.BIN"), "SHARED_MEMORY")
        else:
            cls.__databaseIpV4 : IP2Location = IP2Location(os.path.join("ip_database", "IP2LOCATION-LITE-DB1.BIN"))
            cls.__databaseIpV6 : IP2Location = IP2Location(os.path.join("ip_database", "IP2LOCATION-LITE-DB1.IPV6.BIN"))

    def isIPInBlockedCountry(cls, ip : str) -> bool:
        """
        This function returns True if the IP is in the blocked country list, or False if not.
        """
        if cls.__IP2LocationIPTools.is_ipv4(ip=ip):
            if str(cls.__databaseIpV4.get_country_short(ip)) in cls.__blockedCountries:
                return True
            else:
                return False
        elif cls.__IP2LocationIPTools.is_ipv6(ip=ip):
            if str(cls.__databaseIpV6.get_country_short(ip)) in cls.__blockedCountries:
                return True
            else:
                return False
        else:
            traza.error("The IP[{}] is not IPv4 or IPv6 and can't be processed, isIPInBlockedCountry returns False.".format(ip))
            return False

    def isIPInAllowedCountry(cls, ip : str) -> bool:
        """
        This function returns True if the IP is in the allowed country list, or False if not.
        """
        if cls.__IP2LocationIPTools.is_ipv4(ip):
            if str(cls.__databaseIpV4.get_country_short(ip)) in cls.__allowedCountries:
                return True
            else:
                return False
        elif cls.__IP2LocationIPTools.is_ipv6(ip):
            if str(cls.__databaseIpV6.get_country_short(ip)) in cls.__allowedCountries:
                return True
            else:
                return False
        else:
            traza.error("The IP[{}] is not IPv4 or IPv6 and can't be processed, isIPInAllowedCountry returns True.".format(ip))
            return True

    def isInList(cls, ip : str, countryList: list):
        """
        This function returns True if the country of the IP is in the list, or False if not.
        """
        if cls.__IP2LocationIPTools.is_ipv4(ip):
            if str(cls.__databaseIpV4.get_country_short(ip)) in countryList:
                return True
            else:
                return False
        elif cls.__IP2LocationIPTools.is_ipv6(ip):
            if str(cls.__databaseIpV6.get_country_short(ip)) in countryList:
                return True
            else:
                return False
        else:
            traza.error("The IP[{}] is not IPv4 or IPv6 and can't be processed, isInList returns False.".format(ip))
            return False

    def __downloadFile(cls, url, filePath):
        """
        This function is an auxiliary one useful to download database files.
        """
        try:
            f = urlopen(url)
            with open(filePath, "wb") as local_file:
                local_file.write(f.read())
        except Exception as e:
            raise Exception("Exception {} trying to download the file {} to the path {} ".format(e, url, filePath))
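
# --- Illustrative usage sketch (added note, not part of the original package) ---
# A minimal example of how this class might be driven on its own, assuming the
# IP2Location LITE databases can be downloaded on first use. The country codes
# and the address below (203.0.113.5, a documentation-range IP) are made-up
# example values, not anything defined by the library.
if __name__ == "__main__":
    firewall = CountryFirewall(
        blocked_countries=["CN", "RU"],
        allowed_countries=["US", "AR"],
        in_memory_database=True,
    )
    print(firewall.isIPInBlockedCountry("203.0.113.5"))    # True/False against the blocked list
    print(firewall.isIPInAllowedCountry("203.0.113.5"))    # True/False against the allowed list
    print(firewall.isInList("203.0.113.5", ["US", "UY"]))  # ad-hoc country list check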
PypiClean
/Congo-0.0.1.tar.gz/Congo-0.0.1/portfolio/component/static/portfolio/vendor/bootstrap/Gruntfile.js
module.exports = function (grunt) { 'use strict'; // Force use of Unix newlines grunt.util.linefeed = '\n'; RegExp.quote = function (string) { return string.replace(/[-\\^$*+?.()|[\]{}]/g, '\\$&'); }; var fs = require('fs'); var path = require('path'); var npmShrinkwrap = require('npm-shrinkwrap'); var generateGlyphiconsData = require('./grunt/bs-glyphicons-data-generator.js'); var BsLessdocParser = require('./grunt/bs-lessdoc-parser.js'); var getLessVarsData = function () { var filePath = path.join(__dirname, 'less/variables.less'); var fileContent = fs.readFileSync(filePath, { encoding: 'utf8' }); var parser = new BsLessdocParser(fileContent); return { sections: parser.parseFile() }; }; var generateRawFiles = require('./grunt/bs-raw-files-generator.js'); var generateCommonJSModule = require('./grunt/bs-commonjs-generator.js'); var configBridge = grunt.file.readJSON('./grunt/configBridge.json', { encoding: 'utf8' }); Object.keys(configBridge.paths).forEach(function (key) { configBridge.paths[key].forEach(function (val, i, arr) { arr[i] = path.join('./docs/assets', val); }); }); // Project configuration. grunt.initConfig({ // Metadata. pkg: grunt.file.readJSON('package.json'), banner: '/*!\n' + ' * Bootstrap v<%= pkg.version %> (<%= pkg.homepage %>)\n' + ' * Copyright 2011-<%= grunt.template.today("yyyy") %> <%= pkg.author %>\n' + ' * Licensed under <%= pkg.license.type %> (<%= pkg.license.url %>)\n' + ' */\n', jqueryCheck: configBridge.config.jqueryCheck.join('\n'), jqueryVersionCheck: configBridge.config.jqueryVersionCheck.join('\n'), // Task configuration. clean: { dist: 'dist', docs: 'docs/dist' }, jshint: { options: { jshintrc: 'js/.jshintrc' }, grunt: { options: { jshintrc: 'grunt/.jshintrc' }, src: ['Gruntfile.js', 'grunt/*.js'] }, core: { src: 'js/*.js' }, test: { options: { jshintrc: 'js/tests/unit/.jshintrc' }, src: 'js/tests/unit/*.js' }, assets: { src: ['docs/assets/js/src/*.js', 'docs/assets/js/*.js', '!docs/assets/js/*.min.js'] } }, jscs: { options: { config: 'js/.jscsrc' }, grunt: { src: '<%= jshint.grunt.src %>' }, core: { src: '<%= jshint.core.src %>' }, test: { src: '<%= jshint.test.src %>' }, assets: { options: { requireCamelCaseOrUpperCaseIdentifiers: null }, src: '<%= jshint.assets.src %>' } }, concat: { options: { banner: '<%= banner %>\n<%= jqueryCheck %>\n<%= jqueryVersionCheck %>', stripBanners: false }, bootstrap: { src: [ 'js/transition.js', 'js/alert.js', 'js/button.js', 'js/carousel.js', 'js/collapse.js', 'js/dropdown.js', 'js/modal.js', 'js/tooltip.js', 'js/popover.js', 'js/scrollspy.js', 'js/tab.js', 'js/affix.js' ], dest: 'dist/js/<%= pkg.name %>.js' } }, uglify: { options: { preserveComments: 'some' }, core: { src: '<%= concat.bootstrap.dest %>', dest: 'dist/js/<%= pkg.name %>.min.js' }, customize: { src: configBridge.paths.customizerJs, dest: 'docs/assets/js/customize.min.js' }, docsJs: { src: configBridge.paths.docsJs, dest: 'docs/assets/js/docs.min.js' } }, qunit: { options: { inject: 'js/tests/unit/phantom.js' }, files: 'js/tests/index.html' }, less: { compileCore: { options: { strictMath: true, sourceMap: true, outputSourceFiles: true, sourceMapURL: '<%= pkg.name %>.css.map', sourceMapFilename: 'dist/css/<%= pkg.name %>.css.map' }, src: 'less/bootstrap.less', dest: 'dist/css/<%= pkg.name %>.css' }, compileTheme: { options: { strictMath: true, sourceMap: true, outputSourceFiles: true, sourceMapURL: '<%= pkg.name %>-theme.css.map', sourceMapFilename: 'dist/css/<%= pkg.name %>-theme.css.map' }, src: 'less/theme.less', dest: 'dist/css/<%= pkg.name 
%>-theme.css' } }, autoprefixer: { options: { browsers: configBridge.config.autoprefixerBrowsers }, core: { options: { map: true }, src: 'dist/css/<%= pkg.name %>.css' }, theme: { options: { map: true }, src: 'dist/css/<%= pkg.name %>-theme.css' }, docs: { src: ['docs/assets/css/anchor.css', 'docs/assets/css/src/docs.css'] }, examples: { expand: true, cwd: 'docs/examples/', src: ['**/*.css'], dest: 'docs/examples/' } }, csslint: { options: { csslintrc: 'less/.csslintrc' }, dist: [ 'dist/css/bootstrap.css', 'dist/css/bootstrap-theme.css' ], examples: [ 'docs/examples/**/*.css' ], docs: { options: { ids: false, 'overqualified-elements': false }, src: 'docs/assets/css/src/docs.css' } }, cssmin: { options: { // TODO: disable `zeroUnits` optimization once clean-css 3.2 is released // and then simplify the fix for https://github.com/twbs/bootstrap/issues/14837 accordingly compatibility: 'ie8', keepSpecialComments: '*', advanced: false }, minifyCore: { src: 'dist/css/<%= pkg.name %>.css', dest: 'dist/css/<%= pkg.name %>.min.css' }, minifyTheme: { src: 'dist/css/<%= pkg.name %>-theme.css', dest: 'dist/css/<%= pkg.name %>-theme.min.css' }, docs: { src: [ 'docs/assets/css/src/pygments-manni.css', 'docs/assets/css/src/anchor.css', 'docs/assets/css/src/docs.css' ], dest: 'docs/assets/css/docs.min.css' } }, usebanner: { options: { position: 'top', banner: '<%= banner %>' }, files: { src: 'dist/css/*.css' } }, csscomb: { options: { config: 'less/.csscomb.json' }, dist: { expand: true, cwd: 'dist/css/', src: ['*.css', '!*.min.css'], dest: 'dist/css/' }, examples: { expand: true, cwd: 'docs/examples/', src: '**/*.css', dest: 'docs/examples/' }, docs: { src: 'docs/assets/css/src/docs.css', dest: 'docs/assets/css/src/docs.css' } }, copy: { fonts: { expand: true, src: 'fonts/*', dest: 'dist/' }, docs: { expand: true, cwd: 'dist/', src: [ '**/*' ], dest: 'docs/dist/' } }, connect: { server: { options: { port: 3000, base: '.' } } }, jekyll: { options: { config: '_config.yml' }, docs: {}, github: { options: { raw: 'github: true' } } }, jade: { options: { pretty: true, data: getLessVarsData }, customizerVars: { src: 'docs/_jade/customizer-variables.jade', dest: 'docs/_includes/customizer-variables.html' }, customizerNav: { src: 'docs/_jade/customizer-nav.jade', dest: 'docs/_includes/nav/customize.html' } }, htmllint: { options: { ignore: [ 'Attribute "autocomplete" not allowed on element "button" at this point.', 'Attribute "autocomplete" not allowed on element "input" at this point.', 'Element "img" is missing required attribute "src".' ] }, src: '_gh_pages/**/*.html' }, watch: { src: { files: '<%= jshint.core.src %>', tasks: ['jshint:src', 'qunit', 'concat'] }, test: { files: '<%= jshint.test.src %>', tasks: ['jshint:test', 'qunit'] }, less: { files: 'less/**/*.less', tasks: 'less' } }, sed: { versionNumber: { pattern: (function () { var old = grunt.option('oldver'); return old ? 
RegExp.quote(old) : old; })(), replacement: grunt.option('newver'), recursive: true } }, 'saucelabs-qunit': { all: { options: { build: process.env.TRAVIS_JOB_ID, throttled: 10, maxRetries: 3, maxPollRetries: 4, urls: ['http://127.0.0.1:3000/js/tests/index.html?hidepassed'], browsers: grunt.file.readYAML('grunt/sauce_browsers.yml') } } }, exec: { npmUpdate: { command: 'npm update' } }, compress: { main: { options: { archive: 'bootstrap-<%= pkg.version %>-dist.zip', mode: 'zip', level: 9, pretty: true }, files: [ { expand: true, cwd: 'dist/', src: ['**'], dest: 'bootstrap-<%= pkg.version %>-dist' } ] } } }); // These plugins provide necessary tasks. require('load-grunt-tasks')(grunt, { scope: 'devDependencies' }); require('time-grunt')(grunt); // Docs HTML validation task grunt.registerTask('validate-html', ['jekyll:docs', 'htmllint']); var runSubset = function (subset) { return !process.env.TWBS_TEST || process.env.TWBS_TEST === subset; }; var isUndefOrNonZero = function (val) { return val === undefined || val !== '0'; }; // Test task. var testSubtasks = []; // Skip core tests if running a different subset of the test suite if (runSubset('core') && // Skip core tests if this is a Savage build process.env.TRAVIS_REPO_SLUG !== 'twbs-savage/bootstrap') { testSubtasks = testSubtasks.concat(['dist-css', 'dist-js', 'csslint:dist', 'test-js', 'docs']); } // Skip HTML validation if running a different subset of the test suite if (runSubset('validate-html') && // Skip HTML5 validator on Travis when [skip validator] is in the commit message isUndefOrNonZero(process.env.TWBS_DO_VALIDATOR)) { testSubtasks.push('validate-html'); } // Only run Sauce Labs tests if there's a Sauce access key if (typeof process.env.SAUCE_ACCESS_KEY !== 'undefined' && // Skip Sauce if running a different subset of the test suite runSubset('sauce-js-unit') && // Skip Sauce on Travis when [skip sauce] is in the commit message isUndefOrNonZero(process.env.TWBS_DO_SAUCE)) { testSubtasks.push('connect'); testSubtasks.push('saucelabs-qunit'); } grunt.registerTask('test', testSubtasks); grunt.registerTask('test-js', ['jshint:core', 'jshint:test', 'jshint:grunt', 'jscs:core', 'jscs:test', 'jscs:grunt', 'qunit']); // JS distribution task. grunt.registerTask('dist-js', ['concat', 'uglify:core', 'commonjs']); // CSS distribution task. grunt.registerTask('less-compile', ['less:compileCore', 'less:compileTheme']); grunt.registerTask('dist-css', ['less-compile', 'autoprefixer:core', 'autoprefixer:theme', 'usebanner', 'csscomb:dist', 'cssmin:minifyCore', 'cssmin:minifyTheme']); // Full distribution task. grunt.registerTask('dist', ['clean:dist', 'dist-css', 'copy:fonts', 'dist-js']); // Default task. grunt.registerTask('default', ['clean:dist', 'copy:fonts', 'test']); // Version numbering task. // grunt change-version-number --oldver=A.B.C --newver=X.Y.Z // This can be overzealous, so its changes should always be manually reviewed! 
grunt.registerTask('change-version-number', 'sed'); grunt.registerTask('build-glyphicons-data', function () { generateGlyphiconsData.call(this, grunt); }); // task for building customizer grunt.registerTask('build-customizer', ['build-customizer-html', 'build-raw-files']); grunt.registerTask('build-customizer-html', 'jade'); grunt.registerTask('build-raw-files', 'Add scripts/less files to customizer.', function () { var banner = grunt.template.process('<%= banner %>'); generateRawFiles(grunt, banner); }); grunt.registerTask('commonjs', 'Generate CommonJS entrypoint module in dist dir.', function () { var srcFiles = grunt.config.get('concat.bootstrap.src'); var destFilepath = 'dist/js/npm.js'; generateCommonJSModule(grunt, srcFiles, destFilepath); }); // Docs task. grunt.registerTask('docs-css', ['autoprefixer:docs', 'autoprefixer:examples', 'csscomb:docs', 'csscomb:examples', 'cssmin:docs']); grunt.registerTask('lint-docs-css', ['csslint:docs', 'csslint:examples']); grunt.registerTask('docs-js', ['uglify:docsJs', 'uglify:customize']); grunt.registerTask('lint-docs-js', ['jshint:assets', 'jscs:assets']); grunt.registerTask('docs', ['docs-css', 'lint-docs-css', 'docs-js', 'lint-docs-js', 'clean:docs', 'copy:docs', 'build-glyphicons-data', 'build-customizer']); grunt.registerTask('prep-release', ['jekyll:github', 'compress']); // Task for updating the cached npm packages used by the Travis build (which are controlled by test-infra/npm-shrinkwrap.json). // This task should be run and the updated file should be committed whenever Bootstrap's dependencies change. grunt.registerTask('update-shrinkwrap', ['exec:npmUpdate', '_update-shrinkwrap']); grunt.registerTask('_update-shrinkwrap', function () { var done = this.async(); npmShrinkwrap({ dev: true, dirname: __dirname }, function (err) { if (err) { grunt.fail.warn(err); } var dest = 'test-infra/npm-shrinkwrap.json'; fs.renameSync('npm-shrinkwrap.json', dest); grunt.log.writeln('File ' + dest.cyan + ' updated.'); done(); }); }); };
PypiClean
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/install.py
import os import stat from shutil import copy2, copystat import SCons.Action import SCons.Tool import SCons.Util from SCons.Subst import SUBST_RAW from SCons.Tool.linkCommon import ( StringizeLibSymlinks, CreateLibSymlinks, EmitLibSymlinks, ) # We keep track of *all* installed files. _INSTALLED_FILES = [] _UNIQUE_INSTALLED_FILES = None class CopytreeError(OSError): pass def scons_copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, ignore_dangling_symlinks=False, dirs_exist_ok=False): """Recursively copy a directory tree, SCons version. This is a modified copy of the Python 3.7 shutil.copytree function. SCons update: dirs_exist_ok dictates whether to raise an exception in case dst or any missing parent directory already exists. Implementation depends on os.makedirs having a similar flag, which it has since Python 3.2. This version also raises an SCons-defined exception rather than the one defined locally to shtuil. This version uses a change from Python 3.8. TODO: we can remove this forked copy once the minimum Py version is 3.8. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. If the file pointed by the symlink doesn't exist, an exception will be added in the list of errors raised in an Error exception at the end of the copy process. You can set the optional ignore_dangling_symlinks flag to true if you want to silence this exception. Notice that this has no effect on platforms that don't support os.symlink. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. The optional copy_function argument is a callable that will be used to copy each file. It will be called with the source path and the destination path as arguments. By default, copy2() is used, but any function that supports the same signature (like copy()) can be used. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst, exist_ok=dirs_exist_ok) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if os.path.islink(srcname): linkto = os.readlink(srcname) if symlinks: # We can't just leave it to `copy_function` because legacy # code with a custom `copy_function` may rely on copytree # doing the right thing. os.symlink(linkto, dstname) copystat(srcname, dstname, follow_symlinks=not symlinks) else: # ignore dangling symlink if the flag is on if not os.path.exists(linkto) and ignore_dangling_symlinks: continue # otherwise let the copy occurs. 
copy2 will raise an error if os.path.isdir(srcname): scons_copytree(srcname, dstname, symlinks=symlinks, ignore=ignore, copy_function=copy_function, ignore_dangling_symlinks=ignore_dangling_symlinks, dirs_exist_ok=dirs_exist_ok) else: copy_function(srcname, dstname) elif os.path.isdir(srcname): scons_copytree(srcname, dstname, symlinks=symlinks, ignore=ignore, copy_function=copy_function, ignore_dangling_symlinks=ignore_dangling_symlinks, dirs_exist_ok=dirs_exist_ok) else: # Will raise a SpecialFileError for unsupported file types copy_function(srcname, dstname) # catch the Error from the recursive copytree so that we can # continue with other files except CopytreeError as err: # SCons change errors.extend(err.args[0]) except OSError as why: errors.append((srcname, dstname, str(why))) try: copystat(src, dst) except OSError as why: # Copying file access times may fail on Windows if getattr(why, 'winerror', None) is None: errors.append((src, dst, str(why))) if errors: raise CopytreeError(errors) # SCons change return dst # # Functions doing the actual work of the Install Builder. # def copyFunc(dest, source, env) -> int: """Install a source file or directory into a destination by copying. Mode/permissions bits will be copied as well, except that the target will be made writable. Returns: POSIX-style error code - 0 for success, non-zero for fail """ if os.path.isdir(source): if os.path.exists(dest): if not os.path.isdir(dest): raise SCons.Errors.UserError("cannot overwrite non-directory `%s' with a directory `%s'" % (str(dest), str(source))) else: parent = os.path.split(dest)[0] if not os.path.exists(parent): os.makedirs(parent) scons_copytree(source, dest, dirs_exist_ok=True) else: copy2(source, dest) st = os.stat(source) os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE) return 0 # # Functions doing the actual work of the InstallVersionedLib Builder. # def copyFuncVersionedLib(dest, source, env) -> int: """Install a versioned library into a destination by copying. Any required symbolic links for other library names are created. Mode/permissions bits will be copied as well, except that the target will be made writable. 
Returns: POSIX-style error code - 0 for success, non-zero for fail """ if os.path.isdir(source): raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source) ) else: # remove the link if it is already there try: os.remove(dest) except: pass copy2(source, dest) st = os.stat(source) os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE) installShlibLinks(dest, source, env) return 0 def listShlibLinksToInstall(dest, source, env): install_links = [] source = env.arg2nodes(source) dest = env.fs.File(dest) install_dir = dest.get_dir() for src in source: symlinks = getattr(getattr(src, 'attributes', None), 'shliblinks', None) if symlinks: for link, linktgt in symlinks: link_base = os.path.basename(link.get_path()) linktgt_base = os.path.basename(linktgt.get_path()) install_link = env.fs.File(link_base, install_dir) install_linktgt = env.fs.File(linktgt_base, install_dir) install_links.append((install_link, install_linktgt)) return install_links def installShlibLinks(dest, source, env): """If we are installing a versioned shared library create the required links.""" Verbose = False symlinks = listShlibLinksToInstall(dest, source, env) if Verbose: print('installShlibLinks: symlinks={!r}'.format(StringizeLibSymlinks(symlinks))) if symlinks: CreateLibSymlinks(env, symlinks) return def installFunc(target, source, env) -> int: """Install a source file into a target. Uses the function specified in the INSTALL construction variable. Returns: POSIX-style error code - 0 for success, non-zero for fail """ try: install = env['INSTALL'] except KeyError: raise SCons.Errors.UserError('Missing INSTALL construction variable.') assert len(target) == len(source), ( "Installing source %s into target %s: " "target and source lists must have same length." % (list(map(str, source)), list(map(str, target))) ) for t, s in zip(target, source): if install(t.get_path(), s.get_path(), env): return 1 return 0 def installFuncVersionedLib(target, source, env) -> int: """Install a versioned library into a target. Uses the function specified in the INSTALL construction variable. Returns: POSIX-style error code - 0 for success, non-zero for fail """ try: install = env['INSTALLVERSIONEDLIB'] except KeyError: raise SCons.Errors.UserError( 'Missing INSTALLVERSIONEDLIB construction variable.' ) assert len(target) == len(source), ( "Installing source %s into target %s: " "target and source lists must have same length." % (list(map(str, source)), list(map(str, target))) ) for t, s in zip(target, source): if hasattr(t.attributes, 'shlibname'): tpath = os.path.join(t.get_dir(), t.attributes.shlibname) else: tpath = t.get_path() if install(tpath, s.get_path(), env): return 1 return 0 def stringFunc(target, source, env): installstr = env.get('INSTALLSTR') if installstr: return env.subst_target_source(installstr, SUBST_RAW, target, source) target = str(target[0]) source = str(source[0]) if os.path.isdir(source): type = 'directory' else: type = 'file' return 'Install %s: "%s" as "%s"' % (type, source, target) # # Emitter functions # def add_targets_to_INSTALLED_FILES(target, source, env): """ An emitter that adds all target files to the list stored in the _INSTALLED_FILES global variable. This way all installed files of one scons call will be collected. 
""" global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES _INSTALLED_FILES.extend(target) _UNIQUE_INSTALLED_FILES = None return (target, source) def add_versioned_targets_to_INSTALLED_FILES(target, source, env): """ An emitter that adds all target files to the list stored in the _INSTALLED_FILES global variable. This way all installed files of one scons call will be collected. """ global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES Verbose = False _INSTALLED_FILES.extend(target) if Verbose: print("add_versioned_targets_to_INSTALLED_FILES: target={!r}".format(list(map(str, target)))) symlinks = listShlibLinksToInstall(target[0], source, env) if symlinks: EmitLibSymlinks(env, symlinks, target[0]) _UNIQUE_INSTALLED_FILES = None return (target, source) class DESTDIR_factory: """ A node factory, where all files will be relative to the dir supplied in the constructor. """ def __init__(self, env, dir): self.env = env self.dir = env.arg2nodes( dir, env.fs.Dir )[0] def Entry(self, name): name = SCons.Util.make_path_relative(name) return self.dir.Entry(name) def Dir(self, name): name = SCons.Util.make_path_relative(name) return self.dir.Dir(name) # # The Builder Definition # install_action = SCons.Action.Action(installFunc, stringFunc) installas_action = SCons.Action.Action(installFunc, stringFunc) installVerLib_action = SCons.Action.Action(installFuncVersionedLib, stringFunc) BaseInstallBuilder = None def InstallBuilderWrapper(env, target=None, source=None, dir=None, **kw): if target and dir: import SCons.Errors raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.") if not dir: dir=target import SCons.Script install_sandbox = SCons.Script.GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs try: dnodes = env.arg2nodes(dir, target_factory.Dir) except TypeError: raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir)) sources = env.arg2nodes(source, env.fs.Entry) tgt = [] for dnode in dnodes: for src in sources: # Prepend './' so the lookup doesn't interpret an initial # '#' on the file name portion as meaning the Node should # be relative to the top-level SConstruct directory. target = env.fs.Entry('.'+os.sep+src.name, dnode) tgt.extend(BaseInstallBuilder(env, target, src, **kw)) return tgt def InstallAsBuilderWrapper(env, target=None, source=None, **kw): result = [] for src, tgt in map(lambda x, y: (x, y), source, target): result.extend(BaseInstallBuilder(env, tgt, src, **kw)) return result BaseVersionedInstallBuilder = None def InstallVersionedBuilderWrapper(env, target=None, source=None, dir=None, **kw): if target and dir: import SCons.Errors raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.") if not dir: dir=target import SCons.Script install_sandbox = SCons.Script.GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs try: dnodes = env.arg2nodes(dir, target_factory.Dir) except TypeError: raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" 
% str(dir)) sources = env.arg2nodes(source, env.fs.Entry) tgt = [] for dnode in dnodes: for src in sources: # Prepend './' so the lookup doesn't interpret an initial # '#' on the file name portion as meaning the Node should # be relative to the top-level SConstruct directory. target = env.fs.Entry('.'+os.sep+src.name, dnode) tgt.extend(BaseVersionedInstallBuilder(env, target, src, **kw)) return tgt added = None def generate(env): from SCons.Script import AddOption, GetOption global added if not added: added = 1 AddOption('--install-sandbox', dest='install_sandbox', type="string", action="store", help='A directory under which all installed files will be placed.') global BaseInstallBuilder if BaseInstallBuilder is None: install_sandbox = GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs BaseInstallBuilder = SCons.Builder.Builder( action = install_action, target_factory = target_factory.Entry, source_factory = env.fs.Entry, multi = True, emitter = [ add_targets_to_INSTALLED_FILES, ], source_scanner = SCons.Scanner.ScannerBase({}, name='Install', recursive=False), name = 'InstallBuilder') global BaseVersionedInstallBuilder if BaseVersionedInstallBuilder is None: install_sandbox = GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs BaseVersionedInstallBuilder = SCons.Builder.Builder( action = installVerLib_action, target_factory = target_factory.Entry, source_factory = env.fs.Entry, multi = True, emitter = [ add_versioned_targets_to_INSTALLED_FILES, ], name = 'InstallVersionedBuilder') env['BUILDERS']['_InternalInstall'] = InstallBuilderWrapper env['BUILDERS']['_InternalInstallAs'] = InstallAsBuilderWrapper env['BUILDERS']['_InternalInstallVersionedLib'] = InstallVersionedBuilderWrapper # We'd like to initialize this doing something like the following, # but there isn't yet support for a ${SOURCE.type} expansion that # will print "file" or "directory" depending on what's being # installed. For now we punt by not initializing it, and letting # the stringFunc() that we put in the action fall back to the # hand-crafted default string if it's not set. # #try: # env['INSTALLSTR'] #except KeyError: # env['INSTALLSTR'] = 'Install ${SOURCE.type}: "$SOURCES" as "$TARGETS"' try: env['INSTALL'] except KeyError: env['INSTALL'] = copyFunc try: env['INSTALLVERSIONEDLIB'] except KeyError: env['INSTALLVERSIONEDLIB'] = copyFuncVersionedLib def exists(env): return 1 # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
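The install tool above is normally consumed from an SConstruct file. A minimal sketch, assuming a hypothetical project with foo.c and main.c; Install(), InstallAs(), Alias() and the --install-sandbox option are the pieces wired up by generate() above, while every file name and the /usr/local prefix are illustrative placeholders.

# SConstruct (hypothetical project; file names and install prefix are placeholders)
env = Environment()

lib = env.SharedLibrary('foo', ['foo.c'])
prog = env.Program('foo-cli', ['main.c'])

# Install() copies sources into a directory; InstallAs() renames them on the way in.
installed = env.Install('/usr/local/lib', lib)
installed += env.InstallAs('/usr/local/bin/foo', prog)
env.Alias('install', installed)

# "scons install --install-sandbox=/tmp/stage" reroutes both targets through the
# DESTDIR_factory defined above instead of writing into the real prefix.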
PypiClean
/DeepXDE-1.9.3-py3-none-any.whl/deepxde/nn/tensorflow/fnn.py
from .nn import NN from .. import activations from .. import initializers from .. import regularizers from ...backend import tf class FNN(NN): """Fully-connected neural network.""" def __init__( self, layer_sizes, activation, kernel_initializer, regularization=None, dropout_rate=0, ): super().__init__() self.regularizer = regularizers.get(regularization) self.dropout_rate = dropout_rate self.denses = [] if isinstance(activation, list): if not (len(layer_sizes) - 1) == len(activation): raise ValueError( "Total number of activation functions do not match with sum of hidden layers and output layer!" ) activation = list(map(activations.get, activation)) else: activation = activations.get(activation) initializer = initializers.get(kernel_initializer) for j, units in enumerate(layer_sizes[1:-1]): self.denses.append( tf.keras.layers.Dense( units, activation=( activation[j] if isinstance(activation, list) else activation ), kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) ) if self.dropout_rate > 0: self.denses.append(tf.keras.layers.Dropout(rate=self.dropout_rate)) self.denses.append( tf.keras.layers.Dense( layer_sizes[-1], kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) ) def call(self, inputs, training=False): y = inputs if self._input_transform is not None: y = self._input_transform(y) for f in self.denses: y = f(y, training=training) if self._output_transform is not None: y = self._output_transform(inputs, y) return y class PFNN(NN): """Parallel fully-connected neural network that uses independent sub-networks for each network output. Args: layer_sizes: A nested list to define the architecture of the neural network (how the layers are connected). If `layer_sizes[i]` is int, it represent one layer shared by all the outputs; if `layer_sizes[i]` is list, it represent `len(layer_sizes[i])` sub-layers, each of which exclusively used by one output. Note that `len(layer_sizes[i])` should equal to the number of outputs. Every number specify the number of neurons of that layer. """ def __init__( self, layer_sizes, activation, kernel_initializer, regularization=None ): super().__init__() activation = activations.get(activation) initializer = initializers.get(kernel_initializer) self.regularizer = regularizers.get(regularization) n_output = layer_sizes[-1] self.denses = [] # hidden layers for i in range(1, len(layer_sizes) - 1): prev_layer_size = layer_sizes[i - 1] curr_layer_size = layer_sizes[i] # Non-Shared layers if isinstance(curr_layer_size, (list, tuple)): if len(curr_layer_size) != n_output: raise ValueError( "number of sub-layers should equal number of network outputs" ) # e.g. [8, 8, 8] -> [16, 16, 16] or 64 -> [8, 8, 8] self.denses.append( [ tf.keras.layers.Dense( units, activation=activation, kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) for units in curr_layer_size ] ) # Shared layers else: # e.g. 64 -> 64 if not isinstance(prev_layer_size, int): raise ValueError( "cannot rejoin parallel subnetworks after splitting" ) self.denses.append( tf.keras.layers.Dense( curr_layer_size, activation=activation, kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) ) # output layers if isinstance(layer_sizes[-2], (list, tuple)): # e.g. 
[3, 3, 3] -> 3 self.denses.append( [ tf.keras.layers.Dense( 1, kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) for _ in range(n_output) ] ) else: self.denses.append( tf.keras.layers.Dense( n_output, kernel_initializer=initializer, kernel_regularizer=self.regularizer, ) ) def call(self, inputs, training=False): y = inputs if self._input_transform is not None: y = self._input_transform(y) # hidden layers for layer in self.denses[:-1]: if isinstance(layer, list): if isinstance(y, list): y = [f(x, training=training) for f, x in zip(layer, y)] else: y = [f(y, training=training) for f in layer] else: y = layer(y, training=training) # output layers if isinstance(y, list): y = [f(x, training=training) for f, x in zip(self.denses[-1], y)] y = tf.concat(y, 1) else: y = self.denses[-1](y, training=training) if self._output_transform is not None: y = self._output_transform(inputs, y) return y
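A short usage sketch for the two classes above. The import path and the need to select the TensorFlow backend (e.g. DDE_BACKEND=tensorflow) reflect the usual deepxde layout but are assumptions here; the layer sizes are arbitrary illustration values.

# Hedged usage sketch; assumes deepxde is installed with the TensorFlow backend selected.
import numpy as np
from deepxde.nn.tensorflow import FNN, PFNN

# Plain fully-connected net: 2 inputs -> two hidden layers of 64 units -> 1 output.
net = FNN([2, 64, 64, 1], "tanh", "Glorot normal", dropout_rate=0.1)

# Parallel net with 2 outputs: one shared hidden layer of 64, then one private
# sub-layer of 32 units per output (the inner list length must equal the output count).
pnet = PFNN([2, 64, [32, 32], 2], "tanh", "Glorot normal")

x = np.random.rand(5, 2).astype("float32")
print(net(x).shape)   # (5, 1)
print(pnet(x).shape)  # (5, 2)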
PypiClean
/FaceDetector_cc-0.0.2-py3-none-any.whl/FaceDec.py
import cv2 as c import time import mediapipe as mp import numpy as np class FaceDetector: def __init__( self, cam_index: int = 0, cam_width: int = 640, cam_height: int = 360, min_detection_confidence=0.5, model_selection=0): """Face Detection""" self.cid = cam_index self.cw = cam_width self.ch =cam_height self.det_conf = min_detection_confidence self.mod_sel = model_selection self.mp_face = mp.solutions.face_detection self.RED = (0, 0, 255) self.YELLOW = (0, 255, 255) def init_cam(self) -> np.ndarray: """Initiate camera""" self.cam = c.VideoCapture(self.cid, 700) self.cam.set(3, self.cw) self.cam.set(4, self.ch) self.cam.set(5, 30) self.cam.set(6, c.VideoWriter_fourcc(*'MJPG')) return self.cam def detect_face( self, image: np.ndarray, draw_detection: bool = True, draw_key: bool = True, draw_bbox: bool = True) -> tuple: """Detect Face points""" self.face = self.mp_face.FaceDetection( min_detection_confidence=self.det_conf, model_selection=self.mod_sel) self.image_rgb = c.cvtColor(image, c.COLOR_BGR2RGB) self.result = self.face.process(image=self.image_rgb) if self.result.detections: for id1, fd in enumerate(self.result.detections): self.kp_data = fd.location_data.relative_keypoints self.bbox_data = fd.location_data.relative_bounding_box if draw_detection: h, w, _ = image.shape if draw_key: x1, y1 = int(self.bbox_data.xmin * w), int(self.bbox_data.ymin * h) x2, y2 = int((self.bbox_data.xmin + self.bbox_data.width) * w), int((self.bbox_data.ymin + self.bbox_data.height) * h) c.rectangle(image, (x1, y1), (x2, y2), self.YELLOW, 2) if draw_bbox: for key in self.kp_data: cx, cy = int(key.x * w), int(key.y * h) c.circle(image, (cx, cy), 3, self.RED, -1) return self.kp_data, self.bbox_data def main(): RED = (0, 0, 255) YELLOW = (0, 255, 255) timeS, timeE = 0, 0 obj = FaceDetector() cam = obj.init_cam() while cam.isOpened(): success, frame = cam.read() if not success: continue face_lm, face_bbox = obj.detect_face(frame) print(face_lm, face_bbox, sep='\n', end='\n\n') timeE: float = time.time() fps = int(1 / (timeE - timeS)) timeS = timeE c.putText(frame, str(f'FPS : {fps}'), (10, 30), 0, 1, YELLOW, 2) c.imshow('Face Detection : Chanchal Roy', frame) if c.waitKey(1) & 0xff == ord('q'): break cam.release() c.destroyAllWindows() if __name__ == '__main__': main()
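Besides the webcam loop in main() above, detect_face() can be applied to a single frame. A small sketch, assuming the package exposes this file as the FaceDec module and that face.jpg exists and shows at least one face (the method only assigns its return values when a detection is found); note that in this class draw_key toggles the bounding box while draw_bbox toggles the keypoint circles.

# Hedged single-image sketch; 'face.jpg' is a hypothetical input file.
import cv2 as c
from FaceDec import FaceDetector

detector = FaceDetector(min_detection_confidence=0.6)
frame = c.imread('face.jpg')
keypoints, bbox = detector.detect_face(frame, draw_detection=True)

# bbox carries relative xmin/ymin/width/height; keypoints are six relative (x, y) points.
print(bbox.xmin, bbox.ymin, bbox.width, bbox.height)
c.imwrite('face_annotated.jpg', frame)  # annotations were drawn in place on frame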
PypiClean
/HAFFET-0.1.2.tar.gz/HAFFET-0.1.2/sdapy/functions.py
import numpy as np import emcee, os, re, math import h5py from six import string_types from dateutil.parser import parse from sdapy import filters import pandas as pd # ztfquery source path LOCALSOURCE = os.getenv('ZTFDATA',"./Data/") def Lbol_to_Mbol(L): # bolometric luminosity to bolometric mag return -2.5*( np.log10(L) - (71.21+17.5)*0.4 ) def Mbol_to_Lbol(M): # bolometric mag to bolometric luminosity return 10**(-0.4*M + (71.21+17.5)*0.4) def redchisqg(ydata,ymod,deg=2,sd=None): if sd is None: chisq=np.sum((ydata-ymod)**2) else: chisq=np.sum( ((ydata-ymod)/sd)**2 ) nu=ydata.size-1-deg return chisq/nu def BC_Lyman(color, colortype='g-r', phase='normal', sntype='Ic'): ''' Lyman analytic bolometric correction for SE/II SNe https://ui.adsabs.harvard.edu/abs/2014MNRAS.437.3848L/abstract table 2, 3, 4 ''' bcfile = '%s/bc_table.txt' % LOCALSOURCE if not os.path.exists: print ('Error: BC file, %s, not found'%bcfile) return None, None typemap = dict() typemap['Ib'] = 'SESNe' typemap['Ic'] = 'SESNe' typemap['IIb'] = 'SESNe' typemap['Ic-BL'] = 'SESNe' typemap['II'] = 'II' if not sntype in typemap.keys(): print ('Error: no BC found for type %s'%sntype) return None, None _type = typemap[sntype] if not phase in ['normal','cool']: print ('Error: wrong phase as %s'%phase) return None, None if phase == 'cool': _type = 'cool' if not colortype in ['g-r','g-i','B-V','B-R','B-I','V-R','V-I']: print ('Error: colortype %s not registered'%colortype) return None, None c1,c2 = colortype.split('-') _ = [] for nn,ll in enumerate(open(bcfile).readlines()): if ll[0]=='#' or len(ll)==0:_.append(nn) bctab = pd.read_csv(bcfile, skiprows=_, delim_whitespace=True).query('type==@_type and x==@c1 and y==@c2') crange = [float(bctab['range'].to_list()[0].split('/')[0]), float(bctab['range'].to_list()[0].split('/')[1])] mbol = float(bctab['bc_c0'].to_list()[0]) + \ float(bctab['bc_c1'].to_list()[0]) * color + \ float(bctab['bc_c2'].to_list()[0]) * color**2 _ = np.logical_and(color >= min(crange), color <= max(crange)) return mbol, _ def dessart_vej_to_vm(vpeak, vpeake, sntype, verbose): ''' convert vej at peak to photospheirc vm, unit: 10*3 km/s following Dessart 16, convert photospheric velocity (vej) to characteristic velicity (vm) vej is O 7772 line velocity for SNe Ic, and He 5886 for Ib, at the peak epoch. https://academic.oup.com/mnras/article/458/2/1618/2589109 ''' if sntype in ['Ib', 'IIb']: vm, dvm = (vpeak-2.64) / 0.765, vpeake / 0.765 elif sntype in ['Ic', 'Ic-BL']: vm, dvm = (vpeak-2.99) / 0.443, vpeake / 0.443 else: if verbose: print ('check if Dessart et al suitable for your sn type') vm, dvm = None, None return vm, dvm # pseudo equivalent width def calc_pew(w,f,fc): return scipy.integrate.trapz(1-f/fc,w) # wavelength (A) to velocity (1e3 km/s) def calc_vel(x,x0): return -(x-x0)/x0*299792.458/1000. 
# velocity (1e3 km/s) to wavelength (A) def calc_wav(v,x0): return -v*1000./299792.458*x0+x0 def get_samples_mc(h5_file): if not os.path.exists(h5_file): return None, None reader = emcee.backends.HDFBackend(h5_file) tau = reader.get_autocorr_time(tol=0) try: burnin = int(5*np.max(tau)) except: return None, None samples = reader.get_chain(discard=burnin, thin=np.max(int(np.max(tau)), 0), flat=True) lnpost = reader.get_log_prob(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) try: return samples.value, lnpost.value except: return samples, lnpost def get_samples_nstep(h5_file, thin_by=1): if not os.path.exists(h5_file): return reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] return nsteps def get_samples_scipy(h5_file): if not os.path.exists(h5_file): return reader = h5py.File(h5_file, 'r') samples = reader['samples'] lnpost = reader['lnprob'] try: return samples.value, lnpost.value except: return samples, lnpost def get_numpy(v): try: return v.to_numpy() # pandas dataframe except: return np.array(v) # numpy array def is_seq(o): """Check if the object is a sequence Parameters ---------- o : any object The object to check Returns ------- is_seq : bool, scalar True if *o* is a sequence, False otherwise """ return hasattr(o, "__len__") def is_coordinate(s): """Check if input is a coordinate.""" if type(s) is str: if len(s.split()) != 1: return False else: return False matches = re.search( r'([+-]?([0-9]{1,2}):([0-9]{2})(:[0-9]{2})?\.?([0-9]+)?)', s) return False if matches is None else True def is_date(s): """Check if input is a valid date.""" try: parse(s) return True except ValueError: return False def penc_to_errors(mean, perc_low, perc_up): return abs(max(mean-perc_low, perc_up-mean)) def detect_symbol_1(ll, pattern='\"'): quote_locations = [] done, startIndex = False, 0 while not done: startIndex = ll.find(pattern, startIndex) endIndex = ll.find(pattern, startIndex + 1) if startIndex != -1 and endIndex != -1: quote_locations.append([startIndex, endIndex]) else: done = True startIndex = endIndex + 1 return quote_locations def detect_symbol_2(ll, pattern='\"'): quote_locations = [] done, startIndex = False, 0 while not done: startIndex = ll.find(pattern, startIndex) if startIndex != -1: quote_locations.append(startIndex) else: done = True startIndex += 1 return quote_locations def replace_str_index(text,index=0,replacement=''): return '%s%s%s'%(text[:index],replacement,text[index+1:]) def remove_quote(ll): # detect quote nq = detect_symbol_1(ll, pattern='\"') if len(nq) == 0: return ll nc = detect_symbol_2(ll, pattern=',') for ncid in nc: for q in nq: if ncid > q[0] and ncid < q[1]: ll = replace_str_index(ll,index=ncid,replacement=' ') return ll.replace('\"','') def is_number(s): """Check if input is a number.""" if s is None: return False if isinstance(s, list) and not isinstance(s, string_types): try: for x in s: if isinstance(x, string_types) and ' ' in x: raise ValueError _ = [float(x) for x in s] return True except ValueError: return False else: try: if isinstance(s, string_types) and ' ' in s: raise ValueError float(s) return True except ValueError: return False def is_integer(s): """Check if input is an integer.""" if isinstance(s, list) and not isinstance(s, string_types): try: _ = [int(x) for x in s] return True except ValueError: return False else: try: int(s) return True except ValueError: return False def str_clean(s): # for oac meta, at some point there're multiple objects in one column if type(s) is str: return 
s.split()[0] else: return s def filter_clean(filt): if type(filt) != str: return '-' filt = filt.strip().lower().replace('_','').replace(':','') if filt.startswith('sdss'): filt = filt.replace('sdss', '') if filt.startswith('ztf'): filt = filt.replace('ztf', '') if filt.startswith('atlas'): filt = filt.replace('atlas', '') if filt.startswith('uvot'): filt = filt.replace('uvot', '') if filt == 'uvm2': filt = 'D' if filt == 'uvw1': filt = 'A' if filt == 'uvw2': filt = 'S' if filt == 'fuv': filt = 'F' if filt == 'nuv': filt = 'N' if filt == 'w1': filt = 'W' if filt == 'w2': filt = 'Q' filt = filt[0] if not filt.lower() in filters.central_wavelengths and not filt.upper() in filters.central_wavelengths: print ('!!! Error: check filter: %s/%s not registered correctly' % (filt.upper(), filt.lower())) return '-' if filt.lower() in filters.central_wavelengths: return filt.lower() else: return filt.upper() def type_clean(sntype): if type(sntype) == str: return sntype.replace('SLSN ','').replace('SN ','').replace('AT ','').strip() elif type(sntype) == float: if math.isnan(sntype): return '-' else: return str(sntype) # Borrowed from astrocats' supernova catalog module. def name_clean(name): """Clean a transient's name.""" if name.strip() in ['-', 'None', 'nan']: return newname = name.strip(' ;,*.') if newname.startswith('NAME '): newname = newname.replace('NAME ', '', 1) if newname.endswith(' SN'): newname = newname.replace(' SN', '') if newname.endswith(':SN'): newname = newname.replace(':SN', '') if newname.startswith('MASJ'): newname = newname.replace('MASJ', 'MASTER OT J', 1) if (newname.startswith('MASTER') and len(newname) > 7 and is_number(newname[7])): newname = newname.replace('MASTER', 'MASTER OT J', 1) if (newname.startswith('MASTER OT') and len(newname) > 10 and is_number(newname[10])): newname = newname.replace('MASTER OT', 'MASTER OT J', 1) if newname.startswith('MASTER OT J '): newname = newname.replace('MASTER OT J ', 'MASTER OT J', 1) if newname.startswith('OGLE '): newname = newname.replace('OGLE ', 'OGLE-', 1) if newname.startswith('OGLE-') and len(newname) != 16: namesp = newname.split('-') if (len(namesp) == 4 and len(namesp[1]) == 4 and is_number(namesp[1]) and is_number(namesp[3])): newname = 'OGLE-' + namesp[1] + '-SN-' + namesp[3].zfill(3) elif (len(namesp) == 2 and is_number(namesp[1][:2]) and not is_number(namesp[1][2:])): newname = 'OGLE' + namesp[1] if newname.startswith('SN SDSS'): newname = newname.replace('SN SDSS ', 'SDSS', 1) if newname.startswith('SDSS '): newname = newname.replace('SDSS ', 'SDSS', 1) if newname.startswith('SDSS'): namesp = newname.split('-') if (len(namesp) == 3 and is_number(namesp[0][4:]) and is_number(namesp[1]) and is_number(namesp[2])): newname = namesp[0] + '-' + namesp[1] + '-' + namesp[2].zfill(3) if newname.startswith('SDSS-II SN'): namesp = newname.split() if len(namesp) == 3 and is_number(namesp[2]): newname = 'SDSS-II SN ' + namesp[2].lstrip('0') if newname.startswith('SN CL'): newname = newname.replace('SN CL', 'CL', 1) if newname.startswith('SN HiTS'): newname = newname.replace('SN HiTS', 'SNHiTS', 1) if newname.startswith('SNHiTS '): newname = newname.replace('SNHiTS ', 'SNHiTS', 1) if newname.startswith('GAIA'): newname = newname.replace('GAIA', 'Gaia', 1) if newname.startswith('KSN-'): newname = newname.replace('KSN-', 'KSN', 1) if newname.startswith('KSN'): newname = 'KSN' + newname[3:].lower() if newname.startswith('Gaia '): newname = newname.replace('Gaia ', 'Gaia', 1) if newname.startswith('Gaia'): newname = 'Gaia' + 
newname[4:].lower() if newname.startswith('GRB'): newname = newname.replace('GRB', 'GRB ', 1) if newname.startswith('GRB ') and is_number(newname[4:].strip()): newname = 'GRB ' + newname[4:].strip() + 'A' if newname.startswith('ESSENCE '): newname = newname.replace('ESSENCE ', 'ESSENCE', 1) if newname.startswith('LSQ '): newname = newname.replace('LSQ ', 'LSQ', 1) if newname.startswith('LSQ') and is_number(newname[3]): newname = newname[:3] + newname[3:].lower() if newname.startswith('DES') and is_number(newname[3]): newname = newname[:7] + newname[7:].lower() if newname.startswith('SNSDF '): newname = newname.replace(' ', '') if newname.startswith('SNSDF'): namesp = newname.split('.') if len(namesp[0]) == 9: newname = namesp[0] + '-' + namesp[1].zfill(2) if newname.startswith('HFF '): newname = newname.replace(' ', '') if newname.startswith('SN HST'): newname = newname.replace('SN HST', 'HST', 1) if newname.startswith('HST ') and newname[4] != 'J': newname = newname.replace('HST ', 'HST J', 1) if newname.startswith('SNLS') and newname[4] != '-': newname = newname.replace('SNLS', 'SNLS-', 1) if newname.startswith('SNLS- '): newname = newname.replace('SNLS- ', 'SNLS-', 1) if newname.startswith('CRTS CSS'): newname = newname.replace('CRTS CSS', 'CSS', 1) if newname.startswith('CRTS MLS'): newname = newname.replace('CRTS MLS', 'MLS', 1) if newname.startswith('CRTS SSS'): newname = newname.replace('CRTS SSS', 'SSS', 1) if newname.startswith(('CSS', 'MLS', 'SSS')): newname = newname.replace(' ', ':').replace('J', '') if newname.startswith('SN HFF'): newname = newname.replace('SN HFF', 'HFF', 1) if newname.startswith('SN GND'): newname = newname.replace('SN GND', 'GND', 1) if newname.startswith('SN SCP'): newname = newname.replace('SN SCP', 'SCP', 1) if newname.startswith('SN UDS'): newname = newname.replace('SN UDS', 'UDS', 1) if newname.startswith('SCP') and newname[3] != '-': newname = newname.replace('SCP', 'SCP-', 1) if newname.startswith('SCP- '): newname = newname.replace('SCP- ', 'SCP-', 1) if newname.startswith('SCP-') and is_integer(newname[7:]): newname = 'SCP-' + newname[4:7] + str(int(newname[7:])) if newname.startswith('PS 1'): newname = newname.replace('PS 1', 'PS1', 1) if newname.startswith('PS1 SN PS'): newname = newname.replace('PS1 SN PS', 'PS', 1) if newname.startswith('PS1 SN'): newname = newname.replace('PS1 SN', 'PS1', 1) if newname.startswith('PS1') and is_number(newname[3]): newname = newname[:3] + newname[3:].lower() elif newname.startswith('PS1-') and is_number(newname[4]): newname = newname[:4] + newname[4:].lower() if newname.startswith('PSN K'): newname = newname.replace('PSN K', 'K', 1) if newname.startswith('K') and is_number(newname[1:5]): namesp = newname.split('-') if len(namesp[0]) == 5: newname = namesp[0] + '-' + namesp[1].zfill(3) if newname.startswith('Psn'): newname = newname.replace('Psn', 'PSN', 1) if newname.startswith('PSNJ'): newname = newname.replace('PSNJ', 'PSN J', 1) if newname.startswith('TCPJ'): newname = newname.replace('TCPJ', 'TCP J', 1) if newname.startswith('SMTJ'): newname = newname.replace('SMTJ', 'SMT J', 1) if newname.startswith('PSN20J'): newname = newname.replace('PSN20J', 'PSN J', 1) if newname.startswith('SN ASASSN'): newname = newname.replace('SN ASASSN', 'ASASSN', 1) if newname.startswith('ASASSN-20') and is_number(newname[9]): newname = newname.replace('ASASSN-20', 'ASASSN-', 1) if newname.startswith('ASASSN '): newname = newname.replace('ASASSN ', 'ASASSN-', 1).replace('--', '-') if newname.startswith('ASASSN') and newname[6] 
!= '-': newname = newname.replace('ASASSN', 'ASASSN-', 1) if newname.startswith('ASASSN-') and is_number(newname[7]): newname = newname[:7] + newname[7:].lower() if newname.startswith('ROTSE3J'): newname = newname.replace('ROTSE3J', 'ROTSE3 J', 1) if newname.startswith('MACSJ'): newname = newname.replace('MACSJ', 'MACS J', 1) if newname.startswith('MWSNR'): newname = newname.replace('MWSNR', 'MWSNR ', 1) if newname.startswith('SN HUNT'): newname = newname.replace('SN HUNT', 'SNhunt', 1) if newname.startswith('SN Hunt'): newname = newname.replace(' ', '') if newname.startswith('SNHunt'): newname = newname.replace('SNHunt', 'SNhunt', 1) if newname.startswith('SNhunt '): newname = newname.replace('SNhunt ', 'SNhunt', 1) if newname.startswith('ptf'): newname = newname.replace('ptf', 'PTF', 1) if newname.startswith('SN PTF'): newname = newname.replace('SN PTF', 'PTF', 1) if newname.startswith('PTF '): newname = newname.replace('PTF ', 'PTF', 1) if newname.startswith('PTF') and is_number(newname[3]): newname = newname[:3] + newname[3:].lower() if newname.startswith('IPTF'): newname = newname.replace('IPTF', 'iPTF', 1) if newname.startswith('iPTF '): newname = newname.replace('iPTF ', 'iPTF', 1) if newname.startswith('iPTF') and is_number(newname[4]): newname = newname[:4] + newname[4:].lower() if newname.startswith('PESSTOESO'): newname = newname.replace('PESSTOESO', 'PESSTO ESO ', 1) if newname.startswith('snf'): newname = newname.replace('snf', 'SNF', 1) if newname.startswith('SNF '): newname = newname.replace('SNF ', 'SNF', 1) if (newname.startswith('SNF') and is_number(newname[3:]) and len(newname) >= 12): newname = 'SNF' + newname[3:11] + '-' + newname[11:] if newname.startswith(('MASTER OT J', 'ROTSE3 J')): prefix = newname.split('J')[0] coords = newname.split('J')[-1].strip() decsign = '+' if '+' in coords else '-' coordsplit = coords.replace('+', '-').split('-') if ('.' not in coordsplit[0] and len(coordsplit[0]) > 6 and '.' not in coordsplit[1] and len(coordsplit[1]) > 6): newname = ( prefix + 'J' + coordsplit[0][:6] + '.' + coordsplit[0][6:] + decsign + coordsplit[1][:6] + '.' 
+ coordsplit[1][6:]) if (newname.startswith('Gaia ') and is_number(newname[3:4]) and len(newname) > 5): newname = newname.replace('Gaia ', 'Gaia', 1) if (newname.startswith('AT ') and is_number(newname[3:7]) and len(newname) > 7): newname = newname.replace('AT ', 'AT', 1) if len(newname) <= 4 and is_number(newname): newname = 'SN' + newname + 'A' if (len(newname) > 4 and is_number(newname[:4]) and not is_number(newname[4:])): newname = 'SN' + newname if (newname.startswith('Sn ') and is_number(newname[3:7]) and len(newname) > 7): newname = newname.replace('Sn ', 'SN', 1) if (newname.startswith('sn') and is_number(newname[2:6]) and len(newname) > 6): newname = newname.replace('sn', 'SN', 1) if (newname.startswith('SN ') and is_number(newname[3:7]) and len(newname) > 7): newname = newname.replace('SN ', 'SN', 1) if (newname.startswith('SN') and is_number(newname[2:6]) and len(newname) == 7 and newname[6].islower()): newname = 'SN' + newname[2:6] + newname[6].upper() elif (newname.startswith('SN') and is_number(newname[2:6]) and (len(newname) == 8 or len(newname) == 9) and newname[6:].isupper()): newname = 'SN' + newname[2:6] + newname[6:].lower() if (newname.startswith('AT') and is_number(newname[2:6]) and len(newname) == 7 and newname[6].islower()): newname = 'AT' + newname[2:6] + newname[6].upper() elif (newname.startswith('AT') and is_number(newname[2:6]) and (len(newname) == 8 or len(newname) == 9) and newname[6:].isupper()): newname = 'AT' + newname[2:6] + newname[6:].lower() newname = (' '.join(newname.split())).strip() return newname
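Two of the conversions defined above lend themselves to a quick worked example. The snippet below re-implements the same formulas inline so it runs without the sdapy package (and its ZTFDATA setup); the 6355 Å rest wavelength is just an illustrative Si II line.

# Standalone sketch repeating the calc_vel and Mbol_to_Lbol / Lbol_to_Mbol formulas.
import numpy as np

# Absorption at 6200 A for a 6355 A rest line -> ~7.31 (units of 10^3 km/s).
x, x0 = 6200.0, 6355.0
v = -(x - x0) / x0 * 299792.458 / 1000.0
print(round(v, 2))  # 7.31

# Bolometric magnitude <-> luminosity: the two functions are exact inverses.
M = -17.0
L = 10 ** (-0.4 * M + (71.21 + 17.5) * 0.4)
M_back = -2.5 * (np.log10(L) - (71.21 + 17.5) * 0.4)
print(f"L = {L:.3e}, M recovered = {M_back:.2f}")  # recovers -17.00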
PypiClean
/MAPIE-0.6.5-py3-none-any.whl/mapie/regression/quantile_regression.py
from __future__ import annotations import warnings from typing import Iterable, List, Optional, Tuple, Union, cast import numpy as np from sklearn.base import RegressorMixin, clone from sklearn.linear_model import QuantileRegressor from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.utils import check_random_state from sklearn.utils.validation import (_check_y, _num_samples, check_is_fitted, indexable) from mapie._compatibility import np_quantile from mapie._typing import ArrayLike, NDArray from mapie.utils import (check_alpha_and_n_samples, check_defined_variables_predict_cqr, check_estimator_fit_predict, check_lower_upper_bounds, check_null_weight, fit_estimator) from .regression import MapieRegressor class MapieQuantileRegressor(MapieRegressor): """ This class implements the conformalized quantile regression strategy as proposed by Romano et al. (2019) to make conformal predictions. The only valid ``method`` is ``"quantile"`` and the only valid ``cv`` is ``"split"``. Parameters ---------- estimator : Optional[RegressorMixin] Any regressor with scikit-learn API (i.e. with ``fit`` and ``predict`` methods). If ``None``, estimator defaults to a ``QuantileRegressor`` instance. By default ``"None"``. method: str Method to choose for prediction, in this case, the only valid method is the ``"quantile"`` method. By default ``"quantile"``. cv: Optional[str] The cross-validation strategy for computing conformity scores. In theory a split method is implemented as it is needed to provide both a training and calibration set. By default ``None``. alpha: float Between ``0.0`` and ``1.0``, represents the risk level of the confidence interval. Lower ``alpha`` produce larger (more conservative) prediction intervals. ``alpha`` is the complement of the target coverage level. By default ``0.1``. Attributes ---------- valid_methods_: List[str] List of all valid methods. single_estimator_: RegressorMixin Estimator fitted on the whole training set. estimators_: List[RegressorMixin] - [0]: Estimator with quantile value of alpha/2 - [1]: Estimator with quantile value of 1 - alpha/2 - [2]: Estimator with quantile value of 0.5 conformity_scores_: NDArray of shape (n_samples_train, 3) Conformity scores between ``y_calib`` and ``y_pred``. - [:, 0]: for ``y_calib`` coming from prediction estimator with quantile of alpha/2 - [:, 1]: for ``y_calib`` coming from prediction estimator with quantile of 1 - alpha/2 - [:, 2]: maximum of those first two scores n_calib_samples: int Number of samples in the calibration dataset. References ---------- Yaniv Romano, Evan Patterson and Emmanuel J. Candès. "Conformalized Quantile Regression" Advances in neural information processing systems 32 (2019). Examples -------- >>> import numpy as np >>> from mapie.regression import MapieQuantileRegressor >>> X_train = np.array([[0], [1], [2], [3], [4], [5]]) >>> y_train = np.array([5, 7.5, 9.5, 10.5, 12.5, 15]) >>> X_calib = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]) >>> y_calib = np.array([5, 7, 9, 4, 8, 1, 5, 7.5, 9.5, 12]) >>> mapie_reg = MapieQuantileRegressor().fit( ... X_train, ... y_train, ... X_calib=X_calib, ... y_calib=y_calib ... ) >>> y_pred, y_pis = mapie_reg.predict(X_train) >>> print(y_pis[:, :, 0]) [[-8.16666667 19. ] [-6.33333333 20.83333333] [-4.5 22.66666667] [-2.66666667 24.5 ] [-0.83333333 26.33333333] [ 1. 28.16666667]] >>> print(y_pred) [ 5. 7. 9. 11. 13. 15.] 
""" valid_methods_ = ["quantile"] fit_attributes = [ "estimators_", "conformity_scores_", "n_calib_samples", ] quantile_estimator_params = { "GradientBoostingRegressor": { "loss_name": "loss", "alpha_name": "alpha" }, "QuantileRegressor": { "loss_name": "quantile", "alpha_name": "quantile" }, "HistGradientBoostingRegressor": { "loss_name": "loss", "alpha_name": "quantile" }, "LGBMRegressor": { "loss_name": "objective", "alpha_name": "alpha" }, } def __init__( self, estimator: Optional[ Union[ RegressorMixin, Pipeline, List[Union[RegressorMixin, Pipeline]] ] ] = None, method: str = "quantile", cv: Optional[str] = None, alpha: float = 0.1, ) -> None: super().__init__( estimator=estimator, method=method, ) self.cv = cv self.alpha = alpha def _check_alpha( self, alpha: float = 0.1, ) -> NDArray: """ Perform several checks on the alpha value and changes it from a float to an ArrayLike. Parameters ---------- alpha : float Can only be a float value between ``0.0`` and ``1.0``. Represent the risk level of the confidence interval. Lower alpha produce larger (more conservative) prediction intervals. Alpha is the complement of the target coverage level. By default ``0.1``. Returns ------- ArrayLike An ArrayLike of three values: - [0]: alpha value of alpha/2 - [1]: alpha value of of 1 - alpha/2 - [2]: alpha value of 0.5 Raises ------ ValueError If alpha is not a float. ValueError If the value of ``alpha`` is not between ``0.0`` and ``1.0``. """ if self.cv == "prefit": warnings.warn( "WARNING: The alpha that is set needs to be the same" + " as the alpha of your prefitted model in the following" " order [alpha/2, 1 - alpha/2, 0.5]" ) if isinstance(alpha, float): if np.any(np.logical_or(alpha <= 0, alpha >= 1.0)): raise ValueError( "Invalid alpha. Allowed values are between 0.0 and 1.0." ) else: alpha_np = np.array([alpha / 2, 1 - alpha / 2, 0.5]) else: raise ValueError( "Invalid alpha. Allowed values are float." ) return alpha_np def _check_estimator( self, estimator: Optional[Union[RegressorMixin, Pipeline]] = None, ) -> Union[RegressorMixin, Pipeline]: """ Perform several checks on the estimator to check if it has all the required specifications to be used with this methodology. The estimators that can be used in MapieQuantileRegressor need to have a ``fit`` and ``predict`` attribute, but also need to allow a quantile loss and therefore also setting a quantile value. Note that there is a ``TypedDict`` to check which methods allow for quantile regression. Parameters ---------- estimator : Optional[RegressorMixin], optional Estimator to check, by default ``None``. Returns ------- RegressorMixin The estimator itself or a default ``QuantileRegressor`` instance with ``solver`` set to "highs". Raises ------ ValueError If the estimator implements ``fit`` or ``predict`` methods. ValueError We check if it's a known estimator that does quantile regression according to the dictionnary set quantile_estimator_params. This dictionnary will need to be updated with the latest new available estimators. ValueError The estimator does not have the ``"loss_name"`` in its parameters and therefore can not be used as an estimator. ValueError There is no quantile ``"loss_name"`` and therefore this estimator can not be used as a ``MapieQuantileRegressor``. ValueError The parameter to set the alpha value does not exist in this estimator and therefore we cannot use it. 
""" if estimator is None: return QuantileRegressor( solver="highs-ds", alpha=0.0, ) check_estimator_fit_predict(estimator) if isinstance(estimator, Pipeline): self._check_estimator(estimator[-1]) return estimator else: name_estimator = estimator.__class__.__name__ if name_estimator == "QuantileRegressor": return estimator else: if name_estimator in self.quantile_estimator_params: param_estimator = estimator.get_params() loss_name, alpha_name = self.quantile_estimator_params[ name_estimator ].values() if loss_name in param_estimator: if param_estimator[loss_name] != "quantile": raise ValueError( "You need to set the loss/objective argument" + " of your base model to ``quantile``." ) else: if alpha_name in param_estimator: return estimator else: raise ValueError( "The matching parameter `alpha_name` for" " estimator does not exist. " "Make sure you set it when initializing " "your estimator." ) else: raise ValueError( "The matching parameter `loss_name` for" + " estimator does not exist." ) else: raise ValueError( "The base model does not seem to be accepted" + " by MapieQuantileRegressor. \n" "Give a base model among: \n" f"{self.quantile_estimator_params.keys()} " "Or, add your base model to" + " ``quantile_estimator_params``." ) def _check_cv( self, cv: Optional[str] = None ) -> str: """ Check if cv argument is ``None``, ``"split"`` or ``"prefit"``. Parameters ---------- cv : Optional[str], optional cv to check, by default ``None``. Returns ------- str cv itself or a default ``"split"``. Raises ------ ValueError Raises an error if the cv is anything else but the method ``"split"`` or ``"prefit"``. Only the split method has been implemented. """ if cv is None: return "split" if cv in ("split", "prefit"): return cv else: raise ValueError( "Invalid cv method, only valid method is ``split``." ) def _check_calib_set( self, X: ArrayLike, y: ArrayLike, sample_weight: Optional[ArrayLike] = None, X_calib: Optional[ArrayLike] = None, y_calib: Optional[ArrayLike] = None, calib_size: Optional[float] = 0.3, random_state: Optional[Union[int, np.random.RandomState, None]] = None, shuffle: Optional[bool] = True, stratify: Optional[ArrayLike] = None, ) -> Tuple[ ArrayLike, ArrayLike, ArrayLike, ArrayLike, Optional[ArrayLike] ]: """ Check if a calibration set has already been defined, if not, then we define one using the ``train_test_split`` method. Parameters ---------- Same definition of parameters as for the ``fit`` method. 
Returns ------- Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike, ArrayLike] - [0]: ArrayLike of shape (n_samples_*(1-calib_size), n_features) X_train - [1]: ArrayLike of shape (n_samples_*(1-calib_size),) y_train - [2]: ArrayLike of shape (n_samples_*calib_size, n_features) X_calib - [3]: ArrayLike of shape (n_samples_*calib_size,) y_calib - [4]: ArrayLike of shape (n_samples_,) sample_weight_train """ if X_calib is None or y_calib is None: if sample_weight is None: X_train, X_calib, y_train, y_calib = train_test_split( X, y, test_size=calib_size, random_state=random_state, shuffle=shuffle, stratify=stratify ) sample_weight_train = sample_weight else: ( X_train, X_calib, y_train, y_calib, sample_weight_train, _, ) = train_test_split( X, y, sample_weight, test_size=calib_size, random_state=random_state, shuffle=shuffle, stratify=stratify ) else: X_train, y_train, sample_weight_train = X, y, sample_weight X_train, X_calib = cast(ArrayLike, X_train), cast(ArrayLike, X_calib) y_train, y_calib = cast(ArrayLike, y_train), cast(ArrayLike, y_calib) sample_weight_train = cast(ArrayLike, sample_weight_train) return X_train, y_train, X_calib, y_calib, sample_weight_train def _check_prefit_params( self, estimator: List[Union[RegressorMixin, Pipeline]], ) -> None: """ Check the parameters set for the specific case of prefit estimators. Parameters ---------- estimator : List[Union[RegressorMixin, Pipeline]] List of three prefitted estimators that should have pre-defined quantile levels of alpha/2, 1 - alpha/2 and 0.5. Raises ------ ValueError If a non-iterable variable is provided for estimator. ValueError If less or more than three models are defined. Warning If the alpha is defined, warns the user that it must be set accordingly with the prefit estimators. """ if isinstance(estimator, Iterable) is False: raise ValueError( "Estimator for prefit must be an iterable object." ) if len(estimator) == 3: for est in estimator: check_estimator_fit_predict(est) check_is_fitted(est) else: raise ValueError( "You need to have provided 3 different estimators, they" " need to be preset with alpha values in the following" " order [alpha/2, 1 - alpha/2, 0.5]." ) def fit( self, X: ArrayLike, y: ArrayLike, sample_weight: Optional[ArrayLike] = None, X_calib: Optional[ArrayLike] = None, y_calib: Optional[ArrayLike] = None, calib_size: Optional[float] = 0.3, random_state: Optional[Union[int, np.random.RandomState]] = None, shuffle: Optional[bool] = True, stratify: Optional[ArrayLike] = None, ) -> MapieQuantileRegressor: """ Fit estimator and compute residuals used for prediction intervals. All the clones of the estimators for different quantile values are stored in order alpha/2, 1 - alpha/2, 0.5 in the ``estimators_`` attribute. Residuals for the first two estimators and the maximum of residuals among these residuals are stored in the ``conformity_scores_`` attribute. Parameters ---------- X: ArrayLike of shape (n_samples, n_features) Training data. y: ArrayLike of shape (n_samples,) Training labels. sample_weight: Optional[ArrayLike] of shape (n_samples,) Sample weights for fitting the out-of-fold models. If ``None``, then samples are equally weighted. If some weights are null, their corresponding observations are removed before the fitting process and hence have no residuals. If weights are non-uniform, residuals are still uniformly weighted. Note that the sample weight defined are only for the training, not for the calibration procedure. By default ``None``. 
X_calib: Optional[ArrayLike] of shape (n_calib_samples, n_features) Calibration data. y_calib: Optional[ArrayLike] of shape (n_calib_samples,) Calibration labels. calib_size: Optional[float] If ``X_calib`` and ``y_calib`` are not defined, then the calibration dataset is created with the split defined by ``calib_size``. random_state: Optional[Union[int, np.random.RandomState]], default=None For the ``sklearn.model_selection.train_test_split`` documentation. Controls the shuffling applied to the data before applying the split. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. By default ``None``. shuffle: bool, default=True For the ``sklearn.model_selection.train_test_split`` documentation. Whether or not to shuffle the data before splitting. If ``shuffle=False`` then stratify must be None. By default ``True``. stratify: array-like, default=None For the ``sklearn.model_selection.train_test_split`` documentation. If not ``None``, data is split in a stratified fashion, using this as the class labels. Read more in the :ref:`User Guide <stratification>`. By default ``None``. Returns ------- MapieQuantileRegressor The model itself. """ self.cv = self._check_cv(cast(str, self.cv)) # Initialization self.estimators_: List[RegressorMixin] = [] if self.cv == "prefit": estimator = cast(List, self.estimator) alpha = self._check_alpha(self.alpha) self._check_prefit_params(estimator) X_calib, y_calib = indexable(X, y) self.n_calib_samples = _num_samples(y_calib) y_calib_preds = np.full( shape=(3, self.n_calib_samples), fill_value=np.nan ) for i, est in enumerate(estimator): self.estimators_.append(est) y_calib_preds[i] = est.predict(X_calib).ravel() self.single_estimator_ = self.estimators_[2] else: # Checks self._check_parameters() checked_estimator = self._check_estimator(self.estimator) alpha = self._check_alpha(self.alpha) X, y = indexable(X, y) random_state = check_random_state(random_state) results = self._check_calib_set( X, y, sample_weight, X_calib, y_calib, calib_size, random_state, shuffle, stratify, ) X_train, y_train, X_calib, y_calib, sample_weight_train = results X_train, y_train = indexable(X_train, y_train) X_calib, y_calib = indexable(X_calib, y_calib) y_train, y_calib = _check_y(y_train), _check_y(y_calib) self.n_calib_samples = _num_samples(y_calib) check_alpha_and_n_samples(self.alpha, self.n_calib_samples) sample_weight_train, X_train, y_train = check_null_weight( sample_weight_train, X_train, y_train ) y_train = cast(NDArray, y_train) y_calib_preds = np.full( shape=(3, self.n_calib_samples), fill_value=np.nan ) if isinstance(checked_estimator, Pipeline): estimator = checked_estimator[-1] else: estimator = checked_estimator name_estimator = estimator.__class__.__name__ alpha_name = self.quantile_estimator_params[ name_estimator ]["alpha_name"] for i, alpha_ in enumerate(alpha): cloned_estimator_ = clone(checked_estimator) params = {alpha_name: alpha_} if isinstance(checked_estimator, Pipeline): cloned_estimator_[-1].set_params(**params) else: cloned_estimator_.set_params(**params) self.estimators_.append(fit_estimator( cloned_estimator_, X_train, y_train, sample_weight_train )) y_calib_preds[i] = self.estimators_[-1].predict(X_calib) self.single_estimator_ = self.estimators_[2] self.conformity_scores_ = np.full( shape=(3, self.n_calib_samples), fill_value=np.nan ) self.conformity_scores_[0] = y_calib_preds[0] - y_calib self.conformity_scores_[1] = y_calib - y_calib_preds[1] self.conformity_scores_[2] = np.max( [ 
self.conformity_scores_[0], self.conformity_scores_[1] ], axis=0 ) return self def predict( self, X: ArrayLike, ensemble: bool = False, alpha: Optional[Union[float, Iterable[float]]] = None, symmetry: Optional[bool] = True, ) -> Union[NDArray, Tuple[NDArray, NDArray]]: """ Predict target on new samples with confidence intervals. Residuals from the training set and predictions from the model clones are central to the computation. Prediction Intervals for a given ``alpha`` are deduced from the quantile regression at the alpha values: alpha/2, 1 - (alpha/2) while adding a constant based uppon their residuals. Parameters ---------- X: ArrayLike of shape (n_samples, n_features) Test data. ensemble: bool Ensemble has not been defined in predict and therefore should will not have any effects in this method. alpha: Optional[Union[float, Iterable[float]]] For ``MapieQuantileRegresor`` the alpha has to be defined directly in initial arguments of the class. symmetry: Optional[bool] Deciding factor to whether to find the quantile value for each residuals separatly or to use the maximum of the two combined. Returns ------- Union[NDArray, Tuple[NDArray, NDArray]] - NDArray of shape (n_samples,) if ``alpha`` is ``None``. - Tuple[NDArray, NDArray] of shapes (n_samples,) and (n_samples, 2, n_alpha) if ``alpha`` is not ``None``. - [:, 0, :]: Lower bound of the prediction interval. - [:, 1, :]: Upper bound of the prediction interval. """ check_is_fitted(self, self.fit_attributes) check_defined_variables_predict_cqr(ensemble, alpha) alpha = self.alpha if symmetry else self.alpha/2 check_alpha_and_n_samples(alpha, self.n_calib_samples) n = self.n_calib_samples q = (1 - (alpha)) * (1 + (1 / n)) y_preds = np.full( shape=(3, _num_samples(X)), fill_value=np.nan, dtype=float, ) for i, est in enumerate(self.estimators_): y_preds[i] = est.predict(X) if symmetry: quantile = np.full( 2, np_quantile( self.conformity_scores_[2], q, method="higher" ) ) else: quantile = np.array( [ np_quantile( self.conformity_scores_[0], q, method="higher" ), np_quantile( self.conformity_scores_[1], q, method="higher" ) ] ) y_pred_low = y_preds[0][:, np.newaxis] - quantile[0] y_pred_up = y_preds[1][:, np.newaxis] + quantile[1] check_lower_upper_bounds(y_preds, y_pred_low, y_pred_up) return y_preds[2], np.stack([y_pred_low, y_pred_up], axis=1)
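The class docstring above already demonstrates the split workflow; the fit() docstring also describes a cv="prefit" path that takes three already-fitted quantile models in the order [alpha/2, 1 - alpha/2, 0.5]. A sketch of that path, with placeholder random data:

# Prefit usage sketch; the data arrays are placeholders.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from mapie.regression import MapieQuantileRegressor

rng = np.random.default_rng(0)
X_train, y_train = rng.normal(size=(200, 1)), rng.normal(size=200)
X_calib, y_calib = rng.normal(size=(100, 1)), rng.normal(size=100)
X_test = rng.normal(size=(20, 1))

alpha = 0.1
# One prefit model per quantile, ordered [alpha/2, 1 - alpha/2, 0.5].
estimators = [
    GradientBoostingRegressor(loss="quantile", alpha=q).fit(X_train, y_train)
    for q in (alpha / 2, 1 - alpha / 2, 0.5)
]

mapie = MapieQuantileRegressor(estimator=estimators, cv="prefit", alpha=alpha)
mapie.fit(X_calib, y_calib)            # with cv="prefit", X and y act as the calibration set
y_pred, y_pis = mapie.predict(X_test)  # y_pis[:, 0, 0] lower bound, y_pis[:, 1, 0] upper bound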
PypiClean
/LumberMill-0.9.5.7-py3-none-any.whl/lumbermill/cluster/Pack.py
import collections import hashlib import os import re import socket import sys import threading import time import msgpack from Crypto import Random from Crypto.Cipher import AES from BaseThreadedModule import BaseThreadedModule from utils.Decorators import ModuleDocstringParser, setInterval from utils.misc import TimedFunctionManager class PackMember: def __init__(self, host, hostname=None): self.host = host self.ip_address = host[0] self.port = host[1] self.hostname = hostname self.last_seen = time.time() def getHost(self): return self.host def getIp(self): return self.ip_address def getPort(self): return self.port def getHostName(self): if not self.hostname: try: self.hostname = socket.gethostbyaddr(self.ip_address) except: self.hostname = self.ip_address return self.hostname def updateLastSeen(self): self.last_seen = time.time() def isAlive(self): return True if time.time() - self.last_seen < 30 else False @ModuleDocstringParser class Pack(BaseThreadedModule): """ Pack base module. Handles pack leader discovery and alive checks of pack followers. IMPORTANT: This is just a first alpha implementation. No leader election, no failover, no sanity checks for conflicting leaders. name: Name of the cluster. Used for auto-discovery in same network. secret: pre shared key to en/decrypt cluster messages. broadcast: Ipaddress for udp broadcasts. interface: Ipaddress to listen on. port: Port to listen on. interval: Autodiscover interval. pack: Set this node to be either leader or member. Configuration template: - Pack: name: # <type: string; is: required> secret: # <type: string; is: required> broadcast: # <type: string; is: required> interface: # <default: '0.0.0.0'; type: string; is: optional> port: # <default: 5252; type: integer; is: optional> interval: # <default: 10; type: integer; is: optional> pack: # <default: 'leader'; type: string; values: ['leader', 'follower']; is: optional> """ module_type = "stand_alone" """ Set module type. """ can_run_forked = False def configure(self, configuration): # self.logger.setLevel(logging.DEBUG) # Call parent configure method. BaseThreadedModule.configure(self, configuration) self.is_leader = True if self.getConfigurationValue('pack') == 'leader' else False self.pack_followers = {} self.cluster_name = self.getConfigurationValue('name') self.discovered_leader = None self.secret = hashlib.sha256(self.getConfigurationValue('secret')).digest() self.handlers = collections.defaultdict(list) self.lock = threading.Lock() # Setup socket. self.interface_addr = self.getConfigurationValue('interface') self.interface_port = self.getConfigurationValue('port') self.broadcast_addr = self.getConfigurationValue('broadcast') self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.socket.settimeout(1) self.hostname = socket.gethostname() try: self.socket.bind((self.interface_addr, self.interface_port)) except: etype, evalue, etb = sys.exc_info() self.logger.error("Could not listen on %s:%s. Exception: %s, Error: %s." 
% (self.getConfigurationValue("interface"), self.getConfigurationValue("port"), etype, evalue)) self.alive = False self.lumbermill.shutDown() return self.addHandlers() def addHandlers(self): handle_marker = 'leaderHandle' if self.is_leader else 'followerHandle' for method_name in dir(self): if not method_name.startswith(handle_marker): continue action_name = method_name.replace(handle_marker, '') action_name = self.convertCamelCaseToUnderScore(action_name) self.addHandler(action_name, getattr(self, method_name)) def convertCamelCaseToUnderScore(self, camel_case): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_case) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() def encrypt(self, message): x = AES.block_size - len(message) % AES.block_size message = message + (chr(x) * x) iv = Random.OSRNG.posix.new().read(AES.block_size) cipher = AES.new(self.secret, AES.MODE_CBC, iv) return iv + cipher.encrypt(message) def decrypt(self, message): unpad = lambda s: s[:-ord(s[-1])] iv = message[:AES.block_size] cipher = AES.new(self.secret, AES.MODE_CBC, iv) return unpad(cipher.decrypt(message))[AES.block_size:] def getDefaultMessageDict(self, action, custom_dict={}): message_dict = {'action': action, 'sender': self.hostname, 'cluster': self.cluster_name, 'timestamp': time.time()} message_dict.update(custom_dict) return message_dict def getPackMembers(self): return self.pack_followers def getDiscoveredLeader(self): return self.discovered_leader def sendBroadcastMessage(self, message): # msgpack encode end encrypt message. str_msg = msgpack.packb(message) str_msg = self.encrypt(str_msg) try: self.socket.sendto(str_msg, (self.broadcast_addr, self.interface_port)) except: etype, evalue, etb = sys.exc_info() self.logger.warning("Could not send broadcast message %s to %s. Exception: %s, Error: %s." % (message, host, etype, evalue)) def sendMessage(self, message, ip_address): self.logger.debug("Sending message %s to %s." % (message, ip_address)) # Json encode end encrypt message. str_msg = msgpack.packb(message) str_msg = self.encrypt(str_msg) try: self.socket.sendto(str_msg, (ip_address, self.interface_port)) except: etype, evalue, etb = sys.exc_info() self.logger.warning("Could not send message %s to %s. Exception: %s, Error: %s." % (message, ip_address, etype, evalue)) def sendMessageToPackFollower(self, pack_member, message): if pack_member not in self.pack_followers.values(): self.logger.warning('Can not send message to unknown pack follower %s.' % pack_member) return self.sendMessage(message, pack_member.getIp()) def sendMessageToPack(self, message): for pack_member in self.pack_followers.itervalues(): self.sendMessageToPackFollower(pack_member, message) def sendMessageToPackLeader(self, message): self.sendMessage(message, self.discovered_leader.getIp()) def addHandler(self, action, callback): self.logger.debug('Adding handler %s for %s.' % (callback, action)) self.handlers[action].append(callback) def run(self): if self.is_leader: TimedFunctionManager.startTimedFunction(self.sendAliveRequests) TimedFunctionManager.startTimedFunction(self.sendDiscoverBroadcast) TimedFunctionManager.startTimedFunction(self.dropDeadPackFollowers) else: TimedFunctionManager.startTimedFunction(self.dropDeadPackLeader) self.logger.info("Running as pack %s." % self.getConfigurationValue('pack')) while self.alive: try: message, host = self.socket.recvfrom(64536) except socket.timeout: continue try: # Decrypt and msgpack decode message. 
message = msgpack.unpackb(self.decrypt(message)) except: etype, evalue, etb = sys.exc_info() self.logger.warning("Could not parse cluster message %s. Maybe your secrets differ? Exception: %s, Error: %s." % (message, etype, evalue)) continue # Ignore messages for other clusters and from self. if message['cluster'] != self.cluster_name or message['sender'] == self.hostname: continue self.logger.info("Received message %s from %s." % (message, host[0])) if message['action'] not in self.handlers.keys(): self.logger.warning('Got request for unknown handler %s.' % message['action']) # Excecute callbacks for callback in self.handlers["%s" % message['action']]: self.logger.debug('Calling callback %s for %s.' % (callback, message['action'])) callback(host, message) @setInterval(10, call_on_init=True) def sendDiscoverBroadcast(self): """ Leader sends udp broadcast messages every 10 seconds. """ message = self.getDefaultMessageDict(action='discovery_request') self.logger.debug('Sending broadcast pack follower discovery.') self.sendBroadcastMessage(message) @setInterval(10) def sendAliveRequests(self): """ Leader sends alive requests to all dicovered followers every 10 seconds. """ for ip_address, pack_follower in self.pack_followers.items(): message = self.getDefaultMessageDict(action='alive_request') self.sendMessageToPackFollower(pack_follower, message) @setInterval(10) def dropDeadPackFollowers(self): """ Drop followers who have not been seen the last 30 seconds. @see PackMember.isAlive() and PackMember.updateLastSeen() """ with self.lock: for ip_address, pack_follower in self.pack_followers.items(): if pack_follower.isAlive(): continue self.logger.warning('Dropping dead pack follower %s, %s.' % (pack_follower.getHostName(), ip_address)) self.pack_followers.pop(ip_address) @setInterval(1) def dropDeadPackLeader(self): """ Drop leader who has not been seen the last 30 seconds. @see PackMember.isAlive() and PackMember.updateLastSeen() """ if not self.discovered_leader: return with self.lock: if self.discovered_leader.isAlive(): return self.logger.warning('Dropping dead pack leader %s, %s.' % (self.discovered_leader.getHostName(), self.discovered_leader.getIp())) self.discovered_leader = None """ Pack follower discovery procedure. - leader sends a broadcast 'discovery_request' message - follower replies with a 'discovery_reply' message - leader replies with a 'discovery_finish' message - follower sets its discovered leader """ def followerHandleDiscoveryRequest(self, sender, message): sender_ip = sender[0] if self.discovered_leader and self.discovered_leader.getIp() == sender_ip: return message = self.getDefaultMessageDict(action='discovery_reply') self.sendMessage(message, sender[0]) def leaderHandleDiscoveryReply(self, sender, message): self.logger.info("Discovered %s as pack member in pid %s." % (message['sender'], os.getpid())) sender_ip = sender[0] try: pack_member = self.pack_followers[sender_ip] except KeyError: pack_member = PackMember(sender, message['sender']) self.pack_followers[sender_ip] = pack_member message = self.getDefaultMessageDict(action='discovery_finish') self.sendMessage(message, pack_member.getIp()) def followerHandleDiscoveryFinish(self, sender, message): self.logger.info("Discovered %s as pack leader." % message['sender']) self.discovered_leader = PackMember(sender, message['sender']) """ Pack follower alive procedure. 
- leader sends an 'alive_request' message - follower replies with a 'alive_reply' message - leader calls follower.updateLastSeen() and replies with a 'alive_finish' message - follower calls leader.updateLastSeen() """ def followerHandleAliveRequest(self, sender, message): sender_ip = sender[0] if not self.discovered_leader or sender_ip != self.discovered_leader.getIp(): return self.logger.debug('Got alive request from %s.' % sender_ip) message = self.getDefaultMessageDict(action='alive_reply') self.sendMessageToPackLeader(message) def leaderHandleAliveReply(self, sender, message): try: pack_follower = self.pack_followers[sender[0]] except KeyError: return self.logger.debug('Got alive reply from %s.' % pack_follower.getHostName()) with self.lock: pack_follower.updateLastSeen() message = self.getDefaultMessageDict(action='alive_finish') self.sendMessageToPackFollower(pack_follower, message) def followerHandleAliveFinish(self, sender, message): sender_ip = sender[0] if not self.discovered_leader or sender_ip != self.discovered_leader.getIp(): return self.discovered_leader.updateLastSeen() def shutDown(self): # Call parent configure method. BaseThreadedModule.shutDown(self) self.socket.close() self.socket = None
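
The cluster messages exchanged above are msgpack-encoded and then AES-CBC encrypted with a key derived from the pre-shared secret via SHA-256. A rough standalone sketch of that wire-format round trip, assuming the pycryptodome API (Crypto.Cipher.AES, Crypto.Random.get_random_bytes) rather than the legacy PyCrypto calls the module itself uses:

import hashlib
import msgpack
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = hashlib.sha256(b"pre-shared-secret").digest()

def encrypt(payload: bytes) -> bytes:
    # Pad to the AES block size, prepend a random IV.
    pad = AES.block_size - len(payload) % AES.block_size
    payload += bytes([pad]) * pad
    iv = get_random_bytes(AES.block_size)
    return iv + AES.new(key, AES.MODE_CBC, iv).encrypt(payload)

def decrypt(blob: bytes) -> bytes:
    iv, body = blob[:AES.block_size], blob[AES.block_size:]
    plain = AES.new(key, AES.MODE_CBC, iv).decrypt(body)
    return plain[:-plain[-1]]  # strip the padding again

message = {"action": "discovery_request", "sender": "node-a", "cluster": "demo"}
blob = encrypt(msgpack.packb(message))
assert msgpack.unpackb(decrypt(blob), raw=False) == message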
PypiClean
/KVM48-1.3.600-py3-none-any.whl/kvm48/utils.py
import os
import re
import time
import urllib.parse
from typing import Optional

__all__ = [
    "extension_from_url",
    "sanitize_filename",
    "sanitize_filepath",
    "read_keypress_with_timeout",
]


def extension_from_url(url: str, *, dot: bool = False) -> str:
    ext = os.path.splitext(urllib.parse.urlparse(url).path)[1]
    return ext if dot else ext[1:]


def collapse_filename_spaces(unsanitized: str) -> str:
    # Collapse consecutive spaces.
    result = re.sub(r" +", " ", unsanitized)
    # Remove space before the file extension.
    result = re.sub(r" (?=\.[^.]+$)", "", result)
    return result


def sanitize_filename(unsanitized: str, convert_non_bmp_chars="keep") -> str:
    # Strip control characters (0x00-0x1F, 0x7F), and use homoglyphs
    # (Halfwidth and Fullwidth Forms block, U+FF00 - U+FFEF) for
    # characters illegal in exFAT/NTFS:
    #
    # " => U+FF02 FULLWIDTH QUOTATION MARK (")
    # * => U+FF0A FULLWIDTH ASTERISK (*)
    # / => U+FF0F FULLWIDTH SOLIDUS (/)
    # : => U+FF1A FULLWIDTH COLON (:)
    # < => U+FF1C FULLWIDTH LESS-THAN SIGN (<)
    # > => U+FF1E FULLWIDTH GREATER-THAN SIGN (>)
    # ? => U+FF1F FULLWIDTH QUESTION MARK (?)
    # \ => U+FF3C FULLWIDTH REVERSE SOLIDUS (\)
    # | => U+FF5C FULLWIDTH VERTICAL LINE (|)
    #
    # Also replace whitespace characters with space.
    #
    # The convert_non_bmp_chars option determines how non-BMP
    # characters (not in the Basic Multilingual Plane, i.e., code
    # points beyond U+FFFF) are treated. Conversion is necessary
    # for certain legacy filesystems with only UCS-2 support,
    # e.g., FAT32.
    #
    # The value of this option can be one of 'keep', 'strip',
    # 'replace', 'question_mark', or any single BMP character (U+0001
    # to U+FFFF). 'keep' keeps the characters intact (default
    # behavior); 'strip' strips all non-BMP characters; 'replace'
    # replaces all non-BMP characters with U+FFFD (REPLACEMENT
    # CHARACTER �); 'question_mark' replaces all non-BMP characters
    # with U+003F (QUESTION MARK ?); otherwise, a single BMP character
    # specifies the replacement character for non-BMP characters
    # directly.
    result = re.sub(r"[\x00-\x1f\x7f]+", "", unsanitized).translate(
        str.maketrans(
            '"*/:<>?\\|\t\n\r\f\v',
            "\uFF02\uFF0A\uFF0F\uFF1A\uFF1C\uFF1E\uFF1F\uFF3C\uFF5C     ",
        )
    )
    if convert_non_bmp_chars == "keep":
        return collapse_filename_spaces(result)
    else:
        if convert_non_bmp_chars == "strip":
            repl = ""
        elif convert_non_bmp_chars == "replace":
            repl = "\uFFFD"
        elif convert_non_bmp_chars == "question_mark":
            repl = "?"
        elif len(convert_non_bmp_chars) == 1:
            repl = convert_non_bmp_chars
            codepoint = ord(repl)
            if codepoint <= 0x1F or codepoint == 0x7F or codepoint > 0xFFFF:
                raise ValueError("invalid replacement character %s" % repr(repl))
        else:
            raise ValueError(
                "unrecognized convert_non_bmp_chars %s" % repr(convert_non_bmp_chars)
            )
        result = "".join(ch if ord(ch) <= 0xFFFF else repl for ch in result)
        return collapse_filename_spaces(result)


def sanitize_filepath(unsanitized: str, convert_non_bmp_chars="keep") -> str:
    return os.sep.join(
        sanitize_filename(seg, convert_non_bmp_chars=convert_non_bmp_chars)
        for seg in unsanitized.split(os.sep)
    )


if os.name == "posix":
    import select
    import sys
    import termios
    import tty

    def read_keypress_with_timeout(timeout: float) -> Optional[str]:
        end_time = time.time() + timeout
        stdin_fileno = sys.stdin.fileno()
        saved_tcattr = termios.tcgetattr(stdin_fileno)
        try:
            tty.setcbreak(stdin_fileno)
            while time.time() <= end_time:
                if select.select((sys.stdin,), (), (), 0.1)[0]:
                    return sys.stdin.read(1)
        finally:
            termios.tcsetattr(stdin_fileno, termios.TCSAFLUSH, saved_tcattr)

elif os.name == "nt":
    try:
        import msvcrt

        def read_keypress_with_timeout(timeout: float) -> Optional[str]:
            end_time = time.time() + timeout
            while time.time() <= end_time:
                if msvcrt.kbhit():
                    return msvcrt.getwch()

    except ImportError:

        def read_keypress_with_timeout(timeout: float) -> None:
            time.sleep(timeout)

else:

    def read_keypress_with_timeout(timeout: float) -> None:
        time.sleep(timeout)
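
A short usage sketch for the helpers above (the input strings are made up; the point is that illegal exFAT/NTFS characters come back as fullwidth homoglyphs and the result stays a legal filename):

import sys

# ':' '/' '"' '?' are swapped for their fullwidth look-alikes.
print(sanitize_filename('团员大会: 12/31 "特别场"?'))

# Path segments are sanitized one by one; separators are preserved.
print(sanitize_filepath(os.path.join("2018-01-01", "直播: 晚场.mp4")))

# On an interactive terminal this waits up to 3 seconds for a key press.
if sys.stdin.isatty():
    print("pressed:", read_keypress_with_timeout(3.0))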
PypiClean
/guibits-1.0-py3-none-any.whl/src/guibits1_0/cursoring.py
# author R.N.Bosworth
# version 28 Jul 22  19:55

import PyQt6.QtCore

from . import coloring, cursor_blinking, font_size_checking
from . import resolving, scrolling, show_checking
from . import type_checking2_0, windowing

"""
Copyright (C) 2014,2015,2016,2017,2020,2021,2022  R.N.Bosworth

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License (lgpl.txt) for more details.
"""

# exposed procedures
# ------------------

def draw_cursor(win,fsize,x,y,c):
    """
    pre:
      win = window in whose pane cursor is to be drawn
      win must be showing on the screen
      fsize = required point size of cursor, as a float
      x = horizontal offset in points of center of cursor
          from left-hand edge of win's pane, as a float
      y = vertical offset in points of top of cursor
          from top of win's pane, as a float
      c = color in which cursor is to be drawn on win's pane, as coloring.Color
      (x,y) must be such that cursor lies entirely within win's pane

    post:
      the cursor has been drawn on win's pane as specified
      the pane of win is positioned such that the cursor is visible

    test:
      win is None
      win is not showing on screen
      win is showing on screen
        win does not have a cursor
        win does have a cursor
      fsize = 72.1
      fsize = 72.0
      cursor slightly too far left
      cursor on left edge of window
      cursor slightly too far right
      cursor on right edge of window
      cursor slightly too high
      cursor on top edge of window
      cursor slightly too low
      cursor on bottom edge of window
    """
    type_checking2_0.check_derivative(win,windowing.Window)
    type_checking2_0.check_identical(fsize,float)
    type_checking2_0.check_identical(x,float)
    type_checking2_0.check_identical(y,float)
    type_checking2_0.check_derivative(c,coloring.Color)
    if win == None:
        raise Exception("specified window is None")
    show_checking.check_showing(win,"cursor")
    font_size_checking.check_pane_font_size(fsize)
    # check the cursor position
    _pr = PyQt6.QtCore.QRectF(0.0,0.0,win._my_pane_width,win._my_pane_height)
    if win._my_cursor == None:
        win._my_cursor = cursor_blinking._Cursor()  # cursor is a singleton
    _cu = win._my_cursor
    if not _pr.contains(PyQt6.QtCore.QRectF(x - _CURSOR_WIDTH/2.0,
                                            y, _CURSOR_WIDTH, fsize)):
        raise Exception("cursor lies outside the pane")
    # update the cursor as required
    with _cu._my_lock:
        _cu._my_font_size = fsize  # in points
        _cu._my_x = x - _CURSOR_WIDTH/2.0  # tlh corner of cursor in points
        _cu._my_y = y  # tlh corner of cursor in points
        _cu._my_color = c  # as coloring.Color
        _cu._is_colored = True
        _cu._my_window = win
        # start the blinking thread if necessary
        if _cu._my_blinker == None:
            _cu._my_blinker = cursor_blinking.new_cursor_blinker(_cu,_cursor_changed)
            _cu._my_blinker.start()
    # ensure cursor is visible to user
    _ensure_cursor_is_visible(win)
    win._my_pane.update()


def wipe_cursor(win):
    """
    pre:
      win = window in which any cursor is to be wiped

    post:
      a paint event has been queued for win
      when the paint event has been processed,
        the cursor does not appear in the pane of window win

    test:
      win is None
      win is non-None
        cursor does exist
        cursor does not exist
    """
    type_checking2_0.check_derivative(win,windowing.Window)
    if win == None:
        raise Exception("specified window is None")
    if win._my_cursor != None:
        with win._my_cursor._my_lock:
            # switch off blinking thread
            win._my_cursor._my_blinker.please_drop_dead()
        # make cursor invisible
        win._my_cursor = None
    win._my_pane.update()


# private members
# ---------------

_CURSOR_WIDTH = 1.0  # width in points


def _calculate_pane_location(vpr,pr,cr):
    """
    pre:
      vpr = rectangle of viewport in pixels, as QRectF
      pr = rectangle of pane relative to viewport in pixels, as QRectF
      cr = rectangle of cursor relative to pane in pixels, as QRectF
           (must intersect with pr)

    post:
      returns location of pane in pixels as QPointF, relative to viewport,
      which ensures that cursor rectangle is visible in viewport

    tests:
      cursor at top left-hand corner of pane
        pane at (-1.0,-1.0)
        pane at (0.0,0.0)
        pane at (1.0,1.0)
      cursor at bottom right-hand corner of pane
        pane just off bottom and right of view
        pane just at bottom and right of view
        pane just inside bottom and right of view
    """
    # x-axis
    px = pr.x()
    # find offset of left-hand edge of cursor wrt the viewport
    lhoff = cr.x() + pr.x()
    if lhoff < 0.0:
        px = pr.x()-lhoff
    # find offset of right-hand edge of cursor wrt the viewport
    rhoff = cr.x() + cr.width() + pr.x()
    if rhoff > vpr.width():
        px = pr.x() - (rhoff - vpr.width())
    # y-axis
    py = pr.y()
    # find offset of top edge of cursor wrt the viewport
    topoff = cr.y() + pr.y()
    if topoff < 0.0:
        py = pr.y()-topoff
    # find offset of bottom edge of cursor wrt the viewport
    botoff = cr.y() + cr.height() + pr.y()
    if botoff > vpr.height():
        py = pr.y() - (botoff - vpr.height())
    #p.set_location(px,py)
    return PyQt6.QtCore.QPointF(px,py)


def _cursor_changed(cursor):
    """
    pre:
      cursor = cursor whose position has changed, or which has appeared

    post:
      a paint event has been queued for the cursor's pane

    test:
      once thru
    """
    #print("cursoring.cursor_changed")
    cursor._my_window._my_pane.update()


def _ensure_cursor_is_visible(win):
    """
    pre:
      win = window in which cursor is to be checked

    post:
      the pane of win is positioned such that the cursor is visible
      a paint event on the window has been queued so that the concrete pane
        will eventually be repositioned

    test:
      cursor is within viewport
      cursor is not within viewport
    """
    # get the viewport rectangle from the last time it was displayed
    vpr = PyQt6.QtCore.QRectF(win._my_scroll_area.viewport().geometry())
    # get the pane rectangle from the last time it was displayed
    pr = PyQt6.QtCore.QRectF(win._my_pane.frameGeometry())
    # find cursor's rectangle in pixels
    cursor = win._my_cursor
    ppr = resolving.pixels_per_point(win._my_frame)
    with cursor._my_lock:
        cr = PyQt6.QtCore.QRectF(cursor._my_x*win._my_zoom_factor*ppr, \
                                 cursor._my_y*win._my_zoom_factor*ppr, \
                                 _CURSOR_WIDTH*win._my_zoom_factor*ppr, \
                                 cursor._my_font_size*win._my_zoom_factor*ppr)
    # it is guaranteed that this rectangle lies inside visible pane
    # calculate bloated cursor rectangle (with cordon sanitaire to make visible)
    # add a cordon sanitaire of cursor height around the cursor
    cursor_x = cr.x()
    cursor_y = cr.y()
    cursor_width = cr.width()
    cursor_height = cr.height()
    bcr = PyQt6.QtCore.QRectF(cursor_x - cursor_height, cursor_y - cursor_height, \
                              cursor_width + 2.0*cursor_height, 3.0*cursor_height)
    new_location = _calculate_pane_location(vpr,pr,bcr)
    if new_location.x() != pr.x() or new_location.y() != pr.y():
        pr = PyQt6.QtCore.QRectF(new_location.x(), new_location.y(), \
                                 pr.width(), pr.height())
        scrolling._reconcile_scrollbars(vpr,pr,win._my_scroll_area)
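
A small self-contained check of the pane-shift arithmetic in _calculate_pane_location above, assuming the function is in scope (for example, pasted into a test module in the same package); the geometry values are made up:

vpr = PyQt6.QtCore.QRectF(0.0, 0.0, 800.0, 600.0)    # viewport, pixels
pr = PyQt6.QtCore.QRectF(0.0, 0.0, 2000.0, 1000.0)   # pane currently at the origin
cr = PyQt6.QtCore.QRectF(950.0, 10.0, 100.0, 100.0)  # cursor cordon, partly off-screen right
loc = _calculate_pane_location(vpr, pr, cr)
# The cordon's right edge is at 1050 px while the viewport is 800 px wide,
# so the pane must be pulled 250 px to the left; the y position is untouched.
assert (loc.x(), loc.y()) == (-250.0, 0.0)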
PypiClean
/Flask-Hypertable-0.3.0.tar.gz/Flask-Hypertable-0.3.0/CONTRIBUTING.rst
============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.

You can contribute in many ways:

Types of Contributions
----------------------

Report Bugs
~~~~~~~~~~~

Report bugs at https://github.com/coderfi/flask_hypertable/issues.

If you are reporting a bug, please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Fix Bugs
~~~~~~~~

Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.

Implement Features
~~~~~~~~~~~~~~~~~~

Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.

Write Documentation
~~~~~~~~~~~~~~~~~~~

Flask Hypertable could always use more documentation, whether as part of the
official Flask Hypertable docs, in docstrings, or even on the web in blog
posts, articles, and such.

Submit Feedback
~~~~~~~~~~~~~~~

The best way to send feedback is to file an issue at
https://github.com/coderfi/flask_hypertable/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
  are welcome :)

Get Started!
------------

Ready to contribute? Here's how to set up `flask_hypertable` for local development.

1. Fork the `flask_hypertable` repo on GitHub.
2. Clone your fork locally::

    $ git clone git@github.com:your_name_here/flask_hypertable.git

3. Install your local copy into a virtualenv. Assuming you have
   virtualenvwrapper installed, this is how you set up your fork for local
   development::

    $ mkvirtualenv flask_hypertable
    $ cd flask_hypertable/
    $ python setup.py develop

4. Create a branch for local development::

    $ git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.

5. When you're done making changes, check that your changes pass flake8 and
   the tests, including testing other Python versions with tox::

    $ flake8 flask_hypertable tests
    $ python setup.py test
    $ tox

   To get flake8 and tox, just pip install them into your virtualenv.

6. Commit your changes and push your branch to GitHub::

    $ git add .
    $ git commit -m "Your detailed description of your changes."
    $ git push origin name-of-your-bugfix-or-feature

7. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

Before you submit a pull request, check that it meets these guidelines:

1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
   your new functionality into a function with a docstring, and add the
   feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7 and for PyPy. Check
   https://travis-ci.org/coderfi/flask_hypertable/pull_requests
   and make sure that the tests pass for all supported Python versions.

Tips
----

To run a subset of tests::

    $ python -m unittest tests.test_flask_hypertable
PypiClean
/Hippodamia-0.4.2.tar.gz/Hippodamia-0.4.2/hippodamia/onboarding.py
import datetime import pprint from hippodamia.enums import Enforcement from hippodamia.agentshadow.states.state_ids import state_ids import json import pelops.logging.mylogger from threading import Lock import collections class _GIDMapping: location = None room = None device = None type = None name = None def __init__(self, location=None, room=None, device=None, type=None, name=None): self.location = location self.room = room self.device = device self.type = type self.name = name def complete(self): return (self.location is not None and self.room is not None and self.device is not None and self.type is not None and self.name is not None) def __eq__(self, other): if not isinstance(other, self.__class__): # don't attempt to compare against unrelated types return NotImplemented return self.location == other.location \ and self.room == other.room \ and self.device == other.device \ and self.type == other.type and self.name == other.name class Onboarding: _logger = None _mqtt_client = None _agentshadows = None _asfactory = None _lock = None enforcement = None protocol_version = None topic_onboarding_request = None counter = 0 gid_list = None last_timestamp = None messages = None gid_blacklist = None def __init__(self, protocol_version, topic_onboarding_request, enforcement, agentshadows, asfactory, mqtt_client, logger): self.protocol_version = protocol_version self._logger = pelops.logging.mylogger.get_child(logger, __class__.__name__) self._mqtt_client = mqtt_client self.enforcement = enforcement self._agentshadows = agentshadows self._asfactory = asfactory self.gid_list = {} self.gid_blacklist = [] self.topic_onboarding_request = topic_onboarding_request self._lock = Lock() self.messages = collections.deque(maxlen=50) self._logger.info("__init__ done") def start(self): self._mqtt_client.subscribe(self.topic_onboarding_request, self._handler_onboarding_request) self._logger.info("subscribed to {}".format(self.topic_onboarding_request)) def stop(self): self._mqtt_client.unsubscribe(self.topic_onboarding_request, self._handler_onboarding_request) self._logger.info("unsubscribed from {}".format(self.topic_onboarding_request)) def _generate_unique_id(self, gid_mapping): if gid_mapping.type is None or gid_mapping.type == "": raise ValueError("value of gid_mapping.type must be set.") suffix = 0 while True: new_gid = "{}-{}".format(gid_mapping.type, suffix) if new_gid not in self.gid_list.keys(): return new_gid suffix += 1 def _last_gid_ok(self, last_gid, gid_mapping): if last_gid is None or last_gid == "": # last_gid is not a valid identifier return False if last_gid in self.gid_list: last_mapping = self.gid_list[last_gid] if gid_mapping == last_mapping: # last_gid's mapping values are identical to the one from the requesting service return True else: # last_gid in use by a different microservice return False # last_gid is unknown return True def _get_gid_for_gidmapping(self, gid_mapping): for gid, gm in self.gid_list.items(): if gm == gid_mapping: return gid return None def _is_known_gid_stopped(self, gid): try: state_id = self._agentshadows[gid].get_state_id() if not(state_id == state_ids.STOPPED or state_id == state_ids.ARCHIVED): return False except KeyError: pass return True def _same_session_id(self, gid, last_session): try: session = self._agentshadows[gid].properties.session if session is None or session == last_session: return True except KeyError: pass return False def _get_gid(self, identifier): gid_mapping = _GIDMapping(location=identifier["location"], room=identifier["room"], 
device=identifier["device"], type=identifier["type"], name=identifier["name"]) gid = None self._logger.debug("_get_gid - gid_mapping: location={}, room={}, device={}, type={}, name={}; complete={}" .format(gid_mapping.location, gid_mapping.room, gid_mapping.device, gid_mapping.type, gid_mapping.name, gid_mapping.complete())) if gid_mapping.complete(): if self._last_gid_ok(identifier["last-gid"], gid_mapping): self._logger.debug("_get_gid - _last_gid_ok('{}') == True".format(identifier["last-gid"])) gid = identifier["last-gid"] else: gid = self._get_gid_for_gidmapping(gid_mapping) self._logger.debug("_get_gid - gid for gidmapping={}".format(gid)) if not self._is_known_gid_stopped(gid) and not self._same_session_id(gid, identifier["last-session"]): self._logger.debug("_get_gid - agentshadow for gid {} is active (state: {}) and session ids do not match " "(last-session: {})" .format(gid, self._agentshadows[gid].get_state_id(), identifier["last-session"])) gid = None if gid in self.gid_blacklist: self._logger.debug("_get_gid - gid {} is blacklisted".format(gid)) gid = None if gid is None: gid = self._generate_unique_id(gid_mapping) self._logger.debug("_get_gid - generated unique id") self.gid_list[gid] = gid_mapping self._logger.info("_get_gid - gid: {}".format(gid)) return gid def _handler_onboarding_request(self, message): """ { "uuid": "550e8400-e29b-11d4-a716-446655440000", "onboarding-topic": "/hippodamia/550e8400-e29b-11d4-a716-446655440000", "protocol-version": 1, "timestamp": "1985-04-12T23:20:50.520Z", "identifier": { "last-gid": "copreus-1", "type": "DriverManager", "module": "copreus.drivermanager", "name": "display-driver", "location": "flat", "room": "living room", "device": "thermostat", "decription": "lorem ipsum", "host-name": "rpi", "node-id": "00-07-E9-AB-CD-EF", "ips": [ "192.168.0.1", "10.0.1.2", "2001:0db8:85a3:08d3:1319:8a2e:0370:7344" ], "config-hash": "cf23df2207d99a74fbe169e3eba035e633b65d94" } } """ with self._lock: message = message.decode("utf8") message = json.loads(message) self._logger.info("_handler_onboarding_request - received request") self._logger.debug("_handler_onboarding_request - message: {}".format(message)) self.counter += 1 self.last_timestamp = datetime.datetime.now() self.messages.append({"time": self.last_timestamp, "message": message}) if message["protocol-version"] != self.protocol_version: message = "handler_onboarding_request - expected protocol version {}, message: {}"\ .format(self.protocol_version, message) self._logger.error(message) else: identifier = message["identifier"] self._logger.debug("_handler_onboarding_request - identifier: {}".format(identifier)) gid = self._get_gid(identifier) shadow = None if gid not in self._agentshadows: self._logger.debug("_handler_onboarding_request - unknown gid") if self.enforcement == Enforcement.NONE: self._logger.debug("_handler_onboarding_request - Enforcement.NONE") shadow = self._asfactory.new_agentshadow(gid=gid) self._agentshadows[gid] = shadow shadow.start() elif self.enforcement == Enforcement.IGNORE: self._logger.debug("_handler_onboarding_request - Enforcement.IGNORE") return elif self.enforcement == Enforcement.STRICT: self._logger.debug("_handler_onboarding_request - Enforcement.STRICT") self._logger.error("handler_onboarding_request - required is set to strict and incoming is gid " "not pre-configured. 
'{}'".format(message)) return else: message = "handler_onboarding_request - dont know how to handler required '{}'"\ .format(self.enforcement) self._logger.error(message) raise ValueError(message) else: self._logger.debug("_handler_onboarding_request - existing gid {}".format(gid)) shadow = self._agentshadows[gid] self._logger.debug("_handler_onboarding_request - {}".format(shadow)) shadow.process_onboarding_request(message, session=self.counter) def get_stats(self, full=True): gid_list = list(self.gid_list.keys()) gid_list.sort() stats = { "counter": self.counter, "stored": len(self.messages), "gid_list": gid_list, "enforcement": self.enforcement.name, "protocol_version": self.protocol_version, "topic_onboarding_request": self.topic_onboarding_request, "last_timestamp": self.last_timestamp } if full: stats["messages"] = self.messages return stats def get_string_stats(self): output = "" output += "stats:\n{}\n".format(pprint.pformat(self.get_stats(full=False), indent=4)) try: output += "last message:\n{}\n".format(pprint.pformat(self.messages[-1], indent=4)) except IndexError: output += "last message: n/a\n" return output
PypiClean
/JitViewer-0.2.1.tar.gz/JitViewer-0.2.1/_jitviewer/static/canjs/1.1.4/can.mootools.min.js
(function(n,j){var c=n.can||{};if("undefined"===typeof GLOBALCAN||!1!==GLOBALCAN)n.can=c;c.isDeferred=function(a){var b=this.isFunction;return a&&b(a.then)&&b(a.pipe)};var Ra=0;c.cid=function(a,b){return a._cid?a._cid:a._cid=(b||"")+ ++Ra};c.addEvent=function(a,b){this.__bindEvents||(this.__bindEvents={});var d=a.split(".")[0];this.__bindEvents[d]||(this.__bindEvents[d]=[]);this.__bindEvents[d].push({handler:b,name:a});return this};c.removeEvent=function(a,b){if(this.__bindEvents){for(var d=0,c=this.__bindEvents[a.split(".")[0]], f;d<c.length;)f=c[d],b&&f.handler===b||!b&&f.name===a?c.splice(d,1):d++;return this}};c.dispatch=function(a){if(this.__bindEvents){var b=(this.__bindEvents[a.type.split(".")[0]]||[]).slice(0),d=this,e=[a].concat(a.data||[]);c.each(b,function(b){a.data=e.slice(1);b.handler.apply(d,e)})}};var Sa=/^\s*<(\w+)[^>]*>/,Ta=function(a,b){b===j&&(b=Sa.test(a)&&RegExp.$1);a&&c.isFunction(a.replace)&&(a=a.replace(/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,"<$1></$2>"));var d=document.createElement("div"), e=document.createElement("div");"tbody"===b||"tfoot"===b||"thead"===b?(e.innerHTML="<table>"+a+"</table>",d=3===e.firstChild.nodeType?e.lastChild:e.firstChild):"tr"===b?(e.innerHTML="<table><tbody>"+a+"</tbody></table>",d=3===e.firstChild.nodeType?e.lastChild:e.firstChild.firstChild):"td"===b||"th"===b?(e.innerHTML="<table><tbody><tr>"+a+"</tr></tbody></table>",d=3===e.firstChild.nodeType?e.lastChild:e.firstChild.firstChild.firstChild):"option"===b?(e.innerHTML="<select>"+a+"</select>",d=3===e.firstChild.nodeType? e.lastChild:e.firstChild):d.innerHTML=""+a;e={};d=d.childNodes;e.length=d.length;for(var f=0;f<d.length;f++)e[f]=d[f];return[].slice.call(e)};c.buildFragment=function(a){var a=Ta(a),b=document.createDocumentFragment();c.each(a,function(a){b.appendChild(a)});return b};var m=function(a,b){for(var d in b)b.hasOwnProperty(d)&&(a[d]=b[d])},z=function(a){if(!(this instanceof z))return new z;this._doneFuncs=[];this._failFuncs=[];this._resultArgs=null;this._status="";a&&a.call(this,this)};c.Deferred=z;c.when= z.when=function(){var a=c.makeArray(arguments);if(2>a.length){var b=a[0];return b&&c.isFunction(b.isResolved)&&c.isFunction(b.isRejected)?b:z().resolve(b)}var d=z(),e=0,f=[];c.each(a,function(b,c){b.done(function(){f[c]=2>arguments.length?arguments[0]:arguments;++e==a.length&&d.resolve.apply(d,f)}).fail(function(){d.reject(arguments)})});return d};var na=function(a,b){return function(d){var c=this._resultArgs=1<arguments.length?arguments[1]:[];return this.exec(d,this[a],c,b)}},oa=function(a,b){return function(){var d= this;c.each(Array.prototype.slice.call(arguments),function(c,f,h){c&&(c.constructor===Array?h.callee.apply(d,c):(d._status===b&&c.apply(d,d._resultArgs||[]),d[a].push(c)))});return this}};m(z.prototype,{pipe:function(a,b){var d=c.Deferred();this.done(function(){d.resolve(a.apply(this,arguments))});this.fail(function(){b?d.reject(b.apply(this,arguments)):d.reject.apply(d,arguments)});return d},resolveWith:na("_doneFuncs","rs"),rejectWith:na("_failFuncs","rj"),done:oa("_doneFuncs","rs"),fail:oa("_failFuncs", "rj"),always:function(){var a=c.makeArray(arguments);a.length&&a[0]&&this.done(a[0]).fail(a[0]);return this},then:function(){var a=c.makeArray(arguments);1<a.length&&a[1]&&this.fail(a[1]);a.length&&a[0]&&this.done(a[0]);return this},state:function(){switch(this._status){case "rs":return"resolved";case 
"rj":return"rejected";default:return"pending"}},isResolved:function(){return"rs"===this._status},isRejected:function(){return"rj"===this._status},reject:function(){return this.rejectWith(this,arguments)}, resolve:function(){return this.resolveWith(this,arguments)},exec:function(a,b,d,e){if(""!==this._status)return this;this._status=e;c.each(b,function(b){b.apply(a,d)});return this}});c.each=function(a,b,d){var c=0,f;if(a)if("number"===typeof a.length&&a.pop){a.attr&&a.attr("length");for(f=a.length;c<f&&!1!==b.call(d||a[c],a[c],c,a);c++);}else if(a.hasOwnProperty)for(f in a)if(a.hasOwnProperty(f)&&!1===b.call(d||a[f],a[f],f,a))break;return a};var O=Object.prototype.hasOwnProperty;c.isPlainObject=function(a){if(!a|| "object"!==typeof a||a.nodeType||null!=a&&a==a.window)return!1;try{if(a.constructor&&!O.call(a,"constructor")&&!O.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(b){return!1}for(var d in a);return d===j||O.call(a,d)};c.trim=function(a){return a&&a.trim()};m=function(){var a,b,d,e,f,h=arguments[0]||{},g=1,i=arguments.length,q=!1;"boolean"===typeof h&&(q=h,h=arguments[1]||{},g=2);"object"!==typeof h&&!c.isFunction(h)&&(h={});i===g&&(h=this,--g);for(;g<i;g++)if(null!=(a=arguments[g]))for(b in a)d= h[b],e=a[b],h!==e&&(q&&e&&(c.isPlainObject(e)||(f=c.isArray(e)))?(f?(f=!1,d=d&&c.isArray(d)?d:[]):d=d&&c.isPlainObject(d)?d:{},h[b]=c.extend(q,d,e)):e!==j&&(h[b]=e));return h};c.extend=m;c.makeArray=function(a){if(null==a)return[];try{return Type.isEnumerable(a)&&"string"!=typeof a?Array.prototype.slice.call(a):[a]}catch(b){var d=[],c;for(c=0;c<a.length;++c)d.push(a[c]);return d}};c.isArray=function(a){return"array"===typeOf(a)};c.inArray=function(a,b){return!b?-1:Array.prototype.indexOf.call(b,a)}; c.map=function(a,b){return Array.from(a||[]).map(b)};c.param=function(a){return Object.toQueryString(a)};c.isEmptyObject=function(a){return 0===Object.keys(a).length};c.proxy=function(a){var b=c.makeArray(arguments),a=b.shift();return a.bind.apply(a,b)};c.isFunction=function(a){return"function"==typeOf(a)};c.bind=function(a,b){this.bind&&this.bind!==c.bind?this.bind(a,b):this.addEvent?this.addEvent(a,b):this.nodeName&&1==this.nodeType?$(this).addEvent(a,b):c.addEvent.call(this,a,b);return this};c.unbind= function(a,b){this.unbind&&this.unbind!==c.unbind?this.unbind(a,b):this.removeEvent&&this.removeEvent(a,b);this.nodeName&&1==this.nodeType?$(this).removeEvent(a,b):c.removeEvent.call(this,a,b);return this};c.trigger=function(a,b,d,e){e=e===j?!0:e;d=d||[];if(a.fireEvent)for(a=a[0]||a;a;){b.type||(b={type:b,target:a});var f=a!==n?c.$(a).retrieve("events")[0]:a.retrieve("events");f&&f[b.type]&&f[b.type].keys.each(function(a){a.apply(this,[b].concat(d))},this);a=e&&a.parentNode?a.parentNode:null}else"string"=== typeof b&&(b={type:b}),b.target=b.target||a,b.data=d,c.dispatch.call(a,b)};c.delegate=function(a,b,d){this.delegate?this.delegate(a,b,d):this.addEvent&&this.addEvent(b+":relay("+a+")",d);return this};c.undelegate=function(a,b,d){this.undelegate?this.undelegate(a,b,d):this.removeEvent&&this.removeEvent(b+":relay("+a+")",d);return this};var pa={type:"method",success:j,error:j},P=function(a,b){for(var d in a)b[d]="function"==typeof b[d]?function(){a[d].apply(a,arguments)}:d[a]};c.ajax=function(a){var b= c.Deferred(),d=c.extend({},a),e;for(e in pa)d[e]!==j&&(d[pa[e]]=d[e],delete d[e]);d.method=d.method||"get";d.url=d.url.toString();var 
f=a.success,h=a.error;d.onSuccess=function(d){"json"===a.dataType&&(d=eval("("+d+")"));P(g.xhr,b);b.resolve(d,"success",g.xhr);f&&f(d,"success",g.xhr)};d.onError=function(){P(g.xhr,b);b.reject(g.xhr,"error");h(g.xhr,"error")};var g=new Request(d);g.send();P(g.xhr,b);return b};c.$=function(a){return a===n?n:$$(a)};var Ua=document.id;document.id=function(a){return a&& 11===a.nodeType?a:Ua.apply(document,arguments)};c.append=function(a,b){"string"===typeof b&&(b=c.buildFragment(b));return a.grab(b)};c.filter=function(a,b){return a.filter(b)};c.data=function(a,b,d){return d===j?a[0].retrieve(b):a.store(b,d)};c.addClass=function(a,b){return a.addClass(b)};c.remove=function(a){a=a.filter(function(a){if(1!==a.nodeType)a.parentNode.removeChild(a);else return!0});a.destroy();return a};var Va=Element.prototype.destroy;Element.implement({destroy:function(){c.trigger(this, "destroyed",[],!1);for(var a=this.getElementsByTagName("*"),b=0,d;(d=a[b])!==j;b++)c.trigger(d,"destroyed",[],!1);Va.apply(this,arguments)}});c.get=function(a,b){return a[b]};var Wa=Slick.uidOf;Slick.uidOf=function(a){return 1===a.nodeType||a===n||a.document===document?Wa(a):Math.random()};var Xa=/\=\=/,Ya=/([A-Z]+)([A-Z][a-z])/g,Za=/([a-z\d])([A-Z])/g,$a=/([a-z\d])([A-Z])/g,qa=/\{([^\}]+)\}/g,ab=/"/g,bb=/'/g;c.extend(c,{esc:function(a){return(""+(null===a||a===j||isNaN(a)&&"NaN"===""+a?"":a)).replace(/&/g, "&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(ab,"&#34;").replace(bb,"&#39;")},getObject:function(a,b,d){var a=a?a.split("."):[],e=a.length,f,h=0,g,i,b=c.isArray(b)?b:[b||n];if(!e)return b[0];for(;b[h];){f=b[h];for(i=0;i<e-1&&/^f|^o/.test(typeof f);i++)f=a[i]in f?f[a[i]]:d&&(f[a[i]]={});if(/^f|^o/.test(typeof f)&&(g=a[i]in f?f[a[i]]:d&&(f[a[i]]={}),g!==j))return!1===d&&delete f[a[i]],g;h++}},capitalize:function(a){return a.charAt(0).toUpperCase()+a.slice(1)},underscore:function(a){return a.replace(Xa, "/").replace(Ya,"$1_$2").replace(Za,"$1_$2").replace($a,"_").toLowerCase()},sub:function(a,b,d){var e=[];e.push(a.replace(qa,function(a,h){var g=c.getObject(h,b,d===j?d:!d);return g===j?(e=null,""):/^f|^o/.test(typeof g)&&e?(e.push(g),""):""+g}));return null===e?e:1>=e.length?e[0]:e},replacer:qa,undHash:/_|-/});var Q=0;c.Construct=function(){if(arguments.length)return c.Construct.extend.apply(c.Construct,arguments)};c.extend(c.Construct,{newInstance:function(){var a=this.instance(),b;a.setup&&(b= a.setup.apply(a,arguments));a.init&&a.init.apply(a,b||arguments);return a},_inherit:function(a,b,d){c.extend(d||a,a||{})},_overwrite:function(a,b,d,c){a[d]=c},setup:function(a){this.defaults=c.extend(!0,{},a.defaults,this.defaults)},instance:function(){Q=1;var a=new this;Q=0;return a},extend:function(a,b,d){function e(){if(!Q)return this.constructor!==e&&arguments.length?arguments.callee.extend.apply(arguments.callee,arguments):this.constructor.newInstance.apply(this.constructor,arguments)}"string"!= typeof a&&(d=b,b=a,a=null);d||(d=b,b=null);var d=d||{},f=this.prototype,h,g,i,q;q=this.instance();c.Construct._inherit(d,f,q);for(h in this)this.hasOwnProperty(h)&&(e[h]=this[h]);c.Construct._inherit(b,this,e);if(a){i=a.split(".");g=i.pop();i=f=c.getObject(i.join("."),n,!0);var cb=c.underscore(a.replace(/\./g,"_")),t=c.underscore(g);f[g]=e}c.extend(e,{constructor:e,prototype:q,namespace:i,shortName:g,_shortName:t,fullName:a,_fullName:cb});e.prototype.constructor=e;g=[this].concat(c.makeArray(arguments)); q=e.setup.apply(e,g);e.init&&e.init.apply(e,q||g);return e}});var v=function(a){return a&&(c.isArray(a)||c.isPlainObject(a)||a 
instanceof c.Observe)},R=function(a,b){return c.each(a,function(a){a&&a.unbind&&a.unbind("change"+b)})},S=function(a,b,d,e,f){e=e||l;f=f||l.List;a instanceof l?R([a],d._cid):a=c.isArray(a)?new f(a):new e(a);a.bind("change"+d._cid,function(){var e=c.makeArray(arguments),f=e.shift();e[0]=("*"===b?[d.indexOf(a),e[0]]:[b,e[0]]).join(".");f.triggeredNS=f.triggeredNS||{};f.triggeredNS[d._cid]|| (f.triggeredNS[d._cid]=!0,c.trigger(d,f,e))});return a},I=function(a,b,d){a.each(function(a,f){d[f]=v(a)&&c.isFunction(a[b])?a[b]():a});return d},ra=function(a){return function(){return c[a].apply(this,arguments)}},F=ra("addEvent"),sa=ra("removeEvent"),T=function(a){return c.isArray(a)?a:(""+a).split(".")},ta=1,G=0,U=[],V=[],l=c.Observe=c.Construct({bind:F,unbind:sa,id:"id",canMakeObserve:v,startBatch:function(a){G++;a&&V.push(a)},stopBatch:function(a,b){a?G=0:G--;if(0==G){var d=U.slice(0),e=V.slice(0); U=[];V=[];ta++;b&&this.startBatch();c.each(d,function(a){c.trigger.apply(c,a)});c.each(e,function(a){a})}},triggerBatch:function(a,b,d){if(!a._init){if(0==G)return c.trigger(a,b,d);U.push([a,{type:b,batchNum:ta},d])}},keys:function(a){var b=[];l.__reading&&l.__reading(a,"__keys");for(var d in a._data)b.push(d);return b}},{setup:function(a){this._data={};c.cid(this,".observe");this._init=1;this.attr(a);this.bind("change"+this._cid,c.proxy(this._changes,this));delete this._init},_changes:function(a, b,d,c,f){l.triggerBatch(this,{type:b,batchNum:a.batchNum},[c,f])},_triggerChange:function(a,b,d,e){l.triggerBatch(this,"change",c.makeArray(arguments))},attr:function(a,b){var d=typeof a;if("string"!==d&&"number"!==d)return this._attrs(a,b);if(b===j)return l.__reading&&l.__reading(this,a),this._get(a);this._set(a,b);return this},each:function(){l.__reading&&l.__reading(this,"__keys");return c.each.apply(j,[this.__get()].concat(c.makeArray(arguments)))},removeAttr:function(a){var a=T(a),b=a.shift(), d=this._data[b];if(a.length)return d.removeAttr(a);b in this._data&&(delete this._data[b],b in this.constructor.prototype||delete this[b],l.triggerBatch(this,"__keys"),this._triggerChange(b,"remove",j,d));return d},_get:function(a){var a=T(a),b=this.__get(a.shift());return a.length?b?b._get(a):j:b},__get:function(a){return a?this._data[a]:this._data},_set:function(a,b){var d=T(a),c=d.shift(),f=this.__get(c);if(v(f)&&d.length)f._set(d,b);else{if(d.length)throw"can.Observe: Object does not exist";this.__convert&& (b=this.__convert(c,b));this.__set(c,b,f)}},__set:function(a,b,d){if(b!==d){var c=this.__get().hasOwnProperty(a)?"set":"add";this.___set(a,v(b)?S(b,a,this):b);"add"==c&&l.triggerBatch(this,"__keys",j);this._triggerChange(a,c,b,d);d&&R([d],this._cid)}},___set:function(a,b){this._data[a]=b;a in this.constructor.prototype||(this[a]=b)},bind:F,unbind:sa,serialize:function(){return I(this,"serialize",{})},_attrs:function(a,b){if(a===j)return I(this,"attr",{});var a=c.extend({},a),d,e=this,f;l.startBatch(); this.each(function(d,g){f=a[g];f===j?b&&e.removeAttr(g):(e.__convert&&(f=e.__convert(g,f)),f instanceof c.Observe?e.__set(g,f,d):v(d)&&v(f)&&d.attr?d.attr(f,b):d!=f&&e.__set(g,f,d),delete a[g])});for(d in a)f=a[d],this._set(d,f);l.stopBatch();return this},compute:function(a){var b=this,d=function(d){return b.attr(a,d)};return c.compute?c.compute(d):d}}),db=[].splice,J=l({setup:function(a,b){this.length=0;c.cid(this,".observe");this._init=1;this.push.apply(this,c.makeArray(a||[]));this.bind("change"+ this._cid,c.proxy(this._changes,this));c.extend(this,b);delete 
this._init},_triggerChange:function(a,b,d,c){l.prototype._triggerChange.apply(this,arguments);~a.indexOf(".")||("add"===b?(l.triggerBatch(this,b,[d,+a]),l.triggerBatch(this,"length",[this.length])):"remove"===b?(l.triggerBatch(this,b,[c,+a]),l.triggerBatch(this,"length",[this.length])):l.triggerBatch(this,b,[d,+a]))},__get:function(a){return a?this[a]:this},___set:function(a,b){this[a]=b;+a>=this.length&&(this.length=+a+1)},serialize:function(){return I(this, "serialize",[])},splice:function(a,b){var d=c.makeArray(arguments),e;for(e=2;e<d.length;e++){var f=d[e];v(f)&&(d[e]=S(f,"*",this))}b===j&&(b=d[1]=this.length-a);e=db.apply(this,d);c.Observe.startBatch();0<b&&(this._triggerChange(""+a,"remove",j,e),R(e,this._cid));2<d.length&&this._triggerChange(""+a,"add",d.slice(2),e);c.Observe.stopBatch();return e},_attrs:function(a,b){if(a===j)return I(this,"attr",[]);a=c.makeArray(a);l.startBatch();this._updateAttrs(a,b);l.stopBatch()},_updateAttrs:function(a, b){for(var d=Math.min(a.length,this.length),c=0;c<d;c++){var f=this[c],h=a[c];v(f)&&v(h)?f.attr(h,b):f!=h&&this._set(c,h)}a.length>this.length?this.push.apply(this,a.slice(this.length)):a.length<this.length&&b&&this.splice(a.length)}});c.each({push:"length",unshift:0},function(a,b){var d=[][b];J.prototype[b]=function(){for(var b=[],c=a?this.length:0,h=arguments.length,g;h--;)g=arguments[h],b[h]=v(g)?S(g,"*",this,this.constructor.Observe,this.constructor):g;h=d.apply(this,b);(!this.comparator||!b.length)&& this._triggerChange(""+c,"add",b,j);return h}});c.each({pop:"length",shift:0},function(a,b){J.prototype[b]=function(){var d=arguments[0]&&c.isArray(arguments[0])?arguments[0]:c.makeArray(arguments),e=a&&this.length?this.length-1:0,d=[][b].apply(this,d);this._triggerChange(""+e,"remove",j,[d]);d&&d.unbind&&d.unbind("change"+this._cid);return d}});c.extend(J.prototype,{indexOf:function(a){this.attr("length");return c.inArray(a,this)},join:[].join,slice:function(){return new this.constructor(Array.prototype.slice.apply(this, arguments))},concat:function(){var a=[];c.each(c.makeArray(arguments),function(b,d){a[d]=b instanceof c.Observe.List?b.serialize():b});return new this.constructor(Array.prototype.concat.apply(this.serialize(),a))},forEach:function(a,b){c.each(this,a,b||this)},replace:function(a){c.isDeferred(a)?a.then(c.proxy(this.replace,this)):this.splice.apply(this,[0,this.length].concat(c.makeArray(a||[])));return this}});l.List=J;l.setup=function(){c.Construct.setup.apply(this,arguments);this.List=l.List({Observe:this}, {})};var eb=function(a,b,d){var e=new c.Deferred;a.then(function(){var a=c.makeArray(arguments);a[0]=b[d](a[0]);e.resolveWith(e,a)},function(){e.rejectWith(this,arguments)});"function"===typeof a.abort&&(e.abort=function(){return a.abort()});return e},fb=0,ua=/change.observe\d+/,K=function(a){c.Observe.__reading&&c.Observe.__reading(a,a.constructor.id);return a.__get(a.constructor.id)},va=function(a,b,d,c,f){var h;h=[a.serialize()];var g=a.constructor,i;"destroy"==b&&h.shift();"create"!==b&&h.unshift(K(a)); i=g[b].apply(g,h);h=i.pipe(function(d){a[f||b+"d"](d,i);return a});i.abort&&(h.abort=function(){i.abort()});h.then(d,c);return h},gb={create:{url:"_shortName",type:"post"},update:{data:function(a,b){var b=b||{},d=this.id;b[d]&&b[d]!==a&&(b["new"+c.capitalize(a)]=b[d],delete b[d]);b[d]=a;return b},type:"put"},destroy:{type:"delete",data:function(a){var b={};b.id=b[this.id]=a;return b}},findAll:{url:"_shortName"},findOne:{}},wa=function(a,b){return function(d){var d=a.data?a.data.apply(this,arguments): 
d,e=b||this[a.url||"_url"],f=d,h=a.type||"get",g={};"string"==typeof e?(e=e.split(/\s/),g.url=e.pop(),e.length&&(g.type=e.pop())):c.extend(g,e);g.data="object"==typeof f&&!c.isArray(f)?c.extend(g.data||{},f):f;g.url=c.sub(g.url,g.data,!0);return c.ajax(c.extend({type:h||"post",dataType:"json",success:void 0,error:void 0},g))}};c.Model=c.Observe({fullName:"can.Model",setup:function(a){this.store={};c.Observe.setup.apply(this,arguments);if(c.Model){this.List=W({Observe:this},{});var b=this,d=c.proxy(this._clean, b);c.each(gb,function(e,f){c.isFunction(b[f])||(b[f]=wa(e,b[f]));if(b["make"+c.capitalize(f)]){var h=b["make"+c.capitalize(f)](b[f]);c.Construct._overwrite(b,a,f,function(){this._reqs++;var a=h.apply(this,arguments),b=a.then(d,d);b.abort=a.abort;return b})}});if("can.Model"==b.fullName||!b.fullName)b.fullName="Model"+ ++fb;this._reqs=0;this._url=this._shortName+"/{"+this.id+"}"}},_ajax:wa,_clean:function(a){this._reqs--;if(!this._reqs)for(var b in this.store)this.store[b]._bindings||delete this.store[b]; return a},models:function(a,b){if(a){if(a instanceof this.List)return a;var d=this,e=[],f=b instanceof c.Observe.List?b:new (d.List||W),h=c.isArray(a),g=a instanceof W,g=h?a:g?a.serialize():a.data;f.length&&f.splice(0);c.each(g,function(a){e.push(d.model(a))});f.push.apply(f,e);h||c.each(a,function(a,b){"data"!==b&&f.attr(b,a)});return f}},model:function(a){if(a){a instanceof this&&(a=a.serialize());var b=a[this.id],b=(b||0===b)&&this.store[b]?this.store[b].attr(a,this.removeAttr||!1):new this(a); this._reqs&&(this.store[a[this.id]]=b);return b}}},{isNew:function(){var a=K(this);return!(a||0===a)},save:function(a,b){return va(this,this.isNew()?"create":"update",a,b)},destroy:function(a,b){if(this.isNew()){var d=this;return c.Deferred().done(function(a){d.destroyed(a)}).resolve(d)}return va(this,"destroy",a,b,"destroyed")},bind:function(a){ua.test(a)||(this._bindings||(this.constructor.store[this.__get(this.constructor.id)]=this,this._bindings=0),this._bindings++);return c.Observe.prototype.bind.apply(this, arguments)},unbind:function(a){ua.test(a)||(this._bindings--,this._bindings||delete this.constructor.store[K(this)]);return c.Observe.prototype.unbind.apply(this,arguments)},___set:function(a,b){c.Observe.prototype.___set.call(this,a,b);a===this.constructor.id&&this._bindings&&(this.constructor.store[K(this)]=this)}});c.each({makeFindAll:"models",makeFindOne:"model"},function(a,b){c.Model[b]=function(b){return function(c,f,h){c=eb(b.call(this,c),this,a);c.then(f,h);return c}}});c.each(["created", "updated","destroyed"],function(a){c.Model.prototype[a]=function(b){var d=this.constructor;b&&"object"==typeof b&&this.attr(b.attr?b.attr():b);c.trigger(this,a);c.trigger(this,"change",a);c.trigger(d,a,this)}});var W=c.Model.List=c.Observe.List({setup:function(){c.Observe.List.prototype.setup.apply(this,arguments);var a=this;this.bind("change",function(b,d){if(/\w+\.destroyed/.test(d)){var c=a.indexOf(b.target);-1!=c&&a.splice(c,1)}})}}),hb=/^\d+$/,ib=/([^\[\]]+)|(\[\])/g,jb=/([^?#]*)(#.*)?$/,xa= function(a){return decodeURIComponent(a.replace(/\+/g," "))};c.extend(c,{deparam:function(a){var b={},d;a&&jb.test(a)&&(a=a.split("&"),c.each(a,function(a){for(var a=a.split("="),c=xa(a.shift()),h=xa(a.join("=")),g=b,a=c.match(ib),c=0,i=a.length-1;c<i;c++)g[a[c]]||(g[a[c]]=hb.test(a[c+1])||"[]"==a[c+1]?[]:{}),g=g[a[c]];d=a.pop();"[]"==d?g.push(h):g[d]=h}));return b}});var ya=/\:([\w\.]+)/g,kb=function(a){var 
b=[];c.each(a,function(a,e){b.push(("className"===e?"class":e)+'="'+("href"===e?a:c.esc(a))+ '"')});return b.join(" ")},za=function(a,b){var c=0,e=0,f={},h;for(h in a.defaults)a.defaults[h]===b[h]&&(f[h]=1,c++);for(;e<a.names.length;e++){if(!b.hasOwnProperty(a.names[e]))return-1;f[a.names[e]]||c++}return c},X=!0,Y=n.location,x=c.each,m=c.extend;c.route=function(a,b){var b=b||{},d=[],e=a.replace(ya,function(e,h,g){d.push(h);return"([^\\"+(a.substr(g+e.length,1)||c.route._querySeparator)+"]"+(b[h]?"*":"+")+")"});c.route.routes[a]={test:RegExp("^"+e+"($|"+(c.route._querySeparator+"").replace(/([.?*+\^$\[\]\\(){}|\-])/g, "\\$1")+")"),route:a,names:d,defaults:b,length:a.split("/").length};return c.route};m(c.route,{_querySeparator:"&",_paramsMatcher:/^(?:&[^=]+=[^&]*)+/,param:function(a,b){var d,e=0,f,h=a.route,g=0;delete a.route;x(a,function(){g++});x(c.route.routes,function(b){f=za(b,a);f>e&&(d=b,e=f);if(f>=g)return!1});c.route.routes[h]&&za(c.route.routes[h],a)===e&&(d=c.route.routes[h]);if(d){var i=m({},a),h=d.route.replace(ya,function(b,c){delete i[c];return a[c]===d.defaults[c]?"":encodeURIComponent(a[c])}), q;x(d.defaults,function(a,b){i[b]===a&&delete i[b]});q=c.param(i);b&&c.route.attr("route",d.route);return h+(q?c.route._querySeparator+q:"")}return c.isEmptyObject(a)?"":c.route._querySeparator+c.param(a)},deparam:function(a){var b={length:-1};x(c.route.routes,function(c){c.test.test(a)&&c.length>b.length&&(b=c)});if(-1<b.length){var d=a.match(b.test),e=d.shift(),f=(e=a.substr(e.length-(d[d.length-1]===c.route._querySeparator?1:0)))&&c.route._paramsMatcher.test(e)?c.deparam(e.slice(1)):{},f=m(!0, {},b.defaults,f);x(d,function(a,d){a&&a!==c.route._querySeparator&&(f[b.names[d]]=decodeURIComponent(a))});f.route=b.route;return f}a.charAt(0)!==c.route._querySeparator&&(a=c.route._querySeparator+a);return c.route._paramsMatcher.test(a)?c.deparam(a.slice(1)):{}},data:new c.Observe({}),routes:{},ready:function(a){!1===a&&(X=a);if(!0===a||!0===X)c.route._setup(),Aa();return c.route},url:function(a,b){b&&(a=m({},Z,a));return"#!"+c.route.param(a)},link:function(a,b,d,e){return"<a "+kb(m({href:c.route.url(b, e)},d))+">"+a+"</a>"},current:function(a){return Y.hash=="#!"+c.route.param(a)},_setup:function(){c.bind.call(n,"hashchange",Aa)},_getHash:function(){return Y.href.split(/#!?/)[1]||""},_setHash:function(a){a=c.route.param(a,!0);Y.hash="#!"+a;return a}});x("bind unbind delegate undelegate attr removeAttr".split(" "),function(a){c.route[a]=function(){return c.route.data[a].apply(c.route.data,arguments)}});var Ba,Z,Aa=c.route.setState=function(){var a=c.route._getHash();Z=c.route.deparam(a);(!aa||a!== Ca)&&c.route.attr(Z,!0)},Ca,aa;c.route.bind("change",function(){aa=1;clearTimeout(Ba);Ba=setTimeout(function(){aa=0;var a=c.route.data.serialize();Ca=c.route._setHash(a)},1)});c.bind.call(document,"ready",c.route.ready);("complete"===document.readyState||"interactive"===document.readyState)&&X&&c.route.ready();c.route.constructor.canMakeObserve=c.Observe.canMakeObserve;var F=function(a,b,d){c.bind.call(a,b,d);return function(){c.unbind.call(a,b,d)}},y=c.isFunction,m=c.extend,x=c.each,lb=[].slice, Da=/\{([^\}]+)\}/g,mb=c.getObject("$.event.special",[c])||{},Ea=function(a,b,d,e){c.delegate.call(a,b,d,e);return function(){c.undelegate.call(a,b,d,e)}},ba;c.Control=c.Construct({setup:function(){c.Construct.setup.apply(this,arguments);if(c.Control){var a;this.actions={};for(a in this.prototype)this._isAction(a)&&(this.actions[a]=this._action(a))}},_shifter:function(a,b){var d="string"==typeof 
b?a[b]:b;y(d)||(d=a[d]);return function(){a.called=b;return d.apply(a,[this.nodeName?c.$(this):this].concat(lb.call(arguments, 0)))}},_isAction:function(a){var b=this.prototype[a],c=typeof b;return"constructor"!==a&&("function"==c||"string"==c&&y(this.prototype[b]))&&!(!mb[a]&&!ca[a]&&!/[^\w]/.test(a))},_action:function(a,b){Da.lastIndex=0;if(b||!Da.test(a)){var d=b?c.sub(a,[b,n]):a;if(!d)return null;var e=c.isArray(d),f=e?d[1]:d,h=f.split(/\s+/g),g=h.pop();return{processor:ca[g]||ba,parts:[f,h.join(" "),g],delegate:e?d[0]:j}}},processors:{},defaults:{}},{setup:function(a,b){var d=this.constructor,e=d.pluginName||d._fullName; this.element=c.$(a);e&&"can_control"!==e&&this.element.addClass(e);(e=c.data(this.element,"controls"))||c.data(this.element,"controls",e=[]);e.push(this);this.options=m({},d.defaults,b);this.on();return[this.element,this.options]},on:function(a,b,d,e){if(!a){this.off();var a=this.constructor,b=this._bindings,d=a.actions,e=this.element,f=c.Control._shifter(this,"destroy"),h,g;for(h in d)if(d.hasOwnProperty(h)&&(g=d[h]||a._action(h,this.options)))b.push(g.processor(g.delegate||e,g.parts[2],g.parts[1], h,this));c.bind.call(e,"destroyed",f);b.push(function(a){c.unbind.call(a,"destroyed",f)});return b.length}"string"==typeof a&&(e=d,d=b,b=a,a=this.element);e===j&&(e=d,d=b,b=null);"string"==typeof e&&(e=c.Control._shifter(this,e));this._bindings.push(b?Ea(a,c.trim(b),d,e):F(a,d,e));return this._bindings.length},off:function(){var a=this.element[0];x(this._bindings||[],function(b){b(a)});this._bindings=[]},destroy:function(){var a=this.constructor,a=a.pluginName||a._fullName;this.off();a&&"can_control"!== a&&this.element.removeClass(a);a=c.data(this.element,"controls");a.splice(c.inArray(this,a),1);c.trigger(this,"destroyed");this.element=null}});var ca=c.Control.processors;ba=function(a,b,d,e,f){e=c.Control._shifter(f,e);return d?Ea(a,c.trim(d),b,e):F(a,b,e)};x("change click contextmenu dblclick keydown keyup keypress mousedown mousemove mouseout mouseover mouseup reset resize scroll select submit focusin focusout mouseenter mouseleave touchstart touchmove touchcancel touchend touchleave".split(" "), function(a){ca[a]=ba});c.Control.processors.route=function(a,b,d,e,f){d=d||"";c.route(d);var h,g=function(a){if(c.route.attr("route")===d&&(a.batchNum===j||a.batchNum!==h))if(h=a.batchNum,a=c.route.attr(),delete a.route,c.isFunction(f[e]))f[e](a);else f[f[e]](a)};c.route.bind("change",g);return function(){c.route.unbind("change",g)}};var y=c.isFunction,nb=c.makeArray,Fa=1,k=c.view=function(a,b,d,e){y(d)&&(e=d,d=j);var f=y(e)?function(a){e(k.frag(a))}:null,a=k.render(a,b,d,f),h=c.Deferred();return y(a)? 
a:c.isDeferred(a)?(a.done(function(a,b){h.resolve.call(h,k.frag(a),b)}),h):k.frag(a)};c.extend(k,{frag:function(a,b){return k.hookup(k.fragment(a),b)},fragment:function(a){a=c.buildFragment(a,document.body);a.childNodes.length||a.appendChild(document.createTextNode(""));return a},toId:function(a){return c.map(a.toString().split(/\/|\./g),function(a){if(a)return a}).join("_")},hookup:function(a,b){var d=[],e,f;c.each(a.childNodes?c.makeArray(a.childNodes):a,function(a){1===a.nodeType&&(d.push(a),d.push.apply(d, c.makeArray(a.getElementsByTagName("*"))))});c.each(d,function(a){if(a.getAttribute&&(e=a.getAttribute("data-view-id"))&&(f=k.hookups[e]))f(a,b,e),delete k.hookups[e],a.removeAttribute("data-view-id")});return a},hookups:{},hook:function(a){k.hookups[++Fa]=a;return" data-view-id='"+Fa+"'"},cached:{},cachedRenderers:{},cache:!0,register:function(a){this.types["."+a.suffix]=a},types:{},ext:".ejs",registerScript:function(){},preload:function(){},render:function(a,b,d,e){y(d)&&(e=d,d=j);var f=ob(b);if(f.length){var h= new c.Deferred,g=c.extend({},b);f.push(Ga(a,!0));c.when.apply(c,f).then(function(a){var f=nb(arguments),i=f.pop();if(c.isDeferred(b))g=Ha(a);else for(var j in b)c.isDeferred(b[j])&&(g[j]=Ha(f.shift()));f=i(g,d);h.resolve(f,g);e&&e(f,g)});return h}var i,f=y(e),h=Ga(a,f);if(f)i=h,h.then(function(a){e(b?a(b,d):a)});else{if("resolved"===h.state()&&h.__view_id)return a=k.cachedRenderers[h.__view_id],b?a(b,d):a;h.then(function(a){i=b?a(b,d):a})}return i},registerView:function(a,b,d,e){b=(d||k.types[k.ext]).renderer(a, b);e=e||new c.Deferred;k.cache&&(k.cached[a]=e,e.__view_id=a,k.cachedRenderers[a]=b);return e.resolve(b)}});var Ia=function(a,b){if(!a.length)throw"can.view: No template or empty template:"+b;},Ga=function(a,b){var d=a.match(/\.[\w\d]+$/),e,f,h;a.match(/^#/)&&(a=a.substr(1));if(f=document.getElementById(a))d="."+f.type.match(/\/(x\-)?(.+)/)[2];!d&&!k.cached[a]&&(a+=d=k.ext);c.isArray(d)&&(d=d[0]);h=k.toId(a);if(a.match(/^\/\//))var g=a.substr(2),a=!n.steal?g:steal.config().root.mapJoin(g);e=k.types[d]; if(k.cached[h])return k.cached[h];if(f)return k.registerView(h,f.innerHTML,e);var i=new c.Deferred;c.ajax({async:b,url:a,dataType:"text",error:function(b){Ia("",a);i.reject(b)},success:function(b){Ia(b,a);k.registerView(h,b,e,i)}});return i},ob=function(a){var b=[];if(c.isDeferred(a))return[a];for(var d in a)c.isDeferred(a[d])&&b.push(a[d]);return b},Ha=function(a){return c.isArray(a)&&"success"===a[1]?a[0]:a};c.extend(k,{register:function(a){this.types["."+a.suffix]=a;k[a.suffix]=function(b,d){if(!d){var e= function(){return k.frag(e.render.apply(this,arguments))};e.render=function(){var c=a.renderer(null,b);return c.apply(c,arguments)};return e}k.preload(b,a.renderer(b,d));return c.view(b)}},registerScript:function(a,b,c){return"can.view.preload('"+b+"',"+k.types["."+a].script(b,c)+");"},preload:function(a,b){function d(){return k.frag(b.apply(this,arguments))}k.cached[a]=(new c.Deferred).resolve(function(a,c){return b.call(a,a,c)});d.render=b;return d}});var pb=function(a,b){var d;c.Observe&&(d=c.Observe.__reading, c.Observe.__reading=function(a,b){e.push({obj:a,attr:b})});var e=[],f=a.call(b);c.Observe&&(c.Observe.__reading=d);return{value:f,observed:e}},Ja=function(a,b,d){var e={},f=!0,h={value:j,teardown:function(){for(var a in e){var b=e[a];b.observe.obj.unbind(b.observe.attr,i);delete e[a]}}},g,i=function(a){if(a.batchNum===j||a.batchNum!==g){var b=h.value,c=q();h.value=c;c!==b&&d(c,b);g=g=a.batchNum}},q=function(){var 
d=pb(a,b),g=d.observed,d=d.value;f=!f;c.each(g,function(a){e[a.obj._cid+"|"+a.attr]? e[a.obj._cid+"|"+a.attr].matched=f:(e[a.obj._cid+"|"+a.attr]={matched:f,observe:a},a.obj.bind(a.attr,i))});for(var h in e)g=e[h],g.matched!==f&&(g.observe.obj.unbind(g.observe.attr,i),delete e[h]);return d};h.value=q();h.isListening=!c.isEmptyObject(e);return h};c.compute=function(a,b){if(a&&a.isComputed)return a;var d,e=0,f,h=!0;"function"===typeof a?f=function(g){return g===j?d?(e&&c.Observe.__reading&&c.Observe.__reading(f,"change"),d.value):a.call(b||this):a.apply(b||this,arguments)}:(f=function(b){if(b=== j)return c.Observe.__reading&&c.Observe.__reading(f,"change"),a;var d=a;a=b;d!==b&&c.Observe.triggerBatch(f,"change",[b,d]);return b},h=!1);f.isComputed=!0;c.cid(f,"compute");f.bind=function(g,i){c.addEvent.apply(f,arguments);e===0&&h&&(d=Ja(a,b||this,function(a,b){c.Observe.triggerBatch(f,"change",[a,b])}));e++};f.unbind=function(a,b){c.removeEvent.apply(f,arguments);e--;e===0&&h&&d.teardown()};return f};c.compute.binder=Ja;var qb=/(\r|\n)+/g,da={option:"textContent",textarea:"value"},Ka={tr:"tbody", option:"select",td:"tr",th:"tr",li:"ul"},La=function(a,b,c){if(a)return a;for(;c<b.length;){if("<"==b[c]&&Ka[b[c+1]])return Ka[b[c+1]];c++}},rb=function(a){eval(a)},sb=/([^\s]+)[\s]*=[\s]*$/,A=null,B=null,ea=null,r=null,fa=function(){return B?"'"+ea.match(sb)[1]+"'":A?1:0};c.view.Scanner=Scanner=function(a){c.extend(this,{text:{},tokens:[]},a);this.tokenReg=[];this.tokenSimple={"<":"<",">":">",'"':'"',"'":"'"};this.tokenComplex=[];this.tokenMap={};for(var a=0,b;b=this.tokens[a];a++)b[2]?(this.tokenReg.push(b[2]), this.tokenComplex.push({abbr:b[1],re:RegExp(b[2]),rescan:b[3]})):(this.tokenReg.push(b[1]),this.tokenSimple[b[1]]=b[0]),this.tokenMap[b[0]]=b[1];this.tokenReg=RegExp("("+this.tokenReg.slice(0).concat(["<",">",'"',"'"]).join("|")+")","g")};Scanner.prototype={helpers:[{name:/\s*\(([\$\w]+)\)\s*->([^\n]*)/,fn:function(a){a=a.match(/\s*\(([\$\w]+)\)\s*->([^\n]*)/);return"function(__){var "+a[1]+"=can.$(__);"+a[2]+"}"}}],scan:function(a,b){var c=[],e=0,f=this.tokenSimple,h=this.tokenComplex,a=a.replace(qb, "\n");a.replace(this.tokenReg,function(b,g){var i=arguments[arguments.length-2];i>e&&c.push(a.substring(e,i));if(f[b])c.push(b);else for(var o=0,j;j=h[o];o++)if(j.re.test(b)){c.push(j.abbr);j.rescan&&c.push(j.rescan(g));break}e=i+g.length});e<a.length&&c.push(a.substr(e));var g="",i=["var ___v1ew = [];"+(this.text.start||"")],k=function(a,b){i.push("___v1ew.push(",'"',a.split("\\").join("\\\\").split("\n").join("\\n").split('"').join('\\"').split("\t").join("\\t"),'"'+(b||"")+");")},l=[],t,m=null, n=!1,s="",p=[],D=0,u,o=this.tokenMap;for(A=B=ea=null;(u=c[D++])!==j;){if(null===m)switch(u){case o.left:case o.escapeLeft:case o.returnLeft:n=A&&1;case o.commentLeft:m=u;g.length&&k(g);g="";break;case o.escapeFull:n=A&&1;r=1;m=o.escapeLeft;g.length&&k(g);r=c[D++];g=r.content||r;r.before&&k(r.before);c.splice(D,0,o.right);break;case o.commentFull:break;case o.templateLeft:g+=o.left;break;case "<":0!==c[D].indexOf("!--")&&(A=1,n=0);g+=u;break;case ">":A=0;t="/"==g.substr(g.length-1);n||da[p[p.length- 1]]?(t?k(g.substr(0,g.length-1),',can.view.pending(),"/>"'):k(g,',can.view.pending(),">"'),g=""):g+=u;t&&(p.pop(),s=p[p.length-1]);break;case "'":case '"':A&&(B&&B===u?B=null:null===B&&(B=u,ea=t));default:"<"===t&&(s=u.split(/\s/)[0],0===s.indexOf("/")&&p.pop()===s.substr(1)?s=p[p.length-1]:p.push(s)),g+=u}else switch(u){case o.right:case o.returnRight:switch(m){case o.left:t=--g.split("{").length- 
--g.split("}").length;1==t?(i.push("___v1ew.push(","can.view.txt(0,'"+La(s,c,D)+"',"+fa()+",this,function(){", "var ___v1ew = [];",g),l.push({before:"",after:"return ___v1ew.join('')}));\n"})):(e=l.length&&-1==t?l.pop():{after:";"},e.before&&i.push(e.before),i.push(g,";",e.after));break;case o.escapeLeft:case o.returnLeft:(t=--g.split("{").length- --g.split("}").length)&&l.push({before:"return ___v1ew.join('')",after:"}));"});for(var m=m===o.escapeLeft?1:0,tb={insert:"___v1ew.push(",tagName:La(s,c,D),status:fa()},ga=0;ga<this.helpers.length;ga++){var ha=this.helpers[ga];if(ha.name.test(g)){g=ha.fn(g,tb);ha.name.source== /^>[\s]*\w*/.source&&(m=0);break}}"object"==typeof g?g.raw&&i.push(g.raw):i.push("___v1ew.push(","can.view.txt("+m+",'"+s+"',"+fa()+",this,function(){ "+(this.text.escape||"")+"return ",g,t?"var ___v1ew = [];":"}));");r&&(r.after&&r.after.length)&&(k(r.after.length),r=null)}m=null;g="";break;case o.templateLeft:g+=o.left;break;default:g+=u}t=u}g.length&&k(g);i.push(";");g={out:"with(_VIEW) { with (_CONTEXT) {"+i.join("")+" return ___v1ew.join('')}}"};rb.call(g,"this.fn = (function(_CONTEXT,_VIEW){"+ g.out+"});\r\n//@ sourceURL="+b+".js");return g}};var ia=!0;try{document.createTextNode("")._=0}catch(wb){ia=!1}var L={"class":"className",value:"value",innerText:"innerText",textContent:"textContent"},Ma={"":"span",table:"tbody",tr:"td",ol:"li",ul:"li",tbody:"tr",thead:"tr",tfoot:"tr",select:"option",optgroup:"option"},ub=/__!!__/g,da={option:"textContent"in document.createElement("option")?"textContent":"innerText",textarea:"value"},Na=c.each(["checked","disabled","readonly","required"],function(a){L[a]= a}),ja=function(a,b,d){var e=a.nodeName.toString().toLowerCase(),f=L[b];if(f){if(a[f]=-1<c.inArray(b,Na)?!0:d,"value"===f&&("input"===e||"textarea"===e))a.defaultValue=d}else a.setAttribute(b,d)},H=[],ka=function(a){if("string"==typeof a)return a;if(!a&&0!==a)return"";var b=a.hookup&&function(b,c){a.hookup.call(a,b,c)}||"function"==typeof a&&a;return b?(H.push(b),""):""+a},vb=function(a){return"string"==typeof a||"number"==typeof a?c.esc(a):ka(a)},E={},M={},N={},la="ejs_"+Math.random(),ma=0,C=function(a){if(ia|| 3!==a.nodeType)return a[la]?a[la]:a[la]=(a.nodeName?"element_":"obj_")+ ++ma;for(var b in M)if(M[b]===a)return b;M["text_"+ ++ma]=a;return"text_"+ma},Oa=function(a,b){var d=E[C(a)];if(d){var e=c.inArray(b,d);0<=e&&d.splice(e,1);d.length||delete E[C(a)]}},Pa=function(a,b){var c=E[C(a)];c||(c=E[C(a)]=[]);c.push(b)},Qa=function(a){a=Ma[a]||"span";return"span"===a?"@@!!@@":"<"+a+">"+Qa(a)+"</"+a+">"};c.extend(c.view,{pending:function(){var a=H.slice(0);lastHookups=a;H=[];return c.view.hook(function(b){c.each(a, function(a){a(b)})})},registerNode:function(a){var b=C(a);N[b]=a;c.each(a,function(a){Pa(a,b)})},unregisterNode:function(a){var b=C(a);c.each(a,function(a){Oa(a,b)});delete N[b]},txt:function(a,b,d,e,f){var h=c.compute.binder(f,e,function(a,b){n(a,b)});if(!h.isListening)return(a||0!==d?vb:ka)(h.value);var g,i,k=function(){h.teardown();i&&c.view.unregisterNode(i)},l=function(a){c.bind.call(a,"destroyed",k);g=a},m=function(a){a||(k(),c.unbind.call(g,"destroyed",k))},e=Ma[b]||"span",n,r=da[b];if(0=== d&&!r)return"<"+e+c.view.hook(a?function(a,b){n=function(a){d.nodeValue=""+a;m(d.parentNode)};var c=b&&11===a.parentNode.nodeType?b:a.parentNode,d=document.createTextNode(h.value);c.insertBefore(d,a);c.removeChild(a);l(c)}:function(a,b){n=function(a){d[0].parentNode&&e(a);m(d[0].parentNode)};var b=b&&11===a.parentNode.nodeType?b:a.parentNode,d,e=function(e){var 
f=c.view.frag(e,b),e=c.makeArray(f.childNodes),g=d?d[d.length-1]:a;g.nextSibling?g.parentNode.insertBefore(f,g.nextSibling):g.parentNode.appendChild(f); d?(f=c.makeArray(d),c.view.replace(d,e),c.remove(c.$(f))):(c.remove(c.$(a)),i=d=e,c.view.registerNode(d))};e(h.value,[a]);l(b)})+">"+Qa(e)+"</"+e+">";if(1===d){var s=h.value.replace(/['"]/g,"").split("=")[0];H.push(function(a){n=function(b){var b=(b||"").replace(/['"]/g,"").split("="),d=b[0];if(d!=s&&s){var e=s;-1<c.inArray(e,Na)?a[e]=!1:a.removeAttribute(e)}d&&(ja(a,d,b[1]),s=d)};l(a)});return h.value}var p=0===d?r:d;(0===d?lastHookups:H).push(function(a){n=function(){ja(a,p,g.render(),r)};var b= c.$(a),d;(d=c.data(b,"hooks"))||c.data(b,"hooks",d={});var e=(L[p]?a[L[p]]:a.getAttribute(p))||"",b=e.split("__!!__"),f=[],g;f.push(b.shift(),b.join("__!!__"));d[p]?d[p].bindings.push(h):d[p]={render:function(){var a=0;return e.replace(ub,function(){return ka(g.bindings[a++].value)})},bindings:[h],batchNum:j};g=d[p];f.splice(1,0,h.value);ja(a,p,f.join(""),r);l(a)});return"__!!__"},replace:function(a,b){a=c.makeArray(a);c.each(a,function(d){c.each(c.makeArray(E[C(d)]),function(e){var f=N[e],h=c.inArray(d, f),g=c.inArray(a[a.length-1],f);if(0<=h&&0<=g){for(var i=h;i<=g;i++)Oa(f[i],e);f.splice.apply(f,[h,g-h+1].concat(b));c.each(b,function(a){Pa(a,e)})}else c.view.unregisterNode(f)})})},canExpando:ia,textNodeMap:M,nodeMap:E,nodeListMap:N});var m=c.extend,w=function(a){if(this.constructor!=w){var b=new w(a);return function(a,c){return b.render(a,c)}}"function"==typeof a?this.template={fn:a}:(m(this,a),this.template=this.scanner.scan(this.text,this.name))};c.EJS=w;w.prototype.render=function(a,b){a=a|| {};return this.template.fn.call(a,a,new w.Helpers(a,b||{}))};m(w.prototype,{scanner:new c.view.Scanner({tokens:[["templateLeft","<%%"],["templateRight","%>"],["returnLeft","<%=="],["escapeLeft","<%="],["commentLeft","<%#"],["left","<%"],["right","%>"],["returnRight","%>"]]})});w.Helpers=function(a,b){this._data=a;this._extras=b;m(this,b)};w.Helpers.prototype={list:function(a,b){c.each(a,function(c,e){b(c,e,a)})}};c.view.register({suffix:"ejs",script:function(a,b){return"can.EJS(function(_CONTEXT,_VIEW) { "+ (new w({text:b,name:a})).template.out+" })"},renderer:function(a,b){return w({text:b,name:a})}})})(this);
PypiClean
/Fict-1.0.1.tar.gz/Fict-1.0.1/README.md
# FICT is a simple file integrity checking tool.

## Usage

Basic usage is as follows.

```
cd ~
fict init
fict add ~/Documents
fict list
fict compute
fict check
```

The database is stored in `~/.fict/fict_db`. The fict_db file that normally lives in `~/.fict` can be manipulated manually; the changes will be ingested upon the next run. The location where the `fict_db` is stored can be specified with the option `--fict-dir`. The default is your `$HOME`.

`--hash-tool` defaults to sha512sum, though you can pass in whatever tool you want to use. Some options are b2sum, md5sum, sha1sum, and crc32. Use the tool that you feel best about. md5sum is available everywhere and is a good middle ground. b2sum is faster but not as widely available. We do a b2sum on every file by default, but this can also be adjusted via `--default-hash-tool`. crc32 is simple but may end up taking the longest even though it is supposed to be faster: crc32 is meant for verifying short communications rather than a 27 GB mp4.

**Note** that if your fict_db is defined to use one tool and you change it after the fact, the software will get confused. In that case it's recommended that you open the fict_db file and use a tool like `sed(1)` to change all occurrences (see the example at the end of this README).

The fict_db is written out every so often, at the time of writing around every 1000 computations. This can be changed in the code, but it is not a CLI parameter at the moment. The bigger the number, the more work you can potentially lose if your system crashes; the smaller the number, the more churn on your disk.

If you notice any bugs, feel free to email me.

**Note** I use a Linux desktop, so that's what I run this on. Some adapting may be needed to get it to work on Macs.

## Installing

`pip install Fict`

You can also clone the code from [github](https://github.com/vhp/FICT) and run the fict command.

## Publishing to PyPI

```
pip install twine
python3 setup.py sdist
twine upload dist/*
```
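## Example: switching hash tools

A rough sketch of the `sed(1)` edit mentioned above. The exact layout of your fict_db may differ, so open the file first and treat the `sha512sum`/`b2sum` strings below as placeholders for whatever your database actually records.

```
# Back up the database before touching it
cp ~/.fict/fict_db ~/.fict/fict_db.bak

# Replace every occurrence of the old tool name with the new one, in place
sed -i 's/sha512sum/b2sum/g' ~/.fict/fict_db
```

After editing, re-run `fict compute` and `fict check` with the matching `--hash-tool` so the stored entries and the tool in use agree again.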
PypiClean
/Cuckoo-2.0.7a1.tar.gz/Cuckoo-2.0.7a1/cuckoo/data/analyzer/windows/modules/auxiliary/human.py
import random
import re
import logging
import threading

from lib.common.abstracts import Auxiliary
from lib.common.defines import (
    KERNEL32, USER32, WM_GETTEXT, WM_GETTEXTLENGTH, WM_CLOSE, BM_CLICK,
    EnumWindowsProc, EnumChildProc, create_unicode_buffer
)

log = logging.getLogger(__name__)

RESOLUTION = {
    "x": USER32.GetSystemMetrics(0),
    "y": USER32.GetSystemMetrics(1)
}

def click(hwnd):
    USER32.SetForegroundWindow(hwnd)
    KERNEL32.Sleep(1000)
    USER32.SendMessageW(hwnd, BM_CLICK, 0, 0)

def foreach_child(hwnd, lparam):
    # List of partial buttons labels to click.
    buttons = [
        "yes", "oui",
        "ok",
        "i accept",
        "next", "suivant",
        "new", "nouveau",
        "install", "installer",
        "file", "fichier",
        "run", "start", "marrer", "cuter",
        "extract",
        "i agree", "accepte",
        "enable", "activer", "accord", "valider",
        "don't send", "ne pas envoyer",
        "don't save",
        "continue", "continuer",
        "personal", "personnel",
        "scan", "scanner",
        "unzip", "dezip",
        "open", "ouvrir",
        "close the program",
        "execute", "executer",
        "launch", "lancer",
        "save", "sauvegarder",
        "download", "load", "charger",
        "end", "fin", "terminer", "later",
        "finish", "end",
        "allow access",
        "remind me later",
        "save", "sauvegarder"
    ]

    # List of complete button texts to click. These take precedence.
    buttons_complete = [
        "&Ja",  # E.g., Dutch Office Word 2013.
    ]

    # List of buttons labels to not click.
    dontclick = [
        "don't run",
        "i do not accept"
    ]

    classname = create_unicode_buffer(50)
    USER32.GetClassNameW(hwnd, classname, 50)

    # Check if the class of the child is button.
    if "button" in classname.value.lower():
        # Get the text of the button.
        length = USER32.SendMessageW(hwnd, WM_GETTEXTLENGTH, 0, 0)
        text = create_unicode_buffer(length + 1)
        USER32.SendMessageW(hwnd, WM_GETTEXT, length + 1, text)

        if text.value in buttons_complete:
            log.info("Found button %r, clicking it" % text.value)
            click(hwnd)
            return True

        # Check if the button is set as "clickable" and click it.
        textval = text.value.replace("&", "").lower()
        for button in buttons:
            if button in textval:
                for btn in dontclick:
                    if btn in textval:
                        break
                else:
                    log.info("Found button %r, clicking it" % text.value)
                    click(hwnd)

    # Recursively search for childs (USER32.EnumChildWindows).
    return True

# Callback procedure invoked for every enumerated window.
# Purpose is to close any office window
def get_office_window(hwnd, lparam):
    if USER32.IsWindowVisible(hwnd):
        text = create_unicode_buffer(1024)
        USER32.GetWindowTextW(hwnd, text, 1024)
        # TODO Would " - Microsoft (Word|Excel|PowerPoint)$" be better?
        if re.search("- (Microsoft|Word|Excel|PowerPoint)", text.value):
            USER32.SendNotifyMessageW(hwnd, WM_CLOSE, None, None)
            log.info("Closed Office window.")
    return True

# Callback procedure invoked for every enumerated window.
def foreach_window(hwnd, lparam):
    # If the window is visible, enumerate its child objects, looking
    # for buttons.
    if USER32.IsWindowVisible(hwnd):
        USER32.EnumChildWindows(hwnd, EnumChildProc(foreach_child), 0)
    return True

def move_mouse():
    x = random.randint(0, RESOLUTION["x"])
    y = random.randint(0, RESOLUTION["y"])

    # Originally was:
    # USER32.mouse_event(0x8000, x, y, 0, None)
    # Changed to SetCursorPos, since using GetCursorPos would not detect
    # the mouse events. This actually moves the cursor around which might
    # cause some unintended activity on the desktop. We might want to make
    # this feature optional.
    USER32.SetCursorPos(x, y)

def click_mouse():
    # Move mouse to top-middle position.
    USER32.SetCursorPos(RESOLUTION["x"] / 2, 0)
    # Mouse down.
    USER32.mouse_event(2, 0, 0, 0, None)
    KERNEL32.Sleep(50)
    # Mouse up.
    USER32.mouse_event(4, 0, 0, 0, None)

class Human(threading.Thread, Auxiliary):
    """Human after all"""

    def __init__(self, options={}, analyzer=None):
        threading.Thread.__init__(self)
        Auxiliary.__init__(self, options, analyzer)
        self.do_run = True

    def stop(self):
        self.do_run = False

    def run(self):
        seconds = 0

        # Global disable flag.
        if "human" in self.options:
            self.do_move_mouse = int(self.options["human"])
            self.do_click_mouse = int(self.options["human"])
            self.do_click_buttons = int(self.options["human"])
        else:
            self.do_move_mouse = True
            self.do_click_mouse = True
            self.do_click_buttons = True

        # Per-feature enable or disable flag.
        if "human.move_mouse" in self.options:
            self.do_move_mouse = int(self.options["human.move_mouse"])

        if "human.click_mouse" in self.options:
            self.do_click_mouse = int(self.options["human.click_mouse"])

        if "human.click_buttons" in self.options:
            self.do_click_buttons = int(self.options["human.click_buttons"])

        while self.do_run:
            if seconds and not seconds % 60:
                USER32.EnumWindows(EnumWindowsProc(get_office_window), 0)

            if self.do_click_mouse:
                click_mouse()

            if self.do_move_mouse:
                move_mouse()

            if self.do_click_buttons:
                USER32.EnumWindows(EnumWindowsProc(foreach_window), 0)

            KERNEL32.Sleep(1000)
            seconds += 1
PypiClean
/AutoWave-0.5.tar.gz/AutoWave-0.5/README.md
# AutoWave - Automatic Audio Classification Library

<p align="center"><img src="https://github.com/TechyNilesh/AutoWave/blob/main/logo/autowave_logo.png?raw=true" alt="Brain+Machine"></p>

**AutoWave** is a complete automatic audio classification library with additional features such as plotting, audio augmentation, data loading, etc.

![Generic badge](https://img.shields.io/badge/AutoWave-v1-orange.svg) ![Generic badge](https://img.shields.io/badge/Artificial_Intelligence-Advance-green.svg) ![Generic badge](https://img.shields.io/badge/Python-v3-blue.svg) ![Generic badge](https://img.shields.io/badge/pip-v3-red.svg) [![Downloads](https://static.pepy.tech/personalized-badge/autowave?period=total&units=none&left_color=grey&right_color=blue&left_text=Downloads)](https://pepy.tech/project/autowave)

<h2><img src="https://cdn2.iconfinder.com/data/icons/artificial-intelligence-6/64/ArtificialIntelligence9-512.png" alt="Brain+Machine" height="38" width="38"> Creators </h2>

#### [Nilesh Verma](https://nileshverma.com "Nilesh Verma")
#### [Satyajit Pattnaik](https://github.com/pik1989 "Satyajit Pattnaik")
#### [Kalash Jindal](https://github.com/erickeagle "Kalash Jindal")

## Features

- Fast model training
- Augmentation can be done easily on one or multiple files, and augmentation can also be used while training a model
- Plotting of single or multiple files can be done by running one function
- Multiple models can be trained simultaneously

## Installation

This library is compatible with both *Windows* and *Linux* systems; you can simply use the **pip command** to install it:

```shell
pip install AutoWave
```

## How To Use?

We have provided the **Demo** folder in the *GitHub repository*; you can find examples in both **.py** and **.ipynb** files. The following is the ideal flow of the code:

### 1. Importing the Important Classes

There are several important classes you need to load: **AudioPlayer** - for playing audio, **audioConversion** - for converting a single audio file, **read_file_properties** - for getting the info of any wave file, **augumentOneFile** - for augmenting any single file, **augumentFolder** - for augmenting a whole folder, **plotOneFile** - for plotting one file, **plotMultipleFile** - for plotting multiple files, **gen_data_from_folder** - for generating the data for classification, and **Auto_Audio_Classification** - for training the model.

```python
# Importing the proper classes
from AutoWave.audio_player import AudioPlayer
from AutoWave.audio_conversion import audioConversion
from AutoWave.WaveInfo import read_file_properties
from AutoWave.augumentor import augumentOneFile,augumentFolder
from AutoWave.plotting import plotOneFile,plotMultipleFile
from AutoWave.DataLoad import gen_data_from_folder
from AutoWave.Auto_Audio_Classification import Auto_Audio_Classification
```

### 2. For playing the audio

For playing audio you can use **AudioPlayer**, which takes the path of the file as input.

```python
AudioPlayer('Test_Data/car_horn/107090-1-1-0.wav')
```

### 3. For converting the audio format

For converting the format of a single file, use **audioConversion**, which takes as input the filename, the input format, and the output format. It returns the converted file in the same path.

```python
audioConversion('test.mp3','mp3','wav')
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img1.png?raw=true" />

### 4. For augmenting the audio file

For augmenting a single file, use **augumentOneFile**, which takes the audio file path and the output path. By default it augments one file 10 times by shifting, adding noise, changing pitch, and stretching. Any of these can be turned off by setting it to False.

```python
augumentOneFile('test.wav','augumented_data',aug_times=10,noise=True,shift=True,stretch=True,pitch=True)
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img2.png?raw=true" />

For a folder, use **augumentFolder**, which takes a dataframe generated using **gen_data_from_folder** and an output path. By default it augments each file 10 times by shifting, adding noise, changing pitch, and stretching, and any of these can be turned off by setting it to False (see the sketch at the end of this README).

### 5. Reading the Info of the Wave file

For reading a wave file, **read_file_properties** takes the path of the audio file and returns the filename, number of channels, sample rate, and bit depth.

```python
read_file_properties('test.wav')
```

### 6. For plotting the audio file

For a single file, **plotOneFile** has different functions for plotting, like **time_fre_domain** for plotting in the time and frequency domains, **fre_doman** for plotting in the frequency domain, **time_domain** for plotting in the time domain, **waveplot** for plotting the waveplot, **spectrogram** for plotting the spectrogram, **spectral_centroid** for plotting the spectral centroid, **spectraal_rolloff** for plotting the spectral rolloff, and **spectral_brandwidth** for plotting the spectral bandwidth.

For a folder, **plotMultipleFile** takes a dataframe generated using **gen_data_from_folder** and offers the same plotting functions: **time_fre_domain**, **fre_doman**, **time_domain**, **waveplot**, **spectrogram**, **spectral_centroid**, **spectraal_rolloff**, and **spectral_brandwidth**.

```python
plotOneFile.time_freq_domain('Test.wav')
plotMultipleFile.time_freq_domain(data)
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img3.png?raw=true" />

### 7. Loading the data

For loading the data, **gen_data_from_folder** takes as input a folder containing the different classes of audio files in separate subfolders, and it returns a dataframe with the path of each file and its label.

```python
dataset_dir = 'Test_Data/'
data = gen_data_from_folder(dataset_dir,get_dataframe=True,label_folder=True)
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img4.png?raw=true" />

### 8. For Training the model

For training the model we use **Auto_Audio_Classification**, which takes as input the size of the test data. We can also augment the data by setting aug_data = True; if we want the trained model we can set get_prediction_model, which will return the best model with the highest accuracy; and by setting result_dataframe = True it will return the results in dataframe format.

```python
model = Auto_Audio_Classification(test_size=0.2,label_encoding=True,result_dataframe=False,aug_data=True)
model.fit(data)
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img5.png?raw=true" />

<img src="https://github.com/Autowave/Autowave/blob/main/img/img6.png?raw=true" />

<img src="https://github.com/Autowave/Autowave/blob/main/img/img7.png?raw=true" />

```python
audio_file = 'Test_Data/class_2/test1.wav'
model.predict(audio_file)
```

<img src="https://github.com/Autowave/Autowave/blob/main/img/img8.jpeg?raw=true" />
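### Augmenting a whole folder (sketch)

A rough sketch of the folder augmentation referenced in section 4. The keyword arguments of **augumentFolder** are assumed here to mirror those of **augumentOneFile**; check the Demo folder for the exact signature.

```python
from AutoWave.DataLoad import gen_data_from_folder
from AutoWave.augumentor import augumentFolder

# Build the path/label dataframe from the dataset directory (one subfolder per class),
# exactly as in section 7 above.
data = gen_data_from_folder('Test_Data/', get_dataframe=True, label_folder=True)

# Augment every listed file into 'augumented_data'.
# The flags below are assumed to mirror augumentOneFile.
augumentFolder(data, 'augumented_data', aug_times=10, noise=True, shift=True, stretch=True, pitch=True)
```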
PypiClean
/AutoAWSMFA-0.0.4.tar.gz/AutoAWSMFA-0.0.4/auto_aws_mfa/__main__.py
import boto3
import getpass
import argparse
import subprocess
import configparser

from boto3 import client
from . import __version__
from .mfa import handle_mfa
from os import getenv, path, name
from datetime import datetime
from .session import handle_session
from sys import exit, stderr, argv
from botocore.exceptions import ClientError
from .util import check_duration, check_custom_token, get_credentials_path


def main() -> int:
    argument_parser = argparse.ArgumentParser(
        prog='AutoAWSMFA',
        description='Auto update your AWS CLI MFA session token'
    )
    argument_parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {}'.format(__version__),
        help='show the version number and exit'
    )
    argument_parser.add_argument(
        '-p', '--profile',
        help='AWS profile to store the session token. Default looks for "AWS_PROFILE"',
        default=getenv('AWS_PROFILE')
    )
    argument_parser.add_argument(
        '--credential-path',
        help='path to the aws credentials file',
        default=get_credentials_path()
    )
    argument_parser.add_argument(
        '--arn',
        help='AWS ARN from the IAM console (Security credentials -> Assigned MFA device). This is saved to your .aws/credentials file'
    )

    subparser = argument_parser.add_subparsers(
        title='subcommand',
        description='The subcommand to use among this suite of tools',
        dest='picked_cmd',
        help='Select a subcommand to execute'
    )
    manage_mfa = subparser.add_parser(
        'mfa',
        description='Manage MFA',
        help='Add MFA secret configuration key or get a token for registered MFA secret'
    )
    manage_session = subparser.add_parser(
        'session',
        description='Manage AWS CLI MFA session',
        help='Start AWS MFA session, use stored token or provide custom token "--custom-token"'
    )

    mfa_group = manage_mfa.add_mutually_exclusive_group(required=True)
    mfa_group.add_argument(
        '--add-secret',
        action='store_true',
        help='Add MFA secret configuration key and generate session tokens'
    )
    mfa_group.add_argument(
        '--get-token',
        action='store_true',
        help='Display token, valid for 30 sec. Token added to clipboard to be used for GUI console login'
    )
    mfa_group.add_argument(
        '--del-secret',
        action='store_true',
        help='Delete stored secret'
    )
    manage_mfa.add_argument(
        '--non-aws',
        action='store_true',
        help='Store non aws secret and generate MFA token'
    )

    session_group = manage_session.add_mutually_exclusive_group(required=True)
    session_group.add_argument(
        '--start',
        action='store_true',
        help='Start AWS MFA session for the selected profile'
    )
    manage_session.add_argument(
        '--duration',
        help='Specify session token duration in minutes before it expires. Duration limitation as per AWS is minimum 15 and maximum 720 minutes, default is 720 minutes/12 Hrs',
        type=check_duration,
        default=720
    )
    manage_session.add_argument(
        '--custom-token',
        help='Provide 6 digit token from your MFA devices',
        type=check_custom_token
    )

    if len(argv) == 1:
        argument_parser.print_help(stderr)
        return False

    parsed_args = argument_parser.parse_args()

    # Make sure one of the sub-commands is specified
    if parsed_args.picked_cmd is None:
        argument_parser.print_help(stderr)
        return False

    config = configparser.ConfigParser()
    config.read(parsed_args.credential_path)
    permanent_profile = '{}-permanent'.format(parsed_args.profile)

    # Check if the profile exists in the credentials file
    if parsed_args.profile not in config.sections():
        profile_name = permanent_profile
    else:
        profile_name = parsed_args.profile

    # We want to skip the profile check if the picked command is mfa and non-aws
    if parsed_args.picked_cmd == 'session':
        non_aws = False
    elif parsed_args.picked_cmd == 'mfa':
        non_aws = parsed_args.non_aws

    if (not non_aws) and (profile_name not in config.sections()):
        print('Profile "{}" not found in ~/.aws/credentials or in env variable AWS_PROFILE.\nSet the profile (--profile <name>) to any one from available list - \
        \n\n {}'.format(parsed_args.profile, config.sections()))
        return False

    # Manage non aws MFA token
    if parsed_args.picked_cmd == 'mfa' and parsed_args.non_aws:
        return handle_mfa(parsed_args)

    # This will add suffix '-permanent' to the original profile
    if parsed_args.profile in config.sections() and permanent_profile not in config.sections():
        config.add_section(permanent_profile)
        for key in config.items(parsed_args.profile):
            config.set(permanent_profile, key[0], key[1])
        with open(parsed_args.credential_path, 'w') as configfile:
            config.write(configfile)
        config.read(parsed_args.credential_path)

    # Establish STS connection with the profile's access key and secret
    try:
        sts_client = boto3.client(
            'sts',
            aws_access_key_id=config.get(
                permanent_profile, 'aws_access_key_id'),
            aws_secret_access_key=config.get(
                permanent_profile, 'aws_secret_access_key')
        )
    except Exception as e:
        print(e)
        return False

    # Get AWS MFA ARN to be used as service name in keyring and for getting a session token for MFA
    if parsed_args.arn is None:
        if 'aws_arn_mfa' in config[profile_name]:
            parsed_args.arn = config[profile_name]['aws_arn_mfa']
        else:
            # Generate user_arn and replace "user" with "mfa"
            try:
                arn_result = sts_client.get_caller_identity()
            except ClientError as e:
                print(e)
                return False
            if arn_result['ResponseMetadata']['HTTPStatusCode'] != 200:
                argument_parser.error(
                    arn_result['ResponseMetadata']['HTTPStatusCode'])
            # This should result in mfa_arn
            # "arn:aws:iam::<AWS account ID>:user/<user name>" ==> "arn:aws:iam::<AWS account ID>:mfa/<user name>"
            parsed_args.arn = arn_result['Arn'].replace(':user/', ':mfa/', 1)

    # Handle MFA or SESSION based on picked command
    if parsed_args.picked_cmd == 'mfa':
        return handle_mfa(parsed_args)
    elif parsed_args.picked_cmd == 'session':
        return handle_session(parsed_args, sts_client, config)

    return 64


if __name__ == '__main__':
    exit(main())
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/date/posix.js.uncompressed.js
define("dojox/date/posix", ["dojo/_base/kernel", "dojo/date", "dojo/date/locale", "dojo/string", "dojo/cldr/supplemental"], function(dojo, dojoDate, dojoDateLocale, dojoString, dojoCldrSupplemental){ dojo.getObject("date.posix", true, dojox); dojox.date.posix.strftime = function(/*Date*/dateObject, /*String*/format, /*String?*/locale){ // // summary: // Formats the date object using the specifications of the POSIX strftime function // // description: // see http://www.opengroup.org/onlinepubs/007908799/xsh/strftime.html // zero pad var padChar = null; var _ = function(s, n){ return dojoString.pad(s, n || 2, padChar || "0"); }; var bundle = dojoDateLocale._getGregorianBundle(locale); var $ = function(property){ switch(property){ case "a": // abbreviated weekday name according to the current locale return dojoDateLocale.getNames('days', 'abbr', 'format', locale)[dateObject.getDay()]; case "A": // full weekday name according to the current locale return dojoDateLocale.getNames('days', 'wide', 'format', locale)[dateObject.getDay()]; case "b": case "h": // abbreviated month name according to the current locale return dojoDateLocale.getNames('months', 'abbr', 'format', locale)[dateObject.getMonth()]; case "B": // full month name according to the current locale return dojoDateLocale.getNames('months', 'wide', 'format', locale)[dateObject.getMonth()]; case "c": // preferred date and time representation for the current // locale return dojoDateLocale.format(dateObject, {formatLength: 'full', locale: locale}); case "C": // century number (the year divided by 100 and truncated // to an integer, range 00 to 99) return _(Math.floor(dateObject.getFullYear()/100)); case "d": // day of the month as a decimal number (range 01 to 31) return _(dateObject.getDate()); case "D": // same as %m/%d/%y return $("m") + "/" + $("d") + "/" + $("y"); case "e": // day of the month as a decimal number, a single digit is // preceded by a space (range ' 1' to '31') if(padChar == null){ padChar = " "; } return _(dateObject.getDate()); case "f": // month as a decimal number, a single digit is // preceded by a space (range ' 1' to '12') if(padChar == null){ padChar = " "; } return _(dateObject.getMonth()+1); case "g": // like %G, but without the century. break; case "G": // The 4-digit year corresponding to the ISO week number // (see %V). This has the same format and value as %Y, // except that if the ISO week number belongs to the // previous or next year, that year is used instead. 
console.warn("unimplemented modifier 'G'"); break; case "F": // same as %Y-%m-%d return $("Y") + "-" + $("m") + "-" + $("d"); case "H": // hour as a decimal number using a 24-hour clock (range // 00 to 23) return _(dateObject.getHours()); case "I": // hour as a decimal number using a 12-hour clock (range // 01 to 12) return _(dateObject.getHours() % 12 || 12); case "j": // day of the year as a decimal number (range 001 to 366) return _(dojoDateLocale._getDayOfYear(dateObject), 3); case "k": // Hour as a decimal number using a 24-hour clock (range // 0 to 23 (space-padded)) if(padChar == null){ padChar = " "; } return _(dateObject.getHours()); case "l": // Hour as a decimal number using a 12-hour clock (range // 1 to 12 (space-padded)) if(padChar == null){ padChar = " "; } return _(dateObject.getHours() % 12 || 12); case "m": // month as a decimal number (range 01 to 12) return _(dateObject.getMonth() + 1); case "M": // minute as a decimal number return _(dateObject.getMinutes()); case "n": return "\n"; case "p": // either `am' or `pm' according to the given time value, // or the corresponding strings for the current locale return bundle['dayPeriods-format-wide-' + (dateObject.getHours() < 12 ? "am" : "pm")]; case "r": // time in a.m. and p.m. notation return $("I") + ":" + $("M") + ":" + $("S") + " " + $("p"); case "R": // time in 24 hour notation return $("H") + ":" + $("M"); case "S": // second as a decimal number return _(dateObject.getSeconds()); case "t": return "\t"; case "T": // current time, equal to %H:%M:%S return $("H") + ":" + $("M") + ":" + $("S"); case "u": // weekday as a decimal number [1,7], with 1 representing // Monday return String(dateObject.getDay() || 7); case "U": // week number of the current year as a decimal number, // starting with the first Sunday as the first day of the // first week return _(dojoDateLocale._getWeekOfYear(dateObject)); case "V": // week number of the year (Monday as the first day of the // week) as a decimal number [01,53]. If the week containing // 1 January has four or more days in the new year, then it // is considered week 1. Otherwise, it is the last week of // the previous year, and the next week is week 1. return _(dojox.date.posix.getIsoWeekOfYear(dateObject)); case "W": // week number of the current year as a decimal number, // starting with the first Monday as the first day of the // first week return _(dojoDateLocale._getWeekOfYear(dateObject, 1)); case "w": // day of the week as a decimal, Sunday being 0 return String(dateObject.getDay()); case "x": // preferred date representation for the current locale // without the time return dojoDateLocale.format(dateObject, {selector:'date', formatLength: 'full', locale:locale}); case "X": // preferred time representation for the current locale // without the date return dojoDateLocale.format(dateObject, {selector:'time', formatLength: 'full', locale:locale}); case "y": // year as a decimal number without a century (range 00 to // 99) return _(dateObject.getFullYear()%100); case "Y": // year as a decimal number including the century return String(dateObject.getFullYear()); case "z": // time zone or name or abbreviation var timezoneOffset = dateObject.getTimezoneOffset(); return (timezoneOffset > 0 ? 
"-" : "+") + _(Math.floor(Math.abs(timezoneOffset)/60)) + ":" + _(Math.abs(timezoneOffset)%60); case "Z": // time zone or name or abbreviation return dojoDate.getTimezoneName(dateObject); case "%": return "%"; } }; // parse the formatting string and construct the resulting string var string = "", i = 0, index = 0, switchCase = null; while ((index = format.indexOf("%", i)) != -1){ string += format.substring(i, index++); // inspect modifier flag switch (format.charAt(index++)) { case "_": // Pad a numeric result string with spaces. padChar = " "; break; case "-": // Do not pad a numeric result string. padChar = ""; break; case "0": // Pad a numeric result string with zeros. padChar = "0"; break; case "^": // Convert characters in result string to uppercase. switchCase = "upper"; break; case "*": // Convert characters in result string to lowercase switchCase = "lower"; break; case "#": // Swap the case of the result string. switchCase = "swap"; break; default: // no modifier flag so decrement the index padChar = null; index--; break; } // toggle case if a flag is set var property = $(format.charAt(index++)); switch (switchCase){ case "upper": property = property.toUpperCase(); break; case "lower": property = property.toLowerCase(); break; case "swap": // Upper to lower, and versey-vicea var compareString = property.toLowerCase(); var swapString = ''; var ch = ''; for (var j = 0; j < property.length; j++){ ch = property.charAt(j); swapString += (ch == compareString.charAt(j)) ? ch.toUpperCase() : ch.toLowerCase(); } property = swapString; break; default: break; } switchCase = null; string += property; i = index; } string += format.substring(i); return string; // String }; dojox.date.posix.getStartOfWeek = function(/*Date*/dateObject, /*Number*/firstDay){ // summary: Return a date object representing the first day of the given // date's week. if(isNaN(firstDay)){ firstDay = dojoCldrSupplemental.getFirstDayOfWeek ? dojoCldrSupplemental.getFirstDayOfWeek() : 0; } var offset = firstDay; if(dateObject.getDay() >= firstDay){ offset -= dateObject.getDay(); }else{ offset -= (7 - dateObject.getDay()); } var date = new Date(dateObject); date.setHours(0, 0, 0, 0); return dojoDate.add(date, "day", offset); // Date } dojox.date.posix.setIsoWeekOfYear = function(/*Date*/dateObject, /*Number*/week){ // summary: Set the ISO8601 week number of the given date. // The week containing January 4th is the first week of the year. // week: // can be positive or negative: -1 is the year's last week. if(!week){ return dateObject; } var currentWeek = dojox.date.posix.getIsoWeekOfYear(dateObject); var offset = week - currentWeek; if(week < 0){ var weeks = dojox.date.posix.getIsoWeeksInYear(dateObject); offset = (weeks + week + 1) - currentWeek; } return dojoDate.add(dateObject, "week", offset); // Date } dojox.date.posix.getIsoWeekOfYear = function(/*Date*/dateObject){ // summary: Get the ISO8601 week number of the given date. // The week containing January 4th is the first week of the year. 
// See http://en.wikipedia.org/wiki/ISO_week_date var weekStart = dojox.date.posix.getStartOfWeek(dateObject, 1); var yearStart = new Date(dateObject.getFullYear(), 0, 4); // January 4th yearStart = dojox.date.posix.getStartOfWeek(yearStart, 1); var diff = weekStart.getTime() - yearStart.getTime(); if(diff < 0){ return dojox.date.posix.getIsoWeeksInYear(weekStart); } // Integer return Math.ceil(diff / 604800000) + 1; // Integer } dojox.date.posix.getIsoWeeksInYear = function(/*Date*/dateObject) { // summary: Determine the number of ISO8601 weeks in the year of the given // date. Most years have 52 but some have 53. // See http://www.phys.uu.nl/~vgent/calendar/isocalendar_text3.htm function p(y) { return y + Math.floor(y/4) - Math.floor(y/100) + Math.floor(y/400); } var y = dateObject.getFullYear(); return ( p(y) % 7 == 4 || p(y-1) % 7 == 3 ) ? 53 : 52; // Integer } return dojox.date.posix; });
PypiClean
/FairNLP-5.1.0.tar.gz/FairNLP-5.1.0/FNLP/LanguageEngines/Categorizer/MainCategories.py
extended_stop_words = ["with", "more", "s", "has", "have", "they", "this", "their", "was", "not", "said", "also", "most", "but", "from", "whether", "so", "ways", "if", "were", "have", "my", "being", "re", "what", "where", "many", "other", "t", "i", "than", "had", "who", "amoung", "get", "say", "could", "way"] # meta_names = FILE.load_dict_from_file("meta_names") # -------------------------------------> CATEGORIES AND THEIR KEY-TERMS <-------------------------------------------- # MAX_WEIGHT = 200 HIGH_WEIGHT = 130 MIDDLE_WEIGHT = 75 LOW_WEIGHT = 25 MINI_WEIGHT = 10 NANO_WEIGHT = 3 class MainCategories: # -> If any lists are added, add here. keys = ["search_terms", "weighted_terms", "rss_feeds", "secondary_weighted_terms", "url_sources"] def get_var(self, var_name): """ GETTER HELPER """ return self.__getattribute__(var_name) @staticmethod def combine_var_name(topic, term): return topic + "_" + term #################################################################################################################### # -> 0. General <- #################################################################################################################### general = "General" general_search_terms = ["business", "government", "federal government", "white house", "politics", "global news", "news", "united states economy", "global economy", "president"] general_weighted_terms = {"thousand": LOW_WEIGHT, "million": LOW_WEIGHT, "billion": MIDDLE_WEIGHT, "trillion": MIDDLE_WEIGHT, "space": MINI_WEIGHT, "mainstream": MINI_WEIGHT, "property": NANO_WEIGHT, "network": MINI_WEIGHT, "future": LOW_WEIGHT, "crowdfunding": NANO_WEIGHT, "economy": MINI_WEIGHT, "market valuation": MINI_WEIGHT, "Metrics": NANO_WEIGHT, "market action": NANO_WEIGHT, "dominance": NANO_WEIGHT, "assets": MINI_WEIGHT, "Drops": NANO_WEIGHT, "Dips": NANO_WEIGHT, "value proposition": MINI_WEIGHT, "fluctuates": NANO_WEIGHT, "portfolio": NANO_WEIGHT, "crash": NANO_WEIGHT, "optimistic": NANO_WEIGHT, "projections": NANO_WEIGHT, "hedge fund": NANO_WEIGHT, "hedgefund": NANO_WEIGHT, "holders": NANO_WEIGHT, "holder": NANO_WEIGHT, "owns": NANO_WEIGHT, "fund": NANO_WEIGHT, "game": NANO_WEIGHT, "gaming": NANO_WEIGHT, "market capitalization": LOW_WEIGHT, "developer": NANO_WEIGHT, "development": NANO_WEIGHT, "develop": NANO_WEIGHT, "engineer": NANO_WEIGHT, "engineering": NANO_WEIGHT, "4g": MINI_WEIGHT, "5g": MINI_WEIGHT, "6g": MINI_WEIGHT, "fiber": MINI_WEIGHT, "optical": MINI_WEIGHT, "fiber optic": MINI_WEIGHT, "business": NANO_WEIGHT, "businesses": NANO_WEIGHT, "meeting": NANO_WEIGHT, "meetings": NANO_WEIGHT, "committee": NANO_WEIGHT, "fed": MINI_WEIGHT, "the fed": MINI_WEIGHT, "federal": NANO_WEIGHT, "federal reserve": LOW_WEIGHT, "bull market": NANO_WEIGHT, "bear market": NANO_WEIGHT, "inflation": MINI_WEIGHT, "deflation": MINI_WEIGHT, "plunges": MINI_WEIGHT, "leaked": MIDDLE_WEIGHT, "announcement": MINI_WEIGHT} general_rss_feeds = ["http://feeds.marketwatch.com/marketwatch/topstories/", "http://www.infoworld.com/index.rss", "http://www.itworldcanada.com/feed", "http://www.macworld.com/index.rss", "http://www.roadtovr.com/feed",] #################################################################################################################### # -> 3. 
Weed <- #################################################################################################################### weed = "Weed" weed_search_terms = ['legalization', 'cannabis stock', 'cannabinoid-therapeutics', 'cannabinoid', 'medicinal marijuana', 'cannabis investors', 'pot stock', 'cannabis legalization', 'federal decriminalization', 'marijuana stocks', 'marijuana legalization bills', 'cannabis market', 'cannabis', 'weed', 'marijuana'] weed_weighted_terms = {"legalization": MIDDLE_WEIGHT, "decriminalization": MIDDLE_WEIGHT, "cannabinoid": MAX_WEIGHT, "cannabis": MAX_WEIGHT, "marijuana": MAX_WEIGHT, "pot": LOW_WEIGHT, "weed": MAX_WEIGHT, "stock": NANO_WEIGHT, "therapeutics": LOW_WEIGHT, "medicinal": HIGH_WEIGHT} weed_rss_feeds = [] #################################################################################################################### # -> 3. Stocks <- #################################################################################################################### stocks = "Stocks" stocks_search_terms = ['merge', 'merger', 'acquisition', 'stepping down', 'IPO', 'partnering', 'stocks', 'economy'] stocks_weighted_terms = {'merge': MINI_WEIGHT, 'merger': MIDDLE_WEIGHT, 'acquisition': HIGH_WEIGHT, 'stepping down': MIDDLE_WEIGHT, 'IPO': MIDDLE_WEIGHT, 'partnering': MIDDLE_WEIGHT, 'ROI': LOW_WEIGHT, 'Incentivize': LOW_WEIGHT, 'Monetize': MIDDLE_WEIGHT, 'Deliverable': MIDDLE_WEIGHT, 'Margin': MIDDLE_WEIGHT, 'Accounts Payable': MIDDLE_WEIGHT, 'Accounts Receivable': LOW_WEIGHT, 'Capital': LOW_WEIGHT, 'Fixed Costs': MIDDLE_WEIGHT, 'Variable Costs': MIDDLE_WEIGHT, 'Gross': MINI_WEIGHT, 'Net': MINI_WEIGHT, 'Benchmarking': LOW_WEIGHT, 'KPI': LOW_WEIGHT, 'Metrics': LOW_WEIGHT, 'Performance Review': MIDDLE_WEIGHT, 'R&D': MIDDLE_WEIGHT, 'B2B': MIDDLE_WEIGHT, 'B2C': LOW_WEIGHT, 'B2G': LOW_WEIGHT, 'Scalable': LOW_WEIGHT, 'Responsive Design': MIDDLE_WEIGHT, 'Core Competency': LOW_WEIGHT, 'Niche Market': MIDDLE_WEIGHT, 'Marketing': LOW_WEIGHT, 'Market Research': LOW_WEIGHT, 'Market Penetration': MIDDLE_WEIGHT, 'Inbound Marketing': MIDDLE_WEIGHT, 'Assets': MIDDLE_WEIGHT, 'Liabilities': MIDDLE_WEIGHT, 'Revenue': LOW_WEIGHT, 'Expenses': LOW_WEIGHT, 'Balance sheet': MIDDLE_WEIGHT, 'Net profit': MIDDLE_WEIGHT, 'Net loss': HIGH_WEIGHT, 'Profit margin': MIDDLE_WEIGHT, 'Cash flow': MIDDLE_WEIGHT, 'cash flow': MIDDLE_WEIGHT, 'Depreciation': LOW_WEIGHT, 'Fixed Asset': LOW_WEIGHT, 'Gross Profit': HIGH_WEIGHT, 'Intangible Asset': MINI_WEIGHT, 'Liquidity': LOW_WEIGHT, 'Profit & Loss': MIDDLE_WEIGHT, 'Shareholders Equity': MAX_WEIGHT, 'Annual Percentage': MINI_WEIGHT, 'Appraisal': MINI_WEIGHT, 'Balloon Loan': MIDDLE_WEIGHT, 'Bankruptcy': MIDDLE_WEIGHT, 'Bootstrapping': LOW_WEIGHT, 'Business Credit': MIDDLE_WEIGHT, 'Collateral': MINI_WEIGHT, 'Credit Limit': MIDDLE_WEIGHT, 'Debt Consolidation': MIDDLE_WEIGHT, "sell off": MIDDLE_WEIGHT, "target market": MINI_WEIGHT, "stockholder": MAX_WEIGHT, "stockholders": MAX_WEIGHT, "surges": LOW_WEIGHT, "surge": MINI_WEIGHT, "market capitalization": HIGH_WEIGHT, "Change management": HIGH_WEIGHT, "Lay off": HIGH_WEIGHT, "dow jones": MAX_WEIGHT, "dow": MIDDLE_WEIGHT, "s&p500": MAX_WEIGHT, "nasdaq": MAX_WEIGHT, "nyse": MAX_WEIGHT, "shareholders": MAX_WEIGHT } stocks_rss_feeds = ["http://feeds.marketwatch.com/marketwatch/topstories/", "https://seekingalpha.com/feed.xml", "https://www.investing.com/rss/news_25.rss", "https://blog.wallstreetsurvivor.com/feed/", "https://stockstotrade.com/blog/feed/", "https://www.cnbc.com/id/LOW_WEIGHT409666/device/rss/rss.html?x=1", 
"http://economictimes.indiatimes.com/markets/stocks/rssfeeds/2146842.cms", "https://mebfaber.com/feed/", "http://welcome.philstockworld.com/feed/", "https://www.reddit.com/r/stocks/.rss"] stocks_reddit_communities = ['wallstreetbets', 'WallStreetbetsELITE', 'deepfuckingvalue', 'stocks', 'investing', 'stockmarket'] stocks_twitter_users = ['mcuban', 'elonmusk', 'PeterLBrandt', 'CNBC', 'SJosephBurns', 'elerianm', 'IBDinvestors', 'jimcramer', 'bespokeinvest', 'wsbmod', 'galgitron', 'thevrsoldier', 'glewmecorp'] #################################################################################################################### # -> X. German <- #################################################################################################################### german = "German" german_search_terms = ['merge', 'merger', 'acquisition', 'stepping down', 'IPO', 'partnering'] german_weighted_terms = {'merge': MINI_WEIGHT, 'merger': MINI_WEIGHT, 'acquisition': MINI_WEIGHT, 'stepping down': MINI_WEIGHT, 'IPO': MINI_WEIGHT, 'partnering': MINI_WEIGHT, "Change management": MINI_WEIGHT, "Lay off": MINI_WEIGHT, "Kündigungen": MINI_WEIGHT, "Freiberufler": MINI_WEIGHT, "Freelancer": MINI_WEIGHT, "Interim": MINI_WEIGHT, "Interim manager": MINI_WEIGHT, "Accounting": MINI_WEIGHT, "Buchführung": MINI_WEIGHT, "Finance": MINI_WEIGHT, "Finanzen": MINI_WEIGHT, "Expansion": MINI_WEIGHT, "Stellenabbau": MINI_WEIGHT, "Stellenaufbau": MINI_WEIGHT, "Einkauf": MINI_WEIGHT, "Michael page": MINI_WEIGHT, "Robert half": MINI_WEIGHT, "Hays": MINI_WEIGHT, "Human Resources": MINI_WEIGHT, "HR": MINI_WEIGHT, "International": MINI_WEIGHT, "Business": MINI_WEIGHT, "Unternehmen": MINI_WEIGHT, 'BILANZ': 2, 'VERMÖGENSWERTE': 2, 'VERBINDLICHKEITEN': NANO_WEIGHT, 'EIGENKAPITAL': NANO_WEIGHT, 'EINNAHMEN': NANO_WEIGHT, 'AUFWAND': NANO_WEIGHT, 'GEWINN': 2, 'NETTOVERLUST': 2, 'KAPITALFLUSSRECHNUNG': NANO_WEIGHT, 'GEWINNMARGE': NANO_WEIGHT, 'VARIABLE KOSTEN': NANO_WEIGHT, 'Geld verdienen': NANO_WEIGHT, 'BARGELDFLUSS': 2, 'FESTKOSTEN': 2, 'Fixkosten': NANO_WEIGHT, 'Netto': NANO_WEIGHT, 'KPI': NANO_WEIGHT, 'Kernkompetenz': NANO_WEIGHT, 'Lieferbar': 2, 'Kreditorenbuchhaltung': 2, 'Debitorenbuchhaltung': NANO_WEIGHT, 'Skalierbar': NANO_WEIGHT, 'Leistungsüberprüfung': NANO_WEIGHT, 'Alleinstellungsmerkmal': NANO_WEIGHT} german_rss_feeds = ["https://www.wiwo.de/contentexport/feed/rss/schlagzeilen", "https://www.wiwo.de/contentexport/feed/rss/themen", "https://www.wiwo.de/contentexport/feed/rss/unternehmen", "https://www.wiwo.de/contentexport/feed/rss/finanzen", "https://rss.dw.com/xml/rss-de-all", "https://rss.dw.com/xml/rss-de-eco", "https://www.wirtschaftskurier.de/titelthema.html?type=9818", "http://www.wirtschaftskurier.de/unternehmen.html?type=9818", "http://www.wirtschaftskurier.de/finanzen.html?type=9818", "http://www.stern.de/feed/standard/wirtschaft/", "https://www.iwkoeln.de/rss/arbeitsmarkt.xml", "https://www.iwkoeln.de/rss/bildung-und-fachkraefte.xml", "https://www.spiegel.de/international/index.rss", "http://newsfeed.zeit.de/index", "https://www.thelocal.de/feeds/rss.php", "https://www.deutschland.de/en/feed-news/rss.xml", "https://itb-berlin-news.com/feed/", "https://munichnow.com/feed/", "http://feeds.t-online.de/rss/politik", "http://munichnow.com/feed", "https://newsfeed.zeit.de/index", "https://itb-berlin-news.com/feed", "https://www.deutschland.de/en/feed-news/rss.xml", "https://www.spiegel.de/international/index.rss", "https://www.thelocal.de/feeds/rss.php" ] german_reddit_communities = ['wallstreetbets', 'WallStreetbetsELITE', 
'deepfuckingvalue', 'stocks', 'investing', 'stockmarket'] german_twitter_users = ['mcuban', 'elonmusk', 'PeterLBrandt', 'CNBC', 'SJosephBurns', 'elerianm', 'IBDinvestors', 'jimcramer', 'bespokeinvest', 'wsbmod', 'galgitron', 'thevrsoldier', 'glewmecorp'] #################################################################################################################### # -> X. Crypto <- #################################################################################################################### crypto = "Crypto" crypto_search_terms = ["decentralized", "crypto", "cryptocurrency", "bitcoin", "blockchain", "smart contract"] crypto_weighted_terms = {"metaverse": LOW_WEIGHT, "roblox": LOW_WEIGHT, "nft": MIDDLE_WEIGHT, "nfts": MIDDLE_WEIGHT, "nft's": MIDDLE_WEIGHT, "decentraland": LOW_WEIGHT, "meta": LOW_WEIGHT, "sandbox": LOW_WEIGHT, "mana": LOW_WEIGHT, "parcel": LOW_WEIGHT, "tokens": LOW_WEIGHT, "fork": NANO_WEIGHT, "hardfork": LOW_WEIGHT, "softfork": LOW_WEIGHT, "hard fork": LOW_WEIGHT, "soft fork": LOW_WEIGHT, "virtual": LOW_WEIGHT, "vr": LOW_WEIGHT, "nonfungible": LOW_WEIGHT, "staking": LOW_WEIGHT, "decentralized": HIGH_WEIGHT, "crypto": MAX_WEIGHT, "cryptocurrency": MAX_WEIGHT, "blockchain": MAX_WEIGHT, "dapp": HIGH_WEIGHT, "opensea": HIGH_WEIGHT, "ethereum": HIGH_WEIGHT, "bitcoin": MAX_WEIGHT, "xrp": HIGH_WEIGHT, "ripple": MIDDLE_WEIGHT, "xlm": LOW_WEIGHT, "web3": MAX_WEIGHT, "pos": LOW_WEIGHT, "poe": LOW_WEIGHT, "iot": NANO_WEIGHT, "btc": HIGH_WEIGHT, "eth": HIGH_WEIGHT, "Nano Collaborative": HIGH_WEIGHT, "nano": HIGH_WEIGHT, "Banano": LOW_WEIGHT, "time wonderland": MIDDLE_WEIGHT, "olympus dao": LOW_WEIGHT, "sushiswap": MAX_WEIGHT, "pancakeswap": HIGH_WEIGHT, "Stablecoins": MINI_WEIGHT, "stablecoin": HIGH_WEIGHT, "altcoin market": HIGH_WEIGHT, "alt market": HIGH_WEIGHT, "alt season": HIGH_WEIGHT, "erc7151": MAX_WEIGHT, "erc1155": MAX_WEIGHT, "smart contract": MAX_WEIGHT, "solidity": MAX_WEIGHT, "truffle": HIGH_WEIGHT, "ganache": HIGH_WEIGHT, "dex": MIDDLE_WEIGHT, "defi": HIGH_WEIGHT, "decentralized exchange": HIGH_WEIGHT, "centralized exchange": LOW_WEIGHT, "ico": HIGH_WEIGHT, "wallet": LOW_WEIGHT, "minable": LOW_WEIGHT, "coin": MINI_WEIGHT, "token": MIDDLE_WEIGHT, "proof of work": MAX_WEIGHT, "proof of stake": MAX_WEIGHT, "ercLOW_WEIGHT": MAX_WEIGHT, "hodl": MIDDLE_WEIGHT, "altcoin": MAX_WEIGHT, "dao": HIGH_WEIGHT, "coinbase": MAX_WEIGHT, "cold wallet": HIGH_WEIGHT, "hot wallet": HIGH_WEIGHT, "gas": LOW_WEIGHT, "initial coin offering": HIGH_WEIGHT, "Satoshi Nakomoto": MAX_WEIGHT, "stable coin": HIGH_WEIGHT, "Vitalik Buterin": MAX_WEIGHT, "Digital Currency": MAX_WEIGHT, "Distributed Ledger Technology": MAX_WEIGHT, "dlt": LOW_WEIGHT, "kyc": LOW_WEIGHT, "know your customer": MIDDLE_WEIGHT, "Nonfungible tokens": MAX_WEIGHT, "Proof of Authority": HIGH_WEIGHT, "Public Ledger": MIDDLE_WEIGHT, "collectible": HIGH_WEIGHT, "collectibles": HIGH_WEIGHT, "NFT Marketplace": MAX_WEIGHT, "nft worlds": MIDDLE_WEIGHT, } crypto_rss_feeds = ["https://cointelegraph.com/rss", "https://coindesk.com/feed", "https://news.bitcoin.com/feed", "https://minergate.com/blog/feed/", "https://coinjournal.net/feed", "https://cryptoinsider.com/feed", "http://www.newsbtc.com/feed", "https://twitter.com/jaxx_io/feed", "https://bitcoinmagazine.com/feed", "https://www.crypto-news.net/feed", "https://www.cryptoninjas.net/feed", "https://ethereumworldnews.com/feed", "https://bravenewcoin.com/feed", "http://www.financemagnates.com/feed", "http://www.cryptoquicknews.com/feed", "http://cryptscout.com/feed", 
"http://www.coinnewsasia.com/feed"] #################################################################################################################### # -> X. Metaverse <- #################################################################################################################### metaverse = "Metaverse" metaverse_search_terms = [ "metaverse", "virtual world", "vr", "decentraland", "decentralized", "nft", "decentralized world", "augmented reality", "blockchain", "ethereum", "virtual reality" ] metaverse_search_terms_test = ["metaverse", "virtual world", "nft"] """ -> Update secondary words -> Grab from the database and match terms from virtual worlds... -> Update the enhancer to re-analyze.. """ metaverse_secondary_weighted_terms = { "land": MIDDLE_WEIGHT, "parcel": MIDDLE_WEIGHT, "virtual land": LOW_WEIGHT, "real estate": LOW_WEIGHT, "DAO": LOW_WEIGHT, "decentralized autonomous organization": MIDDLE_WEIGHT, "build": 3, "building": 3, "device": MIDDLE_WEIGHT, "glasses": LOW_WEIGHT } metaverse_weighted_terms = { "0xearth": MIDDLE_WEIGHT, "3d": LOW_WEIGHT, "activision blizzard": LOW_WEIGHT, "alterverse": LOW_WEIGHT, "amd": MIDDLE_WEIGHT, "ar glasses": MAX_WEIGHT, "ar goggles": MAX_WEIGHT, "ar headset": MAX_WEIGHT, "artificial intelligent": MINI_WEIGHT, "arvr": MIDDLE_WEIGHT, "assets": LOW_WEIGHT, "augmented reality": MAX_WEIGHT, "avatar": LOW_WEIGHT, "average price": NANO_WEIGHT, "binance": MINI_WEIGHT, "bitcoin": 2, "blockchain": MIDDLE_WEIGHT, "bought": NANO_WEIGHT, "btc": 2, "centralized": LOW_WEIGHT, "collectible": MIDDLE_WEIGHT, "collectibles": MIDDLE_WEIGHT, "creator economy": MIDDLE_WEIGHT, "crypto based": NANO_WEIGHT, "cryptobased": MIDDLE_WEIGHT, "cryptocountries": HIGH_WEIGHT, "cryptocurrency": LOW_WEIGHT, "dapp": MIDDLE_WEIGHT, "decentraland": MAX_WEIGHT, "decentralisation": MINI_WEIGHT, "decentralised": LOW_WEIGHT, "decentralised economy": MIDDLE_WEIGHT, "decentralized": MINI_WEIGHT, "decentralized app": LOW_WEIGHT, "decentralized application": LOW_WEIGHT, "decentralized economy": MIDDLE_WEIGHT, "decentralized world": MAX_WEIGHT, "democratisation": MINI_WEIGHT, "denations": LOW_WEIGHT, "disrupt": NANO_WEIGHT, "disruption": NANO_WEIGHT, "edge computing": MINI_WEIGHT, "eth": LOW_WEIGHT, "ether": LOW_WEIGHT, "ethereum": LOW_WEIGHT, "etherland": MIDDLE_WEIGHT, "fintech": LOW_WEIGHT, "fungible": LOW_WEIGHT, "gaming": LOW_WEIGHT, "gained momentum": LOW_WEIGHT, "glewme": MAX_WEIGHT, "glewme city": MAX_WEIGHT, "glewmecity": MAX_WEIGHT, "gpu": MIDDLE_WEIGHT, "haptic": MINI_WEIGHT, "haptic suit": MINI_WEIGHT, "haptics": MINI_WEIGHT, "hatchables": LOW_WEIGHT, "headset": NANO_WEIGHT, "horizon": MINI_WEIGHT, "horizon world": MIDDLE_WEIGHT, "horizon worlds": MIDDLE_WEIGHT, "human interface": MINI_WEIGHT, "infrastructure": NANO_WEIGHT, "innovation": 3, "intel": LOW_WEIGHT, "investment": NANO_WEIGHT, "investment opportunity": MINI_WEIGHT, "iot": MINI_WEIGHT, "land": LOW_WEIGHT, "loopring": LOW_WEIGHT, "mana": MIDDLE_WEIGHT, "meta": LOW_WEIGHT, "meta key": LOW_WEIGHT, "meta platform": MIDDLE_WEIGHT, "meta platforms": MIDDLE_WEIGHT, "metaverse": MAX_WEIGHT, "metaverse industry": MAX_WEIGHT, "microsoft": LOW_WEIGHT, "multiple platforms": MINI_WEIGHT, "nft": LOW_WEIGHT, "NFT Marketplace": MIDDLE_WEIGHT, "nft worlds": MIDDLE_WEIGHT, "nft's": MIDDLE_WEIGHT, "nfts": MIDDLE_WEIGHT, "Niantic": MINI_WEIGHT, "nonfungible": MIDDLE_WEIGHT, "nvidia": MIDDLE_WEIGHT, "oculus": HIGH_WEIGHT, "opensea": MIDDLE_WEIGHT, "ovr land": LOW_WEIGHT, "ownable": MIDDLE_WEIGHT, "ownables": MIDDLE_WEIGHT, "parcel": 
MIDDLE_WEIGHT, "play earn": MIDDLE_WEIGHT, "poe": MINI_WEIGHT, "polka city": LOW_WEIGHT, "pos": MINI_WEIGHT, "property value": LOW_WEIGHT, "real estate": LOW_WEIGHT, "roblox": HIGH_WEIGHT, "Roblox Corporation": LOW_WEIGHT, "sandbox": MIDDLE_WEIGHT, "sft": LOW_WEIGHT, "semi fungible": LOW_WEIGHT, "sha-256": MINI_WEIGHT, "sha256": MINI_WEIGHT, "smart chain": MINI_WEIGHT, "snap": MINI_WEIGHT, "solana": MAX_WEIGHT, "sold": MINI_WEIGHT, "somnium": MAX_WEIGHT, "somnium space": MAX_WEIGHT, "spatial computing": LOW_WEIGHT, "speculation": NANO_WEIGHT, "stake": NANO_WEIGHT, "staking": NANO_WEIGHT, "stock": MINI_WEIGHT, "store value": MINI_WEIGHT, "state channel": MINI_WEIGHT, "superworld": HIGH_WEIGHT, "TakeTwo": MINI_WEIGHT, "the sandbox": MAX_WEIGHT, "token": LOW_WEIGHT, "tokens": LOW_WEIGHT, "treeverse": LOW_WEIGHT, "ue5": MIDDLE_WEIGHT, "unity": HIGH_WEIGHT, "unreal engine": HIGH_WEIGHT, "Vans World": MINI_WEIGHT, "vegas city": LOW_WEIGHT, "victoria": LOW_WEIGHT, "victoria vr": MAX_WEIGHT, "victoriavr": MAX_WEIGHT, "virtual": MIDDLE_WEIGHT, "virtual land": HIGH_WEIGHT, "virtual meeting": HIGH_WEIGHT, "virtual meetings": HIGH_WEIGHT, "virtual reality": HIGH_WEIGHT, "virtual world": HIGH_WEIGHT, "virtual worlds": HIGH_WEIGHT, "vr": LOW_WEIGHT, "vr glasses": MIDDLE_WEIGHT, "vr goggles": MIDDLE_WEIGHT, "vr headset": MIDDLE_WEIGHT, "vr world": MAX_WEIGHT, "war raiders": LOW_WEIGHT, "wearable": LOW_WEIGHT, "wearables": LOW_WEIGHT, "web3": MIDDLE_WEIGHT, "web3 gaming": HIGH_WEIGHT, "web3 game": HIGH_WEIGHT } metaverse_url_sources = [ "https://nftplazas.com/", "https://cointelegraph.com/rss", "https://coindesk.com/feed", "https://news.bitcoin.com/feed", "https://minergate.com/blog/feed/", "https://coinjournal.net/feed", "https://cryptoinsider.com/feed", "http://www.newsbtc.com/feed", "https://twitter.com/jaxx_io/feed", "https://bitcoinmagazine.com/feed", "https://www.crypto-news.net/feed", "https://www.cryptoninjas.net/feed", "https://ethereumworldnews.com/feed", "https://bravenewcoin.com/feed", "http://www.financemagnates.com/feed", "http://www.cryptoquicknews.com/feed", "http://cryptscout.com/feed", "http://www.coinnewsasia.com/feed" ] # -> This master weighted list comes dynamically from meta_names.json # metaverse_weighted_terms = DICT.merge_dicts(meta_names, metaverse_weighted_custom_terms) metaverse_rss_feeds = [ "https://cointelegraph.com/rss", "https://coindesk.com/feed", "https://news.bitcoin.com/feed", "https://minergate.com/blog/feed/", "https://coinjournal.net/feed", "https://cryptoinsider.com/feed", "http://www.newsbtc.com/feed", "https://twitter.com/jaxx_io/feed", "https://bitcoinmagazine.com/feed", "https://www.crypto-news.net/feed", "https://www.cryptoninjas.net/feed", "https://ethereumworldnews.com/feed", "https://bravenewcoin.com/feed", "http://www.financemagnates.com/feed", "http://www.cryptoquicknews.com/feed", "http://cryptscout.com/feed", "http://www.coinnewsasia.com/feed", "https://decentraland.org/blog/feed.xml"] #################################################################################################################### # -> 5. 
Android <- #################################################################################################################### android = "Android" android_search_terms = ["android", "android os", "android studio", "android developer", "kotlin", "jetbrains"] android_weighted_terms = {"android": MAX_WEIGHT, "java": MIDDLE_WEIGHT, "kotlin": HIGH_WEIGHT, "gradle": LOW_WEIGHT, "coroutine": LOW_WEIGHT, "jetpack": HIGH_WEIGHT, "async": 2, "google": MIDDLE_WEIGHT, "vr": 2, "ar": 2, "androiddevsummit": MAX_WEIGHT, "certification": NANO_WEIGHT, "beta": LOW_WEIGHT, "alpha": LOW_WEIGHT, "canary": 2, "jetbrains": LOW_WEIGHT, "api": 2, "summit": NANO_WEIGHT, "stable": NANO_WEIGHT, "android os": MAX_WEIGHT, "android developer": MAX_WEIGHT, "android studio": MAX_WEIGHT, "virtual reality": MINI_WEIGHT, "augmented reality": MINI_WEIGHT, "android dev": NANO_WEIGHT, "wear os": NANO_WEIGHT, "app bundle": NANO_WEIGHT, "app bundles": NANO_WEIGHT, "google play": MIDDLE_WEIGHT, "jetpack compose": MAX_WEIGHT, "dev kit": NANO_WEIGHT, "development kit": MINI_WEIGHT, "firebase": MIDDLE_WEIGHT } android_rss_feeds = ["https://www.reddit.com/r/Android.rss", "https://www.androidauthority.com/feed/", "http://feeds.feedburner.com/blogspot/hsDu", "https://medium.com/feed/tag/android", "https://www.reddit.com/r/androiddev.rss", "https://proandroiddev.com/feed", "https://medium.com/feed/@ian-alexander", "http://feeds.feedburner.com/FirebaseBlog", "https://blog.jetbrains.com/kotlin/feed/", "http://feeds.feedburner.com/GDBcode"] #################################################################################################################### # -> 5. apple <- #################################################################################################################### apple = "apple" apple_search_terms = ["ios", "apple", "apple products", "iphone", "macbook", "apple developer"] apple_weighted_terms = {"apple": MIDDLE_WEIGHT, "ios": HIGH_WEIGHT, "apple inc": MAX_WEIGHT, "iphone": HIGH_WEIGHT, "mac": MIDDLE_WEIGHT, "macintosh": MIDDLE_WEIGHT, "product release": MIDDLE_WEIGHT, "product releases": MIDDLE_WEIGHT, "new product": MIDDLE_WEIGHT, "road map": LOW_WEIGHT, "apple watches": HIGH_WEIGHT, "new products": MIDDLE_WEIGHT, "m1": LOW_WEIGHT, "imac": HIGH_WEIGHT, "apple tv": HIGH_WEIGHT, "airtag": MIDDLE_WEIGHT, "airtags": MIDDLE_WEIGHT, "ipad": HIGH_WEIGHT, "pro": MINI_WEIGHT, "new": NANO_WEIGHT, "product": NANO_WEIGHT, "products": NANO_WEIGHT, "mac pro": HIGH_WEIGHT, "macbook": HIGH_WEIGHT, "macbook pro": HIGH_WEIGHT, "app store": HIGH_WEIGHT, "project": MINI_WEIGHT, "tim cook": HIGH_WEIGHT, "m1 pro chip": HIGH_WEIGHT, "new model": LOW_WEIGHT, "new models": LOW_WEIGHT, "air": MINI_WEIGHT, "m2": LOW_WEIGHT, "m2 chip": LOW_WEIGHT, "apple software": HIGH_WEIGHT, "apples software": MIDDLE_WEIGHT, "conference": MINI_WEIGHT, "peek": NANO_WEIGHT, "airpods": HIGH_WEIGHT, "airpod": HIGH_WEIGHT, "max": MINI_WEIGHT, "silicon chip": MINI_WEIGHT, "silicon chips": MINI_WEIGHT, "apple silicon": MIDDLE_WEIGHT, "steve jobs": HIGH_WEIGHT, "homepod": HIGH_WEIGHT, "series": MINI_WEIGHT, "xcode": MAX_WEIGHT, "rumor": MINI_WEIGHT, "rumors": MINI_WEIGHT, "announced": LOW_WEIGHT, "update": MINI_WEIGHT, "updates": MINI_WEIGHT, "exclusive": MINI_WEIGHT } apple_rss_feeds = [] #################################################################################################################### # -> 6. 
programming <- #################################################################################################################### programming = "programming" programming_search_terms = ["programming", "software development", "computer science", "software engineer", "coding", "computer science", "cyber security"] programming_weighted_terms = {"kotlin": MAX_WEIGHT, "computer science": MAX_WEIGHT, "java": MAX_WEIGHT, "javascript": MAX_WEIGHT, "swift": MAX_WEIGHT, "python": MAX_WEIGHT, "jvm": MIDDLE_WEIGHT, "php": MIDDLE_WEIGHT, "bash": MIDDLE_WEIGHT, "scala": MINI_WEIGHT, "sql": HIGH_WEIGHT, "nosql": HIGH_WEIGHT, "mongodb": MAX_WEIGHT, "mongo": LOW_WEIGHT, "perl": MIDDLE_WEIGHT, "c": MIDDLE_WEIGHT, "c++": MIDDLE_WEIGHT, "c#": MIDDLE_WEIGHT, "ruby": MINI_WEIGHT, "go": LOW_WEIGHT, "web based applications": LOW_WEIGHT, "devops": LOW_WEIGHT, "programming langauge": MINI_WEIGHT, "programming languages": MIDDLE_WEIGHT, "apps": MINI_WEIGHT, "application": MINI_WEIGHT, "applications": MINI_WEIGHT, "object oriented": MINI_WEIGHT, "low level": MINI_WEIGHT, "gui development": MINI_WEIGHT, "backend": MINI_WEIGHT, "lifecycle": MINI_WEIGHT, "development": MIDDLE_WEIGHT, "developers": MIDDLE_WEIGHT, "html": MIDDLE_WEIGHT, "css": MIDDLE_WEIGHT, "xml": MIDDLE_WEIGHT, "script": LOW_WEIGHT, "scripting language": HIGH_WEIGHT, "scripting languages": HIGH_WEIGHT, "unix": HIGH_WEIGHT, "shell": LOW_WEIGHT, "package": MINI_WEIGHT, "library": MINI_WEIGHT, "module": MINI_WEIGHT, "dependency": MINI_WEIGHT, "high level": MINI_WEIGHT, "objc": MINI_WEIGHT, "objective c": HIGH_WEIGHT, "jetpack": MIDDLE_WEIGHT, "async": MIDDLE_WEIGHT, "androiddevsummit": MIDDLE_WEIGHT, "certification": NANO_WEIGHT, "beta": MIDDLE_WEIGHT, "alpha": LOW_WEIGHT, "canary": LOW_WEIGHT, "jetbrains": MIDDLE_WEIGHT, "api": LOW_WEIGHT, "summit": MINI_WEIGHT, "stable": MINI_WEIGHT, "android os": NANO_WEIGHT, "android developer": MIDDLE_WEIGHT, "android studio": MIDDLE_WEIGHT, "android dev": LOW_WEIGHT, "wear os": MINI_WEIGHT, "app bundle": MIDDLE_WEIGHT, "app bundles": NANO_WEIGHT, "google play": LOW_WEIGHT, "jetpack compose": MIDDLE_WEIGHT, "dev kit": MIDDLE_WEIGHT, "development kit": MIDDLE_WEIGHT, "firebase": MIDDLE_WEIGHT, "debugger": MIDDLE_WEIGHT, "code": MINI_WEIGHT, "senior": MINI_WEIGHT, "coding": MINI_WEIGHT, "programming": HIGH_WEIGHT, "programmer": HIGH_WEIGHT, "virtual machine": LOW_WEIGHT, "software": MIDDLE_WEIGHT, "software engineer": MAX_WEIGHT, "engineer": LOW_WEIGHT, "app": MINI_WEIGHT, "gradle": LOW_WEIGHT, "coroutine": LOW_WEIGHT, "xcode": MIDDLE_WEIGHT, "algo": MINI_WEIGHT } programming_rss_feeds = [ "http://www.thecrazyprogrammer.com/feed", "https://www.sitepoint.com/feed/", "https://www.raywenderlich.com/feed", "https://stackabuse.com/rss/", "https://blog.jooq.org/feed/", "http://feeds.hanselman.com/ScottHanselman", "https://tympanus.net/codrops/feed/", "https://medium.com/feed/better-programming", "https://medium.com/feed/a-technologists-pov", "https://blog.codepen.io/feed/", "https://hackr.io/programming/rss.xml", "https://www.techiedelight.com/feed/", "https://davidwalsh.name/feed", "https://codesignal.com/feed/", "https://alistapart.com/site/rss", "https://www.codingdojo.com/blog/feed", "https://fueled.com/feed/", "https://www.codewall.co.uk/feed/" ] @staticmethod def get_main_fopic_category_names(): test = MainCategories.__dict__.keys() variables = [] for item in test: if str(item).startswith("__"): continue elif str(item).startswith("keys"): continue elif str(item).__contains__("_"): continue else: variables.append(item) 
return variables
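# ------------------------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the *_weighted_terms dictionaries above could be
# used to score a piece of text for a category. The score_text helper below is an assumption made purely for
# illustration; the weight constants (NANO_WEIGHT ... MAX_WEIGHT) are defined elsewhere in the project and their
# actual values may differ.
def score_text(text: str, weighted_terms: dict) -> float:
    """Sum the weights of every weighted term that appears in the (lower-cased) text."""
    lowered = text.lower()
    return sum(weight for term, weight in weighted_terms.items() if term.lower() in lowered)

# Hypothetical example call:
#   score_text("Ethereum smart contract gas fees keep rising", MainCategories.crypto_weighted_terms)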
PypiClean
/GreenGlacier-1.0.0.tar.gz/GreenGlacier-1.0.0/greenglacier.py
from __future__ import print_function
import os
import hashlib
import math
import binascii

import gevent
import gevent.pool
import gevent.queue
import gevent.monkey
gevent.monkey.patch_socket()
gevent.monkey.patch_ssl()
gevent.monkey.patch_os()

from retrying import retry

# the following helper functions are (temporarily) shamelessly stolen from boto.glacier.utils
_MEGABYTE = 1024 * 1024
DEFAULT_PART_SIZE = 4 * _MEGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000


def tree_hash(fo):
    """
    Given a hash of each 1MB chunk (from chunk_hashes) this will hash
    together adjacent hashes until it ends up with one big one. So a tree
    of hashes.
    """
    hashes = []
    hashes.extend(fo)
    while len(hashes) > 1:
        new_hashes = []
        while True:
            if len(hashes) > 1:
                first = hashes.pop(0)
                second = hashes.pop(0)
                new_hashes.append(hashlib.sha256(first + second).digest())
            elif len(hashes) == 1:
                only = hashes.pop(0)
                new_hashes.append(only)
            else:
                break
        hashes.extend(new_hashes)
    return hashes[0]


def chunk_hashes(bytestring, chunk_size=_MEGABYTE):
    # hash each 1MB chunk of the input; an empty input still yields one hash
    chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
    hashes = []
    for i in range(chunk_count):
        start = i * chunk_size
        end = (i + 1) * chunk_size
        hashes.append(hashlib.sha256(bytestring[start:end]).digest())
    if not hashes:
        return [hashlib.sha256(b'').digest()]
    return hashes


def bytes_to_hex(str_as_bytes):
    return binascii.hexlify(str_as_bytes)


def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE):
    """Calculate the minimum part size needed for a multipart upload.

    Glacier allows a maximum of 10,000 parts per upload. It also states
    that the maximum archive size is 10,000 * 4 GB, which means the part
    size can range from 1 MB to 4 GB (provided it is 1 MB multiplied by a
    power of 2).

    This function will compute what the minimum part size must be in
    order to upload a file of size ``size_in_bytes``.

    It will first check if ``default_part_size`` is sufficient for a part
    size given the ``size_in_bytes``. If this is not the case, then the
    smallest part size that can accommodate a file of size
    ``size_in_bytes`` will be returned.

    If the file size is greater than the maximum allowed archive size of
    10,000 * 4 GB, a ``ValueError`` will be raised.
    """
    # The default part size (4 MB) will be too small for a very large
    # archive, as there is a limit of 10,000 parts in a multipart upload.
    # This puts the maximum allowed archive size with the default part size
    # at 40,000 MB. We need to do a sanity check on the part size, and find
    # one that works if the default is too small.
    part_size = _MEGABYTE
    if (default_part_size * MAXIMUM_NUMBER_OF_PARTS) < size_in_bytes:
        if size_in_bytes > (4096 * _MEGABYTE * 10000):
            raise ValueError("File size too large: %s" % size_in_bytes)
        min_part_size = size_in_bytes / 10000
        power = 3
        while part_size < min_part_size:
            part_size = math.ldexp(_MEGABYTE, power)
            power += 1
        part_size = int(part_size)
    else:
        part_size = default_part_size
    return part_size


# TODO: progress callbacks using basesubscriber

class MultipartUploadPart(object):
    """
    Represent a part - have a part number, the upload, etc.

    self.upload - does what you'd expect; this should be the first phase in
    subclassing below to handle S3.
    """
    pass


class MultipartPartUploader(gevent.Greenlet):
    """Greenlet that reads one part of the file from disk and uploads it with retries."""

    def __init__(self, upload, work, callback=None, retries=8):
        gevent.Greenlet.__init__(self)
        self.upload = upload
        self.work = work
        self.retries = retries
        if callback:
            self.link(callback)

    def _run(self):
        filename, offset, size = self.work
        print('uploading chunk %s' % offset)
        chunk = self.readfile(filename, offset, size)
        return self.upload_part(chunk, offset, size)

    def readfile(self, filename, offset, size):
        with open(filename, 'rb') as fileobj:
            fileobj.seek(offset * size)
            return fileobj.read(size)

    def upload_part(self, chunk, offset, size):
        @retry(stop_max_attempt_number=self.retries)
        def retry_upload(range, checksum, body):
            print('trying upload %s' % checksum)
            self.upload.upload_part(range=range, checksum=str(checksum), body=body)

        hashstring = bytes_to_hex(tree_hash(chunk_hashes(chunk)))
        first_byte = offset * size
        last_byte = first_byte + size - 1
        rangestr = 'bytes %d-%d/*' % (first_byte, last_byte)
        retry_upload(rangestr, hashstring, chunk)
        return offset, hashstring


class GreenGlacierUploader(object):
    class UploadFailedException(Exception):
        pass

    def __init__(self, vault, concurrent_uploads=10, part_size=4194304):
        self.vault = vault
        self.part_size = part_size  # will be overridden on upload
        self.concurrent_uploads = concurrent_uploads

    def upload(self, filename, description=None):
        description = description or filename
        work_queue = gevent.queue.Queue()
        filesize = os.stat(filename).st_size
        minimum = minimum_part_size(filesize)
        # use at least the minimum valid part size (taking min() here could
        # select a part size that is too small for large archives)
        self.part_size = max(self.part_size, minimum) if self.part_size else minimum
        # ceil avoids queueing an empty extra part when the file size is an
        # exact multiple of the part size
        total_parts = int(math.ceil(filesize / float(self.part_size)))
        print('uploading %s with %s %s-sized parts' % (filename, total_parts, self.part_size))
        self.res = [None] * total_parts
        multipart_upload = self.vault.initiate_multipart_upload(archiveDescription=description,
                                                                partSize=str(self.part_size))
        for part in range(total_parts):
            work_queue.put((filename, part, self.part_size))
        active = gevent.pool.Pool(self.concurrent_uploads, MultipartPartUploader)
        while not work_queue.empty():
            # TODO: replace with list e.g. if work: spawn(m, work.pop())
            work = work_queue.get()
            active.spawn(multipart_upload, work, self.callback)
        active.join()  # wait for final chunks to upload..
        final_checksum = bytes_to_hex(tree_hash(self.res))
        multipart_upload.complete(archiveSize=filesize, checksum=final_checksum)

    def callback(self, g):
        print('greenlet finished, saving value')
        try:
            part_num, chunk_hash = g.get()
            self.res[part_num] = chunk_hash
        except:
            g.upload.abort()
            raise
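# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module). GreenGlacierUploader drives a
# boto3 Glacier Vault resource: it calls vault.initiate_multipart_upload() and
# then upload_part()/complete() on the returned MultipartUpload object. The
# vault name and file path below are placeholders, and boto3 is assumed to be
# installed and configured with valid AWS credentials.
if __name__ == '__main__':
    import boto3

    vault = boto3.resource('glacier').Vault('-', 'my-backups')  # '-' selects the caller's own account
    uploader = GreenGlacierUploader(vault, concurrent_uploads=10)
    uploader.upload('/path/to/archive.tar', description='nightly backup')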
PypiClean
/Newgram-0.0.5.tar.gz/Newgram-0.0.5/newgram/client.py
import asyncio import functools import inspect import logging import os import platform import re import shutil import sys from concurrent.futures.thread import ThreadPoolExecutor from datetime import datetime, timedelta from hashlib import sha256 from importlib import import_module from io import StringIO, BytesIO from mimetypes import MimeTypes from pathlib import Path from typing import Union, List, Optional, Callable, AsyncGenerator import newgram from newgram import __version__, __license__ from newgram import enums from newgram import raw from newgram import utils from newgram.crypto import aes from newgram.errors import CDNFileHashMismatch from newgram.errors import ( SessionPasswordNeeded, VolumeLocNotFound, ChannelPrivate, BadRequest ) from newgram.handlers.handler import Handler from newgram.methods import Methods from newgram.session import Auth, Session from newgram.storage import FileStorage, MemoryStorage from newgram.types import User, TermsOfService from newgram.utils import ainput from .dispatcher import Dispatcher from .file_id import FileId, FileType, ThumbnailSource from .mime_types import mime_types from .parser import Parser from .session.internals import MsgId log = logging.getLogger(__name__) class Client(Methods): """Newgram Client, the main means for interacting with Telegram. Parameters: name (``str``): A name for the client, e.g.: "my_account". api_id (``int`` | ``str``, *optional*): The *api_id* part of the Telegram API key, as integer or string. E.g.: 12345 or "12345". api_hash (``str``, *optional*): The *api_hash* part of the Telegram API key, as string. E.g.: "0123456789abcdef0123456789abcdef". app_version (``str``, *optional*): Application version. Defaults to "Newgram x.y.z". device_model (``str``, *optional*): Device model. Defaults to *platform.python_implementation() + " " + platform.python_version()*. system_version (``str``, *optional*): Operating System version. Defaults to *platform.system() + " " + platform.release()*. lang_code (``str``, *optional*): Code of the language used on the client, in ISO 639-1 standard. Defaults to "en". ipv6 (``bool``, *optional*): Pass True to connect to Telegram using IPv6. Defaults to False (IPv4). proxy (``dict``, *optional*): The Proxy settings as dict. E.g.: *dict(scheme="socks5", hostname="11.22.33.44", port=1234, username="user", password="pass")*. The *username* and *password* can be omitted if the proxy doesn't require authorization. test_mode (``bool``, *optional*): Enable or disable login to the test servers. Only applicable for new sessions and will be ignored in case previously created sessions are loaded. Defaults to False. bot_token (``str``, *optional*): Pass the Bot API token to create a bot session, e.g.: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11" Only applicable for new sessions. session_string (``str``, *optional*): Pass a session string to load the session in-memory. Implies ``in_memory=True``. in_memory (``bool``, *optional*): Pass True to start an in-memory session that will be discarded as soon as the client stops. In order to reconnect again using an in-memory session without having to login again, you can use :meth:`~newgram.Client.export_session_string` before stopping the client to get a session string you can pass to the ``session_string`` parameter. Defaults to False. phone_number (``str``, *optional*): Pass the phone number as string (with the Country Code prefix included) to avoid entering it manually. Only applicable for new sessions. 
phone_code (``str``, *optional*): Pass the phone code as string (for test numbers only) to avoid entering it manually. Only applicable for new sessions. password (``str``, *optional*): Pass the Two-Step Verification password as string (if required) to avoid entering it manually. Only applicable for new sessions. workers (``int``, *optional*): Number of maximum concurrent workers for handling incoming updates. Defaults to ``min(32, os.cpu_count() + 4)``. workdir (``str``, *optional*): Define a custom working directory. The working directory is the location in the filesystem where Newgram will store the session files. Defaults to the parent directory of the main script. plugins (``dict``, *optional*): Smart Plugins settings as dict, e.g.: *dict(root="plugins")*. parse_mode (:obj:`~newgram.enums.ParseMode`, *optional*): Set the global parse mode of the client. By default, texts are parsed using both Markdown and HTML styles. You can combine both syntaxes together. no_updates (``bool``, *optional*): Pass True to disable incoming updates. When updates are disabled the client can't receive messages or other updates. Useful for batch programs that don't need to deal with updates. Defaults to False (updates enabled and received). takeout (``bool``, *optional*): Pass True to let the client use a takeout session instead of a normal one, implies *no_updates=True*. Useful for exporting Telegram data. Methods invoked inside a takeout session (such as get_chat_history, download_media, ...) are less prone to throw FloodWait exceptions. Only available for users, bots will ignore this parameter. Defaults to False (normal session). sleep_threshold (``int``, *optional*): Set a sleep threshold for flood wait exceptions happening globally in this client instance, below which any request that raises a flood wait will be automatically invoked again after sleeping for the required amount of time. Flood wait exceptions requiring higher waiting times will be raised. Defaults to 10 seconds. hide_password (``bool``, *optional*): Pass True to hide the password when typing it during the login. Defaults to False, because ``getpass`` (the library used) is known to be problematic in some terminal environments. max_concurrent_transmissions (``bool``, *optional*): Set the maximum amount of concurrent transmissions (uploads & downloads). A value that is too high may result in network related issues. Defaults to 1. 
""" APP_VERSION = f"Newgram {__version__}" DEVICE_MODEL = f"Newgram" SYSTEM_VERSION = f"{platform.system()} {platform.release()}" LANG_CODE = "id" PARENT_DIR = Path(sys.argv[0]).parent INVITE_LINK_RE = re.compile(r"^(?:https?://)?(?:www\.)?(?:t(?:elegram)?\.(?:org|me|dog)/(?:joinchat/|\+))([\w-]+)$") WORKERS = min(32, (os.cpu_count() or 0) + 4) # os.cpu_count() can be None WORKDIR = PARENT_DIR # Interval of seconds in which the updates watchdog will kick in UPDATES_WATCHDOG_INTERVAL = 5 * 60 MAX_CONCURRENT_TRANSMISSIONS = 1 mimetypes = MimeTypes() mimetypes.readfp(StringIO(mime_types)) def __init__( self, name: str, api_id: Union[int, str] = None, api_hash: str = None, app_version: str = APP_VERSION, device_model: str = DEVICE_MODEL, system_version: str = SYSTEM_VERSION, lang_code: str = LANG_CODE, ipv6: bool = False, proxy: dict = None, test_mode: bool = False, bot_token: str = None, session_string: str = None, in_memory: bool = None, phone_number: str = None, phone_code: str = None, password: str = None, workers: int = WORKERS, workdir: str = WORKDIR, plugins: dict = None, parse_mode: "enums.ParseMode" = enums.ParseMode.DEFAULT, no_updates: bool = None, takeout: bool = None, sleep_threshold: int = Session.SLEEP_THRESHOLD, hide_password: bool = False, max_concurrent_transmissions: int = MAX_CONCURRENT_TRANSMISSIONS ): super().__init__() self.name = name self.api_id = int(api_id) if api_id else None self.api_hash = api_hash self.app_version = app_version self.device_model = device_model self.system_version = system_version self.lang_code = lang_code.lower() self.ipv6 = ipv6 self.proxy = proxy self.test_mode = test_mode self.bot_token = bot_token self.session_string = session_string self.in_memory = in_memory self.phone_number = phone_number self.phone_code = phone_code self.password = password self.workers = workers self.workdir = Path(workdir) self.plugins = plugins self.parse_mode = parse_mode self.no_updates = no_updates self.takeout = takeout self.sleep_threshold = sleep_threshold self.hide_password = hide_password self.max_concurrent_transmissions = max_concurrent_transmissions self.executor = ThreadPoolExecutor(self.workers, thread_name_prefix="Handler") if self.session_string: self.storage = MemoryStorage(self.name, self.session_string) elif self.in_memory: self.storage = MemoryStorage(self.name) else: self.storage = FileStorage(self.name, self.workdir) self.dispatcher = Dispatcher(self) self.rnd_id = MsgId self.parser = Parser(self) self.session = None self.media_sessions = {} self.media_sessions_lock = asyncio.Lock() self.save_file_semaphore = asyncio.Semaphore(self.max_concurrent_transmissions) self.get_file_semaphore = asyncio.Semaphore(self.max_concurrent_transmissions) self.is_connected = None self.is_initialized = None self.takeout_id = None self.disconnect_handler = None self.me: Optional[User] = None self.message_cache = Cache(10000) # Sometimes, for some reason, the server will stop sending updates and will only respond to pings. # This watchdog will invoke updates.GetState in order to wake up the server and enable it sending updates again # after some idle time has been detected. 
self.updates_watchdog_task = None self.updates_watchdog_event = asyncio.Event() self.last_update_time = datetime.now() self.loop = asyncio.get_event_loop() def __enter__(self): return self.start() def __exit__(self, *args): try: self.stop() except ConnectionError: pass async def __aenter__(self): return await self.start() async def __aexit__(self, *args): try: await self.stop() except ConnectionError: pass async def updates_watchdog(self): while True: try: await asyncio.wait_for(self.updates_watchdog_event.wait(), self.UPDATES_WATCHDOG_INTERVAL) except asyncio.TimeoutError: pass else: break if datetime.now() - self.last_update_time > timedelta(seconds=self.UPDATES_WATCHDOG_INTERVAL): await self.invoke(raw.functions.updates.GetState()) async def authorize(self) -> User: if self.bot_token: return await self.sign_in_bot(self.bot_token) print(f"Welcome to Newgram (version {__version__})") print(f"Newgram is free software and comes with ABSOLUTELY NO WARRANTY. Licensed\n" f"under the terms of the {__license__}.\n") while True: try: if not self.phone_number: while True: value = await ainput("Enter phone number or bot token: ") if not value: continue confirm = (await ainput(f'Is "{value}" correct? (y/N): ')).lower() if confirm == "y": break if ":" in value: self.bot_token = value return await self.sign_in_bot(value) else: self.phone_number = value sent_code = await self.send_code(self.phone_number) except BadRequest as e: print(e.MESSAGE) self.phone_number = None self.bot_token = None else: break sent_code_descriptions = { enums.SentCodeType.APP: "Telegram app", enums.SentCodeType.SMS: "SMS", enums.SentCodeType.CALL: "phone call", enums.SentCodeType.FLASH_CALL: "phone flash call", enums.SentCodeType.FRAGMENT_SMS: "Fragment SMS", enums.SentCodeType.EMAIL_CODE: "email code" } print(f"The confirmation code has been sent via {sent_code_descriptions[sent_code.type]}") while True: if not self.phone_code: self.phone_code = await ainput("Enter confirmation code: ") try: signed_in = await self.sign_in(self.phone_number, sent_code.phone_code_hash, self.phone_code) except BadRequest as e: print(e.MESSAGE) self.phone_code = None except SessionPasswordNeeded as e: print(e.MESSAGE) while True: print("Password hint: {}".format(await self.get_password_hint())) if not self.password: self.password = await ainput("Enter password (empty to recover): ", hide=self.hide_password) try: if not self.password: confirm = await ainput("Confirm password recovery (y/n): ") if confirm == "y": email_pattern = await self.send_recovery_code() print(f"The recovery code has been sent to {email_pattern}") while True: recovery_code = await ainput("Enter recovery code: ") try: return await self.recover_password(recovery_code) except BadRequest as e: print(e.MESSAGE) except Exception as e: log.exception(e) raise else: self.password = None else: return await self.check_password(self.password) except BadRequest as e: print(e.MESSAGE) self.password = None else: break if isinstance(signed_in, User): return signed_in while True: first_name = await ainput("Enter first name: ") last_name = await ainput("Enter last name (empty to skip): ") try: signed_up = await self.sign_up( self.phone_number, sent_code.phone_code_hash, first_name, last_name ) except BadRequest as e: print(e.MESSAGE) else: break if isinstance(signed_in, TermsOfService): print("\n" + signed_in.text + "\n") await self.accept_terms_of_service(signed_in.id) return signed_up def set_parse_mode(self, parse_mode: Optional["enums.ParseMode"]): """Set the parse mode to be used globally 
by the client. When setting the parse mode with this method, all other methods having a *parse_mode* parameter will follow the global value by default. Parameters: parse_mode (:obj:`~newgram.enums.ParseMode`): By default, texts are parsed using both Markdown and HTML styles. You can combine both syntaxes together. Example: .. code-block:: python from newgram import enums # Default combined mode: Markdown + HTML await app.send_message("me", "1. **markdown** and <i>html</i>") # Force Markdown-only, HTML is disabled app.set_parse_mode(enums.ParseMode.MARKDOWN) await app.send_message("me", "2. **markdown** and <i>html</i>") # Force HTML-only, Markdown is disabled app.set_parse_mode(enums.ParseMode.HTML) await app.send_message("me", "3. **markdown** and <i>html</i>") # Disable the parser completely app.set_parse_mode(enums.ParseMode.DISABLED) await app.send_message("me", "4. **markdown** and <i>html</i>") # Bring back the default combined mode app.set_parse_mode(enums.ParseMode.DEFAULT) await app.send_message("me", "5. **markdown** and <i>html</i>") """ self.parse_mode = parse_mode async def fetch_peers(self, peers: List[Union[raw.types.User, raw.types.Chat, raw.types.Channel]]) -> bool: is_min = False parsed_peers = [] for peer in peers: if getattr(peer, "min", False): is_min = True continue username = None phone_number = None if isinstance(peer, raw.types.User): peer_id = peer.id access_hash = peer.access_hash username = ( peer.username.lower() if peer.username else peer.usernames[0].username.lower() if peer.usernames else None ) phone_number = peer.phone peer_type = "bot" if peer.bot else "user" elif isinstance(peer, (raw.types.Chat, raw.types.ChatForbidden)): peer_id = -peer.id access_hash = 0 peer_type = "group" elif isinstance(peer, raw.types.Channel): peer_id = utils.get_channel_id(peer.id) access_hash = peer.access_hash username = ( peer.username.lower() if peer.username else peer.usernames[0].username.lower() if peer.usernames else None ) peer_type = "channel" if peer.broadcast else "supergroup" elif isinstance(peer, raw.types.ChannelForbidden): peer_id = utils.get_channel_id(peer.id) access_hash = peer.access_hash peer_type = "channel" if peer.broadcast else "supergroup" else: continue parsed_peers.append((peer_id, access_hash, peer_type, username, phone_number)) await self.storage.update_peers(parsed_peers) return is_min async def handle_updates(self, updates): self.last_update_time = datetime.now() if isinstance(updates, (raw.types.Updates, raw.types.UpdatesCombined)): is_min = any(( await self.fetch_peers(updates.users), await self.fetch_peers(updates.chats), )) users = {u.id: u for u in updates.users} chats = {c.id: c for c in updates.chats} for update in updates.updates: channel_id = getattr( getattr( getattr( update, "message", None ), "peer_id", None ), "channel_id", None ) or getattr(update, "channel_id", None) pts = getattr(update, "pts", None) pts_count = getattr(update, "pts_count", None) if isinstance(update, raw.types.UpdateChannelTooLong): log.info(update) if isinstance(update, raw.types.UpdateNewChannelMessage) and is_min: message = update.message if not isinstance(message, raw.types.MessageEmpty): try: diff = await self.invoke( raw.functions.updates.GetChannelDifference( channel=await self.resolve_peer(utils.get_channel_id(channel_id)), filter=raw.types.ChannelMessagesFilter( ranges=[raw.types.MessageRange( min_id=update.message.id, max_id=update.message.id )] ), pts=pts - pts_count, limit=pts ) ) except ChannelPrivate: pass else: if not isinstance(diff, 
raw.types.updates.ChannelDifferenceEmpty): users.update({u.id: u for u in diff.users}) chats.update({c.id: c for c in diff.chats}) self.dispatcher.updates_queue.put_nowait((update, users, chats)) elif isinstance(updates, (raw.types.UpdateShortMessage, raw.types.UpdateShortChatMessage)): diff = await self.invoke( raw.functions.updates.GetDifference( pts=updates.pts - updates.pts_count, date=updates.date, qts=-1 ) ) if diff.new_messages: self.dispatcher.updates_queue.put_nowait(( raw.types.UpdateNewMessage( message=diff.new_messages[0], pts=updates.pts, pts_count=updates.pts_count ), {u.id: u for u in diff.users}, {c.id: c for c in diff.chats} )) else: if diff.other_updates: # The other_updates list can be empty self.dispatcher.updates_queue.put_nowait((diff.other_updates[0], {}, {})) elif isinstance(updates, raw.types.UpdateShort): self.dispatcher.updates_queue.put_nowait((updates.update, {}, {})) elif isinstance(updates, raw.types.UpdatesTooLong): log.info(updates) async def load_session(self): await self.storage.open() session_empty = any([ await self.storage.test_mode() is None, await self.storage.auth_key() is None, await self.storage.user_id() is None, await self.storage.is_bot() is None ]) if session_empty: if not self.api_id or not self.api_hash: raise AttributeError("The API key is required for new authorizations. " "More info: https://docs.newgram.org/start/auth") await self.storage.api_id(self.api_id) await self.storage.dc_id(2) await self.storage.date(0) await self.storage.test_mode(self.test_mode) await self.storage.auth_key( await Auth( self, await self.storage.dc_id(), await self.storage.test_mode() ).create() ) await self.storage.user_id(None) await self.storage.is_bot(None) else: # Needed for migration from storage v2 to v3 if not await self.storage.api_id(): if self.api_id: await self.storage.api_id(self.api_id) else: while True: try: value = int(await ainput("Enter the api_id part of the API key: ")) if value <= 0: print("Invalid value") continue confirm = (await ainput(f'Is "{value}" correct? (y/N): ')).lower() if confirm == "y": await self.storage.api_id(value) break except Exception as e: print(e) def load_plugins(self): if self.plugins: plugins = self.plugins.copy() for option in ["include", "exclude"]: if plugins.get(option, []): plugins[option] = [ (i.split()[0], i.split()[1:] or None) for i in self.plugins[option] ] else: return if plugins.get("enabled", True): root = plugins["root"] include = plugins.get("include", []) exclude = plugins.get("exclude", []) count = 0 if not include: for path in sorted(Path(root.replace(".", "/")).rglob("*.py")): module_path = '.'.join(path.parent.parts + (path.stem,)) module = import_module(module_path) for name in vars(module).keys(): # noinspection PyBroadException try: for handler, group in getattr(module, name).handlers: if isinstance(handler, Handler) and isinstance(group, int): self.add_handler(handler, group) log.info('[{}] [LOAD] {}("{}") in group {} from "{}"'.format( self.name, type(handler).__name__, name, group, module_path)) count += 1 except Exception: pass else: for path, handlers in include: module_path = root + "." 
+ path warn_non_existent_functions = True try: module = import_module(module_path) except ImportError: log.warning('[%s] [LOAD] Ignoring non-existent module "%s"', self.name, module_path) continue if "__path__" in dir(module): log.warning('[%s] [LOAD] Ignoring namespace "%s"', self.name, module_path) continue if handlers is None: handlers = vars(module).keys() warn_non_existent_functions = False for name in handlers: # noinspection PyBroadException try: for handler, group in getattr(module, name).handlers: if isinstance(handler, Handler) and isinstance(group, int): self.add_handler(handler, group) log.info('[{}] [LOAD] {}("{}") in group {} from "{}"'.format( self.name, type(handler).__name__, name, group, module_path)) count += 1 except Exception: if warn_non_existent_functions: log.warning('[{}] [LOAD] Ignoring non-existent function "{}" from "{}"'.format( self.name, name, module_path)) if exclude: for path, handlers in exclude: module_path = root + "." + path warn_non_existent_functions = True try: module = import_module(module_path) except ImportError: log.warning('[%s] [UNLOAD] Ignoring non-existent module "%s"', self.name, module_path) continue if "__path__" in dir(module): log.warning('[%s] [UNLOAD] Ignoring namespace "%s"', self.name, module_path) continue if handlers is None: handlers = vars(module).keys() warn_non_existent_functions = False for name in handlers: # noinspection PyBroadException try: for handler, group in getattr(module, name).handlers: if isinstance(handler, Handler) and isinstance(group, int): self.remove_handler(handler, group) log.info('[{}] [UNLOAD] {}("{}") from group {} in "{}"'.format( self.name, type(handler).__name__, name, group, module_path)) count -= 1 except Exception: if warn_non_existent_functions: log.warning('[{}] [UNLOAD] Ignoring non-existent function "{}" from "{}"'.format( self.name, name, module_path)) if count > 0: log.info('[{}] Successfully loaded {} plugin{} from "{}"'.format( self.name, count, "s" if count > 1 else "", root)) else: log.warning('[%s] No plugin loaded from "%s"', self.name, root) async def handle_download(self, packet): file_id, directory, file_name, in_memory, file_size, progress, progress_args = packet os.makedirs(directory, exist_ok=True) if not in_memory else None temp_file_path = os.path.abspath(re.sub("\\\\", "/", os.path.join(directory, file_name))) + ".temp" file = BytesIO() if in_memory else open(temp_file_path, "wb") try: async for chunk in self.get_file(file_id, file_size, 0, 0, progress, progress_args): file.write(chunk) except BaseException as e: if not in_memory: file.close() os.remove(temp_file_path) if isinstance(e, asyncio.CancelledError): raise e return None else: if in_memory: file.name = file_name return file else: file.close() file_path = os.path.splitext(temp_file_path)[0] shutil.move(temp_file_path, file_path) return file_path async def get_file( self, file_id: FileId, file_size: int = 0, limit: int = 0, offset: int = 0, progress: Callable = None, progress_args: tuple = () ) -> Optional[AsyncGenerator[bytes, None]]: async with self.get_file_semaphore: file_type = file_id.file_type if file_type == FileType.CHAT_PHOTO: if file_id.chat_id > 0: peer = raw.types.InputPeerUser( user_id=file_id.chat_id, access_hash=file_id.chat_access_hash ) else: if file_id.chat_access_hash == 0: peer = raw.types.InputPeerChat( chat_id=-file_id.chat_id ) else: peer = raw.types.InputPeerChannel( channel_id=utils.get_channel_id(file_id.chat_id), access_hash=file_id.chat_access_hash ) location = 
raw.types.InputPeerPhotoFileLocation( peer=peer, photo_id=file_id.media_id, big=file_id.thumbnail_source == ThumbnailSource.CHAT_PHOTO_BIG ) elif file_type == FileType.PHOTO: location = raw.types.InputPhotoFileLocation( id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size ) else: location = raw.types.InputDocumentFileLocation( id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size ) current = 0 total = abs(limit) or (1 << 31) - 1 chunk_size = 1024 * 1024 offset_bytes = abs(offset) * chunk_size dc_id = file_id.dc_id session = Session( self, dc_id, await Auth(self, dc_id, await self.storage.test_mode()).create() if dc_id != await self.storage.dc_id() else await self.storage.auth_key(), await self.storage.test_mode(), is_media=True ) try: await session.start() if dc_id != await self.storage.dc_id(): exported_auth = await self.invoke( raw.functions.auth.ExportAuthorization( dc_id=dc_id ) ) await session.invoke( raw.functions.auth.ImportAuthorization( id=exported_auth.id, bytes=exported_auth.bytes ) ) r = await session.invoke( raw.functions.upload.GetFile( location=location, offset=offset_bytes, limit=chunk_size ), sleep_threshold=30 ) if isinstance(r, raw.types.upload.File): while True: chunk = r.bytes yield chunk current += 1 offset_bytes += chunk_size if progress: func = functools.partial( progress, min(offset_bytes, file_size) if file_size != 0 else offset_bytes, file_size, *progress_args ) if inspect.iscoroutinefunction(progress): await func() else: await self.loop.run_in_executor(self.executor, func) if len(chunk) < chunk_size or current >= total: break r = await session.invoke( raw.functions.upload.GetFile( location=location, offset=offset_bytes, limit=chunk_size ), sleep_threshold=30 ) elif isinstance(r, raw.types.upload.FileCdnRedirect): cdn_session = Session( self, r.dc_id, await Auth(self, r.dc_id, await self.storage.test_mode()).create(), await self.storage.test_mode(), is_media=True, is_cdn=True ) try: await cdn_session.start() while True: r2 = await cdn_session.invoke( raw.functions.upload.GetCdnFile( file_token=r.file_token, offset=offset_bytes, limit=chunk_size ) ) if isinstance(r2, raw.types.upload.CdnFileReuploadNeeded): try: await session.invoke( raw.functions.upload.ReuploadCdnFile( file_token=r.file_token, request_token=r2.request_token ) ) except VolumeLocNotFound: break else: continue chunk = r2.bytes # https://core.telegram.org/cdn#decrypting-files decrypted_chunk = aes.ctr256_decrypt( chunk, r.encryption_key, bytearray( r.encryption_iv[:-4] + (offset_bytes // 16).to_bytes(4, "big") ) ) hashes = await session.invoke( raw.functions.upload.GetCdnFileHashes( file_token=r.file_token, offset=offset_bytes ) ) # https://core.telegram.org/cdn#verifying-files for i, h in enumerate(hashes): cdn_chunk = decrypted_chunk[h.limit * i: h.limit * (i + 1)] CDNFileHashMismatch.check( h.hash == sha256(cdn_chunk).digest(), "h.hash == sha256(cdn_chunk).digest()" ) yield decrypted_chunk current += 1 offset_bytes += chunk_size if progress: func = functools.partial( progress, min(offset_bytes, file_size) if file_size != 0 else offset_bytes, file_size, *progress_args ) if inspect.iscoroutinefunction(progress): await func() else: await self.loop.run_in_executor(self.executor, func) if len(chunk) < chunk_size or current >= total: break except Exception as e: raise e finally: await cdn_session.stop() except newgram.StopTransmission: raise except Exception 
as e: log.exception(e) finally: await session.stop() def guess_mime_type(self, filename: str) -> Optional[str]: return self.mimetypes.guess_type(filename)[0] def guess_extension(self, mime_type: str) -> Optional[str]: return self.mimetypes.guess_extension(mime_type) class Cache: def __init__(self, capacity: int): self.capacity = capacity self.store = {} def __getitem__(self, key): return self.store.get(key, None) def __setitem__(self, key, value): if key in self.store: del self.store[key] self.store[key] = value if len(self.store) > self.capacity: for _ in range(self.capacity // 2 + 1): del self.store[next(iter(self.store))]
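# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module), based on the Client docstring
# and the set_parse_mode example above. The api_id/api_hash values are
# placeholders; in an application the class is normally imported from the
# package root ("from newgram import Client") rather than from this module.
#
#   app = Client("my_account", api_id=12345, api_hash="0123456789abcdef0123456789abcdef")
#
#   async def main():
#       async with app:                      # start()/stop() via __aenter__/__aexit__
#           await app.send_message("me", "Hello from Newgram!")
#
#   asyncio.run(main())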
PypiClean
/Comickaze-2.0.1b1.tar.gz/Comickaze-2.0.1b1/comickaze/Comickaze.py
from typing import List
import logging

import coloredlogs
import requests

from .Downloader import Downloader
from .objects import Suggestion, Comic, Chapter
from .util import soupify, create_session


class Comickaze:
    BASE_URL = "https://readcomicsonline.ru"

    def __init__(self, log_level: str = "ERROR"):
        """Comickaze instance

        Keyword Arguments:
            log_level {str} -- Log level (default: {"ERROR"})
        """
        self.log_level = log_level
        self.logger = logging.getLogger(__name__)
        coloredlogs.install(level=log_level, logger=self.logger)

        self.session = create_session()

    def search_comics(self, query: str) -> list:
        """Searches comics

        Arguments:
            query {str} -- Query

        Returns:
            list[Suggestion] -- Search results in the form of {Suggestion}
        """
        self.logger.info(f"Searching for {query}...")
        res = self.session.get("{0}/search".format(self.BASE_URL), params={
            "query": query
        })

        suggestions = res.json()["suggestions"]
        self.logger.info(f"Search done. Found {len(suggestions)} suggestions.")

        return [Suggestion(self, suggestion["value"], suggestion["data"]) for suggestion in suggestions]

    def get_comic(self, link: str) -> Comic:
        """Gets information about the comic at the given link

        Arguments:
            link {str} -- Link of the comic

        Returns:
            Comic -- Comic object
        """
        try:
            self.logger.info(f"Trying to access {link}")
            res = self.session.get(link)
        except:
            self.logger.error(f"Something went wrong accessing the page: {link}.")
            raise

        try:
            self.logger.info(f"Trying to parse the page...")
            soup = soupify(res.text)

            col = soup.find("div", attrs={"class": "col-sm-12"})
            list_container = col.find("div", attrs={"class": "list-container"})

            title = list_container.find("h2", attrs={"class": "listmanga-header"}).text.strip()
            self.logger.debug(f"Found title: {title}")

            comic = Comic(title, link)

            image = list_container.find("img", attrs={"img-responsive"})["src"]
            comic.image = "https://www." + image[2:]
            self.logger.debug(f"Found image: {comic.image}")

            info_box = col.find("dl", attrs={"class": "dl-horizontal"})
            d_tags = info_box.find_all("dt")
            d_data = info_box.find_all("dd")

            for i, tag in enumerate(d_tags):
                tag = tag.text.lower()
                data_text = d_data[i].text.strip()
                val = data_text

                if tag == "type":
                    comic.comic_type = data_text
                elif tag == "status":
                    comic.status = data_text
                elif tag == "other names":
                    comic.other_names = data_text
                elif tag == "author(s)":
                    _authors = d_data[i].find_all("a")
                    comic.authors = [a.text.strip() for a in _authors]
                    val = comic.authors
                elif tag == "date of release":
                    comic.year = data_text
                elif tag == "categories":
                    _categories = d_data[i].find_all("a")
                    comic.categories = [c.text.strip() for c in _categories]
                    val = comic.categories
                elif tag == "tags":
                    _tags = d_data[i].find_all("a")
                    comic.tags = [t.text.strip() for t in _tags]
                    val = comic.tags
                elif tag == "views":
                    try:
                        views = int(data_text)
                    except ValueError:
                        views = data_text
                    comic.views = views
                    val = views
                elif tag == "rating":
                    rating_div = d_data[i].find("div", attrs={"id": "item-rating"})
                    score = rating_div["data-score"]
                    try:
                        rating = float(score)
                    except ValueError:
                        rating = score
                    comic.rating = rating
                    val = rating

                self.logger.debug(f"Found {tag}: {val}")

            comic.summary = col.find("div", attrs={"class": "manga well"}).find("p").text.strip()
            self.logger.debug(f"Found summary: {comic.summary}")

            li_chapters = col.find("ul", attrs={"class": "chapters"}).find_all("li", attrs={"class": "volume-0"})
            comic.chapters = []

            for chapter in li_chapters:
                _anch = chapter.find("h5", attrs={"class": "chapter-title-rtl"}).find("a")
                chapter_title = _anch.text.strip()
                chapter_link = _anch["href"]

                try:
                    date = chapter.find("div", attrs={"class": "date-chapter-title-rtl"}).text.strip()
                except:
                    date = None

                comic.chapters.append(Chapter(self, chapter_title, chapter_link, comic, date=date))

            self.logger.info(f"Found {title} with {len(comic.chapters)} chapter(s).")

            return comic
        except:
            self.logger.error(f"Something went wrong parsing the page.")
            raise

    def get_chapter_pages(self, chapter: Chapter):
        """Gets the chapter's pages (the URLs of the page images).

        Arguments:
            chapter {Chapter} -- Chapter

        Returns:
            list -- List of image urls
        """
        link = chapter.link
        chapter_slug = link[link.rfind("/") + 1:]
        image_link_format = f"https://readcomicsonline.ru/uploads/manga/{chapter.comic.slug}/chapters/{chapter_slug}/"

        try:
            res = self.session.get(link)
        except:
            self.logger.error(f"Something went wrong accessing the page: {link}.")
            raise

        try:
            soup = soupify(res.text)

            pages_select = soup.find("select", attrs={"id": "page-list"})
            for option in pages_select.find_all("option"):
                # page images are served as zero-padded two-digit JPEGs, e.g. 01.jpg
                val = int(option["value"])
                s_val = str(val)
                if val < 10:
                    s_val = "0" + s_val
                chapter.pages.append(image_link_format + f"{s_val}.jpg")

            return chapter.pages
        except:
            self.logger.error(f"Something went wrong parsing the page.")
            raise

    def create_downloader(self, chapters: List[Chapter], number_of_threads=4, output_format="cbz", **kwargs) -> Downloader:
        """Wrapper function to create a Downloader object.

        Arguments:
            chapters {List[Chapter]} -- Chapters to be downloaded

        Keyword Arguments:
            number_of_threads {int} -- Number of threads to use when downloading (default: {4})
            output_format {str} -- Conversion format (default: {"cbz"})

        Returns:
            Downloader -- Downloader object
        """
        return Downloader(chapters, output_format=output_format, number_of_threads=number_of_threads,
                          log_level=self.log_level, **kwargs)
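# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module), pieced together from the
# methods above. The comic URL is a placeholder, and "from comickaze import
# Comickaze" assumes the class is re-exported from the package root.
#
#   from comickaze import Comickaze
#
#   ck = Comickaze(log_level="INFO")
#   suggestions = ck.search_comics("one piece")                      # list of Suggestion objects
#   comic = ck.get_comic("https://readcomicsonline.ru/comic/...")    # or a link taken from a suggestion
#   downloader = ck.create_downloader(comic.chapters, number_of_threads=4, output_format="cbz")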
PypiClean
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/model/interfaces/jetsoncam_BAK.py
import numpy as np import cv2, queue, threading from imswitch.imcommon.model import initLogger # https://stackoverflow.com/questions/33432426/importerror-no-module-named-queue # bufferless VideoCapture class VideoCapture: def __init__(self, name): self.cap = cv2.VideoCapture(0, cv2.CAP_V4L2) self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')) # camera parameters self.blacklevel = 0 self.exposure_time = 10 self.analog_gain = 0 self.pixel_format = "Mono8" self.SensorWidth = 1920 self.SensorHeight = 1080 self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.SensorWidth) self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.SensorHeight) #GSTREAMER does not work in conda cv2.VideoCapture(name, cv2.CAP_GSTREAMER) #self.cap.set(cv2.CAP_PROP_CONVERT_RGB, False); self.q = queue.Queue() t = threading.Thread(target=self._reader) t.daemon = True t.start() self._isopen = True # read frames as soon as they are available, keeping only most recent one def _reader(self): self._isopen = True while True: ret, frame = self.cap.read() if not ret: self._isopen = False break if not self.q.empty(): try: self.q.get_nowait() # discard previous (unprocessed) frame except Queue.Empty: pass self.q.put(frame) def isOpened(self): return self._isopen def release(self): self.cap.release() def read(self): return None, np.mean(self.q.get(), -1) class CameraJETSON: def __init__(self): super().__init__() self.__logger = initLogger(self, tryInheritParent=False) # many to be purged self.model = "JetsonCamera" # camera parameters self.blacklevel = 0 self.exposure_time = 10 self.analog_gain = 0 self.pixel_format = "Mono8" self.frame_id_last = 0 self.PreviewWidthRatio = 4 self.PreviewHeightRatio = 4 self.SensorWidth = 1920 self.SensorHeight = 1080 # self.camera = self.openCamera(self.SensorWidth, self.SensorHeight) #%% starting the camera import os # thhis is going to be suuper hacky, but opencv in an environemnt does not have gstreamer so we stream it through IP! os.system("/usr/bin/python3 /home/uc2/Downloads/imswitch/imswitch/imcontrol/model/interfaces/jetsonstreamer/jetsonstreamer.py") self.camera = self.openCamera(self.SensorWidth, self.SensorHeight) def start_live(self): # check if camera is open if not self.camera_is_open: self.openCamera(self.SensorWidth, self.SensorHeight) def stop_live(self): self.camera.release() self.camera_is_open = False def suspend_live(self): self.camera.release() self.camera_is_open = False def prepare_live(self): pass def close(self): self.camera.release() self.camera_is_open = False def set_value(self ,feature_key, feature_value): # Need to change acquisition parameters? try: feature = self.camera.feature(feature_key) feature.value = feature_value except Exception as e: self.__logger.error(e) self.__logger.error(feature_key) self.__logger.debug("Value not available?") def set_exposure_time(self,exposure_time): self.exposure_time = exposure_time self.set_value("ExposureTime", self.exposure_time*1000) def set_analog_gain(self,analog_gain): self.analog_gain = analog_gain self.set_value("Gain", self.analog_gain) def set_blacklevel(self,blacklevel): self.blacklevel = blacklevel self.set_value("BlackLevel", blacklevel) def set_pixel_format(self,format): self.pixelformat = format self.set_value("PixelFormat", format) def getLast(self): # get frame and save # frame_norm = cv2.normalize(self.frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) #TODO: Napari only displays 8Bit? 
return cv2.resize(self.camera.read()[1], dsize=None, fx=1/self.PreviewWidthRatio, fy=1/self.PreviewHeightRatio, interpolation= cv2.INTER_LINEAR) def getLastChunk(self): return self.camera.read()[1] def setROI(self, hpos, vpos, hsize, vsize): hsize = max(hsize, 256) # minimum ROI size vsize = max(vsize, 256) # minimum ROI size self.__logger.debug( f'{self.model}: setROI started with {hsize}x{vsize} at {hpos},{vpos}.') try: image_Height = self.camera.feature("Height") image_Width = self.camera.feature("Width") image_Height.value = hsize image_Width.value = vsize self.shape = (image_Width.value,image_Height.value) except Exception as e: self.__logger.error("Setting the ROI") self.__logger.error(e) def setPropertyValue(self, property_name, property_value): # Check if the property exists. if property_name == "gain": self.set_analog_gain(property_value) elif property_name == "exposure": self.set_exposure_time(property_value) elif property_name == "blacklevel": self.set_blacklevel(property_value) elif property_name == "pixel_format": self.stop_live() self.set_pixel_format(property_value) self.start_live() else: self.__logger.warning(f'Property {property_name} does not exist') return False return property_value def getPropertyValue(self, property_name): # Check if the property exists. if property_name == "gain": property_value = self.camera.gain elif property_name == "exposure": property_value = self.camera.exposure elif property_name == "blacklevel": property_value = self.camera.blacklevel elif property_name == "image_width": property_value = self.camera.SensorWidth elif property_name == "image_height": property_value = self.camera.SensorHeight elif property_name == "pixel_format": property_value = self.camera.PixelFormat else: self.__logger.warning(f'Property {property_name} does not exist') return False return property_value def openPropertiesGUI(self): pass def openCamera(self, width, height): # open camera camera = VideoCapture(self.gstreamer_pipeline(exposuretime=self.exposure_time*100000, capture_width=width, capture_height = height, display_width=width, display_height=height, flip_method=0))#, cv2.CAP_GSTREAMER) self.__logger.debug("Camera is open") # let the camera warm up for i in range(5): _, img = camera.read() self.__logger.debug("Camera is warmed up") self.SensorHeight = img.shape[0] self.SensorWidth = img.shape[1] self.shape = (self.SensorWidth,self.SensorHeight) self.camera_is_open = True return camera # gstreamer pipeline for the jetson IMX219 camera def gstreamer_pipeline(self, capture_width=640, capture_height=480, display_width=640, display_height=480, exposuretime=1, framerate=120, flip_method=0 ): #gst-launch-1.0 # nvarguscamerasrc awblock=true aelock=false exposuretimerange="100000 100000" gainrange="1 1" ispdigitalgainrange="1 1" ! 'video/x-raw(memory:NVMM),width=1920,height=1080,format=NV12' ! nvoverlaysink # nvarguscamerasrc awblock=true aelock=false width=(int)640, height=(int)480, exposuretimerange="(int)100000 (int)100000" gainrange="1 1" ispdigitalgainrange="1 1" format=(string)NV12, framerate=(fraction)120/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)640, height=(int)480, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsinkvideo/x-raw(memory:NVMM), exposuretime = int(exposuretime*100000) return ( 'nvarguscamerasrc ' 'exposuretimerange="%d %d" gainrange="1 1" ispdigitalgainrange="1 1" ' 'awblock=true aelock=true ' '! 
video/x-raw(memory:NVMM), ' #"width=(int)%d, height=(int)%d, " 'width=(int)%d, height=(int)%d, ' #" ##exposurecompensation=-2, aelock=true, " #exposuretimerange=34000 35873300, "format=(string)NV12, framerate=(fraction)%d/1 ! " "nvvidconv flip-method=%d ! " "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! " "videoconvert ! " "video/x-raw, format=(string)BGR ! appsink" % ( exposuretime, exposuretime, capture_width, capture_height, framerate, flip_method, display_width, display_height, ) )
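# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module). This backup ("_BAK") module
# launches an external streamer via os.system() and reads frames with OpenCV,
# so the snippet below is purely illustrative and assumes the surrounding
# ImSwitch environment is available.
#
#   cam = CameraJETSON()
#   cam.setPropertyValue("exposure", 50)    # forwarded to set_exposure_time()
#   cam.setPropertyValue("gain", 1)         # forwarded to set_analog_gain()
#   preview = cam.getLast()                 # downscaled preview frame
#   full = cam.getLastChunk()               # full-resolution frame
#   cam.close()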
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/mode/rust/rust.js
(function(mod) { if (typeof exports == "object" && typeof module == "object") // CommonJS mod(require("../../lib/codemirror"), require("../../addon/mode/simple")); else if (typeof define == "function" && define.amd) // AMD define(["../../lib/codemirror", "../../addon/mode/simple"], mod); else // Plain browser env mod(CodeMirror); })(function(CodeMirror) { "use strict"; CodeMirror.defineSimpleMode("rust",{ start: [ // string and byte string {regex: /b?"/, token: "string", next: "string"}, // raw string and raw byte string {regex: /b?r"/, token: "string", next: "string_raw"}, {regex: /b?r#+"/, token: "string", next: "string_raw_hash"}, // character {regex: /'(?:[^'\\]|\\(?:[nrt0'"]|x[\da-fA-F]{2}|u\{[\da-fA-F]{6}\}))'/, token: "string-2"}, // byte {regex: /b'(?:[^']|\\(?:['\\nrt0]|x[\da-fA-F]{2}))'/, token: "string-2"}, {regex: /(?:(?:[0-9][0-9_]*)(?:(?:[Ee][+-]?[0-9_]+)|\.[0-9_]+(?:[Ee][+-]?[0-9_]+)?)(?:f32|f64)?)|(?:0(?:b[01_]+|(?:o[0-7_]+)|(?:x[0-9a-fA-F_]+))|(?:[0-9][0-9_]*))(?:u8|u16|u32|u64|i8|i16|i32|i64|isize|usize)?/, token: "number"}, {regex: /(let(?:\s+mut)?|fn|enum|mod|struct|type|union)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/, token: ["keyword", null, "def"]}, {regex: /(?:abstract|alignof|as|async|await|box|break|continue|const|crate|do|dyn|else|enum|extern|fn|for|final|if|impl|in|loop|macro|match|mod|move|offsetof|override|priv|proc|pub|pure|ref|return|self|sizeof|static|struct|super|trait|type|typeof|union|unsafe|unsized|use|virtual|where|while|yield)\b/, token: "keyword"}, {regex: /\b(?:Self|isize|usize|char|bool|u8|u16|u32|u64|f16|f32|f64|i8|i16|i32|i64|str|Option)\b/, token: "atom"}, {regex: /\b(?:true|false|Some|None|Ok|Err)\b/, token: "builtin"}, {regex: /\b(fn)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/, token: ["keyword", null ,"def"]}, {regex: /#!?\[.*\]/, token: "meta"}, {regex: /\/\/.*/, token: "comment"}, {regex: /\/\*/, token: "comment", next: "comment"}, {regex: /[-+\/*=<>!]+/, token: "operator"}, {regex: /[a-zA-Z_]\w*!/,token: "variable-3"}, {regex: /[a-zA-Z_]\w*/, token: "variable"}, {regex: /[\{\[\(]/, indent: true}, {regex: /[\}\]\)]/, dedent: true} ], string: [ {regex: /"/, token: "string", next: "start"}, {regex: /(?:[^\\"]|\\(?:.|$))*/, token: "string"} ], string_raw: [ {regex: /"/, token: "string", next: "start"}, {regex: /[^"]*/, token: "string"} ], string_raw_hash: [ {regex: /"#+/, token: "string", next: "start"}, {regex: /(?:[^"]|"(?!#))*/, token: "string"} ], comment: [ {regex: /.*?\*\//, token: "comment", next: "start"}, {regex: /.*/, token: "comment"} ], meta: { dontIndentStates: ["comment"], electricInput: /^\s*\}$/, blockCommentStart: "/*", blockCommentEnd: "*/", lineComment: "//", fold: "brace" } }); CodeMirror.defineMIME("text/x-rustsrc", "rust"); CodeMirror.defineMIME("text/rust", "rust"); });
PypiClean
/IQM_Vis-0.2.5.80-py3-none-any.whl/IQM_Vis/UI/experiment_mode.py
import os import random import threading import warnings import time import numpy as np import pandas as pd from PyQt6.QtWidgets import (QMainWindow, QHBoxLayout, QVBoxLayout, QTabWidget, QApplication, QPushButton, QLabel, QMessageBox) from PyQt6.QtCore import Qt, pyqtSignal, pyqtSlot, QObject, QThread from PyQt6.QtGui import QShortcut, QKeySequence import IQM_Vis from IQM_Vis.UI.custom_widgets import ClickLabel from IQM_Vis.UI import utils from IQM_Vis.utils import gui_utils, plot_utils, image_utils class make_experiment(QMainWindow): saved_experiment = pyqtSignal(str) reset_clicked_image = pyqtSignal(dict) def __init__(self, checked_transformation_params, data_store, image_display_size, rgb_brightness, display_brightness, default_save_dir=IQM_Vis.utils.save_utils.DEFAULT_SAVE_DIR, image_preprocessing='None', image_postprocessing='None', checked_metrics={}): super().__init__() self.checked_transformation_params = checked_transformation_params if self.checked_transformation_params == {}: return self.checked_metrics = checked_metrics self.data_store = data_store self.image_display_size = image_display_size self.rgb_brightness = rgb_brightness self.display_brightness = display_brightness self.default_save_dir = default_save_dir self.processing = {'pre': image_preprocessing, 'post': image_postprocessing} self.clicked_event = threading.Event() self.able_to_click = False self.image_change_worker = reset_image_widget_to_black() self.image_change_worker.completed.connect(self.click_completed) self.image_worker_thread = QThread() self.reset_clicked_image.connect(self.image_change_worker.change_to_solid) self.image_change_worker.moveToThread(self.image_worker_thread) self.image_worker_thread.start() self.stop_event = threading.Event() self.saved = False self.quit_experiment = False self.get_all_images() self._init_experiment_window_widgets() self.get_metric_scores() self.experiment_layout() self.setCentralWidget(self.experiments_tab) self.setWindowTitle('Experiment') # move to centre of the screen qr = self.frameGeometry() cp = self.screen().availableGeometry().center() qr.moveCenter(cp) self.move(qr.topLeft()) self.show_all_images() def closeEvent(self, event): # Ask for confirmation if not saved if not self.saved: answer = QMessageBox.question(self, "Confirm Exit...", "Are you sure you want to exit?\nAll unsaved data will be lost.", QMessageBox.StandardButton.No | QMessageBox.StandardButton.Yes, QMessageBox.StandardButton.Yes) else: answer = QMessageBox.StandardButton.Yes event.ignore() if answer == QMessageBox.StandardButton.Yes: self.quit_experiment = True if hasattr(self, 'range_worker'): self.range_worker.stop() if hasattr(self, 'image_worker_thread'): self.image_change_worker.stop() self.image_worker_thread.quit() self.image_worker_thread.wait() self.stop_event.set() self.clicked_event.set() event.accept() def quit(self): self.close() def show_all_images(self, tab='setup'): self.widget_experiments[tab]['images'].axes.axis('off') rows = int(len(self.experiment_transforms)**0.5) cols = int(np.ceil(len(self.experiment_transforms)/rows)) for i, trans in enumerate(self.experiment_transforms): ax = self.widget_experiments[tab]['images'].figure.add_subplot( rows, cols, i+1) ax.imshow(image_utils.calibrate_brightness( trans['image'], self.rgb_brightness, self.display_brightness, ubyte=False)) ax.axis('off') ax.set_title(make_name_for_trans(trans), fontsize=6) # self.widget_experiments[tab]['images'].figure.tight_layout() # time.sleep(5) # QApplication.processEvents() def get_all_images(self): ''' 
save image name ''' self.image_name = self.data_store.get_reference_image_name() ''' load all transformed images and sort them via MSE ''' # get all the transform values self.experiment_trans_params = plot_utils.get_all_single_transform_params( self.checked_transformation_params, num_steps='from_dict') # remove any params with value 0 self.experiment_trans_params = [ x for x in self.experiment_trans_params if not x[list(x.keys())[0]] == 0] # save the experiment ordering before reordering (for saving to csv col ordering) self.original_params_order = [] for single_trans in self.experiment_trans_params: trans_name = list(single_trans.keys())[0] param = single_trans[trans_name] data = {'transform_name': trans_name, 'transform_value': param} self.original_params_order.append(make_name_for_trans(data)) # REFERENCE image self.ref_image = self.data_store.get_reference_image() if hasattr(self.data_store, 'get_reference_unprocessed'): self.ref_image_unprocessed = self.data_store.get_reference_unprocessed() # get MSE for experiments to get a rough sorting mses = [] mse = IQM_Vis.IQMs.MSE() for trans in self.experiment_trans_params: mses.append( mse(self.ref_image, self.get_single_transform_im(trans))) # put median MSE at the end (best for quick sort) self.experiment_trans_params = sort_list( self.experiment_trans_params, mses) # sort array # take median out and get random shuffle for the rest median = self.experiment_trans_params.pop( len(self.experiment_trans_params)//2) random.shuffle(self.experiment_trans_params) self.experiment_trans_params.append(median) # load all images self.experiment_transforms = [] for single_trans in self.experiment_trans_params: trans_name = list(single_trans.keys())[0] param = single_trans[trans_name] img = self.get_single_transform_im(single_trans) data = {'transform_name': trans_name, 'transform_value': param, 'image': img} self.experiment_transforms.append(data) self.calc_max_comparisons(num_images=len(self.experiment_transforms)) def calc_max_comparisons(self, num_images): # calc expected number of comparisons - # http://homepages.math.uic.edu/~leon/cs-mcs401-r07/handouts/quicksort-continued.pdf self.min_expected_comps = num_images * np.log(num_images) self.max_expected_comps = 1.39 * num_images * np.log(num_images) def get_metric_scores(self): '''get IQM scores to save alongside the experiment for plotting/analysis purposes''' IQM_scores = {} for data in self.experiment_transforms: score_dict = self.data_store.get_metrics(transformed_image=data['image'], metrics_to_use=self.checked_metrics) scores = [] metrics = [] for name, score in score_dict.items(): metrics.append(name) scores.append(float(score)) IQM_scores[make_name_for_trans(data)] = scores IQM_scores['IQM'] = metrics self.IQM_scores_df = pd.DataFrame.from_dict(IQM_scores) self.IQM_scores_df.set_index('IQM', inplace=True) def _init_experiment_window_widgets(self): self.widget_experiments = {'exp': {}, 'preamble': {}, 'setup': {}, 'final':{}} ''' setup tab ''' self.widget_experiments['setup']['start_button'] = QPushButton( 'Setup', self) self.widget_experiments['setup']['start_button'].clicked.connect(self.setup_experiment) self.widget_experiments['setup']['quit_button'] = QPushButton('Quit', self) self.widget_experiments['setup']['quit_button'].clicked.connect(self.quit) QShortcut(QKeySequence("Ctrl+Q"), self.widget_experiments['setup']['quit_button'], self.quit) self.widget_experiments['setup']['images'] = gui_utils.MplCanvas(size=None) self.widget_experiments['setup']['text'] = QLabel(self) 
self.widget_experiments['setup']['text'].setText(f''' Experiment to be setup with the above images using the settings: Save folder: {self.default_save_dir} Image Display Size: {self.image_display_size} Image Calibration: Max RGB Brightness: {self.rgb_brightness} Max Display Brightness: {self.display_brightness} Expected Number of Comparisons: {int(self.min_expected_comps)} MAX Expected Number of Comparisons: {int(self.max_expected_comps)} Click the Setup button to setup up the experiment and hand over to the test subject. ''') # self.widget_experiments['setup']['text'].setAlignment( # Qt.AlignmentFlag.AlignCenter) ''' info tab ''' self.widget_experiments['preamble']['text'] = QLabel(self) self.widget_experiments['preamble']['text'].setText(''' For this experiment you will be shown a reference image and two similar images. You need to click on the image (A or B) which you believe to be most similar to the reference image. When you are ready, click the Start button to begin the experiment ''') self.running_experiment = False self.widget_experiments['preamble']['start_button'] = QPushButton('Start', self) self.widget_experiments['preamble']['start_button'].clicked.connect(self.toggle_experiment) self.widget_experiments['preamble']['quit_button'] = QPushButton('Quit', self) self.widget_experiments['preamble']['quit_button'].clicked.connect( self.quit) QShortcut(QKeySequence("Ctrl+Q"), self.widget_experiments['preamble']['quit_button'], self.quit) ''' experiment tab ''' self.widget_experiments['exp']['info'] = QLabel( 'Click on which image, A or B, is most similar to the reference image', self) for image in ['Reference', 'A', 'B']: self.widget_experiments['exp'][image] = {} self.widget_experiments['exp'][image]['data'] = ClickLabel(image) self.widget_experiments['exp'][image]['data'].setAlignment(Qt.AlignmentFlag.AlignCenter) # image label self.widget_experiments['exp'][image]['label'] = QLabel(image, self) self.widget_experiments['exp'][image]['label'].setAlignment(Qt.AlignmentFlag.AlignCenter) self.widget_experiments['exp']['A']['data'].clicked.connect(self.clicked_image) self.widget_experiments['exp']['B']['data'].clicked.connect(self.clicked_image) self.widget_experiments['exp']['quit_button'] = QPushButton('Quit', self) self.widget_experiments['exp']['quit_button'].clicked.connect(self.quit) QShortcut(QKeySequence("Ctrl+Q"), self.widget_experiments['exp']['quit_button'], self.quit) ''' finish tab ''' self.widget_experiments['final']['order_text'] = QLabel( 'Experiment Sorting Order:', self) self.widget_experiments['final']['images'] = gui_utils.MplCanvas(size=None) self.widget_experiments['final']['quit_button'] = QPushButton('Quit', self) self.widget_experiments['final']['quit_button'].clicked.connect( self.quit) QShortcut(QKeySequence("Ctrl+Q"), self.widget_experiments['final']['quit_button'], self.quit) self.widget_experiments['final']['save_label'] = QLabel('Not saved yet', self) def experiment_layout(self): ''' setup ''' experiment_text = QVBoxLayout() experiment_text.addWidget(self.widget_experiments['setup']['text']) experiment_setup_buttons = QHBoxLayout() experiment_setup_buttons.addWidget( self.widget_experiments['setup']['start_button']) experiment_setup_buttons.addWidget( self.widget_experiments['setup']['quit_button']) experiment_text.addLayout(experiment_setup_buttons) experiment_mode_setup = QVBoxLayout() experiment_mode_setup.addWidget(self.widget_experiments['setup']['images']) experiment_mode_setup.addLayout(experiment_text) 
experiment_mode_setup.setAlignment(Qt.AlignmentFlag.AlignCenter) experiment_mode_setup.addStretch() ''' info ''' experiment_info_buttons = QHBoxLayout() experiment_info_buttons.addWidget( self.widget_experiments['preamble']['start_button']) experiment_info_buttons.addWidget( self.widget_experiments['preamble']['quit_button']) experiment_mode_info = QVBoxLayout() experiment_mode_info.addWidget( self.widget_experiments['preamble']['text']) experiment_mode_info.setAlignment(Qt.AlignmentFlag.AlignCenter) experiment_mode_info.addLayout(experiment_info_buttons) ''' experiment ''' info = QVBoxLayout() info.addWidget(self.widget_experiments['exp']['info']) info.setAlignment(Qt.AlignmentFlag.AlignCenter) quit = QVBoxLayout() quit.addWidget(self.widget_experiments['exp']['quit_button']) quit.setAlignment(Qt.AlignmentFlag.AlignCenter) layouts = [] for im in ['A', 'Reference', 'B']: _layout = QVBoxLayout() for _, widget in self.widget_experiments['exp'][im].items(): _layout.addWidget(widget) _layout.setAlignment(Qt.AlignmentFlag.AlignTop) layouts.append(_layout) # add images to h box experiment_images = QHBoxLayout() for layout in layouts: experiment_images.addLayout(layout) experiment_images.setAlignment(Qt.AlignmentFlag.AlignTop) run_experiment = QVBoxLayout() run_experiment.addLayout(info) run_experiment.addLayout(experiment_images) run_experiment.addLayout(quit) run_experiment.setAlignment(Qt.AlignmentFlag.AlignCenter) ''' finished ''' finish_experiment = QVBoxLayout() finish_experiment.addWidget(self.widget_experiments['final']['order_text']) finish_experiment.addWidget(self.widget_experiments['final']['images']) finish_experiment.addWidget(self.widget_experiments['final']['save_label']) finish_experiment.addWidget(self.widget_experiments['final']['quit_button']) finish_experiment.setAlignment(Qt.AlignmentFlag.AlignCenter) finish_experiment.addStretch() self.experiments_tab = QTabWidget() for tab_layout, tab_name in zip([experiment_mode_setup, experiment_mode_info, run_experiment, finish_experiment], ['setup', 'info', 'run', 'finish']): utils.add_layout_to_tab(self.experiments_tab, tab_layout, tab_name) # experiment_mode_layout = QVBoxLayout() # experiment_mode_layout.addWidget(self.experiments_tab) # return experiment_mode_layout ''' experiment running functions''' def setup_experiment(self): self.experiments_tab.setCurrentIndex(1) self.experiments_tab.setTabEnabled(0, False) self.experiments_tab.setTabEnabled(2, False) self.experiments_tab.setTabEnabled(3, False) def toggle_experiment(self): if self.running_experiment: self.reset_experiment() self.experiments_tab.setTabEnabled(0, True) self.experiments_tab.setTabEnabled(1, True) self.running_experiment = False # self.widget_experiments['preamble']['start_button'].setText('Start') else: self.experiments_tab.setTabEnabled(2, True) self.start_experiment() self.experiments_tab.setTabEnabled(0, False) self.experiments_tab.setTabEnabled(1, False) # self.widget_experiments['preamble']['start_button'].setText('Reset') self.running_experiment = True def reset_experiment(self): self.experiments_tab.setCurrentIndex(1) self.init_style('light') def start_experiment(self): self.init_style('dark') self.experiments_tab.setCurrentIndex(2) # Display reference image gui_utils.change_im(self.widget_experiments['exp']['Reference']['data'], self.ref_image, resize=self.image_display_size, rgb_brightness=self.rgb_brightness, display_brightness=self.display_brightness) # get user sorting self.sorting_thread = threading.Thread(target=self.quick_sort) 
self.sorting_thread.start() # self.quick_sort(0, len(self.experiment_transforms)-1) def finish_experiment(self): self.experiments_tab.setTabEnabled(3, True) self.show_all_images(tab='final') self.init_style('light') self.experiments_tab.setCurrentIndex(3) # self.experiments_tab.setTabEnabled(2, False) self.save_experiment() if self.saved == True: self.widget_experiments['final']['save_label'].setText(f'Saved to {self.default_save_dir}') else: self.widget_experiments['final']['save_label'].setText(f'Save failed to {self.default_save_dir}') def save_experiment(self): # get the current transform functions trans_funcs = {} for single_trans in self.experiment_trans_params: trans_name = list(single_trans.keys())[0] trans_funcs[trans_name] = self.checked_transformation_params[trans_name]['function'] # make the experiment directory self.default_save_dir = os.path.join( self.default_save_dir, self.image_name) # get a unique directory (same image with diff trans need a new dir) i = 1 unique_dir_found = False new_dir = True while unique_dir_found == False: exp_save_dir = f'{self.default_save_dir}-experiment-{i}' if os.path.exists(exp_save_dir): # get transform funcs and params exp_trans_params = IQM_Vis.utils.save_utils.load_obj( os.path.join(exp_save_dir, 'transforms', 'transform_params.pkl')) exp_trans_funcs = IQM_Vis.utils.save_utils.load_obj( os.path.join(exp_save_dir, 'transforms', 'transform_functions.pkl')) # get image processing saved params processing_file = IQM_Vis.utils.save_utils.get_image_processing_file(exp_save_dir) procesing_same = False if os.path.exists(processing_file): processing = IQM_Vis.utils.save_utils.load_json_dict(processing_file) if processing == self.processing: procesing_same = True # check if experiment is the same if (exp_trans_params == self.original_params_order) and (trans_funcs == exp_trans_funcs) and procesing_same: self.default_save_dir = exp_save_dir unique_dir_found = True new_dir = False else: i += 1 else: self.default_save_dir = exp_save_dir unique_dir_found = True # make all the dirs and subdirs os.makedirs(self.default_save_dir, exist_ok=True) os.makedirs(os.path.join(self.default_save_dir, 'images'), exist_ok=True) os.makedirs(os.path.join(self.default_save_dir, 'transforms'), exist_ok=True) # save experiment images if not os.path.exists(IQM_Vis.utils.save_utils.get_original_image_file(self.default_save_dir)): image_utils.save_image(self.ref_image, IQM_Vis.utils.save_utils.get_original_image_file(self.default_save_dir)) if not os.path.exists(IQM_Vis.utils.save_utils.get_original_unprocessed_image_file(self.default_save_dir)): if hasattr(self, 'ref_image_unprocessed'): image_utils.save_image(self.ref_image_unprocessed, IQM_Vis.utils.save_utils.get_original_unprocessed_image_file(self.default_save_dir)) if new_dir == True: for trans in self.experiment_transforms: image_utils.save_image( trans['image'], os.path.join(self.default_save_dir, 'images', f'{make_name_for_trans(trans)}.png')) # save the transformations IQM_Vis.utils.save_utils.save_obj( IQM_Vis.utils.save_utils.get_transform_params_file(self.default_save_dir), self.original_params_order) IQM_Vis.utils.save_utils.save_obj( IQM_Vis.utils.save_utils.get_transform_functions_file(self.default_save_dir), dict(sorted(trans_funcs.items()))) # save the image pre/post processing options IQM_Vis.utils.save_utils.save_json_dict( IQM_Vis.utils.save_utils.get_image_processing_file(self.default_save_dir), self.processing) # save the experiment results exp_order = [] for trans in self.experiment_transforms: 
exp_order.append(make_name_for_trans(trans)) csv_file = IQM_Vis.utils.save_utils.save_experiment_results( self.original_params_order, exp_order, self.default_save_dir, self.times_taken, self.IQM_scores_df) self.saved = True self.saved_experiment.emit(csv_file) ''' sorting algorithm resource: https://www.geeksforgeeks.org/quick-sort/''' def quick_sort(self): self.times_taken = [] self._quick_sort(0, len(self.experiment_transforms)-1) if self.quit_experiment != True: self.finish_experiment() # for trans in self.experiment_transforms: # print(trans['transform_name'], trans['transform_value']) def _quick_sort(self, low, high): if low < high: # Find pivot elements such that element smaller than pivot are on the # left and elements greater than pivot are on the right pi = self.partition(low, high) if self.stop_event.is_set(): return # Recursive call on the left of pivot self._quick_sort(low, pi - 1) # Recursive call on the right of pivot self._quick_sort(pi + 1, high) def partition(self, low, high): ''' given an unsorted partition of the array between low and high, order elements lower than a given pivot point to the left and higher to the right''' # Choose the end element as pivot self.high = high self.low = low self.current_comparision = low self.pivot = self.experiment_transforms[self.high] # Pointer for greater element self.comp_pointer = low - 1 # Traverse through all elements and compare each element with pivot (by user clicking) while True: time0 = time.time() # randomly assign to image A or B ims_to_display = [ self.experiment_transforms[self.current_comparision], self.pivot] random.shuffle(ims_to_display) # display the images self.change_experiment_images(A_trans=ims_to_display[0], B_trans=ims_to_display[1]) # wait for image to be clicked self.clicked_event.clear() self.able_to_click = True self.clicked_event.wait() if self.stop_event.is_set(): return if self.less_than_pivot == True: # if self.experiment_transforms[self.current_comparision]['brightness'] <= self.pivot['brightness']: # If element smaller than pivot is found swap it with the greater element pointed by i self.comp_pointer += 1 # Swapping element at i with element at j self.swap_inds(self.comp_pointer, self.current_comparision) self.current_comparision += 1 self.times_taken.append(time.time()-time0) if self.current_comparision == self.high: break # Swap the pivot element with the greater element specified by i self.swap_inds(self.comp_pointer+1, self.high) # Return the position from where partition is done return self.comp_pointer + 1 def clicked_image(self, image_name, widget_name): if self.able_to_click == False: return self.able_to_click = False # get comparison to pivot trans_str = image_name[len(self.image_name)+1:] if trans_str != make_name_for_trans(self.pivot): # lower value # If element smaller than pivot is found swap it with the greater element pointed by i self.less_than_pivot = True else: self.less_than_pivot = False # make clicked image black to show user data = {'image_display_size': self.image_display_size, 'widget': self.widget_experiments['exp'][widget_name]['data']} self.reset_clicked_image.emit(data) # change to black image, after x amount of time will change to experimetn image def click_completed(self): # unlock the wait self.clicked_event.set() def swap_inds(self, i, j): (self.experiment_transforms[i], self.experiment_transforms[j]) = (self.experiment_transforms[j], self.experiment_transforms[i]) def get_single_transform_im(self, single_trans_dict): trans_name = list(single_trans_dict)[0] return 
image_utils.get_transform_image(self.data_store, {trans_name: self.checked_transformation_params[trans_name]}, single_trans_dict) def change_experiment_images(self, A_trans, B_trans): A = A_trans['image'] B = B_trans['image'] gui_utils.change_im(self.widget_experiments['exp']['A']['data'], A, resize=self.image_display_size, rgb_brightness=self.rgb_brightness, display_brightness=self.display_brightness) self.widget_experiments['exp']['A']['data'].setObjectName(f'{self.image_name}-{make_name_for_trans(A_trans)}') gui_utils.change_im(self.widget_experiments['exp']['B']['data'], B, resize=self.image_display_size, rgb_brightness=self.rgb_brightness, display_brightness=self.display_brightness) self.widget_experiments['exp']['B']['data'].setObjectName(f'{self.image_name}-{make_name_for_trans(B_trans)}') ''' UI ''' def init_style(self, style='light', css_file=None): if css_file == None: dir = os.path.dirname(os.path.abspath(__file__)) # css_file = os.path.join(dir, 'style-light.css') css_file = os.path.join(dir, f'style-{style}.css') if os.path.isfile(css_file): with open(css_file, 'r') as file: self.setStyleSheet(file.read()) else: warnings.warn('Cannot load css style sheet - file not found') class reset_image_widget_to_black(QObject): ''' change clicked image to black and pause ''' completed = pyqtSignal(float) def __init__(self, time=0.1): super().__init__() self.running = True self.time = time self.black_array = np.zeros([100, 100, 3]) # self.black_array[:, :, 1] = 1 # blue @pyqtSlot(dict) def change_to_solid(self, data): t_start = time.time() image_display_size = data['image_display_size'] widget = data['widget'] # make clicked image black to show user gui_utils.change_im(widget, self.black_array, resize=image_display_size) # pause for half the time needed (will use the loop below to wait for full time and also see if image has turned black yet) time.sleep(self.time/2) m = 1 t_time = time.time() - t_start # wait until black image is shown while (not (m == 0 or m == 63.75)) and (t_time < self.time): # 63.75 is when 4th channel is all ones , rest are 0 time.sleep(self.time/10) # get the image data from the widget to check if it's been made black yet pixmap = widget.pixmap() q_img = pixmap.toImage() ptr = q_img.bits() ptr = q_img.constBits() ptr.setsize(q_img.sizeInBytes()) np_img = np.array(ptr, copy=False).reshape( q_img.height(), q_img.width(), 4) m = np_img.mean() # calc if the time is up yet t_time = time.time() - t_start # all complete so send signal self.completed.emit(1.0) def stop(self): self.running = False def __del__(self): # close app upon garbage collection self.stop() def sort_list(list1, list2): # sort list1 based on list2 inds = np.argsort(list2) sorted_list1 = [] for i in inds: sorted_list1.append(list1[i]) return sorted_list1 def make_name_for_trans(trans): splitter = '-----' return f"{trans['transform_name']}{splitter}{trans['transform_value']}"
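
# Illustrative examples for the helpers above (values are made up):
# make_name_for_trans({'transform_name': 'blur', 'transform_value': 2})
# returns 'blur-----2', i.e. transform name and value joined by the '-----'
# splitter that is also used for the saved image file names.
# For 12 transformed images, calc_max_comparisons() sets
# min_expected_comps = 12 * ln(12) ~ 29.8 and
# max_expected_comps = 1.39 * 12 * ln(12) ~ 41.4, the quicksort bounds
# reported on the experiment setup tab.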
PypiClean
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/reading_import_profile.py
from msrest.serialization import Model class ReadingImportProfile(Model): """ReadingImportProfile. All required parameters must be populated in order to send to Azure. :param channel_interval_in_seconds: The interval of the readings in seconds :type channel_interval_in_seconds: int :param delimiter: Required. The string that represents how the file contents are delimited. Valid options are "\\t" for tab, " " for space and "," for comma. <span class='property-internal'>Required</span> <span class='property-internal'>Must be between 1 and 2 characters</span> <span class='property-internal'>One of , , , </span> :type delimiter: str :param number_of_header_rows: Required. Number of header rows before the data begins <span class='property-internal'>Required</span> <span class='property-internal'>Must be between 0 and 2147483647</span> :type number_of_header_rows: int :param timestamp_column_number: The number of the column that holds the timestamp <span class='property-internal'>Must be between 1 and 2147483647</span> :type timestamp_column_number: int :param timestamp_format: The format for the timestamp of the readings. An example is MM/dd/yyyy mm:hh:ss:zzz :type timestamp_format: str :param date_column_number: The number of the column that holds the date <span class='property-internal'>Must be between 1 and 2147483647</span> :type date_column_number: int :param time_column_number: The number of the column that holds the time <span class='property-internal'>Must be between 1 and 2147483647</span> :type time_column_number: int :param date_format: The format for the date of the readings. An example is MM/dd/yyyy :type date_format: str :param time_format: The format for the time of the readings. An example is mm:hh:ss:zzz :type time_format: str :param time_zone_id: The time zone for the readings :type time_zone_id: int :param meter_import_id_column_number: The number of the column that holds the meter import identifier <span class='property-internal'>Must be between 1 and 2147483647</span> :type meter_import_id_column_number: int :param channel_import_id_column_number: The number of the column that holds the channel import identifier <span class='property-internal'>Must be between 1 and 2147483647</span> :type channel_import_id_column_number: int :param number_of_columns: Required. 
The minimum number of columns in the import sheet <span class='property-internal'>Required</span> <span class='property-internal'>Must be between 1 and 2147483647</span> :type number_of_columns: int :param data_mapping: A list of columns from the import sheet with their observation type and unit :type data_mapping: list[~energycap.sdk.models.ReadingImportProfileColumn] :param estimated: :type estimated: ~energycap.sdk.models.Estimated :param note_column_number: Column number that holds a note to be stored with the reading <span class='property-internal'>Must be between 1 and 2147483647</span> :type note_column_number: int """ _validation = { 'delimiter': {'required': True, 'max_length': 2, 'min_length': 1}, 'number_of_header_rows': {'required': True, 'maximum': 2147483647, 'minimum': 0}, 'timestamp_column_number': {'maximum': 2147483647, 'minimum': 1}, 'date_column_number': {'maximum': 2147483647, 'minimum': 1}, 'time_column_number': {'maximum': 2147483647, 'minimum': 1}, 'meter_import_id_column_number': {'maximum': 2147483647, 'minimum': 1}, 'channel_import_id_column_number': {'maximum': 2147483647, 'minimum': 1}, 'number_of_columns': {'required': True, 'maximum': 2147483647, 'minimum': 1}, 'note_column_number': {'maximum': 2147483647, 'minimum': 1}, } _attribute_map = { 'channel_interval_in_seconds': {'key': 'channelIntervalInSeconds', 'type': 'int'}, 'delimiter': {'key': 'delimiter', 'type': 'str'}, 'number_of_header_rows': {'key': 'numberOfHeaderRows', 'type': 'int'}, 'timestamp_column_number': {'key': 'timestampColumnNumber', 'type': 'int'}, 'timestamp_format': {'key': 'timestampFormat', 'type': 'str'}, 'date_column_number': {'key': 'dateColumnNumber', 'type': 'int'}, 'time_column_number': {'key': 'timeColumnNumber', 'type': 'int'}, 'date_format': {'key': 'dateFormat', 'type': 'str'}, 'time_format': {'key': 'timeFormat', 'type': 'str'}, 'time_zone_id': {'key': 'timeZoneId', 'type': 'int'}, 'meter_import_id_column_number': {'key': 'meterImportIdColumnNumber', 'type': 'int'}, 'channel_import_id_column_number': {'key': 'channelImportIdColumnNumber', 'type': 'int'}, 'number_of_columns': {'key': 'numberOfColumns', 'type': 'int'}, 'data_mapping': {'key': 'dataMapping', 'type': '[ReadingImportProfileColumn]'}, 'estimated': {'key': 'estimated', 'type': 'Estimated'}, 'note_column_number': {'key': 'noteColumnNumber', 'type': 'int'}, } def __init__(self, **kwargs): super(ReadingImportProfile, self).__init__(**kwargs) self.channel_interval_in_seconds = kwargs.get('channel_interval_in_seconds', None) self.delimiter = kwargs.get('delimiter', None) self.number_of_header_rows = kwargs.get('number_of_header_rows', None) self.timestamp_column_number = kwargs.get('timestamp_column_number', None) self.timestamp_format = kwargs.get('timestamp_format', None) self.date_column_number = kwargs.get('date_column_number', None) self.time_column_number = kwargs.get('time_column_number', None) self.date_format = kwargs.get('date_format', None) self.time_format = kwargs.get('time_format', None) self.time_zone_id = kwargs.get('time_zone_id', None) self.meter_import_id_column_number = kwargs.get('meter_import_id_column_number', None) self.channel_import_id_column_number = kwargs.get('channel_import_id_column_number', None) self.number_of_columns = kwargs.get('number_of_columns', None) self.data_mapping = kwargs.get('data_mapping', None) self.estimated = kwargs.get('estimated', None) self.note_column_number = kwargs.get('note_column_number', None)
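
# Example (illustrative values only): a profile for a comma separated file
# with one header row and three columns, where column 1 holds a combined
# timestamp and column 2 the meter import identifier. The keyword names
# follow the **kwargs handling in the constructor above.
#
# profile = ReadingImportProfile(
#     delimiter=",",
#     number_of_header_rows=1,
#     number_of_columns=3,
#     timestamp_column_number=1,
#     timestamp_format="MM/dd/yyyy mm:hh:ss",
#     meter_import_id_column_number=2,
# )
# profile.serialize()  # msrest Model serialization using the _attribute_map keys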
PypiClean
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/xinha/modules/Opera/Opera.js
Opera._pluginInfo={name:"Opera",origin:"Xinha Core",version:"$LastChangedRevision: 1084 $".replace(/^[^:]*:\s*(.*)\s*\$$/,"$1"),developer:"The Xinha Core Developer Team",developer_url:"$HeadURL: http://svn.xinha.org/trunk/modules/Opera/Opera.js $".replace(/^[^:]*:\s*(.*)\s*\$$/,"$1"),sponsor:"Gogo Internet Services Limited",sponsor_url:"http://www.gogo.co.nz/",license:"htmlArea"};function Opera(a){this.editor=a;a.Opera=this}Opera.prototype.onKeyPress=function(u){var d=this.editor;var j=d.getSelection();if(d.isShortCut(u)){switch(d.getKey(u).toLowerCase()){case"z":if(d._unLink&&d._unlinkOnUndo){Xinha._stopEvent(u);d._unLink();d.updateToolbar();return true}break;case"a":sel=d.getSelection();sel.removeAllRanges();range=d.createRange();range.selectNodeContents(d._doc.body);sel.addRange(range);Xinha._stopEvent(u);return true;break;case"v":if(!d.config.htmlareaPaste){return true}break}}switch(d.getKey(u)){case" ":var g=function(y,m){var x=y.nextSibling;if(typeof m=="string"){m=d._doc.createElement(m)}var s=y.parentNode.insertBefore(m,x);Xinha.removeFromParent(y);s.appendChild(y);x.data=" "+x.data;j.collapse(x,1);d._unLink=function(){var a=s.firstChild;s.removeChild(a);s.parentNode.insertBefore(a,s);Xinha.removeFromParent(s);d._unLink=null;d._unlinkOnUndo=false};d._unlinkOnUndo=true;return s};if(d.config.convertUrlsToLinks&&j&&j.isCollapsed&&j.anchorNode.nodeType==3&&j.anchorNode.data.length>3&&j.anchorNode.data.indexOf(".")>=0){var t=j.anchorNode.data.substring(0,j.anchorOffset).search(/\S{4,}$/);if(t==-1){break}if(d._getFirstAncestor(j,"a")){break}var h=j.anchorNode.data.substring(0,j.anchorOffset).replace(/^.*?(\S*)$/,"$1");var e=h.match(Xinha.RE_email);if(e){var v=j.anchorNode;var f=v.splitText(j.anchorOffset);var k=v.splitText(t);g(k,"a").href="mailto:"+e[0];break}RE_date=/([0-9]+\.)+/;RE_ip=/(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/;var p=h.match(Xinha.RE_url);if(p){if(RE_date.test(h)){break}var i=j.anchorNode;var b=i.splitText(j.anchorOffset);var q=i.splitText(t);g(q,"a").href=(p[1]?p[1]:"http://")+p[2];break}}break}switch(u.keyCode){case 27:if(d._unLink){d._unLink();Xinha._stopEvent(u)}break;break;case 8:case 46:if(!u.shiftKey&&this.handleBackspace()){Xinha._stopEvent(u)}default:d._unlinkOnUndo=false;if(j.anchorNode&&j.anchorNode.nodeType==3){var w=d._getFirstAncestor(j,"a");if(!w){break}if(!w._updateAnchTimeout){if(j.anchorNode.data.match(Xinha.RE_email)&&w.href.match("mailto:"+j.anchorNode.data.trim())){var l=j.anchorNode;var c=function(){w.href="mailto:"+l.data.trim();w._updateAnchTimeout=setTimeout(c,250)};w._updateAnchTimeout=setTimeout(c,1000);break}var n=j.anchorNode.data.match(Xinha.RE_url);if(n&&w.href.match(new RegExp("http(s)?://"+Xinha.escapeStringForRegExp(j.anchorNode.data.trim())))){var o=j.anchorNode;var r=function(){n=o.data.match(Xinha.RE_url);if(n){w.href=(n[1]?n[1]:"http://")+n[2]}w._updateAnchTimeout=setTimeout(r,250)};w._updateAnchTimeout=setTimeout(r,1000)}}}break}return false};Opera.prototype.handleBackspace=function(){var a=this.editor;setTimeout(function(){var e=a.getSelection();var g=a.createRange(e);var f=g.startContainer;var i=g.startOffset;var c=g.endContainer;var h=g.endOffset;var j=f.nextSibling;if(f.nodeType==3){f=f.parentNode}if(!(/\S/.test(f.tagName))){var d=document.createElement("p");while(f.firstChild){d.appendChild(f.firstChild)}f.parentNode.insertBefore(d,f);Xinha.removeFromParent(f);var 
b=g.cloneRange();b.setStartBefore(j);b.setEndAfter(j);b.extractContents();e.removeAllRanges();e.addRange(b)}},10)};Opera.prototype.inwardHtml=function(a){a=a.replace(/<(\/?)del(\s|>|\/)/ig,"<$1strike$2");return a};Opera.prototype.outwardHtml=function(a){return a};Opera.prototype.onExecCommand=function(f,e,d){switch(f){case"removeformat":var k=this.editor;var c=k.getSelection();var l=k.saveSelection(c);var j=k.createRange(c);var g=k._doc.body.getElementsByTagName("*");var a=(j.startContainer.nodeType==1)?j.startContainer:j.startContainer.parentNode;var h,b;if(c.isCollapsed){j.selectNodeContents(k._doc.body)}for(h=0;h<g.length;h++){b=g[h];if(j.isPointInRange(b,0)||(g[h]==a&&j.startOffset==0)){b.removeAttribute("style")}}this.editor._doc.execCommand(f,e,d);k.restoreSelection(l);return true;break}return false};Opera.prototype.onMouseDown=function(a){};Xinha.prototype.insertNodeAtSelection=function(b){if(b.ownerDocument!=this._doc){try{b=this._doc.adoptNode(b)}catch(f){}}this.focusEditor();var d=this.getSelection();var a=this.createRange(d);a.deleteContents();var c=a.startContainer;var h=a.startOffset;var g=b;d.removeAllRanges();switch(c.nodeType){case 3:if(b.nodeType==3){c.insertData(h,b.data);a=this.createRange();a.setEnd(c,h+b.length);a.setStart(c,h+b.length);d.addRange(a)}else{c=c.splitText(h);if(b.nodeType==11){g=g.firstChild}c.parentNode.insertBefore(b,c);this.selectNodeContents(g);this.updateToolbar()}break;case 1:if(b.nodeType==11){g=g.firstChild}c.insertBefore(b,c.childNodes[h]);this.selectNodeContents(g);this.updateToolbar();break}};Xinha.prototype.getParentElement=function(c){if(typeof c=="undefined"){c=this.getSelection()}var a=this.createRange(c);try{var d=a.commonAncestorContainer;if(!a.collapsed&&a.startContainer==a.endContainer&&a.startOffset-a.endOffset<=1&&a.startContainer.hasChildNodes()){d=a.startContainer.childNodes[a.startOffset]}while(d.nodeType==3){d=d.parentNode}return d}catch(b){return null}};Xinha.prototype.activeElement=function(a){if((a===null)||this.selectionEmpty(a)){return null}if(!a.isCollapsed){if(a.anchorNode.childNodes.length>a.anchorOffset&&a.anchorNode.childNodes[a.anchorOffset].nodeType==1){return a.anchorNode.childNodes[a.anchorOffset]}else{if(a.anchorNode.nodeType==1){return a.anchorNode}else{return null}}}return null};Xinha.prototype.selectionEmpty=function(a){if(!a){return true}if(typeof a.isCollapsed!="undefined"){return a.isCollapsed}return true};Xinha.prototype.saveSelection=function(){return this.createRange(this.getSelection()).cloneRange()};Xinha.prototype.restoreSelection=function(a){var b=this.getSelection();b.removeAllRanges();b.addRange(a)};Xinha.prototype.selectNodeContents=function(b,d){this.focusEditor();this.forceRedraw();var a;var e=typeof d=="undefined"?true:false;var c=this.getSelection();a=this._doc.createRange();if(e&&b.tagName&&b.tagName.toLowerCase().match(/table|img|input|textarea|select/)){a.selectNode(b)}else{a.selectNodeContents(b)}c.removeAllRanges();c.addRange(a);if(typeof d!="undefined"){if(d){c.collapse(a.startContainer,a.startOffset)}else{c.collapse(a.endContainer,a.endOffset)}}};Xinha.prototype.insertHTML=function(c){var e=this.getSelection();var a=this.createRange(e);this.focusEditor();var b=this._doc.createDocumentFragment();var f=this._doc.createElement("div");f.innerHTML=c;while(f.firstChild){b.appendChild(f.firstChild)}var d=this.insertNodeAtSelection(b)};Xinha.prototype.getSelectedHTML=function(){var b=this.getSelection();if(b.isCollapsed){return""}var a=this.createRange(b);return 
Xinha.getHTML(a.cloneContents(),false,this)};Xinha.prototype.getSelection=function(){var c=this._iframe.contentWindow.getSelection();if(c&&c.focusNode&&c.focusNode.tagName&&c.focusNode.tagName=="HTML"){var b=this._doc.getElementsByTagName("body")[0];var a=this.createRange();a.selectNodeContents(b);c.removeAllRanges();c.addRange(a);c.collapseToEnd()}return c};Xinha.prototype.createRange=function(b){this.activateEditor();if(typeof b!="undefined"){try{return b.getRangeAt(0)}catch(a){return this._doc.createRange()}}else{return this._doc.createRange()}};Xinha.prototype.isKeyEvent=function(a){return a.type=="keypress"};Xinha.prototype.getKey=function(a){return String.fromCharCode(a.charCode)};Xinha.getOuterHTML=function(a){return(new XMLSerializer()).serializeToString(a)};Xinha.cc=String.fromCharCode(8286);Xinha.prototype.setCC=function(i){var c=Xinha.cc;try{if(i=="textarea"){var f=this._textArea;var g=f.selectionStart;var k=f.value.substring(0,g);var a=f.value.substring(g,f.value.length);if(a.match(/^[^<]*>/)){var j=a.indexOf(">")+1;f.value=k+a.substring(0,j)+c+a.substring(j,a.length)}else{f.value=k+c+a}f.value=f.value.replace(new RegExp("(&[^"+c+"]*?)("+c+")([^"+c+"]*?;)"),"$1$3$2");f.value=f.value.replace(new RegExp("(<script[^>]*>[^"+c+"]*?)("+c+")([^"+c+"]*?<\/script>)"),"$1$3$2");f.value=f.value.replace(new RegExp("^([^"+c+"]*)("+c+")([^"+c+"]*<body[^>]*>)(.*?)"),"$1$3$2$4");f.value=f.value.replace(c,'<span id="XinhaOperaCaretMarker">MARK</span>')}else{var b=this.getSelection();var d=this._doc.createElement("span");d.id="XinhaOperaCaretMarker";b.getRangeAt(0).insertNode(d)}}catch(h){}};Xinha.prototype.findCC=function(i){if(i=="textarea"){var h=this._textArea;var j=h.value.search(/(<span\s+id="XinhaOperaCaretMarker"\s*\/?>((\s|(MARK))*<\/span>)?)/);if(j==-1){return}var e=RegExp.$1;var f=j+e.length;var k=h.value.substring(0,j);var b=h.value.substring(f,h.value.length);h.value=k;h.scrollTop=h.scrollHeight;var d=h.scrollTop;h.value+=b;h.setSelectionRange(j,j);h.focus();h.scrollTop=d}else{var g=this._doc.getElementById("XinhaOperaCaretMarker");if(g){this.focusEditor();var a=this.createRange();a.selectNode(g);var c=this.getSelection();c.addRange(a);c.collapseToStart();this.scrollToElement(g);g.parentNode.removeChild(g);return}}};Xinha.getDoctype=function(a){var b="";if(a.doctype){b+="<!DOCTYPE "+a.doctype.name+" PUBLIC ";b+=a.doctype.publicId?'"'+a.doctype.publicId+'"':"";b+=a.doctype.systemId?' "'+a.doctype.systemId+'"':"";b+=">"}return b};Xinha.prototype._standardInitIframe=Xinha.prototype.initIframe;Xinha.prototype.initIframe=function(){if(!this._iframeLoadDone){if(this._iframe.contentWindow&&this._iframe.contentWindow.xinhaReadyToRoll){this._iframeLoadDone=true;this._standardInitIframe()}else{var a=this;setTimeout(function(){a.initIframe()},5)}}};Xinha._addEventOperaOrig=Xinha._addEvent;Xinha._addEvent=function(a,c,b){if(a.tagName&&a.tagName.toLowerCase()=="select"&&c=="change"){return Xinha.addDom0Event(a,c,b)}return Xinha._addEventOperaOrig(a,c,b)};
PypiClean
/GTsegments-0.22.tar.gz/GTsegments-0.22/src/gtsegments/gbk.py
# Copyright (c) 2013-2016, Philippe Bordron <[email protected]> # # This file is part of GTsegments. # # GTsegments is free software: you can redistribute it and/or modify # it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GTsegments is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU LESSER GENERAL PUBLIC LICENSE for more details. # # You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE # along with GTsegments. If not, see <http://www.gnu.org/licenses/> import sys import os import re import logging from operator import itemgetter import Bio from Bio import SeqIO import logging logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) logger = logging.getLogger(__package__) DEFAULT_FIELD_OF_GENE_ID = "locus_tag" DB_XREF="db_xref" GENE_SYNONYM="gene_synonym" FIELD_LIST = [] def read_gbk(input_file, feature_type=['CDS']): seq_result = {} for gene_seq in SeqIO.parse(input_file, "genbank") : result = {} for f in gene_seq.features: if f.type == "source": chr_start = f.location.start chr_end = f.location.end #if f.type == 'CDS' or f.type == 'gene': if f.type in feature_type: #sys.stderr.write("%s\n" % (str(f.location))) #sys.stderr.write("%s\n" % (str(pos))) #sys.stderr.write("%s\n\n" % (str(f.qualifiers))) gene_name = '' start = len(gene_seq) end = -1 direction='' if float(Bio.__version__) <= 1.62 : start = int(re.match(r'[^0-9]*([0-9]+)[^0-9]*', str(f.location.start)).group(1)) end = int(re.match(r'[^0-9]*([0-9]+)[^0-9]*', str(f.location.end)).group(1)) if f.strand < 0 : direction = direction + '-' elif f.strand > 0 : direction = direction + '+' else: direction = direction + '?' else: for pos in f.location.parts: start = min(start, int(pos.start)) end = max(end, int(pos.end)) if pos.strand < 0 : direction = direction + '-' elif pos.strand > 0 : direction = direction + '+' else: direction = direction + '?' 
if start == chr_start and end == chr_end: # try to manage the gene that pass accross the origin of the chromosome start = f.location.parts[0].start end = f.location.parts[-1].end if start < end: # if the order if reversed start = f.location.parts[-1].start end = f.location.parts[0].end start = start + 1 # correct to the correct start position (end is ok) gene_id = "G_{}_{}".format(start, end) if gene_id not in result: result[gene_id] = {"start": start, "end": end, "dir" : direction} elif result[gene_id]["start"] != start or result[gene_id]["end"] != end: logger.error("Non consistent position between {} at [{:d}..{:d}] vs [{:d}..{:d}]".format("(" + " ".join(feature_type) + ")", result[gene_id]["start"], result[gene_id]["end"], start, end)) sys.exit(1) for key, value in f.qualifiers.items(): if key not in result[gene_id]: result[gene_id][key] = set() result[gene_id][key].update(value) seq_result[gene_seq.id] = (len(gene_seq), result) return seq_result def extract_info(genome_map, supp_field_list, alt_id = None): result = [] db_xref_set = set() db_xref_gene_map = {} field_list = list(FIELD_LIST) if alt_id and alt_id not in field_list and alt_id not in supp_field_list: field_list = [alt_id] + field_list for f in supp_field_list: if f not in field_list: field_list.append(f) for chr_id, gene_listing in genome_map.items(): alt_id_set = set() for gene_id, field_map in gene_listing.items(): if alt_id == None: line = [chr_id, gene_id, field_map["start"], field_map["end"], field_map["dir"]] else: if alt_id in field_map: g = field_map[alt_id].pop() field_map[alt_id].add(g) if g not in alt_id_set: alt_id_set.add(g) line = [chr_id, g, field_map["start"], field_map["end"], field_map["dir"]] else: logger.error("Alternative id '{}' is duplicated ({}) and cannot be used as an id".format(alt_id, g)) sys.exit(1) else: logger.error("Alternative id '{}' is missing for feature [{:d}..{:d}]".format(alt_id, field_map["start"], field_map["end"])) sys.exit(1) for field in field_list: if field in field_map: values = field_map[field] if field == DB_XREF: db_xref_gene_map[(chr_id, gene_id)] = {} for v in values: db, id_v = v.split(":") db_xref_set.add(db) db_xref_gene_map[(chr_id, gene_id)][db] = id_v if field == GENE_SYNONYM: values = [re.sub(';', '', v) for v in values] else: line.append(" ".join(values)) else: line.append("") result.append(line) # expansion and replace of the db_xref in the list # TODO return result #end def load_gbk(f, feature_type=['CDS'], alt_id = DEFAULT_FIELD_OF_GENE_ID): genome_map = {} length_map = {} for chr_id, chr_info in read_gbk(f, feature_type).items(): chr_length, gene_listing = chr_info genome_map[chr_id] = gene_listing listing = extract_info(genome_map, [], alt_id) listing.sort(key=itemgetter(0,2,3)) chr_dict = {} for l in listing: chr_id = l[0] gene_id = l[1] try: chr_dict[chr_id].append(gene_id) except KeyError: chr_dict[chr_id] = [] chr_dict[chr_id].append(gene_id) return chr_dict ################ # Main program # ################ # output headers OUTPUT_HEADER = ["chromosome_id", "gene_id", "left_end_position", "right_end_position", "transcription_direction"] + FIELD_LIST def main (argv, prog = os.path.basename(sys.argv[0])): logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) import argparse import textwrap parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ Extract, from a list of GenBank files (.gbk), the list of genes with their respective chromosome id, left end position, right end 
position and transcription direction and write it to the standard output in tabular file format (.tsv) exemple: %(prog)s chr1.gbk chr2.gbk... > gene_list.tsv '''), prog = prog ) # Requiered arguments ##################### chrp = parser.add_argument_group(title="GBK files") chrp.add_argument("gbk_file", type=argparse.FileType('r'), nargs='+', help="a list of GenBank (.gbk) files") # Optional arguments #################### parser.add_argument("-o", "--output", default=None, help="set an output file") parser.add_argument("-chr", "--chromosome_length", type=argparse.FileType('w'), help="produce the list of chromosomes and their respective length in the CHROMOSOME_LIST_OUTPUT file", metavar="CHROMOSOME_LIST_OUTPUT") filedp = parser.add_argument_group(title="optional switches") filedp.add_argument("-alt_id", "--alternate_id", type=str, default=None, help="select the specified field (like locus_tag) as the id of the gene if possible") filedp.add_argument("-f", "--add_field", type=str, nargs='+', default=[], metavar='FIELD', help="add columns listing the values in the selected fields") filedp.add_argument("-feat", "--features", type=str, nargs='+', default=["CDS"], metavar='FEAT', help="Which feature(s) to take in account. When many are given, (1) check the consitensy of positions and ids and (2) fuse the annotations (default: %(default)s)") #verbose_group = parser.add_mutually_exclusive_group() #verbose_group.add_argument("-v", "--verbosity", help="increase output verbosity", action="store_true") #verbose_group.add_argument("-q", "--quiet", help="quiet mode", action="store_true") parser.add_argument("-q", "--quiet", help="quiet mode", action="store_true") parser.add_argument("--no_header", default=False, help="do not display column names", action="store_true") args = parser.parse_args(argv) stream_out = sys.stdout if args.output: stream_out = open(args.output, 'w') if args.quiet: logger.setLevel(logging.CRITICAL) supp_field_list = args.add_field genome_map = {} length_map = {} for f in args.gbk_file: for chr_id, c in read_gbk(f, args.features).items(): chr_length, gene_listing = c genome_map[chr_id] = gene_listing length_map[chr_id] = chr_length gene_list = extract_info(genome_map, supp_field_list, args.alternate_id) gene_list.sort(key=itemgetter(0,2,3)) head = OUTPUT_HEADER + supp_field_list if not args.no_header: stream_out.write("{}\n".format("\t".join(head))) for elem in gene_list: line = [str(i) for i in elem] stream_out.write("{}\n".format("\t".join(line))) if args.output: stream_out.close() if args.chromosome_length: args.chromosome_length.write("chromosome_id\tlength\n") for k, v in length_map.items(): args.chromosome_length.write("{}\t{}\n".format(k, v)) if __name__ == '__main__': main(sys.argv[1:])
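
# Example (illustrative file names, assuming the gtsegments package layout):
# using the module programmatically instead of the command line shown above.
#
# from gtsegments.gbk import read_gbk, load_gbk
#
# # {chromosome_id: [gene_id, ...]} with genes ordered by position
# chr_dict = load_gbk("chr1.gbk", feature_type=["CDS"], alt_id="locus_tag")
#
# # lower level access: {chromosome_id: (chromosome_length, {gene_id: fields})}
# per_chr = read_gbk("chr1.gbk", feature_type=["CDS"])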
PypiClean
/CGATReport-0.9.1.tar.gz/CGATReport-0.9.1/doc/FAQ.rst
***
FAQ
***

=========
Plotting
=========

I cannot see the full plot when using ``arange``
**************************************************

Note that the module uses python ranges. Thus, plotting a histogram
with arange(0,100,1) will only bin the values from 0 to 99 and ignore
values equal to or larger than 100.

I get the error message ``TypeError: a class that defines __slots__ without defining __getstate__ cannot be pickled``
**********************************************************************************************************************

The pickling mechanism used in the persistent cache does not deal well
with objects of the type <class 'sqlalchemy.engine.base.RowProxy'>.
These should be converted to tuples beforehand (see the example at the
end of this FAQ).

I get the error message ``RuntimeError: maximum recursion depth exceeded while calling a Python object``
*********************************************************************************************************

This is possibly a data type error. If the type of a database column is
defined as text (for example if there are so few values that the
correct type cannot be guessed), the Trackers might return a string
instead of a numeric value, for example ``(u'0.64425349087',)`` instead
of ``(0.64425349087,)``. These should be caught by
``CGATReport.DataTypes``.

How do I insert a link into a document
**************************************

In order to add a link to a document, use the reStructuredText linking
mechanism. Note that path names should be absolute path names. For
example::

    class ReportsList( Tracker ):
        '''provide links to reports in subdirectories.'''

        tracks = glob.glob("subdir*")

        def __call__( self, track ):
            datadir = os.path.abspath(os.curdir)
            return "`medip_%(track)s <%(datadir)s/%(track)s/report/html/contents.html>`_" % locals()
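
Example: converting ``RowProxy`` results to tuples
****************************************************

A minimal sketch of the conversion mentioned in the pickling question
above. The table and column names are purely illustrative and ``cc``
stands for whatever SQLAlchemy connection or cursor the Tracker uses::

    # rows come back as sqlalchemy RowProxy objects
    rows = cc.execute( "SELECT gene_id, counts FROM expression_counts" ).fetchall()

    # convert each RowProxy to a plain tuple so the persistent cache can pickle it
    data = [ tuple(row) for row in rows ]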
PypiClean
/Mailchimp%20Python-0.1.12.tar.gz/Mailchimp Python-0.1.12/mailchimp/objects/mc_list_stats.py
from .base_object import BaseObject class MCListStats(BaseObject): item_url = None def __init__(self, json_data={}): super(MCListStats, self).__init__() self.member_count = None self.unsubscribe_count = None self.cleaned_count = None self.member_count_since_send = None self.unsubscribe_count_since_send = None self.cleaned_count_since_send = None self.campaign_count = None self.campaign_last_sent = None self.merge_field_count = None self.avg_sub_rate = None self.avg_unsub_rate = None self.target_sub_rate = None self.open_rate = None self.click_rate = None self.last_sub_date = None self.last_unsub_date = None if json_data: self.member_count = json_data.get('member_count') self.unsubscribe_count = json_data.get('unsubscribe_count') self.cleaned_count = json_data.get('cleaned_count') self.member_count_since_send = json_data.get('member_count_since_send') self.unsubscribe_count_since_send = json_data.get('unsubscribe_count_since_send') self.cleaned_count_since_send = json_data.get('cleaned_count_since_send') self.campaign_count = json_data.get('campaign_count') self.campaign_last_sent = self._parse_date(json_data.get('campaign_last_sent')) \ if json_data.get('campaign_last_sent') else None self.merge_field_count = json_data.get('merge_field_count') self.avg_sub_rate = json_data.get('avg_sub_rate') self.avg_unsub_rate = json_data.get('avg_unsub_rate') self.target_sub_rate = json_data.get('target_sub_rate') self.open_rate = json_data.get('open_rate') self.click_rate = json_data.get('click_rate') self.last_sub_date = self._parse_date(json_data.get('last_sub_date')) \ if json_data.get('last_sub_date') else None self.last_unsub_date = self._parse_date(json_data.get('last_unsub_date')) \ if json_data.get('last_unsub_date') else None
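
# Example (illustrative payload): building the stats object from a fragment
# of the JSON that the MailChimp API returns for a list.
#
# stats = MCListStats({
#     'member_count': 120,
#     'unsubscribe_count': 3,
#     'campaign_count': 5,
#     'open_rate': 24.5,
# })
# stats.member_count  # -> 120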
PypiClean
/ORMithorynque-0.1.1.tar.bz2/ORMithorynque-0.1.1/doc/html/searchindex.js
Search.setIndex({envversion:49,filenames:["benchmark","index","inheritance","querying","relation","transaction","tuto"],objects:{},objnames:{},objtypes:{},terms:{"byte":[0,4,6],"case":6,"class":[],"default":[4,5,6],"final":[3,6],"float":6,"import":[0,6],"int":[4,6],"new":[1,4,6],"return":[3,4],"static":6,"true":[4,6],add:[4,6],add_inhabit:4,add_lives_in:4,addit:[0,4,5,6],address:4,agress:1,all:[0,2,3],allow:1,alreadi:6,also:[0,5,6],alwai:0,ani:[5,6],anim:1,anoth:4,any:6,appear:1,append:4,architect:2,artist:2,artwork:2,attribut:[4,5,6],auto:5,autoincr:6,automat:[0,1,4,6],avail:1,backend:1,bad:5,baptist:[3,6],beat:0,becaus:0,begin_transact:5,better:0,bigger:0,binari:6,bithdai:6,blob:6,built:1,cach:1,call:[3,6],can:[1,3,4,6],challeng:1,chang:5,children:4,classif:0,classnam:0,code:0,collumn:2,column:[1,2,4,6],command:6,commit:5,compar:0,comparison:0,complex:1,consequ:5,context:5,contrari:[4,5],conveni:3,correspond:[3,4],count:3,creat:[],creation:0,cursor:3,data:[4,6],databas:[],database_filenam:6,datatyp:6,date:6,datetim:6,debug:6,debug_sql:6,decoratedroom:2,describ:3,doe:[3,6],don:[4,6],drawback:1,duck:1,due:0,dure:[4,5],each:[2,5],effici:1,either:3,els:[4,5],end:[4,5],end_transact:5,equival:3,exampl:[2,3,4,5,6],except:[4,5],execut:[3,5],exist:[1,3,6],expect:[0,4,5],extra:0,faster:1,featur:1,fetchal:3,fetchon:3,few:1,fewer:0,field:6,filenam:6,first_nam:[3,4,5,6],follow:[0,2,3,4,5,6],from:[2,3,6],full:0,funni:1,galleri:2,get:[],global:[2,3,6],gnu:1,goe:5,good:1,guess:6,have:4,here:[2,3,4,5,6],high:1,hous:4,house1:4,house2:4,howev:[0,5],icd10:0,immedi:5,impress:1,index:[1,4,6],indic:3,inhabit:4,inherit:[],initi:1,inner:1,insid:5,instal:6,installat:[],instanc:[],instead:[4,6],integ:6,integer:6,involv:4,jean:[3,6],jiba:6,join:3,kei:[1,6],lami:[3,6],languag:[1,3],lead:1,level:1,lgpl:1,licenc:1,line:0,list:[3,4,6],littl:1,live:4,lives_in:4,lower:0,main:1,mammal:1,mani:[],mapper:1,max:3,medic:0,memori:6,method:[3,4],miss:6,mix:1,mode:[5,6],modifi:[4,5],modul:[0,6],more:[1,3,4],mostli:0,multipl:[0,1,2],museum:2,must:[4,6],my_decorated_room:2,my_museum:2,name:[1,2,3,4,5,6],nativ:1,neat:1,need:4,newfirstnam:5,newnam:5,node:4,non:0,none:[0,3,6],notic:6,nowher:4,number:[0,5,6],obj:5,object:[0,1,2,3,4,5,6],obtain:3,one:[],open:6,opene:[],optim:1,option:6,order:4,orient:1,orm:[0,1],ormithorynqu:[],ornithorynqu:1,other:[0,2,4],our:4,own:[4,6],owner:4,paradigm:1,paramet:[4,6],parent:4,partial:0,pass:6,peewe:[0,1],per:[3,6],perform:[1,5,6],permiss:6,person:[3,4,6],person_name_index:6,pip:6,plain:[0,3],platypu:1,pleas:6,poni:[0,1],prebuilt:1,primari:6,prompt:6,provid:[3,5,6],pypi:1,python:[0,1,6],queri:[],rais:5,read:0,real:6,receiv:6,reduc:5,referenc:[],relat:[],relation_attribute1_attribute2:4,relation_inhabitant_lives_in:4,reload:1,rememb:3,remov:4,remove_inhabit:4,remove_lives_in:4,request:[1,5,6],requir:[0,1],reserv:6,result:0,rollback:5,room:2,root:6,row:[2,3],safe:1,sai:1,same:[2,4,6],save:[1,5],schema:[0,1],second:0,see:[1,6],seen:1,select:3,select_al:3,select_all_object:3,select_object_al:3,select_object_on:3,select_on:3,select_one_object:3,self:[],sever:[0,2],share:2,show:0,shown:0,side:4,similar:4,similarli:1,simpli:[4,6],singl:[0,1,3,4,5],singular:4,size:0,slighli:0,small:0,some:[3,4],somehow:1,someon:4,someone1:4,someone2:4,someth:5,something_goes_bad:5,somewher:4,soon:1,specif:[1,3],specifi:6,speed:0,sql:[0,1,3,5,6],sqlalchemi:[0,1],sqlattribut:[2,4,6],sqlite3:[0,1,3,6],sqlmanytomani:4,sqlobject:1,sqlobjet:0,sqlonetomani:4,sqlonetoon:4,start:[],statement:5,store:[0,2,6],str:[2,4,6],strang:1,string
:[4,6],structur:1,support:[0,1,2,4,5,6],syntax:5,system:1,tabl:[],take:6,term:0,text:6,than:[0,1,6],thank:[0,1],thi:[0,3,4,6],thread:1,time:[4,6],timestamp:6,titl:2,too:6,top:1,transact:[],travers:0,tri:5,tupl:3,turn:6,two:[3,4,5],type:6,under:1,unicod:6,updat:[0,1,4,5,6],update:[],usual:6,valu:[4,6],variou:3,veri:[1,4],vinci:2,virtual:2,vitruv:2,week:4,when:[1,5],where:3,which:4,work:5,write:[0,1],written:1,yai:6,you:[3,6],your:6},titles:["Benchmark","Welcome to ORMithorynque&#8217;s documentation!","Inheritance","Querying","Relations","Transactions","Getting started"],titleterms:{"class":6,benchmark:0,content:1,creat:6,databas:6,document:1,get:6,inherit:2,installat:6,instanc:6,mani:4,one:4,opene:6,ormithorynqu:1,queri:3,referenc:4,relat:4,self:4,start:6,tabl:1,transact:5,update:6,welcom:1}})
PypiClean
/Geode_GEM-0.12.0-py3-none-any.whl/geode_gem/ui/widgets/widgets.py
# GObject
from gi.repository import Gtk, Pango


# ------------------------------------------------------------------------------
#   Class
# ------------------------------------------------------------------------------

class ListBoxItem(Gtk.ListBoxRow):

    def __init__(self):
        """ Constructor
        """

        Gtk.ListBoxRow.__init__(self)

        # ------------------------------------
        #   Initialize variables
        # ------------------------------------

        self.__widget = None

        # ------------------------------------
        #   Prepare interface
        # ------------------------------------

        # Init widgets
        self.__init_widgets()

        # Init packing
        self.__init_packing()

    def __init_widgets(self):
        """ Initialize interface widgets
        """

        # ------------------------------------
        #   Grids
        # ------------------------------------

        self.grid = Gtk.Box()
        self.grid_labels = Gtk.Box()

        # Properties
        self.grid.set_homogeneous(False)
        self.grid.set_border_width(6)
        self.grid.set_spacing(12)

        self.grid_labels.set_orientation(Gtk.Orientation.VERTICAL)
        self.grid_labels.set_homogeneous(False)
        self.grid_labels.set_spacing(2)

        # ------------------------------------
        #   Labels
        # ------------------------------------

        self.label_title = Gtk.Label()
        self.label_description = Gtk.Label()

        # Properties
        self.label_title.set_line_wrap(True)
        self.label_title.set_halign(Gtk.Align.START)
        self.label_title.set_justify(Gtk.Justification.FILL)
        self.label_title.set_line_wrap_mode(Pango.WrapMode.WORD_CHAR)

        self.label_description.set_hexpand(True)
        self.label_description.set_line_wrap(True)
        self.label_description.set_use_markup(True)
        self.label_description.set_no_show_all(True)
        self.label_description.set_halign(Gtk.Align.START)
        self.label_description.set_valign(Gtk.Align.START)
        self.label_description.set_justify(Gtk.Justification.FILL)
        self.label_description.set_line_wrap_mode(Pango.WrapMode.WORD_CHAR)
        self.label_description.get_style_context().add_class(
            Gtk.STYLE_CLASS_DIM_LABEL)

    def __init_packing(self):
        """ Initialize widgets packing in main window
        """

        self.add(self.grid)

        self.grid.pack_start(self.grid_labels, True, True, 0)

        self.grid_labels.pack_start(self.label_title, True, True, 0)
        self.grid_labels.pack_start(self.label_description, True, True, 0)

    @staticmethod
    def new(text):
        """ Generate a new ListBoxItem with a specific label

        Parameters
        ----------
        text : str
            Label text
        """

        row = ListBoxItem()
        row.set_option_label(text)

        return row

    def set_option_label(self, text):
        """ Set the option label text

        Parameters
        ----------
        text : str
            Label text
        """

        self.label_title.set_text(text)

    def get_option_label(self):
        """ Retrieve the option label text

        Returns
        -------
        str
            Label text
        """

        return self.label_title.get_text()

    def set_description_label(self, text):
        """ Set the description label text

        Parameters
        ----------
        text : str
            Label text
        """

        if len(text) > 0:
            self.label_description.set_markup(
                "<span size=\"small\">%s</span>" % text)

            self.label_title.set_valign(Gtk.Align.END)

        else:
            self.label_title.set_valign(Gtk.Align.CENTER)

        self.label_description.set_visible(len(text) > 0)

    def set_widget(self, widget):
        """ Set a new internal widget

        Parameters
        ----------
        widget : Gtk.Widget
            Internal widget to set
        """

        # Remove previous widget
        if self.__widget is not None:
            self.grid.remove(self.__widget)

        # Add new widget
        if widget is not None:
            widget.set_valign(Gtk.Align.CENTER)

            # Small controls are packed right-aligned without expanding
            if type(widget) in (Gtk.Switch, Gtk.Button, Gtk.SpinButton,
                                Gtk.Image, Gtk.MenuButton):
                widget.set_halign(Gtk.Align.END)
                widget.set_hexpand(False)

                self.grid.set_homogeneous(False)
                self.grid.pack_start(widget, False, False, 0)

            # Larger widgets share the row evenly with the labels
            else:
                widget.set_halign(Gtk.Align.FILL)
                widget.set_hexpand(True)

                self.grid.set_homogeneous(True)
                self.grid.pack_start(widget, True, True, 0)

        self.__widget = widget

    def get_widget(self):
        """ Retrieve internal widget

        Returns
        -------
        Gtk.Widget
            Internal widget
        """

        return self.__widget


class ScrolledListBox(Gtk.ScrolledWindow):

    def __init__(self):
        """ Constructor
        """

        Gtk.ScrolledWindow.__init__(self)

        # ----------------------------------------
        #   Prepare interface
        # ----------------------------------------

        # Init widgets
        self.__init_widgets()

        # Init packing
        self.__init_packing()

    def __init_widgets(self):
        """ Initialize interface widgets
        """

        self.set_propagate_natural_height(True)

        # ----------------------------------------
        #   Games list
        # ----------------------------------------

        self.listbox = Gtk.ListBox()

        # Properties
        self.listbox.set_activate_on_single_click(True)
        self.listbox.set_selection_mode(Gtk.SelectionMode.NONE)

    def __init_packing(self):
        """ Initialize widgets packing in main window
        """

        self.add(self.listbox)
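

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): how a settings-style
# row could be built from these widgets. It assumes a running Gtk main loop
# and a parent window, which are omitted here for brevity.
# ---------------------------------------------------------------------------
# row = ListBoxItem.new("Enable dark theme")
# row.set_description_label("Switch the interface to the dark variant")
# row.set_widget(Gtk.Switch())        # packed right-aligned, non-expanding
#
# container = ScrolledListBox()
# container.listbox.add(row)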
PypiClean
/BioSAK-1.72.0.tar.gz/BioSAK-1.72.0/doc/iTOL.md
### Dataset types

+ ColorStrip

  https://itol.embl.de/help.cgi#strip

      BioSAK iTOL -ColorStrip -lg MagTaxon.txt -lt Phylum -o ColorStrip_taxon.txt

+ ColorRange

  https://itol.embl.de/help.cgi#dsRanges

      BioSAK iTOL -ColorRange -lg MagTaxon.txt -lt Phylum -o ColorRange_taxon.txt
      BioSAK iTOL -ColorRange -taxon Taxonomy.txt -rank f -lt Family -o ColorRange_taxon.txt

+ SimpleBar

  https://itol.embl.de/help.cgi#bar

      BioSAK iTOL -SimpleBar -lv MagSize.txt -scale 0-3-6-9 -lt Size -o SimpleBar_size.txt

+ Heatmap

  https://itol.embl.de/help.cgi#heatmap

      BioSAK iTOL -Heatmap -lm MagAbundance.txt -lt Abundance -o Heatmap_abundance.txt

+ Binary

  https://itol.embl.de/help.cgi#binary

      BioSAK iTOL -Binary -lm Binary_matrix.txt -lt Presence_Absence -gc lightblue -o PA_iTOL.txt
      BioSAK iTOL -Binary -lm Binary_matrix.txt -lt Presence_Absence -gc "#85C1E9" -o PA_iTOL.txt

+ ExternalShape

  https://itol.embl.de/help.cgi#shapes

      BioSAK iTOL -ExternalShape -lm identity_matrix.txt -lt Identity -scale 25-50-75-100 -o ExternalShape_identity.txt

### Input file format

+ Leaf-to-Group (`-lg`, tab separated, no header)

      genome_1    Bacteria
      genome_2    Archaea

+ Taxonomy (`-taxon`, tab separated, GTDB-format taxonomy string)

      genome_1    d__Bacteria;p__Proteobacteria;c__Alphaproteobacteria;o__Dongiales;f__Dongiaceae;g__Dongia;s__Dongia mobilis
      genome_2    d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;o__Arenicellales;f__LS-SOB;g__VYGS01;s__

+ Group-to-Color (`-gc`) and Column-to-Color (`-cc`) (tab separated, no header)

      Bacteria    #CCCC00
      Archaea     #9999FF
      Virus       orange
      Eukaryote   lightblue

  Please note that only one color can be specified for Binary data; provide it with `-gc lightblue` or `-gc "#85C1E9"`.

+ Leaf-to-Value file format (`-lv`, tab separated, no header)

      genome_1    6.15
      genome_2    6.63

+ Leaf-to-Matrix file format (`-lm`, tab separated, header required!!!)

      Genome_id   Sample_A    Sample_B    Sample_C
      genome_1    6.15        2.23        1.56
      genome_2    6.63        1.72        2.55

## Here is a tutorial

1. Here, I have a phylogenetic tree for 37 MAGs derived from six microbial communities (either surface-associated or planktonic). I have the taxonomy info of these MAGs at the class level, their sizes and their relative abundance across the six samples. This short note shows how to visualize all this information in one plot.
1. Download files from [demo_data/iTOL](../demo_data/iTOL)
1. File description (please see below for how to prepare these files):
   + A phylogenetic tree in [Newick](http://evolution.genetics.washington.edu/phylip/newicktree.html) format: [NorthSea_0_Tree.newick](files_needed/NorthSea_0_Tree.newick)
   + Taxonomy info: [NorthSea_1_Taxon_ColorStrip.txt](files_needed/NorthSea_1_Taxon_ColorStrip.txt), [NorthSea_1_Taxon_Range.txt](files_needed/NorthSea_1_Taxon_Range.txt)
   + Life-style info: [NorthSea_2_LifeStyle.txt](files_needed/NorthSea_2_LifeStyle.txt)
   + Abundance across samples: [NorthSea_3_Abundance.txt](files_needed/NorthSea_3_Abundance.txt)
   + MAG size info: [NorthSea_4_MAG_Size.txt](files_needed/NorthSea_4_MAG_Size.txt)
1. Upload **NorthSea_0_Tree.newick** to iTOL via [https://itol.embl.de/upload.cgi](https://itol.embl.de/upload.cgi).
1. Once you have the tree uploaded, you'll see the skeleton of the tree without any decoration. You can now play around with the control panel on the right side (e.g. change the tree layout to circular).

   ![Step_1](figures/Step_1.jpg)

1. We are going to add the taxonomy info of our MAGs to the tree now, which is really easy to do in iTOL. You just need to drag and drop **NorthSea_1_Taxon_ColorStrip.txt** onto the **tree area**.
1. Do the same thing with **NorthSea_2_LifeStyle.txt**, **NorthSea_3_Abundance.txt** and **NorthSea_4_MAG_Size.txt** to add life-style, abundance and size info. You'll see trees like this:

   ![Tree_1](figures/Tree_1.jpg)

1. To get a tree with a circular layout and MAG classes coloured as in the figure below, you need to use **NorthSea_1_Taxon_Range.txt** instead of **NorthSea_1_Taxon_ColorStrip.txt**, choose "**Circular**" mode in the control panel, click "**At tips**" and then turn it **off**.

   ![Tree_2](figures/Tree_2.jpg)

1. Go to the **Export** panel, choose the desired file format and export your tree to file. Remember to turn on **Colored ranges legend** if you are using **NorthSea_1_Taxon_Range.txt** to colour MAG classes.

   ![Step_2](figures/Step_2.jpg)

## How to prepare iTOL-recognizable files

1. Some tools suggested by iTOL: https://itol.embl.de/help.cgi#external
2. You can also use [BioSAK](https://github.com/songweizhi/BioSAK)'s iTOL module to generate iTOL-recognizable files. Please refer to the "Installation" section [here](https://github.com/songweizhi/BioSAK) for its installation. The purpose of this module is to generate iTOL-recognizable files for your dataset; parameters (e.g. colour, font size) provided in the generated files might need further adjustment. Input files for the following commands can be found in `files_needed/raw_data`.

       BioSAK iTOL -ColorRange -lg raw_MAG_taxon.txt -lt Class -out NorthSea_1_Taxon_Range.txt
       BioSAK iTOL -ColorStrip -lg raw_MAG_taxon.txt -lt Class -out NorthSea_1_Taxon_ColorStrip.txt
       BioSAK iTOL -ColorStrip -lg raw_MAG_LifeStyle.txt -lt LifeStyle -out NorthSea_2_LifeStyle.txt
       BioSAK iTOL -Heatmap -lm raw_MAG_abundance.txt -lt Abundance -out NorthSea_3_Abundance.txt
       BioSAK iTOL -SimpleBar -lv raw_MAG_size.txt -scale 0-3-6-9 -lt MAG_Size -out NorthSea_4_MAG_Size.txt

       # for help
       BioSAK iTOL -h

## Help information

1. More examples: [https://itol.embl.de/help.cgi](https://itol.embl.de/help.cgi)
1. The Newick tree format: [http://evolution.genetics.washington.edu/phylip/newicktree.html](http://evolution.genetics.washington.edu/phylip/newicktree.html)
1. Hex color codes: [https://htmlcolorcodes.com](https://htmlcolorcodes.com) and [https://www.color-hex.com](https://www.color-hex.com)
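
The input files described above are plain tab-separated text, so they can also be written directly from a script. Below is a minimal, hypothetical Python sketch (the `mag_sizes` dictionary and the output filename are illustration values, not part of the demo data) that writes a Leaf-to-Value (`-lv`) file for the SimpleBar dataset:

    # write a Leaf-to-Value file: one genome per line, tab separated, no header
    mag_sizes = {"genome_1": 6.15, "genome_2": 6.63}

    with open("raw_MAG_size.txt", "w") as handle:
        for genome_id, size_mbp in mag_sizes.items():
            handle.write("%s\t%s\n" % (genome_id, size_mbp))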
PypiClean
/Gen_Gaus_Bi_distributions-0.1.tar.gz/Gen_Gaus_Bi_distributions-0.1/Gen_Gaus_Bi_distributions/Binomialdistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution


class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob=.5, size=20):

        self.n = size
        self.p = prob

        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Function to calculate the mean from p and n

        Args:
            None

        Returns:
            float: mean of the data set
        """

        self.mean = self.p * self.n

        return self.mean

    def calculate_stdev(self):
        """Function to calculate the standard deviation from p and n.

        Args:
            None

        Returns:
            float: standard deviation of the data set
        """

        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))

        return self.stdev

    def replace_stats_with_data(self):
        """Function to calculate p and n from the data set

        Args:
            None

        Returns:
            float: the p value
            float: the n value
        """

        self.n = len(self.data)
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

        return self.p, self.n

    def plot_bar(self):
        """Function to output a bar chart of the instance variable data using
        the matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """

        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass function calculator for the binomial distribution.

        Args:
            k (int): number of successes for which to calculate the probability

        Returns:
            float: probability of exactly k successes in n trials
        """

        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)

        return a * b

    def plot_bar_pdf(self):
        """Function to plot the pdf of the binomial distribution

        Args:
            None

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))

        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Binomial distributions with equal p

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: Binomial distribution
        """

        try:
            assert self.p == other.p, 'p values are not equal'
        except AssertionError as error:
            raise

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()

        return result

    def __repr__(self):
        """Function to output the characteristics of the Binomial instance

        Args:
            None

        Returns:
            string: characteristics of the Binomial
        """

        return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
/MyConnect_SQL-1.1.0-py3-none-any.whl/MyConnect/AuthLogin.py
from tkinter import * from tkinter import messagebox import mysql.connector as MC from mysql.connector import errorcode # Var global lg global LoginFrame u1 = "" usernameGL = "" lgn = "" lg = Tk() LoginFrame = Frame(lg, width=350, height=350, bg="white") # Def class login(): # Windows def setTitle(title): lg.title(title) lg.geometry('925x500+300+200') lg.resizable(False, False) # LoginSetup class form(): # Form def form(title="Sign in", titleSize="20", titleColor="#57a1f8" ,positionX="300", positionY="70"): # Frame LoginFrame.place(x=positionX, y=positionY) # Title LoginHeading = Label(LoginFrame, text=title, fg=titleColor,bg='white', font=(titleSize)) LoginHeading.place(x=150, y=5) # User def userInput(placeholder="Username", widht="20"): # Global global userInputLogin # Def def on_enter(e): name = userInputLogin.get() if name==placeholder: userInputLogin.delete(0, END) def on_leave(e): name = userInputLogin.get() if name=='': userInputLogin.insert(0, placeholder) # Input userInputLogin = Entry(LoginFrame, width=widht, fg="black", border=0, bg="white", font=(11)) userInputLogin.place(x=30, y=80) # Placeholder userInputLogin.insert(0, placeholder) userInputLogin.bind('<FocusIn>', on_enter) userInputLogin.bind('<FocusOut>', on_leave) # Line Frame(LoginFrame, width=295, height=2, bg="black").place(x=25, y=108) # Password def passwordInput(placeholder="Password", widht="20"): # Global global passwordInputLogin # Def def on_enter(e): name = passwordInputLogin.get() if name==placeholder: passwordInputLogin.delete(0, END) passwordInputLogin.config(show="*") def on_leave(e): name = passwordInputLogin.get() if name=='': passwordInputLogin.insert(0, placeholder) passwordInputLogin.config(show="") # Input passwordInputLogin = Entry(LoginFrame, width=widht, fg="black", border=0, bg="white", font=(11)) passwordInputLogin.place(x=30, y=150) # PLaceholder passwordInputLogin.insert(0, placeholder) passwordInputLogin.bind('<FocusIn>', on_enter) passwordInputLogin.bind('<FocusOut>', on_leave) # Line Frame(LoginFrame, width=295, height=2, bg="black").place(x=25, y=177) # Button def buttonLogin(text="Sign in", widht=39, pady=7, bg="#57a1f8", fg="white", border=0): def signin(): username = userInputLogin.get() password = passwordInputLogin.get() try: if usernameGL != "": if username == usernameGL: if password == passwordGL: u1 = username print("Welcome "+username) lg.destroy() else: messagebox.showerror("Login", "Password is incorrect !") else: messagebox.showerror("Login", "Username is incorrect !") else: try: query='select * from '+Table+' where '+TableUser+'=%s and '+TablePassword+'=%s' cursor.execute(query ,(username ,password)) row = cursor.fetchone() if row == None: messagebox.showerror('Login', 'Invalid username or/and password') else: u1 = username print("Welcome "+username) lg.destroy() except MC.Error as err: messagebox.showerror("MySQL Error", err) except: print("Erreur") buttonLogin = Button(LoginFrame, text=text, width=widht, pady=pady, bg=bg, fg=fg, border=border, command=signin) buttonLogin.place(x=35,y=224) def buttonRegister(text="Register", widht=39, pady=7, bg="#57a1f8", fg="white", border=0): buttonRegister = Button(LoginFrame, text=text, width=widht, pady=pady, bg=bg, fg=fg, border=border) buttonRegister.place(x=35, y=284) def ConfigLogin(username, password): global usernameGL global passwordGL usernameGL = username passwordGL = password def MySQLConfig(host, username, password, database, table, userTable = "user", passwordTabel = "password"): config = { 'user': username, 'password': 
password, 'host': host, 'database': database, 'raise_on_warnings': True } try: # Global global cnx global Table global TableUser global TablePassword global cursor # Code cnx = MC.connect(**config) Table = table TableUser = userTable TablePassword = passwordTabel cursor = cnx.cursor() except MC.Error as err: if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: messagebox.showerror("MySQL Error", "Username or password is incorrect !") elif err.errno == errorcode.ER_BAD_DB_ERROR: messagebox.showerror("MySQL Error", "No found your datebase") else: messagebox.showerror("MySQL Error" ,err) # MainLoop def loop(): lg.mainloop() username = u1 # Def def on_closing(): lg.destroy() exit() # While lg.protocol("WM_DELETE_WINDOW", on_closing)
PypiClean
/AHRS-0.3.1-py3-none-any.whl/ahrs/filters/roleq.py
import numpy as np from ..common.orientation import ecompass from ..common.mathfuncs import cosd, sind class ROLEQ: """ Recursive Optimal Linear Estimator of Quaternion Uses OLEQ to estimate the initial attitude. Parameters ---------- gyr : numpy.ndarray, default: None N-by-3 array with measurements of angular velocity in rad/s. acc : numpy.ndarray, default: None N-by-3 array with measurements of acceleration in in m/s^2. mag : numpy.ndarray, default: None N-by-3 array with measurements of magnetic field in mT. Attributes ---------- gyr : numpy.ndarray N-by-3 array with N gyroscope samples. acc : numpy.ndarray N-by-3 array with N accelerometer samples. mag : numpy.ndarray N-by-3 array with N magnetometer samples. frequency : float Sampling frequency in Herz Dt : float Sampling step in seconds. Inverse of sampling frequency. Q : numpy.array, default: None M-by-4 Array with all estimated quaternions, where M is the number of samples. Equal to None when no estimation is performed. Raises ------ ValueError When dimension of input arrays ``gyr``, ``acc`` or ``mag`` are not equal. Examples -------- >>> gyr_data.shape, acc_data.shape, mag_data.shape # NumPy arrays with sensor data ((1000, 3), (1000, 3), (1000, 3)) >>> from ahrs.filters import ROLEQ >>> orientation = ROLEQ(gyr=gyr_data, acc=acc_data, mag=mag_data) >>> orientation.Q.shape # Estimated attitude (1000, 4) """ def __init__(self, gyr: np.ndarray = None, acc: np.ndarray = None, mag: np.ndarray = None, weights: np.ndarray = None, magnetic_ref: np.ndarray = None, frame: str = 'NED', **kwargs ): self.gyr = gyr self.acc = acc self.mag = mag self.a = weights if weights is not None else np.ones(2) self.Q = None self.frequency = kwargs.get('frequency', 100.0) self.Dt = kwargs.get('Dt', 1.0/self.frequency) self.q0 = kwargs.get('q0') self.frame = frame # Reference measurements self._set_reference_frames(magnetic_ref, self.frame) # Estimate all quaternions if data is given if self.acc is not None and self.gyr is not None and self.mag is not None: self.Q = self._compute_all() def _set_reference_frames(self, mref: float, frame: str = 'NED'): if frame.upper() not in ['NED', 'ENU']: raise ValueError(f"Invalid frame '{frame}'. Try 'NED' or 'ENU'") # Magnetic Reference Vector if mref is None: # Local magnetic reference of Munich, Germany from ..common.mathfuncs import MUNICH_LATITUDE, MUNICH_LONGITUDE, MUNICH_HEIGHT from ..utils.wmm import WMM wmm = WMM(latitude=MUNICH_LATITUDE, longitude=MUNICH_LONGITUDE, height=MUNICH_HEIGHT) self.m_ref = np.array([wmm.X, wmm.Y, wmm.Z]) if frame.upper() == 'NED' else np.array([wmm.Y, wmm.X, -wmm.Z]) elif isinstance(mref, (int, float)): cd, sd = cosd(mref), sind(mref) self.m_ref = np.array([cd, 0.0, sd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd]) else: self.m_ref = np.copy(mref) self.m_ref /= np.linalg.norm(self.m_ref) # Gravitational Reference Vector self.a_ref = np.array([0.0, 0.0, -1.0]) if frame.upper() == 'NED' else np.array([0.0, 0.0, 1.0]) def _compute_all(self) -> np.ndarray: """Estimate the quaternions given all data. Attributes ``gyr``, ``acc`` and ``mag`` must contain data. Returns ------- Q : array M-by-4 Array with all estimated quaternions, where M is the number of samples. 
""" if self.acc.shape != self.gyr.shape: raise ValueError("acc and gyr are not the same size") if self.acc.shape != self.mag.shape: raise ValueError("acc and mag are not the same size") num_samples = len(self.acc) Q = np.zeros((num_samples, 4)) Q[0] = ecompass(self.acc[0], self.mag[0], frame=self.frame, representation='quaternion') for t in range(1, num_samples): Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t]) return Q def attitude_propagation(self, q: np.ndarray, omega: np.ndarray) -> np.ndarray: """Attitude estimation from previous quaternion and current angular velocity. .. math:: \\mathbf{q}_\\omega = \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} = \\begin{bmatrix} q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\ q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\ q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\ q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w \\end{bmatrix} Parameters ---------- q : numpy.ndarray A-priori quaternion. omega : numpy.ndarray Angular velocity, in rad/s. Returns ------- q : numpy.ndarray Attitude as a quaternion. """ Omega_t = np.array([ [0.0, -omega[0], -omega[1], -omega[2]], [omega[0], 0.0, omega[2], -omega[1]], [omega[1], -omega[2], 0.0, omega[0]], [omega[2], omega[1], -omega[0], 0.0]]) q_omega = (np.identity(4) + 0.5*self.Dt*Omega_t) @ q # (eq. 37) return q_omega/np.linalg.norm(q_omega) def WW(self, Db, Dr): """W Matrix .. math:: \\mathbf{W} = D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3 Parameters ---------- Db : numpy.ndarray Normalized tri-axial observations vector. Dr : numpy.ndarray Normalized tri-axial reference vector. Returns ------- W_matrix : numpy.ndarray W Matrix. """ bx, by, bz = Db rx, ry, rz = Dr M1 = np.array([ [bx, 0.0, bz, -by], [0.0, bx, by, bz], [bz, by, -bx, 0.0], [-by, bz, 0.0, -bx]]) # (eq. 18a) M2 = np.array([ [by, -bz, 0.0, bx], [-bz, -by, bx, 0.0], [0.0, bx, by, bz], [bx, 0.0, bz, -by]]) # (eq. 18b) M3 = np.array([ [bz, by, -bx, 0.0], [by, -bz, 0.0, bx], [-bx, 0.0, -bz, by], [0.0, bx, by, bz]]) # (eq. 18c) return rx*M1 + ry*M2 + rz*M3 # (eq. 20) def oleq(self, acc: np.ndarray, mag: np.ndarray, q_omega: np.ndarray) -> np.ndarray: """OLEQ with a single rotation by R. Parameters ---------- acc : numpy.ndarray Sample of tri-axial Accelerometer. mag : numpy.ndarray Sample of tri-axial Magnetometer. q_omega : numpy.ndarray Preceding quaternion estimated with angular velocity. Returns ------- q : np.ndarray Final quaternion. """ a_norm = np.linalg.norm(acc) m_norm = np.linalg.norm(mag) if not a_norm > 0 or not m_norm > 0: # handle NaN return q_omega acc = np.copy(acc) / np.linalg.norm(acc) mag = np.copy(mag) / np.linalg.norm(mag) sum_aW = self.a[0]*self.WW(acc, self.a_ref) + self.a[1]*self.WW(mag, self.m_ref) # (eq. 31) R = 0.5*(np.identity(4) + sum_aW) # (eq. 33) q = R @ q_omega # (eq. 25) return q / np.linalg.norm(q) def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray) -> np.ndarray: """Update Attitude with a Recursive OLEQ Parameters ---------- q : numpy.ndarray A-priori quaternion. 
gyr : numpy.ndarray Sample of angular velocity in rad/s acc : numpy.ndarray Sample of tri-axial Accelerometer in m/s^2 mag : numpy.ndarray Sample of tri-axial Magnetometer in mT Returns ------- q : numpy.ndarray Estimated quaternion. """ q_g = self.attitude_propagation(q, gyr) # Quaternion from previous quaternion and angular velocity q = self.oleq(acc, mag, q_g) # Second stage: Estimate with OLEQ return q
PypiClean
/DirectoryStorage-1.1.21.tar.gz/DirectoryStorage-1.1.21/Full.py
import os, errno, time, sys, string, struct, md5, cPickle, random, re import cStringIO from ZODB import POSException from ZODB import TimeStamp from ZODB.ConflictResolution import ConflictResolvingStorage, ResolvedSerial from BaseDirectoryStorage import BaseDirectoryStorage from utils import z16, z64, z128, OMAGIC, TMAGIC, CMAGIC, oid2str, timestamp2tid from utils import DirectoryStorageError, DirectoryStorageVersionError, FileDoesNotExist from utils import DanglingReferenceError, POSGeorgeBaileyKeyError from utils import class_name_from_pickle from utils import ZODB_referencesf, logger class Full(BaseDirectoryStorage,ConflictResolvingStorage): def _load_object_file(self,oid): serial = self._get_current_serial(oid) if serial is None: raise POSException.POSKeyError(oid) if len(serial)!=8: raise DirectoryStorageError('Bad current revision for oid %r' % (stroid,)) data = self.filesystem.read_database_file('o'+oid2str(oid)+'.'+oid2str(serial)) if len(data)==72: # This object contains a zero length pickle. that means the objects creation was undone. raise POSGeorgeBaileyKeyError(oid) return data, serial def _get_current_serial(self,oid): # could use some caching here? stroid = oid2str(oid) try: data = self.filesystem.read_database_file('o'+stroid+'.c') except FileDoesNotExist: return None return _fix_serial(data,oid) def _begin(self, tid, u, d, e): # We override this to add our own attributes to the transaction object BaseDirectoryStorage._begin(self,tid,u,d,e) td = self._transaction_directory td.oids = {} td.refoids = {} def store(self, oid, serial, data, version, transaction): if self._is_read_only: raise POSException.ReadOnlyError('Can not store to a read-only DirectoryStorage') if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) if version: raise DirectoryStorageVersionError('Versions are not supported') conflictresolved = 0 old_serial = self._get_current_serial(oid) if old_serial is None: # no previous revision of this object old_serial = z64 elif old_serial!=serial: # The object exists in the database, but the serial number # given in the call is not the same as the last stored serial # number. First, attempt application level conflict # resolution, and if that fails, raise a ConflictError. data = self.tryToResolveConflict(oid, old_serial, serial, data) if data: conflictresolved = 1 else: raise POSException.ConflictError(serials=(old_serial, serial)) tid = self.get_current_transaction() assert len(tid)==8 body = self._make_file_body(oid,tid,old_serial,data) refoids = [] if self.check_dangling_references: ZODB_referencesf(data,refoids) self._write_object_file(oid,tid,body,refoids) if conflictresolved: return ResolvedSerial else: return tid def restore(self, oid, serial, data, version, prev_txn, transaction): # A lot like store() but without all the consistency checks. This # should only be used when we /know/ the data is good, hence the # method name. While the signature looks like store() there are some # differences: # # - serial is the serial number of /this/ revision, not of the # previous revision. It is used instead of self._serial, which is # ignored. # # - Nothing is returned # # - data can be None, which indicates a George Bailey object # (i.e. one who's creation has been transactionally undone). # # - prev_txn is a hint that an identical pickle has been stored # for the same oid in a previous transaction. Some other storages # use this to enable a space-saving optimisation. We dont. 
# if self._is_read_only: raise POSException.ReadOnlyError('Can not restore to a read-only DirectoryStorage') if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) if version: # should this be silently ignored? raise DirectoryStorageVersionError('Versions are not supported') old_serial = self._get_current_serial(oid) if old_serial is None: # no previous revision of this object old_serial = z64 if data is None: data = '' if oid>self._oid: self._oid = oid body = self._make_file_body(oid,serial,old_serial,data) self._write_object_file(oid,serial,body) def _write_object_file(self,oid,newserial,body,refoids=None): td = self._transaction_directory # refoids is a list of oids referenced by this object, which should be # checked for dangling references at transaction commit. If the refoids # parameter is not provided then we do not check any references if refoids: for refoid in refoids: td.refoids[refoid] = oid # td.oids is our primary index of objects modified in this transaction. # values in this mapping indicate whether the modified object is George Bailey is_george_bailey_revision = len(body)==72 td.oids[oid] = is_george_bailey_revision stroid = oid2str(oid) if body: td.write('o'+stroid+'.'+oid2str(newserial),body) td.write('o'+stroid+'.c',newserial) def _vote_impl(self): # Also need to write file describing this transaction td = self._transaction_directory assert td is not None # Verify that every object reference corresponds to a real # object that already exists in the database, or one that # is written in this transaction. The goal is to guarantee # that the storage contains no dangling references. Currently # there are still several ways that a dangling reference can # be created without detection: # 1. Writing a George Bailey object revision when another object # contains a reference to it. We only check for references # in objects written in this transaction. # 2. A concurrent pack may have scheduled a referenced object # for removal. It is not dangling now, but it would be # once the pack is complete good_old_oids = {} for refoid,soid in td.refoids.items(): if td.oids.has_key(refoid): if td.oids[refoid]: # A reference to a George Bailey object written in this # transaction. raise DanglingReferenceError(soid,refoid) else: # A reference to an ordinary object written in this # transaction pass elif good_old_oids.has_key(refoid): # We have already checked that it exists in the database pass else: # An object outside of this transaction. Try to load it. try: self._load_object_file(refoid) except POSException.POSKeyError: # Failed to load the object. raise DanglingReferenceError(soid,refoid) else: # This object already exists in the database. 
good_old_oids[refoid] = 1 # Record the oid of every modified object in the transaction file ob = string.join(td.oids.keys(),'') u,d,e = td.u,td.d,td.e assert self._prev_serial<self.get_current_transaction() body = struct.pack("!HHHIH",len(u),len(d),len(e),len(ob),0) + u + d + e + ob if self._md5_write: md5sum = md5.md5(body).digest() else: md5sum = z128 header = TMAGIC + \ struct.pack('!I',len(body)+48) + \ td.tid + \ z64 + \ self._prev_serial # The transaction file name has a dot in the middle of it as a clue to # the 'bushy' format that the trailing characters are not worth dividing into # subdirectories self._transaction_directory.write(_tid_filename(td.tid), header + md5sum + body) def supportsTransactionalUndo(self): return 1 def undoLog(self,first=0,last=-20,filter=None): if last < 0: last = first - last + 1 i = 0 r = [] if self.history_timeout>0: timeout = time.time()+self.history_timeout else: timeout = None tid = self.filesystem.read_database_file('x.serial') while i<last: strtid = oid2str(tid) try: data = self.filesystem.read_database_file(_tid_filename(tid)) except FileDoesNotExist: if tid>=self._last_pack: # missing file raise else: # earlier transactions have been lost to packing break self._check_transaction_file(tid,data,self._md5_undolog) lenu,lend,lene,leno,lenv = struct.unpack('!HHHIH',data[48:60]) d = { 'user_name' : data[60:60+lenu], 'time' : TimeStamp.TimeStamp(tid).timeTime(), 'description' : data[60+lenu:60+lenu+lend], 'id' : tid } if lene: try: e = cPickle.loads(data[60+lenu+lend:60+lenu+lend+lene]) d.update(e) except: pass # this transaction data lists all the objects modified in this transaction. # we must check whether they all have an earlier revision to load state from is_undoable = 1 oidblock = data[60+lenu+lend+lene:60+lenu+lend+lene+leno] assert 0==(len(oidblock)%8) while oidblock: if timeout is not None and time.time()>timeout: # We have spent too long processing this request. is_undoable = 0 break # oids are packed into the oidblock. no duplicates. oid,oidblock = oidblock[:8],oidblock[8:] stroid = oid2str(oid) # load the revision to be undone. try: odata = self.filesystem.read_database_file('o'+stroid+'.'+strtid) except FileDoesNotExist: if tid>=self._last_pack: # missing file raise else: # This file does not exist because it has been removed in a previous pack is_undoable = 0 break self._check_object_file(oid,tid,odata,self._md5_undo) # We can only undo this transaction if the previous revision of the object # was not removed by packing. prevtid = odata[56:64] if prevtid==z64: # the object was created in this transaction. Thats fine pass else: # Try to load the revision that will become the new current revision # if this transactions is undone. Does the storage API require this check? strprevtid = oid2str(prevtid) try: podata = self.filesystem.read_database_file('o'+stroid+'.'+strprevtid) except FileDoesNotExist: if prevtid>=self._last_pack: # missing file raise else: # The previous revision has been removed by packing is_undoable = 0 break self._check_object_file(oid,prevtid,podata,self._md5_undo) if is_undoable: if filter is None or filter(d): if i >= first: r.append(d) i += 1 tid = data[24:32] if tid==z64: # this was the first revision ever break if timeout is not None and time.time()>timeout: # We have spent too long processing this request. 
break return r def _check_transaction_file(self,tid,data,check_md5): strtid = oid2str(tid) if TMAGIC!=data[:4]: raise DirectoryStorageError('Bad magic number in transaction id %r' % (strtid,)) apptid = data[8:16] if tid!=apptid: raise DirectoryStorageError('tid mismatch %r %r' % (strtid,str2oid(apptid))) l = struct.unpack('!I',data[4:8])[0] if l!=len(data): raise DirectoryStorageError('Wrong length of file for tid %r, %d, %d' % (strtid,l,len(data))) md5sum = data[32:48] vdata = data[48:] if md5sum!=z128 and check_md5: if md5.md5(vdata).digest()!=md5sum: raise DirectoryStorageError('Pickle checksum error reading oid %r' % (stroid,)) def transactionalUndo(self, transaction_id, transaction): # Note that there may be a pack running concurrently. if self._is_read_only: raise POSException.ReadOnlyError('Can not undo in a read-only DirectoryStorage') if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) transaction_id = str(transaction_id) if len(transaction_id)!=8: raise DirectoryStorageError('Bad transaction_id') # A mapping from oid to the serial that it has been undone back to. td = self._transaction_directory if not hasattr(td,'undone'): td.undone={} # Load the transaction file so that we can find the list of oids modified # in this transaction strtid = oid2str(transaction_id) try: data = self.filesystem.read_database_file(_tid_filename(transaction_id)) except FileDoesNotExist: raise POSException.UndoError('No record of that transaction') lenu,lend,lene,leno,lenv = struct.unpack('!HHHIH',data[48:60]) oidblock = data[60+lenu+lend+lene:60+lenu+lend+lene+leno] assert 0==(len(oidblock)%8) oids = {} this_transaction = self.get_current_transaction() while oidblock: # oids are packed into the oidblock. no duplicates. oid,oidblock = oidblock[:8],oidblock[8:] assert not oids.has_key(oid) oids[oid] = 1 stroid = oid2str(oid) # load the revision to be undone data = self.filesystem.read_database_file('o'+stroid+'.'+strtid) self._check_object_file(oid,transaction_id,data,self._md5_undo) # check if this object is eligible for undo current = self._get_current_serial(oid) undocurrent = td.undone.get(oid,current) if undocurrent!=transaction_id: # The current revision is not the transaction being undone. # But maybe the current revision contains a *copy* of the revision being # undone, made during a previous undo operation... cdata = self.filesystem.read_database_file('o'+stroid+'.'+oid2str(undocurrent)) self._check_object_file(oid,undocurrent,cdata,self._md5_undo) if cdata[16:24]!=transaction_id: raise POSException.UndoError('Some objects modified by later transaction') # XXXX we probably should check for a copy of a copy of a copy of a copy. prevtid = data[56:64] if prevtid==z64: # The object was created in this transaction. 
body = self._make_file_body(oid,this_transaction,current,'',undofrom=prevtid) self._write_object_file(oid,this_transaction,body) else: # load the revision that will become the new current revision strprevtid = oid2str(prevtid) data = self.filesystem.read_database_file('o'+stroid+'.'+strprevtid) self._check_object_file(oid,prevtid,data,self._md5_undo) td.undone[oid] = prevtid # compute a new file body = self._make_file_body(oid,this_transaction,current,data[72:],undofrom=prevtid) self._write_object_file(oid,this_transaction,body) return oids.keys() def undo(self, transaction_id, transaction): return self.get_current_transaction(), self.transactionalUndo(transaction_id,transaction) def loadSerial(self, oid, serial): try: data = self.filesystem.read_database_file('o'+oid2str(oid)+'.'+oid2str(serial)) except FileDoesNotExist: raise POSException.POSKeyError(oid) self._check_object_file(oid,serial,data,self._md5_read) pickle = data[72:] if not pickle: # creation was undone raise POSException.POSKeyError(oid) return pickle def history(self,oid,version=None,size=1,filter=None): assert not version history = [] stroid = oid2str(oid) tid = self._get_current_serial(oid) if tid is None: # history of object with no current revision raise POSException.POSKeyError(oid) first = 1 if self.history_timeout>0: timeout = time.time()+self.history_timeout else: timeout = None while len(history)<size: strtid = oid2str(tid) # Some basic information we know before we start d = { 'time' : TimeStamp.TimeStamp(tid).timeTime(), 'serial' : tid, # used in Zope 2.6, 2.7 'tid' : tid, # used in Zope 2.8 'version' : '' } # First load the transaction file to get most of the information try: data = self.filesystem.read_database_file(_tid_filename(tid)) except FileDoesNotExist: if tid>=self._last_pack: # missing file raise else: # Transaction file removed by packing. # The object file may not exist either, but that will # be detected further down. This happens often when using # keep_policy=undoable, and possibly in other cases # if packing is interrupted d['user_name'] = 'User Name no longer recorded' d['description'] = 'Description no longer recorded' else: self._check_transaction_file(tid,data,self._md5_history) lenu,lend,lene,leno,lenv = struct.unpack('!HHHIH',data[48:60]) d['user_name'] = data[60:60+lenu] d['description'] = data[60+lenu:60+lenu+lend] # Next load the object file to get the size, and our next pointer try: data = self.filesystem.read_database_file('o'+stroid+'.'+strtid) except FileDoesNotExist: if tid>=self._last_pack or first: # missing file raise else: # Object file removed by packing. # The transaction file may or may not exist break self._check_object_file(oid,tid,data,self._md5_history) d['size'] = len(data)-72 if filter is None or filter(d): history.append(d) tid = data[56:64] if tid==z64: # there is no more history for this object break if timeout is not None and time.time()>timeout: # We have spent too long processing this request. break first = 0 return history _ok_to_pack_empty_storage = 1 # set by some unit tests def _pack(self,t,referencesf): if not self._has_root(): logger.log(self.filesystem.ENGINE_NOISE, 'Skipping pack of empty storage') # We assume it is empty if there is no root object. This assumption # is valid, although some ZODB unit tests break the rules. assert self._ok_to_pack_empty_storage return fs = self.filesystem # Packing uses a mark and sweep strategy. # # Pass 1 # # First, create the mark context and clear any previous marks. 
logger.info('Starting to pack') start_time = time.time() logger.log(self.filesystem.ENGINE_NOISE, 'Packing pass 1 of 4') mc = fs.mark_context('A') # # Pass 2 # # Reachable objects and transactions will be marked. # An object is reachable if: # 1. It is the root object # 2. it is referenced by a sufficiently recent revision # of another object which is reachable. This catches # most objects # 3. It was written in a sufficiently recent transaction. # This catches a few weird boundary cases, because # most objects are caught by 2. # logger.log(self.filesystem.ENGINE_NOISE, 'Packing pass 2a of 4') self._mark_reachable_objects(z64,t,referencesf,mc) logger.log(self.filesystem.ENGINE_NOISE, 'Packing pass 2b of 4') self._mark_recent_transactions(t,referencesf,mc) # Mark some admin files mc.mark('A/'+fs.filename_munge('x.serial')) mc.mark('A/'+fs.filename_munge('x.oid')) mc.mark('A/'+fs.filename_munge('x.packed')) # # Pass 3 # # Some reachable transactions may be earlier than non-reachable ones. # We need to modify the back-pointers in those transactions to # ensure everything links up when the unreachables are removed logger.log(self.filesystem.ENGINE_NOISE, 'Packing pass 3 of 4') self._relink_reachable_transactions(mc) # # Pass 4 # # unmarked files are swept away logger.log(self.filesystem.ENGINE_NOISE, 'Packing pass 4 of 4') total = self._remove_unmarked_objects(int(time.time()),mc) # # elapsed = time.time()-start_time elapsed = '%d:%02d:%02d' % (elapsed/3600,(elapsed/60)%60,elapsed%60) logger.info('Packing complete, removed %d files, elapsed time %s' % (total,elapsed)) def enter_snapshot(self,code): # The user is allowed to do things that might confuse our file marking return BaseDirectoryStorage.enter_snapshot(self,code) def _has_root(self): fs = self.filesystem stroid = oid2str(z64) name = 'o'+stroid+'.c' name = fs.filename_munge(name) name = os.path.join('A',name) return fs.exists(name) def _mark_recent_transactions(self,threshold,referencesf,mc): fs = self.filesystem tid = fs.read_file('A/'+fs.filename_munge('x.serial')) counter = 0 # We definitely need to keep the two most recent transaction files # to allow replication/backup to use the transaction file as a datum. # Here we keep all the object revisions in the two most recent # transactions too. Thats unnecessary, but safe. if self.min_pack_time==0: # If the min pack time is zero then we certainly dont care about replication # or backup. We are probably inside a ZODB unit test, which assumes # this safety precaution does not exist. inhibit it counter = 2 while tid>=threshold or counter<2: counter += 1 name = _tid_filename(tid) name = fs.filename_munge(name) name = os.path.join('A',name) mc.mark(name) try: data = fs.read_file(name) except FileDoesNotExist: if tid>=self._last_pack: raise else: return self._check_transaction_file(tid,data,0) lenu,lend,lene,leno,lenv = struct.unpack('!HHHIH',data[48:60]) oidblock = data[60+lenu+lend+lene:60+lenu+lend+lene+leno] assert 0==(len(oidblock)%8) while oidblock: oid,oidblock = oidblock[:8],oidblock[8:] # Most of these objects will already be marked as reachable. The only exceptions # are objects that are not reachable from the root - which only happens under # a few strange boundary conditions. (subtransactions, cross-transation # references, etc). Originally DirectoryStorage removed these objects when # packing. However this causes some minor complications for backup and # replication, so it is easier to keep them. if self._get_current_serial(oid) is None: # Missing file. 
This is acceptable pass else: # If we have the file, then mark it and everything that it references self._mark_reachable_objects(oid,threshold,referencesf,mc) tid = data[24:32] if tid==z64: # back to the beginning of history break def _mark_reachable_objects(self,oid,threshold,referencesf,mc): todo = {oid:None} while todo: # The current ZODB oids assignment policy means that chosing the # largest oid in the todo list leads to keeping a small todo list oid = max(todo.keys()) new = self._mark_reachable_objects_impl(oid,threshold,referencesf,mc) todo.update(new) del todo[oid] def _mark_reachable_objects_impl(self,oid,threshold,referencesf,mc): fs = self.filesystem # First mark the object current revision pointer file stroid = oid2str(oid) name = 'o'+stroid+'.c' name = fs.filename_munge(name) name = os.path.join('A',name) if mc.is_marked(name): # This object has already been marked, # so there is nothing more to do. return {} current = _fix_serial(fs.read_file(name),oid) mc.mark(name) # Next, check the files containing recent revisions of this object # to determine the set of referenced objects tid = current class_name = None keepclass = None allrefoids = {} first = 1 while 1: # Load this revision strtid = oid2str(tid) __traceback_info__ = stroid,strtid name = 'o'+stroid+'.'+strtid name = fs.filename_munge(name) name = os.path.join('A',name) try: data = fs.read_file(name) except FileDoesNotExist: if tid>=self._last_pack or first: # Missing file. This indicates database corruption. # It would be dangerous to continue from here because it may lead us to # think that some objects are unreachable, because they are only reachable # from this missing file. Continuing the pack could make things worse. raise else: # Revision does not exist. It must have been removed by packing break self._check_object_file(oid,tid,data,self._md5_pack) pickle = data[72:] if len(pickle)==0: # an object whose creation has been undone. # This revision references nothing pass else: # Record the referenced objects in the to-do list try: refoids = [] referencesf(pickle,refoids) for refoid in refoids: allrefoids[refoid] = 1 except (IOError,ValueError,EOFError),e: if first: # The current revision of an object can not be unpickled. Thats bad # maybe you could undo the last transaction, and hope the second-to-last # one can be unpickled? logger.critical( 'Failure to unpickle current revision of an object') else: # An old revision of an object can not be unpickled. Thats ok as long # as you dont want to undo. Packing with a different time would remove it. timestamp = TimeStamp.TimeStamp(tid).timeTime() ago = int((time.time()-timestamp)/(60*60*24)) logger.error( 'Failure to unpickle old revision of an object. ' 'You could remove it with a pack that removes ' 'revisions that are %d days old.' % (ago,)) raise if class_name is None: class_name = class_name_from_pickle(pickle) keepclass = self.keepclass.get(class_name) # Mark this file mc.mark(name) if tid>=threshold or self.keep_ancient_transactions: # Mark the corresponding transaction file name = _tid_filename(tid) name = fs.filename_munge(name) name = os.path.join('A',name) mc.mark(name) # check the previous revision of this object tid = data[56:64] if tid<threshold: # that revision is looking a little old. if keepclass is None or keepclass.expired(threshold,tid): # It will be discarded break else: # It normally would be discarded, but we have special instructions # to keep it longer than normal. 
#logger.info('keeping a %s' % class_name) pass first = 0 return allrefoids def _relink_reachable_transactions(self,mc): # Packing will retain all transactions after the threshold date, but # only those transactions before the threshold date which still contain # the most recent revision of an object. It will delete all Transactions # which occurred earlier than the pack date, and which no longer contain # the most recenet revision of something. # Some kept transactions may be earlier than a deleted ones. # We need to modify the back-pointers in those transactions to # ensure everything links up when things are removed. # # Once we start this process there are some objects (the ones which # will be deleted later in the pack process) which refer to a transaction # which is not linked from the current transaction. All of these changes # are not made through the journal, so any failure between now and the end # of packing may mean we are in this state for a long time. I suspect this # could cause a problem, but I cant think of any right now. # fs = self.filesystem tid = fs.read_file('A/'+fs.filename_munge('x.serial')) prev_name = '' prev_ptr = '' while 1: strtid = oid2str(tid) name = _tid_filename(tid) name = fs.filename_munge(name) name = os.path.join('A',name) try: data = fs.read_file(name) except FileDoesNotExist: if tid>=self._last_pack: raise else: return self._check_transaction_file(tid,data,0) if mc.is_marked(name): # This transaction is reachable # Ensure it is back-linked from the previous reachable transaction if prev_ptr and prev_ptr!=tid: fs.modify_file(prev_name,24,tid) # Record this... we may have to patch this file if an intermediate # transaction is not reachable. prev_name = name prev_ptr = data[24:32] else: pass tid = data[24:32] if tid==z64: # back to the beginning of history break _pointer_file_re = re.compile('^o[A-F0-9]{16}.c$') _object_file_re = re.compile('^o[A-F0-9]{16}.[A-F0-9]{16}$') _transaction_file_re = re.compile('^t[A-F0-9]{8}.[A-F0-9]{8}$') def _remove_unmarked_objects(self,now,mc,directory='A'): fs = self.filesystem total = 0 empty = 1 pretend = 0 for file in fs.listdir(directory): empty = 0 path = os.path.join(directory,file) if file.endswith('-deleted'): # this file is already awaiting delayed deletion try: time_deleted = int(string.split(file,'-')[-2]) except (ValueError,IndexError),e: # Wierd file name. delete it time_deleted = 0 if time_deleted + self.delay_delete < now: if pretend: print >> sys.stderr, 'packing would delay-remove %r' % (path,) else: fs.unlink(path) else: if fs.isdir(path): total += self._remove_unmarked_objects(now,mc,path) else: if mc.is_marked(path): if pretend: print >> sys.stderr, 'packing would keep %r' % (path,) else: total += 1 if pretend: print >> sys.stderr, 'packing would remove %r' % (path,) else: if self.delay_delete>0: fs.rename(path,path+'-'+str(now)+'-deleted') else: fs.unlink(path) if empty: fs.rmdir(directory) return total def _tid_filename(tid): return 't%02X%02X%02X.%02X%02X%02X%02X%02X' % struct.unpack('!8B', tid) def _fix_serial(data,oid): if len(data)==8: # the compact format. 8 bytes of serial. return data elif len(data)==12: # The old format. 4 bytes of magic number, then 8 bytes of serial. if data[:4]!=CMAGIC: raise DirectoryStorageError('Bad magic number in oid pointer %r' % (oid2str(oid),)) return data[4:12] else: raise DirectoryStorageError('Bad oid pointer file for oid %r' % (oid2str(oid),))
PypiClean
/FFTA-0.3.5.1-py3-none-any.whl/ffta/nfmd/NFMD.py
import numpy as np import torch import time class NFMD: def __init__(self, signal, num_freqs, window_size, windows=None, optimizer=torch.optim.SGD, optimizer_opts={'lr': 1e-4}, max_iters=1000, target_loss=1e-4, device='cpu'): ''' Initialize the object :param signal: temporal signal to be analyzed (should be 1-D) :type signal: numpy.ndarray :param num_freqs: number of frequencies to fit to signal. (Note: The 'mean' mode counts as a frequency mode) :type num_freqs: integer :param window_size: :type window_size: :param windows: :type windows: :param optimizer: Optimization algorithm to employ for learning. :type optimizer: optimizer object (torch.optim) :param optimizer_opts: Parameters to pass to the optimizer class. :type optimizer_opts: dict :param max_iters: number of steps for optimizer to take (maximum) :type max_iters: int the loss value at which the window is considered sufficiently 'fit' (note: setting this too low can cause issues by pushing freqs to 0):param target_loss: :type target_loss: float :param device: device to use for optimization (Note: default 'cpu', but could be 'cuda' with GPU) :type device: string ''' # Signal -- assumed 1D, needs to be type double self.x = signal.astype(np.double).flatten() self.n = signal.shape[0] # Signal Decomposition options self.num_freqs = num_freqs self.window_size = window_size self.windows = windows if not windows: self.windows = self.n # Stochastic Gradient Descent Options self.optimizer = optimizer self.optimizer_opts = optimizer_opts # If the learning rate is specified, scale it by # window size if 'lr' in optimizer_opts: self.optimizer_opts['lr'] /= window_size self.max_iters = max_iters self.target_loss = target_loss self.device = device def decompose_signal(self, update_freq: int = None): ''' Compute the slices of the windows used in the analysis. Note: this is equivalent to computing rectangular windows. :param update_freq: The number of optimizer steps between printed update statements. :type update_freq: int :returns: tuple (freqs, A, losses, indices) WHERE numpy.ndarray freqs is frequency vector numpy.ndarray A is coefficient vector numpy.ndarray losses is fit loss (MSE) for each window List indices is list of slice objects. 
each slice describes fit window indices` ''' # Compute window indices t1 = time.time() self.compute_window_indices() # Determine if printing updates verbose = update_freq != None # lists for results self.freqs = [] self.A = [] self.losses = [] self.window_fits = [] # Save the model fits # Tracker variables for previous freqs and A prev_freqs = None prev_A = None # iterate through each window: for i, idx_slice in enumerate(self.indices): # If update frequency is requested, print an update # at window <x> if verbose: if i % update_freq == 0: print("{}/{}".format(i, len(self.indices)), end="|") # Access data slice x_i = self.x[idx_slice].copy() # Determine number of SGD iterations to allow max_iters = self.max_iters if i == 0: self.max_iters = 10000 # Fit data in window to model loss, freqs, A = self.fit_window(x_i, freqs=prev_freqs, A=prev_A) # Store the results self.freqs.append(freqs) self.A.append(A) self.losses.append(loss) # Set the previous freqs and A variables prev_freqs = freqs prev_A = A #print (time.time() - t1, 's for decompose_signal') t2 = time.time() self.freqs = np.array(self.freqs) self.A = np.array(self.A) if self.device == 'cpu': self.losses = [loss.detach().numpy() for loss in self.losses] else: self.losses = [loss.detach().cpu().numpy() for loss in self.losses] #print (time.time() - t2, 's for detach') return self.freqs, self.A, self.losses, self.indices def compute_window_indices(self): ''' Sets the 'indices' attribute with computed index slices corresponding to the windows used in the analysis. Note: this is equivalent to computing rectangular windows. ''' t2 = time.time() # Define how many points between centerpoint of windows increment = int(self.n / self.windows) window_size = self.window_size # Initialize the indices lists self.indices = [] self.mid_idcs = [] # Populate the indices lists for i in range(self.windows): # Compute window slice indices idx_start = int(max(0, i * increment - window_size / 2)) idx_end = int(min(self.n, i * increment + window_size / 2)) if idx_end - idx_start == window_size: # Add the index slice to the indices list self.indices.append(slice(idx_start, idx_end)) idx_mid = int((idx_end + idx_start) / 2) self.mid_idcs.append(idx_mid) #print(time.time() - t2, 's for compute_window_indices') def fit_window(self, xt, freqs=None, A=None): ''' Fits a set of instantaneous frequency and component coefficient vectors to the provided data. :param xt: Temporal data of dimensions [T, ...] :typer xt: numpy.ndarray :param freqs: 1D vector of (guess) instantaneous frequencies (Note: assumes dt=1 in xt data array) :type freqs: numpy.ndarray, optional :param A: 1D vector of cosine/sine coefficients :type A: numpy.ndarray, optional :returns: tuple (loss, freqs, A) WHERE float loss is the loss for the fit window (mean squared error) numpy.ndarray freqs is frequency vector of instantaneous frequencies numpy.ndarray A is coefficient vector of component (sine/cosine) coefficients ''' # If no frequency is provided, generate initial frequency guess: if freqs is None: freqs, A = self.fft(xt) t2 = time.time() # Then begin SGD loss, freqs, A = self.sgd(xt, freqs, A, max_iters=self.max_iters) print(time.time() - t2, 's for sgd') return loss, freqs, A def fft(self, xt): ''' Given temporal data xt, fft performs the initial guess of the frequencies contained in the data using the FFT. :param xt: Temporal data of dimensions [T, ...] 
:type xt: numpy.array :returns: tuple (freqs, A) WHERE numpy.ndarray freqs is vector of instantaneous frequency estimates for each timepoint numpy.ndarray A is vector of component coefficients ''' # Ensure input signal is 1D: if len(xt.shape) == 1: xt = xt.reshape(-1, 1) # Gather model-fitting parameters k = self.num_freqs N = xt.shape[0] # Initialize a list of frequencies: freqs = [] for i in range(k): if len(freqs) == 0: residual = xt else: t = np.expand_dims(np.arange(N) + 1, -1) ws = np.asarray(freqs) Omega = np.concatenate([np.cos(t * 2 * np.pi * ws), np.sin(t * 2 * np.pi * ws)], -1) A = np.dot(np.linalg.pinv(Omega), xt) pred = np.dot(Omega, A) residual = pred - xt ffts = 0 for j in range(xt.shape[1]): ffts += np.abs(np.fft.fft(residual[:, j])[:N // 2]) w = np.fft.fftfreq(N, 1)[:N // 2] idxs = np.argmax(ffts) freqs.append(w[idxs]) ws = np.asarray(freqs) t = np.expand_dims(np.arange(N) + 1, -1) Omega = np.concatenate([np.cos(t * 2 * np.pi * ws), np.sin(t * 2 * np.pi * ws)], -1) A = np.dot(np.linalg.pinv(Omega), xt) return freqs, A def sgd(self, xt, freqs, A, max_iters=None): ''' Given temporal data xt, sgd improves the initial guess of omega by SGD. It uses the pseudo-inverse to obtain A. :param xt: Temporal data of dimensions [T, ...] :type xt: numpy.ndarray :param freqs: frequency vector :type freqs: numpy.ndarray :param A: Component coefficient vector :type A: numpy.ndarray :param max_iters: Number of optimizer steps to take (maximum) :type max_iters: :returns: tuple (loss, freqs, A) WHERE float loss is the loss for the fit window (mean squared error) numpy.ndarray freqs is frequency vector of instantaneous frequencies numpy.ndarray A is coefficient vector of component (sine/cosine) coefficients ''' # Set up PyTorch tensors for SGD A = torch.tensor(A, requires_grad=False, device=self.device) freqs = torch.tensor(np.asarray(freqs), requires_grad=True, device=self.device) xt = torch.tensor(xt, requires_grad=False, device=self.device) # Set up PyTorch Optimizer o2 = self.optimizer([freqs], **self.optimizer_opts) # Time indices t = torch.unsqueeze(torch.arange(len(xt), dtype=torch.get_default_dtype(), device=self.device) + 1, -1) # Determine how many iterations will be used if not max_iters: max_iters = self.max_iters # SGD to determine solution for i in range(max_iters): # Compute new model Omega = torch.cat([torch.cos(t * 2 * np.pi * freqs), torch.sin(t * 2 * np.pi * freqs)], -1) A = torch.matmul(torch.pinverse(Omega.data), xt) xhat = torch.matmul(Omega, A) # Compute Loss function loss = torch.mean((xhat - xt) ** 2) # Take a step o2.zero_grad() loss.backward() o2.step() # If loss is below fit threshold, end learning if loss < self.target_loss: break # Store the model fit: xhat = xhat.cpu().detach().numpy() self.window_fits.append(xhat) # Prepare the results A = A.cpu().detach().numpy() freqs = freqs.cpu().detach().numpy() return loss, freqs, A def predict(self, T): ''' Predicts the data from 1 to T. :param T: Prediction horizon (number of timepoints T) :type T: int :returns: xhat from 0 to T. 
:rtype: numpy.array ''' t = np.expand_dims(np.arange(T) + 1, -1) for i, idx_slice in enumerate(self.indices): local_freqs = self.freqs[i] Omega = np.concatenate([np.cos(t * 2 * np.pi * self.freqs), np.sin(t * 2 * np.pi * self.freqs)], -1) return np.dot(Omega, self.A) def correct_frequencies(self, dt): ''' Compute corrected frequency vector that takes into account the timestep dt in the signal :param dt: The time step between samples in the signal :type dt: float :returns: Timestamp-corrected frequency vector :rtype: numpy.ndarray ''' corrected_freqs = [] for freq in self.freqs: corrected_freqs.append(freq / dt) corrected_freqs = np.asarray(corrected_freqs) return corrected_freqs def compute_amps(self): ''' Compute the 'amplitude' of the Fourier mode. Amplitude = sqrt(A_1^2 + A_2^2) :returns: Amplitude vector, length = num_freqs :rtype: numpy.ndarray ''' # initialize amps list Amps = np.ndarray((self.A.shape[0], self.num_freqs)) # print(Amps.shape) # Populate amps list for i, A in enumerate(self.A): # print(A.shape) # Reshape the As list into a 2 x k matrix of # cosine and sine coefficients AsBs = A.reshape(-1, self.num_freqs) # Compute amplitude of each mode: for j in range(AsBs.shape[-1]): Amp = complex(*AsBs[:, j]) Amps[i, j] = abs(Amp) Amps = np.asarray(Amps) return Amps def compute_mean(self, lf_mode=None): ''' Computes the value of the mean mode. The mode is constructed by taking the value of the fit mean mode at the center of the window for each window data was fit in, and concatenating the center values. :param lf_mode: The index of the mode that is known to represent the mean. Note: if not provided, the lowest-average-IF mode is assumed to be the mean. :type lf_mode: optional, integer :returns: The reconstructed mean signal at each time point. :rtype: numpy.ndarray ''' # Initialize empty array means = np.ndarray(len(self.mid_idcs)) # Identify the low-frequency mode based on initial frequency estimate if lf_mode is None: lf_mode = np.argmin(np.mean(self.freqs[:, :], axis=0)) mid_idx = int(self.window_size / 2) # Iterate through each fourier object and compute the mean for i in range(len(self.mid_idcs)): # Grab the frequency and the amplitudes freq = self.freqs[i, lf_mode] A = self.A[i, lf_mode::self.num_freqs] # Compute the estimate t = np.expand_dims(np.arange(self.window_size) + 1, -1) Omega = np.concatenate([np.cos(t * 2 * np.pi * freq), np.sin(t * 2 * np.pi * freq)], -1) fit = np.dot(Omega, A) # Grab the centerpoint and add it to the means list means[i] = fit[mid_idx] return means def predict_window(self, i): ''' Show the sum of the modes fit to a window of index i. :param i: index of the window to retrieve the fit for. :type i: integer :returns: The sum of the reconstructed modes for the given window. :rtype: numpy.ndarray ''' return self.window_fits[i]
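
# A minimal usage sketch of the NFMD class above, assuming a synthetic
# two-tone test signal; the window sizes, optimizer settings, and variable
# names below are illustrative choices, not values prescribed by the package.
if __name__ == "__main__":
    t_axis = np.linspace(0, 10, 2000)
    dt = t_axis[1] - t_axis[0]
    x = np.sin(2 * np.pi * 1.5 * t_axis) + 0.5 * np.sin(2 * np.pi * 4.0 * t_axis)

    # One mode per tone; 200-sample windows, 100 windows across the signal.
    nfmd = NFMD(x, num_freqs=2, window_size=200, windows=100,
                max_iters=500, target_loss=1e-6)
    freqs, A, losses, indices = nfmd.decompose_signal(update_freq=25)

    # Per-window mode amplitudes and sample-rate-corrected frequencies.
    amps = nfmd.compute_amps()
    freqs_hz = nfmd.correct_frequencies(dt)
    print(freqs_hz[-1], amps[-1], losses[-1])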
PypiClean
/Joule-0.9.41.tar.gz/Joule-0.9.41/joule/models/data_stream.py
from sqlalchemy.orm import relationship, Mapped from sqlalchemy import (Column, Integer, String, Boolean, Enum, ForeignKey, DateTime) from sqlalchemy.dialects.postgresql import BIGINT from typing import List, Dict, Optional, TYPE_CHECKING import configparser import enum import re from operator import attrgetter import datetime from joule.models.meta import Base from joule.errors import ConfigurationError from joule.models.data_store.data_store import StreamInfo from joule.models import element, annotation if TYPE_CHECKING: from joule.models import (Folder) # pragma: no cover class DataStream(Base): """ Attributes: name (str): stream name description (str): stream description datatype (DataStream.DATATYPE): data representation on disk see :ref:`sec-streams` decimate (bool): whether to store decimated data for stream visualization folder (joule.Folder): parent Folder elements (List[joule.Element]): array of stream elements keep_us (int): microseconds of data to keep (KEEP_NONE=0, KEEP_ALL=-1). """ __tablename__ = 'stream' __table_args__ = {"schema": "metadata"} id: int = Column(Integer, primary_key=True) name: str = Column(String, nullable=False) class DATATYPE(enum.Enum): FLOAT64 = enum.auto() FLOAT32 = enum.auto() INT64 = enum.auto() INT32 = enum.auto() INT16 = enum.auto() INT8 = enum.auto() UINT64 = enum.auto() UINT32 = enum.auto() UINT16 = enum.auto() UINT8 = enum.auto() datatype: DATATYPE = Column(Enum(DATATYPE), nullable=False) decimate: bool = Column(Boolean, default=True) # do not allow property changes if any of the following are true is_source: bool = Column(Boolean, default=False) is_destination: bool = Column(Boolean, default=False) is_configured: bool = Column(Boolean, default=False) KEEP_ALL = -1 KEEP_NONE = 0 keep_us: int = Column(BIGINT, default=KEEP_ALL) description: str = Column(String) folder_id: int = Column(Integer, ForeignKey('metadata.folder.id')) folder: Mapped["Folder"] = relationship("Folder", back_populates="data_streams") elements: Mapped[List[element.Element]] = relationship("Element", cascade="all, delete-orphan", back_populates="stream") annotations: Mapped[List[annotation.Annotation]] = relationship("Annotation", cascade="all, delete-orphan", back_populates="stream") updated_at: datetime.datetime = Column(DateTime, nullable=False) def merge_configs(self, other: 'DataStream') -> None: # replace configurable attributes with other's values # update the updated_at timestamp if configs are different if ( self.keep_us != other.keep_us or self.decimate != other.decimate or self.description != other.description or self.is_configured != other.is_configured or self.is_destination != other.is_destination or self.is_source != other.is_source or self.elements != other.elements ): self.updated_at = datetime.datetime.utcnow() self.keep_us = other.keep_us self.decimate = other.decimate self.description = other.description self.is_configured = other.is_configured self.is_destination = other.is_destination self.is_source = other.is_source self.elements = other.elements def update_attributes(self, attrs: Dict) -> None: updated = False if 'name' in attrs: self.name = validate_name(attrs['name']) updated = True if 'description' in attrs: self.description = attrs['description'] updated = True if 'elements' in attrs: element_configs = attrs['elements'] # make sure the number of configs is correct if len(element_configs) != len(self.elements): raise ConfigurationError("incorrect number of elements") for e in self.elements: e.update_attributes(element_configs[e.index]) updated = True 
if updated: self.touch() def __str__(self): return "DataStream [{name}]".format(name=self.name) def __repr__(self): return "<DataStream(id=%r, name='%s', datatype=%r)>" % ( self.id, self.name, self.datatype) @property def locked(self): """ bool: true if the stream has a configuration file or is an active part of the data pipeline. Attributes of locked streams cannot be changed. """ return self.is_configured or self.is_destination or self.is_source @property def active(self): """ bool: true if the stream is part of the data pipeline """ return self.is_source or self.is_destination @property def layout(self): """ str: formatted string specifying the datatype and number of elements """ return "%s_%d" % (self.datatype.name.lower(), len(self.elements)) @property def decimated_layout(self): # decimations are floats (min,mean,max) tuples return "float32_%d" % (len(self.elements) * 3) @property def data_width(self) -> int: return len(self.elements) + 1 @property def is_remote(self) -> bool: """ bool: true if the stream resides on a remote system """ try: return self._remote_node is not None except AttributeError: return False @property def remote_node(self) -> str: """ str: URL of remote host, blank if the stream is local """ try: return self._remote_node except AttributeError: return '' @property def remote_path(self) -> str: """ str: path on remote host, blank if the stream is local """ try: return self._remote_path except AttributeError: return '' def set_remote(self, node: str, path: str): """ Associate the stream with a remote system Args: url: remote URL path: stream path on remote system """ self._remote_node = node self._remote_path = path # update timestamps on all folders in hierarchy def touch(self, now: Optional[datetime.datetime] = None) -> None: if now is None: now = datetime.datetime.utcnow() self.updated_at = now if self.folder is not None: self.folder.touch(now) def to_json(self, info: Dict[int, StreamInfo] = None) -> Dict: """ Args: info: optional content added to ``data_info`` field Returns: Dictionary of DataStream attributes """ resp = { 'id': self.id, 'name': self.name, 'description': self.description, 'datatype': self.datatype.name.lower(), 'layout': self.layout, 'keep_us': self.keep_us, 'is_configured': self.is_configured, 'is_source': self.is_source, 'is_destination': self.is_destination, 'updated_at': self.updated_at.isoformat(), 'locked': self.locked, # meta attribute 'active': self.active, # meta attribute 'decimate': self.decimate, 'elements': [e.to_json() for e in sorted(self.elements, key=attrgetter('index'))], } if info is not None and self.id in info: resp['data_info'] = info[self.id].to_json() return resp def to_nilmdb_metadata(self) -> Dict: return { 'name': self.name, 'name_abbrev': '', 'delete_locked': False, 'streams': [e.to_nilmdb_metadata() for e in self.elements] } def from_json(data: Dict) -> DataStream: """ Construct a DataStream from a dictionary of attributes produced by :meth:`DataStream.to_json` Args: data: attribute dictionary Returns: DataStream """ elements = [] index = 0 for item in data["elements"]: item["index"] = index elements.append(element.from_json(item)) index += 1 return DataStream(id=data["id"], name=validate_name(data["name"]), description=data["description"], datatype=validate_datatype(data["datatype"].upper()), keep_us=data["keep_us"], decimate=data["decimate"], is_configured=data["is_configured"], is_source=data["is_source"], is_destination=data["is_destination"], elements=elements, updated_at=datetime.datetime.utcnow()) def 
from_config(config: configparser.ConfigParser) -> DataStream:
    """
    Construct a DataStream from a configuration file

    Args:
        config: parsed *.conf file

    Returns:
        DataStream

    Raises:
        ConfigurationError
    """
    try:
        main_configs: configparser.ConfigParser = config["Main"]
    except KeyError as e:
        raise ConfigurationError("Missing section [%s]" % e) from e
    try:
        datatype = validate_datatype(main_configs["datatype"])
        keep_us = validate_keep(main_configs.get("keep", fallback="all"))
        decimate = main_configs.getboolean("decimate", fallback=True)
        name = validate_name(main_configs["name"])
        description = main_configs.get("description", fallback="")
        stream = DataStream(name=name, description=description,
                            datatype=datatype, keep_us=keep_us,
                            decimate=decimate,
                            updated_at=datetime.datetime.utcnow())
    except KeyError as e:
        raise ConfigurationError("[Main] missing %s" % e.args[0]) from e
    # now try to load the elements
    element_configs = filter(lambda sec: re.match(r"Element\d", sec),
                             config.sections())
    index = 0
    for name in element_configs:
        try:
            e = element.from_config(config[name])
            e.index = index
            index += 1
            stream.elements.append(e)
        except ConfigurationError as e:
            raise ConfigurationError("element <%s> %s" % (name, e)) from e
    # make sure we have at least one element
    if len(stream.elements) == 0:
        raise ConfigurationError(
            "missing element configurations, must have at least one")
    return stream


def from_nilmdb_metadata(config_data: Dict, layout: str) -> DataStream:
    datatype = validate_datatype(layout.split('_')[0])
    nelem = int(layout.split('_')[1])
    if 'streams' in config_data:
        elements = config_data['streams']
    else:
        elements = [{'column': i, 'name': "Element%d" % (i + 1),
                     'units': None, 'scale_factor': 1.0, 'offset': 0.0,
                     'plottable': True, 'discrete': False,
                     'default_min': None, 'default_max': None}
                    for i in range(nelem)]
    # make sure the name is valid
    name = config_data["name"].replace("/", "_")
    stream = DataStream(name=name, description='',
                        datatype=datatype,
                        keep_us=DataStream.KEEP_ALL,
                        decimate=True,
                        updated_at=datetime.datetime.utcnow())
    idx = 0
    for metadata in elements:
        elem = element.from_nilmdb_metadata(metadata, idx)
        idx += 1
        stream.elements.append(elem)
    return stream


def validate_name(name: str) -> str:
    if name is None or len(name) == 0:
        raise ConfigurationError("missing name")
    if '/' in name:
        raise ConfigurationError("invalid name, '/' not allowed")
    return name


def validate_datatype(datatype: str) -> DataStream.DATATYPE:
    try:
        return DataStream.DATATYPE[datatype.upper()]
    except KeyError as e:
        valid_types = ", ".join([m.name.lower() for m in DataStream.DATATYPE])
        raise ConfigurationError("invalid datatype [%s], choose from [%s]"
                                 % (datatype, valid_types)) from e


def validate_keep(keep: str) -> int:
    if keep.lower() == "none":
        return DataStream.KEEP_NONE
    if keep.lower() == "all":
        return DataStream.KEEP_ALL
    match = re.fullmatch(r'^(\d+)([hdwmy])$', keep)
    if match is None:
        raise ConfigurationError("invalid [DataStream] keep, "
                                 "use format #unit (eg 1w), none or all")
    units = {
        'h': 60 * 60 * 1e6,               # hours
        'd': 24 * 60 * 60 * 1e6,          # days
        'w': 7 * 24 * 60 * 60 * 1e6,      # weeks
        'm': 4 * 7 * 24 * 60 * 60 * 1e6,  # months
        'y': 365 * 24 * 60 * 60 * 1e6     # years
    }
    unit = match.group(2)
    time = int(match.group(1))
    if time <= 0:
        raise ConfigurationError("invalid [DataStream] keep, "
                                 "use format #unit (eg 1w), none or all")
    return int(time * units[unit])
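
# A minimal, self-contained sketch exercising the validators defined above;
# the literal values are illustrative and the prints/asserts are not part of
# the Joule API.
if __name__ == "__main__":
    # 'keep' strings map to microseconds (or the KEEP_ALL / KEEP_NONE sentinels)
    assert validate_keep("none") == DataStream.KEEP_NONE
    assert validate_keep("all") == DataStream.KEEP_ALL
    assert validate_keep("1w") == int(7 * 24 * 60 * 60 * 1e6)

    # datatype strings are case-insensitive DataStream.DATATYPE names
    assert validate_datatype("float32") is DataStream.DATATYPE.FLOAT32

    # stream names may not contain '/'
    try:
        validate_name("bad/name")
    except ConfigurationError as err:
        print("rejected:", err)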
PypiClean
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/widgets/data/oweditdomain.py
import warnings from xml.sax.saxutils import escape from itertools import zip_longest, repeat, chain from contextlib import contextmanager from collections import namedtuple, Counter from functools import singledispatch, partial from typing import ( Tuple, List, Any, Optional, Union, Dict, Sequence, Iterable, NamedTuple, FrozenSet, Type, Callable, TypeVar, Mapping, Hashable, cast ) import numpy as np import pandas as pd from AnyQt.QtWidgets import ( QWidget, QListView, QTreeView, QVBoxLayout, QHBoxLayout, QFormLayout, QLineEdit, QAction, QActionGroup, QGroupBox, QStyledItemDelegate, QStyleOptionViewItem, QStyle, QSizePolicy, QDialogButtonBox, QPushButton, QCheckBox, QComboBox, QStackedLayout, QDialog, QRadioButton, QGridLayout, QLabel, QSpinBox, QDoubleSpinBox, QAbstractItemView, QMenu ) from AnyQt.QtGui import QStandardItemModel, QStandardItem, QKeySequence, QIcon from AnyQt.QtCore import ( Qt, QSize, QModelIndex, QAbstractItemModel, QPersistentModelIndex, QRect, QPoint, ) from AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot import Orange.data from Orange.preprocess.transformation import Transformation, Identity, Lookup from Orange.widgets import widget, gui, settings from Orange.widgets.utils import itemmodels from Orange.widgets.utils.buttons import FixedSizeButton from Orange.widgets.utils.itemmodels import signal_blocking from Orange.widgets.utils.widgetpreview import WidgetPreview from Orange.widgets.widget import Input, Output from Orange.i18n_config import * ndarray = np.ndarray # pylint: disable=invalid-name MArray = np.ma.MaskedArray DType = Union[np.dtype, type] A = TypeVar("A") # pylint: disable=invalid-name B = TypeVar("B") # pylint: disable=invalid-name V = TypeVar("V", bound=Orange.data.Variable) # pylint: disable=invalid-name H = TypeVar("H", bound=Hashable) # pylint: disable=invalid-name def __(key): return i18n.t("widget.data.data.oweditdomain." + key) def unique(sequence: Iterable[H]) -> Iterable[H]: """ Return unique elements in `sequence`, preserving their (first seen) order. """ # depending on Python >= 3.6 'ordered' dict implementation detail. return iter(dict.fromkeys(sequence)) class _DataType: def __eq__(self, other): """Equal if `other` has the same type and all elements compare equal.""" if type(self) is not type(other): return False return super().__eq__(other) def __ne__(self, other): return not self == other def __hash__(self): return hash((type(self), super().__hash__())) def name_type(self): """ Returns a tuple with name and type of the variable. It is used since it is forbidden to use names of variables in settings. """ type_number = { "Categorical": 0, "Real": 2, "Time": 3, "String": 4 } return self.name, type_number[type(self).__name__] #: An ordered sequence of key, value pairs (variable annotations) AnnotationsType = Tuple[Tuple[str, str], ...] 
# Define abstract representation of the variable types edited class Categorical( _DataType, NamedTuple("Categorical", [ ("name", str), ("categories", Tuple[str, ...]), ("annotations", AnnotationsType), ("linked", bool) ])): pass class Real( _DataType, NamedTuple("Real", [ ("name", str), # a precision (int, and a format specifier('f', 'g', or '') ("format", Tuple[int, str]), ("annotations", AnnotationsType), ("linked", bool) ])): pass class String( _DataType, NamedTuple("String", [ ("name", str), ("annotations", AnnotationsType), ("linked", bool) ])): pass class Time( _DataType, NamedTuple("Time", [ ("name", str), ("annotations", AnnotationsType), ("linked", bool) ])): pass Variable = Union[Categorical, Real, Time, String] VariableTypes = (Categorical, Real, Time, String) # Define variable transformations. class Rename(_DataType, namedtuple("Rename", ["name"])): """ Rename a variable. Parameters ---------- name : str The new name """ def __call__(self, var): # type: (Variable) -> Variable return var._replace(name=self.name) #: Mapping of categories. #: A list of pairs with the first element the original value and the second #: element the new value. If the first element is None then a category level #: is added. If the second element is None than the corresponding first level #: is dropped. If there are duplicated elements on the right the corresponding #: categories on the left are merged. #: The mapped order is defined by the translated elements after None removal #: and merges (the first occurrence of a multiplied elements defines its #: position): CategoriesMappingType = List[Tuple[Optional[str], Optional[str]]] class CategoriesMapping(_DataType, namedtuple("CategoriesMapping", ["mapping"])): """ Change categories of a categorical variable. Parameters ---------- mapping : CategoriesMappingType """ def __call__(self, var): # type: (Categorical) -> Categorical cat = tuple(unique(cj for _, cj in self.mapping if cj is not None)) return var._replace(categories=cat) class Annotate(_DataType, namedtuple("Annotate", ["annotations"])): """ Replace variable annotations. """ def __call__(self, var): return var._replace(annotations=self.annotations) class Unlink(_DataType, namedtuple("Unlink", [])): """Unlink variable from its source, that is, remove compute_value""" Transform = Union[Rename, CategoriesMapping, Annotate, Unlink] TransformTypes = (Rename, CategoriesMapping, Annotate, Unlink) CategoricalTransformTypes = (CategoriesMapping, Unlink) # Reinterpret vector transformations. class CategoricalVector( _DataType, NamedTuple("CategoricalVector", [ ("vtype", Categorical), ("data", Callable[[], MArray]), ])): ... class RealVector( _DataType, NamedTuple("RealVector", [ ("vtype", Real), ("data", Callable[[], MArray]), ])): ... class StringVector( _DataType, NamedTuple("StringVector", [ ("vtype", String), ("data", Callable[[], MArray]), ])): ... class TimeVector( _DataType, NamedTuple("TimeVector", [ ("vtype", Time), ("data", Callable[[], MArray]), ])): ... 
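
# Illustrative sketch (values chosen arbitrarily) of how the transforms above
# compose with the CategoriesMappingType convention described earlier:
# mapping "b" onto "a" merges the two levels, ("c", None) drops a level, and
# (None, "d") adds one.
#
#   var = Categorical("x", ("a", "b", "c"), (), False)
#   tr = CategoriesMapping([("a", "a"), ("b", "a"), ("c", None), (None, "d")])
#   tr(var).categories        # -> ("a", "d")
#   Rename("y")(var).name     # -> "y"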
DataVector = Union[CategoricalVector, RealVector, StringVector, TimeVector] DataVectorTypes = (CategoricalVector, RealVector, StringVector, TimeVector) class AsString(_DataType, NamedTuple("AsString", [])): """Reinterpret a data vector as a string.""" def __call__(self, vector: DataVector) -> StringVector: var, _ = vector if isinstance(var, String): return vector return StringVector( String(var.name, var.annotations, False), lambda: as_string(vector.data()), ) class AsContinuous(_DataType, NamedTuple("AsContinuous", [])): """ Reinterpret as a continuous variable (values that do not parse as float are NaN). """ def __call__(self, vector: DataVector) -> RealVector: var, _ = vector if isinstance(var, Real): return vector elif isinstance(var, Categorical): def data() -> MArray: d = vector.data() a = categorical_to_string_vector(d, var.values) return MArray(as_float_or_nan(a, where=a.mask), mask=a.mask) return RealVector( Real(var.name, (6, 'g'), var.annotations, var.linked), data ) elif isinstance(var, Time): return RealVector( Real(var.name, (6, 'g'), var.annotations, var.linked), lambda: vector.data().astype(float) ) elif isinstance(var, String): def data(): s = vector.data() return MArray(as_float_or_nan(s, where=s.mask), mask=s.mask) return RealVector( Real(var.name, (6, "g"), var.annotations, var.linked), data ) raise AssertionError class AsCategorical(_DataType, namedtuple("AsCategorical", [])): """Reinterpret as a categorical variable""" def __call__(self, vector: DataVector) -> CategoricalVector: # this is the main complication in type transformation since we need # the data and not just the variable description var, _ = vector if isinstance(var, Categorical): return vector if isinstance(var, (Real, Time, String)): data, values = categorical_from_vector(vector.data()) return CategoricalVector( Categorical(var.name, values, var.annotations, var.linked), lambda: data ) raise AssertionError class AsTime(_DataType, namedtuple("AsTime", [])): """Reinterpret as a datetime vector""" def __call__(self, vector: DataVector) -> TimeVector: var, _ = vector if isinstance(var, Time): return vector elif isinstance(var, Real): return TimeVector( Time(var.name, var.annotations, var.linked), lambda: vector.data().astype("M8[us]") ) elif isinstance(var, Categorical): def data(): d = vector.data() s = categorical_to_string_vector(d, var.values) dt = pd.to_datetime(s, errors="coerce").values.astype("M8[us]") return MArray(dt, mask=d.mask) return TimeVector( Time(var.name, var.annotations, var.linked), data ) elif isinstance(var, String): def data(): s = vector.data() dt = pd.to_datetime(s, errors="coerce").values.astype("M8[us]") return MArray(dt, mask=s.mask) return TimeVector( Time(var.name, var.annotations, var.linked), data ) raise AssertionError ReinterpretTransform = Union[AsCategorical, AsContinuous, AsTime, AsString] ReinterpretTransformTypes = (AsCategorical, AsContinuous, AsTime, AsString) def deconstruct(obj): # type: (tuple) -> Tuple[str, Tuple[Any, ...]] """ Deconstruct a tuple subclass to its class name and its contents. Parameters ---------- obj : A tuple Returns ------- value: Tuple[str, Tuple[Any, ...]] """ cname = type(obj).__name__ args = tuple(obj) return cname, args def reconstruct(tname, args): # type: (str, Tuple[Any, ...]) -> Tuple[Any, ...] """ Reconstruct a tuple subclass (inverse of deconstruct). Parameters ---------- tname : str Type name args : Tuple[Any, ...] Returns ------- rval: Tuple[Any, ...] 
""" try: constructor = globals()[tname] except KeyError: raise NameError(tname) return constructor(*args) def formatter_for_dtype(dtype: np.dtype) -> Callable[[Any], str]: if dtype.metadata is None: return str else: return dtype.metadata.get("__formatter", str) # metadata abuse def masked_unique(data: MArray) -> Tuple[MArray, ndarray]: if not np.any(data.mask): return np.ma.unique(data, return_inverse=True) elif data.dtype.kind == "O": # np.ma.unique does not work for object arrays # (no ma.minimum_fill_value for object arrays) # maybe sorted(set(data.data[...])) unq = np.unique(data.data[~data.mask]) mapper = make_dict_mapper( DictMissingConst(len(unq), ((v, i) for i, v in enumerate(unq))) ) index = mapper(data.data) unq = np.array(unq.tolist() + [data.fill_value], dtype=data.dtype) unq_mask = [False] * unq.size unq_mask[-1] = True unq = MArray(unq, mask=unq_mask) return unq, index else: unq, index = np.ma.unique(data, return_inverse=True) assert not np.any(unq.mask[:-1]), \ "masked value if present must be in last position" return unq, index def categorical_from_vector(data: MArray) -> Tuple[MArray, Tuple[str, ...]]: formatter = formatter_for_dtype(data.dtype) unq, index = categorize_unique(data) if formatter is not str: # str(np.array([0], "M8[s]")[0]) is different then # str(np.array([0], "M8[s]").astype(object)[0]) which is what # as_string is doing names = tuple(map(formatter, unq.astype(object))) else: names = tuple(as_string(unq)) data = MArray( index, mask=data.mask, dtype=np.dtype(int, metadata={ "__formater": lambda i: names[i] if 0 <= i < unq.size else "?" }) ) return data, names def categorize_unique(data: MArray) -> Tuple[ndarray, MArray]: unq, index = masked_unique(data) if np.any(unq.mask): unq = unq[:-1] assert not np.any(unq.mask), "masked value if present must be last" unq = unq.data index[data.mask] = -1 index = MArray(index, mask=data.mask) return unq, index def categorical_to_string_vector(data: MArray, values: Tuple[str, ...]) -> MArray: lookup = np.asarray(values, object) out = np.full(data.shape, "", dtype=object) mask_ = ~data.mask out[mask_] = lookup[data.data[mask_]] return MArray(out, mask=data.mask, fill_value="") # Item models class DictItemsModel(QStandardItemModel): """A Qt Item Model class displaying the contents of a python dictionary. """ # Implement a proper model with in-place editing. # (Maybe it should be a TableModel with 2 columns) def __init__(self, parent=None, a_dict=None): super().__init__(parent) self._dict = {} self.setHorizontalHeaderLabels([i18n.t("common.general.key"), i18n.t("common.general.value")]) if a_dict is not None: self.set_dict(a_dict) def set_dict(self, a_dict): # type: (Dict[str, str]) -> None self._dict = a_dict self.setRowCount(0) for key, value in sorted(a_dict.items()): key_item = QStandardItem(key) value_item = QStandardItem(value) key_item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) value_item.setFlags(value_item.flags() | Qt.ItemIsEditable) self.appendRow([key_item, value_item]) def get_dict(self): # type: () -> Dict[str, str] rval = {} for row in range(self.rowCount()): key_item = self.item(row, 0) value_item = self.item(row, 1) rval[key_item.text()] = value_item.text() return rval class VariableEditor(QWidget): """ An editor widget for a variable. Can edit the variable name, and its attributes dictionary. 
""" variable_changed = Signal() def __init__(self, parent=None, **kwargs): super().__init__(parent, **kwargs) self.var = None # type: Optional[Variable] layout = QVBoxLayout() self.setLayout(layout) self.form = form = QFormLayout( fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow, objectName="editor-form-layout" ) layout.addLayout(self.form) self.name_edit = QLineEdit(objectName="name-editor") self.name_edit.editingFinished.connect( lambda: self.name_edit.isModified() and self.on_name_changed() ) form.addRow(__("row.name"), self.name_edit) self.unlink_var_cb = QCheckBox( __("row.unlink_var_cb"), self, toolTip=__("row.unlink_var_cb_tip") ) self.unlink_var_cb.toggled.connect(self._set_unlink) form.addRow("", self.unlink_var_cb) vlayout = QVBoxLayout(margin=0, spacing=1) self.labels_edit = view = QTreeView( objectName="annotation-pairs-edit", rootIsDecorated=False, editTriggers=QTreeView.DoubleClicked | QTreeView.EditKeyPressed, ) self.labels_model = model = DictItemsModel() view.setModel(model) view.selectionModel().selectionChanged.connect( self.on_label_selection_changed) agrp = QActionGroup(view, objectName="annotate-action-group") action_add = QAction( "+", self, objectName="action-add-label", toolTip=__("tooltip.add_label"), shortcut=QKeySequence(QKeySequence.New), shortcutContext=Qt.WidgetShortcut ) action_delete = QAction( "\N{MINUS SIGN}", self, objectName="action-delete-label", toolTip=__("tooltip.remove_label"), shortcut=QKeySequence(QKeySequence.Delete), shortcutContext=Qt.WidgetShortcut ) agrp.addAction(action_add) agrp.addAction(action_delete) view.addActions([action_add, action_delete]) def add_label(): row = [QStandardItem(), QStandardItem()] model.appendRow(row) idx = model.index(model.rowCount() - 1, 0) view.setCurrentIndex(idx) view.edit(idx) def remove_label(): rows = view.selectionModel().selectedRows(0) if rows: assert len(rows) == 1 idx = rows[0].row() model.removeRow(idx) action_add.triggered.connect(add_label) action_delete.triggered.connect(remove_label) agrp.setEnabled(False) self.add_label_action = action_add self.remove_label_action = action_delete # Necessary signals to know when the labels change model.dataChanged.connect(self.on_labels_changed) model.rowsInserted.connect(self.on_labels_changed) model.rowsRemoved.connect(self.on_labels_changed) vlayout.addWidget(self.labels_edit) hlayout = QHBoxLayout() hlayout.setContentsMargins(0, 0, 0, 0) button = FixedSizeButton( self, defaultAction=self.add_label_action, accessibleName="Add", ) hlayout.addWidget(button) button = FixedSizeButton( self, defaultAction=self.remove_label_action, accessibleName="Remove", ) hlayout.addWidget(button) hlayout.addStretch(10) vlayout.addLayout(hlayout) form.addRow(__("row.label"), vlayout) def set_data(self, var, transform=()): # type: (Optional[Variable], Sequence[Transform]) -> None """ Set the variable to edit. """ self.clear() self.var = var if var is not None: name = var.name annotations = var.annotations unlink = False for tr in transform: if isinstance(tr, Rename): name = tr.name elif isinstance(tr, Annotate): annotations = tr.annotations elif isinstance(tr, Unlink): unlink = True self.name_edit.setText(name) self.labels_model.set_dict(dict(annotations)) self.add_label_action.actionGroup().setEnabled(True) self.unlink_var_cb.setChecked(unlink) else: self.add_label_action.actionGroup().setEnabled(False) self.unlink_var_cb.setDisabled(var is None or not var.linked) def get_data(self): """Retrieve the modified variable. 
""" if self.var is None: return None, [] name = self.name_edit.text().strip() labels = tuple(sorted(self.labels_model.get_dict().items())) tr = [] if self.var.name != name: tr.append(Rename(name)) if self.var.annotations != labels: tr.append(Annotate(labels)) if self.var.linked and self.unlink_var_cb.isChecked(): tr.append(Unlink()) return self.var, tr def clear(self): """Clear the editor state. """ self.var = None self.name_edit.setText("") self.labels_model.setRowCount(0) self.unlink_var_cb.setChecked(False) @Slot() def on_name_changed(self): self.variable_changed.emit() @Slot() def on_labels_changed(self): self.variable_changed.emit() @Slot() def on_label_selection_changed(self): selected = self.labels_edit.selectionModel().selectedRows() self.remove_label_action.setEnabled(bool(len(selected))) def _set_unlink(self, unlink): self.unlink_var_cb.setChecked(unlink) self.variable_changed.emit() class GroupItemsDialog(QDialog): """ A dialog for group less frequent values. """ DEFAULT_LABEL = __("default_label") def __init__( self, variable: Categorical, data: Union[np.ndarray, List, MArray], selected_attributes: List[str], dialog_settings: Dict[str, Any], parent: QWidget = None, flags: Qt.WindowFlags = Qt.Dialog, **kwargs ) -> None: super().__init__(parent, flags, **kwargs) self.variable = variable self.data = data self.selected_attributes = selected_attributes # grouping strategy self.selected_radio = radio1 = QRadioButton(__("btn.group_select_value")) self.frequent_abs_radio = radio2 = QRadioButton( __("btn.group_value_less_than") ) self.frequent_rel_radio = radio3 = QRadioButton( __("btn.group_value_less_than") ) self.n_values_radio = radio4 = QRadioButton( __("btn.group_all_except") ) # if selected attributes available check the first radio button, # otherwise disable it if selected_attributes: radio1.setChecked(True) else: radio1.setEnabled(False) # they are remembered by number since radio button instance is # new object for each dialog checked = dialog_settings.get("selected_radio", 0) [radio2, radio3, radio4][checked].setChecked(True) label2 = QLabel(__("label.occurrence")) label3 = QLabel(__("label.occurrence")) label4 = QLabel(__("label.frequent_value")) self.frequent_abs_spin = spin2 = QSpinBox() max_val = len(data) spin2.setMinimum(1) spin2.setMaximum(max_val) spin2.setValue(dialog_settings.get("frequent_abs_spin", 10)) spin2.setMinimumWidth( self.fontMetrics().horizontalAdvance("X") * (len(str(max_val)) + 1) + 20 ) spin2.valueChanged.connect(self._frequent_abs_spin_changed) self.frequent_rel_spin = spin3 = QDoubleSpinBox() spin3.setMinimum(0) spin3.setDecimals(1) spin3.setSingleStep(0.1) spin3.setMaximum(100) spin3.setValue(dialog_settings.get("frequent_rel_spin", 10)) spin3.setMinimumWidth(self.fontMetrics().horizontalAdvance("X") * (2 + 1) + 20) spin3.setSuffix(" %") spin3.valueChanged.connect(self._frequent_rel_spin_changed) self.n_values_spin = spin4 = QSpinBox() spin4.setMinimum(0) spin4.setMaximum(len(variable.categories)) spin4.setValue( dialog_settings.get( "n_values_spin", min(10, len(variable.categories)) ) ) spin4.setMinimumWidth( self.fontMetrics().horizontalAdvance("X") * (len(str(max_val)) + 1) + 20 ) spin4.valueChanged.connect(self._n_values_spin_spin_changed) grid_layout = QGridLayout() # first row grid_layout.addWidget(radio1, 0, 0, 1, 2) # second row grid_layout.addWidget(radio2, 1, 0, 1, 2) grid_layout.addWidget(spin2, 1, 2) grid_layout.addWidget(label2, 1, 3) # third row grid_layout.addWidget(radio3, 2, 0, 1, 2) grid_layout.addWidget(spin3, 2, 2) 
grid_layout.addWidget(label3, 2, 3) # fourth row grid_layout.addWidget(radio4, 3, 0) grid_layout.addWidget(spin4, 3, 1) grid_layout.addWidget(label4, 3, 2, 1, 2) group_box = QGroupBox() group_box.setLayout(grid_layout) # grouped variable name new_name_label = QLabel(__("label.new_value_name")) self.new_name_line_edit = n_line_edit = QLineEdit( dialog_settings.get("name_line_edit", self.DEFAULT_LABEL) ) # it is shown gray when user removes the text and let user know that # word others is default one n_line_edit.setPlaceholderText(self.DEFAULT_LABEL) name_hlayout = QHBoxLayout() name_hlayout.addWidget(new_name_label) name_hlayout.addWidget(n_line_edit) # confirm_button = QPushButton("Apply") # cancel_button = QPushButton("Cancel") buttons = QDialogButtonBox( orientation=Qt.Horizontal, standardButtons=(QDialogButtonBox.Ok | QDialogButtonBox.Cancel), objectName="dialog-button-box", ) buttons.button(QDialogButtonBox.Ok).setText(i18n.t("common.btn.ok")) buttons.button(QDialogButtonBox.Cancel).setText(i18n.t("common.btn.cancel")) buttons.accepted.connect(self.accept) buttons.rejected.connect(self.reject) # join components self.setLayout(QVBoxLayout()) self.layout().addWidget(group_box) self.layout().addLayout(name_hlayout) self.layout().addWidget(buttons) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) def _frequent_abs_spin_changed(self) -> None: self.frequent_abs_radio.setChecked(True) def _n_values_spin_spin_changed(self) -> None: self.n_values_radio.setChecked(True) def _frequent_rel_spin_changed(self) -> None: self.frequent_rel_radio.setChecked(True) def get_merge_attributes(self) -> List[str]: """ Returns attributes that will be merged Returns ------- List of attributes' to be merged names """ if self.selected_radio.isChecked(): return self.selected_attributes if isinstance(self.data, MArray): non_nan = self.data[~self.data.mask] elif isinstance(self.data, np.ndarray): non_nan = self.data[~np.isnan(self.data)] else: # list non_nan = [x for x in self.data if x is not None] counts = Counter(non_nan) if self.n_values_radio.isChecked(): keep_values = self.n_values_spin.value() values = counts.most_common()[keep_values:] indices = [i for i, _ in values] elif self.frequent_abs_radio.isChecked(): indices = [v for v, c in counts.most_common() if c < self.frequent_abs_spin.value()] else: # self.frequent_rel_radio.isChecked(): n_all = sum(counts.values()) indices = [v for v, c in counts.most_common() if c / n_all * 100 < self.frequent_rel_spin.value()] indices = np.array(indices, dtype=int) # indices must be ints return np.array(self.variable.categories)[indices].tolist() def get_merged_value_name(self) -> str: """ Returns ------- New label of merged values """ return self.new_name_line_edit.text() or self.DEFAULT_LABEL def get_dialog_settings(self) -> Dict[str, Any]: """ Returns ------- Return the dictionary with vlues set by user in each of the line edits and selected radio button. 
""" settings_dict = { "frequent_abs_spin": self.frequent_abs_spin.value(), "frequent_rel_spin": self.frequent_rel_spin.value(), "n_values_spin": self.n_values_spin.value(), "name_line_edit": self.new_name_line_edit.text() } checked = [ i for i, s in enumerate( [self.frequent_abs_radio, self.frequent_rel_radio, self.n_values_radio] ) if s.isChecked()] # when checked empty radio button for selected values is selected # it is not stored in setting since its selection depends on users # selection of values in list if checked: settings_dict["selected_radio"] = checked[0] return settings_dict @contextmanager def disconnected(signal, slot, connection_type=Qt.AutoConnection): signal.disconnect(slot) try: yield finally: signal.connect(slot, connection_type) #: In 'reordable' models holds the original position of the item #: (if applicable). SourcePosRole = Qt.UserRole #: The original name SourceNameRole = Qt.UserRole + 2 #: The added/dropped state (type is ItemEditState) EditStateRole = Qt.UserRole + 1 class ItemEditState: NoState = 0 Dropped = 1 Added = 2 #: Role used to retrieve the count of 'key' values in the model. MultiplicityRole = Qt.UserRole + 0x67 class CountedListModel(itemmodels.PyListModel): """ A list model counting how many times unique `key` values appear in the list. The counts are cached and invalidated on any change to the model involving the changes to `keyRoles`. """ #: cached counts __counts_cache = None # type: Optional[Counter] def data(self, index, role=Qt.DisplayRole): # type: (QModelIndex, int) -> Any if role == MultiplicityRole: key = self.key(index) counts = self.__counts() return counts.get(key, 1) return super().data(index, role) def setData(self, index, value, role=Qt.EditRole): # type: (QModelIndex, Any, int)-> bool rval = super().setData(index, value, role) if role in self.keyRoles(): self.invalidateCounts() return rval def setItemData(self, index, data): # type: (QModelIndex, Dict[int, Any]) -> bool rval = super().setItemData(index, data) if self.keyRoles().intersection(set(data.keys())): self.invalidateCounts() return rval def endInsertRows(self): super().endInsertRows() self.invalidateCounts() def endRemoveRows(self): super().endRemoveRows() self.invalidateCounts() def endResetModel(self) -> None: super().endResetModel() self.invalidateCounts() def invalidateCounts(self) -> None: """ Invalidate the cached counts. """ self.__counts_cache = None # emit the change for the whole model self.dataChanged.emit( self.index(0), self.index(self.rowCount() - 1), [MultiplicityRole] ) def __counts(self): # type: () -> Counter if self.__counts_cache is not None: return self.__counts_cache counts = Counter() for index in map(self.index, range(self.rowCount())): key = self.key(index) try: counts[key] += 1 except TypeError: # pragma: no cover warnings.warn(__("tooltip.key_value_error").format(key)) self.__counts_cache = counts return self.__counts_cache def key(self, index): # type: (QModelIndex) -> Any """ Return the 'key' value that is to be counted. The default implementation returns Qt.EditRole value for the index Parameters ---------- index : QModelIndex The model index. Returns ------- key : Any """ return self.data(index, Qt.EditRole) def keyRoles(self): # type: () -> FrozenSet[int] """ Return a set of item roles on which `key` depends. The counts are invalidated and recomputed whenever any of the roles in this set changes. 
By default the only role returned is Qt.EditRole """ return frozenset({Qt.EditRole}) class CountedStateModel(CountedListModel): """ Count by EditRole (name) and EditStateRole (ItemEditState) """ # The purpose is to count the items with target name only for # ItemEditState.NoRole, i.e. excluding added/dropped values. # def key(self, index): # type: (QModelIndex) -> Tuple[Any, Any] # reimplemented return self.data(index, Qt.EditRole), self.data(index, EditStateRole) def keyRoles(self): # type: () -> FrozenSet[int] # reimplemented return frozenset({Qt.EditRole, EditStateRole}) def mapRectTo(widget: QWidget, parent: QWidget, rect: QRect) -> QRect: # pylint: disable=redefined-outer-name return QRect(widget.mapTo(parent, rect.topLeft()), rect.size()) def mapRectToGlobal(widget: QWidget, rect: QRect) -> QRect: # pylint: disable=redefined-outer-name return QRect(widget.mapToGlobal(rect.topLeft()), rect.size()) class CategoriesEditDelegate(QStyledItemDelegate): """ Display delegate for editing categories. Displayed items are styled for add, remove, merge and rename operations. """ def initStyleOption(self, option, index): # type: (QStyleOptionViewItem, QModelIndex)-> None super().initStyleOption(option, index) text = str(index.data(Qt.EditRole)) sourcename = str(index.data(SourceNameRole)) editstate = index.data(EditStateRole) counts = index.data(MultiplicityRole) if not isinstance(counts, int): counts = 1 suffix = None if editstate == ItemEditState.Dropped: option.state &= ~QStyle.State_Enabled option.font.setStrikeOut(True) text = sourcename suffix = __("suffix.drop") elif editstate == ItemEditState.Added: suffix = __("suffix.add") else: text = f"{sourcename} \N{RIGHTWARDS ARROW} {text}" if counts > 1: suffix = __("suffix.merge") if suffix is not None: text = text + " " + suffix option.text = text class CatEditComboBox(QComboBox): prows: List[QPersistentModelIndex] def createEditor( self, parent: QWidget, option: 'QStyleOptionViewItem', index: QModelIndex ) -> QWidget: view = option.widget assert isinstance(view, QAbstractItemView) selmodel = view.selectionModel() rows = selmodel.selectedRows(0) if len(rows) < 2: return super().createEditor(parent, option, index) # edit multiple selection cb = CategoriesEditDelegate.CatEditComboBox( editable=True, insertPolicy=QComboBox.InsertAtBottom) cb.setParent(view, Qt.Popup) cb.addItems( list(unique(str(row.data(Qt.EditRole)) for row in rows))) prows = [QPersistentModelIndex(row) for row in rows] cb.prows = prows return cb def updateEditorGeometry( self, editor: QWidget, option: 'QStyleOptionViewItem', index: QModelIndex ) -> None: if isinstance(editor, CategoriesEditDelegate.CatEditComboBox): view = cast(QAbstractItemView, option.widget) view.scrollTo(index) vport = view.viewport() vrect = view.visualRect(index) vrect = mapRectTo(vport, view, vrect) vrect = vrect.intersected(vport.geometry()) vrect = mapRectToGlobal(vport, vrect) size = editor.sizeHint().expandedTo(vrect.size()) editor.resize(size) editor.move(vrect.topLeft()) else: super().updateEditorGeometry(editor, option, index) def setModelData( self, editor: QWidget, model: QAbstractItemModel, index: QModelIndex ) -> None: if isinstance(editor, CategoriesEditDelegate.CatEditComboBox): text = editor.currentText() with signal_blocking(model): for prow in editor.prows: if prow.isValid(): model.setData(QModelIndex(prow), text, Qt.EditRole) # this could be better model.dataChanged.emit( model.index(0, 0), model.index(model.rowCount() - 1, 0), (Qt.EditRole,) ) else: super().setModelData(editor, model, 
index) class DiscreteVariableEditor(VariableEditor): """An editor widget for editing a discrete variable. Extends the :class:`VariableEditor` to enable editing of variables values. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.merge_dialog_settings = {} self._values = None form = self.layout().itemAt(0) assert isinstance(form, QFormLayout) #: A list model of discrete variable's values. self.values_model = CountedStateModel( flags=Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable ) vlayout = QVBoxLayout(spacing=1, margin=0) self.values_edit = QListView( editTriggers=QListView.DoubleClicked | QListView.EditKeyPressed, selectionMode=QListView.ExtendedSelection, uniformItemSizes=True, ) self.values_edit.setItemDelegate(CategoriesEditDelegate(self)) self.values_edit.setModel(self.values_model) self.values_model.dataChanged.connect(self.on_values_changed) self.values_edit.selectionModel().selectionChanged.connect( self.on_value_selection_changed) self.values_model.layoutChanged.connect(self.on_value_selection_changed) self.values_model.rowsMoved.connect(self.on_value_selection_changed) vlayout.addWidget(self.values_edit) hlayout = QHBoxLayout(spacing=1, margin=0) self.categories_action_group = group = QActionGroup( self, objectName="action-group-categories", enabled=False ) self.move_value_up = QAction( "Move up", group, iconText="\N{UPWARDS ARROW}", toolTip=__("tooltip.select_move_up"), shortcut=QKeySequence(Qt.ControlModifier | Qt.AltModifier | Qt.Key_BracketLeft), shortcutContext=Qt.WidgetShortcut, ) self.move_value_up.triggered.connect(self.move_up) self.move_value_down = QAction( "Move down", group, iconText="\N{DOWNWARDS ARROW}", toolTip=__("tooltip.select_move_down"), shortcut=QKeySequence(Qt.ControlModifier | Qt.AltModifier | Qt.Key_BracketRight), shortcutContext=Qt.WidgetShortcut, ) self.move_value_down.triggered.connect(self.move_down) self.add_new_item = QAction( "Add", group, iconText="+", objectName="action-add-item", toolTip=__("tooltip.append_item"), shortcut=QKeySequence(QKeySequence.New), shortcutContext=Qt.WidgetShortcut, ) self.remove_item = QAction( __("label.remove_item"), group, iconText="\N{MINUS SIGN}", objectName="action-remove-item", toolTip=__("tooltip.delete_item"), shortcut=QKeySequence(QKeySequence.Delete), shortcutContext=Qt.WidgetShortcut, ) self.rename_selected_items = QAction( __("label.rename_items"), group, iconText="=", objectName="action-rename-selected-items", toolTip=__("tooltip.rename_selected_items"), shortcut=QKeySequence(Qt.ControlModifier | Qt.Key_Equal), shortcutContext=Qt.WidgetShortcut, ) self.merge_items = QAction( "Merge", group, iconText="M", objectName="action-activate-merge-dialog", toolTip=__("tooltip.merge_item"), shortcut=QKeySequence(Qt.ControlModifier | Qt.MetaModifier | Qt.Key_Equal), shortcutContext=Qt.WidgetShortcut ) self.add_new_item.triggered.connect(self._add_category) self.remove_item.triggered.connect(self._remove_category) self.rename_selected_items.triggered.connect(self._rename_selected_categories) self.merge_items.triggered.connect(self._merge_categories) button1 = FixedSizeButton( self, defaultAction=self.move_value_up, accessibleName="Move up" ) button2 = FixedSizeButton( self, defaultAction=self.move_value_down, accessibleName="Move down" ) button3 = FixedSizeButton( self, defaultAction=self.add_new_item, accessibleName="Add" ) button4 = FixedSizeButton( self, defaultAction=self.remove_item, accessibleName="Remove" ) button5 = FixedSizeButton( self, 
defaultAction=self.rename_selected_items, accessibleName="Merge selected items" ) button6 = FixedSizeButton( self, defaultAction=self.merge_items, accessibleName="Merge infrequent", ) self.values_edit.addActions([ self.move_value_up, self.move_value_down, self.add_new_item, self.remove_item, self.rename_selected_items ]) self.values_edit.setContextMenuPolicy(Qt.CustomContextMenu) def context_menu(pos: QPoint): viewport = self.values_edit.viewport() menu = QMenu(self.values_edit) menu.setAttribute(Qt.WA_DeleteOnClose) menu.addActions([self.rename_selected_items, self.remove_item]) menu.popup(viewport.mapToGlobal(pos)) self.values_edit.customContextMenuRequested.connect(context_menu) hlayout.addWidget(button1) hlayout.addWidget(button2) hlayout.addSpacing(3) hlayout.addWidget(button3) hlayout.addWidget(button4) hlayout.addSpacing(3) hlayout.addWidget(button5) hlayout.addWidget(button6) hlayout.addStretch(10) vlayout.addLayout(hlayout) form.insertRow(2, __("box.value"), vlayout) QWidget.setTabOrder(self.name_edit, self.values_edit) QWidget.setTabOrder(self.values_edit, button1) QWidget.setTabOrder(button1, button2) QWidget.setTabOrder(button2, button3) QWidget.setTabOrder(button3, button4) QWidget.setTabOrder(button4, button5) QWidget.setTabOrder(button5, button6) def set_data(self, var, transform=()): raise NotImplementedError def set_data_categorical(self, var, values, transform=()): # type: (Optional[Categorical], Optional[Sequence[float]], Sequence[Transform]) -> None """ Set the variable to edit. `values` is needed for categorical features to perform grouping. """ # pylint: disable=too-many-branches super().set_data(var, transform=transform) self._values = values tr = None # type: Optional[CategoriesMapping] for tr_ in transform: if isinstance(tr_, CategoriesMapping): tr = tr_ items = [] if tr is not None: ci_index = {c: i for i, c in enumerate(var.categories)} for ci, cj in tr.mapping: if ci is None and cj is not None: # level added item = { Qt.EditRole: cj, EditStateRole: ItemEditState.Added, SourcePosRole: None } elif ci is not None and cj is None: # ci level dropped item = { Qt.EditRole: ci, EditStateRole: ItemEditState.Dropped, SourcePosRole: ci_index[ci], SourceNameRole: ci } elif ci is not None and cj is not None: # rename or reorder item = { Qt.EditRole: cj, EditStateRole: ItemEditState.NoState, SourcePosRole: ci_index[ci], SourceNameRole: ci } else: assert False, "invalid mapping: {!r}".format(tr.mapping) items.append(item) elif var is not None: items = [ {Qt.EditRole: c, EditStateRole: ItemEditState.NoState, SourcePosRole: i, SourceNameRole: c} for i, c in enumerate(var.categories) ] else: items = [] with disconnected(self.values_model.dataChanged, self.on_values_changed): self.values_model.clear() self.values_model.insertRows(0, len(items)) for i, item in enumerate(items): self.values_model.setItemData( self.values_model.index(i, 0), item ) self.add_new_item.actionGroup().setEnabled(var is not None) def __categories_mapping(self): # type: () -> CategoriesMappingType """ Encode and return the current state as a CategoriesMappingType """ model = self.values_model source = self.var.categories res = [] # type: CategoriesMappingType for i in range(model.rowCount()): midx = model.index(i, 0) category = midx.data(Qt.EditRole) source_pos = midx.data(SourcePosRole) # type: Optional[int] if source_pos is not None: source_name = source[source_pos] else: source_name = None state = midx.data(EditStateRole) if state == ItemEditState.Dropped: res.append((source_name, None)) elif state == 
ItemEditState.Added: res.append((None, category)) else: res.append((source_name, category)) return res def get_data(self): """Retrieve the modified variable """ var, tr = super().get_data() if var is None: return var, tr mapping = self.__categories_mapping() assert len(mapping) >= len(var.categories), f'{mapping}, {var}' if any(_1 != _2 or _2 != _3 for (_1, _2), _3 in zip_longest(mapping, var.categories)): tr.append(CategoriesMapping(mapping)) return var, tr def clear(self): """Clear the model state. """ super().clear() self.values_model.clear() def move_rows(self, rows, offset): if not rows: return assert len(rows) == 1 i = rows[0].row() if offset > 0: offset += 1 self.values_model.moveRows(QModelIndex(), i, 1, QModelIndex(), i + offset) self.variable_changed.emit() def move_up(self): rows = self.values_edit.selectionModel().selectedRows() self.move_rows(rows, -1) def move_down(self): rows = self.values_edit.selectionModel().selectedRows() self.move_rows(rows, 1) @Slot() def on_values_changed(self): self.variable_changed.emit() @Slot() def on_value_selection_changed(self): rows = self.values_edit.selectionModel().selectedRows() if len(rows) == 1: i = rows[0].row() self.move_value_up.setEnabled(i != 0) self.move_value_down.setEnabled(i != self.values_model.rowCount() - 1) else: self.move_value_up.setEnabled(False) self.move_value_down.setEnabled(False) def _remove_category(self): """ Remove the current selected category. If the item is an existing category present in the source variable it is marked as removed in the view. But if it was added in the set transformation it is removed entirely from the model and view. """ view = self.values_edit rows = view.selectionModel().selectedRows(0) if not rows: return # pragma: no cover for index in rows: model = index.model() state = index.data(EditStateRole) pos = index.data(SourcePosRole) if pos is not None and pos >= 0: # existing level -> only mark/toggle its dropped state, model.setData( index, ItemEditState.Dropped if state != ItemEditState.Dropped else ItemEditState.NoState, EditStateRole) elif state == ItemEditState.Added: # new level -> remove it model.removeRow(index.row()) else: assert False, "invalid state '{}' for {}" \ .format(state, index.row()) def _add_category(self): """ Add a new category """ view = self.values_edit model = view.model() with disconnected(model.dataChanged, self.on_values_changed, Qt.UniqueConnection): row = model.rowCount() if not model.insertRow(model.rowCount()): return index = model.index(row, 0) model.setItemData( index, { Qt.EditRole: "", SourcePosRole: None, EditStateRole: ItemEditState.Added } ) view.setCurrentIndex(index) view.edit(index) self.on_values_changed() def _merge_categories(self) -> None: """ Merge less common categories into one with the dialog for merge selection. 
""" view = self.values_edit model = view.model() # type: QAbstractItemModel selected_attributes = [ind.data(SourceNameRole) for ind in view.selectedIndexes()] dlg = GroupItemsDialog( self.var, self._values, selected_attributes, self.merge_dialog_settings.get(self.var, {}), self, windowTitle=__("window_title_import_option"), sizeGripEnabled=True, ) dlg.setWindowModality(Qt.WindowModal) status = dlg.exec() dlg.deleteLater() self.merge_dialog_settings[self.var] = dlg.get_dialog_settings() rows = (model.index(i, 0) for i in range(model.rowCount())) def complete_merge(text, merge_attributes): # write the new text for edit role in all rows with disconnected(model.dataChanged, self.on_values_changed): for row in rows: if row.data(SourceNameRole) in merge_attributes: model.setData(row, text, Qt.EditRole) self.variable_changed.emit() if status == QDialog.Accepted: complete_merge( dlg.get_merged_value_name(), dlg.get_merge_attributes() ) def _rename_selected_categories(self): """ Rename selected categories and merging them. Popup an editable combo box for selection/edit of a new value. """ view = self.values_edit selmodel = view.selectionModel() index = view.currentIndex() if not selmodel.isSelected(index): indices = selmodel.selectedRows(0) if indices: index = indices[0] # delegate to the CategoriesEditDelegate view.edit(index) class ContinuousVariableEditor(VariableEditor): # TODO: enable editing of display format... pass class TimeVariableEditor(VariableEditor): # TODO: enable editing of display format... pass def variable_icon(var): # type: (Union[Variable, Type[Variable], ReinterpretTransform]) -> QIcon if not isinstance(var, type): var = type(var) if issubclass(var, (Categorical, AsCategorical)): return gui.attributeIconDict[1] elif issubclass(var, (Real, AsContinuous)): return gui.attributeIconDict[2] elif issubclass(var, (String, AsString)): return gui.attributeIconDict[3] elif issubclass(var, (Time, AsTime)): return gui.attributeIconDict[4] else: return gui.attributeIconDict[-1] #: ItemDataRole storing the data vector transform #: (`List[Union[ReinterpretTransform, Transform]]`) TransformRole = Qt.UserRole + 42 class VariableEditDelegate(QStyledItemDelegate): ReinterpretNames = { AsCategorical: "categorical", AsContinuous: "numeric", AsString: "string", AsTime: "time" } def initStyleOption(self, option, index): # type: (QStyleOptionViewItem, QModelIndex) -> None super().initStyleOption(option, index) item = index.data(Qt.EditRole) var = tr = None if isinstance(item, DataVectorTypes): var = item.vtype option.icon = variable_icon(var) if isinstance(item, VariableTypes): var = item option.icon = variable_icon(item) elif isinstance(item, Orange.data.Variable): var = item option.icon = gui.attributeIconDict[var] transform = index.data(TransformRole) if not isinstance(transform, list): transform = [] if transform and isinstance(transform[0], ReinterpretTransformTypes): option.icon = variable_icon(transform[0]) if not option.icon.isNull(): option.features |= QStyleOptionViewItem.HasDecoration if var is not None: text = var.name for tr in transform: if isinstance(tr, Rename): text = ("{} \N{RIGHTWARDS ARROW} {}" .format(var.name, tr.name)) for tr in transform: if isinstance(tr, ReinterpretTransformTypes): text += f" (reinterpreted as " \ f"{self.ReinterpretNames[type(tr)]})" option.text = text if transform: # mark as changed (maybe also change color, add text, ...) option.font.setItalic(True) # Item model for edited variables (Variable). Define a display role to be the # source variable name. 
This is used only in keyboard search. The display is # otherwise completely handled by a delegate. class VariableListModel(itemmodels.PyListModel): def data(self, index, role=Qt.DisplayRole): # type: (QModelIndex, Qt.ItemDataRole) -> Any row = index.row() if not index.isValid() or not 0 <= row < self.rowCount(): return None if role == Qt.DisplayRole: item = self[row] if isinstance(item, VariableTypes): return item.name if isinstance(item, DataVectorTypes): return item.vtype.name return super().data(index, role) class ReinterpretVariableEditor(VariableEditor): """ A 'compound' variable editor capable of variable type reinterpretations. """ _editors = { Categorical: 0, Real: 1, String: 2, Time: 3, type(None): -1, } def __init__(self, parent=None, **kwargs): # Explicitly skip VariableEditor's __init__, this is ugly but we have # a completely different layout/logic as a compound editor (should # really not subclass VariableEditor). super(VariableEditor, self).__init__(parent, **kwargs) # pylint: disable=bad-super-call self.var = None # type: Optional[Variable] self.__transform = None # type: Optional[ReinterpretTransform] self.__data = None # type: Optional[DataVector] #: Stored transform state indexed by variable. Used to preserve state #: between type switches. self.__history = {} # type: Dict[Variable, List[Transform]] self.setLayout(QStackedLayout()) def decorate(editor: VariableEditor) -> VariableEditor: """insert an type combo box into a `editor`'s layout.""" form = editor.layout().itemAt(0) assert isinstance(form, QFormLayout) typecb = QComboBox(objectName="type-combo") typecb.addItem(variable_icon(Categorical), __("type.categorical"), Categorical) typecb.addItem(variable_icon(Real), __("type.numeric"), Real) typecb.addItem(variable_icon(String), __("type.text"), String) typecb.addItem(variable_icon(Time), __("type.time"), Time) typecb.activated[int].connect(self.__reinterpret_activated) form.insertRow(1, __("row.type"), typecb) # Insert the typecb after name edit in the focus chain name_edit = editor.findChild(QLineEdit, ) if name_edit is not None: QWidget.setTabOrder(name_edit, typecb) return editor # This is ugly. Create an editor for each type and insert a type # selection combo box into its layout. Switch between widgets # on type change. self.disc_edit = dedit = decorate(DiscreteVariableEditor()) cedit = decorate(ContinuousVariableEditor()) tedit = decorate(TimeVariableEditor()) sedit = decorate(VariableEditor()) for ed in [dedit, cedit, tedit, sedit]: ed.variable_changed.connect(self.variable_changed) self.layout().addWidget(dedit) self.layout().addWidget(cedit) self.layout().addWidget(sedit) self.layout().addWidget(tedit) def set_data(self, data, transform=()): # pylint: disable=arguments-differ # type: (Optional[DataVector], Sequence[Transform]) -> None """ Set the editor data. Note ---- This must be a `DataVector` as the vector's values are needed for type reinterpretation/casts. If the `transform` sequence contains ReinterpretTransform then it must be in the first position. 
""" type_transform = None # type: Optional[ReinterpretTransform] if transform: _tr = transform[0] if isinstance(_tr, ReinterpretTransformTypes): type_transform = _tr transform = transform[1:] assert not any(isinstance(t, ReinterpretTransformTypes) for t in transform) self.__transform = type_transform self.__data = data self.var = data.vtype if data is not None else None if type_transform is not None and data is not None: data = type_transform(data) if data is not None: var = data.vtype else: var = None index = self._editors.get(type(var), -1) self.layout().setCurrentIndex(index) if index != -1: w = self.layout().currentWidget() assert isinstance(w, VariableEditor) if isinstance(var, Categorical): w.set_data_categorical(var, data.data(), transform=transform) else: w.set_data(var, transform=transform) self.__history[var] = tuple(transform) cb = w.findChild(QComboBox, "type-combo") cb.setCurrentIndex(index) def get_data(self): # type: () -> Tuple[Variable, Sequence[Transform]] editor = self.layout().currentWidget() # type: VariableEditor var, tr = editor.get_data() if type(var) != type(self.var): # pylint: disable=unidiomatic-typecheck assert self.__transform is not None var = self.var tr = [self.__transform, *tr] return var, tr def __reinterpret_activated(self, index): layout = self.layout() assert isinstance(layout, QStackedLayout) if index == layout.currentIndex(): return current = layout.currentWidget() assert isinstance(current, VariableEditor) Specific = { Categorical: CategoricalTransformTypes } _var, _tr = current.get_data() if _var is not None: self.__history[_var] = _tr var = self.var transform = self.__transform # take/preserve the general transforms that apply to all types specific = Specific.get(type(var), ()) _tr = [t for t in _tr if not isinstance(t, specific)] layout.setCurrentIndex(index) w = layout.currentWidget() cb = w.findChild(QComboBox, "type-combo") cb.setCurrentIndex(index) cb.setFocus() target = cb.itemData(index, Qt.UserRole) assert issubclass(target, VariableTypes) if not isinstance(var, target): if target == Real: transform = AsContinuous() elif target == Categorical: transform = AsCategorical() elif target == Time: transform = AsTime() elif target == String: transform = AsString() else: transform = None var = self.var self.__transform = transform data = None if transform is not None and self.__data is not None: data = transform(self.__data) var = data.vtype if var in self.__history: tr = self.__history[var] else: tr = [] # type specific transform specific = Specific.get(type(var), ()) # merge tr and _tr tr = _tr + [t for t in tr if isinstance(t, specific)] with disconnected( w.variable_changed, self.variable_changed, Qt.UniqueConnection ): if isinstance(w, DiscreteVariableEditor): data = data or self.__data w.set_data_categorical(var, data.data(), transform=tr) else: w.set_data(var, transform=tr) self.variable_changed.emit() def set_merge_context(self, merge_context): self.disc_edit.merge_dialog_settings = merge_context def get_merge_context(self): return self.disc_edit.merge_dialog_settings class OWEditDomain(widget.OWWidget): name = __("name") description = __("desc") icon = "icons/EditDomain.svg" priority = 3125 keywords = ["rename", "drop", "reorder", "order"] class Inputs: data = Input("Data", Orange.data.Table, label=i18n.t("widget.data.data.common.data")) class Outputs: data = Output("Data", Orange.data.Table, label=i18n.t("widget.data.data.common.data")) class Error(widget.OWWidget.Error): duplicate_var_name = widget.Msg(__("msg_name_duplicated")) 
settingsHandler = settings.DomainContextHandler() settings_version = 2 _domain_change_store = settings.ContextSetting({}) _selected_item = settings.ContextSetting(None) # type: Optional[Tuple[str, int]] _merge_dialog_settings = settings.ContextSetting({}) output_table_name = settings.ContextSetting("") want_main_area = False def __init__(self): super().__init__() self.data = None # type: Optional[Orange.data.Table] #: The current selected variable index self.selected_index = -1 self._invalidated = False self.typeindex = 0 main = gui.hBox(self.controlArea, spacing=6) box = gui.vBox(main, i18n.t("common.general.variable")) self.variables_model = VariableListModel(parent=self) self.variables_view = self.domain_view = QListView( selectionMode=QListView.SingleSelection, uniformItemSizes=True, ) self.variables_view.setItemDelegate(VariableEditDelegate(self)) self.variables_view.setModel(self.variables_model) self.variables_view.selectionModel().selectionChanged.connect( self._on_selection_changed ) box.layout().addWidget(self.variables_view) box = gui.vBox(main, __("box.edit")) self._editor = ReinterpretVariableEditor() box.layout().addWidget(self._editor) self.le_output_name = gui.lineEdit( self.buttonsArea, self, "output_table_name", __("row.le_output_name"), orientation=Qt.Horizontal) gui.rubber(self.buttonsArea) bbox = gui.hBox(self.buttonsArea) breset_all = gui.button( bbox, self, __("btn.reset_all"), objectName="button-reset-all", toolTip=__("tooltip.reset_state"), autoDefault=False, callback=self.reset_all ) breset = gui.button( bbox, self, __("btn.reset_select"), objectName="button-reset", toolTip=__("tooltip.rest_state"), autoDefault=False, callback=self.reset_selected ) bapply = gui.button( bbox, self, i18n.t("common.btn.apply"), objectName="button-apply", toolTip=__("tooltip.apply_data"), default=True, autoDefault=False, callback=self.commit ) self.variables_view.setFocus(Qt.NoFocusReason) # initial focus @Inputs.data def set_data(self, data): """Set input dataset.""" self.closeContext() self.clear() self.data = data if self.data is not None: self.setup_model(data) self.le_output_name.setPlaceholderText(data.name) self.openContext(self.data) self._editor.set_merge_context(self._merge_dialog_settings) self._restore() else: self.le_output_name.setPlaceholderText("") self.commit() def clear(self): """Clear the widget state.""" self.data = None self.variables_model.clear() self.clear_editor() assert self.selected_index == -1 self.selected_index = -1 self._selected_item = None self._domain_change_store = {} self._merge_dialog_settings = {} def reset_selected(self): """Reset the currently selected variable to its original state.""" ind = self.selected_var_index() if ind >= 0: model = self.variables_model midx = model.index(ind) var = midx.data(Qt.EditRole) tr = midx.data(TransformRole) if not tr: return # nothing to reset editor = self._editor with disconnected(editor.variable_changed, self._on_variable_changed): model.setData(midx, [], TransformRole) editor.set_data(var, transform=[]) self._invalidate() def reset_all(self): """Reset all variables to their original state.""" self._domain_change_store = {} if self.data is not None: model = self.variables_model for i in range(model.rowCount()): midx = model.index(i) model.setData(midx, [], TransformRole) index = self.selected_var_index() if index >= 0: self.open_editor(index) self._invalidate() def selected_var_index(self): """Return the current selected variable index.""" rows = self.variables_view.selectedIndexes() assert len(rows) <= 1 
return rows[0].row() if rows else -1 def setup_model(self, data: Orange.data.Table): model = self.variables_model vars_ = [] columns = [] for i, _, var, coldata in enumerate_columns(data): var = abstract(var) vars_.append(var) if isinstance(var, Categorical): data = CategoricalVector(var, coldata) elif isinstance(var, Real): data = RealVector(var, coldata) elif isinstance(var, Time): data = TimeVector(var, coldata) elif isinstance(var, String): data = StringVector(var, coldata) columns.append(data) model[:] = vars_ for i, d in enumerate(columns): model.setData(model.index(i), d, Qt.EditRole) def _restore(self, ): """ Restore the edit transform from saved state. """ model = self.variables_model for i in range(model.rowCount()): midx = model.index(i, 0) coldesc = model.data(midx, Qt.EditRole) # type: DataVector tr = self._restore_transform(coldesc.vtype) if tr: model.setData(midx, tr, TransformRole) # Restore the current variable selection i = -1 if self._selected_item is not None: for i, vec in enumerate(model): if vec.vtype.name_type() == self._selected_item: break if i == -1 and model.rowCount(): i = 0 if i != -1: itemmodels.select_row(self.variables_view, i) def _on_selection_changed(self): self.selected_index = self.selected_var_index() if self.selected_index != -1: self._selected_item = self.variables_model[self.selected_index].vtype.name_type() else: self._selected_item = None self.open_editor(self.selected_index) def open_editor(self, index): # type: (int) -> None self.clear_editor() model = self.variables_model if not 0 <= index < model.rowCount(): return idx = model.index(index, 0) vector = model.data(idx, Qt.EditRole) tr = model.data(idx, TransformRole) if tr is None: tr = [] editor = self._editor editor.set_data(vector, transform=tr) editor.variable_changed.connect( self._on_variable_changed, Qt.UniqueConnection ) def clear_editor(self): current = self._editor try: current.variable_changed.disconnect(self._on_variable_changed) except TypeError: pass current.set_data(None) current.layout().currentWidget().clear() @Slot() def _on_variable_changed(self): """User edited the current variable in editor.""" assert 0 <= self.selected_index <= len(self.variables_model) editor = self._editor var, transform = editor.get_data() model = self.variables_model midx = model.index(self.selected_index, 0) model.setData(midx, transform, TransformRole) self._store_transform(var, transform) self._invalidate() def _store_transform(self, var, transform): # type: (Variable, List[Transform]) -> None self._domain_change_store[deconstruct(var)] = [deconstruct(t) for t in transform] def _restore_transform(self, var): # type: (Variable) -> List[Transform] tr_ = self._domain_change_store.get(deconstruct(var), []) tr = [] for t in tr_: try: tr.append(reconstruct(*t)) except (NameError, TypeError) as err: warnings.warn( __("fail_restore_transform").format(t, err), UserWarning, stacklevel=2 ) return tr def _invalidate(self): self._set_modified(True) def _set_modified(self, state): self._invalidated = state b = self.findChild(QPushButton, "button-apply") if isinstance(b, QPushButton): f = b.font() f.setItalic(state) b.setFont(f) def commit(self): """ Apply the changes to the input data and send the changed data to output. 
""" self._set_modified(False) self.Error.duplicate_var_name.clear() data = self.data if data is None: self.Outputs.data.send(None) return model = self.variables_model def state(i): # type: (int) -> Tuple[DataVector, List[Transform]] midx = self.variables_model.index(i, 0) return (model.data(midx, Qt.EditRole), model.data(midx, TransformRole)) state = [state(i) for i in range(model.rowCount())] input_vars = data.domain.variables + data.domain.metas if self.output_table_name in ("", data.name) \ and not any(requires_transform(var, trs) for var, (_, trs) in zip(input_vars, state)): self.Outputs.data.send(data) return assert all(v_.vtype.name == v.name for v, (v_, _) in zip(input_vars, state)) output_vars = [] unlinked_vars = [] unlink_domain = False for (_, tr), v in zip(state, input_vars): if tr: var = apply_transform(v, data, tr) if requires_unlink(v, tr): unlinked_var = var.copy(compute_value=None) unlink_domain = True else: unlinked_var = var else: unlinked_var = var = v output_vars.append(var) unlinked_vars.append(unlinked_var) if len(output_vars) != len({v.name for v in output_vars}): self.Error.duplicate_var_name() self.Outputs.data.send(None) return domain = data.domain nx = len(domain.attributes) ny = len(domain.class_vars) def construct_domain(vars_list): # Move non primitive Xs, Ys to metas (if they were changed) Xs = [v for v in vars_list[:nx] if v.is_primitive()] Ys = [v for v in vars_list[nx: nx + ny] if v.is_primitive()] Ms = vars_list[nx + ny:] + \ [v for v in vars_list[:nx + ny] if not v.is_primitive()] return Orange.data.Domain(Xs, Ys, Ms) domain = construct_domain(output_vars) new_data = data.transform(domain) if unlink_domain: unlinked_domain = construct_domain(unlinked_vars) new_data = new_data.from_numpy( unlinked_domain, new_data.X, new_data.Y, new_data.metas, new_data.W, new_data.attributes, new_data.ids ) if self.output_table_name: new_data.name = self.output_table_name self.Outputs.data.send(new_data) def sizeHint(self): sh = super().sizeHint() return sh.expandedTo(QSize(660, 550)) def storeSpecificSettings(self): """ Update setting before context closes - also when widget closes. 
""" self._merge_dialog_settings = self._editor.get_merge_context() def send_report(self): if self.data is not None: model = self.variables_model state = ((model.data(midx, Qt.EditRole), model.data(midx, TransformRole)) for i in range(model.rowCount()) for midx in [model.index(i)]) parts = [] for vector, trs in state: if trs: parts.append(report_transform(vector.vtype, trs)) if parts: html = ("<ul>" + "".join(map("<li>{}</li>".format, parts)) + "</ul>") else: html = __("tooltip.no_changes") self.report_raw("", html) else: self.report_data(None) @classmethod def migrate_context(cls, context, version): # pylint: disable=bad-continuation if version is None or version <= 1: hints_ = context.values.get("domain_change_hints", ({}, -2))[0] store = [] ns = "Orange.data.variable" mapping = { "DiscreteVariable": lambda name, args, attrs: ("Categorical", (name, tuple(args[0][1]), ())), "TimeVariable": lambda name, _, attrs: ("Time", (name, ())), "ContinuousVariable": lambda name, _, attrs: ("Real", (name, (3, "f"), ())), "StringVariable": lambda name, _, attrs: ("String", (name, ())), } for (module, class_name, *rest), target in hints_.items(): if module != ns: continue f = mapping.get(class_name) if f is None: continue trs = [] key_mapped = f(*rest) item_mapped = f(*target[2:]) src = reconstruct(*key_mapped) # type: Variable dst = reconstruct(*item_mapped) # type: Variable if src.name != dst.name: trs.append(Rename(dst.name)) if src.annotations != dst.annotations: trs.append(Annotate(dst.annotations)) if isinstance(src, Categorical): if src.categories != dst.categories: assert len(src.categories) == len(dst.categories) trs.append(CategoriesMapping( list(zip(src.categories, dst.categories)))) store.append((deconstruct(src), [deconstruct(tr) for tr in trs])) context.values["_domain_change_store"] = (dict(store), -2) def enumerate_columns( table: Orange.data.Table ) -> Iterable[Tuple[int, str, Orange.data.Variable, Callable[[], ndarray]]]: domain = table.domain for i, (var, role) in enumerate( chain(zip(domain.attributes, repeat("x")), zip(domain.class_vars, repeat("y")), zip(domain.metas, repeat("m"))), ): if i >= len(domain.variables): i = len(domain.variables) - i - 1 data = partial(table_column_data, table, i) yield i, role, var, data def table_column_data( table: Orange.data.Table, var: Union[Orange.data.Variable, int], dtype=None ) -> MArray: col, copy = table.get_column_view(var) var = table.domain[var] # type: Orange.data.Variable if var.is_primitive() and not np.issubdtype(col.dtype, np.inexact): col = col.astype(float) copy = True if dtype is None: if isinstance(var, Orange.data.TimeVariable): dtype = np.dtype("M8[us]") col = col * 1e6 elif isinstance(var, Orange.data.ContinuousVariable): dtype = np.dtype(float) elif isinstance(var, Orange.data.DiscreteVariable): _values = tuple(var.values) _n_values = len(_values) dtype = np.dtype(int, metadata={ "__formatter": lambda i: _values[i] if 0 <= i < _n_values else "?" }) elif isinstance(var, Orange.data.StringVariable): dtype = np.dtype(object) else: assert False mask = orange_isna(var, col) if dtype != col.dtype: col = col.astype(dtype) copy = True if not copy: col = col.copy() return MArray(col, mask=mask) def report_transform(var, trs): # type: (Variable, List[Transform]) -> str """ Return a html fragment summarizing the changes applied by `trs` list. Parameters ---------- var : Variable A variable descriptor no which trs operates trs : List[Transform] A non empty list of `Transform` instances. 
Returns ------- report : str """ # pylint: disable=too-many-branches ReinterpretTypeCode = { AsCategorical: "C", AsContinuous: "N", AsString: "S", AsTime: "T", } def type_char(value: ReinterpretTransform) -> str: return ReinterpretTypeCode.get(type(value), "?") def strike(text): return "<s>{}</s>".format(escape(text)) def i(text): return "<i>{}</i>".format(escape(text)) def text(text): return "<span>{}</span>".format(escape(text)) assert trs rename = annotate = catmap = unlink = None reinterpret = None for tr in trs: if isinstance(tr, Rename): rename = tr elif isinstance(tr, Annotate): annotate = tr elif isinstance(tr, CategoriesMapping): catmap = tr elif isinstance(tr, Unlink): unlink = tr elif isinstance(tr, ReinterpretTransformTypes): reinterpret = tr if reinterpret is not None: header = "{} → ({}) {}".format( var.name, type_char(reinterpret), rename.name if rename is not None else var.name ) elif rename is not None: header = "{} → {}".format(var.name, rename.name) else: header = var.name if unlink is not None: header += "(unlinked from source)" values_section = None if catmap is not None: values_section = ("Values", []) lines = values_section[1] for ci, cj in catmap.mapping: if ci is None: item = cj + ("&nbsp;" * 3) + "(added)" elif cj is None: item = strike(ci) else: item = ci + " → " + cj lines.append(item) annotate_section = None if annotate is not None: annotate_section = (__("row.label"), []) lines = annotate_section[1] old = dict(var.annotations) new = dict(annotate.annotations) for name in sorted(set(old) - set(new)): lines.append( "<s>" + i(name) + " : " + text(old[name]) + "</s>" ) for name in sorted(set(new) - set(old)): lines.append( i(name) + " : " + text(new[name]) + "&nbsp;" * 3 + i("(new)") ) for name in sorted(set(new) & set(old)): if new[name] != old[name]: lines.append( i(name) + " : " + text(old[name]) + " → " + text(new[name]) ) html = ["<div style='font-weight: bold;'>{}</div>".format(header)] for title, contents in filter(None, [values_section, annotate_section]): section_header = "<div>{}:</div>".format(title) section_contents = "<br/>\n".join(contents) html.append(section_header) html.append( "<div style='padding-left: 1em;'>" + section_contents + "</div>" ) return "\n".join(html) def abstract(var): # type: (Orange.data.Variable) -> Variable """ Return `Varaible` descriptor for an `Orange.data.Variable` instance. Parameters ---------- var : Orange.data.Variable Returns ------- var : Variable """ annotations = tuple(sorted( (key, str(value)) for key, value in var.attributes.items() )) linked = var.compute_value is not None if isinstance(var, Orange.data.DiscreteVariable): return Categorical(var.name, tuple(var.values), annotations, linked) elif isinstance(var, Orange.data.TimeVariable): return Time(var.name, annotations, linked) elif isinstance(var, Orange.data.ContinuousVariable): return Real(var.name, (var.number_of_decimals, 'f'), annotations, linked) elif isinstance(var, Orange.data.StringVariable): return String(var.name, annotations, linked) else: raise TypeError def _parse_attributes(mapping): # type: (Iterable[Tuple[str, str]]) -> Dict[str, Any] # Use the same functionality that parses attributes # when reading text files return Orange.data.Flags([ "{}={}".format(*item) for item in mapping ]).attributes def apply_transform(var, table, trs): # type: (Orange.data.Variable, Orange.data.Table, List[Transform]) -> Orange.data.Variable """ Apply a list of `Transform` instances on an `Orange.data.Variable`. 
""" if trs and isinstance(trs[0], ReinterpretTransformTypes): reinterpret, trs = trs[0], trs[1:] coldata = table_column_data(table, var) var = apply_reinterpret(var, reinterpret, coldata) if trs: return apply_transform_var(var, trs) else: return var def requires_unlink(var: Orange.data.Variable, trs: List[Transform]) -> bool: # Variable is only unlinked if it has compute_value or if it has other # transformations (that might had added compute_value) return trs is not None \ and any(isinstance(tr, Unlink) for tr in trs) \ and (var.compute_value is not None or len(trs) > 1) def requires_transform(var: Orange.data.Variable, trs: List[Transform]) -> bool: # Unlink is treated separately: Unlink is required only if the variable # has compute_value. Hence tranform is required if it has any # transformations other than Unlink, or if unlink is indeed required. return trs is not None and ( not all(isinstance(tr, Unlink) for tr in trs) or requires_unlink(var, trs) ) @singledispatch def apply_transform_var(var, trs): # type: (Orange.data.Variable, List[Transform]) -> Orange.data.Variable raise NotImplementedError @apply_transform_var.register(Orange.data.DiscreteVariable) def apply_transform_discete(var, trs): # type: (Orange.data.DiscreteVariable, List[Transform]) -> Orange.data.Variable # pylint: disable=too-many-branches name, annotations = var.name, var.attributes mapping = None for tr in trs: if isinstance(tr, Rename): name = tr.name elif isinstance(tr, CategoriesMapping): mapping = tr.mapping elif isinstance(tr, Annotate): annotations = _parse_attributes(tr.annotations) source_values = var.values if mapping is not None: dest_values = list(unique(cj for ci, cj in mapping if cj is not None)) else: dest_values = var.values def positions(values): rval = {c: i for i, c in enumerate(values)} assert len(rval) == len(values) return rval source_codes = positions(source_values) dest_codes = positions(dest_values) if mapping is not None: # construct a lookup table lookup = np.full(len(source_values), np.nan, dtype=float) for ci, cj in mapping: if ci is not None and cj is not None: i, j = source_codes[ci], dest_codes[cj] lookup[i] = j lookup = Lookup(var, lookup) else: lookup = Identity(var) variable = Orange.data.DiscreteVariable( name, values=dest_values, compute_value=lookup ) variable.attributes.update(annotations) return variable @apply_transform_var.register(Orange.data.ContinuousVariable) def apply_transform_continuous(var, trs): # type: (Orange.data.ContinuousVariable, List[Transform]) -> Orange.data.Variable name, annotations = var.name, var.attributes for tr in trs: if isinstance(tr, Rename): name = tr.name elif isinstance(tr, Annotate): annotations = _parse_attributes(tr.annotations) variable = Orange.data.ContinuousVariable( name=name, compute_value=Identity(var) ) variable.attributes.update(annotations) return variable @apply_transform_var.register(Orange.data.TimeVariable) def apply_transform_time(var, trs): # type: (Orange.data.TimeVariable, List[Transform]) -> Orange.data.Variable name, annotations = var.name, var.attributes for tr in trs: if isinstance(tr, Rename): name = tr.name elif isinstance(tr, Annotate): annotations = _parse_attributes(tr.annotations) variable = Orange.data.TimeVariable( name=name, have_date=var.have_date, have_time=var.have_time, compute_value=Identity(var) ) variable.attributes.update(annotations) return variable @apply_transform_var.register(Orange.data.StringVariable) def apply_transform_string(var, trs): # type: (Orange.data.StringVariable, List[Transform]) -> 
Orange.data.Variable name, annotations = var.name, var.attributes for tr in trs: if isinstance(tr, Rename): name = tr.name elif isinstance(tr, Annotate): annotations = _parse_attributes(tr.annotations) variable = Orange.data.StringVariable( name=name, compute_value=Identity(var) ) variable.attributes.update(annotations) return variable def ftry( func: Callable[..., A], error: Union[Type[BaseException], Tuple[Type[BaseException]]], default: B ) -> Callable[..., Union[A, B]]: """ Wrap a `func` such that if `errors` occur `default` is returned instead.""" def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except error: return default return wrapper class DictMissingConst(dict): """ `dict` with a constant for `__missing__()` value. """ __slots__ = ("__missing",) def __init__(self, missing, *args, **kwargs): self.__missing = missing super().__init__(*args, **kwargs) def __missing__(self, key): return self.__missing def __eq__(self, other): return super().__eq__(other) and isinstance(other, DictMissingConst) \ and (self.__missing == other[()] # get `__missing` or np.isnan(self.__missing) and np.isnan(other[()])) def __hash__(self): return hash((self.__missing, frozenset(self.items()))) def make_dict_mapper( mapping: Mapping, dtype: Optional[DType] = None ) -> Callable: """ Wrap a `mapping` into a callable ufunc-like function with `out`, `dtype`, `where`, ... parameters. If `dtype` is passed to `make_dict_mapper` it is used as a the default return dtype, otherwise the default dtype is `object`. """ _vmapper = np.frompyfunc(mapping.__getitem__, 1, 1) def mapper(arr, out=None, dtype=dtype, **kwargs): arr = np.asanyarray(arr) if out is None and dtype is not None and arr.shape != (): out = np.empty_like(arr, dtype) return _vmapper(arr, out, dtype=dtype, **kwargs) return mapper def time_parse(values: Sequence[str], name="__"): tvar = Orange.data.TimeVariable(name) parse_time = ftry(tvar.parse, ValueError, np.nan) _values = [parse_time(v) for v in values] if np.all(np.isnan(_values)): # try parsing it with pandas (like in transform) dti = pd.to_datetime(values, errors="coerce") _values = datetime_to_epoch(dti) date_only = getattr(dti, "_is_dates_only", False) if np.all(dti != pd.NaT): tvar.have_date = True tvar.have_time = not date_only return tvar, _values as_string = np.frompyfunc(str, 1, 1) parse_float = ftry(float, ValueError, float("nan")) _parse_float = np.frompyfunc(parse_float, 1, 1) def as_float_or_nan( arr: ndarray, out: Optional[ndarray] = None, where: Optional[ndarray] = True, dtype=None, **kwargs ) -> ndarray: """ Convert elements of the input array using builtin `float`, fill elements where conversion failed with NaN. 
""" if out is None: out = np.full(arr.shape, np.nan, float if dtype is None else dtype) if np.issubdtype(arr.dtype, np.inexact) or \ np.issubdtype(arr.dtype, np.integer): np.copyto(out, arr, casting="unsafe", where=where) return out return _parse_float(arr, out, where=where, **kwargs) def copy_attributes(dst: V, src: Orange.data.Variable) -> V: # copy `attributes` and `sparse` members from src to dst dst.attributes = dict(src.attributes) dst.sparse = src.sparse return dst # Building (and applying) concrete type transformations on Table columns @singledispatch def apply_reinterpret(var, tr, data): # type: (Orange.data.Variable, ReinterpretTransform, MArray) -> Orange.data.Variable """ Apply a re-interpret transform to an `Orange.data.Table`'s column """ raise NotImplementedError @apply_reinterpret.register(Orange.data.DiscreteVariable) def apply_reinterpret_d(var, tr, data): # type: (Orange.data.DiscreteVariable, ReinterpretTransform, ndarray) -> Orange.data.Variable if isinstance(tr, AsCategorical): return var elif isinstance(tr, AsString): f = Lookup(var, np.array(var.values, dtype=object), unknown="") rvar = Orange.data.StringVariable( name=var.name, compute_value=f ) elif isinstance(tr, AsContinuous): f = Lookup(var, np.array(list(map(parse_float, var.values))), unknown=np.nan) rvar = Orange.data.ContinuousVariable( name=var.name, compute_value=f, sparse=var.sparse ) elif isinstance(tr, AsTime): _tvar, values = time_parse(var.values) f = Lookup(var, np.array(values), unknown=np.nan) rvar = Orange.data.TimeVariable( name=var.name, have_date=_tvar.have_date, have_time=_tvar.have_time, compute_value=f, ) else: assert False return copy_attributes(rvar, var) @apply_reinterpret.register(Orange.data.ContinuousVariable) def apply_reinterpret_c(var, tr, data: MArray): if isinstance(tr, AsCategorical): # This is ill defined and should not result in a 'compute_value' # (post-hoc expunge from the domain once translated?) values, index = categorize_unique(data) coldata = index.astype(float) coldata[index.mask] = np.nan tr = LookupMappingTransform( var, DictMissingConst( np.nan, {v: i for i, v in enumerate(values)} ) ) values = tuple(as_string(values)) rvar = Orange.data.DiscreteVariable( name=var.name, values=values, compute_value=tr ) elif isinstance(tr, AsContinuous): return var elif isinstance(tr, AsString): tstr = ToStringTransform(var) rvar = Orange.data.StringVariable( name=var.name, compute_value=tstr ) elif isinstance(tr, AsTime): rvar = Orange.data.TimeVariable( name=var.name, compute_value=Identity(var) ) else: assert False return copy_attributes(rvar, var) @apply_reinterpret.register(Orange.data.StringVariable) def apply_reinterpret_s(var: Orange.data.StringVariable, tr, data: MArray): if isinstance(tr, AsCategorical): # This is ill defined and should not result in a 'compute_value' # (post-hoc expunge from the domain once translated?) 
_, values = categorical_from_vector(data) mapping = DictMissingConst( np.nan, {v: float(i) for i, v in enumerate(values)} ) tr = LookupMappingTransform(var, mapping) rvar = Orange.data.DiscreteVariable( name=var.name, values=values, compute_value=tr ) elif isinstance(tr, AsContinuous): rvar = Orange.data.ContinuousVariable( var.name, compute_value=ToContinuousTransform(var) ) elif isinstance(tr, AsString): return var elif isinstance(tr, AsTime): tvar, _ = time_parse(np.unique(data.data[~data.mask])) rvar = Orange.data.TimeVariable( name=var.name, have_date=tvar.have_date, have_time=tvar.have_time, compute_value=ReparseTimeTransform(var) ) else: assert False return copy_attributes(rvar, var) @apply_reinterpret.register(Orange.data.TimeVariable) def apply_reinterpret_t(var: Orange.data.TimeVariable, tr, data): if isinstance(tr, AsCategorical): values, _ = categorize_unique(data) or_values = values.astype(float) / 1e6 mapping = DictMissingConst( np.nan, {v: i for i, v in enumerate(or_values)} ) tr = LookupMappingTransform(var, mapping) values = tuple(as_string(values)) rvar = Orange.data.DiscreteVariable( name=var.name, values=values, compute_value=tr ) elif isinstance(tr, AsContinuous): rvar = Orange.data.TimeVariable( name=var.name, compute_value=Identity(var) ) elif isinstance(tr, AsString): rvar = Orange.data.StringVariable( name=var.name, compute_value=ToStringTransform(var) ) elif isinstance(tr, AsTime): return var else: assert False return copy_attributes(rvar, var) def orange_isna(variable: Orange.data.Variable, data: ndarray) -> ndarray: """ Return a bool mask masking N/A elements in `data` for the `variable`. """ if variable.is_primitive(): return np.isnan(data) else: return data == variable.Unknown class ToStringTransform(Transformation): """ Transform a variable to string. """ def transform(self, c): if self.variable.is_string: return c elif self.variable.is_discrete or self.variable.is_time: r = column_str_repr(self.variable, c) elif self.variable.is_continuous: r = as_string(c) mask = orange_isna(self.variable, c) return np.where(mask, "", r) class ToContinuousTransform(Transformation): def transform(self, c): if self.variable.is_time: return c elif self.variable.is_continuous: return c elif self.variable.is_discrete: lookup = Lookup( self.variable, as_float_or_nan(self.variable.values), unknown=np.nan ) return lookup.transform(c) elif self.variable.is_string: return as_float_or_nan(c) else: raise TypeError def datetime_to_epoch(dti: pd.DatetimeIndex) -> np.ndarray: """Convert datetime to epoch""" data = dti.values.astype("M8[us]") mask = np.isnat(data) data = data.astype(float) / 1e6 data[mask] = np.nan return data class ReparseTimeTransform(Transformation): """ Re-parse the column's string repr as datetime. """ def transform(self, c): c = column_str_repr(self.variable, c) c = pd.to_datetime(c, errors="coerce") return datetime_to_epoch(c) class LookupMappingTransform(Transformation): """ Map values via a dictionary lookup. 
""" def __init__( self, variable: Orange.data.Variable, mapping: Mapping, dtype: Optional[np.dtype] = None ) -> None: super().__init__(variable) self.mapping = mapping self.dtype = dtype self._mapper = make_dict_mapper(mapping, dtype) def transform(self, c): return self._mapper(c) def __reduce_ex__(self, protocol): return type(self), (self.variable, self.mapping, self.dtype) def __eq__(self, other): return self.variable == other.variable \ and self.mapping == other.mapping \ and self.dtype == other.dtype def __hash__(self): return hash((type(self), self.variable, self.mapping, self.dtype)) @singledispatch def column_str_repr(var: Orange.data.Variable, coldata: ndarray) -> ndarray: """Return a array of str representations of coldata for the `variable.""" _f = np.frompyfunc(var.repr_val, 1, 1) return _f(coldata) @column_str_repr.register(Orange.data.DiscreteVariable) def column_str_repr_discrete( var: Orange.data.DiscreteVariable, coldata: ndarray ) -> ndarray: values = np.array(var.values, dtype=object) lookup = Lookup(var, values, "?") return lookup.transform(coldata) @column_str_repr.register(Orange.data.StringVariable) def column_str_repr_string( var: Orange.data.StringVariable, coldata: ndarray ) -> ndarray: return np.where(coldata == var.Unknown, "?", coldata) if __name__ == "__main__": # pragma: no cover WidgetPreview(OWEditDomain).run(Orange.data.Table("iris"))
PypiClean
/HuTao%20agent-1.0.3.tar.gz/HuTao agent-1.0.3/walnut_agent/common/Fzpagpoco.py
__author__ = 'paomian'

import logging

from airtest.core.api import *


class PKpoco(object):

    @property
    def platform(self):
        return self.Spoco.desired_capabilities['platformName']

    def __init__(self, spoco):
        self.Spoco = spoco

    def is_element_text_exists(self, text):
        """Return whether a UI element with the given text exists."""
        return self.Spoco(text=text).exists()

    def is_element_name_exists(self, name):
        """Return whether a UI element with the given name exists."""
        return self.Spoco(name=name).exists()

    def find_element_by_Name(self, name):
        """Find a UI element by its name."""
        logging.warning(f'Locating element: [{name}] ')
        try:
            if self.is_element_name_exists(name):
                return self.Spoco(name=name)
        except Exception:
            logging.warning(f'UI element does not exist: [{name}] ')
            print("Recognition failed, target element not found:", name)
            snapshot(msg="failure screenshot")
        return self

    def find_element_by_Text(self, text):
        """Find a UI element by its text."""
        logging.warning(f'Locating element: [{text}] ')
        try:
            if self.is_element_text_exists(text):
                # print(self.Spoco(text=text))
                # print(poco.is_element_exists(text))
                return self.Spoco(text=text)
        except Exception:
            logging.warning(f'Text match failed: [{text}] ')
            print("Recognition failed, target element not found:", text)
            snapshot(filename="test.png", msg="failure screenshot")
        return self

    def find_Text(self, text):
        logging.warning(f'Locating element: [{text}] ')
        return self.Spoco(text=text)

    def find_name(self, name):
        logging.warning(f'Locating element: [{name}] ')
        return self.Spoco(name=name)

    def find_desc(self, desc):
        logging.warning(f'Locating element: [{desc}] ')
        return self.Spoco(desc=desc)

    def find_all(self):
        # logging.warning(f'Locating element: [{text}] ')
        return self

    def up(self):
        """Swipe up."""
        xy = self.Spoco.get_screen_size()
        print(xy)
        x = xy[0]
        y = xy[1]
        swipe([x * 0.5, y * 0.9], [x * 0.5, y * 0.1])

    def down(self):
        """Swipe down."""
        xy = self.Spoco.get_screen_size()
        print(xy)
        x = xy[0]
        y = xy[1]
        swipe([x * 0.5, y * 0.1], [x * 0.5, y * 0.9])

    def left(self):
        """Swipe left."""
        xy = self.Spoco.get_screen_size()
        print(xy)
        x = xy[0]
        y = xy[1]
        swipe([x * 0.9, y * 0.5], [x * 0.1, y * 0.5])

    def right(self):
        """Swipe right."""
        xy = self.Spoco.get_screen_size()
        print(xy)
        x = xy[0]
        y = xy[1]
        swipe([x * 0.1, y * 0.5], [x * 0.9, y * 0.5])

    def back(self):
        """Back gesture (edge swipe) on full-screen smartphones."""
        xy = self.Spoco.get_screen_size()
        print(xy)
        x = xy[0]
        y = xy[1]
        swipe([x * 0.03, y * 0.5], [x * 0.9, y * 0.5])

    def click(self):
        pass

    def set_text(self, name):
        pass

    def shouquan(self):
        """Handle the phone's permission-request dialog."""
        # Redmi photo-permission popup ("Allow only while using the app")
        if self.is_element_text_exists("仅在使用该应用时允许"):
            self.find_element_by_Text("仅在使用该应用时允许").click()
        # Samsung S20 photo-permission popup ("Allow")
        elif self.is_element_text_exists("允许"):
            self.find_element_by_Text("允许").click()
        else:
            pass
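# --- Usage sketch (illustrative only, not part of the original module) -------
# It assumes an Android device reachable through Airtest and the standard poco
# Android driver; the device URI and the "Login" text below are placeholders.
def _example_pkpoco_usage():
    from airtest.core.api import connect_device
    from poco.drivers.android.uiautomation import AndroidUiautomationPoco

    connect_device("Android:///")       # first adb-connected device (placeholder URI)
    wrapper = PKpoco(AndroidUiautomationPoco())
    wrapper.shouquan()                  # dismiss a permission dialog if one is shown
    if wrapper.is_element_text_exists("Login"):
        wrapper.find_element_by_Text("Login").click()
    wrapper.up()                        # swipe up on the screen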
PypiClean
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/sync.py
import asyncio
import functools
import inspect

from . import events, errors, utils, connection
from .client.account import _TakeoutClient
from .client.telegramclient import TelegramClient
from .tl import types, functions, custom
from .tl.custom import (
    Draft,
    Dialog,
    MessageButton,
    Forward,
    Button,
    Message,
    InlineResult,
    Conversation,
)
from .tl.custom.chatgetter import ChatGetter
from .tl.custom.sendergetter import SenderGetter


def _syncify_wrap(t, method_name):
    method = getattr(t, method_name)

    @functools.wraps(method)
    def syncified(*args, **kwargs):
        coro = method(*args, **kwargs)
        loop = asyncio.get_event_loop()
        return coro if loop.is_running() else loop.run_until_complete(coro)

    # Save an accessible reference to the original method
    setattr(syncified, "__tl.sync", method)
    setattr(t, method_name, syncified)


def syncify(*types):
    """
    Converts all the methods in the given types (class definitions)
    into synchronous, which return either the coroutine or the result
    based on whether ``asyncio's`` event loop is running.
    """
    # Our asynchronous generators all are `RequestIter`, which already
    # provide a synchronous iterator variant, so we don't need to worry
    # about asyncgenfunction's here.
    for t in types:
        for name in dir(t):
            if (
                not name.startswith("_") or name == "__call__"
            ) and inspect.iscoroutinefunction(getattr(t, name)):
                _syncify_wrap(t, name)


syncify(
    TelegramClient,
    _TakeoutClient,
    Draft,
    Dialog,
    MessageButton,
    ChatGetter,
    SenderGetter,
    Forward,
    Message,
    InlineResult,
    Conversation,
)

# Private special case, since a conversation's methods return
# futures (but the public function themselves are synchronous).
_syncify_wrap(Conversation, "_get_result")

__all__ = [
    "TelegramClient",
    "Button",
    "types",
    "functions",
    "custom",
    "errors",
    "events",
    "utils",
    "connection",
]
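# --- Usage sketch (illustrative, not part of Telethon itself) ----------------
# With this module imported, the client's coroutine methods can be called
# without `await` when no asyncio event loop is running. The session name and
# API credentials below are placeholders.
def _example_sync_usage():
    client = TelegramClient("session_name", api_id=12345, api_hash="0123456789abcdef")
    with client:
        me = client.get_me()                               # no await needed here
        client.send_message("me", "Hello from the sync wrapper")
    return me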
PypiClean
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/poco/sdk/interfaces/input.py
__author__ = 'lxn3032'


import warnings


class InputInterface(object):
    """
    This is one of the main communication interfaces. This interface provides the
    ability to perform simulated input on the target device. So far, the interface
    defines only some basic methods. Motion events will be added in the future to
    provide full support for mobile devices.

    All coordinates are in the NormalizedCoordinate system, see
    ``NormalizedCoordinate`` for more details.
    """

    def click(self, x, y):
        """
        Perform a click action as simulated input on the target device.
        Coordinate arguments are all in the range of 0~1.

        Args:
            x (:obj:`float`): x-coordinate
            y (:obj:`float`): y-coordinate
        """

        raise NotImplementedError

    def swipe(self, x1, y1, x2, y2, duration):
        """
        Perform a swipe action as simulated input on the target device, from point A
        to point B, within the given time interval. Coordinate arguments are all in
        the range of 0~1.

        Args:
            x1 (:obj:`float`): x-coordinate of the start point
            y1 (:obj:`float`): y-coordinate of the start point
            x2 (:obj:`float`): x-coordinate of the end point
            y2 (:obj:`float`): y-coordinate of the end point
            duration (:obj:`float`): time interval in which to perform the swipe action
        """

        raise NotImplementedError

    def longClick(self, x, y, duration):
        """
        Perform a long-press action as simulated input on the target device for the
        given number of seconds. Coordinate arguments are all in the range of 0~1.

        Args:
            x (:obj:`float`): x-coordinate
            y (:obj:`float`): y-coordinate
            duration (:obj:`float`): time interval over which to perform the action
        """

        raise NotImplementedError

    def setTouchDownDuration(self, duration):
        """
        Set the touch down duration for each click operation.

        Args:
            duration (:obj:`float`): the duration in seconds
        """

        warnings.warn("This implementation of poco does not support changing the default "
                      "touch down duration. Setting touch down duration to {}s has no effect."
                      .format(duration))

    def getTouchDownDuration(self):
        """
        Get the touch down duration for each click operation. Each implementation
        should provide a default value.

        Return:
            :obj:`float`: the touch down duration in seconds
        """

        raise NotImplementedError

    def keyevent(self, keycode):
        """
        Send a key event to the target device.

        Args:
            keycode (:obj:`int` or :obj:`char`): ASCII key code
        """

        raise NotImplementedError

    def applyMotionEvents(self, events):
        """
        Apply motion events on the device touch panel.

        Args:
            events (:py:obj:`list`): list of events. Each event is MotionEvent data
             (``['u/d/m/s', (x, y), contact_id]``)
        """

        raise NotImplementedError
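# --- Illustrative sketch (not part of the SDK) --------------------------------
# A minimal concrete implementation that simply records the simulated input it
# receives. A real driver would forward these calls to a device; the recording
# backend here is invented purely so the example stays self-contained.
class RecordingInput(InputInterface):
    def __init__(self):
        self.events = []
        self._touch_down_duration = 0.01

    def click(self, x, y):
        # coordinates are normalized (0~1), as documented on the interface
        self.events.append(("click", x, y))

    def swipe(self, x1, y1, x2, y2, duration):
        self.events.append(("swipe", x1, y1, x2, y2, duration))

    def longClick(self, x, y, duration):
        self.events.append(("longClick", x, y, duration))

    def getTouchDownDuration(self):
        return self._touch_down_duration

    def keyevent(self, keycode):
        self.events.append(("keyevent", keycode))

    def applyMotionEvents(self, events):
        self.events.extend(events)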
PypiClean
/FC%20CloudTrails%20SDK-1.0.4.tar.gz/FC CloudTrails SDK-1.0.4/fc/cloudtrailsdk/utils/functions.py
import json
import os
import logging
import sys
import traceback

from fc.cloudtrailsdk.client.tracker import Tracker
from fc.cloudtrailsdk.model.event import Event, ExceptionEvent, DependencyEvent

logger = logging.getLogger(__name__)


def configure_tracker(app_name="", app_version="", tracker_environment=None):
    region = os.environ.get("AWS_REGION")
    tracker_environment = os.environ.get("TRACKER_ENVIRONMENT", "eCloudTrailsStreamQA") \
        if tracker_environment is None else tracker_environment
    tracker = Tracker(
        tracker_environment,
        region=region,
        app_name=app_name,
        app_version=app_version
    )
    return tracker


def send_custom_logger(app_name="", app_version="", tracker_environment=None, **properties):
    try:
        tracker = configure_tracker(app_name=app_name, app_version=app_version,
                                    tracker_environment=tracker_environment)
        custom_event = Event()
        # If the properties contain dimensions, move them onto the event
        if properties.get('Dimensions', None) is not None:
            custom_event.Dimensions.update(properties.get('Dimensions', None))
            properties.pop('Dimensions', None)
        # If the properties contain an EventType, move it onto the event
        if properties.get('EventType', None) is not None:
            custom_event.EventType = properties.get('EventType', None)
            properties.pop('EventType', None)
        custom_event.Properties.update(properties)
        tracker.track_event(custom_event)
    except Exception as e:
        logger.error(str(e))


def send_exception_logger(app_name="", app_version="", tracker_environment=None, **properties):
    try:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tracker = configure_tracker(app_name=app_name, app_version=app_version,
                                    tracker_environment=tracker_environment)
        exception_event = ExceptionEvent(exc_value.__str__(), exc_type.__name__,
                                         traceback.format_exc(), **properties)
        # If the properties contain dimensions, move them onto the event
        if properties.get('Dimensions', None) is not None:
            exception_event.Dimensions.update(properties.get('Dimensions', None))
            exception_event.Properties.pop('Dimensions', None)
        tracker.track_exception(exception_event)
    except Exception as e:
        logger.error(str(e))


def send_dependency_logger(dependency_name, dependency_duration):
    try:
        tracker = configure_tracker()
        dependency_event = DependencyEvent(dependency_name, dependency_duration)
        tracker.track_dependency(dependency_event)
    except Exception as e:
        logger.error(str(e))


def send_cloudtrails_apigateway_event(app_name, app_version, event, response, **properties):
    """
    Send API Gateway request/response data to CloudTrails.
    :param event: API Gateway proxy-integration event
    :param response: response returned by the handler
    :return:
    """
    # Extract the data from the API Gateway proxy-integration event
    request_data = {
        "RequestMethod": event['httpMethod'],
        "RequestScheme": "https",
        "RequestHost": event['headers'].get('Host', ''),
        "RequestPath": event.get('path', ''),
        "RequestQueryString": json.dumps(event.get('queryStringParameters', {})),
        "ClientIP": event['headers'].get('X-Forwarded-For', ''),
        "UserAgent": event['headers'].get('User-Agent', ''),
        "RequestPayload": json.dumps(event.get('body', {})),
        "ResponsePayload": json.dumps(response),
        "ResponseHttpStatus": response.get('code', '0'),
        "Headers": json.dumps(event.get('headers', {}))
    }
    properties.update(request_data)
    send_custom_logger(app_name=app_name, app_version=app_version, **properties)
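# --- Usage sketch (illustrative, not part of the SDK) -------------------------
# How these helpers might be called from an AWS Lambda handler behind API
# Gateway. The app name/version, stage dimension and response payload below are
# placeholders.
def example_lambda_handler(event, context):
    response = {"code": 200, "body": "ok"}
    try:
        send_custom_logger(
            app_name="demo-service",
            app_version="1.0.0",
            EventType="RequestReceived",
            Dimensions={"Stage": "qa"},
            Path=event.get("path", ""),
        )
        send_cloudtrails_apigateway_event("demo-service", "1.0.0", event, response)
    except Exception:
        # report the current exception to the tracker stream
        send_exception_logger(app_name="demo-service", app_version="1.0.0")
    return response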
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/kn.js
;(function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' && typeof require === 'function' ? factory(require('../moment')) : typeof define === 'function' && define.amd ? define(['../moment'], factory) : factory(global.moment) }(this, (function (moment) { 'use strict'; //! moment.js locale configuration var symbolMap = { 1: '೧', 2: '೨', 3: '೩', 4: '೪', 5: '೫', 6: '೬', 7: '೭', 8: '೮', 9: '೯', 0: '೦', }, numberMap = { '೧': '1', '೨': '2', '೩': '3', '೪': '4', '೫': '5', '೬': '6', '೭': '7', '೮': '8', '೯': '9', '೦': '0', }; var kn = moment.defineLocale('kn', { months: 'ಜನವರಿ_ಫೆಬ್ರವರಿ_ಮಾರ್ಚ್_ಏಪ್ರಿಲ್_ಮೇ_ಜೂನ್_ಜುಲೈ_ಆಗಸ್ಟ್_ಸೆಪ್ಟೆಂಬರ್_ಅಕ್ಟೋಬರ್_ನವೆಂಬರ್_ಡಿಸೆಂಬರ್'.split( '_' ), monthsShort: 'ಜನ_ಫೆಬ್ರ_ಮಾರ್ಚ್_ಏಪ್ರಿಲ್_ಮೇ_ಜೂನ್_ಜುಲೈ_ಆಗಸ್ಟ್_ಸೆಪ್ಟೆಂ_ಅಕ್ಟೋ_ನವೆಂ_ಡಿಸೆಂ'.split( '_' ), monthsParseExact: true, weekdays: 'ಭಾನುವಾರ_ಸೋಮವಾರ_ಮಂಗಳವಾರ_ಬುಧವಾರ_ಗುರುವಾರ_ಶುಕ್ರವಾರ_ಶನಿವಾರ'.split( '_' ), weekdaysShort: 'ಭಾನು_ಸೋಮ_ಮಂಗಳ_ಬುಧ_ಗುರು_ಶುಕ್ರ_ಶನಿ'.split('_'), weekdaysMin: 'ಭಾ_ಸೋ_ಮಂ_ಬು_ಗು_ಶು_ಶ'.split('_'), longDateFormat: { LT: 'A h:mm', LTS: 'A h:mm:ss', L: 'DD/MM/YYYY', LL: 'D MMMM YYYY', LLL: 'D MMMM YYYY, A h:mm', LLLL: 'dddd, D MMMM YYYY, A h:mm', }, calendar: { sameDay: '[ಇಂದು] LT', nextDay: '[ನಾಳೆ] LT', nextWeek: 'dddd, LT', lastDay: '[ನಿನ್ನೆ] LT', lastWeek: '[ಕೊನೆಯ] dddd, LT', sameElse: 'L', }, relativeTime: { future: '%s ನಂತರ', past: '%s ಹಿಂದೆ', s: 'ಕೆಲವು ಕ್ಷಣಗಳು', ss: '%d ಸೆಕೆಂಡುಗಳು', m: 'ಒಂದು ನಿಮಿಷ', mm: '%d ನಿಮಿಷ', h: 'ಒಂದು ಗಂಟೆ', hh: '%d ಗಂಟೆ', d: 'ಒಂದು ದಿನ', dd: '%d ದಿನ', M: 'ಒಂದು ತಿಂಗಳು', MM: '%d ತಿಂಗಳು', y: 'ಒಂದು ವರ್ಷ', yy: '%d ವರ್ಷ', }, preparse: function (string) { return string.replace(/[೧೨೩೪೫೬೭೮೯೦]/g, function (match) { return numberMap[match]; }); }, postformat: function (string) { return string.replace(/\d/g, function (match) { return symbolMap[match]; }); }, meridiemParse: /ರಾತ್ರಿ|ಬೆಳಿಗ್ಗೆ|ಮಧ್ಯಾಹ್ನ|ಸಂಜೆ/, meridiemHour: function (hour, meridiem) { if (hour === 12) { hour = 0; } if (meridiem === 'ರಾತ್ರಿ') { return hour < 4 ? hour : hour + 12; } else if (meridiem === 'ಬೆಳಿಗ್ಗೆ') { return hour; } else if (meridiem === 'ಮಧ್ಯಾಹ್ನ') { return hour >= 10 ? hour : hour + 12; } else if (meridiem === 'ಸಂಜೆ') { return hour + 12; } }, meridiem: function (hour, minute, isLower) { if (hour < 4) { return 'ರಾತ್ರಿ'; } else if (hour < 10) { return 'ಬೆಳಿಗ್ಗೆ'; } else if (hour < 17) { return 'ಮಧ್ಯಾಹ್ನ'; } else if (hour < 20) { return 'ಸಂಜೆ'; } else { return 'ರಾತ್ರಿ'; } }, dayOfMonthOrdinalParse: /\d{1,2}(ನೇ)/, ordinal: function (number) { return number + 'ನೇ'; }, week: { dow: 0, // Sunday is the first day of the week. doy: 6, // The week that contains Jan 6th is the first week of the year. }, }); return kn; })));
PypiClean
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-table/src/locale/bootstrap-table-fr-CH.js
$.fn.bootstrapTable.locales['fr-CH'] = { formatCopyRows () { return 'Copy Rows' }, formatPrint () { return 'Print' }, formatLoadingMessage () { return 'Chargement en cours' }, formatRecordsPerPage (pageNumber) { return `${pageNumber} lignes par page` }, formatShowingRows (pageFrom, pageTo, totalRows, totalNotFiltered) { if (totalNotFiltered !== undefined && totalNotFiltered > 0 && totalNotFiltered > totalRows) { return `Affiche de ${pageFrom} à ${pageTo} sur ${totalRows} lignes (filtrés à partir de ${totalNotFiltered} lignes)` } return `Affiche de ${pageFrom} à ${pageTo} sur ${totalRows} lignes` }, formatSRPaginationPreText () { return 'page précédente' }, formatSRPaginationPageText (page) { return `vers la page ${page}` }, formatSRPaginationNextText () { return 'page suivante' }, formatDetailPagination (totalRows) { return `Affiche ${totalRows} lignes` }, formatClearSearch () { return 'Effacer la recherche' }, formatSearch () { return 'Recherche' }, formatNoMatches () { return 'Pas de lignes trouvés' }, formatPaginationSwitch () { return 'Cacher/Afficher pagination' }, formatPaginationSwitchDown () { return 'Afficher pagination' }, formatPaginationSwitchUp () { return 'Cacher pagination' }, formatRefresh () { return 'Rafraichir' }, formatToggle () { return 'Basculer' }, formatToggleOn () { return 'Afficher vue carte' }, formatToggleOff () { return 'Cacher vue carte' }, formatColumns () { return 'Colonnes' }, formatColumnsToggleAll () { return 'Tout basculer' }, formatFullscreen () { return 'Plein écran' }, formatAllRows () { return 'Tout' }, formatAutoRefresh () { return 'Rafraîchissement automatique' }, formatExport () { return 'Exporter les données' }, formatJumpTo () { return 'Aller à' }, formatAdvancedSearch () { return 'Recherche avancée' }, formatAdvancedCloseButton () { return 'Fermer' }, formatFilterControlSwitch () { return 'Cacher/Afficher controls' }, formatFilterControlSwitchHide () { return 'Cacher controls' }, formatFilterControlSwitchShow () { return 'Afficher controls' } } $.extend($.fn.bootstrapTable.defaults, $.fn.bootstrapTable.locales['fr-CH'])
PypiClean
/morelia-0.9.2-py3-none-any.whl/morelia/matchers.py
r""" Steps ===== .. _matching-steps: Matching steps -------------- When Morelia executes steps described in feature files it looks inside passed :py:class:`unittest.TestCase` object and search for methods which name starts with `step_`. Then it selects correct method using: * `Regular expressions`_ * `Format-like strings`_ * `Method names`_ If you look in example from :ref:`usage-guide`: .. code-block:: python # test_acceptance.py from pathlib import Path import unittest from morelia import verify class CalculatorTestCase(unittest.TestCase): def test_addition(self): ''' Addition feature ''' filename = Path(__file__) / "calculator.feature" verify(filename, self) def step_I_have_powered_calculator_on(self): r'I have powered calculator on' self.stack = [] def step_I_enter_a_number_into_the_calculator(self, number): r'I enter "(\d+)" into the calculator' # match by regexp self.stack.append(int(number)) def step_I_press_add(self): # matched by method name self.result = sum(self.stack) def step_the_result_should_be_on_the_screen(self, number): r'the result should be "{number}" on the screen' # match by format-like string self.assertEqual(int(number), self.result) You'll see three types of matching. Regular expressions ^^^^^^^^^^^^^^^^^^^ Method ``step_I_enter_number_into_the_calculator`` from example is matched by :py:mod:`regular expression <re>` as it's docstring .. code-block:: python r'I enter "(\d+)" into the calculator' matches steps: .. code-block:: cucumber When I enter "50" into the calculator And I enter "70" into the calculator Regular expressions, such as ``(\d+)``, are expanded into positional step arguments, such as ``number`` in above example. If you would use named groups like ``(?P<number>\d+)`` then capttured expressions from steps will be put as given keyword argument to method. Remember to use tight expressions, such as ``(\d+)``, not expressions like ``(\d*)`` or ``(.*)``, to validate your input. Format-like strings ^^^^^^^^^^^^^^^^^^^ Method ``step_the_result_should_be_on_the_screen`` from example is matched by :ref:`format-like strings <python:formatspec>` as it's docstring .. code-block:: python r'the result should be "{number}" on the screen' matches step: .. code-block:: cucumber Then the result should be "120" on the screen Method names ^^^^^^^^^^^^ Method ``step_I_press_add`` from example is matched by method name which matches step: .. code-block:: cucumber And I press add Choosing which matchers to use ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By default morelia search for methods using in order: - format-like strings matcher ("parse" matcher) - regex matcher ("regex" matcher) - method names matcher ("method" matcher) You can override it in your pyproject.toml file. E.g.: .. code-block:: toml [tool.morelia.default] matchers=["regex", "parse"] .. _matching-tables: Tables ------ If you use Scenarios with tables and `<angles>` around the payload variable names: .. 
code-block:: cucumber Scenario: orders above $100.00 to the continental US get free ground shipping When we send an order totaling $<total>, with a 12345 SKU, to our warehouse And the order will ship to <destination> Then the ground shipping cost is $<cost> And <rapid> delivery might be available | total | destination | cost | rapid | | 98.00 | Rhode Island | 8.25 | yes | | 101.00 | Rhode Island | 0.00 | yes | | 99.00 | Kansas | 8.25 | yes | | 101.00 | Kansas | 0.00 | yes | | 99.00 | Hawaii | 8.25 | yes | | 101.00 | Hawaii | 8.25 | yes | | 101.00 | Alaska | 8.25 | yes | | 99.00 | Ontario, Canada | 40.00 | no | | 99.00 | Brisbane, Australia | 55.00 | no | | 99.00 | London, United Kingdom | 55.00 | no | | 99.00 | Kuantan, Malaysia | 55.00 | no | | 101.00 | Tierra del Fuego | 55.00 | no | then that Scenario will unroll into a series of scenarios, each with one value from the table inserted into their placeholders `<total>`, `<destination>`, and `<rapid>`. So this step method will receive each line in the "destination" column: .. code-block:: python def step_the_order_will_ship_to_(self, location): r'the order will ship to (.*)' (And observe that naming the placeholder the same as the method argument is a *reeeally* good idea, but naturally unenforceable.) Morelia will take each line of the table, and construct a complete test case out of the Scenario steps, running :py:meth:`unittest.TestCase.setUp()` and :py:meth:`unittest.TestCase.tearDown()` around them. If you use many tables then Morelia would use permutation of all rows in all tables: .. code-block:: cucumber Scenario: orders above $100.00 to the continental US get free ground shipping When we send an order totaling $<total>, with a 12345 SKU, to our warehouse And the order will ship to <destination> And we choose that delivery should be <speed> | speed | | rapid | | regular | Then the ground shipping cost is $<cost> | total | destination | cost | | 98.00 | Rhode Island | 8.25 | | 101.00 | Rhode Island | 0.00 | | 99.00 | Kansas | 8.25 | In above example 2 * 3 = 6 different test cases would be generated. .. _matching-docstrings: Doc Strings ----------- Docstrings attached to steps are passed as keyword argument `_text` into method: .. code-block:: cucumber Feature: Addition In order to avoid silly mistakes As a math idiot I want to be told the sum of two numbers Scenario: Add two numbers Given I have powered calculator on When I enter "50" into the calculator And I enter "70" into the calculator And I press add Then I would see on the screen ''' Calculator example ================== 50 +70 --- 120 ''' .. code-block:: python def step_i_would_see_on_the_screen(self, _text): pass # or def step_i_would_see_on_the_screen(self, **kwargs): _text = kwargs.pop('_text') Morelia is smart enough not to passing this argument if you don't name it. Below example won't raise exception: .. code-block:: python def step_i_would_see_on_the_screen(self): pass It'll be simply assumed that you ignore docstring. .. _labels-matching: Labels ------ Labels attached to features and scenarios are available as keyword argument `_label`: .. code-block:: cucumber @web @android @ios Feature: Addition In order to avoid silly mistakes As a math idiot I want to be told the sum of two numbers @wip Scenario: Add two numbers Given I have powered calculator on When I enter "50" into the calculator And I enter "70" into the calculator And I press add Then the result should be "120" on the screen .. 
code-block:: python def step_I_enter_number_into_the_calculator(self, num, _label): pass As like with doc-strings you can ommit keyword parameter if you don't need it: .. code-block:: python def step_I_enter_number_into_the_calculator(self, num): pass Labels allows you to implement custom logic depending on labels given. .. note:: **Compatibility** Morelia does not connects any custom logic with labels as some other Behavior Driven Development tools do. You are put in the charge and should add logic if any. If you are looking for ability to selectivly running features and scenarios look at :py:func:`morelia.decorators.tags` decorator. Matchers Classes ---------------- """ import re import unicodedata from abc import ABCMeta, abstractmethod import parse class IStepMatcher: """Matches methods to steps. Subclasses should implement at least `match` and `suggest` methods. """ __metaclass__ = ABCMeta def __init__(self, suite, step_pattern="^step_"): self._suite = suite self._matcher = re.compile(step_pattern) self._next = None def _get_all_step_methods(self): match = self._matcher.match return [method_name for method_name in dir(self._suite) if match(method_name)] def add_matcher(self, matcher): """Add new matcher at end of CoR. :param IStepMatcher matcher: matcher to add :returns: self """ if self._next is None: self._next = matcher else: self._next.add_matcher(matcher) return self def find(self, predicate, augmented_predicate, step_methods=None): if step_methods is None: step_methods = self._get_all_step_methods() method, args, kwargs = self.match(predicate, augmented_predicate, step_methods) if method: return method, args, kwargs if self._next is not None: return self._next.find(predicate, augmented_predicate, step_methods) return None, (), {} @abstractmethod def match(self, predicate, augmented_predicate, step_methods): """Match method from suite to given predicate. :param str predicate: step predicate :param str augmented_predicate: step augmented_predicate :param list step_methods: list of all step methods from suite :returns: (method object, args, kwargs) :rtype: (method, tuple, dict) """ pass # pragma: nocover def suggest(self, predicate): """Suggest method definition. Method is used to suggest methods that should be implemented. 
:param str predicate: step predicate :returns: (suggested method definition, suggested method name, suggested docstring) :rtype: (str, str, str) """ docstring, extra_arguments = self._suggest_doc_string(predicate) method_name = self.slugify(predicate) suggest = " def step_{method_name}(self{args}):\n {docstring}\n\n raise NotImplementedError('{predicate}')\n\n".format( method_name=method_name, args=extra_arguments, docstring=docstring, predicate=predicate.replace("'", "\\'"), ) return suggest, method_name, docstring def _suggest_doc_string(self, predicate): predicate = predicate.replace("'", r"\'").replace("\n", r"\n") arguments = self._add_extra_args(r'["\<](.+?)["\>]', predicate) arguments = self._name_arguments(arguments) predicate = self.replace_placeholders(predicate, arguments) predicate = re.sub(r" \s+", r"\\s+", predicate) arguments = self._format_arguments(arguments) return "r'%s'" % predicate, arguments def _name_arguments(self, extra_arguments): if not extra_arguments: return "" arguments = [] number_arguments_count = sum( 1 for arg_type, arg in extra_arguments if arg_type == "number" ) if number_arguments_count < 2: num_suffixes = iter([""]) else: num_suffixes = iter(range(1, number_arguments_count + 1)) for arg_type, arg in extra_arguments: if arg_type == "number": arguments.append("number%s" % next(num_suffixes)) else: arguments.append(arg) return arguments def _format_arguments(self, arguments): if not arguments: return "" return ", " + ", ".join(arguments) def replace_placeholders(self, predicate, arguments): predicate = re.sub(r'".+?"', '"([^"]+)"', predicate) predicate = re.sub(r"\<.+?\>", "(.+)", predicate) return predicate def _add_extra_args(self, matcher, predicate): args = re.findall(matcher, predicate) result = [] for arg in args: try: float(arg) except ValueError: arg = ("id", self.slugify(arg)) else: arg = ("number", arg) result.append(arg) return result def slugify(self, predicate): result = [] for part in re.split(r"[^\w]+", predicate): part = ( unicodedata.normalize("NFD", part) .encode("ascii", "replace") .decode("utf-8") ) part = part.replace("??", "_").replace("?", "") try: float(part) except ValueError: pass else: part = "number" result.append(part) return "_".join(result).strip("_") class MethodNameStepMatcher(IStepMatcher): """Matcher that matches steps by method name.""" def match(self, predicate, augmented_predicate, step_methods): """See :py:meth:`IStepMatcher.match`.""" matches = self.__find_matching_methods(step_methods, predicate) return self.__select_best_match(matches) def __find_matching_methods(self, step_methods, predicate): clean = re.sub(r"[^\w]", "_?", predicate) pattern = "^step_" + clean + "$" regexp = re.compile(pattern) for method_name in step_methods: if regexp.match(method_name): method = self._suite.__getattribute__(method_name) yield (method, (), {}) def __select_best_match(self, matches): try: best_match = next(iter(matches)) except StopIteration: return None, (), {} else: method, args, kwargs = best_match return method, args, kwargs def suggest(self, predicate): """See :py:meth:`IStepMatcher.suggest`.""" method_name = self.slugify(predicate) suggest = " def step_{method_name}(self):\n\n raise NotImplementedError('{predicate}')\n\n".format( method_name=method_name, predicate=predicate.replace("'", "\\'") ) return suggest, method_name, "" def slugify(self, predicate): predicate = ( unicodedata.normalize("NFD", predicate) .encode("ascii", "replace") .decode("utf-8") ) predicate = predicate.replace("??", "_").replace("?", "") return 
re.sub(r"[^\w]+", "_", predicate, re.U).strip("_") class RegexpStepMatcher(IStepMatcher): """Matcher that matches steps by regexp in docstring.""" def match(self, predicate, augmented_predicate, step_methods): """See :py:meth:`IStepMatcher.match`.""" matches = self.__find_matching_methods(step_methods, augmented_predicate) return self.__select_best_match(matches) def __find_matching_methods(self, step_methods, augmented_predicate): for method, doc in self.__find_methods_with_docstring(step_methods): doc = re.compile("^" + doc + "$") match = doc.match(augmented_predicate) if match: kwargs = match.groupdict() if not kwargs: args = match.groups() else: args = () yield (method, args, kwargs) return None, (), {} def __find_methods_with_docstring(self, step_methods): for method_name in step_methods: method = self._suite.__getattribute__(method_name) doc = method.__doc__ if doc: yield method, doc def __select_best_match(self, matches): try: best_match = next(iter(matches)) except StopIteration: return None, (), {} else: method, args, kwargs = best_match return method, args, kwargs class ParseStepMatcher(IStepMatcher): """Matcher that matches steps by format-like string in docstring.""" def match(self, predicate, augmented_predicate, step_methods): """See :py:meth:`IStepMatcher.match`.""" matches = self.__find_matching_methods(step_methods, augmented_predicate) return self.__select_best_match(matches) def __find_matching_methods(self, step_methods, augmented_predicate): for method, doc in self.__find_methods_with_docstring(step_methods): match = parse.parse(doc, augmented_predicate) if match: args = match.fixed kwargs = match.named yield (len(args) + len(kwargs), method, tuple(args), kwargs) def __find_methods_with_docstring(self, step_methods): for method_name in step_methods: method = self._suite.__getattribute__(method_name) doc = method.__doc__ if doc: yield method, doc def __select_best_match(self, matches): matches = sorted(matches, reverse=True) try: best_match = next(iter(matches)) except StopIteration: return None, (), {} else: _, method, args, kwargs = best_match return method, args, kwargs def replace_placeholders(self, predicate, arguments): arguments = iter(arguments) def repl(match): if match.group(0).startswith('"'): return '"{%s}"' % next(arguments) return "{%s}" % next(arguments) predicate = re.sub(r'".+?"|\<.+?\>', repl, predicate) return predicate
PypiClean
/HafrenHaver-1.0.277.tar.gz/HafrenHaver-1.0.277/README.md
# HafrenHaver Arcane Audio-Visual Computations ========== [![CircleCI](https://img.shields.io/circleci/build/github/InnovAnon-Inc/HafrenHaver?color=%23FF1100&logo=InnovAnon%2C%20Inc.&logoColor=%23FF1133&style=plastic)](https://circleci.com/gh/InnovAnon-Inc/HafrenHaver) [![Repo Size](https://img.shields.io/github/repo-size/InnovAnon-Inc/HafrenHaver?color=%23FF1100&logo=InnovAnon%2C%20Inc.&logoColor=%23FF1133&style=plastic)](https://github.com/InnovAnon-Inc/HafrenHaver) ![Lines of code](https://img.shields.io/tokei/lines/github/InnovAnon-Inc/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![CodeFactor Grade](https://img.shields.io/codefactor/grade/github/InnovAnon-Inc/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) [![Latest Release](https://img.shields.io/github/commits-since/InnovAnon-Inc/HafrenHaver/latest?color=%23FF1100&include_prereleases&logo=InnovAnon%2C%20Inc.&logoColor=%23FF1133&style=plastic)](https://github.com/InnovAnon-Inc/HafrenHaver/releases/latest) ![Libraries.io dependency status for GitHub repo](https://img.shields.io/librariesio/github/InnovAnon-Inc/HafrenHaver?color=FF1100&logoColor=FF1133&style=plastic) [![License Summary](https://img.shields.io/github/license/InnovAnon-Inc/HafrenHaver?color=%23FF1100&label=Free%20Code%20for%20a%20Free%20World%21&logo=InnovAnon%2C%20Inc.&logoColor=%23FF1133&style=plastic)](https://tldrlegal.com/license/unlicense#summary) ![PyPI - Implementation](https://img.shields.io/pypi/implementation/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - Wheel](https://img.shields.io/pypi/wheel/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - Downloads](https://img.shields.io/pypi/dd/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - Format](https://img.shields.io/pypi/format/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - Status](https://img.shields.io/pypi/status/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI - License](https://img.shields.io/pypi/l/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![PyPI](https://img.shields.io/pypi/v/HafrenHaver?color=FF1100&logo=InnovAnon-Inc&logoColor=FF1133&style=plastic) ![Dependent repos (via libraries.io)](https://img.shields.io/librariesio/dependent-repos/pypi/HafrenHaver?color=FF1100&style=plastic) ![Libraries.io dependency status for latest release](https://img.shields.io/librariesio/release/pypi/HafrenHaver?color=FF1100&style=plastic) ![Libraries.io SourceRank](https://img.shields.io/librariesio/sourcerank/pypi/HafrenHaver?style=plastic) (Work in progress...) Proof of concept for InnovAnon's Human Programming Technology; a tool for accelerating alchemical works. The core functionality will be a myriad apps for modeling, viewing and controlling complex (and often cyclical) data. Transposing a harmonic structure (i.e., song) should be as simple as turning a dial. There should also be ways for comparing harmonic structures, such as the differences between scales and chords under different tuning systems. The goal is to provide high level descriptions of music, graphical layouts, lyrics, etc., and to generate sounds and visuals satisfying the constraints. 
And to use known spiritual technologies to enhancs or replace rigorous sadhanas with a form of worship better suited to the Western lifestyle: relatively instant results, presented in a gamified fashion so its fun. Bald eagles and cheeseburgers! Currently, the implementation of song meter involves a combination of cadences (binary sequences specifying whether one thing is the same as or different than another) and hash functions (to implement repetition of motifs... i.e., a section cadence may specify that first and second phrases differ from each other, but rather than generating a unique phrase cadence for the second one, it might reuse a phrase cadence from elsewhere in the piece). # SevernSieve (TODO needs to be rewritten. expensive ops don't have to be done so often) P.o.C. for variable-length wheel factorization ========== An implementation of variable-length wheel factorization (a compression technique for the sieve of eratosthenes) that theoretically has a slightly lower upper bound to its asymptotic complexity. There's one problem of efficiency to be resolved in the implementation. Hashing could be used as a work-around until a formula is derived. # HAL Primitive Precursor of the YellShell ========== Uses speech recognition to convert voice commands to text, which are preprocessed (i.e., stopwords are removed, NATO phonetics are converted, etc.), then interpreted as an English-like domain-specific language, converting the commands to Event objects the underlying GUI can recognize and forward to the contained App(s). # CircleApp, SquareApp, AngleApp, PentaApp, HexaApp (TODO semi-complete) App Geometries based on 2D Projections of Platonic Solids ========== Animations can rotate the underlying 3D shape implied by these app geometries, implying that what we perceive is the shadow of the true nature of things. # RecursiveComposite Automatic Fractalizer to Fill Negative Space caused by Null Child Nodes ========== Implies the fractal nature of existence. # Magic/Matrix Display Arbitrary Code using Patterns based on (Relatively) Prime Numbers ========== Implies the mathematical nature of existence. Code is preprocessed for display. Comments and imports are removed. TODO shorten/rename variables Currently available in Circular Ring pattern and in filled square pattern. TODO filled circle and rectangular frame. TODO figure out something with triangles, etc. # AestheticLayout (TODO) Layout Manager for Non-Standard Geometries ========== The specialized layout manager will manage app geometries, positive and negative space, etc., as well as framing sets of apps. These "frames" will be decorated with designs such as the recursive composite and the matrix text. # GPS App (TODO temp, pressure, alt display and underlying middleware) Graphical Representation of an Astronomical Observer ========== Allows user to select map projection, centering the projection at the user's coordinates. Uses an exotic-style display for temperature, pressure and altitude. # Classical Clock App Graphical Representation of the Classical Time given an Astronomical Observer ========== It's gonna be steam punk, yo. 
# Solfeggio App (TODO) Graphical Selector for Base Frequency ========== Two Styles: - Using traditional solfeggio frequencies, and the option of standard or classical hertz - Empirical tuning, initial guess based on temperature and pressure # ColorManager (TODO) Manages Color Palettes and Color Schemes for Apps ========== Color palettes as a function of base frequency, scale, and brainwave: the formula for synesthesia. The above subprojects are the requirements for the pre-alpha release: Tuning the Yellow Bell The alpha release will actually have sound. # OnePunch (TODO) Paginated Rate-Limit-Aware Memoizing Cache for REST APIs ========== Keeps track of API and artist credits. With logic for recycling cached results and fetching new results. Maybe with load balancing. # Usage Notes Common Operations ========== - (Un)Install (TODO): ```python3 -m pip {|un}install --upgrade .``` - Install API Keys: ```grep -qF '*.key' .gitignore && echo -n '<API KEY>' >| <funcname>.key``` - Run Unit Tests (TODO): ```for k in *.py ; do PATH=.:$PATH $k || break ; done``` - Blast the Caches: ```rm *.cache``` # TODO ---------- - binaural beats - isochronic pulses (including graphics) - monaural beats - other effects... like phasing - poetic meter - subliminal programming - ndimensional and possibly true non-euclidean topologies # Underlying Concepts ---------- - graphics based on sacred geometry - automatic management of aesthetics: - color schemes - layouts - balancing circular vs angular geometries - animation speeds and types - management of tick speeds, including sample rate and frame rate - exploring in a reasonable way the vast harmonic space made possible by the combination of: - just intonation - exotic modes - synchronization of: - implied isochronic pulse caused by polyrhythms - acoustic beat (i.e., monaural beat) caused by polytonic harmonies - color palette (colors selected as a function of base frequency and scale) - tempo - fractals - tuning the yellow bell: how to select solfeggio frequencies (i.e., base frequencies), scales and modes. - computing from the ground up with a good college try given to abandoning musical tradition while maintaining an awareness of it... in other words, traditional structures in Western and Eastern music should be reduced to a collection of presets. Ultimately, it should break free from the preset scale length of 12 notes, instead deriving scale lengths using the harmonic variation of Euclid's algorithm. # Project Name ---------- HafrenHaver (verb): To go on and on about a legendary British princess who was drowned in the River Severn by her repudiated stepmother Gwendolen. SevernSieve (noun): A device for separating wanted elements from unwanted material in the River Severn. # Purpose ---------- To what end are we engineering literal mind control technology? The state of the art in brainwave entrainment technology is really quite amazing, making attaining advanced states of consciousness that previously took decades (or lifetimes according to some dogmas) possible within a few months of consistent practice. The implications of this is that the neigh-mythical state of turiya is easily accessible to even "armchair" mystics. However, the implementations of these technologies are lagging behind the current research. 
Listening to isochronic pulses can be nerve-wracking and downright repetitive, even when masked with pink noise such as nature sounds and buzzword musical textures, such as singing bowls, which is known to reduce the effectiveness of the isochronic technologies. Thus, the state of the art is to either shine a proverbial laser into your ear-balls or to reduce the effectiveness of the technology. Furthermore, isochronic technology is known to be effective over both audible and visual wavelengths, but few known implementations implement the latter, much less both. This framework will make it quick and easy to stand up apps that have these technologies embedded in them at a fundamental level. While our focus is on generating tolerable pink noise and creating an interface for controlling the necessary models, the middleware will be able to manage layouts, colorschemes, audio, etc., for any sort of app, including games: imagine, for example, a tetris implementation that can induce trancelike states, enhancing the effectiveness of positive affirmations to enhance your gaming experience. Play games, get turiya for free. That layer will enable a useful subproject: gamifying the learning process, as well as generating mnemonic songs given plain natural language statements, and facilitating the synchronization of the classroom in reciting these songs. For anyone who has taken classical Latin, this process technique should be familiar. In a nutshell: - target a cymatic base frequency, such as salinated water - use prime number math (plus a layer for keys, tetrachords, scales, modes and chords) for all tempos, acoustic beats (caused by harmonies) and even color schemes - induce a particular brainwave frequency via the combination of monaural beats, visual and audible isochronic pulses So... what's the result like? Imagine that you spend all day working on some great project, you inevitably get tired and go to sleep, but when you enter your dream cycle, you remember that break time is over, sit down at your terminal, and continue coding in the dream world, switching to your desk with a paper and pencil if the terminal is getting too dreamlike, then, when you wake up refreshed, and continue coding in the waking world, the code is easier and quicker to write, because you've already written it before. That is what is possible in a Level 1 Dream State on a night when your turiya is strong enough. Though being the world's most productive office worker is not the point. That's just the beginning of what can be achieved: that's only a Level 1 Dream State. These technologies will help make the other dream levels accessible as well, if used while sleeping. After attaining a Level 4 Dream State, the next strange state of consciousness is Level 4 Ascension, when you begin to dream in four dimensions. Admittedly, the advanced topologies will require exotic physics engines, including multidimensional raycasting at a minimum, as well as curious computations related to audio engineering. That is a TODO for far in the future, but that will make Level 4 Ascension accessible to the masses, at least the ones who have stereophonic input to their listening devices--headphones for their ears. That layer will enable a fun subproject, a massive undertaking in its own right: a sandbox to merge game genres into a single universe, allowing for focus from FPS-scale gameplay to simtower-, simcity-, civ-level and beyond. 
There will be a focus on randomized content generation; AIs will be trained within this sandbox, in an effort to generate more realistic, randomized buildings and city structures: Ph'nglui mglw'nafh Cthulhu R'lyeh wgah'nagl fhtagn. Once completed, this project should contribute to our effort to achieve a critical mass of individuals who have achieved turiya and Level 4 Ascension, which should, in theory, make these abilities the default for human consciousness. Bigger picture: what next after we make the three dimensional world obselete for the collective unconscious? Well, we build drones to replace our niche in the ecosystem, making ourselves obselete to this world, and turn our attention toward getting the Hell off this rock. # Credits ---------- The lionshare of the credit goes to that inner monologue that gives voice to subtle and vague impressions from the subconscious mind-brain (yeah... "mind-brain") or perhaps a supernatural entity that nobody has actually seen. Thanks InnovAnon! S. Faust for his exploration of turiya and its possibilities and for his mentorship. TruthStream: their entertaining spin on these technologies has been downright inspirational during this undertaking. Other credits are specifically linked in the source, such as StackOverflow threads where the good people ensured forward progress, even on days when I watched several sunrises. # Dedication ---------- Honorable mention to known Western code monks, who may not have contributed anything to this particular project, but rather to the Western cyber-monastic tradition in general: - Terry A. Davis - Sasha Gallagher # License ---------- If we wrote it, then you own it: we write technology that no one should have, and release it to the public domain... one sinister line at a time. Other code, obviously, retains the original licenses. # Innovations Anonymous Free Code for a Free World! ========== ![Corporate Logo](https://innovanon-inc.github.io/assets/images/logo.gif)
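To make the entrainment terms used above concrete, here is a rough NumPy sketch of the two core signals described (a monaural beat and an isochronic pulse). It is illustrative only and not part of this package; the sample rate, carrier frequency and pulse rate are arbitrary placeholder values.

```python
import numpy as np

SR = 44100          # sample rate (Hz)
DURATION = 2.0      # seconds
CARRIER = 432.0     # arbitrary base frequency, for illustration only
BEAT = 8.0          # target entrainment rate (Hz)

t = np.linspace(0, DURATION, int(SR * DURATION), endpoint=False)

# Monaural beat: two tones BEAT Hz apart, mixed into a single channel.
monaural = 0.5 * (np.sin(2 * np.pi * CARRIER * t)
                  + np.sin(2 * np.pi * (CARRIER + BEAT) * t))

# Isochronic pulse: the same carrier, switched on and off at BEAT Hz.
gate = (np.sin(2 * np.pi * BEAT * t) > 0).astype(float)
isochronic = np.sin(2 * np.pi * CARRIER * t) * gate
```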
PypiClean
/Flask-Views-0.2.1.tar.gz/Flask-Views-0.2.1/flask_views/edit.py
from flask import request, redirect from flask_views.base import TemplateResponseMixin, View class FormMixin(object): """ Mixin for handling form submissions. """ form_class = None """ Set this to the form class (WTForms) you want to use. .. seealso:: http://wtforms.simplecodes.com/ """ initial = {} """ Set this to the initial data for form fields. Example:: initial = { 'title': 'Initial title value', 'body': 'Initial body value.', } """ success_url = None """ Set this to the URL the user should be redirected to after a successful form submission. """ def get_initial(self): """ Return initial form data. Override this method when the initial form data should be generated dynamically. By default this returns :py:data:`~flask_views.edit.FormMixin.initial`. :return: :py:data:`~flask_views.edit.FormMixin.initial`. """ return self.initial def get_success_url(self): """ Return success URL. Override this method when the success URL depends on the submitted form data or when it should be generated during an request. By default, this returns :py:data:`~flask_views.edit.FormMixin.success_url`. :return: :py:data:`~flask_views.edit.FormMixin.success_url`. """ return self.success_url def get_context_data(self, **kwargs): """ Return a ``dict`` containing the context data. :return: A ``dict`` containing the given keyword arguments. """ return kwargs def get_form_kwargs(self): """ Return parameters for creating the form instance. :return: A ``dict`` containing the arguments for creating the form instance. """ kwargs = {'formdata': request.form} kwargs.update(self.get_initial()) return kwargs def get_form(self): """ Return an instance of the form class. :return: Instance :py:class:`~flask_views.edit.FormMixin.form_class`. """ return self.form_class(**self.get_form_kwargs()) def form_valid(self, form): """ Handle valid form submission. This redirects the user to the URL returned by :py:meth:`~flask_views.edit.FormMixin.get_success_url`. You want to override this method for processing the submitted form data. :param form: Instance of the form. :return: Redirect to URL returned by :py:meth:`~flask_views.edit.FormMixin.get_success_url`. """ return redirect(self.get_success_url()) def form_invalid(self, form): """ Handle invalid form submission. This will render the response with the current ``form`` in the context so that the errors can be displayed to the user. :param form: Instance of the form class. :return: Response containing the ``form`` in the context. """ return self.render_to_response(self.get_context_data(form=form)) class ProcessFormMixin(object): """ Mixin for processing form data on GET and POST requests. """ methods = ['GET', 'POST'] def get(self, *args, **kwargs): """ Handler for GET requests. This will call ``render_to_response`` with an instance of the form as ``form`` in the context data. :return: Output of ``render_to_response`` method implementation. """ form = self.get_form() return self.render_to_response(self.get_context_data(form=form)) def post(self, *args, **kwargs): """ Handler for POST requests. On a valid form submission, this will dispatch the request to the ``form_valid`` method, else it is dispatched to ``form_invalid``. :return: Output of ``form_valid`` or ``form_invalid``. """ form = self.get_form() if form.validate(): return self.form_valid(form) else: return self.form_invalid(form) class BaseFormView(FormMixin, ProcessFormMixin, View): """ Base view for displaying a form. 
This class inherits from: * :py:class:`.FormMixin` * :py:class:`.ProcessFormMixin` * :py:class:`.View` This class implements all logic for processing forms, but does not include rendering responses. See :py:class:`.FormView` for an usage example. """ class FormView(TemplateResponseMixin, BaseFormView): """ View for displaying a form, including rendering of template. This class inherits from: * :py:class:`.TemplateResponseMixin` * :py:class:`.BaseFormView` This class implements all logic for displaying and processing form submissions, including rendering of templates. Usage example:: class ContactFormView(FormView): form_class = ContactForm template_name = 'contact_form.html' def form_valid(self, form): # Do something with the submitted form data return super(ContactFormView, self).form_valid(form) def get_success_url(self): return url_for('contact.form') An instance of the form class will be available in the template context under the ``form`` variable. """
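The docstring above already sketches a ``ContactFormView``; below is a hedged, self-contained sketch of how such a view might be wired into a Flask application. ``ContactForm`` is a hypothetical WTForms form, and the ``as_view()`` registration hook is assumed to be provided by the ``View`` base class in the style of Flask/Django class-based views — verify against the package's own documentation before relying on it.

```python
from flask import Flask, url_for
from wtforms import Form, StringField

from flask_views.edit import FormView


class ContactForm(Form):
    # hypothetical form with a single field
    name = StringField("name")


class ContactFormView(FormView):
    form_class = ContactForm
    template_name = "contact_form.html"

    def form_valid(self, form):
        # do something with the submitted form data here
        return super(ContactFormView, self).form_valid(form)

    def get_success_url(self):
        return url_for("contact_form")


app = Flask(__name__)
app.add_url_rule("/contact/",
                 view_func=ContactFormView.as_view("contact_form"))
```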
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/image/FlickrBadge.js.uncompressed.js
define("dojox/image/FlickrBadge", ["dojo", "dojox/main", "dojox/image/Badge", "dojox/data/FlickrRestStore"], function(dojo, dojox){ dojo.getObject("image", true, dojox); return dojo.declare("dojox.image.FlickrBadge", dojox.image.Badge, { children: "a.flickrImage", // userid: String // If you know your Flickr userid, you can set it to prevent a call to fetch the id userid: "", // username: String // Your Flickr username username: "", // setid: String // The id of the set to display setid: "", // tags: String|Array // A comma separated list of tags or an array of tags to grab from Flickr tags: "", // searchText: String // Free text search. Photos who's title, description, or tags contain the text will be displayed searchText: "", // target: String // Where to display the pictures when clicked on. Valid values are the same as the target attribute // of the A tag. target: "", apikey: "8c6803164dbc395fb7131c9d54843627", _store: null, postCreate: function(){ if(this.username && !this.userid){ var def = dojo.io.script.get({ url: "http://www.flickr.com/services/rest/", preventCache: true, content: { format: "json", method: "flickr.people.findByUsername", api_key: this.apikey, username: this.username }, callbackParamName: "jsoncallback" }); def.addCallback(this, function(data){ if(data.user && data.user.nsid){ this.userid = data.user.nsid; if(!this._started){ this.startup(); } } }); } }, startup: function(){ if(this._started){ return; } if(this.userid){ var query = { userid: this.userid }; if(this.setid){ query["setid"] = this.setid; } if(this.tags){ query.tags = this.tags; } if(this.searchText){ query.text = this.searchText; } var args = arguments; this._store = new dojox.data.FlickrRestStore({ apikey: this.apikey }); this._store.fetch({ count: this.cols * this.rows, query: query, onComplete: dojo.hitch(this, function(items){ dojo.forEach(items, function(item){ var a = dojo.doc.createElement("a"); dojo.addClass(a, "flickrImage"); a.href = this._store.getValue(item, "link"); if(this.target){ a.target = this.target; } var img = dojo.doc.createElement("img"); img.src = this._store.getValue(item, "imageUrlThumb"); dojo.style(img, { width: "100%", height: "100%" }); a.appendChild(img); this.domNode.appendChild(a); }, this); dojox.image.Badge.prototype.startup.call(this, args); }) }); } } }); });
PypiClean
/Gbtestapi-0.1a10-py3-none-any.whl/gailbot/core/utils/download.py
import shutil
from typing import List
from tqdm.auto import tqdm
import requests
from zipfile import ZipFile
import os
import socket
from .logger import makelogger

logger = makelogger("download")


def download_from_urls(
    urls: List[str], download_dir: str, unzip: bool = True, chunkSize: int = 8192
) -> List[str]:
    """
    Download from a list of urls and return a path to the directory
    containing the data from each url
    """
    # Create paths
    dataset_download_path = os.path.join(download_dir, "download")
    dataset_extract_path = download_dir
    if os.path.isdir(dataset_download_path):
        shutil.rmtree(dataset_download_path)
    os.makedirs(dataset_download_path)
    os.makedirs(dataset_extract_path, exist_ok=True)

    # Download each url as a zip file.
    extracted_paths = []
    for i, url in enumerate(urls):
        # Create a temp. dir for this specific url
        name = os.path.splitext(os.path.basename(url))[0]
        url_temp_path = "{}.zip".format(os.path.join(dataset_download_path, name))
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            pbar = tqdm(
                total=int(r.headers.get("content-length", 0)),
                desc="{}".format(name),
            )
            with open(url_temp_path, "wb+") as f:
                for chunk in r.iter_content(chunk_size=chunkSize):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        pbar.update(len(chunk))
        if unzip:
            with ZipFile(url_temp_path, "r") as zipObj:
                # Extract all the contents of zip file in different directory
                extract_path = os.path.join(dataset_extract_path, name)
                extracted_paths.append(extract_path)
                if os.path.exists(extract_path):
                    shutil.rmtree(extract_path)
                os.makedirs(extract_path)
                zipObj.extractall(extract_path)

    # Remove the temp folders
    shutil.rmtree(dataset_download_path)
    return extracted_paths


def is_internet_connected() -> bool:
    """
    True if connected to the internet, false otherwise
    """
    try:
        # connect to the host -- tells us if the host is actually
        # reachable
        sock = socket.create_connection(("www.google.com", 80))
        if sock is not None:
            print("Closing socket")
            sock.close()  # actually close the connection
            return True
    except OSError:
        pass
    return False
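A brief usage sketch for the helpers above; the URL and target directory are placeholders, not real endpoints.

```python
from gailbot.core.utils.download import download_from_urls, is_internet_connected

if is_internet_connected():
    extracted = download_from_urls(
        urls=["https://example.com/sample-data.zip"],  # placeholder URL
        download_dir="downloads",
        unzip=True,
    )
    print(extracted)  # one extraction directory per URL
else:
    print("No internet connection; skipping download.")
```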
PypiClean
/Cosmos_Coin_Masternode_Setup-1.1.0-py3-none-any.whl/MasternodeSetup/masternode.py
# MIT License # # Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/ # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os from . import vps from . import core import argparse import configparser CONFIG_FILENAME = os.path.join(os.path.dirname(__file__), "conf", "config.ini") DESCRIPTION = "End to end script to setup a masternode" ROOT_USER = "root" def getConfig(): """Parse configuration file. Returns: dict: Dictionary containing the options parsed. """ config = configparser.ConfigParser() config.read(CONFIG_FILENAME) return config def begin(args): """Wrapper function that starts the application with the given arguments. Args: args (obj): Object containing the command line arguments parsed. """ try: # Parse file configuration config = getConfig() # Ensure all local requirements met (e.g. wallet installed) core.checkPrerequisites(config) # Setup VPS - Update packages, install binaries vps.setup(args.vps, ROOT_USER, args.password, config) # Setup masternode locally core.setup(args.vps, ROOT_USER, args.password, args.name, config) except Exception as e: print("Masternode setup failed. Reason: {0}.".format(str(e))) raise e def setup(): """Program entrypoint. This function will parse all program arguments and start the application. """ parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument("--name", action="store", required=True, help="The name to be given to the masternode") parser.add_argument("--vps", action="store", required=True, help="The IP address of the VPS server to be used") parser.add_argument("--password", action="store", required=True, help="The root password for the VPS provided") args = parser.parse_args() begin(args) if __name__ == "__main__": setup()
PypiClean
/BlazeWeb-0.6.2-py3-none-any.whl/blazeweb/utils/filesystem.py
import os from os import path from shutil import copy2, copystat, rmtree from blazeutils import NotGiven from blazeweb.globals import settings from blazeweb.hierarchy import list_component_mappings, hm __all__ = [ 'mkdirs', 'copy_static_files', ] class Error(EnvironmentError): pass try: WindowsError except NameError: WindowsError = None def mkdirs(newdir, mode=NotGiven): """ a "safe" verision of makedirs() that will only create the directory if it doesn't already exist. This avoids having to catch an Error Exception that might be a result of the directory already existing or might be a result of an error creating the directory. By checking for the diretory first, any exception was created by the directory not being able to be created. """ if mode is NotGiven: mode = settings.default.dir_mode if os.path.isdir(newdir): pass elif os.path.isfile(newdir): raise OSError("a file with the same name as the desired " "dir, '%s', already exists." % newdir) else: os.makedirs(newdir, mode) def copy_static_files(delete_existing=False): """ copy's files from the apps and components to the static directory defined in the settings. Files are copied in a hierarchical way such that apps and components lower in priority have their files overwritten by apps/components with higher priority. """ statroot = settings.dirs.static if delete_existing: app_stat_path = path.join(statroot, 'app') if path.exists(app_stat_path): rmtree(app_stat_path) component_stat_path = path.join(statroot, 'component') if path.exists(component_stat_path): rmtree(component_stat_path) for app, pname, package in list_component_mappings(reverse=True, inc_apps=True): package_mod = hm.builtin_import(package or app, fromlist=['']) pkgdir = path.dirname(package_mod.__file__) if package or not pname: srcpath = pkgdir else: srcpath = path.join(pkgdir, 'components', pname) srcpath = path.join(srcpath, 'static') if path.isdir(srcpath): if not pname: targetpath = 'app' else: targetpath = path.join('component', pname) targetpath = path.join(statroot, targetpath) copytree(srcpath, targetpath) def copytree(src, dst, symlinks=False, ignore=None): """Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() mkdirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore) else: copy2(srcname, dstname) # XXX What about devices, sockets etc.? 
except (IOError, os.error) as why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive copytree so that we can # continue with other files except Error as err: errors.extend(err.args[0]) try: copystat(src, dst) except OSError as why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error(errors)
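A short usage sketch for the helpers above. Note that ``copy_static_files`` (and the default ``mkdirs`` mode) read the global BlazeWeb ``settings`` proxy, so this assumes a configured application; the paths and mode are placeholders.

```python
import shutil

from blazeweb.utils.filesystem import copy_static_files, copytree, mkdirs

# create a directory only if it does not already exist (explicit mode given,
# so the settings proxy is not consulted for the default mode)
mkdirs("/tmp/example-static", mode=0o755)

# like shutil.copytree, but tolerant of an existing destination directory
copytree("app/static", "/tmp/example-static",
         ignore=shutil.ignore_patterns("*.pyc", ".git"))

# collect app and component static files into settings.dirs.static
copy_static_files(delete_existing=True)
```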
PypiClean
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/has-unicode/README.md
has-unicode
===========

Try to guess if your terminal supports unicode

```javascript
var hasUnicode = require("has-unicode")

if (hasUnicode()) {
  // the terminal probably has unicode support
}
```
```javascript
var hasUnicode = require("has-unicode").tryHarder
hasUnicode(function(unicodeSupported) {
  if (unicodeSupported) {
    // the terminal probably has unicode support
  }
})
```

## Detecting Unicode

What we actually detect is UTF-8 support, as that's what Node itself supports.
If you have a UTF-16 locale then you won't be detected as unicode capable.

### Windows

Since at least Windows 7, `cmd` and `powershell` have been unicode capable, but
unfortunately even then it's not guaranteed. In many localizations it still uses
legacy code pages and there's no facility short of running programs or linking
C++ that will let us detect this. As such, we report any Windows installation as
NOT unicode capable, and recommend that you encourage your users to override
this via config.

### Unix Like Operating Systems

We look at the environment variables `LC_ALL`, `LC_CTYPE`, and `LANG` in that
order. For `LC_ALL` and `LANG`, it looks for `.UTF-8` in the value. For
`LC_CTYPE` it looks to see if the value is `UTF-8`. This is sufficient for most
POSIX systems. While locale data can be put in `/etc/locale.conf` as well,
AFAIK it's always copied into the environment.
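For reference, a rough Python transcription of the POSIX heuristic described above (illustrative only — the actual implementation is the JavaScript module itself, and its details may differ):

```python
import os
import sys


def has_unicode() -> bool:
    """Rough sketch of the heuristic described above (illustrative only)."""
    if sys.platform == "win32":
        # Windows is reported as not unicode capable; users can override via config.
        return False
    ctype = (os.environ.get("LC_ALL")
             or os.environ.get("LC_CTYPE")
             or os.environ.get("LANG")
             or "")
    return "UTF-8" in ctype.upper()
```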
PypiClean
/lib/Utils/__init__.py
from __future__ import absolute_import, division, print_function import sys import json import itertools from socket import timeout as socket_timeout try: from urllib.request import urlopen from urllib.error import URLError except ImportError: from urllib2 import urlopen, URLError # Functions def get_pypi_version(): """ Returns the most up-to-date version number on PyPI. Return None on error """ try: addr = "https://pypi.org/pypi/FoxDot/json" page = urlopen(addr, timeout=2.5) data = json.loads(page.read().decode("utf-8")) version = data["info"]["version"] except (URLError, socket_timeout): version = None return version def stdout(*args): """ Forces prints to stdout and not console """ sys.__stdout__.write(" ".join([str(s) for s in args]) + "\n") def sliceToRange(s): start = s.start if s.start is not None else 0 stop = s.stop step = s.step if s.step is not None else 1 try: return list(range(start, stop, step)) except OverflowError: raise TypeError("range() integer end argument expected, got NoneType") def LCM(*args): """ Lowest Common Multiple """ args = [n for n in args if n != 0] # Base case if len(args) == 0: return 1 elif len(args) == 1: return args[0] X = list(args) while any([X[0]!=K for K in X]): i = X.index(min(X)) X[i] += args[i] return X[0] def EuclidsAlgorithm(n, k, lo=0, hi=1): if n == 0: return [n for i in range(k)] data = [[hi if i < n else lo] for i in range(k)] while True: k = k - n if k <= 1: break elif k < n: n, k = k, n for i in range(n): data[i] += data[-1] del data[-1] return [x for y in data for x in y] def PulsesToDurations(data): """ Returns a list of durations based on pulses (1s) and blanks (0s). Data should be a list of [1,0] where 1 is a pulse. """ count, seq = 1, [] for item in data[1:]: if item == 1: seq.append(count) count = 1 else: count += 1 seq.append(count) return seq def get_first_item(array): """ Returns first item from a possibly nested list""" try: return get_first_item(array[0]) except (TypeError, IndexError): return array def modi(array, i, debug=0): """ Returns the modulo index i.e. modi([0,1,2],4) will return 1 """ try: return array[i % len(array)] except(TypeError, AttributeError, ZeroDivisionError): return array def get_expanded_len(data): """ (0,(0,2)) returns 4. int returns 1 """ if type(data) is str and len(data) == 1: return 1 l = [] try: for item in data: try: l.append(get_expanded_len(item)) except(TypeError, AttributeError): l.append(1) return LCM(*l) * len(data) except TypeError: return 1 def max_length(*patterns): """ Returns the largest length pattern """ return max([len(p) for p in patterns]) def get_inverse_op(method): """ Returns the opposite __dunder__ method e.g. get_inverse_op("__add__") -> "__radd__" get_inverse_op("__ror__") -> "__or__" """ if method.startswith("__r"): return method.replace("__r", "__") elif method.startswith("__"): return method.replace("__", "__r", 1) return method def isiterable(obj): """ Returns true if an object is iterable by using `iter(obj)`""" try: iter(obj) return True except: return False def recursive_any(seq): """ Like any but checks lists recursively """ for item in seq: if isiterable(item): if recursive_any(item): return True else: if bool(item): return True else: return False # Classes class dots: """ Class for representing long Patterns in strings """ def __repr__(self): return '...'
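A quick illustration of the helpers defined above: ``EuclidsAlgorithm(3, 8)`` produces the familiar 3-against-8 Euclidean pattern, ``PulsesToDurations`` turns its pulses into note lengths, and ``modi`` wraps an index around a sequence.

```python
pattern = EuclidsAlgorithm(3, 8)
print(pattern)                     # [1, 0, 0, 1, 0, 0, 1, 0]
print(PulsesToDurations(pattern))  # [3, 3, 2]
print(LCM(3, 4, 6))                # 12
print(modi([0, 1, 2], 4))          # 1
```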
PypiClean
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/db/backends/spatialite/base.py
from ctypes.util import find_library from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.backends.sqlite3.base import ( DatabaseWrapper as SQLiteDatabaseWrapper, ) from .client import SpatiaLiteClient from .features import DatabaseFeatures from .introspection import SpatiaLiteIntrospection from .operations import SpatiaLiteOperations from .schema import SpatialiteSchemaEditor class DatabaseWrapper(SQLiteDatabaseWrapper): SchemaEditorClass = SpatialiteSchemaEditor # Classes instantiated in __init__(). client_class = SpatiaLiteClient features_class = DatabaseFeatures introspection_class = SpatiaLiteIntrospection ops_class = SpatiaLiteOperations def __init__(self, *args, **kwargs): # Trying to find the location of the SpatiaLite library. # Here we are figuring out the path to the SpatiaLite library # (`libspatialite`). If it's not in the system library path (e.g., it # cannot be found by `ctypes.util.find_library`), then it may be set # manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting. self.lib_spatialite_paths = [name for name in [ getattr(settings, 'SPATIALITE_LIBRARY_PATH', None), 'mod_spatialite.so', 'mod_spatialite', find_library('spatialite'), ] if name is not None] super().__init__(*args, **kwargs) def get_new_connection(self, conn_params): conn = super().get_new_connection(conn_params) # Enabling extension loading on the SQLite connection. try: conn.enable_load_extension(True) except AttributeError: raise ImproperlyConfigured( 'SpatiaLite requires SQLite to be configured to allow ' 'extension loading.' ) # Load the SpatiaLite library extension on the connection. for path in self.lib_spatialite_paths: try: conn.load_extension(path) except Exception: if getattr(settings, 'SPATIALITE_LIBRARY_PATH', None): raise ImproperlyConfigured( 'Unable to load the SpatiaLite library extension ' 'as specified in your SPATIALITE_LIBRARY_PATH setting.' ) continue else: break else: raise ImproperlyConfigured( 'Unable to load the SpatiaLite library extension. ' 'Library names tried: %s' % ', '.join(self.lib_spatialite_paths) ) return conn def prepare_database(self): super().prepare_database() # Check if spatial metadata have been initialized in the database with self.cursor() as cursor: cursor.execute("PRAGMA table_info(geometry_columns);") if cursor.fetchall() == []: cursor.execute("SELECT InitSpatialMetaData(1)")
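For context, a minimal Django settings sketch that exercises this backend; the database name and library path are placeholders, and ``SPATIALITE_LIBRARY_PATH`` is only needed when ``mod_spatialite`` cannot be found on the system search path.

```python
# settings.py (illustrative)
from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent

DATABASES = {
    "default": {
        "ENGINE": "django.contrib.gis.db.backends.spatialite",
        "NAME": str(BASE_DIR / "db.sqlite3"),
    }
}

# Placeholder path; adjust to wherever mod_spatialite is installed.
SPATIALITE_LIBRARY_PATH = "/usr/lib/x86_64-linux-gnu/mod_spatialite.so"
```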
PypiClean
/ETSProjectTools-0.6.0.tar.gz/ETSProjectTools-0.6.0/enthought/proxy/util.py
import os import urllib2 from connect_HTTP_handler import ConnectHTTPHandler from connect_HTTPS_handler import ConnectHTTPSHandler def install_proxy_handlers(pinfo, cfg=None): """ Use a proxy for future urllib2.urlopen commands. The specified pinfo should be a dictionary containing the following: * host: the servername of the proxy * port: the port to use on the proxy server * user: (optional) username for authenticating with the proxy server. * pass: (optional) password for authenticating with the proxy server. """ h = pinfo['host'] p = pinfo['port'] usr = pinfo['user'] pwd = pinfo['pass'] # Only install a custom opener if a host was actually specified. if h is not None and len(h) > 0: handlers = [] # Add handlers to deal with using the proxy. handlers.append(ConnectHTTPSHandler(info=pinfo)) handlers.append(ConnectHTTPHandler(info=pinfo)) # Create a proxy opener, add an authentication handler, and install it. opener = urllib2.build_opener(*handlers) if cfg != None: setup_authentication(cfg, opener) urllib2.install_opener(opener) return def get_proxy_info(proxystr=None): """ Get proxy config from string or environment variables. If a proxy string is passed in, it overrides whatever might be in the environment variables. Returns dictionary of identified proxy information. Raises ValueError on any configuration error. """ default_port = 80 # Only check for env variables if no explicit proxy string was provided. if proxystr is None or len(proxystr) < 1: proxy_info = { 'host' : os.environ.get('PROXY_HOST', None), 'port' : os.environ.get('PROXY_PORT', default_port), 'user' : os.environ.get('PROXY_USER', None), 'pass' : os.environ.get('PROXY_PASS', None) } # Parse the passed proxy string else: proxy_info = {} res = proxystr.split('@') if len(res) == 1: user_pass = [None] host_port = res[0].split(':') elif len(res) == 2: user_pass = res[0].split(':') host_port = res[1].split(':') else: raise ValueError('Invalid proxy string: "%s"' % proxystr) if len(user_pass) == 1: proxy_info['user'] = user_pass[0] proxy_info['pass'] = None elif len(user_pass) == 2: proxy_info['user'] = user_pass[0] proxy_info['pass'] = user_pass[1] else: raise ValueError('Invalid user:pass in proxy string: ' '"%s"' % user_pass) if len(host_port) == 1: proxy_info['host'] = host_port[0] proxy_info['port'] = default_port elif len(host_port) == 2: proxy_info['host'] = host_port[0] try: p = int(host_port[1]) except: raise ValueError('Port specification must be an integer. ' 'Had "%s"' % host_port[1]) proxy_info['port'] = p else: raise ValueError('Invalid host:port in proxy string: ' '"%s"' % host_port) # If a user was specified, but no password was, prompt for it now. user = proxy_info.get('user', None) if user is not None and len(user) > 0: pwd = proxy_info.get('pass', None) if pwd is None or len(pwd) < 1: import getpass proxy_info['pass'] = getpass.getpass() return proxy_info def setup_authentication(cfg, opener=None): """ """ # Configure a password manager with the user's authentication info. passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm() section = 'url auth info' if cfg.has_section(section): for dummy, info in cfg.items(section): # Ensure the info includes both a username and a url. if '@' not in info: raise ValueError('Invalid %s string: "%s"' % (section, info)) userpass, url = info.split('@') # Ensure we have both a user and password. 
if ':' in userpass: user, password = userpass.split(':') else: user = userpass prompt = 'Password for %s@%s: ' % (user, url) import getpass password = getpass.getpass(prompt) passmgr.add_password(None, url, user, password) # Create a basic auth handler that uses our authentication info. handler = urllib2.HTTPBasicAuthHandler(passmgr) # Add to an existing opener if one was specified and otherwise, create and # register our own. if opener is not None: opener.add_handler(handler) else: opener = urllib2.build_opener(handler) urllib2.install_opener(opener) return def setup_proxy(proxystr='', cfg=None): """ Configure and install proxy support. The specified proxy string is parsed via ``get_proxy_info`` and then installed via ``install_proxy_handler``. If proxy settings are detected and a handler is installed, then this method returns True. Otherwise it returns False. Raises ValueError in the event of any problems. """ installed = False info = get_proxy_info(proxystr) if 'host' in info and info['host'] is not None: install_proxy_handlers(info, cfg) installed = True return installed
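A short usage sketch for the functions above; the proxy host, port and credentials are placeholders. ``setup_proxy`` parses a ``user:pass@host:port`` string (falling back to the ``PROXY_*`` environment variables when the string is empty) and installs the matching opener.

```python
from enthought.proxy.util import get_proxy_info, setup_proxy

# Parse an explicit proxy specification (placeholder values).
info = get_proxy_info("alice:secret@proxy.example.com:3128")
print(info["host"])   # proxy.example.com
print(info["port"])   # 3128

# Or parse-and-install in one step; returns True if a handler was installed.
installed = setup_proxy("alice:secret@proxy.example.com:3128")
print(installed)
```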
PypiClean
/Nitrous-0.9.3-py3-none-any.whl/turbogears/i18n/data/es_AR.py
languages={'gv': u'ga\xe9lico man\xe9s', 'gu': u'goujarat\xed', 'gd': u'ga\xe9lico escoc\xe9s', 'ga': u'irland\xe9s', 'gl': 'gallego', 'la': u'lat\xedn', 'ln': 'lingala', 'lo': 'laosiano', 'tt': u't\xe1taro', 'tr': 'turco', 'ts': 'tsonga', 'lv': 'letonio', 'lt': 'lituano', 'th': u'tailand\xe9s', 'ti': 'tigrinya', 'te': 'telugu', 'haw': 'hawaiano', 'yo': 'yoruba', 'de': u'alem\xe1n', 'da': u'dan\xe9s', 'qu': 'quechua', 'el': 'griego', 'eo': 'esperanto', 'en': u'ingl\xe9s', 'zh': 'chino', 'za': 'zhuang', 'eu': 'vasco', 'et': 'estonio', 'es': u'espa\xf1ol', 'ru': 'ruso', 'ro': 'rumano', 'be': 'bielorruso', 'bg': u'b\xfalgaro', 'uk': 'ucraniano', 'wo': 'uolof', 'bn': u'bengal\xed', 'bo': 'tibetano', 'bh': 'bihari', 'bi': 'bislama', 'br': u'bret\xf3n', 'ja': u'japon\xe9s', 'om': 'oromo', 'root': u'ra\xedz', 'or': 'oriya', 'xh': 'xhosa', 'co': 'corso', 'ca': u'catal\xe1n', 'cy': u'gal\xe9s', 'cs': 'checo', 'ps': 'pashto', 'pt': u'portugu\xe9s', 'tl': 'tagalo', 'pa': u'punjab\xed', 'vi': 'vietnamita', 'pl': 'polaco', 'hy': 'armenio', 'hr': 'croata', 'iu': 'inuktitut', 'hu': u'h\xfangaro', 'hi': 'hindi', 'ha': 'hausa', 'he': 'hebreo', 'mg': 'malgache', 'uz': 'uzbeko', 'ml': 'malayalam', 'mo': 'moldavo', 'mn': 'mongol', 'mi': u'maor\xed', 'ik': 'inupiak', 'mk': 'macedonio', 'ur': 'urdu', 'mt': u'malt\xe9s', 'ms': 'malayo', 'mr': 'marathi', 'ug': 'uigur', 'ta': 'tamil', 'my': 'birmano', 'aa': 'afar', 'af': 'afrikaans', 'sw': 'swahili', 'is': u'island\xe9s', 'am': u'am\xe1rico', 'it': 'italiano', 'sv': 'sueco', 'as': u'asam\xe9s', 'ar': u'\xe1rabe', 'su': u'sundan\xe9s', 'zu': u'zul\xfa', 'az': 'azerbayano', 'ie': 'interlingue', 'id': 'indonesio', 'nl': u'holand\xe9s', 'nn': 'nynorsk noruego', 'no': 'noruego', 'na': 'nauruano', 'nb': 'bokmal noruego', 'ne': u'nepal\xed', 'vo': 'volapuk', 'so': u'somal\xed', 'fr': u'franc\xe9s', 'sm': 'samoano', 'fa': 'farsi', 'fi': u'finland\xe9s', 'sa': u's\xe1nscrito', 'fo': u'fero\xe9s', 'ka': 'georgiano', 'kk': 'kazajo', 'sr': 'serbio', 'sq': u'alban\xe9s', 'ko': 'coreano', 'kn': 'canara', 'km': 'kmer', 'kl': u'groenland\xe9s', 'sk': 'eslovaco', 'si': u'cingal\xe9s', 'sh': 'serbocroata', 'kw': u'c\xf3rnico', 'ku': 'kurdo', 'sl': 'esloveno', 'ky': 'kirghiz', 'sg': 'sango'} countries={'BD': 'Bangladesh', 'BE': u'B\xe9lgica', 'BF': 'Burkina Faso', 'BG': 'Bulgaria', 'BA': 'Bosnia y Hercegovina', 'BB': 'Barbados', 'WF': 'Wallis y Futuna', 'BM': 'Bermudas', 'BN': u'Brun\xe9i', 'BO': 'Bolivia', 'BH': u'Bahr\xe1in', 'BI': 'Burundi', 'BJ': u'Ben\xedn', 'BT': u'But\xe1n', 'JM': 'Jamaica', 'BV': 'Isla Bouvet', 'BW': 'Botsuana', 'WS': 'Samoa', 'BR': 'Brasil', 'BS': 'Bahamas', 'BY': 'Bielorrusia', 'BZ': 'Belice', 'RU': 'Rusia', 'RW': 'Ruanda', 'TL': 'Timor Oriental', 'RE': u'R\xe9union', 'TM': u'Turkmenist\xe1n', 'TJ': u'Tayikist\xe1n', 'RO': 'Rumania', 'TK': 'Tokelau', 'GW': 'Guinea-Bissau', 'GU': 'Guam', 'GT': 'Guatemala', 'GS': 'Islas Georgia del Sur y Sandwich del Sur', 'GR': 'Grecia', 'GQ': 'Guinea Ecuatorial', 'GP': 'Guadalupe', 'JP': u'Jap\xf3n', 'GY': 'Guyana', 'GF': 'Guayana Francesa', 'GE': 'Georgia', 'GD': 'Granada', 'GB': 'Reino Unido', 'GA': u'Gab\xf3n', 'SV': 'El Salvador', 'GN': 'Guinea', 'GM': 'Gambia', 'GL': 'Groenlandia', 'GI': 'Gibraltar', 'GH': 'Ghana', 'OM': u'Om\xe1n', 'TN': u'T\xfanez', 'JO': 'Jordania', 'SP': 'Serbia', 'HR': 'Croacia', 'HT': u'Hait\xed', 'HU': u'Hungr\xeda', 'HK': u'Hong Kong, Regi\xf3n administrativa especial de China', 'HN': 'Honduras', 'HM': 'Islas Heard y McDonald', 'VE': 'Venezuela', 'PR': 'Puerto Rico', 'PS': 'Territorios 
Palestinos', 'PW': 'Palau', 'PT': 'Portugal', 'SJ': 'Svalbard y Jan Mayen', 'PY': 'Paraguay', 'IQ': 'Irak', 'PA': u'Panam\xe1', 'PF': 'Polinesia Francesa', 'PG': u'Pap\xfaa Nueva Guinea', 'PE': u'Per\xfa', 'PK': u'Pakist\xe1n', 'PH': 'Filipinas', 'PN': 'Pitcairn', 'PL': 'Polonia', 'PM': u'San Pedro y Miquel\xf3n', 'ZM': 'Zambia', 'EH': u'S\xe1hara Occidental', 'EE': 'Estonia', 'EG': 'Egipto', 'ZA': u'Sud\xe1frica', 'EC': 'Ecuador', 'IT': 'Italia', 'VN': 'Vietnam', 'SB': u'Islas Salom\xf3n', 'ET': u'Etiop\xeda', 'SO': 'Somalia', 'ZW': 'Zimbabue', 'SA': u'Arabia Saud\xed', 'ES': u'Espa\xf1a', 'ER': 'Eritrea', 'MD': 'Moldova', 'MG': 'Madagascar', 'MA': 'Marruecos', 'MC': u'M\xf3naco', 'UZ': u'Uzbekist\xe1n', 'MM': 'Myanmar', 'ML': u'Mal\xed', 'MO': u'Macao, Regi\xf3n administrativa especial de China', 'MN': 'Mongolia', 'MH': 'Islas Marshall', 'MK': 'Macedonia', 'MU': 'Mauricio', 'MT': 'Malta', 'MW': 'Malaui', 'MV': 'Maldivas', 'MQ': 'Martinica', 'MP': 'Islas Marianas del Norte', 'MS': 'Montserrat', 'MR': 'Mauritania', 'UG': 'Uganda', 'MY': 'Malasia', 'MX': u'M\xe9xico', 'IL': 'Israel', 'FR': 'Francia', 'IO': u'Territorios Brit\xe1nicos del Oc\xe9ano \xcdndico', 'SH': 'Santa Elena', 'FI': 'Finlandia', 'FJ': 'Fidji', 'FK': 'Islas Falkland (Malvinas)', 'FM': 'Micronesia', 'FO': 'Islas Feroe', 'NI': 'Nicaragua', 'NL': u'Pa\xedses Bajos', 'NO': 'Noruega', 'NA': 'Namibia', 'VU': 'Vanuatu', 'NC': 'Nueva Caledonia', 'NE': u'N\xedger', 'NF': 'Isla Norfolk', 'NG': 'Nigeria', 'NZ': 'Nueva Zelanda', 'NP': 'Nepal', 'NR': 'Nauru', 'NU': 'Niue', 'CK': 'Islas Cook', 'CI': 'Costa de Marfil', 'CH': 'Suiza', 'CO': 'Colombia', 'CN': 'China', 'CM': u'Camer\xfan', 'CL': 'Chile', 'CC': 'Islas Cocos (Keeling)', 'CA': u'Canad\xe1', 'CG': 'Congo', 'CF': u'Rep\xfablica Centroafricana', 'CD': u'Rep\xfablica Democr\xe1tica del Congo', 'CZ': u'Rep\xfablica Checa', 'CY': 'Chipre', 'CX': 'Isla de Christmas', 'CR': 'Costa Rica', 'Fallback': 'en', 'CV': 'Cabo Verde', 'CU': 'Cuba', 'SZ': 'Suazilandia', 'SY': 'Siria', 'KG': u'Kirguizist\xe1n', 'KE': 'Kenia', 'SR': 'Suriname', 'KI': 'Kiribati', 'KH': 'Camboya', 'KN': u'San Crist\xf3bal y Nieves', 'KM': 'Comores', 'ST': u'Santo Tom\xe9 y Pr\xedncipe', 'SK': 'Eslovaquia', 'KR': 'Corea del Sur', 'SI': 'Eslovenia', 'KP': 'Corea del Norte', 'KW': 'Kuwait', 'SN': 'Senegal', 'SM': 'San Marino', 'SL': 'Sierra Leona', 'SC': 'Seychelles', 'KZ': u'Kazajist\xe1n', 'KY': u'Islas Caim\xe1n', 'SG': 'Singapur', 'SE': 'Suecia', 'SD': u'Sud\xe1n', 'DO': u'Rep\xfablica Dominicana', 'DM': 'Dominica', 'DJ': 'Yibuti', 'DK': 'Dinamarca', 'VG': u'Islas V\xedrgenes Brit\xe1nicas', 'DE': 'Alemania', 'YE': 'Yemen', 'DZ': 'Argelia', 'US': 'Estados Unidos', 'UY': 'Uruguay', 'YU': 'Yugoslavia', 'YT': 'Mayotte', 'UM': 'Islas menores alejadas de Estados Unidos', 'LB': u'L\xedbano', 'LC': 'Saint Lucia', 'LA': 'Laos', 'TV': 'Tuvalu', 'TW': u'Taiw\xe1n, Rep\xfablica de China', 'TT': 'Trinidad y Tabago', 'TR': u'Turqu\xeda', 'LK': 'Sri Lanka', 'LI': 'Liechtenstein', 'LV': 'Letonia', 'TO': 'Tonga', 'LT': 'Lituania', 'LU': 'Luxemburgo', 'LR': 'Liberia', 'LS': 'Lesoto', 'TH': 'Tailandia', 'TF': 'Territorios Australes Franceses', 'TG': 'Togo', 'TD': 'Chad', 'TC': 'Islas Turcas y Caicos', 'LY': 'Libia', 'VA': 'Ciudad del Vaticano', 'VC': 'San Vicente y las Granadinas', 'AE': u'Emiratos \xc1rabes Unidos', 'AD': 'Andorra', 'AG': 'Antigua y Barbuda', 'AF': u'Afganist\xe1n', 'AI': 'Anguila', 'VI': u'Islas V\xedrgenes de los Estados Unidos', 'IS': 'Islandia', 'IR': u'Ir\xe1n', 'AM': 'Armenia', 'AL': 'Albania', 'AO': 
'Angola', 'AN': 'Antillas Neerlandesas', 'AQ': u'Ant\xe1rtica', 'AS': 'Samoa Americana', 'AR': 'Argentina', 'AU': 'Australia', 'AT': 'Austria', 'AW': 'Aruba', 'IN': 'India', 'TZ': 'Tanzania', 'AZ': u'Azerbaiy\xe1n', 'IE': 'Irlanda', 'ID': 'Indonesia', 'UA': 'Ucrania', 'QA': 'Qatar', 'MZ': 'Mozambique'} months=['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre'] abbrMonths=['ene', 'feb', 'mar', 'abr', 'may', 'jun', 'jul', 'ago', 'sep', 'oct', 'nov', 'dic'] days=['lunes', 'martes', u'mi\xe9rcoles', 'jueves', 'viernes', u's\xe1bado', 'domingo'] abbrDays=['lun', 'mar', u'mi\xe9', 'jue', 'vie', u's\xe1b', 'dom'] dateFormats={'medium': '%d/%m/%Y', 'full': '%%(dayname)s %d de %%(monthname)s de %Y', 'long': '%d de %%(monthname)s de %Y', 'short': '%d/%m/%y'} numericSymbols={'group': '.', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'}
PypiClean
/InsightIDR4Py-0.3.1.tar.gz/InsightIDR4Py-0.3.1/README.md
# InsightIDR4Py

A Python client allowing simplified interaction with Rapid7's InsightIDR REST API.

InsightIDR4Py allows users to perform numerous actions within Rapid7 [InsightIDR](https://docs.rapid7.com/insightidr/). This tool handles some of the challenges and complexities of using the InsightIDR REST API, including polling queries in progress, paginated responses, handling the JSON output, and time range queries. These capabilities can be particularly useful for automating processes, integrating log data with other APIs (like VirusTotal), managing content in the InsightIDR platform, and performing multi-tenant workflows (for instance, updating content across tenants for consistency, or copying content from one InsightIDR tenant to another).

For some ideas on how InsightIDR4Py can be used, check out this [blog post](https://micahbabinski.medium.com/button-pusher-to-masterbuilder-automating-siem-workflows-3f51874a80e) where I cover some use cases.

The API capabilities provided by InsightIDR4Py include:

## Logsearch
* Query Events
* Query Groups

## Saved Queries
* List Saved Queries
* Get a Saved Query
* Create Saved Query
* Replace a Saved Query
* Update a Saved Query
* Delete a Saved Query

## Custom Alerts*
* List Custom Alerts
* Get a Custom Alert
* Create Custom Alert
* Replace a Custom Alert
* Update a Custom Alert
* Delete a Custom Alert

*Only pattern detection alerts are supported currently.

## Investigations
* List Investigations
* Get an Investigation
* Create Investigation
* Close Investigations in Bulk
* List Alerts by Investigation
* List Rapid7 Product Alerts by Investigation
* Update Investigation
* List Comments on an Investigation
* Create Comment
* Delete Comment

## Threats
* Create Threat
* Add Indicators to Threat
* Replace Threat Indicators
* Delete Threat

Happy analyzing :monocle_face: and happy administering! :hammer:

# Installation

InsightIDR4Py is available on [PyPI](https://pypi.org/project/InsightIDR4Py/) and can be installed using:

```
pip install InsightIDR4Py
```

# Prerequisites

You will need to obtain an API key from the InsightIDR system. The documentation for this can be found [here](https://docs.rapid7.com/insight/managing-platform-api-keys/). From there, you'll use this API key value to create the InsightIDR API object as shown below:

```python
import InsightIDR4Py as idr

# define API key (store this value securely)
api_key = "API_Key_Here"

# create the InsightIDR object
api = idr.InsightIDR(api_key)
```

Remember to store the API key securely! There are several ways to do this, and you should make sure that the way you choose aligns with your organization's security policy. Python's [keyring](https://pypi.org/project/keyring/) library is one possibility.
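For instance, here is a minimal sketch of the keyring approach. The service and username labels ("InsightIDR4Py" and "api_key") are just names chosen for this illustration; pick whatever makes sense in your environment.

```python
import keyring
import InsightIDR4Py as idr

# one-time setup, run once from an interactive session (not in your script):
# keyring.set_password("InsightIDR4Py", "api_key", "API_Key_Here")

# retrieve the key from the OS credential store at runtime
api_key = keyring.get_password("InsightIDR4Py", "api_key")

# create the InsightIDR object without hard-coding the key
api = idr.InsightIDR(api_key)
```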
# Examples

## Example 1: Query DNS Logs for Suspicious TLDs

```python
import InsightIDR4Py as idr

# create the InsightIDR object
api = idr.InsightIDR(api_key)

# define the query parameters
logset_name = "DNS Query"
query = "where(public_suffix IN [buzz, top, club, work, surf, tw, gq, ml, cf, biz, tk, cam, xyz, bond])"
time_range = "Last 36 Hours"

# query the logs
events = api.QueryEvents(logset_name, query, time_range)

# print out an event
print(events[0])
```

Result:

```python
{'timestamp': '2021-09-28T15:11:45.000Z', 'asset': 'windesk05.organization.com', 'source_address': '192.168.4.10', 'query': 'regulationprivilegescan.top', 'public_suffix': 'top', 'top_private_domain': 'regulationprivilegescan.top', 'query_type': 'A', 'source_data': '09/28/2021 8:11:45 AM 1480 PACKET 00000076ED1A0140 UDP Rcv 192.168.4.121 c3b3 Q [0001 D NOERROR] A (3)regulationprivilegescan(3)top(0)'}
```

## Example 2: Query Authentication Logs for Top Five Failed Logins, Grouped by Count

```python
import InsightIDR4Py as idr

# create the InsightIDR object
api = idr.InsightIDR(api_key)

# define the query parameters
logset_name = "Asset Authentication"
query = "where(source_json.eventCode = 4625) groupby(destination_account) limit(5)"
time_range = "Last 24 Hours"

# query the logs
groups = api.QueryGroups(logset_name, query, time_range)

# print out the groups
for group in groups.items():
    print(group)
```

Result:

```
('Mark.Corrigan', 132)
('Jeremy.Usborne', 102)
('Sophie.Chapman', 88)
('Alan.Johnson', 64)
('Super.Hans', 24)
```

## Example 3: Query VPN Logins from a Certain IP Range and Check the Results Using [AbuseIPDB](https://www.abuseipdb.com/)

This example uses [python-abuseipdb](https://github.com/meatyite/python-abuseipdb), a Python object oriented wrapper for the AbuseIPDB v2 API. It requires an API key, which you can get by creating a free account. From there, go to User Account > API, choose Create Key, and enter this string into the abuse_ip_db_api_key variable in the example below.

The same API key security principles mentioned above apply here. Guard your API keys to prevent rogue usage!

```python
import InsightIDR4Py as idr
from abuseipdb import *

# create the InsightIDR object
api = idr.InsightIDR(api_key)

# define the AbuseIPDB API key
abuse_ip_db_api_key = "YOUR_KEY_HERE"

# define the query parameters
logset_name = "Ingress Authentication"
query = "where(service = vpn AND source_ip = IP(64.62.128.0/17))"
time_range = "Last 24 Hours"

# query the logs
events = api.QueryEvents(logset_name, query, time_range)

# check the source IP addresses in AbuseIPDB and display results
if len(events) > 0:
    ipdb = AbuseIPDB(abuse_ip_db_api_key)
    for event in events:
        ip_check = ipdb.check(event["source_ip"])
        print("----------")
        print("IP Address: " + ip_check.ipAddress)
        print("Last reported at: " + ip_check.lastReportedAt)
        print("Abuse confidence score: " + str(ip_check.abuseConfidenceScore))
        print("Abuser country: " + ip_check.countryName)
        print("Abuser ISP: " + ip_check.isp)
        print("Total reports of abuser: " + str(ip_check.totalReports))
        print("----------")
```

# License

This repository is licensed under an [MIT license](https://github.com/mbabinski/InsightIDR4Py/blob/main/LICENSE), which grants extensive permission to use this material however you wish.

# Contributing

You are welcome to contribute however you wish! I appreciate feedback in any format.
PypiClean
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/sync_status.py
import re # noqa: F401 import sys # noqa: F401 from typing import ( Optional, Union, List, Dict, ) from MergePythonSDK.shared.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, OpenApiModel, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from MergePythonSDK.shared.exceptions import ApiAttributeError from MergePythonSDK.shared.model_utils import import_model_by_name def lazy_import(): from MergePythonSDK.hris.model.sync_status_status_enum import SyncStatusStatusEnum globals()['SyncStatusStatusEnum'] = SyncStatusStatusEnum class SyncStatus(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() defined_types = { 'model_name': (str,), # noqa: E501 'model_id': (str,), # noqa: E501 'status': (bool, dict, float, int, list, str, none_type,), # noqa: E501 'is_initial_sync': (bool,), # noqa: E501 'last_sync_start': (datetime, none_type,), # noqa: E501 'next_sync_start': (datetime, none_type,), # noqa: E501 } return defined_types @cached_property def discriminator(): return None attribute_map = { 'model_name': 'model_name', # noqa: E501 'model_id': 'model_id', # noqa: E501 'status': 'status', # noqa: E501 'is_initial_sync': 'is_initial_sync', # noqa: E501 'last_sync_start': 'last_sync_start', # noqa: E501 'next_sync_start': 'next_sync_start', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, model_name, model_id, status, is_initial_sync, *args, **kwargs): # noqa: E501 """SyncStatus - a model defined in OpenAPI Args: model_name (str): model_id (str): status (bool, dict, float, int, list, str, none_type): is_initial_sync (bool): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) last_sync_start (datetime): [optional] # noqa: E501 next_sync_start (datetime): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.model_name = model_name self.model_id = model_id self.status = status self.is_initial_sync = is_initial_sync self.last_sync_start = kwargs.get("last_sync_start", None) self.next_sync_start = kwargs.get("next_sync_start", None) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, model_name, model_id, status, is_initial_sync, *args, **kwargs): # noqa: E501 """SyncStatus - a model defined in OpenAPI Args: model_name (str): model_id (str): status (bool, dict, float, int, list, str, none_type): is_initial_sync (bool): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. 
If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) last_sync_start (datetime): [optional] # noqa: E501 next_sync_start (datetime): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.model_name: Union[str] = model_name self.model_id: Union[str] = model_id self.status: Union[bool, dict, float, int, list, str, none_type] = status self.is_initial_sync: Union[bool] = is_initial_sync self.last_sync_start: Union[datetime] = kwargs.get("last_sync_start", None) self.next_sync_start: Union[datetime] = kwargs.get("next_sync_start", None)
PypiClean
/ELDAM_LCA-1.0-py3-none-any.whl/eldam/core/parameters.py
from collections import OrderedDict from openpyxl.utils import quote_sheetname from eldam.utils.misc import find_data_file from eldam.settings import ELDA_TEMPLATE_VERSION ELDA_TEMPLATE_FILEPATH = find_data_file('files/EldaTemplate.xlsm') ELDA_INDEX_FILEPATH = find_data_file('files/EldaIndex.xlsx') JINJA2_TEMPLATES_FOLDER = find_data_file('files/templates') # Lists of field names for every flow field SP_PRODUCT_FIELDS = ['Name', 'Amount', 'Unit', 'Allocation %', 'Waste type', 'Category', 'Comment'] SP_WASTE_TREATMENT_FIELDS = ['Name', 'Amount', 'Unit', 'Waste type', 'Category', 'Comment'] SP_TECHNOSPHERE_FIELDS = ['Name', 'Amount', 'Unit', 'Distribution', 'SD2 or 2SD', 'Min', 'Max', 'Comment'] SP_BIOSPHERE_FIELDS = ['Name', 'Sub-compartment', 'Amount', 'Unit', 'Distribution', 'SD2 or 2SD', 'Min', 'Max', 'Comment'] SP_INPUT_PARAMETER_FIELDS = ['Name', 'Value', 'Distribution', 'SD2 or 2SD', 'Min', 'Max', 'Hide', 'Comment'] SP_CALCULATED_PARAMETER_FIELDS = ['Name', 'Expression', 'Comment'] # List of categories used with function SimaProXlsxReader.get_data_by_category() # for importing data of each category # Also used by Elda.__init__() to add additional data according to flow category # List of tuples constituted as: # (Name of the category in the xlsx file, Field names, {Data to add: Value}) FLOW_CATEGORIES = [('Products', SP_PRODUCT_FIELDS, {'Type': 'Output/Technosphere/Product'}), ('Waste treatment', SP_WASTE_TREATMENT_FIELDS, {'Type': 'Output/Technosphere/Waste treatment product'}), ('Avoided products', SP_TECHNOSPHERE_FIELDS, {'Type': 'Output/Technosphere/Avoided product'}), ('Materials/fuels', SP_TECHNOSPHERE_FIELDS, {'Type': 'Input/Technosphere'}), ('Electricity/heat', SP_TECHNOSPHERE_FIELDS, {'Type': 'Input/Technosphere'}), ('Resources', SP_BIOSPHERE_FIELDS, {'Type': 'Input/Nature'}), ('Emissions to air', SP_BIOSPHERE_FIELDS, {'Type': 'Output/Emission', 'Compartment': 'Air'}), ('Emissions to water', SP_BIOSPHERE_FIELDS, {'Type': 'Output/Emission', 'Compartment': 'Water'}), ('Emissions to soil', SP_BIOSPHERE_FIELDS, {'Type': 'Output/Emission', 'Compartment': 'Soil'}), ('Final waste flows', SP_BIOSPHERE_FIELDS, {'Type': 'Output/Final waste flows'}), ('Waste to treatment', SP_TECHNOSPHERE_FIELDS, {'Type': 'Output/Technosphere/Waste'})] PARAMETER_CATEGORIES = [('Input parameters', {'Type': 'Input parameter', 'Level': 'Process'}), ('Calculated parameters', {'Type': 'Calculated parameter', 'Level': 'Process'}), ('Database Input parameters', {'Type': 'Input parameter', 'Level': 'Database'}), ('Database Calculated parameters', {'Type': 'Calculated parameter', 'Level': 'Database'}), ('Project Input parameters', {'Type': 'Input parameter', 'Level': 'Project'}), ('Project Calculated parameters', {'Type': 'Calculated parameter', 'Level': 'Project'})] PROCESS_DATA_CATEGORIES = FLOW_CATEGORIES + [('Non material emissions', SP_BIOSPHERE_FIELDS, {}), ('Social issues', SP_BIOSPHERE_FIELDS, {}), ('Economic issues', SP_BIOSPHERE_FIELDS, {})] PROCESS_PARAMETERS_CATEGORIES = [('Input parameters', SP_INPUT_PARAMETER_FIELDS, {}), ('Calculated parameters', SP_CALCULATED_PARAMETER_FIELDS, {})] COMMON_PARAMETERS_CATEGORIES = [('Database Input parameters', SP_INPUT_PARAMETER_FIELDS, {}), ('Database Calculated parameters', SP_CALCULATED_PARAMETER_FIELDS, {}), ('Project Input parameters', SP_INPUT_PARAMETER_FIELDS, {}), ('Project Calculated parameters', SP_CALCULATED_PARAMETER_FIELDS, {})] # Dictionary mapping flow field names to Elda column SIMAPRO_FLOW_FIELDS_NAMES = ['Type', 'Name', 'Amount', 'Unit', 
'Allocation %', 'Waste type', 'Comment', 'Distribution', 'SD2 or 2SD', 'Min', 'Max', 'Sub-compartment', 'Compartment', 'Category'] PRODUCT_FLOW_TYPES = ['Output/Technosphere/Product', 'Output/Technosphere/Waste treatment product'] TECHNOSPHERE_FLOW_TYPES = ['Output/Technosphere/Avoided product', 'Output/Technosphere/Waste', 'Input/Technosphere'] BIOSPHERE_FLOW_TYPES = ['Input/Nature', 'Output/Emission', 'Output/Final waste flows'] # Used to show data in the GUI PROCESS_ATTRIBUTES = OrderedDict(name='Name', date='Date', category_type="Category type", project='Project', author='Author', allocation_rules='Allocation rules', comment='Comment') PRODUCT_FLOW_ATTRIBUTES = OrderedDict(name='Name', type='Type', unit='Unit', amount='Amount', category='Category', waste_type='Waste type', allocation='Allocation', data_source='Data source', library='Library', comment='Comment', review_state='Review state', comment_for_reviewer='Comment for reviewer', reviewer_comment='Reviewer comment', uncertainty='Uncertainty', stdev='StDev', min_value='Min', max_value='Max') TECHNOSPHERE_FLOW_ATTRIBUTES = OrderedDict(name='Name', type='Type', unit='Unit', amount='Amount', data_source='Data source', library='Library', comment='Comment', review_state='Review state', comment_for_reviewer='Comment for reviewer', reviewer_comment='Reviewer comment', uncertainty='Uncertainty', stdev='StDev', min_value='Min', max_value='Max', modification_code='Modification code', modification_comment='Modification comment', relevance_code='Relevance code', relevance_comment='Relevance comment', confidence_code='Confidence code', confidence_comment='Confidence comment') BIOSPHERE_FLOW_ATTRIBUTES = OrderedDict(name='Name', type='Type', compartment='Compartment', sub_compartment='Sub-compartment', unit='Unit', amount='Amount', data_source='Data source', library='Library', comment='Comment', review_state='Review state', comment_for_reviewer='Comment for reviewer', reviewer_comment='Reviewer comment', uncertainty='Uncertainty', stdev='StDev', min_value='Min', max_value='Max', relevance_code='Relevance code', relevance_comment='Relevance comment', confidence_code='Confidence code', confidence_comment='Confidence comment') INPUT_PARAMETERS_ATTRIBUTES = OrderedDict(name='Name', value_or_formula='Value', uncertainty='Uncertainty', stdev='StDev', min_value='Min', max_value='Max', comment='Comment', review_state='Review state', comment_for_reviewer='Comment for reviewer', reviewer_comment='Reviewer comment', level='Level') CALCULATED_PARAMETERS_ATTRIBUTES = OrderedDict(name='Name', value_or_formula='Value', comment='Comment', review_state='Review state', comment_for_reviewer='Comment for reviewer', reviewer_comment='Reviewer comment', level='Level') # Review attributes names REVIEW_ATTRIBUTES = ['value', 'comment', 'comment_for_reviewer', 'review_state', 'reviewer_comment'] # SimaPro built-in variables, lowercase only # Seems that Pi is the only SimaPro built-in variable. 
SIMAPRO_BUILT_IN_VARIABLES = {'pi': '3.1415926535', 'Pi': '3.1415926535', 'PI': '3.1415926535'} ELDA_INDEX_COLUMNS = {'file_name': 'A', 'location': 'B', 'process_name': 'C', 'project': 'D', 'comment': 'E', 'author': 'F', 'contact': 'G', 'long_term_contact': 'H', 'last_version': 'I', 'last_version_date': 'J', 'status': 'K', 'product_flows': 'L', 'technosphere_flows_nb': 'M', 'biosphere_flows_nb': 'N', 'input_parameters_nb': 'O', 'calculated_parameters_nb': 'P' } ELDA_INDEX_COLUMNS_TO_CENTER = ['last_version', 'technosphere_flows_nb', 'biosphere_flows_nb', 'input_parameters_nb', 'calculated_parameters_nb'] # Process attributes that can be copied by the "Copy Elda metadata" feature ATTRIBUTES_TO_COPY = [ 'name', 'synonym', 'category_type', 'comment', 'allocation_rules', 'author', 'contact', 'long_term_contact', 'step', 'project', 'step_in_project', 'reference_period', 'time_validity_limit', 'geographic_representativeness', 'technology_description', 'technology_scale', 'technology_level'] EXCEL_EXTERNAL_LINK_PATTERN = r"""'.*\[.*\].*'\!\$?[A-Z]+\$?\d+""" # This is the only Elda cell address that is defined outside EldaTemplateParameters. It is used for determining the # elda template version and thus cannot be changed from version to version ELDA_TEMPLATE_VERSION_CELL = 'B22' class EldaTemplateParameters: """ Class used to store parameters related to the Elda template and to handle changes between versions """ def __init__(self, elda_template_version=ELDA_TEMPLATE_VERSION): """ Here are defined the parameters used by the current version of ELDAM Args: elda_template_version (str): Version of Eldam for which to get the parameters. Default is current version. """ # Column of every flow fields self.FLOW_FIELDS_COLUMNS = {'type': 'B', 'name': 'C', 'library': 'D', 'compartment': 'E', 'waste_type': 'F', 'sub_compartment': 'F', 'unit': 'G', 'amount': 'H', 'formula': 'I', 'allocation': 'J', 'allocation_formula': 'K', 'category': 'L', 'data_source': 'M', 'comment': 'O', 'comment_for_reviewer': 'P', 'review_state': 'Q', 'reviewer_comment': 'S', 'uncertainty': 'T', 'stdev': 'U', 'min_value': 'V', 'max_value': 'W', 'modification_code': 'X', 'modification_comment': 'Y', 'relevance_code': 'Z', 'relevance_comment': 'AA', 'confidence_code': 'AB', 'confidence_comment': 'AC'} # Parameters fields order in the elda self.INPUT_PARAMETER_FIELDS_ORDER = ['name', 'value', 'merged_column', 'comment', 'comment_for_reviewer', 'review_state', 'review_state_litteral', 'reviewer_comment', 'uncertainty', 'stdev', 'min_value', 'max_value', 'level'] self.CALCULATED_PARAMETER_FIELDS_ORDER = ['name', 'value', 'formula', 'comment', 'comment_for_reviewer', 'review_state', 'review_state_litteral', 'reviewer_comment', 'level'] # Row number of the first flow on the elda self.FIRST_FLOW_ROW_NUMBER = 36 # Row number of the first flow on the elda self.LAST_FLOW_ROW_NUMBER = 185 # Default last input/calculated parameter cell coords self.DEFAULT_LAST_INPUT_PARAMETER_CELL_COORDS = '$L$23' self.DEFAULT_LAST_CALCULATED_PARAMETER_CELL_COORDS = '$L$33' # Cell in which the last input/calculated parameter cell coords are stored self.LAST_INPUT_PARAMETER_CELL_COORDS_CELL = "Q1" self.LAST_CALCULATED_PARAMETER_CELL_COORDS_CELL = "Q2" # Parameters blocs dimensions self.INPUT_PARAMETERS_BLOCK_HEIGHT = 9 # number of rows self.INPUT_PARAMETERS_BLOCK_WIDTH = 13 # number of columns self.CALCULATED_PARAMETERS_BLOCK_HEIGHT = 8 # number of rows self.CALCULATED_PARAMETERS_BLOCK_WIDTH = 9 # number of columns # Parameters name column 
self.PARAMETERS_NAME_COLUMN = 'L' # Process metadata cells self.METADATA_CELLS = {'name': 'D10', 'synonym': 'D11', 'category_type': 'D12', 'comment': 'D27', 'allocation_rules': 'D13', 'author': 'D5', 'contact': 'D6', 'long_term_contact': 'D7', 'step': 'D14', 'project': 'D15', 'step_in_project': 'D16', 'reference_period': 'D18', 'time_validity_limit': 'D19', 'geographic_representativeness': 'D21', 'technology_description': 'D23', 'technology_scale': 'D24', 'technology_level': 'D25', 'input_mass': 'D31', 'output_mass': 'D32'} self.VERSION_INFO_CELLS = {'version_creator': 'M6', 'version_contact': 'M7', 'version_comment': 'M9', 'inventory_review_state': 'M12'} self.VERSION_DATE_CELL = 'M8' # Row numbers for parameters blocks self.PARAMETERS_FIRST_ROW = {'Input parameter': 16, 'Calculated parameter': 27} self.PARAMETERS_LAST_ROW = {'Input parameter': 23, 'Calculated parameter': 33} # Ranges of cells to be cleaned by EldaVersionWriter.clean_data() to remove parameters and flows from the sheet. self.CELLS_TO_CLEAN = [ # First parameter block 'L16:P23', 'L27:M33', 'O27:P33', 'S16:X23', 'S27:X33', # Flow data 'C36:H185', 'J36:J185', 'L36:p185', 'S36:AD185', ] # Range of cells to be deletes (value, style, conditional formatting and data validation) # by EldaVersionWriter.clean_data() to remove parameters and flows from the sheet. self.CELLS_TO_DELETE = [ # Other parameter blocks 'Z15:{}33', ] # Ranges of cells to be replaced by EldaVersionWriter.clean_data(). # Item key is the replacement string and item value is the list of ranges of cells to be replaced self.CELLS_TO_REPLACE = {"Select a type": ['B36:B185'], # Review state 0: [ # Flows 'Q36:Q185', # Parameters 'Q16:Q23', 'Q27:Q33' ] } # Formula used for changed values conditional formatting self.CHANGED_VALUE_CF_FORMULA = \ '($M$2<>0)*(B5<>INDIRECT(ADDRESS(ROW(B5),COLUMN(B5),4,,CONCATENATE("V",$M$1,".",$M$2-1))))' # Cells coordinates used in various places self.GENERAL_COMMENT_CELL = 'D27' self.MAJOR_VERSION_NUMBER_CELL = 'M1' self.MAJOR_VERSION_NUMBER_FIXED_CELL = 'M1' self.MINOR_VERSION_NUMBER_CELL = 'M2' self.MINOR_VERSION_NUMBER_FIXED_CELL = '$M$2' self.NEXT_MINOR_VERSION_NUMBER_CELL = 'M3' self.REVIEW_STATE_CELL = 'M4' self.FIRST_INPUT_PARAMETER_VALUE_CELL = 'M16' self.FIRST_CALCULATED_PARAMETER_VALUE_CELL = 'M27' self.PROCESS_NAME_CELL = 'D10' # Comments size handling # On reading a workbook, openpyxl looses comments positions and size. # If they do not fit the default size, they must be reset manually # Width, Height self.COMMENT_DIMENSIONS = {'medium': (200, 100), 'big': (200, 150), 'huge': (500, 220), } self.ELDA_COMMENTS = { 'L12': 'medium', 'B12': 'medium', 'B14': 'huge', 'B23': 'medium', 'B25': 'huge', 'C33': 'medium', 'F35': 'medium', 'H35': 'medium', 'L35': 'medium', 'M35': 'medium', 'U15': 'medium', 'U35': 'medium', 'X35': 'big', 'Z35': 'big', 'AB35': 'big' } self.SIMAPRO_UNITS_DEFINITION_CELLS = f"{quote_sheetname('Simapro units')}!$A$1:$A$196" if elda_template_version != ELDA_TEMPLATE_VERSION: self.load_version_parameters(elda_template_version) def load_version_parameters(self, elda_template_version): """ Updates the parameters for a defined anterior version of the Elda template. If there are multiple versions between the current one and the demanded one, the changes are accumulated to go back one version at a time. Args: elda_template_version (str): Version of the Elda template for which to get the parameters. 
""" major, minor = elda_template_version.split(".") major = int(major) minor = int(minor) # If the version to go back to is anterior or equal to 0.18 if (major <= 0) or (minor <= 18): # # # Here are the differences from template 0.18 to 0.19 # # # self.FLOW_FIELDS_COLUMNS = {'type': 'B', 'name': 'C', 'library': 'D', 'compartment': 'E', 'waste_type': 'F', 'sub_compartment': 'F', 'unit': 'G', 'amount': 'H', 'formula': 'I', 'allocation': 'J', 'category': 'K', 'data_source': 'L', 'comment': 'N', 'review_state': 'P', 'comment_for_reviewer': 'O', 'reviewer_comment': 'R', 'modification_code': 'W', 'modification_comment': 'X', 'relevance_code': 'Y', 'relevance_comment': 'Z', 'confidence_code': 'AA', 'confidence_comment': 'AB', 'uncertainty': 'S', 'stdev': 'T', 'min_value': 'U', 'max_value': 'V'} self.DEFAULT_LAST_INPUT_PARAMETER_CELL_COORDS = '$K$23' self.DEFAULT_LAST_CALCULATED_PARAMETER_CELL_COORDS = '$K$33' self.LAST_INPUT_PARAMETER_CELL_COORDS_CELL = "O1" self.LAST_CALCULATED_PARAMETER_CELL_COORDS_CELL = "O2" self.PARAMETERS_NAME_COLUMN = 'K' self.VERSION_INFO_CELLS = {'version_creator': 'L6', 'version_contact': 'L7', 'version_comment': 'L9', 'inventory_review_state': 'L12'} self.VERSION_DATE_CELL = 'L8' self.CELLS_TO_CLEAN = [ 'K16:O23', 'K27:L33', 'N27:O33', 'R16:W23', 'R27:W33', 'C36:H185', 'J36:O185', 'R36:AC185', ] self.CELLS_TO_DELETE = [ # Other parameter blocks 'Y15:{}33', ] self.CELLS_TO_REPLACE = {"Select a type": ['B36:B185'], 0: [ 'P36:P185', 'P16:P23', 'P27:P33' ] } self.CHANGED_VALUE_CF_FORMULA = \ '($L$2<>0)*(B5<>INDIRECT(ADDRESS(ROW(B5),COLUMN(B5),4,,CONCATENATE("V",$L$1,".",$L$2-1))))' self.GENERAL_COMMENT_CELL = 'D27' self.MAJOR_VERSION_NUMBER_CELL = 'L1' self.MINOR_VERSION_NUMBER_CELL = 'L2' self.MINOR_VERSION_NUMBER_FIXED_CELL = '$L$2' self.NEXT_MINOR_VERSION_NUMBER_CELL = 'L3' self.REVIEW_STATE_CELL = 'L4' self.FIRST_INPUT_PARAMETER_VALUE_CELL = 'L16' self.FIRST_CALCULATED_PARAMETER_VALUE_CELL = 'L27' self.PROCESS_NAME_CELL = 'D10' self.ELDA_COMMENTS = { 'K12': 'medium', 'B12': 'medium', 'B14': 'huge', 'B23': 'medium', 'B25': 'huge', 'C33': 'medium', 'F35': 'medium', 'H35': 'medium', 'L35': 'medium', 'T15': 'medium', 'T35': 'medium', 'W35': 'big', 'Y35': 'big', 'AA35': 'big' }
PypiClean
/ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/developers.rst
======================
Custom reading/writing
======================

One of the new features introduced in ATpy 0.9.2 is the ability for users to
write their own read/write functions and *register* them with ATpy.

A read or write function needs to satisfy the following requirements:

* The first argument should be a ``Table`` instance (in the case of a single
  table reader/writer) or a ``TableSet`` instance (in the case of a table set
  reader/writer)

* The function can take any other arguments, with the exception of the
  keyword arguments ``verbose`` and ``type``.

* The function should not return anything, but rather should operate directly
  on the table or table set instance passed as the first argument

* If the file format supports masking/null values, the function should take
  into account that there are two ways to mask values (see
  :ref:`maskingandnull`). The ``Table`` instance has a ``_masked`` attribute
  that specifies whether the user wants a Table with masked arrays, or with a
  null value. The function should take this into account. For example, in the
  built-in FITS reader, the table is populated with ``add_column`` in the
  following way::

    if self._masked:
        self.add_column(name, data, unit=columns.units[i], \
                        mask=data==columns.nulls[i])
    else:
        self.add_column(name, data, unit=columns.units[i], \
                        null=columns.nulls[i])

The reader/writer function can then fill the table by using the ``Table``
methods described in :ref:`api` (for a single table reader/writer) or
:ref:`apiset` (for a table set reader/writer). In particular, a single table
reader will likely contain calls to ``add_column``, while a single table
writer will likely contain references to the ``data`` attribute of ``Table``.

Once a custom function is available, the user can register it using one of
the four ATpy functions:

* ``atpy.register_reader``: Register a reader function for single tables
* ``atpy.register_set_reader``: Register a reader function for table sets
* ``atpy.register_writer``: Register a writer function for single tables
* ``atpy.register_set_writer``: Register a writer function for table sets

The API for these functions is of the form ``(ttype, function,
override=True/False)``, where ``ttype`` is the code name for the format (like
the built-in ``fits``, ``vo``, ``ipac``, or ``sql`` types), function is the
actual function to use, and override allows the user to override existing
definitions (for example to provide an improved ``ipac`` reader).

For example, if a function is defined for reading HDF5 tables, which we can
call hdf5.read, then one would first need to register this function after
importing atpy::

    >>> import atpy
    >>> atpy.register_reader('hdf5', hdf5.read)

This type can then be used when reading in a table::

    >>> t = atpy.Table('mytable.hdf5', type='hdf5')

It is also possible to register extensions for a specific type using
``atpy.register_extensions``. This function expects a table type and a list
of file extensions to associate with it. For example, by setting::

    >>> atpy.register_extensions('hdf5', ['hdf5', 'hdf'])

One can then read in an HDF5 table without specifying the type::

    >>> t = atpy.Table('mytable.hdf5')

We encourage users to send us examples of reader/writer functions for various
formats, and would be happy in future to include readers and writers for
commonly used formats in ATpy.
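To make these requirements concrete, here is a sketch of what a custom single
table reader could look like. It is only an illustration: it assumes a plain
whitespace-separated text file with a single header row, and the
``simpletxt`` type name is invented for this example rather than being an
existing ATpy format::

    import numpy as np
    import atpy

    def read_simpletxt(table, filename):
        # `table` is the atpy.Table instance that ATpy passes as first argument
        data = np.genfromtxt(filename, names=True, dtype=None)
        for name in data.dtype.names:
            # this simple format has no null or masked values, so no special
            # handling of table._masked is needed here
            table.add_column(name, data[name])

    # register the reader and associate it with the .txt extension
    atpy.register_reader('simpletxt', read_simpletxt)
    atpy.register_extensions('simpletxt', ['txt'])

Once registered in this way, such a file can be read with
``atpy.Table('mytable.txt')`` just like the built-in formats.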
PypiClean
/GS-PRACTICE-1.0.1.tar.gz/GS-PRACTICE-1.0.1/src/gspractice/run_gspractice.py
import os import sys import argparse import logging logger = logging.getLogger(__name__) #logger.setLevel(logging.INFO) import warnings import pandas as pd import numpy as np import matplotlib.pyplot as plt from collections import Counter import rpy2.robjects as robjects from rpy2.robjects import pandas2ri pandas2ri.activate() import joblib import umap # specify library directory script_dir=os.path.dirname(os.path.abspath(__file__)) lib_dir=script_dir+"/data/" # ----- Begin code for this module. ----- _toolName = "GS-PRACTICE" _defOutPrefix = "GS-PRACTICE" # Default output prefix. _defDecompositionSuffix = "_decomposed.tsv" # from MutationalPatterns _defPredictionSuffix = "_prediction.tsv" # from subtype prediction _defPlotSuffix = "_umap" # from umap projection with TCGA _defFigureFormat="png" _defFigureSize="5,5" ############################################################ def getRequiredInputsParser(): """Constructs parser for absolute minimum required inputs.""" parser = argparse.ArgumentParser(add_help=False) inputOpts = parser.add_argument_group("Input options") inputOpts.add_argument( '-i', "--input_data", type=str, metavar='.vcf, .list, .maf', required=True, help='Input filename for single VCF file, single list of paths to multiple vcf files, single MAF file') return parser ############################################################ ############################################################ def getOptionalInputsParser(): """Constructs parser for optional arguments.""" parser = argparse.ArgumentParser(add_help=False) extraInOpts = parser.add_argument_group('Extra input options') extraInOpts.add_argument( "-sn", "--sample_name", type=str,metavar='string', required=False, help="""a sample name for single VCF file. This option is ignored for multiple VCFs or MAF file""") extraInOpts.add_argument( "-snl", "--sample_name_list", type=str,metavar='path to sample name list', required=False, help="""path to a list of sample names for multiple VCF files. This is ignored for single VCF or MAF file""") extraInOpts.add_argument( "-gv", '--genome_version', type=str, metavar='string', required=False, choices=["hg19", "hg38"], default="hg38", help='Reference genome version, choose from hg19 or hg38, default:hg38') extraInOpts.add_argument( "-udc","--use_default_clfs", action="store_true",required=False, #metavar='flag', help="""Use the default classifiers. It is recommended you make and save classifiers in your own environment. Default:False """) extraInOpts.add_argument( "-udu","--use_default_umap", action="store_true",required=False, #metavar='flag', help="""Use the default umap projector. It is recommended you make and save umap projector in your own environment. Default:False""") extraInOpts.add_argument( "-if","--input_format", choices=["maf","vcf","vcfs"],required=False,default=None, help="""You can specify the format of the input file by selecting from 'maf', 'vcf', 'vcfs'. If not specified (default), will be inferred from extension name.""") return parser ############################################################ ############################################################ def getOptionalOutputsParser(): """Constructs parser for optional arguments.""" parser = argparse.ArgumentParser(add_help=False) outOpts = parser.add_argument_group('Output options') outOpts.add_argument( "-o", '--out_prefix', type=str, metavar='prefix', required=False, default=_defOutPrefix, help="""Prefix (Path) to generating output files from analyses. 
Default: '%s'.""" % _defOutPrefix) outOpts.add_argument( "-nts",'--no_table_sort', action="store_true", #metavar='flag', help="""Do not sort the prediction result table. Default behavior is false (to sort).""") outOpts.add_argument( "-np", '--no_plot', action="store_true", #metavar='flag', help="""Do not plot mapping figure. Default behavior is false (to plot).""") outOpts.add_argument( "-ff", '--figure_format', type=str, metavar='string', required=False, default=_defFigureFormat, dest='figure_format', help="""Formats supported by matplotlib are accepted, such as 'pdf','png','jpg','tif'... dafault is 'png' """) outOpts.add_argument( "-fs", '--figure_size', type=str, metavar='string', required=False, default=_defFigureSize, dest='figure_size', help=""" Size of mapping figure. In two dimentional numbers separated by comma. Default: 5,5 """) outOpts.add_argument( "-ms", '--marker_size', type=float, metavar='float', required=False, default=None, dest='marker_size', help=""" Size of markers in the figure. This is automatically set by the sample size. Default: None """) logOptions = parser.add_argument_group('Logging options') logOptions.add_argument( '--logfile', type=str, metavar='filename', required=False, default=None, help="Output messages to given logfile, default is stderr.") logOptions.add_argument( "-v", "--verbose", action="store_true", help="Increase output verbosity") return parser ############################################################ ############################################################ def analyzeSignature(input_data, genome_version="hg38", sample_name=None, sample_name_list=None,input_format=None): # import data and library cosmic=lib_dir+"cosmic_v2_signatures.tsv" with warnings.catch_warnings(): warnings.simplefilter('ignore') robjects.r("genome_version = '%s'" % genome_version) robjects.r(""" suppressMessages(library(MutationalPatterns)) if (genome_version=="hg38"){ref_genome <-"BSgenome.Hsapiens.UCSC.hg38"} if (genome_version=="hg19"){ref_genome <-"BSgenome.Hsapiens.UCSC.hg19"} suppressMessages( library(ref_genome, character.only = TRUE) ) """) robjects.r("cosmic='%s'" % cosmic) # check format of input_data, make GenomeRange if (input_format=="maf") | (".maf" in input_data): logger.info("input file format = MAF file") robjects.r("in_f='%s'" % input_data) robjects.r(""" suppressMessages(library(GenomicRanges)) maf=read.delim(in_f,comment.char="#") # remove indels snp=maf$Variant_Type=="SNP" maf=maf[snp,] # adjust status if (!grepl(pattern="chr", x=maf$Chromosome[1])){ maf$Chromosome=paste0("chr",maf$Chromosome) } colnames(maf)[which( colnames(maf)=="Reference_Allele" )]="REF" colnames(maf)[which( colnames(maf)=="Tumor_Seq_Allele2" )]="ALT" # make grl grl=makeGRangesListFromDataFrame( maf, keep.extra.columns=TRUE, ignore.strand=TRUE, seqinfo=NULL, seqnames.field=c("Chromosome"), start.field="Start_Position", end.field="End_Position", strand.field="Strand", starts.in.df.are.0based=FALSE, split.field = "Tumor_Sample_Barcode", names.field = "Hugo_Symbol") # limit chromosomes chromosomes = paste0('chr', c(1:22,'X')) seqlevels(grl, pruning.mode = 'tidy') = chromosomes # set genome version GenomeInfoDb::genome(grl)=genome_version """) elif (input_format=="vcf") | (".vcf" in input_data): logger.info("input file format = single VCF file") robjects.r("in_f='%s'" % input_data) if sample_name: robjects.r("sample_names='%s'" % sample_name) else: basename=os.path.basename( input_data ) sample_name=basename.replace(".vcf","") robjects.r("sample_names='%s'" % sample_name) 
robjects.r(""" # make grl grl = read_vcfs_as_granges( vcf_files = in_f, sample_names = sample_names, genome = ref_genome) """) elif (input_format=="vcfs") | (".list" in input_data): logger.info("input file format = a list of path to multiple VCF files") robjects.r("vcfs=readLines('%s')" % input_data ) if sample_name_list: robjects.r("sample_names='%s'" % sample_name_list) else: robjects.r("sample_names=NULL") robjects.r(""" if (is.null(sample_names)){ basenames = gsub(pattern = "^.*/",replacement="",vcfs) sample_names = gsub(".vcf","",basenames)} else { sample_names = readLines(sample_names)} # make grl grl = read_vcfs_as_granges( vcf_files = vcfs, sample_names = sample_names, genome = ref_genome) """) else: logger.critical("Check input file name or format. Youcan select format by --input-format option.") sys.exit(-1) # MutationalPatterns robjects.r(""" # 96 mutational profile mut_mat <- mut_matrix(vcf_list = grl, ref_genome = ref_genome) # COSMIC reference signature cancer_signatures = read.table(cosmic, sep = "\t", header = TRUE) # Match the order of the mutation types to MutationalPatterns standard new_order = match(row.names(mut_mat), cancer_signatures$MutationType) # Reorder cancer signatures dataframe cancer_signatures = cancer_signatures[as.vector(new_order),] # Add trinucletiode changes names as row.names row.names(cancer_signatures) = cancer_signatures$MutationType # Keep only 96 contributions of the signatures in matrix cancer_signatures = as.matrix(cancer_signatures[,4:ncol(cancer_signatures)]) # Fit mutation matrix to the COSMIC mutational signatures: fit_res = fit_to_signatures(mut_mat, cancer_signatures) out_df=t(fit_res$contribution) columns=colnames(out_df) index=rownames(out_df) """) # back to python, make pandas dataframe df=pd.DataFrame(robjects.globalenv["out_df"],columns=robjects.globalenv["columns"],index=robjects.globalenv["index"]) logger.info("finished analyzing mutational signature.") return df ############################################################ ############################################################ def predictSubtype(df_decomp,no_table_sort=False, use_default_clfs=False): # import four classifiers if use_default_clfs: logger.warning("You are using default-version classifiers, we recommend you to make and save them in your environment.") svc=joblib.load(lib_dir+"TCGA_7181_svc_c1_g01.joblib") rfc=joblib.load(lib_dir+"TCGA_7181_rfc_ne100.joblib") knn=joblib.load(lib_dir+"TCGA_7181_knn_n5_dis.joblib") lrc=joblib.load(lib_dir+"TCGA_7181_lrc_c1.joblib") else: logger.info("Import four classifiers.") svc=joblib.load(lib_dir+"SVC.joblib") rfc=joblib.load(lib_dir+"RFC.joblib") knn=joblib.load(lib_dir+"KNN.joblib") lrc=joblib.load(lib_dir+"LRC.joblib") # import annotations & colors dict_cluster_numbers={"SMK":0, "UVL":1, "APB":2, "POL":3, "MRD":4,"HRD":5, "GNS":6,"AGE":7,"UND":8} dict_cluster_points={"SMK":1, "UVL":1, "APB":1, "POL":1, "MRD":1,"HRD":0,"GNS":0,"AGE":0} # prediction input_df=np.log10(df_decomp+1) preds=[] logger.info("Predicting subtype by the four classifiers") for clf in [knn,svc, rfc,lrc]: preds.append(clf.predict(input_df)) df_preds=pd.DataFrame( {"KNN":preds[0],"SVC":preds[1], "RF":preds[2], "LR":preds[3]}, index=input_df.index) details,cons,topcounts=[],[],[] # consensus for idx in df_preds.index: counter=Counter( df_preds.loc[idx,"KNN":"LR"]).most_common() topcounts.append( counter[0][1] ) detail="" for x in counter: detail+=x[0]+":"+str(x[1])+"," details.append( detail[:-1] ) if counter[0][1]>=3: cons.append(counter[0][0] ) else: 
cons.append("UND") df_preds["TGS"]=cons df_preds["Details"]=details df_point=df_preds.iloc[:,0:4].replace( dict_cluster_points ) df_preds["irGS_count"]=np.sum(df_point,axis=1) df_preds["irGS"]=(df_preds["irGS_count"]>=3).astype(int) # return if no_table_sort: logger.warning("Resulting table are not sorted by predicted subtypes.") return df_preds else: logger.info("Resulting table are sorted by predicted subtypes.") df_numbers=df_preds.iloc[:,0:5].replace(dict_cluster_numbers) df_numbers["TopCounts"]=topcounts df_numbers_sort=df_numbers.sort_values( ["TGS","TopCounts","KNN","SVC","RF","LR"], ascending=[True,False,True,True,True,True]) df_numbers_sort=df_numbers_sort.astype(float) df_preds_sort=df_preds.loc[df_numbers_sort.index,:] return df_preds_sort ############################################################ ############################################################ def visualizePrediction(df_decomp,df_preds,out_f,figure_size="5,5",marker_size=None,use_default_umap=False): # import TCGA mapping if use_default_umap: logger.warning("You use default UMAP projection.") u=joblib.load(lib_dir+"TCGA_7181_umap_projector.joblib") else: logger.debug("Load UMAP configuration.") u=joblib.load(lib_dir+"UMAP_projector.joblib") tcga_mutsig_log10=pd.read_csv(lib_dir+"TCGA_7181_df_mutsig_log10.tsv",sep="\t",index_col=0) X0=u.transform(tcga_mutsig_log10) with open(lib_dir+"TCGA_7181_cluster_colors.tsv") as f: tcga_colors=f.read().splitlines() dict_cluster_colors={'SMK': 'red','UVL': 'blue', 'APB': 'green','POL': 'brown', 'MRD': 'purple', 'HRD': 'hotpink', 'GNS': 'c', 'AGE': 'y', 'UND': 'grey'} ## random shuffle before plotting shuffle_numbers=np.arange(len(X0)) np.random.seed(777) np.random.shuffle(shuffle_numbers) X0=X0[shuffle_numbers,:] tcga_colors=[tcga_colors[x] for x in shuffle_numbers] # configure results input_df=np.log10(df_decomp+1) X=u.transform(input_df) sample_order=input_df.index df_preds_order=df_preds.loc[sample_order,:] colors=[ dict_cluster_colors[cluster] for cluster in df_preds_order["TGS"]] ## set marker size markeredgewidth = 1 if marker_size is None: if input_df.shape[0] <= 30: marker_size=150 elif input_df.shape[0] <=100: marker_size=100 markeredgewidth=0.75 elif input_df.shape[0] <=250: marker_size=50 markeredgewidth=0.5 else: marker_size=25 markeredgewidth=0.2 else: logger.warning("You set marker size = %d and figure size =%s " %(marker_size, figure_size)) # plot and save figsize=tuple(float(x) for x in figure_size.split(",")) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) ax.scatter(X0[:,0],X0[:,1],c=tcga_colors,s=1) ax.scatter(X[:,0],X[:,1], edgecolors="k", c=colors,s=marker_size,marker="*",linewidth=markeredgewidth) ax.set_xlabel("UMAP1",fontsize=10) ax.set_ylabel("UMAP2",fontsize=10) ax.tick_params(labelsize=8) ax.set_xticks([-5,0,5,10,15]) ax.set_yticks([-4,0,4,8,12,16]) plt.rcParams["figure.dpi"]=300 plt.savefig(out_f,dpi=300,bbox_inches="tight") ############################################################ ############################################################ def getStandaloneParser(): """Constructs parser to run this script as standalone application.""" parser = argparse.ArgumentParser( description="""Perform genome variant decomposition by MutationalPatterns package, subsequently classify the sample by predictions from models built by scikit-learn, and plot samples by umap algorithm projection with TCGA samples (optional)""", #epilog="""Results of processing and prediction are output in # multiple files in the local directory.""", parents=( 
getRequiredInputsParser(), getOptionalInputsParser(), getOptionalOutputsParser() )) return parser ############################################################ ############################################################################### ############################################################################### ############################################################################### ############################################################################### ############################################################################### def runAsStandalone(): """Contains script execution flow when run as standalone application. """ #### Pre-execution internal configuation check. #initLogger = logging.Logger(name=_toolName) #initLogger.addHandler(logging.StreamHandler(sys.stderr)) #### Configuration fine, execute as planned. useParser = getStandaloneParser() args = useParser.parse_args() # Setup main logger for messages. logStream = logging.StreamHandler( open(args.logfile, "w") if args.logfile else sys.stderr) # Either streams of a logger or the logger itself can have its level set, # but ONLY the logger itself can have the logger level retrieved. #logStream.setLevel(logging.DEBUG if args.verbose else logging.INFO) logStream.setFormatter(logging.Formatter( "%(asctime)-15s %(levelname)s:%(message)s", datefmt="%Y-%m-%S %H:%M")) logger = logging.Logger(name=_toolName) logger.setLevel(logging.DEBUG if args.verbose else logging.INFO) logger.addHandler(logStream) logger.debug("Invocation arguments: %s" % args) logging.getLogger("GS-PRACTICE").addHandler(logStream) logging.getLogger("GS-PRACTICE").setLevel(logging.INFO) # Start to analyze mutational signatures. logger.info("Start to analyze mutational signatures.") df_decomp=analyzeSignature(input_data=args.input_data, genome_version=args.genome_version, sample_name=args.sample_name, sample_name_list=args.sample_name_list, input_format=args.input_format) # Write results logger.debug("Decomposition result to %s " % args.out_prefix + _defDecompositionSuffix) df_decomp.to_csv(args.out_prefix + _defDecompositionSuffix,sep="\t") # Proceed to prediction by multi-estimator. logger.info("Start to predict subtypes.") df_preds=predictSubtype(df_decomp=df_decomp, no_table_sort=args.no_table_sort, use_default_clfs=args.use_default_clfs) # Write out results logger.debug("Prediction result to %s" % args.out_prefix + _defPredictionSuffix) df_preds.to_csv(args.out_prefix + _defPredictionSuffix,sep="\t") # Proceed to plot umap if args.no_plot==False: out_f=args.out_prefix + _defPlotSuffix + "." + args.figure_format logger.debug("Plot and save mapping figure to %s" % out_f) visualizePrediction(df_decomp=df_decomp, df_preds=df_preds, out_f=out_f, figure_size=args.figure_size, marker_size=args.marker_size, use_default_umap=args.use_default_umap) else: logger.warning("Skip plotting figure.") pass # Clean up and terminate. logger.info("Execution completed.") # end of running script as standalone application. # What to do if script is a main driver program. if __name__ == "__main__": import doctest numFail, numTests = doctest.testmod(verbose=False) if numFail: logger.critical('Expected functionality in doctests fail. ' + 'Aborting standalone execution.') sys.exit(-1) # Otherwise, doctests passed, run standalone application. runAsStandalone() # ----- End code for this module. -----
PypiClean
/HolmesV-2021.10.8.tar.gz/HolmesV-2021.10.8/mycroft/client/text/text_client.py
import sys import io from math import ceil import xdg.BaseDirectory from mycroft.client.text.gui_server import start_qml_gui from mycroft.tts import TTS import os import os.path import time import curses import textwrap import json import mycroft.version from threading import Thread, Lock from mycroft.messagebus.client import MessageBusClient from mycroft.messagebus.message import Message from mycroft.util.log import LOG from mycroft.configuration import Configuration, BASE_FOLDER from mycroft.configuration.holmes import is_using_xdg import locale # Curses uses LC_ALL to determine how to display chars set it to system # default locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default preferred_encoding = locale.getpreferredencoding() bSimple = False bus = None # Mycroft messagebus connection config = {} # Will be populated by the Mycroft configuration event_thread = None history = [] chat = [] # chat history, oldest at the lowest index line = "" scr = None log_line_offset = 0 # num lines back in logs to show log_line_lr_scroll = 0 # amount to scroll left/right for long lines longest_visible_line = 0 # for HOME key auto_scroll = True # for debugging odd terminals last_key = "" show_last_key = False show_gui = None # None = not initialized, else True/False gui_text = [] log_lock = Lock() max_log_lines = 5000 mergedLog = [] filteredLog = [] default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"] log_filters = list(default_log_filters) log_files = [] find_str = None cy_chat_area = 7 # default chat history height (in lines) size_log_area = 0 # max number of visible log lines, calculated during draw # Values used to display the audio meter show_meter = True meter_peak = 20 meter_cur = -1 meter_thresh = -1 SCR_MAIN = 0 SCR_HELP = 1 SCR_SKILLS = 2 screen_mode = SCR_MAIN subscreen = 0 # for help pages, etc. REDRAW_FREQUENCY = 10 # seconds between full redraws last_redraw = time.time() - (REDRAW_FREQUENCY - 1) # seed for 1s redraw screen_lock = Lock() is_screen_dirty = True # Curses color codes (reassigned at runtime) CLR_HEADING = 0 CLR_FIND = 0 CLR_CHAT_RESP = 0 CLR_CHAT_QUERY = 0 CLR_CMDLINE = 0 CLR_INPUT = 0 CLR_LOG1 = 0 CLR_LOG2 = 0 CLR_LOG_DEBUG = 0 CLR_LOG_ERROR = 0 CLR_LOG_CMDMESSAGE = 0 CLR_METER_CUR = 0 CLR_METER = 0 # Allow Ctrl+C catching... ctrl_c_was_pressed = False def ctrl_c_handler(signum, frame): global ctrl_c_was_pressed ctrl_c_was_pressed = True def ctrl_c_pressed(): global ctrl_c_was_pressed if ctrl_c_was_pressed: ctrl_c_was_pressed = False return True else: return False ############################################################################## # Helper functions def clamp(n, smallest, largest): """ Force n to be between smallest and largest, inclusive """ return max(smallest, min(n, largest)) def handleNonAscii(text): """ If default locale supports UTF-8 reencode the string otherwise remove the offending characters. """ if preferred_encoding == 'ASCII': return ''.join([i if ord(i) < 128 else ' ' for i in text]) else: return text.encode(preferred_encoding) ############################################################################## # Settings filename = "mycroft_cli.conf" def load_mycroft_config(bus): """ Load the mycroft config and connect it to updates over the messagebus. """ Configuration.set_config_update_handlers(bus) return Configuration.get() def connect_to_mycroft(): """ Connect to the mycroft messagebus and load and register config on the bus. 
Sets the bus and config global variables """ global bus global config bus = connect_to_messagebus() config = load_mycroft_config(bus) def load_settings(): global log_filters global cy_chat_area global show_last_key global max_log_lines global show_meter config_file = None # Old location path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf") if not is_using_xdg(): config_file = path elif os.path.isfile(path): from mycroft.configuration.config import _log_old_location_deprecation _log_old_location_deprecation(path) config_file = path # Check XDG_CONFIG_DIR if config_file is None: for conf_dir in xdg.BaseDirectory.load_config_paths(BASE_FOLDER): xdg_file = os.path.join(conf_dir, filename) if os.path.isfile(xdg_file): config_file = xdg_file break # Check /etc/mycroft if config_file is None: config_file = os.path.join("/etc/mycroft", filename) try: with io.open(config_file, 'r') as f: config = json.load(f) if "filters" in config: # Disregard the filtering of DEBUG messages log_filters = [f for f in config["filters"] if f != "DEBUG"] if "cy_chat_area" in config: cy_chat_area = config["cy_chat_area"] if "show_last_key" in config: show_last_key = config["show_last_key"] if "max_log_lines" in config: max_log_lines = config["max_log_lines"] if "show_meter" in config: show_meter = config["show_meter"] except Exception as e: LOG.info("Ignoring failed load of settings file") def save_settings(): config = {} config["filters"] = log_filters config["cy_chat_area"] = cy_chat_area config["show_last_key"] = show_last_key config["max_log_lines"] = max_log_lines config["show_meter"] = show_meter # Old location path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf") if not is_using_xdg(): config_file = path else: config_file = os.path.join(xdg.BaseDirectory.xdg_config_home, BASE_FOLDER, filename) with io.open(config_file, 'w') as f: f.write(str(json.dumps(config, ensure_ascii=False))) ############################################################################## # Log file monitoring class LogMonitorThread(Thread): def __init__(self, filename, logid): global log_files Thread.__init__(self) self.filename = filename self.st_results = os.stat(filename) self.logid = str(logid) log_files.append(filename) def run(self): while True: try: st_results = os.stat(self.filename) # Check if file has been modified since last read if not st_results.st_mtime == self.st_results.st_mtime: self.read_file_from(self.st_results.st_size) self.st_results = st_results set_screen_dirty() except OSError: # ignore any file IO exceptions, just try again pass time.sleep(0.1) def read_file_from(self, bytefrom): global meter_cur global meter_thresh global filteredLog global mergedLog global log_line_offset global log_lock with io.open(self.filename) as fh: fh.seek(bytefrom) while True: line = fh.readline() if line == "": break # Allow user to filter log output ignore = False if find_str: if find_str not in line: ignore = True else: for filtered_text in log_filters: if filtered_text in line: ignore = True break with log_lock: if ignore: mergedLog.append(self.logid + line.rstrip()) else: if bSimple: print(line.rstrip()) else: filteredLog.append(self.logid + line.rstrip()) mergedLog.append(self.logid + line.rstrip()) if not auto_scroll: log_line_offset += 1 # Limit log to max_log_lines if len(mergedLog) >= max_log_lines: with log_lock: cToDel = len(mergedLog) - max_log_lines if len(filteredLog) == len(mergedLog): del filteredLog[:cToDel] del mergedLog[:cToDel] # release log_lock before calling to prevent deadlock if 
len(filteredLog) != len(mergedLog): rebuild_filtered_log() def start_log_monitor(filename): if os.path.isfile(filename): thread = LogMonitorThread(filename, len(log_files)) thread.setDaemon(True) # this thread won't prevent prog from exiting thread.start() class MicMonitorThread(Thread): def __init__(self, filename): Thread.__init__(self) self.filename = filename self.st_results = None def run(self): while True: try: st_results = os.stat(self.filename) if (not self.st_results or not st_results.st_ctime == self.st_results.st_ctime or not st_results.st_mtime == self.st_results.st_mtime): self.read_mic_level() self.st_results = st_results set_screen_dirty() except Exception: # Ignore whatever failure happened and just try again later pass time.sleep(0.2) def read_mic_level(self): global meter_cur global meter_thresh with io.open(self.filename, 'r') as fh: line = fh.readline() # Just adjust meter settings # Ex:Energy: cur=4 thresh=1.5 muted=0 cur_text, thresh_text, _ = line.split(' ')[-3:] meter_thresh = float(thresh_text.split('=')[-1]) meter_cur = float(cur_text.split('=')[-1]) class ScreenDrawThread(Thread): def __init__(self): Thread.__init__(self) def run(self): global scr global screen_lock global is_screen_dirty global log_lock while scr: try: if is_screen_dirty: # Use a lock to prevent screen corruption when drawing # from multiple threads with screen_lock: is_screen_dirty = False if screen_mode == SCR_MAIN: with log_lock: do_draw_main(scr) elif screen_mode == SCR_HELP: do_draw_help(scr) finally: time.sleep(0.01) def start_mic_monitor(filename): if os.path.isfile(filename): thread = MicMonitorThread(filename) thread.setDaemon(True) # this thread won't prevent prog from exiting thread.start() def add_log_message(message): """ Show a message for the user (mixed in the logs) """ global filteredLog global mergedLog global log_line_offset global log_lock with log_lock: message = "@" + message # the first byte is a code filteredLog.append(message) mergedLog.append(message) if log_line_offset != 0: log_line_offset = 0 # scroll so the user can see the message set_screen_dirty() def clear_log(): global filteredLog global mergedLog global log_line_offset global log_lock with log_lock: mergedLog = [] filteredLog = [] log_line_offset = 0 def rebuild_filtered_log(): global filteredLog global mergedLog global log_lock with log_lock: filteredLog = [] for line in mergedLog: # Apply filters ignore = False if find_str and find_str != "": # Searching log if find_str not in line: ignore = True else: # Apply filters for filtered_text in log_filters: if filtered_text and filtered_text in line: ignore = True break if not ignore: filteredLog.append(line) ############################################################################## # Capturing output from Mycroft def handle_speak(event): global chat utterance = event.data.get('utterance') utterance = TTS.remove_ssml(utterance) if bSimple: print(">> " + utterance) else: chat.append(">> " + utterance) set_screen_dirty() def handle_utterance(event): global chat global history utterance = event.data.get('utterances')[0] history.append(utterance) chat.append(utterance) set_screen_dirty() def connect(bus): """ Run the mycroft messagebus referenced by bus. Args: bus: Mycroft messagebus instance """ bus.run_forever() ############################################################################## # Capturing the messagebus def handle_message(msg): # TODO: Think this thru a little bit -- remove this logging within core? 
# add_log_message(msg) pass ############################################################################## # "Graphic primitives" def draw(x, y, msg, pad=None, pad_chr=None, clr=None): """Draw a text to the screen Args: x (int): X coordinate (col), 0-based from upper-left y (int): Y coordinate (row), 0-based from upper-left msg (str): string to render to screen pad (bool or int, optional): if int, pads/clips to given length, if True use right edge of the screen. pad_chr (char, optional): pad character, default is space clr (int, optional): curses color, Defaults to CLR_LOG1. """ if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS: return if x + len(msg) > curses.COLS: s = msg[:curses.COLS - x] else: s = msg if pad: ch = pad_chr or " " if pad is True: pad = curses.COLS # pad to edge of screen s += ch * (pad - x - len(msg)) else: # pad to given length (or screen width) if x + pad > curses.COLS: pad = curses.COLS - x s += ch * (pad - len(msg)) if not clr: clr = CLR_LOG1 scr.addstr(y, x, s, clr) ############################################################################## # Screen handling def init_screen(): global CLR_HEADING global CLR_FIND global CLR_CHAT_RESP global CLR_CHAT_QUERY global CLR_CMDLINE global CLR_INPUT global CLR_LOG1 global CLR_LOG2 global CLR_LOG_DEBUG global CLR_LOG_ERROR global CLR_LOG_CMDMESSAGE global CLR_METER_CUR global CLR_METER if curses.has_colors(): curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) bg = curses.COLOR_BLACK for i in range(1, curses.COLORS): curses.init_pair(i + 1, i, bg) # Colors (on black backgound): # 1 = white 5 = dk blue # 2 = dk red 6 = dk purple # 3 = dk green 7 = dk cyan # 4 = dk yellow 8 = lt gray CLR_HEADING = curses.color_pair(1) CLR_CHAT_RESP = curses.color_pair(4) CLR_CHAT_QUERY = curses.color_pair(7) CLR_FIND = curses.color_pair(4) CLR_CMDLINE = curses.color_pair(7) CLR_INPUT = curses.color_pair(7) CLR_LOG1 = curses.color_pair(3) CLR_LOG2 = curses.color_pair(6) CLR_LOG_DEBUG = curses.color_pair(4) CLR_LOG_ERROR = curses.color_pair(2) CLR_LOG_CMDMESSAGE = curses.color_pair(2) CLR_METER_CUR = curses.color_pair(2) CLR_METER = curses.color_pair(4) def scroll_log(up, num_lines=None): global log_line_offset # default to a half-page if not num_lines: num_lines = size_log_area // 2 with log_lock: if up: log_line_offset -= num_lines else: log_line_offset += num_lines if log_line_offset > len(filteredLog): log_line_offset = len(filteredLog) - 10 if log_line_offset < 0: log_line_offset = 0 set_screen_dirty() def _do_meter(height): if not show_meter or meter_cur == -1: return # The meter will look something like this: # # 8.4 * # * # -*- 2.4 # * # * # * # Where the left side is the current level and the right side is # the threshold level for 'silence'. global scr global meter_peak if meter_cur > meter_peak: meter_peak = meter_cur + 1 scale = meter_peak if meter_peak > meter_thresh * 3: scale = meter_thresh * 3 h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1) h_thresh = clamp( int((float(meter_thresh) / scale) * height), 0, height - 1) clr = curses.color_pair(4) # dark yellow str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4' str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. 
'3.24' meter_width = len(str_level) + len(str_thresh) + 4 for i in range(0, height): meter = "" if i == h_cur: # current energy level meter = str_level else: meter = " " * len(str_level) if i == h_thresh: # add threshold indicator meter += "--- " else: meter += " " if i == h_thresh: # 'silence' threshold energy level meter += str_thresh # draw the line meter += " " * (meter_width - len(meter)) scr.addstr(curses.LINES - 1 - i, curses.COLS - len(meter) - 1, meter, clr) # draw an asterisk if the audio energy is at this level if i <= h_cur: if meter_cur > meter_thresh: clr_bar = curses.color_pair(3) # dark green for loud else: clr_bar = curses.color_pair(5) # dark blue for 'silent' scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4, "*", clr_bar) def _do_gui(gui_width): clr = curses.color_pair(2) # dark red x = curses.COLS - gui_width y = 3 draw( x, y, " " + make_titlebar( "= GUI", gui_width - 1) + " ", clr=CLR_HEADING) cnt = len(gui_text) + 1 if cnt > curses.LINES - 15: cnt = curses.LINES - 15 for i in range(0, cnt): draw(x, y + 1 + i, " !", clr=CLR_HEADING) if i < len(gui_text): draw(x + 2, y + 1 + i, gui_text[i], pad=gui_width - 3) else: draw(x + 2, y + 1 + i, "*" * (gui_width - 3)) draw(x + (gui_width - 1), y + 1 + i, "!", clr=CLR_HEADING) draw(x, y + cnt, " " + "-" * (gui_width - 2) + " ", clr=CLR_HEADING) def set_screen_dirty(): global is_screen_dirty global screen_lock with screen_lock: is_screen_dirty = True def do_draw_main(scr): global log_line_offset global longest_visible_line global last_redraw global auto_scroll global size_log_area if time.time() - last_redraw > REDRAW_FREQUENCY: # Do a full-screen redraw periodically to clear and # noise from non-curses text that get output to the # screen (e.g. modules that do a 'print') scr.clear() last_redraw = time.time() else: scr.erase() # Display log output at the top cLogs = len(filteredLog) + 1 # +1 for the '--end--' size_log_area = curses.LINES - (cy_chat_area + 5) start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset end = cLogs - log_line_offset if start < 0: end -= start start = 0 if end > cLogs: end = cLogs auto_scroll = (end == cLogs) # adjust the line offset (prevents paging up too far) log_line_offset = cLogs - end # Top header and line counts if find_str: scr.addstr(0, 0, "Search Results: ", CLR_HEADING) scr.addstr(0, 16, find_str, CLR_FIND) scr.addstr(0, 16 + len(find_str), " ctrl+X to end" + " " * (curses.COLS - 31 - 12 - len(find_str)) + str(start) + "-" + str(end) + " of " + str(cLogs), CLR_HEADING) else: scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) + str(start) + "-" + str(end) + " of " + str(cLogs), CLR_HEADING) ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ===" scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING) scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING) y = 2 for i in range(start, end): if i >= cLogs - 1: log = ' ^--- NEWEST ---^ ' else: log = filteredLog[i] logid = log[0] if len(log) > 25 and log[5] == '-' and log[8] == '-': log = log[11:] # skip logid & date at the front of log line else: log = log[1:] # just skip the logid # Categorize log line if "| DEBUG |" in log: log = log.replace("Skills ", "") clr = CLR_LOG_DEBUG elif "| ERROR |" in log: clr = CLR_LOG_ERROR else: if logid == "1": clr = CLR_LOG1 elif logid == "@": clr = CLR_LOG_CMDMESSAGE else: clr = CLR_LOG2 # limit output line to screen width len_line = len(log) if len(log) > curses.COLS: start = len_line - (curses.COLS - 4) - log_line_lr_scroll if start < 0: start = 0 
end = start + (curses.COLS - 4) if start == 0: log = log[start:end] + "~~~~" # start.... elif end >= len_line - 1: log = "~~~~" + log[start:end] # ....end else: log = "~~" + log[start:end] + "~~" # ..middle.. if len_line > longest_visible_line: longest_visible_line = len_line scr.addstr(y, 0, handleNonAscii(log), clr) y += 1 # Log legend in the lower-right y_log_legend = curses.LINES - (3 + cy_chat_area) scr.addstr(y_log_legend, curses.COLS // 2 + 2, make_titlebar("Log Output Legend", curses.COLS // 2 - 2), CLR_HEADING) scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2, "DEBUG output", CLR_LOG_DEBUG) if len(log_files) > 0: scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2, os.path.basename(log_files[0]) + ", other", CLR_LOG2) if len(log_files) > 1: scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2, os.path.basename(log_files[1]), CLR_LOG1) # Meter y_meter = y_log_legend if show_meter: scr.addstr(y_meter, curses.COLS - 14, " Mic Level ", CLR_HEADING) # History log in the middle y_chat_history = curses.LINES - (3 + cy_chat_area) chat_width = curses.COLS // 2 - 2 chat_out = [] scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width), CLR_HEADING) # Build a nicely wrapped version of the chat log idx_chat = len(chat) - 1 while len(chat_out) < cy_chat_area and idx_chat >= 0: if chat[idx_chat][0] == '>': wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" ", width=chat_width) else: wrapper = textwrap.TextWrapper(width=chat_width) chatlines = wrapper.wrap(chat[idx_chat]) for txt in reversed(chatlines): if len(chat_out) >= cy_chat_area: break chat_out.insert(0, txt) idx_chat -= 1 # Output the chat y = curses.LINES - (2 + cy_chat_area) for txt in chat_out: if txt.startswith(">> ") or txt.startswith(" "): clr = CLR_CHAT_RESP else: clr = CLR_CHAT_QUERY scr.addstr(y, 1, handleNonAscii(txt), clr) y += 1 if show_gui and curses.COLS > 20 and curses.LINES > 20: _do_gui(curses.COLS - 20) # Command line at the bottom ln = line if len(line) > 0 and line[0] == ":": scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):", CLR_CMDLINE) scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE) ln = line[1:] else: prompt = "Input (':' for command, Ctrl+C to quit)" if show_last_key: prompt += " === keycode: " + last_key scr.addstr(curses.LINES - 2, 0, make_titlebar(prompt, curses.COLS - 1), CLR_HEADING) scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING) _do_meter(cy_chat_area + 2) scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT) # Curses doesn't actually update the display until refresh() is called scr.refresh() def make_titlebar(title, bar_length): return title + " " + ("=" * (bar_length - 1 - len(title))) ############################################################################## # Help system help_struct = [('Log Scrolling shortcuts', [("Up / Down / PgUp / PgDn", "scroll thru history"), ("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"), ("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs" + "(jump to newest)"), ("Left / Right", "scroll long lines left/right"), ("Home / End", "scroll to start/end of long lines")]), ("Query History shortcuts", [("Ctrl+N / Ctrl+Left", "previous query"), ("Ctrl+P / Ctrl+Right", "next query")]), ("General Commands (type ':' to enter command mode)", [(":quit or :exit", "exit the program"), (":meter (show|hide)", "display the microphone level"), (":keycode (show|hide)", "display typed key codes (mainly debugging)"), (":history (# lines)", "set size of visible history buffer"), (":clear", "flush the logs")]), ("Log 
Manipulation Commands", [(":filter 'STR'", "adds a log filter (optional quotes)"), (":filter remove 'STR'", "removes a log filter"), (":filter (clear|reset)", "reset filters"), (":filter (show|list)", "display current filters"), (":find 'STR'", "show logs containing 'str'"), (":log level (DEBUG|INFO|ERROR)", "set logging level"), (":log bus (on|off)", "control logging of messagebus messages")]), ("Skill Debugging Commands", [(":skills", "list installed Skills"), (":api SKILL", "show Skill's public API"), (":activate SKILL", "activate Skill, e.g. 'activate skill-wiki'"), (":deactivate SKILL", "deactivate Skill"), (":keep SKILL", "deactivate all Skills except the indicated Skill")])] help_longest = 0 for s in help_struct: for ent in s[1]: help_longest = max(help_longest, len(ent[0])) HEADER_SIZE = 2 HEADER_FOOTER_SIZE = 4 def num_help_pages(): lines = 0 for section in help_struct: lines += 3 + len(section[1]) return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE)) def do_draw_help(scr): def render_header(): scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING) scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING) def render_help(txt, y_pos, i, first_line, last_line, clr): if i >= first_line and i < last_line: scr.addstr(y_pos, 0, txt, clr) y_pos += 1 return y_pos def render_footer(page, total): text = "Page {} of {} [ Any key to continue ]".format(page, total) scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING) scr.erase() render_header() y = HEADER_SIZE page = subscreen + 1 # Find first and last taking into account the header and footer first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE) last = first + (curses.LINES - HEADER_FOOTER_SIZE) i = 0 for section in help_struct: y = render_help(section[0], y, i, first, last, CLR_HEADING) i += 1 y = render_help("=" * (curses.COLS - 1), y, i, first, last, CLR_HEADING) i += 1 for line in section[1]: words = line[1].split() ln = line[0].ljust(help_longest + 1) for w in words: if len(ln) + 1 + len(w) < curses.COLS: ln += " " + w else: y = render_help(ln, y, i, first, last, CLR_CMDLINE) ln = " ".ljust(help_longest + 2) + w y = render_help(ln, y, i, first, last, CLR_CMDLINE) i += 1 y = render_help(" ", y, i, first, last, CLR_CMDLINE) i += 1 if i > last: break render_footer(page, num_help_pages()) # Curses doesn't actually update the display until refresh() is called scr.refresh() def show_help(): global screen_mode global subscreen if screen_mode != SCR_HELP: screen_mode = SCR_HELP subscreen = 0 set_screen_dirty() def show_next_help(): global screen_mode global subscreen if screen_mode == SCR_HELP: subscreen += 1 if subscreen >= num_help_pages(): screen_mode = SCR_MAIN set_screen_dirty() ############################################################################## # Skill debugging def show_skills(skills): """Show list of loaded Skills in as many column as necessary.""" global scr global screen_mode if not scr: return screen_mode = SCR_SKILLS row = 2 column = 0 def prepare_page(): global scr nonlocal row nonlocal column scr.erase() scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE) scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE) row = 2 column = 0 prepare_page() col_width = 0 skill_names = sorted(skills.keys()) for skill in skill_names: if skills[skill]['active']: color = curses.color_pair(4) else: color = curses.color_pair(2) scr.addstr(row, column, " {}".format(skill), color) row += 1 col_width = max(col_width, len(skill)) if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]: 
column = 0 scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to continue", CLR_HEADING) scr.refresh() wait_for_any_key() prepare_page() elif row == curses.LINES - 2: # Reached bottom of screen, start at top and move output to a # New column row = 2 column += col_width + 2 col_width = 0 if column > curses.COLS - 20: # End of screen break scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return", CLR_HEADING) scr.refresh() def show_skill_api(skill, data): """Show available help on Skill's API.""" global scr global screen_mode if not scr: return screen_mode = SCR_SKILLS row = 2 column = 0 def prepare_page(): global scr nonlocal row nonlocal column scr.erase() scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill), CLR_CMDLINE) scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE) row = 2 column = 4 prepare_page() for key in data: color = curses.color_pair(4) scr.addstr(row, column, "{} ({})".format(key, data[key]['type']), CLR_HEADING) row += 2 if 'help' in data[key]: help_text = data[key]['help'].split('\n') for line in help_text: scr.addstr(row, column + 2, line, color) row += 1 row += 2 else: row += 1 if row == curses.LINES - 5: scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to continue", CLR_HEADING) scr.refresh() wait_for_any_key() prepare_page() elif row == curses.LINES - 5: # Reached bottom of screen, start at top and move output to a # New column row = 2 scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return", CLR_HEADING) scr.refresh() def center(str_len): # generate number of characters needed to center a string # of the given length return " " * ((curses.COLS - str_len) // 2) ############################################################################## # Main UI lopo def _get_cmd_param(cmd, keyword): # Returns parameter to a command. Will de-quote. # Ex: find 'abc def' returns: abc def # find abc def returns: abc def if isinstance(keyword, list): for w in keyword: cmd = cmd.replace(w, "").strip() else: cmd = cmd.replace(keyword, "").strip() if not cmd: return None last_char = cmd[-1] if last_char == '"' or last_char == "'": parts = cmd.split(last_char) return parts[-2] else: parts = cmd.split(" ") return parts[-1] def wait_for_any_key(): """Block until key is pressed. This works around curses.error that can occur on old versions of ncurses. 
""" while True: try: scr.get_wch() # blocks except curses.error: # Loop if get_wch throws error time.sleep(0.05) else: break def handle_cmd(cmd): global show_meter global screen_mode global log_filters global cy_chat_area global find_str global show_last_key if "show" in cmd and "log" in cmd: pass elif "help" in cmd: show_help() elif "exit" in cmd or "quit" in cmd: return 1 elif "keycode" in cmd: # debugging keyboard if "hide" in cmd or "off" in cmd: show_last_key = False elif "show" in cmd or "on" in cmd: show_last_key = True elif "meter" in cmd: # microphone level meter if "hide" in cmd or "off" in cmd: show_meter = False elif "show" in cmd or "on" in cmd: show_meter = True elif "find" in cmd: find_str = _get_cmd_param(cmd, "find") rebuild_filtered_log() elif "filter" in cmd: if "show" in cmd or "list" in cmd: # display active filters add_log_message("Filters: " + str(log_filters)) return if "reset" in cmd or "clear" in cmd: log_filters = list(default_log_filters) else: # extract last word(s) param = _get_cmd_param(cmd, "filter") if param: if "remove" in cmd and param in log_filters: log_filters.remove(param) else: log_filters.append(param) rebuild_filtered_log() add_log_message("Filters: " + str(log_filters)) elif "clear" in cmd: clear_log() elif "log" in cmd: # Control logging behavior in all Mycroft processes if "level" in cmd: level = _get_cmd_param(cmd, ["log", "level"]) bus.emit(Message("mycroft.debug.log", data={'level': level})) elif "bus" in cmd: state = _get_cmd_param(cmd, ["log", "bus"]).lower() if state in ["on", "true", "yes"]: bus.emit(Message("mycroft.debug.log", data={'bus': True})) elif state in ["off", "false", "no"]: bus.emit(Message("mycroft.debug.log", data={'bus': False})) elif "history" in cmd: # extract last word(s) lines = int(_get_cmd_param(cmd, "history")) if not lines or lines < 1: lines = 1 max_chat_area = curses.LINES - 7 if lines > max_chat_area: lines = max_chat_area cy_chat_area = lines elif "skills" in cmd: # List loaded skill message = bus.wait_for_response( Message('skillmanager.list'), reply_type='mycroft.skills.list') if message: show_skills(message.data) wait_for_any_key() screen_mode = SCR_MAIN set_screen_dirty() elif "deactivate" in cmd: skills = cmd.split()[1:] if len(skills) > 0: for s in skills: bus.emit(Message("skillmanager.deactivate", data={'skill': s})) else: add_log_message('Usage :deactivate SKILL [SKILL2] [...]') elif "keep" in cmd: s = cmd.split() if len(s) > 1: bus.emit(Message("skillmanager.keep", data={'skill': s[1]})) else: add_log_message('Usage :keep SKILL') elif "activate" in cmd: skills = cmd.split()[1:] if len(skills) > 0: for s in skills: bus.emit(Message("skillmanager.activate", data={'skill': s})) else: add_log_message('Usage :activate SKILL [SKILL2] [...]') elif "api" in cmd: parts = cmd.split() if len(parts) < 2: return skill = parts[1] message = bus.wait_for_response(Message('{}.public_api'.format(skill))) if message: show_skill_api(skill, message.data) scr.get_wch() # blocks screen_mode = SCR_MAIN set_screen_dirty() # TODO: More commands return 0 # do nothing upon return def handle_is_connected(msg): add_log_message("Connected to Messagebus!") # start_qml_gui(bus, gui_text) def handle_reconnecting(): add_log_message("Looking for Messagebus websocket...") def gui_main(stdscr): global scr global bus global line global log_line_lr_scroll global longest_visible_line global find_str global last_key global history global screen_lock global show_gui global config scr = stdscr init_screen() scr.keypad(1) scr.notimeout(True) 
bus.on('speak', handle_speak) bus.on('message', handle_message) bus.on('recognizer_loop:utterance', handle_utterance) bus.on('connected', handle_is_connected) bus.on('reconnecting', handle_reconnecting) add_log_message("Establishing Mycroft Messagebus connection...") gui_thread = ScreenDrawThread() gui_thread.setDaemon(True) # this thread won't prevent prog from exiting gui_thread.start() hist_idx = -1 # index, from the bottom c = 0 try: while True: set_screen_dirty() c = 0 code = 0 try: if ctrl_c_pressed(): # User hit Ctrl+C. treat same as Ctrl+X c = 24 else: # Don't block, this allows us to refresh the screen while # waiting on initial messagebus connection, etc scr.timeout(1) c = scr.get_wch() # unicode char or int for special keys if c == -1: continue except curses.error: # This happens in odd cases, such as when you Ctrl+Z # the CLI and then resume. Curses fails on get_wch(). continue if isinstance(c, int): code = c else: code = ord(c) # Convert VT100 ESC codes generated by some terminals if code == 27: # NOTE: Not sure exactly why, but the screen can get corrupted # if we draw to the screen while doing a scr.getch(). So # lock screen updates until the VT100 sequence has been # completely read. with screen_lock: scr.timeout(0) c1 = -1 start = time.time() while c1 == -1: c1 = scr.getch() if time.time() - start > 1: break # 1 second timeout waiting for ESC code c2 = -1 while c2 == -1: c2 = scr.getch() if time.time() - start > 1: # 1 second timeout break # 1 second timeout waiting for ESC code if c1 == 79 and c2 == 120: c = curses.KEY_UP elif c1 == 79 and c2 == 116: c = curses.KEY_LEFT elif c1 == 79 and c2 == 114: c = curses.KEY_DOWN elif c1 == 79 and c2 == 118: c = curses.KEY_RIGHT elif c1 == 79 and c2 == 121: c = curses.KEY_PPAGE # aka PgUp elif c1 == 79 and c2 == 115: c = curses.KEY_NPAGE # aka PgDn elif c1 == 79 and c2 == 119: c = curses.KEY_HOME elif c1 == 79 and c2 == 113: c = curses.KEY_END else: c = c1 if c1 != -1: last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2) code = c else: last_key = "ESC" else: if code < 33: last_key = str(code) else: last_key = str(code) scr.timeout(-1) # resume blocking if code == 27: # Hitting ESC twice clears the entry line hist_idx = -1 line = "" elif c == curses.KEY_RESIZE: # Generated by Curses when window/screen has been resized y, x = scr.getmaxyx() curses.resizeterm(y, x) # resizeterm() causes another curses.KEY_RESIZE, so # we need to capture that to prevent a loop of resizes c = scr.get_wch() elif screen_mode == SCR_HELP: # in Help mode, any key goes to next page show_next_help() continue elif c == '\n' or code == 10 or code == 13 or code == 343: # ENTER sends the typed line to be processed by Mycroft if line == "": continue if line[:1] == ":": # Lines typed like ":help" are 'commands' if handle_cmd(line[1:]) == 1: break else: # Treat this as an utterance bus.emit(Message("recognizer_loop:utterance", {'utterances': [line.strip()], 'lang': config.get('lang', 'en-us')}, {'client_name': 'mycroft_cli', 'source': 'debug_cli', 'destination': ["skills"]} )) hist_idx = -1 line = "" elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous) # Move up the history stack hist_idx = clamp(hist_idx + 1, -1, len(history) - 1) if hist_idx >= 0: line = history[len(history) - hist_idx - 1] else: line = "" elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next) # Move down the history stack hist_idx = clamp(hist_idx - 1, -1, len(history) - 1) if hist_idx >= 0: line = history[len(history) - hist_idx - 1] else: line = "" elif c == curses.KEY_LEFT: # 
scroll long log lines left log_line_lr_scroll += curses.COLS // 4 elif c == curses.KEY_RIGHT: # scroll long log lines right log_line_lr_scroll -= curses.COLS // 4 if log_line_lr_scroll < 0: log_line_lr_scroll = 0 elif c == curses.KEY_HOME: # HOME scrolls log lines all the way to the start log_line_lr_scroll = longest_visible_line elif c == curses.KEY_END: # END scrolls log lines all the way to the end log_line_lr_scroll = 0 elif c == curses.KEY_UP: scroll_log(False, 1) elif c == curses.KEY_DOWN: scroll_log(True, 1) elif c == curses.KEY_NPAGE: # aka PgDn # PgDn to go down a page in the logs scroll_log(True) elif c == curses.KEY_PPAGE: # aka PgUp # PgUp to go up a page in the logs scroll_log(False) elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn scroll_log(True, max_log_lines) elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp scroll_log(False, max_log_lines) elif code == curses.KEY_BACKSPACE or code == 127: # Backspace to erase a character in the utterance line = line[:-1] elif code == 6: # Ctrl+F (Find) line = ":find " elif code == 7: # Ctrl+G (start GUI) if show_gui is None: start_qml_gui(bus, gui_text) show_gui = not show_gui elif code == 18: # Ctrl+R (Redraw) scr.erase() elif code == 24: # Ctrl+X (Exit) if find_str: # End the find session find_str = None rebuild_filtered_log() elif line.startswith(":"): # cancel command mode line = "" else: # exit CLI break elif code > 31 and isinstance(c, str): # Accept typed character in the utterance line += c finally: scr.erase() scr.refresh() scr = None def simple_cli(): global bSimple bSimple = True bus.on('speak', handle_speak) try: while True: # Sleep for a while so all the output that results # from the previous command finishes before we print. time.sleep(1.5) print("Input (Ctrl+C to quit):") line = sys.stdin.readline() bus.emit(Message("recognizer_loop:utterance", {'utterances': [line.strip()]}, {'client_name': 'mycroft_simple_cli', 'source': 'debug_cli', 'destination': ["skills"]})) except KeyboardInterrupt as e: # User hit Ctrl+C to quit print("") except KeyboardInterrupt as e: LOG.exception(e) event_thread.exit() sys.exit() def connect_to_messagebus(): """ Connect to the mycroft messagebus and launch a thread handling the connection. Returns: WebsocketClient """ bus = MessageBusClient() # Mycroft messagebus connection event_thread = Thread(target=connect, args=[bus]) event_thread.setDaemon(True) event_thread.start() return bus
PypiClean
/OSG-Gratia-Viewer-0.1.tar.gz/OSG-Gratia-Viewer-0.1/src/gratia/tools/gridscan_download.py
import sys, urllib, urllib2, datetime, time

try:
    from pysqlite2 import dbapi2 as sqlite
    sqlite_present = True
except Exception, e:
    sqlite_present = False
    try:
        import sqlite3 as sqlite
        sqlite_present = True
    except:
        pass


def do_excuse(line):
    if line.find("Checking for OSG $OSG_GRID existence") >= 0:
        return True
    if line.find("Checking for OSG $WNTMP definition") >= 0:
        return True
    if line.find("Checking for MonALISA configuration") >= 0:
        return True
    if line.find("Checking for VDS mpiexec (OPTIONAL) existence: FAIL") >= 0:
        return True
    if line.find("Checking for VDS k.2 (OPTIONAL) existence") >= 0:
        return True
    if line.find("Checking for a valid proxy for") >= 0:
        return True
    if line.find("Checking for OSG $Monalisa_HOME definition") >= 0:
        return True
    if line.find("Checking for OSG $APP writability") >= 0:
        return True
    return False


schema = """
CREATE TABLE GridScan (
    time timestamp,
    sitename varchar(255),
    pass boolean
)
"""

insert_sql_str = """
INSERT into GridScan VALUES ( ?, ?, ? )
"""

list_tests_str = """
SELECT distinct time from GridScan WHERE sitename=?
"""


def connect_sqlite():
    return sqlite.connect('gridscan.db')


def insert_test(conn, site, timestamp, status):
    curs = conn.cursor()
    curs.execute(insert_sql_str, (timestamp, site, status))
    conn.commit()


def test_time_list(conn, site):
    curs = conn.cursor()
    rows = curs.execute(list_tests_str, (site, )).fetchall()
    time_tuples = [time.strptime(i[0], '%Y-%m-%d %H:%M:%S') for i in rows]
    return [datetime.datetime(*i[:6]) for i in time_tuples]


gridscan_url = "http://scan.grid.iu.edu/cgi-bin/get_grid_sv?"


def site_listing():
    query = urllib.urlencode({"get": "set1"})
    doc = urllib2.urlopen(gridscan_url + query)
    retval = {}
    for line in doc:
        id, site, ce = line.split(',')
        retval[site] = id
    return retval


def test_listing(id):
    query = urllib.urlencode({'id': id, 'get': 'listing'})
    doc = urllib2.urlopen(gridscan_url + query)
    retval = []
    for line in doc:
        retval.append(line.strip())
    return retval


def test_status(id, testname):
    query = urllib.urlencode({'id': id, 'get': testname})
    doc = urllib2.urlopen(gridscan_url + query)
    for line in doc:
        if line.find('FAIL') >= 0 and not do_excuse(line):
            print line.strip()
            return False
    return True


def datetime_from_listing(name):
    d, t = name.split('.')[0].split('-')[-2:]
    return datetime.datetime(*time.strptime(d + t, '%Y%m%d%H%M%S')[0:6])


def main():
    if not sqlite_present:
        print >> sys.stderr, "SQLite not present."
        sys.exit(1)
    sites = site_listing()
    conn = connect_sqlite()
    for site, id in sites.items():
        print "Uploading site %s." % site
        listings = test_listing(id)
        times = test_time_list(conn, site)
        for listing in listings:
            d = datetime_from_listing(listing)
            if d in times:
                continue
            print "New test: %s" % d.strftime("%Y%m%d %H:%M:%S")
            status = test_status(id, listing)
            insert_test(conn, site, d, status)


if __name__ == '__main__':
    main()
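
# Illustrative helper, not called anywhere in this script: a minimal sketch of
# how the gridscan.db file written by main() could be summarised afterwards.
# It relies only on the GridScan table defined in `schema` above; the pass
# column is stored as 0/1, so SUM() counts passing runs.
def print_pass_summary():
    conn = connect_sqlite()
    curs = conn.cursor()
    summary_sql = "SELECT sitename, SUM(pass), COUNT(*) FROM GridScan GROUP BY sitename"
    for sitename, passed, total in curs.execute(summary_sql).fetchall():
        print("%s: %s of %s gridscan runs passed" % (sitename, passed, total))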
PypiClean
/DDLJ-0.0.3-py3-none-any.whl/DDLj/DDLj.py
import json
import pandas as pd
import re
from flatten_json import flatten, unflatten

temp = []
DDF_list = []
ColList = []
Domain_Com = []
MinMaxList = []
colBuild = ''
sqlstring = ''
colName = ''
ArrayList = []
Empty_Df = []


# L2P_builder converts a logical (camelCase) name to a physical name built from
# glossary words separated by '_'. If no glossary input has been provided,
# the logical name is returned as is.
def L2P_builder(char, Glossarypath):
    if Glossarypath == None or Glossarypath == '':
        try:
            df = pd.DataFrame(columns=['Name', 'Abbreviation'])
        except:
            print('No Glossary')
    else:
        try:
            df = pd.read_csv(Glossarypath)
        except:
            df = pd.DataFrame(columns=['Name', 'Abbreviation'])
    q = 0
    colval = ''
    splitted = re.sub('(?!^)([A-Z][a-z]+)', r' \1', char).split()
    for val in splitted:
        q = q + 1
        if q == len(splitted):
            if val[len(val) - 2:len(val)] == 'ID':
                try:
                    val = df.loc[df.Name == val[0:len(val) - 2], 'Abbreviation'].item()
                    val = val + '_ID'
                    colval += val
                except:
                    val = val[0:len(val) - 2]
                    val = val + '_ID'
                    colval += val
            else:
                try:
                    val = df.loc[df.Name == val, 'Abbreviation'].item()
                    colval += val
                except:
                    colval += val
        else:
            try:
                val = df.loc[df.Name == val, 'Abbreviation'].item()
                val = val + '_'
                colval += val
            except:
                val = val + '_'
                colval += val
    return colval


# StringEnds: return the last word of a camelCase string.
def StringEnds(char):
    q = 0
    colval = ''
    splitted = re.sub('(?!^)([A-Z][a-z]+)', r' \1', char).split()
    for val in splitted:
        q = q + 1
        if q == len(splitted):
            return val


# Recursive function to generate the DDL string array using the flattened json.
def sqlStrinNthLevel(json_schema_flat, n, ValStr, Glossarypath):
    sqlstring = ''
    ColList = []
    for keys, vals in json_schema_flat.items():
        try:
            arrayField = keys.split('_')[1]
        except:
            arrayField = None
        if ValStr == None:
            if (keys.endswith('_type') or keys.endswith('_format')) and keys.count('_') == n:
                if vals == 'string' and keys.endswith('_type') and keys.count('_') == n:
                    try:
                        maxLength = eval("json_schema_flat['" + keys[:len(keys) - 5] + "_maxLength" + "']")
                        sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Varchar2(" + str(maxLength) + "),"
                        ColList.append(sqlstring)
                    except Exception as e:
                        if keys[:len(keys) - 5].endswith('Date') and keys.count('_') == n:
                            sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Date,"
                            ColList.append(sqlstring)
                        elif (keys[:len(keys) - 5].endswith('Timestamp') or keys[:len(keys) - 5].endswith('DateTime')) and keys.count('_') == n:
                            sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Timestamp(6),"
                            ColList.append(sqlstring)
                        else:
                            e
                elif vals == 'number' and keys.count('_') == n:
                    Endword = StringEnds(keys.split("_")[n - 1])
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Number(38,10),"
                    ColList.append(sqlstring)
                elif vals == 'date' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Date,"
                    ColList.append(sqlstring)
                elif vals == 'date-time' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Timestamp(6),"
                    ColList.append(sqlstring)
                elif vals == 'boolean' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Char(1),"
                    ColList.append(sqlstring)
                elif vals == 'object' and keys.count('_') == n:
                    sqlStrinNthLevel(json_schema_flat, n + 2, None, Glossarypath)
        elif ValStr != None and arrayField == ValStr:
            if keys.endswith('_type') and arrayField == ValStr and keys.count('_') == n:
                if vals == 'string' and keys.count('_') == n:
                    try:
                        maxLength = eval("json_schema_flat['" + keys[:len(keys) - 5] + "_maxLength" + "']")
                        sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Varchar2(" + str(maxLength) + "),"
                        ColList.append(sqlstring)
                    except Exception as e:
                        if keys[:len(keys) - 5].endswith('Date') and keys.count('_') == n:
                            sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Date,"
                            ColList.append(sqlstring)
                        elif (keys[:len(keys) - 5].endswith('Timestamp') or keys[:len(keys) - 5].endswith('DateTime')) and keys.count('_') == n:
                            sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Timestamp(6),"
                            ColList.append(sqlstring)
                        else:
                            e
                elif vals == 'number' and arrayField == ValStr and keys.count('_') == n:
                    Endword = StringEnds(keys.split("_")[n - 1])
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Number(38,10),"
                    ColList.append(sqlstring)
                elif vals == 'date' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Date,"
                    ColList.append(sqlstring)
                elif vals == 'date-time' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Timestamp(6),"
                    ColList.append(sqlstring)
                elif vals == 'boolean' and keys.endswith('_format') and keys.count('_') == n:
                    sqlstring = L2P_builder(keys.split("_")[n - 1], Glossarypath) + " Char(1),"
                    ColList.append(sqlstring)
                elif vals == 'object' and arrayField == ValStr and keys.count('_') == n:
                    # recurse into the nested object with the flattened schema
                    sqlStrinNthLevel(json_schema_flat, n + 2, ValStr, Glossarypath)
    return ColList


# Writing to the output file path.
def callMain(file_path, Database, Glossarypath, outputfilePath):
    full_sql_str = ''
    with open(file_path) as json_data:
        json_schema = json.load(json_data)
        json_schema_flat = flatten(json_schema)
    if Glossarypath == None or Glossarypath == '':
        try:
            df = pd.DataFrame(columns=['Name', 'Abbreviation'])
        except:
            print('no glossary')
    else:
        try:
            df = pd.read_csv(Glossarypath)
        except:
            df = pd.DataFrame(columns=['Name', 'Abbreviation'])
    try:
        FileOpen = open(outputfilePath, "w+")
        f = sqlStrinNthLevel(json_schema_flat, 2, None, Glossarypath)
        f = list(dict.fromkeys(f))
        for flat_x, flat_y in json_schema_flat.items():
            if flat_x == 'title':
                Table_Name = flat_y
        DDLout = 'Create Table ' + Table_Name + ' ('
        for ddlTxt in f:
            # Default database is Oracle; the databases below are supported as well.
            if Database in ('PostgreSQL', 'MYSQL', 'DB2', 'MariaDB'):
                ddlTxt = ddlTxt.replace(' Varchar2', ' Varchar')
                ddlTxt = ddlTxt.replace(' Number(38,10)', ' NUMERIC(38,10)')
                ddlTxt = ddlTxt.replace(' Timestamp(6)', ' Timestamp')
                ddlTxt = ddlTxt.replace(' Char(1)', ' boolean')
            DDLout = DDLout + ddlTxt + '\n'
        DDLout = DDLout[:len(DDLout) - 2] + ');'
        FileOpen.write(DDLout)
        full_sql_str = DDLout
        for p, q in json_schema_flat.items():
            if q == 'array':
                ArrayList.append(p.split('_')[1])
        for Array_DDLs in ArrayList:
            DDLoutChild = '\n\n'
            First = sqlStrinNthLevel(json_schema_flat, 6, Array_DDLs, Glossarypath)
            First = list(dict.fromkeys(First))
            DDLoutChild = '\nCreate Table ' + Array_DDLs[:len(Array_DDLs) - 5] + ' ('
            for listFst in First:
                DDLoutChild = DDLoutChild + listFst + '\n'
            DDLoutChild = DDLoutChild[:len(DDLoutChild) - 2] + ');'
            FileOpen.write(DDLoutChild)
            full_sql_str = full_sql_str + DDLoutChild
        FileOpen.close()
        return full_sql_str
    except:
        df = pd.DataFrame(columns=['Name', 'Abbreviation'])


def genddl(file_path, Database, Glossarypath, outputfilePath):
    try:
        with open(file_path) as json_data:
            json_schema = json.load(json_data)
            json_schema_flat = flatten(json_schema)
        x = callMain(file_path, Database, Glossarypath, outputfilePath)
        return x
    except Exception as e:
        print(e)


if __name__ == '__main__':
    genddl(file_path, Database, Glossarypath, outputfilePath)
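
# Illustrative usage, with hypothetical file names that are not shipped with
# the package: a minimal sketch of turning a JSON schema into an Oracle DDL
# script via genddl(). The optional glossary CSV needs 'Name' and
# 'Abbreviation' columns, which is what L2P_builder() reads above.
#
#   from DDLj import genddl
#   ddl = genddl('person.schema.json', 'Oracle', 'glossary.csv', 'person_ddl.sql')
#   print(ddl)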
PypiClean
/Levenshtein-0.19.3-cp310-cp310-macosx_10_9_x86_64.whl/Levenshtein-0.19.3.data/data/README.md
# Levenshtein

<p>
  <a href="https://github.com/maxbachmann/Levenshtein/actions">
    <img src="https://github.com/maxbachmann/Levenshtein/workflows/Build/badge.svg" alt="Continuous Integration">
  </a>
  <a href="https://pypi.org/project/levenshtein/">
    <img src="https://img.shields.io/pypi/v/levenshtein" alt="PyPI package version">
  </a>
  <a href="https://www.python.org">
    <img src="https://img.shields.io/pypi/pyversions/levenshtein" alt="Python versions">
  </a>
  <a href="https://maxbachmann.github.io/Levenshtein">
    <img src="https://img.shields.io/badge/-documentation-blue" alt="Documentation">
  </a>
  <a href="https://github.com/maxbachmann/Levenshtein/blob/main/COPYING">
    <img src="https://img.shields.io/github/license/maxbachmann/Levenshtein" alt="GitHub license">
  </a>
</p>

## Introduction

The Levenshtein Python C extension module contains functions for fast computation of:

* Levenshtein (edit) distance, and edit operations
* string similarity
* approximate median strings, and generally string averaging
* string sequence and set similarity

This is a fork of [ztane/python-Levenshtein](https://github.com/ztane/python-Levenshtein), since the original project is no longer actively maintained.

## Requirements

* Python 3.5 or later

## Installation

```bash
pip install levenshtein
```

## Documentation

The documentation for the current version can be found at [https://maxbachmann.github.io/Levenshtein/](https://maxbachmann.github.io/Levenshtein/)

## License

Levenshtein is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

See the file [COPYING](https://github.com/maxbachmann/Levenshtein/blob/main/COPYING) for the full text of GNU General Public License version 2.
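
## Usage example

A minimal sketch of the module in use. The function names below (`distance`, `ratio`, `median`) come from the package's documented API; return values are omitted here because they depend on the inputs.

```python
import Levenshtein

# edit distance between two strings
Levenshtein.distance("lewenstein", "levenshtein")

# normalized similarity in the range [0, 1]
Levenshtein.ratio("lewenstein", "levenshtein")

# greedy approximation of a median string for a list of strings
Levenshtein.median(["levenshtein", "lewenstein", "levenstein"])
```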
PypiClean
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/recurrence_repetition_update.py
import re # noqa: F401 import sys # noqa: F401 from firefly_iii_client.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from ..model_utils import OpenApiModel from firefly_iii_client.exceptions import ApiAttributeError def lazy_import(): from firefly_iii_client.model.recurrence_repetition_type import RecurrenceRepetitionType globals()['RecurrenceRepetitionType'] = RecurrenceRepetitionType class RecurrenceRepetitionUpdate(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'moment': (str,), # noqa: E501 'skip': (int,), # noqa: E501 'type': (RecurrenceRepetitionType,), # noqa: E501 'weekend': (int,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'moment': 'moment', # noqa: E501 'skip': 'skip', # noqa: E501 'type': 'type', # noqa: E501 'weekend': 'weekend', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """RecurrenceRepetitionUpdate - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
_visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) moment (str): Information that defined the type of repetition. - For 'daily', this is empty. - For 'weekly', it is day of the week between 1 and 7 (Monday - Sunday). - For 'ndom', it is '1,2' or '4,5' or something else, where the first number is the week in the month, and the second number is the day in the week (between 1 and 7). '2,3' means: the 2nd Wednesday of the month - For 'monthly' it is the day of the month (1 - 31) - For yearly, it is a full date, ie '2018-09-17'. The year you use does not matter. . [optional] # noqa: E501 skip (int): How many occurrences to skip. 0 means skip nothing. 1 means every other.. [optional] # noqa: E501 type (RecurrenceRepetitionType): [optional] # noqa: E501 weekend (int): How to respond when the recurring transaction falls in the weekend. Possible values: 1. Do nothing, just create it 2. Create no transaction. 3. Skip to the previous Friday. 4. Skip to the next Monday. . [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """RecurrenceRepetitionUpdate - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. 
snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) moment (str): Information that defined the type of repetition. - For 'daily', this is empty. - For 'weekly', it is day of the week between 1 and 7 (Monday - Sunday). - For 'ndom', it is '1,2' or '4,5' or something else, where the first number is the week in the month, and the second number is the day in the week (between 1 and 7). '2,3' means: the 2nd Wednesday of the month - For 'monthly' it is the day of the month (1 - 31) - For yearly, it is a full date, ie '2018-09-17'. The year you use does not matter. . [optional] # noqa: E501 skip (int): How many occurrences to skip. 0 means skip nothing. 1 means every other.. [optional] # noqa: E501 type (RecurrenceRepetitionType): [optional] # noqa: E501 weekend (int): How to respond when the recurring transaction falls in the weekend. Possible values: 1. Do nothing, just create it 2. Create no transaction. 3. Skip to the previous Friday. 4. Skip to the next Monday. . [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
PypiClean
/BootstrapPy-0.6.tar.gz/BootstrapPy-0.6/bootstrappy/templates/+package+/static/js/i18n/grid.locale-he.js
;(function($){
/**
 * jqGrid Hebrew Translation
 * Shuki Shukrun [email protected]
 * http://trirand.com/blog/
 * Dual licensed under the MIT and GPL licenses:
 * http://www.opensource.org/licenses/mit-license.php
 * http://www.gnu.org/licenses/gpl.html
**/
$.jgrid = $.jgrid || {};
$.extend($.jgrid,{
    defaults : {
        recordtext: "מציג {0} - {1} מתוך {2}",
        emptyrecords: "אין רשומות להציג",
        loadtext: "טוען...",
        pgtext : "דף {0} מתוך {1}"
    },
    search : {
        caption: "מחפש...",
        Find: "חפש",
        Reset: "התחל",
        odata : ['שווה', 'לא שווה', 'קטן', 'קטן או שווה','גדול','גדול או שווה', 'מתחיל ב','לא מתחיל ב','נמצא ב','לא נמצא ב','מסתיים ב','לא מסתיים ב','מכיל','לא מכיל'],
        groupOps: [ { op: "AND", text: "הכל" }, { op: "OR", text: "אחד מ" } ],
        matchText: " תואם",
        rulesText: " חוקים"
    },
    edit : {
        addCaption: "הוסף רשומה",
        editCaption: "ערוך רשומה",
        bSubmit: "שלח",
        bCancel: "בטל",
        bClose: "סגור",
        saveData: "נתונים השתנו! לשמור?",
        bYes : "כן",
        bNo : "לא",
        bExit : "בטל",
        msg: {
            required:"שדה חובה",
            number:"אנא, הכנס מספר תקין",
            minValue:"ערך צריך להיות גדול או שווה ל ",
            maxValue:"ערך צריך להיות קטן או שווה ל ",
            email: "היא לא כתובת איימל תקינה",
            integer: "אנא, הכנס מספר שלם",
            date: "אנא, הכנס תאריך תקין",
            url: "הכתובת אינה תקינה. דרושה תחילית ('http://' או 'https://')",
            nodefined : " is not defined!",
            novalue : " return value is required!",
            customarray : "Custom function should return array!",
            customfcheck : "Custom function should be present in case of custom checking!"
        }
    },
    view : {
        caption: "הצג רשומה",
        bClose: "סגור"
    },
    del : {
        caption: "מחק",
        msg: "האם למחוק את הרשומה/ות המסומנות?",
        bSubmit: "מחק",
        bCancel: "בטל"
    },
    nav : {
        edittext: "",
        edittitle: "ערוך שורה מסומנת",
        addtext:"",
        addtitle: "הוסף שורה חדשה",
        deltext: "",
        deltitle: "מחק שורה מסומנת",
        searchtext: "",
        searchtitle: "חפש רשומות",
        refreshtext: "",
        refreshtitle: "טען גריד מחדש",
        alertcap: "אזהרה",
        alerttext: "אנא, בחר שורה",
        viewtext: "",
        viewtitle: "הצג שורה מסומנת"
    },
    col : {
        caption: "הצג/הסתר עמודות",
        bSubmit: "שלח",
        bCancel: "בטל"
    },
    errors : {
        errcap : "שגיאה",
        nourl : "לא הוגדרה כתובת url",
        norecords: "אין רשומות לעבד",
        model : "אורך של colNames <> colModel!"
    },
    formatter : {
        integer : {thousandsSeparator: " ", defaultValue: '0'},
        number : {decimalSeparator:".", thousandsSeparator: " ", decimalPlaces: 2, defaultValue: '0.00'},
        currency : {decimalSeparator:".", thousandsSeparator: " ", decimalPlaces: 2, prefix: "", suffix:"", defaultValue: '0.00'},
        date : {
            dayNames: [
                "א", "ב", "ג", "ד", "ה", "ו", "ש",
                "ראשון", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת"
            ],
            monthNames: [
                "ינו", "פבר", "מרץ", "אפר", "מאי", "יונ", "יול", "אוג", "ספט", "אוק", "נוב", "דצמ",
                "ינואר", "פברואר", "מרץ", "אפריל", "מאי", "יוני", "יולי", "אוגוסט", "ספטמבר", "אוקטובר", "נובמבר", "דצמבר"
            ],
            AmPm : ["לפני הצהרים","אחר הצהרים","לפני הצהרים","אחר הצהרים"],
            S: function (j) {return j < 11 || j > 13 ? ['', '', '', ''][Math.min((j - 1) % 10, 3)] : ''},
            srcformat: 'Y-m-d',
            newformat: 'd/m/Y',
            masks : {
                ISO8601Long:"Y-m-d H:i:s",
                ISO8601Short:"Y-m-d",
                ShortDate: "n/j/Y",
                LongDate: "l, F d, Y",
                FullDateTime: "l, F d, Y g:i:s A",
                MonthDay: "F d",
                ShortTime: "g:i A",
                LongTime: "g:i:s A",
                SortableDateTime: "Y-m-d\\TH:i:s",
                UniversalSortableDateTime: "Y-m-d H:i:sO",
                YearMonth: "F, Y"
            },
            reformatAfterEdit : false
        },
        baseLinkUrl: '',
        showAction: '',
        target: '',
        checkbox : {disabled:true},
        idName : 'id'
    }
});
})(jQuery);
PypiClean
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap/site/content/docs/5.0/utilities/vertical-align.md
---
layout: docs
title: Vertical alignment
description: Easily change the vertical alignment of inline, inline-block, inline-table, and table cell elements.
group: utilities
---

Change the alignment of elements with the [`vertical-alignment`](https://developer.mozilla.org/en-US/docs/Web/CSS/vertical-align) utilities. Please note that vertical-align only affects inline, inline-block, inline-table, and table cell elements.

Choose from `.align-baseline`, `.align-top`, `.align-middle`, `.align-bottom`, `.align-text-bottom`, and `.align-text-top` as needed.

To vertically center non-inline content (like `<div>`s and more), use our [flex box utilities]({{< docsref "/utilities/flex#align-items" >}}).

With inline elements:

{{< example >}}
<span class="align-baseline">baseline</span>
<span class="align-top">top</span>
<span class="align-middle">middle</span>
<span class="align-bottom">bottom</span>
<span class="align-text-top">text-top</span>
<span class="align-text-bottom">text-bottom</span>
{{< /example >}}

With table cells:

{{< example >}}
<table style="height: 100px;">
  <tbody>
    <tr>
      <td class="align-baseline">baseline</td>
      <td class="align-top">top</td>
      <td class="align-middle">middle</td>
      <td class="align-bottom">bottom</td>
      <td class="align-text-top">text-top</td>
      <td class="align-text-bottom">text-bottom</td>
    </tr>
  </tbody>
</table>
{{< /example >}}

## Sass

### Utilities API

Vertical align utilities are declared in our utilities API in `scss/_utilities.scss`. [Learn how to use the utilities API.]({{< docsref "/utilities/api#using-the-api" >}})

{{< scss-docs name="utils-vertical-align" file="scss/_utilities.scss" >}}
PypiClean