column | dtype | values
hexsha | string | lengths 40 to 40
size | int64 | 5 to 2.06M
ext | string | 10 classes
lang | string | 1 class
max_stars_repo_path | string | lengths 3 to 248
max_stars_repo_name | string | lengths 5 to 125
max_stars_repo_head_hexsha | string | lengths 40 to 78
max_stars_repo_licenses | list | lengths 1 to 10
max_stars_count | int64 | 1 to 191k
max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24
max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24
max_issues_repo_path | string | lengths 3 to 248
max_issues_repo_name | string | lengths 5 to 125
max_issues_repo_head_hexsha | string | lengths 40 to 78
max_issues_repo_licenses | list | lengths 1 to 10
max_issues_count | int64 | 1 to 67k
max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24
max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24
max_forks_repo_path | string | lengths 3 to 248
max_forks_repo_name | string | lengths 5 to 125
max_forks_repo_head_hexsha | string | lengths 40 to 78
max_forks_repo_licenses | list | lengths 1 to 10
max_forks_count | int64 | 1 to 105k
max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24
max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24
content | string | lengths 5 to 2.06M
avg_line_length | float64 | 1 to 1.02M
max_line_length | int64 | 3 to 1.03M
alphanum_fraction | float64 | 0 to 1
count_classes | int64 | 0 to 1.6M
score_classes | float64 | 0 to 1
count_generators | int64 | 0 to 651k
score_generators | float64 | 0 to 1
count_decorators | int64 | 0 to 990k
score_decorators | float64 | 0 to 1
count_async_functions | int64 | 0 to 235k
score_async_functions | float64 | 0 to 1
count_documentation | int64 | 0 to 1.04M
score_documentation | float64 | 0 to 1
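The column layout above matches what Hugging Face-style dataset viewers report for source-code corpora: per-repository metadata, the raw file in content, and derived statistics. As a minimal, illustrative sketch of how a table with this schema might be consumed, assuming it is published as a datasets-loadable corpus (the repository id below is a placeholder, not the actual dataset name):

from datasets import load_dataset

# Placeholder dataset id; substitute the real repository this schema belongs to.
ds = load_dataset("example-org/python-code-corpus", split="train", streaming=True)

# Keep reasonably documented files that define at least one class.
for row in ds:
    if row["score_documentation"] > 0.1 and row["count_classes"] > 0:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break

Each record below lists its values in the same order as the schema, with the content field holding the full source file.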
4a37446fd29ea2b6044d47c4ec0b0027825d51e4
2,623
py
Python
tests/unit/app/test_session.py
bernease/whylogs-python
cfd2a2f71280537aae584cbd40a752fbe7da647b
[ "Apache-2.0" ]
null
null
null
tests/unit/app/test_session.py
bernease/whylogs-python
cfd2a2f71280537aae584cbd40a752fbe7da647b
[ "Apache-2.0" ]
null
null
null
tests/unit/app/test_session.py
bernease/whylogs-python
cfd2a2f71280537aae584cbd40a752fbe7da647b
[ "Apache-2.0" ]
null
null
null
import pytest

from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config
from whylogs.app.config import SessionConfig
from whylogs.app.session import Session
from pandas import util


def test_get_global_session():
    _session = None
    session = get_or_create_session()
    global_session = get_session()
    assert session == global_session


def test_reset():
    session = get_or_create_session()
    reset_default_session()
    global_session = get_session()
    assert global_session.project is not None


def test_session_log_dataframe():
    _session = None
    session = session_from_config(SessionConfig(
        "default-project", "default-pipeline", [], False
    ))
    df = util.testing.makeDataFrame()
    profile = session.log_dataframe(df)
    assert session.logger() is not None
    assert session.logger("default-project").dataset_name == "default-project"


def test_session_profile():
    session = session_from_config(SessionConfig(
        "default-project", "default-pipeline", [], False
    ))
    df = util.testing.makeDataFrame()
    profile = session.log_dataframe(df)
    assert profile is not None
    summary = profile.flat_summary()
    flat_summary = summary['summary']
    assert len(flat_summary) == 4


def test_profile_df():
    session = get_or_create_session()
    df = util.testing.makeDataFrame()
    log_profile = session.log_dataframe(df)
    profile = session.profile_dataframe(df)
    assert log_profile.name == profile.name
    assert log_profile.dataset_timestamp == profile.dataset_timestamp
    assert log_profile.session_timestamp == profile.session_timestamp
    assert len(profile.columns) == 4
    assert len(log_profile.tags) == 1
    assert len(profile.tags) == 2


def test_close_session():
    session = get_or_create_session()
    session.close()
    assert session.is_active() == False
    df = util.testing.makeDataFrame()
    log_profile = session.log_dataframe(df)
    assert log_profile == None
    profile = session.profile_dataframe(df)
    assert profile == None
    profile = session.new_profile(df)
    assert profile == None
    with pytest.raises(RuntimeError):
        session.logger()


def test_logger_cache():
    _session = None
    session = get_or_create_session()
    with session.logger("cache-test", with_rotation_time="s") as logger:
        logger.log({"name": 1})
    session.close()


def test_remove_logger():
    session = get_or_create_session()
    session.logger("default-project")
    with pytest.raises(KeyError):
        session.remove_logger("test")
26.23
122
0.716737
0
0
0
0
0
0
0
0
157
0.059855
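The trailing numeric fields of each record read as simple statistics over the content string: for the record above, avg_line_length times the line count (26.23 times 100 lines) reproduces size (2,623), and score_documentation equals count_documentation divided by size (157 / 2,623 is roughly 0.0599). A rough, illustrative helper for recomputing such per-file statistics (not the corpus pipeline's actual code) could look like:

def file_stats(content: str) -> dict:
    """Approximate the per-file statistics reported alongside each record."""
    lines = content.splitlines()
    return {
        "size": len(content),
        "avg_line_length": len(content) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }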
4a379f8a8c2abcf1cc5791849c692674276f7e20
851
py
Python
Packages/constants.py
Bemesko/Intelligence-of-Home-GUI
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
[ "MIT" ]
null
null
null
Packages/constants.py
Bemesko/Intelligence-of-Home-GUI
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
[ "MIT" ]
null
null
null
Packages/constants.py
Bemesko/Intelligence-of-Home-GUI
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
[ "MIT" ]
null
null
null
import enum

BASELINE = "baseline"
ENERGY = "energy"
MAX_PRICE = "max_price"
START_PRICE = "starting_price"
INCREMENT = "increment"
MIN_PRICE = "min_price"
MAX_LOT_SIZE = "max_lot_size_wh"
NAMESERVER_AGENT_AMOUNT = 3
ATTRIBUTE_LIST_LENGTH = 50

NEXT_ENERGY_CONSUMPTION = "next_energy_consumption"
NEXT_ENERGY_GENERATION = "next_energy_generation"
ENERGY_DIFFERENCE = "energy_difference"
ENERGY_MARKET_PRICE = "energy_market_price"
WANTED_ENERGY = "wanted_energy"
ENERGY_BUY_MAX_PRICE = "energy_buy_max_price"
ENERGY_BUY_STARTING_PRICE = "energy_buy_starting_price"
ENERGY_BUY_PRICE_INCREMENT = "energy_buy_price_increment"
ENERGY_SELL_MIN_PRICE = "energy_sell_min_price"


class buy_baseline(enum.Enum):
    deficit = 0
    all_energy = 1
    infinite = 2
    none = 3


class sell_baseline(enum.Enum):
    surplus = 0
    all_energy = 1
    none = 2
24.314286
57
0.788484
174
0.204465
0
0
0
0
0
0
288
0.338425
4a37bdd049a40072735c67bea9e8cc13a3a7a335
1,553
py
Python
target/tests.py
groundupnews/gu
c7179ee3d058c8749d250d681032a76dc8d599d5
[ "BSD-3-Clause" ]
19
2018-01-28T14:35:40.000Z
2020-12-04T03:04:02.000Z
target/tests.py
groundupnews/gu
c7179ee3d058c8749d250d681032a76dc8d599d5
[ "BSD-3-Clause" ]
8
2018-06-02T14:28:28.000Z
2021-08-06T10:22:37.000Z
target/tests.py
groundupnews/gu
c7179ee3d058c8749d250d681032a76dc8d599d5
[ "BSD-3-Clause" ]
21
2018-02-25T14:07:48.000Z
2020-05-28T23:10:52.000Z
from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from target import models
from django.utils import timezone

# Create your tests here.


class URLSWork(TestCase):

    @classmethod
    def setUpTestData(cls):
        target = models.Target()
        target.letters = 'practical'
        target.words = 'practical'
        target.published = timezone.now()
        target.number = 1
        target.save()

    def test_urls(self):
        user = User.objects.create_user('admin', '[email protected]', 'abcde')
        user.is_staff = True
        user.is_active = True
        user.is_superuser = True
        user.save()
        c = Client()
        response = c.login(username='admin', password='abcde')
        self.assertEqual(response, True)
        url = reverse('target:list')
        response = c.get(url)
        self.assertEqual(response.status_code, 200)
        target = models.Target.objects.all()[0]
        url = reverse('target:detail', args=(target.number,))
        response = c.get(url)
        self.assertEqual(response.status_code, 200)
        url = reverse('target:create')
        response = c.post(url)
        self.assertEqual(response.status_code, 200)
        url = reverse('target:create_letters', args=('practical',))
        response = c.post(url)
        self.assertEqual(response.status_code, 200)
        url = reverse('target:delete', args=(1,))
        response = c.get(url)
        self.assertEqual(response.status_code, 200)
33.76087
78
0.63812
1,324
0.852543
0
0
235
0.15132
0
0
186
0.119768
4a38f4cdb8c158390444f36146a5ad23b2ae9c67
4,998
py
Python
jenkinsapi/view.py
julienduchesne/jenkinsapi
369dc54a8d5bb1f4e985c647378b9e1e62c26961
[ "MIT" ]
null
null
null
jenkinsapi/view.py
julienduchesne/jenkinsapi
369dc54a8d5bb1f4e985c647378b9e1e62c26961
[ "MIT" ]
52
2019-06-25T12:47:14.000Z
2021-04-12T12:24:08.000Z
jenkinsapi/view.py
klauern/jenkinsapi
605ad22a0109d3f51452c7abd23b0376a44682da
[ "MIT" ]
null
null
null
""" Module for jenkinsapi views """ import six import logging from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.job import Job from jenkinsapi.custom_exceptions import NotFound log = logging.getLogger(__name__) class View(JenkinsBase): """ View class """ def __init__(self, url, name, jenkins_obj): self.name = name self.jenkins_obj = jenkins_obj JenkinsBase.__init__(self, url) self.deleted = False def __str__(self): return self.name def __getitem__(self, job_name): assert isinstance(job_name, str) api_url = self.python_api_url(self.get_job_url(job_name)) return Job(api_url, job_name, self.jenkins_obj) def __contains__(self, job_name): """ True if view_name is the name of a defined view """ return job_name in self.keys() def delete(self): """ Remove this view object """ url = "%s/doDelete" % self.baseurl self.jenkins_obj.requester.post_and_confirm_status(url, data='') self.jenkins_obj.poll() self.deleted = True def keys(self): return self.get_job_dict().keys() def iteritems(self): it = six.iteritems(self.get_job_dict()) for name, url in it: yield name, Job(url, name, self.jenkins_obj) def values(self): return [a[1] for a in self.iteritems()] def items(self): return [a for a in self.iteritems()] def _get_jobs(self): if 'jobs' in self._data: for viewdict in self._data["jobs"]: yield viewdict["name"], viewdict["url"] def get_job_dict(self): return dict(self._get_jobs()) def __len__(self): return len(self.get_job_dict().keys()) def get_job_url(self, str_job_name): if str_job_name in self: return self.get_job_dict()[str_job_name] else: # noinspection PyUnboundLocalVariable views_jobs = ", ".join(self.get_job_dict().keys()) raise NotFound("Job %s is not known, available jobs" " in view are: %s" % (str_job_name, views_jobs)) def get_jenkins_obj(self): return self.jenkins_obj def add_job(self, str_job_name, job=None): """ Add job to a view :param str_job_name: name of the job to be added :param job: Job object to be added :return: True if job has been added, False if job already exists or job not known to Jenkins """ if not job: if str_job_name in self.get_job_dict(): log.warning( 'Job %s is already in the view %s', str_job_name, self.name) return False else: # Since this call can be made from nested view, # which doesn't have any jobs, we can miss existing job # Thus let's create top level Jenkins and ask him # http://jenkins:8080/view/CRT/view/CRT-FB/view/CRT-SCRT-1301/ top_jenkins = self.get_jenkins_obj().get_jenkins_obj_from_url( self.baseurl.split('view/')[0]) if not top_jenkins.has_job(str_job_name): log.error( msg='Job "%s" is not known to Jenkins' % str_job_name) return False else: job = top_jenkins.get_job(str_job_name) log.info(msg='Creating job %s in view %s' % (str_job_name, self.name)) url = '%s/addJobToView' % self.baseurl params = {'name': str_job_name} self.get_jenkins_obj().requester.post_and_confirm_status( url, data={}, params=params) self.poll() log.debug(msg='Job "%s" has been added to a view "%s"' % (job.name, self.name)) return True def _get_nested_views(self): for viewdict in self._data.get("views", []): yield viewdict["name"], viewdict["url"] def get_nested_view_dict(self): return dict(self._get_nested_views()) def get_config_xml_url(self): return '%s/config.xml' % self.baseurl def get_config(self): """ Return the config.xml from the view """ url = self.get_config_xml_url() response = self.get_jenkins_obj().requester.get_and_confirm_status(url) return response.text def update_config(self, config): """ Update the config.xml to the view """ url = 
self.get_config_xml_url() config = str(config) # cast unicode in case of Python 2 response = self.get_jenkins_obj().requester.post_url( url, params={}, data=config) return response.text @property def views(self): return self.get_jenkins_obj().get_jenkins_obj_from_url( self.baseurl).views
30.290909
79
0.580232
4,768
0.953982
445
0.089036
126
0.02521
0
0
1,127
0.22549
4a39a497868bd170b5a86c4ae6d32db864cbebc8
7,240
py
Python
core/vision/collection.py
jmarangola/cv-chess
c1bf1754b622e76bc2bc92276b96760c321a8bd9
[ "MIT" ]
null
null
null
core/vision/collection.py
jmarangola/cv-chess
c1bf1754b622e76bc2bc92276b96760c321a8bd9
[ "MIT" ]
null
null
null
core/vision/collection.py
jmarangola/cv-chess
c1bf1754b622e76bc2bc92276b96760c321a8bd9
[ "MIT" ]
null
null
null
""" Autonomous dataset collection of data for jetson nano John Marangola - [email protected] """ import datasets import json from datasets import Board, ChessPiece, PieceColor, PieceType #from realsense_utils import RealSenseCamera import preprocessing as pr import cv2 import pandas as pd import os from os.path import isfile, join import uuid import numpy as np import uuid from PIL import Image from PIL.ExifTags import TAGS RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt BOARD_SAVE_DEST= r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly setup the metadata) TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch LOCAL_MD_FILENAME = "local_meta.json" LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME TL = [250, 115] BL = [250, 687] TR = [825, 115] BR = [825, 687] def rotate_image(image, angle): image_center = tuple(np.array(image.shape[1::-1]) / 2) rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0) result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR) return result def fen_to_dict(string): name_to_num = { 'p' : 1, 'b' : 2, 'n' : 3, 'r' : 4, 'q' : 5, 'k' : 6, } out = {} letters = "ABCDEFGH" for i in range(8): for j in range(1,9): out[letters[i] + str(j)] = 0 string = string.split('/') new_string = [] for s in string: for d in s: if d.isnumeric(): ix = s.index(d) for i in range(int(d)-1): s = s[0:ix] + '1' + s[ix:] new_string.append(s) for i in range(8, 0, -1): for j in range(8): if new_string[8-i][j].isnumeric(): out[letters[j] + str(i)] = 0 else: out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()] return out def get_sorted_time_saved(images): """ Given a list of image filenames, return a dictionary of image filename : time written to disk pairs. Purpose: for debugging dataset Args: images (list): List of image filenames Returns: dict: dict of image filenames """ image_dat = [] for image in images: imtmp = Image.open(image) tmp = imtmp.getexif() image_dat.append(tmp) dt = {} for exifdata in image_dat: idx = image_dat.index(exifdata) # iterating over all EXIF data fields for tag_id in exifdata: tag = TAGS.get(tag_id, tag_id) data = exifdata.get(tag_id) # decode bytes if isinstance(data, bytes): data = data.decode() # Add datetime field if tag == "DateTime": dt[images[idx]] = data print(f"{tag:25}: {data}") output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False) print(output) dt = {} for item in output: dt[item[0]] = item[1] with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json json.dump(output, wr) return output def del_batch_from_text_file(file): filenames = [] with open(file, "r") as rd: for line in rd.readlines(): # parse each line for file to delete: commaIndex = line.index(",") filename = line[:commaIndex] os.remove(TMP_DEST + filename) if __name__ == "__main__": # Initialize camera realsense = RealSenseCamera() """ # Check if calibration sequence must be run if RUN_CALIBRATION: realsense.calibrate_board_pos() if realsense.get_board_corners() is None: print("Failed to run calibration. 
Exiting...") exit() """ """ board_meta = Board() # Add pieces to metadata csv board_meta.add_pieces({ "A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE) }) board_meta.display_board(dest=BOARD_SAVE_DEST) print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?") validate = input() if validate.upper() == "E" or validate.upper() == "N": print("Exiting...") realsense.stop_pipeline() exit() files = [] files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))] # Check to see if there is pre-existing .csv metadata to add to if LOCAL_MD_FILENAME in files: try: total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH) except: total_metadata = pd.DataFrame() else: total_metadata = pd.DataFrame() # Loop through input while input() != "exit": img = realsense.capture_rgb_image() # Capture the image img = img[105:690, 348:940, :] img = rotate_image(img, 1.5) files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files piece_types, piece_colors = [], [] batch_id = uuid.uuid1() for tile in sorted(files.keys()): temp = board_meta.get_chess_piece(tile) if temp is None: piece_types.append(None) piece_colors.append(None) else: piece_types.append(temp.piece_type.name) piece_colors.append(temp.piece_color.name) tmp_meta = pd.DataFrame({ "File" : [files[file] for file in files.keys()], "Position" : [file for file in files.keys()], "Piece Type" : piece_types, "Piece Color" : piece_colors, "Batch ID" : [batch_id for i in range(len(files.keys()))] }) frames = [total_metadata, tmp_meta] total_metadata = pd.concat(frames) # Concatenate dataframes print(total_metadata) total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH) """ #pr.delete_board2_64_output(base_directory=TMP_DEST) FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper() last_input = None df = pd.DataFrame() while input() != "end": resp = input("[n] for new fen, [anything key to take an image] >") if resp == "new": fen = input("Enter a FEN:").upper() img = realsense.capture_rgb_image() # Capture the image print("Captured image") img = img[105:690, 348:940, :] img = rotate_image(img, 1.5) cv2.imwrite("original.jpg", img) # Get dict of positions temp_dict = fen_to_dict(FEN) tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files data_frame = pd.DataFrame(tiles) data_frame = data_frame.transpose() frames = [df, data_frame] df = pd.concat(frames) # Concatenate dataframe csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False) # Close streams and end pipeline realsense.stop_pipeline()
31.754386
180
0.604144
0
0
0
0
0
0
0
0
3,866
0.533978
4a3a7096be78dd2d3c57cba31752bc3f172e277d
3,475
py
Python
tests/test_sbfc.py
htwangtw/sbfc
5119017a643b82efbfaaf373a26f191a51f8283a
[ "BSD-3-Clause" ]
null
null
null
tests/test_sbfc.py
htwangtw/sbfc
5119017a643b82efbfaaf373a26f191a51f8283a
[ "BSD-3-Clause" ]
13
2021-04-29T16:11:18.000Z
2022-02-22T18:10:36.000Z
tests/test_sbfc.py
htwangtw/sbfc
5119017a643b82efbfaaf373a26f191a51f8283a
[ "BSD-3-Clause" ]
null
null
null
import os

import numpy as np
import pandas as pd
from nilearn import datasets

from sbfc.parser import seed_base_connectivity

seed = os.path.dirname(__file__) + "/data/difumo64_pcc.nii.gz"


def _make_data_single_run(confound=True):
    adhd_dataset = datasets.fetch_adhd(n_subjects=2)
    group_confounds = pd.DataFrame(adhd_dataset.phenotypic)[
        ["Subject", "MeanFD", "age", "sex"]
    ]
    group_confounds = group_confounds.rename(columns={"Subject": "subject_label"})
    group_design_matrix = pd.DataFrame(adhd_dataset.phenotypic)[["Subject"]]
    group_design_matrix = group_design_matrix.rename(
        columns={"Subject": "subject_label"}
    )
    group_design_matrix["pheno"] = np.random.rand(2)
    group_contrast = pd.DataFrame([1], columns=["pheno"])
    if confound:
        func_img = {
            f"{sub_id}": {"func": [func], "confound": [confound]}
            for func, confound, sub_id in zip(
                adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
            )
        }
    else:
        func_img = {
            f"{sub_id}": {"func": [func], "confound": [None]}
            for func, confound, sub_id in zip(
                adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
            )
        }
    return func_img, group_design_matrix, group_confounds, group_contrast


def _make_data_multi_run():
    adhd_dataset = datasets.fetch_adhd(n_subjects=2)
    group_confounds = pd.DataFrame(adhd_dataset.phenotypic)[
        ["Subject", "MeanFD", "age", "sex"]
    ]
    group_confounds = group_confounds.rename(columns={"Subject": "subject_label"})
    group_design_matrix = pd.DataFrame(adhd_dataset.phenotypic)[["Subject"]]
    group_design_matrix = group_design_matrix.rename(
        columns={"Subject": "subject_label"}
    )
    group_design_matrix["pheno"] = np.random.rand(2)
    group_contrast = pd.DataFrame([1], columns=["pheno"])
    func_img = {
        f"{sub_id}": {"func": [func, func], "confound": [confound, confound]}
        for func, confound, sub_id in zip(
            adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
        )
    }
    return func_img, group_design_matrix, group_confounds, group_contrast


def test_sbfc_single_run(tmpdir):
    (
        func_img,
        group_design_matrix,
        group_confounds,
        group_contrast,
    ) = _make_data_single_run()
    # Prepare seed
    pcc_coords = (0, -53, 26)
    first_m, first_con, s_m = seed_base_connectivity(
        func_img,
        pcc_coords,
        group_confounds,
        group_design_matrix,
        group_contrast,
        write_dir=tmpdir,
    )
    assert len(first_m) == 2
    (
        func_img,
        group_design_matrix,
        group_confounds,
        group_contrast,
    ) = _make_data_single_run(confound=False)
    # mask seed
    first_m, first_con, s_m = seed_base_connectivity(
        func_img,
        seed,
        group_confounds,
        group_design_matrix,
        group_contrast,
        write_dir=tmpdir,
    )
    assert len(first_m) == 2


def test_sbfc_mutli_run(tmpdir):
    (
        func_img,
        group_design_matrix,
        group_confounds,
        group_contrast,
    ) = _make_data_multi_run()
    # mask seed
    first_m, first_con, s_m = seed_base_connectivity(
        func_img,
        seed,
        group_confounds,
        group_design_matrix,
        group_contrast,
        write_dir=tmpdir,
    )
    assert len(first_m) == 2
29.700855
82
0.639424
0
0
0
0
0
0
0
0
340
0.097842
4a3cf72d3d9f4ab9e1a082a0ec19d609ba13facf
528
py
Python
final_project/machinetranslation/tests/test.py
ChrisOmeh/xzceb-flask_eng_fr
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
[ "Apache-2.0" ]
null
null
null
final_project/machinetranslation/tests/test.py
ChrisOmeh/xzceb-flask_eng_fr
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
[ "Apache-2.0" ]
null
null
null
final_project/machinetranslation/tests/test.py
ChrisOmeh/xzceb-flask_eng_fr
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
[ "Apache-2.0" ]
null
null
null
import unittest

from translator import english_to_french, french_to_english


class TestenglishToFrench(unittest.TestCase):
    def test1(self):
        self.assertEqual(english_to_french(["Hello"]), "Bonjour")
        self.assertNotEqual(english_to_french(["Bonjour"]), "Hello")


class TestfrenchToEnglish(unittest.TestCase):
    def test1(self):
        self.assertEqual(french_to_english(["Bonjour"]), 'Hello')
        self.assertNotEqual(french_to_english(["Hello"]), "Bonjour")


if __name__ == "__main__":
    unittest.main()
35.2
68
0.727273
401
0.75947
0
0
0
0
0
0
74
0.140152
4a3d8daa44bdf458c650e19786cc3f1f2403777e
3,553
py
Python
tests/ut/python/parallel/test_auto_parallel_transformer.py
huxian123/mindspore
ec5ba10c82bbd6eccafe32d3a1149add90105bc8
[ "Apache-2.0" ]
2
2021-04-22T07:00:59.000Z
2021-11-08T02:49:09.000Z
tests/ut/python/parallel/test_auto_parallel_transformer.py
ReIadnSan/mindspore
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
[ "Apache-2.0" ]
1
2020-12-29T06:46:38.000Z
2020-12-29T06:46:38.000Z
tests/ut/python/parallel/test_auto_parallel_transformer.py
ReIadnSan/mindspore
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
[ "Apache-2.0" ]
1
2021-05-10T03:30:36.000Z
2021-05-10T03:30:36.000Z
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss

grad_all = C.GradOperation(get_all=True)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x):
        predict = self.network(x)
        return self.loss(predict)


class GradWrap(nn.Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x):
        return grad_all(self.network)(x)


class CustomDense(nn.Cell):
    def __init__(self, row, column):
        super(CustomDense, self).__init__()
        self.weight = Parameter(Tensor(np.ones([row, column]).astype(np.float32) * 0.01), "w", requires_grad=True)
        self.bias = Parameter(Tensor(np.zeros([row, column]).astype(np.float32)), "b", requires_grad=True)
        self.matmul1 = P.MatMul()
        self.add2 = P.TensorAdd()
        self.activation3 = nn.ReLU()

    def construct(self, x):
        mat_output = self.matmul1(x, self.weight)
        add_output = self.add2(mat_output, self.bias)
        output = self.activation3(add_output)
        return output


class DenseMutMulNet(nn.Cell):
    def __init__(self):
        super(DenseMutMulNet, self).__init__()
        self.fc1 = CustomDense(4096, 4096)
        self.fc2 = CustomDense(4096, 4096)
        self.fc3 = CustomDense(4096, 4096)
        self.fc4 = CustomDense(4096, 4096)
        self.relu4 = nn.ReLU()
        self.relu5 = nn.ReLU()
        self.transpose = P.Transpose()
        self.matmul1 = P.MatMul()
        self.matmul2 = P.MatMul()

    def construct(self, x):
        q = self.fc1(x)
        k = self.fc2(x)
        v = self.fc3(x)
        k = self.transpose(k, (1, 0))
        c = self.relu4(self.matmul1(q, k))
        s = self.relu5(self.matmul2(c, v))
        s = self.fc4(s)
        return s


class MultiTransformer(nn.Cell):
    def __init__(self, layer_nums=1):
        super(MultiTransformer, self).__init__()
        self.layer = self._make_layer(layer_nums)

    def _make_layer(self, layer_num):
        layers = []
        for _ in range(0, layer_num):
            layers.append(DenseMutMulNet())
        return nn.SequentialCell(layers)

    def construct(self, x):
        out = self.layer(x)
        return out


def test_dmnet_train_step():
    size = 8
    context.set_auto_parallel_context(device_num=size, global_rank=0)
    input_ = Tensor(np.ones([4096, 4096]).astype(np.float32) * 0.01)
    net = GradWrap(NetWithLoss(MultiTransformer()))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    _executor.compile(net, input_)
30.62931
114
0.665072
2,238
0.62989
0
0
0
0
0
0
596
0.167746
4a3dd5e26114808a45a3424f7c019a215fa96e04
6,227
py
Python
cloudcafe/compute/events/models/common.py
rcbops-qa/cloudcafe
d937f85496aadafbb94a330b9adb8ea18bee79ba
[ "Apache-2.0" ]
null
null
null
cloudcafe/compute/events/models/common.py
rcbops-qa/cloudcafe
d937f85496aadafbb94a330b9adb8ea18bee79ba
[ "Apache-2.0" ]
null
null
null
cloudcafe/compute/events/models/common.py
rcbops-qa/cloudcafe
d937f85496aadafbb94a330b9adb8ea18bee79ba
[ "Apache-2.0" ]
null
null
null
""" Copyright 2015 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cloudcafe.compute.events.models.base import ( EventBaseModel, EventBaseListModel) class Bandwidth(EventBaseModel): """Bandwidth Response Model @summary: Response model for bandwidth from a compute event notification @note: Although the 'public' and 'private' interfaces are not required, they are the most common names, and are included as optional attributes for the sake of convenience @note: This type may contain additional unspecified BandwidthInterface fields, which will be captured in a dictionary called kwargs JSON Example: { "private": { <BandwidthInterface> }, "public": { <BandwidthInterface> } } """ kwarg_map = {'private': 'private', 'public': 'public'} optional_kwargs = ['private', 'public'] strict_checking = False def __init__(self, private=None, public=None, **kwargs): super(Bandwidth, self).__init__(locals()) @classmethod def _dict_to_obj(cls, json_dict): """Override dict_to_obj implementation""" obj = cls._map_values_to_kwargs(json_dict) for key in obj.kwargs: obj.kwargs[key] = BandwidthInterface._dict_to_obj(obj.kwargs[key]) if obj.private: obj.private = BandwidthInterface._dict_to_obj(obj.private) if obj.public: obj.public = BandwidthInterface._dict_to_obj(obj.public) return obj class BandwidthInterface(EventBaseModel): """Bandwidth Interface Response Model @summary: Response model for bandwidth on an interface from a compute event notification @note: Sub-model of Bandwidth JSON Example: { "bw_in": 123456, "bw_out": 654321 } """ kwarg_map = {'bw_in': 'bw_in', 'bw_out': 'bw_out'} def __init__(self, bw_in, bw_out): super(BandwidthInterface, self).__init__(locals()) class FixedIp(EventBaseModel): """Fixed IP Response Model @summary: Response model for a fixed IP address from a compute event notification @note: Represents a single fixed IP JSON Example: { "address": "10.10.0.0", "floating_ips": [], "label": "public", "meta": {}, "type": "fixed", "version": 4, "vif_mac": "FE:ED:FA:00:1C:D4" } """ kwarg_map = { 'address': 'address', 'floating_ips': 'floating_ips', 'label': 'label', 'meta': 'meta', 'type_': 'type', 'version': 'version', 'vif_mac': 'vif_mac'} def __init__(self, address, floating_ips, label, meta, type_, version, vif_mac): super(FixedIp, self).__init__(locals()) class FixedIps(EventBaseListModel): """Fixed IPs Model @summary: Response model for a list of fixed IP addresses from a compute event notification @note: Returns a list of elements of type 'FixedIp' JSON Example: { "fixed_ips": [ { <FixedIp> }, { <FixedIp> } ] } """ list_model_key = 'fixed_ips' ObjectModel = FixedIp class ImageMeta(EventBaseModel): """Image Metadata Model @summary: Response model for image metadata from a compute event notification @note: This type may contain additional unspecified fields, which will be captured in a dictionary called kwargs JSON Example: { "image_meta": { "auto_disk_config": "disabled", "base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1", "container_format": "ovf", "disk_format": "vhd", "image_type": "base", "min_disk": 
"20", "min_ram": "512", "org.openstack__1__architecture": "x64", "org.openstack__1__os_distro": "com.ubuntu", "org.openstack__1__os_version": "12.04", "os_type": "linux" } } """ kwarg_map = { 'auto_disk_config': 'auto_disk_config', 'base_image_ref': 'base_image_ref', 'container_format': 'container_format', 'disk_format': 'disk_format', 'image_type': 'image_type', 'min_disk': 'min_disk', 'min_ram': 'min_ram', 'org_openstack__1__architecture': 'org.openstack__1__architecture', 'org_openstack__1__os_distro': 'org.openstack__1__os_distro', 'org_openstack__1__os_version': 'org.openstack__1__os_version', 'os_type': 'os_type'} strict_checking = False def __init__(self, auto_disk_config, base_image_ref, container_format, disk_format, image_type, min_disk, min_ram, org_openstack__1__architecture, org_openstack__1__os_distro, org_openstack__1__os_version, os_type, **kwargs): super(ImageMeta, self).__init__(locals()) class InstanceException(EventBaseModel): """Instance Exception Model @summary: Response model for an instance exception from a compute event notification @note: Represents a single instance exception JSON Example: { "exception": { "kwargs": { "instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1", "reason": "Something broke", "code": 500 } } } """ kwarg_map = {'kwargs': 'kwargs'} def __init__(self, kwargs): super(InstanceException, self).__init__(locals())
30.826733
78
0.605107
5,559
0.892725
0
0
470
0.075478
0
0
4,168
0.669343
4a3e2e6cca24d36e7e6072a43d4a7616c515981f
1,446
py
Python
openpyxl/drawing/tests/test_shapes.py
sekcheong/openpyxl
e1ba037f171efa348f75431c35a50de5ca277b78
[ "MIT" ]
null
null
null
openpyxl/drawing/tests/test_shapes.py
sekcheong/openpyxl
e1ba037f171efa348f75431c35a50de5ca277b78
[ "MIT" ]
null
null
null
openpyxl/drawing/tests/test_shapes.py
sekcheong/openpyxl
e1ba037f171efa348f75431c35a50de5ca277b78
[ "MIT" ]
null
null
null
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl

import pytest

from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml


@pytest.fixture
def GradientFillProperties():
    from ..fill import GradientFillProperties
    return GradientFillProperties


class TestGradientFillProperties:

    def test_ctor(self, GradientFillProperties):
        fill = GradientFillProperties()
        xml = tostring(fill.to_tree())
        expected = """
        <gradFill></gradFill>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, GradientFillProperties):
        src = """
        <gradFill></gradFill>
        """
        node = fromstring(src)
        fill = GradientFillProperties.from_tree(node)
        assert fill == GradientFillProperties()


@pytest.fixture
def Transform2D():
    from ..shapes import Transform2D
    return Transform2D


class TestTransform2D:

    def test_ctor(self, Transform2D):
        shapes = Transform2D()
        xml = tostring(shapes.to_tree())
        expected = """
        <xfrm></xfrm>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, Transform2D):
        src = """
        <root />
        """
        node = fromstring(src)
        shapes = Transform2D.from_tree(node)
        assert shapes == Transform2D()
23.322581
55
0.64177
1,023
0.707469
0
0
219
0.151452
0
0
193
0.133472
4a4054b106f4552f95f762ef5c1bcfd72acaebe7
19,509
py
Python
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
raysect/source
11f03089d0379fc7fb4d23c6f60c3d255673cec9
[ "BSD-3-Clause" ]
71
2015-10-25T16:50:18.000Z
2022-03-02T03:46:19.000Z
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
raysect/source
11f03089d0379fc7fb4d23c6f60c3d255673cec9
[ "BSD-3-Clause" ]
336
2015-02-11T22:39:54.000Z
2022-02-22T18:42:32.000Z
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
raysect/source
11f03089d0379fc7fb4d23c6f60c3d255673cec9
[ "BSD-3-Clause" ]
24
2016-09-11T17:12:10.000Z
2022-02-24T22:57:09.000Z
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the Raysect Project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import numpy as np from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray from matplotlib.colors import SymLogNorm, Normalize import scipy import sys from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import \ TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues,\ TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER,\ NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, \ Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, \ NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace # Force scientific format to get the right number of significant figures np.set_printoptions(30000, linewidth=100, formatter={'float': lambda x_str: format(x_str, '.'+str(PRECISION)+'E')}, threshold=sys.maxsize) # Overwrite imported values here. VISUAL_NOT_TESTS = False if VISUAL_NOT_TESTS: NB_X = 51 NB_Y = 51 NB_Z = 51 NB_XSAMPLES = 101 NB_YSAMPLES = 101 NB_ZSAMPLES = 101 X_EXTRAP_DELTA_MIN = 0.04 Y_EXTRAP_DELTA_MIN = 0.04 Z_EXTRAP_DELTA_MIN = 0.04 BIG_VALUE_FACTOR = 20. SMALL_VALUE_FACTOR = -20. def docstring_test(): """ .. 
code-block:: python >>> from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray >>> >>> x = np.linspace(-1., 1., 20) >>> y = np.linspace(-1., 1., 20) >>> z = np.linspace(-1., 1., 20) >>> x_array, y_array, z_array = np.meshgrid(x, y, z, indexing='ij') >>> f = np.exp(-(x_array**2 + y_array**2 + z_array**2)) >>> interpolator3D = Interpolator3DArray(x, y, z, f, 'cubic', 'nearest', 1.0, 1.0, 1.0) >>> # Interpolation >>> interpolator3D(1.0, 1.0, 0.2) 0.1300281183136766 >>> # Extrapolation >>> interpolator3D(1.0, 1.0, 1.1) 0.0497870683678659 >>> # Extrapolation out of bounds >>> interpolator3D(1.0, 1.0, 2.1) ValueError: The specified value (z=2.1) is outside of extrapolation range. """ pass def get_extrapolation_input_values( x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, x_extrap_delta_max, y_extrap_delta_max, z_extrap_delta_max, x_extrap_delta_min, y_extrap_delta_min, z_extrap_delta_min): xsamples_extrap_out_of_bounds_options = np.array( [x_lower - x_extrap_delta_max, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_max]) ysamples_extrap_out_of_bounds_options = np.array( [y_lower - y_extrap_delta_max, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_max]) zsamples_extrap_out_of_bounds_options = np.array( [z_lower - z_extrap_delta_max, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_max]) xsamples_extrap_in_bounds_options = np.array( [x_lower - x_extrap_delta_min, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_min]) ysamples_extrap_in_bounds_options = np.array( [y_lower - y_extrap_delta_min, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_min]) zsamples_extrap_in_bounds_options = np.array( [z_lower - z_extrap_delta_min, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_min]) xsamples_extrap_out_of_bounds = [] ysamples_extrap_out_of_bounds = [] zsamples_extrap_out_of_bounds = [] xsamples_extrap_in_bounds = [] ysamples_extrap_in_bounds = [] zsamples_extrap_in_bounds = [] edge_indicies_x = [0, len(xsamples_extrap_out_of_bounds_options) - 1] edge_indicies_y = [0, len(ysamples_extrap_out_of_bounds_options) - 1] edge_indicies_z = [0, len(zsamples_extrap_out_of_bounds_options) - 1] for i_x in range(len(xsamples_extrap_out_of_bounds_options)): for j_y in range(len(ysamples_extrap_out_of_bounds_options)): for k_z in range(len(zsamples_extrap_out_of_bounds_options)): if not (i_x not in edge_indicies_x and j_y not in edge_indicies_y and k_z not in edge_indicies_z): xsamples_extrap_out_of_bounds.append(xsamples_extrap_out_of_bounds_options[i_x]) ysamples_extrap_out_of_bounds.append(ysamples_extrap_out_of_bounds_options[j_y]) zsamples_extrap_out_of_bounds.append(zsamples_extrap_out_of_bounds_options[k_z]) xsamples_extrap_in_bounds.append(xsamples_extrap_in_bounds_options[i_x]) ysamples_extrap_in_bounds.append(ysamples_extrap_in_bounds_options[j_y]) zsamples_extrap_in_bounds.append(zsamples_extrap_in_bounds_options[k_z]) return \ np.array(xsamples_extrap_out_of_bounds), np.array(ysamples_extrap_out_of_bounds), \ np.array(zsamples_extrap_out_of_bounds), np.array(xsamples_extrap_in_bounds), \ np.array(ysamples_extrap_in_bounds), np.array(zsamples_extrap_in_bounds) def pcolourmesh_corners(input_array): return np.concatenate((input_array[:-1] - np.diff(input_array)/2., np.array([input_array[-1] - (input_array[-1] - input_array[-2]) / 2., input_array[-1] + (input_array[-1] - input_array[-2]) / 2.])), axis=0) def function_to_spline(x_input, y_input, z_input, factor_in): t = np.pi * np.sqrt((x_input ** 2 + y_input ** 2 + z_input ** 2)) return 
factor_in*np.sinc(t) if __name__ == '__main__': # Calculate for big values, small values, or normal values big_values = False small_values = True log_scale = False uneven_spacing = False use_saved_datastore_spline_knots = True verbose_options = [False, True, False, False] if VISUAL_NOT_TESTS: index_x_in = 40 else: index_x_in = 4 index_y_in = 0 index_z_in = 0 index_y_plot = 0 index_z_plot = 0 print('Using scipy version', scipy.__version__) # Find the function values to be used if big_values: factor = np.power(10., BIG_VALUE_FACTOR) elif small_values: factor = np.power(10., SMALL_VALUE_FACTOR) else: factor = 1. if uneven_spacing: x_in = uneven_linspace(X_LOWER, X_UPPER, NB_X, offset_fraction=1./3.) y_in = uneven_linspace(Y_LOWER, Y_UPPER, NB_Y, offset_fraction=1./3.) z_in = uneven_linspace(Z_LOWER, Z_UPPER, NB_Z, offset_fraction=1./3.) else: x_in = np.linspace(X_LOWER, X_UPPER, NB_X) y_in = np.linspace(Y_LOWER, Y_UPPER, NB_Y) z_in = np.linspace(Z_LOWER, Z_UPPER, NB_Z) x_in_full, y_in_full, z_in_full = np.meshgrid(x_in, y_in, z_in, indexing='ij') f_in = function_to_spline(x_in_full, y_in_full, z_in_full, factor) if use_saved_datastore_spline_knots: if uneven_spacing: if big_values: reference_loaded_values = TestInterpolatorLoadBigValuesUneven() elif small_values: reference_loaded_values = TestInterpolatorLoadSmallValuesUneven() else: reference_loaded_values = TestInterpolatorLoadNormalValuesUneven() else: if big_values: reference_loaded_values = TestInterpolatorLoadBigValues() elif small_values: reference_loaded_values = TestInterpolatorLoadSmallValues() else: reference_loaded_values = TestInterpolatorLoadNormalValues() f_in = reference_loaded_values.data if verbose_options[0]: print('Save this to self.data in test_interpolator:\n', repr(f_in)) xsamples = np.linspace(X_LOWER, X_UPPER, NB_XSAMPLES) ysamples = np.linspace(Y_LOWER, Y_UPPER, NB_YSAMPLES) zsamples = np.linspace(Z_LOWER, Z_UPPER, NB_ZSAMPLES) xsamples_extrapolation, ysamples_extrapolation, zsamples_extrapolation = large_extrapolation_range( xsamples, ysamples, zsamples, EXTRAPOLATION_RANGE, N_EXTRAPOLATION ) # # Extrapolation x and y values xsamples_out_of_bounds, ysamples_out_of_bounds, zsamples_out_of_bounds, xsamples_in_bounds, ysamples_in_bounds, \ zsamples_in_bounds = get_extrapolation_input_values( X_LOWER, X_UPPER, Y_LOWER, Y_UPPER, Z_LOWER, Z_UPPER, X_EXTRAP_DELTA_MAX, Y_EXTRAP_DELTA_MAX, Z_EXTRAP_DELTA_MAX, X_EXTRAP_DELTA_MIN, Y_EXTRAP_DELTA_MIN, Z_EXTRAP_DELTA_MIN ) interpolator3D = Interpolator3DArray(x_in, y_in, z_in, f_in, 'linear', 'linear', extrapolation_range_x=2.0, extrapolation_range_y=2.0, extrapolation_range_z=2.0) if VISUAL_NOT_TESTS: n_lower_upper_interp = 51 else: n_lower_upper_interp = 19 n_lower = 50 lower_p = 0.9 xsamples_lower_and_upper = np.linspace(X_LOWER, X_UPPER, n_lower_upper_interp) ysamples_lower_and_upper = np.linspace(Y_LOWER, Y_UPPER, n_lower_upper_interp) zsamples_lower_and_upper = np.linspace(Z_LOWER, Z_UPPER, n_lower_upper_interp) xsamples_lower_and_upper = np.concatenate((np.linspace(X_LOWER - (X_UPPER - X_LOWER) * lower_p, X_LOWER, n_lower)[ :-1], xsamples_lower_and_upper, np.linspace(X_UPPER, X_UPPER + (X_UPPER - X_LOWER) * lower_p, n_lower)[ 1:])) ysamples_lower_and_upper = np.concatenate((np.linspace(Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p, Y_LOWER, n_lower)[ :-1], ysamples_lower_and_upper, np.linspace(Y_UPPER, Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p, n_lower)[ 1:])) zsamples_lower_and_upper = np.concatenate((np.linspace(Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p, Z_LOWER, n_lower)[ :-1], 
zsamples_lower_and_upper, np.linspace(Z_UPPER, Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p, n_lower)[ 1:])) index_ysamples_lower_upper = np.where(x_in[index_y_in] == ysamples_lower_and_upper)[0].item() # extrapolation to save f_extrapolation_output = np.zeros((len(xsamples_extrapolation), )) for i in range(len(xsamples_extrapolation)): f_extrapolation_output[i] = interpolator3D( xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i] ) if verbose_options[1]: print('Output of extrapolation to be saved:\n', repr(f_extrapolation_output)) check_plot = True if check_plot: import matplotlib.pyplot as plt from matplotlib import cm # Install mayavi and pyQt5 main_plots_on = True if main_plots_on: fig, ax = plt.subplots(1, 4) fig1, ax1 = plt.subplots(1, 2) if not (x_in[index_x_in] == xsamples).any(): raise ValueError( f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES=' f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1' ) if not (y_in[index_y_in] == ysamples_lower_and_upper).any(): raise ValueError( f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES=' f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1' ) index_xsamples = np.where(x_in[index_x_in] == xsamples)[0].item() index_ysamples_lower_upper = np.where(y_in[index_y_in] == ysamples_lower_and_upper)[0].item() # index_ysamples_lower_upper = 0 # index_zsamples_lower_upper = 0 index_zsamples_lower_upper = np.where(z_in[index_z_in] == zsamples_lower_and_upper)[0].item() f_plot_x = f_in[index_x_in, :, :] y_corners_x = pcolourmesh_corners(y_in) z_corners_x = pcolourmesh_corners(z_in) min_colourmap = np.min(f_in) max_colourmap = np.max(f_in) if log_scale: c_norm = SymLogNorm(vmin=min_colourmap, vmax=max_colourmap, linthresh=0.03) else: c_norm = Normalize(vmin=min_colourmap, vmax=max_colourmap) colourmap = cm.get_cmap('viridis', 512) ax[0].pcolormesh(y_corners_x, z_corners_x, f_plot_x, norm=c_norm, cmap='viridis') # ax[0].pcolormesh(y_in, z_in, f_plot_x) ax[0].set_aspect('equal') f_out = np.zeros((len(xsamples), len(ysamples), len(zsamples))) for i in range(len(xsamples)): for j in range(len(ysamples)): for k in range(len(zsamples)): f_out[i, j, k] = interpolator3D(xsamples[i], ysamples[j], zsamples[k]) if verbose_options[2]: print('Test interpolation:\n', repr(f_out)) f_out_lower_and_upper = np.zeros((len(xsamples_lower_and_upper), len(ysamples_lower_and_upper), len(zsamples_lower_and_upper))) for i in range(len(xsamples_lower_and_upper)): for j in range(len(ysamples_lower_and_upper)): for k in range(len(zsamples_lower_and_upper)): f_out_lower_and_upper[i, j, k] = interpolator3D( xsamples_lower_and_upper[i], ysamples_lower_and_upper[j], zsamples_lower_and_upper[k] ) f_out_extrapolation = np.zeros((len(xsamples_extrapolation), )) for i in range(len(xsamples_extrapolation)): f_out_extrapolation[i] = interpolator3D( xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i] ) if verbose_options[3]: print('New output of extrapolation to be saved:\n', repr(f_out_extrapolation)) index_xsamples_extrap = np.where(x_in[index_x_in] == xsamples_extrapolation) f_out_x_extrapolation = f_out_extrapolation[index_xsamples_extrap] im = ax[3].scatter( ysamples_extrapolation[index_xsamples_extrap], zsamples_extrapolation[index_xsamples_extrap], c=f_out_x_extrapolation, norm=c_norm, cmap='viridis', s=10 ) ax[3].set_aspect('equal') f_out_x = f_out[index_xsamples, :, :] ysamples_mesh, 
zsamples_mesh = np.meshgrid(ysamples, zsamples) ax[0].scatter( ysamples_mesh.ravel(), zsamples_mesh.ravel(), c=f_out_x.ravel(), norm=c_norm, cmap='viridis', s=10 ) index_y_print = -1 index_z_print = 0 index_ysamples_print = np.where(y_in[index_y_print] == ysamples)[0].item() index_zsamples_print = np.where(z_in[index_z_print] == zsamples)[0].item() ax[0].set_title('Slice of x', size=20) ax[1].set_title(f'Interpolated points \nin slice of x={x_in[index_x_in]}', size=20) y_corners_xsamples = pcolourmesh_corners(ysamples) z_corners_xsamples = pcolourmesh_corners(zsamples) im2 = ax[1].pcolormesh(y_corners_xsamples, z_corners_xsamples, f_out_x, norm=c_norm, cmap='viridis') ax[1].set_aspect('equal') if not (x_in[index_x_in] == xsamples_lower_and_upper).any(): raise ValueError( f'To compare a slice, n_lower_upper={n_lower}-1, must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1,' f' NB_Z={NB_Z}-1' ) index_xsamples_lower_and_upper = np.where(x_in[index_x_in] == xsamples_lower_and_upper)[0].item() y_corners_xsamples_lower_and_upper = pcolourmesh_corners(ysamples_lower_and_upper) z_corners_xsamples_lower_and_upper = pcolourmesh_corners(zsamples_lower_and_upper) f_out_lower_and_upper_x = f_out_lower_and_upper[index_xsamples_lower_and_upper, :, :] im3 = ax[2].pcolormesh( y_corners_xsamples_lower_and_upper, z_corners_xsamples_lower_and_upper, f_out_lower_and_upper_x, norm=c_norm, cmap='viridis' ) check_array_z = np.zeros(len(zsamples_lower_and_upper)) check_array_y = np.zeros(len(ysamples_lower_and_upper)) for i in range(len(zsamples_lower_and_upper)): check_array_z[i] = interpolator3D( x_in[index_x_in], ysamples_lower_and_upper[index_ysamples_lower_upper], zsamples_lower_and_upper[i] ) check_array_y[i] = interpolator3D( x_in[index_x_in], ysamples_lower_and_upper[i], zsamples_lower_and_upper[index_zsamples_lower_upper] ) ax1[0].plot(zsamples_lower_and_upper, f_out_lower_and_upper_x[index_ysamples_lower_upper, :]) ax1[0].plot(z_in, f_in[index_x_in, index_y_in, :], 'bo') ax1[0].plot(zsamples_lower_and_upper, check_array_z, 'gx') ax1[1].plot(ysamples_lower_and_upper, check_array_y) # ax1[1].plot(ysamples_lower_and_upper, f_out_lower_and_upper_x[:, index_z_plot]) ax1[0].axvline(z_in[0], color='r', linestyle='--') ax1[0].axvline(z_in[-1], color='r', linestyle='--') ax1[1].axvline(y_in[0], color='r', linestyle='--') ax1[1].axvline(y_in[-1], color='r', linestyle='--') fig.colorbar(im, ax=ax[0]) fig.colorbar(im2, ax=ax[1]) fig.colorbar(im3, ax=ax[2]) ax[2].set_aspect('equal') plt.show()
49.767857
120
0.65703
0
0
0
0
0
0
0
0
3,809
0.195243
4a41ae80cb8630870b8a540d9da1afa369fa489a
2,875
py
Python
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
36
2021-10-05T17:06:07.000Z
2022-03-29T14:11:39.000Z
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
56
2021-09-02T08:24:29.000Z
2022-03-30T07:29:07.000Z
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
8
2022-01-28T14:49:55.000Z
2022-03-26T01:28:38.000Z
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations

import abc
from typing import Union, List, TYPE_CHECKING

try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal

from .framework.response import BaseResponse

if TYPE_CHECKING:
    from supertokens_python.framework.request import BaseRequest
    from .supertokens import AppInfo

from .normalised_url_path import NormalisedURLPath
from .exceptions import SuperTokensError


class RecipeModule(abc.ABC):
    def __init__(self, recipe_id: str, app_info: AppInfo):
        self.recipe_id = recipe_id
        self.app_info = app_info

    def get_recipe_id(self):
        return self.recipe_id

    def get_app_info(self):
        return self.app_info

    def return_api_id_if_can_handle_request(
            self, path: NormalisedURLPath, method: str) -> Union[str, None]:
        apis_handled = self.get_apis_handled()
        for current_api in apis_handled:
            if not current_api.disabled and current_api.method == method and self.app_info.api_base_path.append(
                    current_api.path_without_api_base_path).equals(path):
                return current_api.request_id
        return None

    @abc.abstractmethod
    def is_error_from_this_recipe_based_on_instance(self, err):
        pass

    @abc.abstractmethod
    def get_apis_handled(self) -> List[APIHandled]:
        pass

    @abc.abstractmethod
    async def handle_api_request(self, request_id: str, request: BaseRequest,
                                 path: NormalisedURLPath, method: str, response: BaseResponse):
        pass

    @abc.abstractmethod
    async def handle_error(self, request: BaseRequest, err: SuperTokensError, response: BaseResponse):
        pass

    @abc.abstractmethod
    def get_all_cors_headers(self):
        pass


class APIHandled:
    def __init__(self, path_without_api_base_path: NormalisedURLPath,
                 method: Literal['post', 'get', 'delete', 'put', 'options', 'trace'],
                 request_id: str, disabled: bool):
        self.path_without_api_base_path = path_without_api_base_path
        self.method = method
        self.request_id = request_id
        self.disabled = disabled
34.638554
119
0.718261
1,731
0.602087
0
0
589
0.20487
293
0.101913
709
0.246609
4a428a5645724e361b7bbf5d6b4f839753d082e4
58
py
Python
tests/__init__.py
mihaidumitrescu/flake8-html
d5b62c05fb220a5cd6c777feacd69cb726a42e9a
[ "Apache-2.0" ]
36
2017-03-05T13:12:28.000Z
2021-02-03T15:05:34.000Z
tests/__init__.py
mihaidumitrescu/flake8-html
d5b62c05fb220a5cd6c777feacd69cb726a42e9a
[ "Apache-2.0" ]
23
2017-03-01T19:40:10.000Z
2022-03-31T17:13:17.000Z
tests/__init__.py
mihaidumitrescu/flake8-html
d5b62c05fb220a5cd6c777feacd69cb726a42e9a
[ "Apache-2.0" ]
15
2017-03-05T13:12:39.000Z
2022-03-25T14:46:28.000Z
# -*- coding: utf-8 -*-
"""Tests go in this directory."""
19.333333
33
0.551724
0
0
0
0
0
0
0
0
56
0.965517
4a42d347c7abb078f1060ffec9bcd3fae7f3044c
46
py
Python
datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py
Yambottle/dj-workflow-template
a47a354af2f9303c898ef403491e69cfc396d196
[ "MIT" ]
null
null
null
datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py
Yambottle/dj-workflow-template
a47a354af2f9303c898ef403491e69cfc396d196
[ "MIT" ]
null
null
null
datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py
Yambottle/dj-workflow-template
a47a354af2f9303c898ef403491e69cfc396d196
[ "MIT" ]
6
2022-02-18T20:19:04.000Z
2022-03-05T05:29:23.000Z
__version__ = "{{cookiecutter._pkg_version}}"
23
45
0.76087
0
0
0
0
0
0
0
0
31
0.673913
4a42eafd975ea0137426e4612231c34ec1b242ab
4,041
py
Python
examples/benchmarking/benchmark_bm25.py
shibing624/similarities
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
[ "Apache-2.0" ]
16
2022-02-23T11:46:18.000Z
2022-03-29T07:35:33.000Z
examples/benchmarking/benchmark_bm25.py
shibing624/similarities
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
[ "Apache-2.0" ]
1
2022-03-15T13:51:36.000Z
2022-03-16T02:56:15.000Z
examples/benchmarking/benchmark_bm25.py
shibing624/similarities
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
[ "Apache-2.0" ]
3
2022-02-24T02:06:05.000Z
2022-03-13T11:31:16.000Z
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""

import datetime
import os
import pathlib
import random
import sys

from loguru import logger

sys.path.append('../..')
from similarities import BM25Similarity
from similarities.utils import http_get
from similarities.data_loader import SearchDataLoader
from similarities.evaluation import evaluate

random.seed(42)

pwd_path = os.path.dirname(os.path.realpath(__file__))


def get_scifact():
    # Download scifact.zip dataset and unzip the dataset
    dataset = "scifact"
    url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
    zip_file = os.path.join(pwd_path, "scifact.zip")
    if not os.path.exists(zip_file):
        logger.info("Dataset not exists, downloading...")
        http_get(url, zip_file, extract=True)
    else:
        logger.info("Dataset already exists, skipping download.")
    data_path = os.path.join(pwd_path, dataset)
    return data_path


def get_dbpedia():
    dataset = "dbpedia-entity"
    url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
    zip_file = os.path.join(pwd_path, "dbpedia-entity.zip")
    if not os.path.exists(zip_file):
        logger.info("Dataset not exists, downloading...")
        http_get(url, zip_file, extract=True)
    else:
        logger.info("Dataset already exists, skipping download.")
    data_path = os.path.join(pwd_path, dataset)
    return data_path


data_path = get_scifact()

#### Loading test queries and corpus in DBPedia
corpus, queries, qrels = SearchDataLoader(data_path).load(split="test")
corpus_ids, query_ids = list(corpus), list(queries)
logger.info(f"corpus: {len(corpus)}, queries: {len(queries)}")

#### Randomly sample 1M pairs from Original Corpus (4.63M pairs)
#### First include all relevant documents (i.e. present in qrels)
corpus_set = set()
for query_id in qrels:
    corpus_set.update(list(qrels[query_id].keys()))
corpus_new = {corpus_id: corpus[corpus_id] for corpus_id in corpus_set}

#### Remove already seen k relevant documents and sample (1M - k) docs randomly
remaining_corpus = list(set(corpus_ids) - corpus_set)
sample = min(1000000 - len(corpus_set), len(remaining_corpus))
# sample = 10

for corpus_id in random.sample(remaining_corpus, sample):
    corpus_new[corpus_id] = corpus[corpus_id]

corpus_docs = {corpus_id: corpus_new[corpus_id]['title'] + corpus_new[corpus_id]['text']
               for corpus_id, corpus in corpus_new.items()}

#### Index 1M passages into the index (seperately)
model = BM25Similarity(corpus_docs)

#### Saving benchmark times
time_taken_all = {}
for query_id in query_ids:
    query = {query_id: queries[query_id]}
    #### Measure time to retrieve top-10 BM25 documents using single query latency
    start = datetime.datetime.now()
    q_res = model.most_similar(query, topn=10)
    end = datetime.datetime.now()
    # print(q_res)
    #### Measuring time taken in ms (milliseconds)
    time_taken = (end - start)
    time_taken = time_taken.total_seconds() * 1000
    time_taken_all[query_id] = time_taken
    # logger.info("query: {}: {} {:.2f}ms".format(query_id, query, time_taken))
    # logger.info("\tsearch result: {}".format(results[:2]))

time_taken = list(time_taken_all.values())
logger.info("Average time taken: {:.2f}ms".format(sum(time_taken) / len(time_taken_all)))

#### Saving benchmark times with batch
# queries = [queries[query_id] for query_id in query_ids]
start = datetime.datetime.now()
results = model.most_similar(queries, topn=10)
end = datetime.datetime.now()
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
logger.info("All, Spend {:.2f}ms".format(time_taken))
logger.info("Average time taken: {:.2f}ms".format(time_taken / len(queries)))
logger.info(f"Results size: {len(results)}")

#### Evaluate your retrieval using NDCG@k, MAP@K ...
ndcg, _map, recall, precision = evaluate(qrels, results)
logger.info(f"MAP: {_map}")
35.761062
113
0.717644
0
0
0
0
0
0
0
0
1,506
0.37268
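The record above benchmarks BM25 retrieval latency with the similarities package. As a minimal sketch of the same two calls on a toy in-memory corpus (only BM25Similarity(corpus) and most_similar(queries, topn=...) are taken from the record; the corpus, query text, and the exact shape of the returned scores are illustrative assumptions):

from similarities import BM25Similarity

# Tiny in-memory corpus: id -> document text (title and body concatenated, as in the record above).
corpus_docs = {
    "d1": "BM25 ranking function for text retrieval",
    "d2": "ARP resolves IPv4 addresses to MAC addresses",
    "d3": "STUN helps peers behind NAT establish UDP connections",
}

model = BM25Similarity(corpus_docs)

# Queries keyed by id, mirroring the {query_id: text} shape used in the benchmark.
queries = {"q1": "text retrieval ranking"}
results = model.most_similar(queries, topn=2)
print(results)  # expected: per-query scores with d1 ranked highest for q1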
4a43a63b067e2c9d49aadc213c2c322feea2bc14
14,531
py
Python
tb/test_arp_64.py
sergachev/verilog-ethernet
cef6b47bb3b969120cabce3b89b0c98bb47ca6a9
[ "MIT" ]
2
2020-01-09T05:58:04.000Z
2022-01-04T03:29:00.000Z
tb/test_arp_64.py
zslwyuan/verilog-ethernet
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
[ "MIT" ]
null
null
null
tb/test_arp_64.py
zslwyuan/verilog-ethernet
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
[ "MIT" ]
1
2021-09-25T05:45:18.000Z
2021-09-25T05:45:18.000Z
#!/usr/bin/env python """ Copyright (c) 2014-2018 Alex Forencich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from myhdl import * import os import axis_ep import eth_ep import arp_ep module = 'arp_64' testbench = 'test_%s' % module srcs = [] srcs.append("../rtl/%s.v" % module) srcs.append("../rtl/lfsr.v") srcs.append("../rtl/arp_cache.v") srcs.append("../rtl/arp_eth_rx_64.v") srcs.append("../rtl/arp_eth_tx_64.v") srcs.append("%s.v" % testbench) src = ' '.join(srcs) build_cmd = "iverilog -o %s.vvp %s" % (testbench, src) def bench(): # Inputs clk = Signal(bool(0)) rst = Signal(bool(0)) current_test = Signal(intbv(0)[8:]) s_eth_hdr_valid = Signal(bool(0)) s_eth_dest_mac = Signal(intbv(0)[48:]) s_eth_src_mac = Signal(intbv(0)[48:]) s_eth_type = Signal(intbv(0)[16:]) s_eth_payload_axis_tdata = Signal(intbv(0)[64:]) s_eth_payload_axis_tkeep = Signal(intbv(0)[8:]) s_eth_payload_axis_tvalid = Signal(bool(0)) s_eth_payload_axis_tlast = Signal(bool(0)) s_eth_payload_axis_tuser = Signal(bool(0)) m_eth_payload_axis_tready = Signal(bool(0)) m_eth_hdr_ready = Signal(bool(0)) arp_request_valid = Signal(bool(0)) arp_request_ip = Signal(intbv(0)[32:]) arp_response_ready = Signal(bool(0)) local_mac = Signal(intbv(0)[48:]) local_ip = Signal(intbv(0)[32:]) gateway_ip = Signal(intbv(0)[32:]) subnet_mask = Signal(intbv(0)[32:]) clear_cache = Signal(bool(0)) # Outputs s_eth_hdr_ready = Signal(bool(0)) s_eth_payload_axis_tready = Signal(bool(0)) m_eth_hdr_valid = Signal(bool(0)) m_eth_dest_mac = Signal(intbv(0)[48:]) m_eth_src_mac = Signal(intbv(0)[48:]) m_eth_type = Signal(intbv(0)[16:]) m_eth_payload_axis_tdata = Signal(intbv(0)[64:]) m_eth_payload_axis_tkeep = Signal(intbv(0)[8:]) m_eth_payload_axis_tvalid = Signal(bool(0)) m_eth_payload_axis_tlast = Signal(bool(0)) m_eth_payload_axis_tuser = Signal(bool(0)) arp_request_ready = Signal(bool(0)) arp_response_valid = Signal(bool(0)) arp_response_error = Signal(bool(0)) arp_response_mac = Signal(intbv(0)[48:]) # sources and sinks eth_source_pause = Signal(bool(0)) eth_sink_pause = Signal(bool(0)) eth_source = eth_ep.EthFrameSource() eth_source_logic = eth_source.create_logic( clk, rst, eth_hdr_ready=s_eth_hdr_ready, eth_hdr_valid=s_eth_hdr_valid, eth_dest_mac=s_eth_dest_mac, eth_src_mac=s_eth_src_mac, eth_type=s_eth_type, eth_payload_tdata=s_eth_payload_axis_tdata, eth_payload_tkeep=s_eth_payload_axis_tkeep, eth_payload_tvalid=s_eth_payload_axis_tvalid, eth_payload_tready=s_eth_payload_axis_tready, eth_payload_tlast=s_eth_payload_axis_tlast, eth_payload_tuser=s_eth_payload_axis_tuser, 
pause=eth_source_pause, name='eth_source' ) eth_sink = eth_ep.EthFrameSink() eth_sink_logic = eth_sink.create_logic( clk, rst, eth_hdr_ready=m_eth_hdr_ready, eth_hdr_valid=m_eth_hdr_valid, eth_dest_mac=m_eth_dest_mac, eth_src_mac=m_eth_src_mac, eth_type=m_eth_type, eth_payload_tdata=m_eth_payload_axis_tdata, eth_payload_tkeep=m_eth_payload_axis_tkeep, eth_payload_tvalid=m_eth_payload_axis_tvalid, eth_payload_tready=m_eth_payload_axis_tready, eth_payload_tlast=m_eth_payload_axis_tlast, eth_payload_tuser=m_eth_payload_axis_tuser, pause=eth_sink_pause, name='eth_sink' ) arp_request_source = axis_ep.AXIStreamSource() arp_request_source_logic = arp_request_source.create_logic( clk, rst, tdata=(arp_request_ip,), tvalid=arp_request_valid, tready=arp_request_ready, name='arp_request_source' ) arp_response_sink = axis_ep.AXIStreamSink() arp_response_sink_logic = arp_response_sink.create_logic( clk, rst, tdata=(arp_response_error, arp_response_mac), tvalid=arp_response_valid, tready=arp_response_ready, name='arp_response_sink' ) # DUT if os.system(build_cmd): raise Exception("Error running build command") dut = Cosimulation( "vvp -m myhdl %s.vvp -lxt2" % testbench, clk=clk, rst=rst, current_test=current_test, s_eth_hdr_valid=s_eth_hdr_valid, s_eth_hdr_ready=s_eth_hdr_ready, s_eth_dest_mac=s_eth_dest_mac, s_eth_src_mac=s_eth_src_mac, s_eth_type=s_eth_type, s_eth_payload_axis_tdata=s_eth_payload_axis_tdata, s_eth_payload_axis_tkeep=s_eth_payload_axis_tkeep, s_eth_payload_axis_tvalid=s_eth_payload_axis_tvalid, s_eth_payload_axis_tready=s_eth_payload_axis_tready, s_eth_payload_axis_tlast=s_eth_payload_axis_tlast, s_eth_payload_axis_tuser=s_eth_payload_axis_tuser, m_eth_hdr_valid=m_eth_hdr_valid, m_eth_hdr_ready=m_eth_hdr_ready, m_eth_dest_mac=m_eth_dest_mac, m_eth_src_mac=m_eth_src_mac, m_eth_type=m_eth_type, m_eth_payload_axis_tdata=m_eth_payload_axis_tdata, m_eth_payload_axis_tkeep=m_eth_payload_axis_tkeep, m_eth_payload_axis_tvalid=m_eth_payload_axis_tvalid, m_eth_payload_axis_tready=m_eth_payload_axis_tready, m_eth_payload_axis_tlast=m_eth_payload_axis_tlast, m_eth_payload_axis_tuser=m_eth_payload_axis_tuser, arp_request_valid=arp_request_valid, arp_request_ready=arp_request_ready, arp_request_ip=arp_request_ip, arp_response_valid=arp_response_valid, arp_response_ready=arp_response_ready, arp_response_error=arp_response_error, arp_response_mac=arp_response_mac, local_mac=local_mac, local_ip=local_ip, gateway_ip=gateway_ip, subnet_mask=subnet_mask, clear_cache=clear_cache ) @always(delay(4)) def clkgen(): clk.next = not clk @instance def check(): yield delay(100) yield clk.posedge rst.next = 1 yield clk.posedge rst.next = 0 yield clk.posedge yield delay(100) yield clk.posedge yield clk.posedge local_mac.next = 0xDAD1D2D3D4D5 local_ip.next = 0xc0a80165 gateway_ip.next = 0xc0a80101 subnet_mask.next = 0xFFFFFF00 yield clk.posedge print("test 1: ARP request") current_test.next = 1 test_frame = arp_ep.ARPFrame() test_frame.eth_dest_mac = 0xFFFFFFFFFFFF test_frame.eth_src_mac = 0x5A5152535455 test_frame.eth_type = 0x0806 test_frame.arp_htype = 0x0001 test_frame.arp_ptype = 0x0800 test_frame.arp_hlen = 6 test_frame.arp_plen = 4 test_frame.arp_oper = 1 test_frame.arp_sha = 0x5A5152535455 test_frame.arp_spa = 0xc0a80164 test_frame.arp_tha = 0x000000000000 test_frame.arp_tpa = 0xc0a80165 eth_source.send(test_frame.build_eth()) yield eth_sink.wait() rx_frame = eth_sink.recv() check_frame = arp_ep.ARPFrame() check_frame.parse_eth(rx_frame) assert check_frame.eth_dest_mac == 0x5A5152535455 assert 
check_frame.eth_src_mac == 0xDAD1D2D3D4D5 assert check_frame.eth_type == 0x0806 assert check_frame.arp_htype == 0x0001 assert check_frame.arp_ptype == 0x0800 assert check_frame.arp_hlen == 6 assert check_frame.arp_plen == 4 assert check_frame.arp_oper == 2 assert check_frame.arp_sha == 0xDAD1D2D3D4D5 assert check_frame.arp_spa == 0xc0a80165 assert check_frame.arp_tha == 0x5A5152535455 assert check_frame.arp_tpa == 0xc0a80164 yield delay(100) yield clk.posedge print("test 2: Cached read") current_test.next = 2 arp_request_source.send([(0xc0a80164,)]) yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert not err assert mac == 0x5A5152535455 yield delay(100) yield clk.posedge print("test 3: Unached read") current_test.next = 3 arp_request_source.send([(0xc0a80166,)]) # wait for ARP request packet yield eth_sink.wait() rx_frame = eth_sink.recv() check_frame = arp_ep.ARPFrame() check_frame.parse_eth(rx_frame) assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5 assert check_frame.eth_type == 0x0806 assert check_frame.arp_htype == 0x0001 assert check_frame.arp_ptype == 0x0800 assert check_frame.arp_hlen == 6 assert check_frame.arp_plen == 4 assert check_frame.arp_oper == 1 assert check_frame.arp_sha == 0xDAD1D2D3D4D5 assert check_frame.arp_spa == 0xc0a80165 assert check_frame.arp_tha == 0x000000000000 assert check_frame.arp_tpa == 0xc0a80166 # generate response test_frame = arp_ep.ARPFrame() test_frame.eth_dest_mac = 0xDAD1D2D3D4D5 test_frame.eth_src_mac = 0x6A6162636465 test_frame.eth_type = 0x0806 test_frame.arp_htype = 0x0001 test_frame.arp_ptype = 0x0800 test_frame.arp_hlen = 6 test_frame.arp_plen = 4 test_frame.arp_oper = 2 test_frame.arp_sha = 0x6A6162636465 test_frame.arp_spa = 0xc0a80166 test_frame.arp_tha = 0xDAD1D2D3D4D5 test_frame.arp_tpa = 0xc0a80165 eth_source.send(test_frame.build_eth()) # wait for lookup yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert not err assert mac == 0x6A6162636465 yield delay(100) yield clk.posedge print("test 4: Unached read, outside of subnet") current_test.next = 4 arp_request_source.send([(0x08080808,)]) # wait for ARP request packet yield eth_sink.wait() rx_frame = eth_sink.recv() check_frame = arp_ep.ARPFrame() check_frame.parse_eth(rx_frame) assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5 assert check_frame.eth_type == 0x0806 assert check_frame.arp_htype == 0x0001 assert check_frame.arp_ptype == 0x0800 assert check_frame.arp_hlen == 6 assert check_frame.arp_plen == 4 assert check_frame.arp_oper == 1 assert check_frame.arp_sha == 0xDAD1D2D3D4D5 assert check_frame.arp_spa == 0xc0a80165 assert check_frame.arp_tha == 0x000000000000 assert check_frame.arp_tpa == 0xc0a80101 # generate response test_frame = arp_ep.ARPFrame() test_frame.eth_dest_mac = 0xDAD1D2D3D4D5 test_frame.eth_src_mac = 0xAABBCCDDEEFF test_frame.eth_type = 0x0806 test_frame.arp_htype = 0x0001 test_frame.arp_ptype = 0x0800 test_frame.arp_hlen = 6 test_frame.arp_plen = 4 test_frame.arp_oper = 2 test_frame.arp_sha = 0xAABBCCDDEEFF test_frame.arp_spa = 0xc0a80101 test_frame.arp_tha = 0xDAD1D2D3D4D5 test_frame.arp_tpa = 0xc0a80165 eth_source.send(test_frame.build_eth()) # wait for lookup yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert not err assert mac == 0xAABBCCDDEEFF yield delay(100) yield clk.posedge print("test 5: Unached read, timeout") current_test.next = 5 
arp_request_source.send([(0xc0a80167,)]) yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert err # check for 4 ARP requests assert eth_sink.count() == 4 while not eth_sink.empty(): rx_frame = eth_sink.recv() check_frame = arp_ep.ARPFrame() check_frame.parse_eth(rx_frame) assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5 assert check_frame.eth_type == 0x0806 assert check_frame.arp_htype == 0x0001 assert check_frame.arp_ptype == 0x0800 assert check_frame.arp_hlen == 6 assert check_frame.arp_plen == 4 assert check_frame.arp_oper == 1 assert check_frame.arp_sha == 0xDAD1D2D3D4D5 assert check_frame.arp_spa == 0xc0a80165 assert check_frame.arp_tha == 0x000000000000 assert check_frame.arp_tpa == 0xc0a80167 yield delay(100) yield clk.posedge print("test 6: Broadcast") current_test.next = 6 # subnet broadcast arp_request_source.send([(0xc0a801ff,)]) yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert not err assert mac == 0xffffffffffff # general broadcast arp_request_source.send([(0xffffffff,)]) yield arp_response_sink.wait() err, mac = arp_response_sink.recv().data[0] assert not err assert mac == 0xffffffffffff yield delay(100) raise StopSimulation return instances() def test_bench(): sim = Simulation(bench()) sim.run() if __name__ == '__main__': print("Running test...") test_bench()
31.727074
77
0.671874
0
0
12,875
0.886037
7,398
0.509118
0
0
1,770
0.121809
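Every test case in the ARP testbench above asserts the same request-to-reply field mapping: opcode 1 becomes 2, sender and target fields swap, and the responder inserts its own MAC and IP. A small self-contained sketch of just that mapping in plain Python, independent of the repo's eth_ep/arp_ep helpers (the dict keys are illustrative, not the testbench's API); the constants match test 1 above:

def arp_reply(request, local_mac, local_ip):
    """Build the ARP reply the DUT is expected to send for a request addressed to local_ip."""
    assert request["oper"] == 1 and request["tpa"] == local_ip
    return {
        "oper": 2,              # reply
        "sha": local_mac,       # responder's MAC becomes the sender hardware address
        "spa": local_ip,        # responder's IP becomes the sender protocol address
        "tha": request["sha"],  # original requester becomes the target
        "tpa": request["spa"],
    }

request = {"oper": 1, "sha": 0x5A5152535455, "spa": 0xc0a80164, "tha": 0, "tpa": 0xc0a80165}
reply = arp_reply(request, local_mac=0xDAD1D2D3D4D5, local_ip=0xc0a80165)
assert reply["tha"] == 0x5A5152535455 and reply["tpa"] == 0xc0a80164  # same checks as test 1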
4a4404fe1d92ad81158f3995d99e25353d3c8492
4,315
py
Python
NitroGenerator.py
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
[ "MIT" ]
2
2021-07-27T06:57:36.000Z
2021-08-16T04:17:41.000Z
NitroGenerator.py
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
[ "MIT" ]
null
null
null
NitroGenerator.py
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
[ "MIT" ]
1
2021-11-06T05:32:40.000Z
2021-11-06T05:32:40.000Z
import random import sys import subprocess def pip_install(module: str): subprocess.run([sys.executable, "-m", "pip", "-q", "--disable-pip-version-check", "install", module]) try: import requests except: print("'requests' module not found! Trying to install... ") pip_install("requests") import requests def print_header(): header = """ +-------------------------+ | Discord Nitro Generator | +-------------------------+ Note: For Educational Purposes Only © ATRS 2021. All Rights Reserved. """ print(header) def get_code(nitro_type: str): characters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] if nitro_type == "Boost": return str("".join([random.choice(characters) for char in range(24)])) elif nitro_type == "Classic": return str("".join([random.choice(characters) for char in range(16)])) def check_code(nitro_code: str): try: headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'} check_url = f"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro_code}?with_application=false&with_subscription_plan=true" status = requests.get(url=check_url, headers=headers).status_code if status == 200: return "True" elif status == 429: return "None" else: return "False" except: print("Something went wrong while checking urls. Press any key to exit. ") input() quit() def get_nitro_type(): print("Enter what type of Discord Nitro you want to generate: \n\t1. Boost\n\t2. Classic") user_response = input("> ") if user_response.replace(" ", "").strip().lower() == "boost" or user_response.replace(" ", "").strip().lower() == "1": return "Boost" elif user_response.replace(" ", "").strip().lower() == "classic" or user_response.replace(" ", "").strip().lower() == "2": return "Classic" else: print("Not a valid input. Press any key to exit. ") input() quit() print_header() user_nitro_type = get_nitro_type() print("Enter the number of Nitro Codes you want: ") amount = int(input("> ")) valid_codes = 0 invalid_codes = 0 unchecked_codes = 0 print() print() f = open("All_Nitro_Codes.txt", "w", encoding='utf-8') for i in range(amount): user_nitro_code = get_code(nitro_type=user_nitro_type) validity = check_code(nitro_code=user_nitro_code) if validity == "True": display = f"Valid. | https://discord.com/gifts/{user_nitro_code}" valid_codes += 1 print(display) f.writelines(display + "\n") elif validity == "False": display = f"Invalid. | https://discord.com/gifts/{user_nitro_code}" invalid_codes += 1 print(display) f.writelines(display + "\n") elif validity == "None": display = f"Unchecked. Rate limited. | https://discord.com/gifts/{user_nitro_code}" unchecked_codes += 1 print(display) f.writelines(display + "\n") print("\n\nSuccessfully generated Nitro Codes. ") print("Valid Nitro Codes: " + str(valid_codes)) print("Invalid Nitro Codes: " + str(invalid_codes)) print("Unchecked Nitro Codes: " + str(unchecked_codes)) print("\nEnter any key to exit.") input() quit()
36.567797
145
0.526999
0
0
0
0
0
0
0
0
1,700
0.393883
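The one generally reusable piece of the record above is its install-on-ImportError bootstrap for the requests dependency. A minimal sketch of that pattern, assuming pip is available for the running interpreter (the helper name and the module being installed are only examples):

import subprocess
import sys

def ensure_module(name):
    """Import a module, installing it with pip for the current interpreter if it is missing."""
    try:
        return __import__(name)
    except ImportError:
        subprocess.run([sys.executable, "-m", "pip", "install", "--quiet", name], check=False)
        return __import__(name)

requests = ensure_module("requests")
print(requests.__version__)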
4a4408798c8290d4f3dfdd7e187e5ce0fde47eee
1,018
py
Python
2015/main/13/part2.py
sgravrock/adventofcode
1f5263ee242c8446ac1c08d2aef195a0a4595ccb
[ "MIT" ]
null
null
null
2015/main/13/part2.py
sgravrock/adventofcode
1f5263ee242c8446ac1c08d2aef195a0a4595ccb
[ "MIT" ]
null
null
null
2015/main/13/part2.py
sgravrock/adventofcode
1f5263ee242c8446ac1c08d2aef195a0a4595ccb
[ "MIT" ]
null
null
null
import sys
import itertools


def readfile(f):
    result = {}
    for line in f:
        fields = line.rstrip().split(" ")
        p1 = fields[0]
        p2 = fields[10].replace(".", "")
        n = int(fields[3])
        if fields[2] == "lose":
            n *= -1
        result[(p1, p2)] = n
    return result


def optimal(config):
    add_self(config)
    diners = set([k[0] for k in config.keys()])
    arrangements = list(itertools.permutations(diners))
    all = [(arr, happiness(config, arr)) for arr in arrangements]
    return max(all, key=lambda p: p[1])


def happiness(config, arrangement):
    return sum([happiness_for_pair(config, p) for p in makepairs(arrangement)])


def happiness_for_pair(config, pair):
    opposite = (pair[1], pair[0])
    return config[pair] + config[opposite]


def add_self(config):
    for d in set([k[0] for k in config.keys()]):
        config[(d, "self")] = 0
        config[("self", d)] = 0


def makepairs(arr):
    n = len(arr)
    for i in xrange(1, n):
        yield (arr[i-1], arr[i])
    yield (arr[n-1], arr[0])


if __name__ == "__main__":
    print optimal(readfile(sys.stdin))
23.674419
76
0.650295
0
0
110
0.108055
0
0
0
0
36
0.035363
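The script above targets Python 2 (xrange, print statement). A quick self-contained illustration of the same brute-force idea in Python 3, using a hard-coded three-person table instead of the puzzle's input format (the names and happiness values are sample data, not the puzzle input):

import itertools

# Pairwise happiness, mirroring the {(person, neighbour): units} layout built by readfile().
config = {
    ("Alice", "Bob"): 54, ("Bob", "Alice"): 83,
    ("Alice", "Carol"): -79, ("Carol", "Alice"): -62,
    ("Bob", "Carol"): -7, ("Carol", "Bob"): 60,
}

def happiness(arrangement):
    total = 0
    n = len(arrangement)
    for i in range(n):
        a, b = arrangement[i], arrangement[(i + 1) % n]  # circular seating
        total += config[(a, b)] + config[(b, a)]
    return total

diners = {p for p, _ in config}
best = max(itertools.permutations(diners), key=happiness)
print(best, happiness(best))  # every rotation/reflection of the same circle scores identically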
4a444c988302d74c981cef9771e8cb5c4e9d2945
29,855
py
Python
networking/connection/stun_client.py
bcgrendel/python_networking
b4c847d9eeeea078868b8dcb3d385e02eb0b8e96
[ "MIT" ]
null
null
null
networking/connection/stun_client.py
bcgrendel/python_networking
b4c847d9eeeea078868b8dcb3d385e02eb0b8e96
[ "MIT" ]
null
null
null
networking/connection/stun_client.py
bcgrendel/python_networking
b4c847d9eeeea078868b8dcb3d385e02eb0b8e96
[ "MIT" ]
null
null
null
import socket import sys import traceback import struct import threading; from threading import Thread; import time; import datetime; import json #import buffered_message; import hashlib from Crypto.PublicKey import RSA from connection_state import ConnectionState # publickey = RSA.importKey(key_string) import tcp; import udp; # ************* # EXAMPLE USAGE # ************* ''' import socket import tcp import udp import stun_client import time start_listening = True local_ip = socket.gethostbyname(socket.gethostname()) local_port = 30779 server_ip = socket.gethostbyname(socket.gethostname()) server_port = 30788 socket_timeout = 3.0 peer_block_manager = None client = stun_client.STUN_Client(start_listening, local_ip, local_port, server_ip, server_port, socket_timeout, peer_block_manager) # Set your available listening port ranges client.available_ports = [[35000, 35100], [36500, 36700],] # Register a user acccount with the stun server. class RegisterCallback: def __init__(self): self.error_message = "" self.success = None def handle_timeout(self, params=None): self.success = False self.error_message = "Registration request to server has timed-out." def complete_registration(self, success, error_message=""): self.success = success self.error_message = error_message username = "test_user" password = "test_pass123" profile_map = {} callback_object = RegisterCallback() registration_type = "permanent" client.register(username, password, profile_map, callback_object, registration_type) response_check_interval = 0.5; while callback_object.success == None: time.sleep(response_check_interval) if not callback_object.success: print "Error: %s" % callback_object.error_message exit() # Login with username and password. class AuthCallback: def __init__(self): self.error_message = "" self.success = None def handle_timeout(self, params=None): self.success = False self.error_message = "Authentication request to server has timed-out." def complete_authentication(self, success, error_message=""): self.success = success self.error_message = error_message callback_object = AuthCallback() login = True # this authentication is to login. It'd be False if we wanted to log out. client.authenticate(username, password, callback_object, login) while callback_object.success == None: time.sleep(response_check_interval) if not callback_object.success: print "Error: %s" % callback_object.error_message exit() # Now we can access the list of peers connected to the server. # Alternatively, assign a function reference to client.peer_map_callback (argument will be a reference to client.peer_map) to be notified of peer list updates as they are received. # # sample peer_map: # ["test_user":["test_user", None], "another_user":["another_user", None],] # Get a peer from the list. peer_username = None; for _username, data in client.peer_map.iteritems(): if username != _username: peer_username = _username break # Connect to that peer (hole-punch) class ConnectionCallback: def __init__(self): self.error_message = "" self.success = None self.client_key = None def handle_timeout(self, params=None): self.success = False self.error_message = "Connection request to server has timed-out." 
def complete_connection(self, peer_username, success, error_message=""): self.success = success if success: self.client_key = error_message else: self.error_message = error_message buffer_size = 128 callback_object = ConnectionCallback() client.connect_to_peer(peer_username, buffer_size, callback_object) while callback_object.success == None: time.sleep(response_check_interval) if not callback_object.success: print "Error: %s" % callback_object.error_message exit() client_key = callback_object.client_key udp_client = client.client_map[client_key] # Now you can communicate with that peer. udp_client.send_message("Greetings!") udp_client.pop_all_messages() ''' class STUN_Client: def __init__(self, start_listen_thread=False, local_ip=socket.gethostbyname(socket.gethostname()), local_port=30779, server_ip=socket.gethostbyname(socket.gethostname()), server_port=30788, socket_timeout=3.0, peer_block_manager=None): self.local_ip = local_ip; self.local_port = local_port; self.socket_timeout = socket_timeout; self.peer_block_manager = peer_block_manager; self.thread_sleep_duration = 0.1; self.error_log = []; self.username = None; self.password = None; self.profile_map = {}; self.authenticated = False; self.auth_callback = None; self.auth_keys = None; self.auth_timeout = 15; # 15 seconds is the limit for authentication requests. It's just a magic number like many of these timeout values. self.last_auth = None; self.login_expiration = 20; # login will expire after this many seconds passes without successful keep-alive authentication self.auth_keep_alive_interval = 5; self.auth_keep_alive_multiplier = 1; # Avoid hammering the server if it's down. Will increment every time re-auth fails, returns to 1 upon successful authentication. self.re_auth_ready = None; self.master_log = []; # all messages recieved self.message_log_map = {}; # log per message type. # this will handle callbacks for keeping track of whether the user's authentication expires (namely from losing connection to the server.) self.authentication_monitor_object = None; self.hole_punch_timeout = 20; self.hole_punch_max_attempts = 20; self.server_response_timeout = 20; # Server response flags. Set to None when sending a request; they are flipped to True upon receiving a response. Used for determining response time-out. self._auth_status = None; self._registration_status = None; # Private. Internal use only. self._holepunch_status = {}; self.available_ports = [[34000, 34100],] # list of ranges, e.g. ports 34000 - 34100 self.used_ports = []; self.registration_key = None; self.udp_client_keep_alive_timeout = 30; # dictionary of active udp connections (hole-punched) self.client_map = {}; self.callback_map = {}; self.send_queue = []; self.connection_state = ConnectionState(False); # Initialize TCP client. self.init_tcp_client(server_ip, server_port); self.peer_map = {}; # Start listening to the stun server. self.init_stun_listener(); self.keep_alive_monitor = KeepAliveMonitor(self); self.peer_map_callback = None; def shutdown(self, stun_only=True): self.authenticated = False; self.connection_state.active = False; # kills main thread, making the logout auth sequence impossible in its current implementation (get salt/key, then perform request) which needs the main loop. self.stun_client.disconnect(); if not stun_only: # disconnect all udp clients... 
for key, client in self.client_map.iteritems(): client.disconnect(); self.client_map.clear(); self.peer_map.clear(); del self.used_ports[:] def restart(self, stun_only=True): self.shutdown(stun_only); self.init_tcp_client(self.server_ip, self.server_port); self.init_stun_listener(); def log_error(self, error_message, extra=None): err_msg = "[STUN_Server] Line #%s: %s\n\n%s" % (str(traceback.tb_lineno(sys.exc_traceback)), traceback.format_exc(), sys.exc_info()); timestamp = time.time(); date_string = datetime.datetime.fromtimestamp(timestamp).strftime('(%Y-%m-%d) %H:%M:%S') self.error_log.append((timestamp, date_string, err_msg, extra)); def monitor_response(self, target_object, target_key=None, timeout=20, callback=None, callback_params=None, timeout_callback=None, timeout_callback_params=None): """Waits until target is no longer null or timeout occurs. Timeout is in seconds. target_object and target_key should be strings. If target key is not null, then target_object will be treated as a dictionary (using target_key for the index). This function is best utilized on its own separate thread.""" # Wait until salt and key have been retrieved or timeout occurs. time_elapsed = 0; start_time = time.time(); target_attribute = getattr(self, target_object); target = None; connection_state = self.connection_state #print "Monitoring for %s" % target_object; # Behold, python lambda expressions in the wild! if target_key == None: target = lambda parent: getattr(parent, target_object); else: target = lambda parent: getattr(parent, target_object)[target_key]; while time_elapsed < timeout: time_elapsed = time.time() - start_time; # check for shutdown. if not connection_state.active: return; # check for target condition if target(self) != None: break; time.sleep(self.thread_sleep_duration); # Check for timeout. if target(self) == None: #print "Timeout on %s" % target_object; has_timeout_callback = timeout_callback != None; if has_timeout_callback: if timeout_callback_params != None: timeout_callback(timeout_callback_params); else: timeout_callback(); return; #else: # print "No timeout on %s" % target_object; # Success, run the callback if one was provided (maybe not if one is only concerned with the timeout event). if callback != None: if callback_params != None: callback(target_object, target_key, callback_params); else: callback(target_object, target_key); def authenticate_thread(self, username, password, callback_object=None, login=True): # callback_object should have a complete_authentication(success, error_message) method. self.username = username; self.password = password; self.auth_callback = callback_object; timeout_handler = None; has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout"))) if has_timeout_handler: timeout_handler = callback_object.handle_timeout # Send salt and dynamic key retrieval request. self.auth_keys = None; message = "auth_salt_request %s" % username; if not self.stun_send_message(message): #callback_object.complete_authentication(False, "Failed to connect to the server."); if timeout_handler != None: timeout_handler("Failed to connect to the server."); return; # Wait until salt and key have been retrieved or timeout occurs. 
self.monitor_response("auth_keys", None, self.server_response_timeout, self.authenticate_send_credentials, [login, callback_object], timeout_handler, "Server failed to respond."); def authenticate_send_credentials(self, target_object=None, target_key=None, params=None): callback_object = None; if params != None: callback_object = params[1]; login = params[0] # hash the password salt, dynamic_key = self.auth_keys; if not salt: if callback_object != None: callback_object.complete_authentication(False, "Failed to connect to the server."); return; salted_password = "%s%s" % (salt, self.password) hashed_salted_password = hashlib.sha384(salted_password).hexdigest(); #print "hash1: %s\n" % hashed_salted_password; key_and_hash = "%s%s" % (dynamic_key, hashed_salted_password) hashed_password = hashlib.sha384(key_and_hash).hexdigest(); #print "hash2: %s" % hashed_password; self._auth_status = None; # Send authentication request. message = "authenticate %s" % json.dumps([self.username, hashed_password, login, json.dumps(self.available_ports), json.dumps(self.used_ports)]); if not self.stun_send_message(message): if callback_object != None: callback_object.complete_authentication(False, "Failed to connect to the server."); return; timeout_handler = None; has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout"))) if has_timeout_handler: timeout_handler = callback_object.handle_timeout self.monitor_response("_auth_status", None, self.server_response_timeout, None, None, timeout_handler); def registration_completion_handler(self, target_object, target_key, params): callback_object = params; registration_handler = None; has_registration_handler = ((callback_object != None) and (hasattr(callback_object, "complete_registration"))) if has_registration_handler: callback_object.complete_registration(True, ""); def send_encrypted_registration_request(self, target_object=None, target_key=None, params=None): username, password, profile_map, callback_object, registration_type = params; self._registration_status = None; # Construct the message. message = "%s" % json.dumps([username, password, profile_map, registration_type]); # Encrypt the message. public_key = RSA.importKey(self.registration_key) message = public_key.encrypt(message, 32); # Tack on the username in plain text and json_encode again. The STUN Server needs to username to determine which private key to use to decrypt the message. message = "register %s %s" % (username, message[0]); if not self.stun_send_message(message): callback_object.complete_registration(False, "Failed to connect to the server."); return; timeout_handler = None; has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout"))) if has_timeout_handler: timeout_handler = callback_object.handle_timeout # Wait until salt and key have been retrieved or timeout occurs. self.monitor_response("_registration_status", None, self.server_response_timeout, self.registration_completion_handler, callback_object, timeout_handler); def register_thread(self, username, password, profile_map, callback_object=None, registration_type="permanent"): # callback_object should have a complete_registration(success, error_message) method. 
self.username = username; self.password = password; self.profile_map = profile_map; self.register_callback = callback_object; self.registration_key = None; message = "register_key %s" % username; if not self.stun_send_message(message): callback_object.complete_registration(False, "Failed to connect to the server."); return; timeout_handler = None; has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout"))) if has_timeout_handler: timeout_handler = callback_object.handle_timeout params = [username, password, profile_map, callback_object, registration_type]; self.monitor_response("registration_key", None, self.server_response_timeout, self.send_encrypted_registration_request, params, timeout_handler); def authenticate(self, username, password, callback_object=None, login=True): """Non-blocking. Sends a user authentication request.""" # Spawn a separate thread to perform authentication. This is to keep from blocking the caller, since a callback is expected to handle results. Thread(target=self.authenticate_thread, args=(username, password, callback_object, login)).start(); def maintain_authentication(self, callback_object=None): #self.authentication_monitor_object username = self.username password = self.password last_auth = self.last_auth self.re_auth_ready = True; while self.authenticated: last_reauth = self.keep_alive_monitor.last_reauth_attempt; now = time.time(); ready_time = last_reauth + (self.auth_keep_alive_multiplier * self.auth_keep_alive_interval); time_for_another_reauth_attempt = now >= ready_time; # By re_auth_ready, I'm saying a re-authentication attempt isn't currently in progress. Yes, it's a poorly named variable. # I'll need to rename it something better. Maybe later (trademark). if self.re_auth_ready and time_for_another_reauth_attempt: self.re_auth_ready = False; self.authenticate(self.username, self.password, self.keep_alive_monitor); time.sleep(self.thread_sleep_duration); def logout(self): self.authenticated = False; self.authenticate(self.username, self.password, self.keep_alive_monitor, False); def register(self, username, password, profile_map, callback_object=None, registration_type="permanent"): """Non-blocking. Sends a user registration request. Only type of registration available for now is 'permanent'. Temporary to come later, maybe (for guests/'unregistered' users). Note that profile_map should be a json-encoded string (you can store arbitrary data here).""" # Spawn a separate thread to perform registration. This is to keep from blocking the caller, since a callback is expected to handle results. 
Thread(target=self.register_thread, args=(username, password, profile_map, callback_object, registration_type)).start(); def init_tcp_client(self, server_ip, server_port, buffer_size=1024): self.server_ip = server_ip; self.server_port = server_port; self.stun_client = tcp.TCP_Client(server_ip, server_port, buffer_size); def init_stun_listener(self): self.connection_state = ConnectionState(True); Thread(target=self.stun_listen_loop).start(); def stun_send_message(self, message, json_encode=False, prepare=True): try: self.stun_client.send_message(message, json_encode, prepare); return True; except: return False; def stun_listen_loop(self): connection_state = self.connection_state message_object = None while self.connection_state.active: try: message_object = self.stun_client.pop_message(); is_valid_message = ((message_object != None) and (len(message_object) > 2)); self.master_log.append(message_object); if is_valid_message: message = message_object[2]; message_type, message_body = message.split(" ",1); if message_type not in self.message_log_map: self.message_log_map[message_type] = []; self.message_log_map[message_type].append(message_object); #print "MESSAGE: %s\n" % message_object; if(message_type == "peer_map"): # peer data should be [[peer_username, public_profile_map], ...] message_data = json.loads(message_body); self.update_peer_map(message_data); if self.peer_map_callback != None: self.peer_map_callback(self.peer_map); elif(message_type == "hole_punch"): peer_allowed = True; # message body should be [listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size] message_data = json.loads(message_body); listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size = message_data port_in_use = False; # Ensure port isn't already in use. if listen_port in self.used_ports: port_in_use = True; self.stun_send_message("hole_punch_reject %s" % json.dumps([listen_ip, listen_port, self.username, peer_ip, peer_port, peer_username, buffer_size, port_in_use])); continue; message_body = json.dumps([listen_ip, listen_port, self.username, peer_ip, peer_port, peer_username, buffer_size, port_in_use]); if(self.peer_block_manager != None): peer_allowed = self.peer_block_manager.is_peer_allowed(message_data); if(peer_allowed): self.stun_send_message("hole_punch_ack %s" % message_body); else: self.stun_send_message("hole_punch_reject %s" % message_body); elif(message_type == "hole_punch_request_rejected"): # Deals with requests that fail due to lack of authentication (this client or the target client) or target client doesn't exist. 
# message_body should be [listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size] fail_type, target_username, error_message = json.loads(message_body); if target_username in self.callback_map: callback_object = self.callback_map[target_username]; callback_object.complete_connection(target_username, False, error_message); del self.callback_map[target_username]; elif(message_type == "hole_punch_rejected"): # message_body should be [listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size] message_data = json.loads(message_body); listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size = message_data client_key = "%s-%s-%s" % (target_ip, target_port, username); callback_object = None; if client_key in self.callback_map: callback_object = self.callback_map[client_key] if callback_object != None: callback_object.complete_connection(client_key, False, "Peer rejected the connection request."); del self.callback_map[client_key]; elif(message_type == "init_hole_punch"): try: listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size = json.loads(message_body); if listen_port not in self.used_ports: self.used_ports.append(listen_port); # No else. We're just going to hope there's no way for that if to not run, and that we're just being half-assed at feeling paranoid. # My mind is feeling like it's been twisted into a few knots at this point, to be honest. Thread(target=self.connect_to_remote_peer, args=(listen_ip, listen_port, peer_ip, peer_port, buffer_size, peer_username)).start(); client_key = "%s_%s_%s" % (peer_ip, peer_port, peer_username) if peer_username in self._holepunch_status: self._holepunch_status[peer_username] = True; if peer_username in self.callback_map: self.callback_map[client_key] = self.callback_map[peer_username]; del self.callback_map[peer_username] except Exception as e: self.log_error(e); elif(message_type == "auth_keys"): # message body should be [salt, dynamic_key] self.auth_keys = json.loads(message_body); elif(message_type == "auth_response"): # message body should be [success, username, profile_map, login, error_message] success, username, profile_map, login, error_message = json.loads(message_body); self._auth_status = True; new_auth = not self.authenticated; if success: if login: self.authenticated = True; self.auth_keep_alive_multiplier = 1; self.last_auth = time.time(); self.username = username; self.profile_map = profile_map; if new_auth: Thread(target=self.maintain_authentication).start(); else: self.authenticated = False; self.auth_keep_alive_multiplier = 1; self.last_auth = time.time(); self.username = username; self.profile_map = profile_map; if self.auth_callback != None: self.auth_callback.complete_authentication(success, error_message); elif(message_type == "registration_key"): # message body should be "public_key" self.registration_key = message_body; elif(message_type == "registration_response"): # message body should be [success, username, profile_map, error_message] success, username, profile_map, error_message = json.loads(message_body); if success: self.username = username; self.profile_map = profile_map; self._registration_status = True; if self.registration_callback != None: self.register_callback.complete_registration(success, error_message); except Exception as exc: self.log_error(exc, message_object); time.sleep(self.thread_sleep_duration); def update_peer_map(self, packet): username_list = []; current_username_list = self.peer_map.keys(); for 
user_block in packet: peer_username, profile_map = user_block; valid_username = ((peer_username != None) and (peer_username.replace(" ","").replace("\t","").replace("\n","").replace("\r","") != "")); if valid_username: username_list.append(peer_username); self.peer_map[peer_username] = user_block; remove_username_list = []; for username in current_username_list: if username not in username_list: remove_username_list.append(username); for username in remove_username_list: del self.peer_map[username]; def auto_select_local_endpoint(self): listen_ip = self.local_ip; range_count = len(self.available_ports); for i in range(0, range_count): x = range_count - (1 + i) port_range = self.available_ports[x] port_count = port_range[1] - port_range[0] for j in range(0, port_count): port = port_range[1] - j; if port not in self.used_ports: return (listen_ip, port); return None; def connect_to_peer(self, target_username, buffer_size, callback_object=None, listen_ip = None, listen_port = None): """ callback_object should have a complete_connection(target, success, error_message) method where success is True or False. Extract info with: ip, port, username = target.split("-",2) Returns False if it fails to send request message (e.g. peer is blocked or connection to server failed.). """ local_endpoint_not_specified = ((listen_ip == None) or (listen_port == None)) if local_endpoint_not_specified: try: listen_ip, listen_port = self.auto_select_local_endpoint(); except: callback_object.complete_connection(client_key, False, "All available allowed local ports are already in use. Cannot initiate connection to peer."); return False; # Disallow connecting to yourself. What are you trying to pull? if self.username == target_username: callback_object.complete_connection(client_key, False, "You cannot connect to yourself."); return False; # disallow connecting to blocked peers. if(self.peer_block_manager != None): peer_allowed = self.peer_block_manager.is_peer_allowed([target_username, buffer_size]); if not peer_allowed: callback_object.complete_connection(client_key, False, "This peer has been blocked."); return False; client_key = target_username; self.callback_map[client_key] = callback_object; self._holepunch_status[client_key] = None; # Start hole_punch process. message = "request_hole_punch %s" % json.dumps([listen_ip, listen_port, self.username, target_username, buffer_size]) if not self.stun_send_message(message): callback_object.complete_connection(client_key, False, "Failed to connect to the server."); del self.callback_map[client_key]; return False; timeout_handler = None; has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout"))) if has_timeout_handler: timeout_handler = callback_object.handle_timeout # Wait until salt and key have been retrieved or timeout occurs. Thread(target=self.monitor_response, args=("_holepunch_status", client_key, self.server_response_timeout, None, None, timeout_handler)).start(); return True; def connect_to_remote_peer(self, local_ip, local_port, target_ip, target_port, buffer_size, username): """Warning: Internal use only!""" print "Connecting to remote peer." udp_client = udp.UDP_Client(True, local_ip, local_port, target_ip, target_port, buffer_size, True); client_key = "%s_%s_%s" % (target_ip, target_port, username) callback_object = None; if client_key in self.callback_map: callback_object = self.callback_map[client_key] if self.hole_punch(udp_client, self.hole_punch_max_attempts, self.hole_punch_timeout): print "Hole-punch succeeded." 
if callback_object != None: callback_object.complete_connection(username, True, client_key); self.client_map[client_key] = udp_client; # success, add it to the map. else: print "Hole-punch failed." # remove that port from the used ports list. port_count = len(self.used_ports); for i in range(0, port_count): if self.used_ports[i] == local_port: del self.used_ports[i] break; # run the callback, if there is one. if callback_object != None: callback_object.complete_connection(client_key, False, "Failed to connect to peer."); def hole_punch_send_loop(self, udp_client, maximum_retries=20, delay=0.5): for i in range(0, maximum_retries): udp_client.send_message("syn", False, False); time.sleep(delay); # Create and return a udp socket that has established connection with the target peer, or None if it fails. def hole_punch(self, udp_client, maximum_retries=20, timeout=20): print "Performing hole-punch." delay = 0.5 result = False; connection_state = self.connection_state Thread(target=self.hole_punch_send_loop, args=(udp_client, maximum_retries, delay)).start(); start_time = time.time(); for i in range(0, maximum_retries): time.sleep(delay) if not connection_state.active: # give up and close it out. udp_client.disconnect(); print "Fail 1"; return False; packet = ""; try: packet = udp_client.pop_message(); except: pass; if packet != None: print "hole_punch_response: " + str(packet); if len(packet) >= 3: # check the packet. if(packet[2] == "syn"): udp_client.send_message("ack", False, False); # send acknowledge elif(packet[2] == "ack"): udp_client.send_message("ack2", False, False); # send ack ack and return socket. result = True; print "Success 1"; break; elif(packet[2] == "ack2"): result = True; # ack ack received, return socket. print "Success 2"; break; # check for timeout time_elapsed = time.time() - start_time; if(time_elapsed >= timeout): print "Fail 2"; break; return result; class KeepAliveMonitor: def __init__(self, parent): self.parent = parent; self.last_reauth_attempt = time.time(); def complete_authentication(self, success, error_message=""): self.parent.re_auth_ready = True; self.last_reauth_attempt = time.time(); if not success: self.parent.auth_keep_alive_multiplier += 1; def handle_timeout(self, params=None): self.last_reauth_attempt = time.time(); self.parent.re_auth_ready = True; self.parent.auth_keep_alive_multiplier += 1;
38.374036
197
0.73589
25,869
0.866488
0
0
0
0
0
0
10,322
0.345738
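The core of STUN_Client.hole_punch() above is a three-message handshake (syn, ack, ack2) retried until a timeout. A self-contained sketch of just the per-packet decision logic, separated from the sockets so it can be checked directly (the message strings follow the record above; the function itself is illustrative):

def handle_hole_punch_packet(payload):
    """Return (reply_to_send, established) for one received hole-punch payload."""
    if payload == "syn":
        return "ack", False   # peer reached us first; acknowledge and keep waiting
    if payload == "ack":
        return "ack2", True   # our syn got through; confirm and consider the link up
    if payload == "ack2":
        return None, True     # peer confirmed our ack; link is up
    return None, False        # ignore anything else

# One side's view of a successful exchange:
assert handle_hole_punch_packet("syn") == ("ack", False)
assert handle_hole_punch_packet("ack") == ("ack2", True)
assert handle_hole_punch_packet("ack2") == (None, True)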
4a4611f60a1d159391b648d9954a9b9efff56f91
11,842
py
Python
tools/wptserve/tests/functional/test_response.py
qanat/wpt
7c61a4594a95682531367b6956d1c37f8b8fd486
[ "BSD-3-Clause" ]
1
2021-12-12T18:13:24.000Z
2021-12-12T18:13:24.000Z
tools/wptserve/tests/functional/test_response.py
qanat/wpt
7c61a4594a95682531367b6956d1c37f8b8fd486
[ "BSD-3-Clause" ]
112
2021-09-27T14:39:02.000Z
2022-03-30T14:26:35.000Z
tools/wptserve/tests/functional/test_response.py
qanat/wpt
7c61a4594a95682531367b6956d1c37f8b8fd486
[ "BSD-3-Clause" ]
null
null
null
import os import unittest import json import types from http.client import BadStatusLine from io import BytesIO import pytest wptserve = pytest.importorskip("wptserve") from .base import TestUsingServer, TestUsingH2Server, doc_root def send_body_as_header(self): if self._response.add_required_headers: self.write_default_headers() self.write("X-Body: ") self._headers_complete = True class TestResponse(TestUsingServer): def test_head_without_body(self): @wptserve.handlers.handler def handler(request, response): response.writer.end_headers = types.MethodType(send_body_as_header, response.writer) return [("X-Test", "TEST")], "body\r\n" route = ("GET", "/test/test_head_without_body", handler) self.server.router.register(*route) resp = self.request(route[1], method="HEAD") self.assertEqual("6", resp.info()['Content-Length']) self.assertEqual("TEST", resp.info()['x-Test']) self.assertEqual("", resp.info()['x-body']) def test_head_with_body(self): @wptserve.handlers.handler def handler(request, response): response.send_body_for_head_request = True response.writer.end_headers = types.MethodType(send_body_as_header, response.writer) return [("X-Test", "TEST")], "body\r\n" route = ("GET", "/test/test_head_with_body", handler) self.server.router.register(*route) resp = self.request(route[1], method="HEAD") self.assertEqual("6", resp.info()['Content-Length']) self.assertEqual("TEST", resp.info()['x-Test']) self.assertEqual("body", resp.info()['X-Body']) def test_write_content_no_status_no_header(self): resp_content = b"TEST" @wptserve.handlers.handler def handler(request, response): response.writer.write_content(resp_content) route = ("GET", "/test/test_write_content_no_status_no_header", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 200 assert resp.read() == resp_content assert resp.info()["Content-Length"] == str(len(resp_content)) assert "Date" in resp.info() assert "Server" in resp.info() def test_write_content_no_headers(self): resp_content = b"TEST" @wptserve.handlers.handler def handler(request, response): response.writer.write_status(201) response.writer.write_content(resp_content) route = ("GET", "/test/test_write_content_no_headers", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 201 assert resp.read() == resp_content assert resp.info()["Content-Length"] == str(len(resp_content)) assert "Date" in resp.info() assert "Server" in resp.info() def test_write_content_no_status(self): resp_content = b"TEST" @wptserve.handlers.handler def handler(request, response): response.writer.write_header("test-header", "test-value") response.writer.write_content(resp_content) route = ("GET", "/test/test_write_content_no_status", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 200 assert resp.read() == resp_content assert sorted(x.lower() for x in resp.info().keys()) == sorted(['test-header', 'date', 'server', 'content-length']) def test_write_content_no_status_no_required_headers(self): resp_content = b"TEST" @wptserve.handlers.handler def handler(request, response): response.add_required_headers = False response.writer.write_header("test-header", "test-value") response.writer.write_content(resp_content) route = ("GET", "/test/test_write_content_no_status_no_required_headers", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 200 assert resp.read() == resp_content assert resp.info().items() == 
[('test-header', 'test-value')] def test_write_content_no_status_no_headers_no_required_headers(self): resp_content = b"TEST" @wptserve.handlers.handler def handler(request, response): response.add_required_headers = False response.writer.write_content(resp_content) route = ("GET", "/test/test_write_content_no_status_no_headers_no_required_headers", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 200 assert resp.read() == resp_content assert resp.info().items() == [] def test_write_raw_content(self): resp_content = b"HTTP/1.1 202 Giraffe\n" \ b"X-TEST: PASS\n" \ b"Content-Length: 7\n\n" \ b"Content" @wptserve.handlers.handler def handler(request, response): response.writer.write_raw_content(resp_content) route = ("GET", "/test/test_write_raw_content", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 202 assert resp.info()["X-TEST"] == "PASS" assert resp.read() == b"Content" def test_write_raw_content_file(self): @wptserve.handlers.handler def handler(request, response): with open(os.path.join(doc_root, "test.asis"), 'rb') as infile: response.writer.write_raw_content(infile) route = ("GET", "/test/test_write_raw_content", handler) self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 202 assert resp.info()["X-TEST"] == "PASS" assert resp.read() == b"Content" def test_write_raw_none(self): @wptserve.handlers.handler def handler(request, response): with pytest.raises(ValueError): response.writer.write_raw_content(None) route = ("GET", "/test/test_write_raw_content", handler) self.server.router.register(*route) self.request(route[1]) def test_write_raw_contents_invalid_http(self): resp_content = b"INVALID HTTP" @wptserve.handlers.handler def handler(request, response): response.writer.write_raw_content(resp_content) route = ("GET", "/test/test_write_raw_content", handler) self.server.router.register(*route) with pytest.raises(BadStatusLine) as e: self.request(route[1]) assert str(e.value) == resp_content.decode('utf-8') class TestH2Response(TestUsingH2Server): def test_write_without_ending_stream(self): data = b"TEST" @wptserve.handlers.handler def handler(request, response): headers = [ ('server', 'test-h2'), ('test', 'PASS'), ] response.writer.write_headers(headers, 202) response.writer.write_data_frame(data, False) # Should detect stream isn't ended and call `writer.end_stream()` route = ("GET", "/h2test/test", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 202 assert [x for x in resp.headers.items()] == [('server', 'test-h2'), ('test', 'PASS')] assert resp.content == data def test_set_error(self): @wptserve.handlers.handler def handler(request, response): response.set_error(503, message="Test error") route = ("GET", "/h2test/test_set_error", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 503 assert json.loads(resp.content) == json.loads("{\"error\": {\"message\": \"Test error\", \"code\": 503}}") def test_file_like_response(self): @wptserve.handlers.handler def handler(request, response): content = BytesIO(b"Hello, world!") response.content = content route = ("GET", "/h2test/test_file_like_response", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 200 assert resp.content == b"Hello, world!" 
def test_list_response(self): @wptserve.handlers.handler def handler(request, response): response.content = ['hello', 'world'] route = ("GET", "/h2test/test_file_like_response", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 200 assert resp.content == b"helloworld" def test_content_longer_than_frame_size(self): @wptserve.handlers.handler def handler(request, response): size = response.writer.get_max_payload_size() content = "a" * (size + 5) return [('payload_size', size)], content route = ("GET", "/h2test/test_content_longer_than_frame_size", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 200 payload_size = int(resp.headers['payload_size']) assert payload_size assert resp.content == b"a" * (payload_size + 5) def test_encode(self): @wptserve.handlers.handler def handler(request, response): response.encoding = "utf8" t = response.writer.encode("hello") assert t == b"hello" with pytest.raises(ValueError): response.writer.encode(None) route = ("GET", "/h2test/test_content_longer_than_frame_size", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 200 def test_raw_header_frame(self): @wptserve.handlers.handler def handler(request, response): response.writer.write_raw_header_frame([ (':status', '204'), ('server', 'TEST-H2') ], end_headers=True) route = ("GET", "/h2test/test_file_like_response", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 204 assert resp.headers['server'] == 'TEST-H2' assert resp.content == b'' def test_raw_data_frame(self): @wptserve.handlers.handler def handler(request, response): response.write_status_headers() response.writer.write_raw_data_frame(data=b'Hello world', end_stream=True) route = ("GET", "/h2test/test_file_like_response", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.content == b'Hello world' def test_raw_header_continuation_frame(self): @wptserve.handlers.handler def handler(request, response): response.writer.write_raw_header_frame([ (':status', '204') ]) response.writer.write_raw_continuation_frame([ ('server', 'TEST-H2') ], end_headers=True) route = ("GET", "/h2test/test_file_like_response", handler) self.server.router.register(*route) resp = self.client.get(route[1]) assert resp.status_code == 204 assert resp.headers['server'] == 'TEST-H2' assert resp.content == b'' if __name__ == '__main__': unittest.main()
36.549383
123
0.616703
11,380
0.960986
0
0
4,095
0.345803
0
0
1,812
0.153015
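Each test in the preceding file follows the same pattern: decorate a function with wptserve.handlers.handler, register it on the server's router, then issue a request against the route. A stripped-down sketch of that pattern (the route path, header name, and body are made up; the decorator and the (headers, body) return convention are as used in the record above):

from wptserve.handlers import handler

@handler
def hello_handler(request, response):
    # Returning (headers, body) lets wptserve fill in the 200 status and required headers.
    return [("X-Example", "demo")], "hello\r\n"

# With a running wptserve server (as the TestUsingServer base class in the record provides):
#     server.router.register("GET", "/demo/hello", hello_handler)
# a GET to /demo/hello then answers 200 with body "hello" and the X-Example header.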
4a48326e1bcc0c4ce67dffee3193eed37eb8dfe4
2,881
py
Python
bbc1/core/command.py
ks91/bbc1-pub
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
[ "Apache-2.0" ]
89
2017-10-31T05:38:30.000Z
2021-11-06T11:53:19.000Z
bbc1/core/command.py
ks91/bbc1-pub
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
[ "Apache-2.0" ]
74
2017-11-07T13:06:33.000Z
2021-05-06T14:26:19.000Z
bbc1/core/command.py
ks91/bbc1-pub
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
[ "Apache-2.0" ]
56
2017-11-04T13:54:56.000Z
2021-06-18T18:05:46.000Z
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import sys
sys.path.extend(["../../"])
from bbc1.core.bbc_config import DEFAULT_CORE_PORT, DEFAULT_P2P_PORT


DEFAULT_SERV_ADDR = '127.0.0.1'


def parser():
    usage = 'python {} [--coreport <number>] [--p2pport <number>] [--workingdir <dir>] ' \
            '[--config <filename>] [--default_config <filename>] [--nodekey] [--no_nodekey] [--domain0] ' \
            '[--ledgersubsystem] [--ip4addr <IP addr>] [--ip6addr <IPv6 addr>] ' \
            '[--log <filename>] [--verbose_level <string>] [--daemon] [--kill] [--help]'.format(__file__)
    argparser = ArgumentParser(usage=usage)
    argparser.add_argument('-cp', '--coreport', type=int, default=DEFAULT_CORE_PORT, help='waiting TCP port')
    argparser.add_argument('-pp', '--p2pport', type=int, default=DEFAULT_P2P_PORT, help='waiting TCP port')
    argparser.add_argument('-w', '--workingdir', type=str, default=".bbc1", help='working directory name')
    argparser.add_argument('-c', '--config', type=str, default=None, help='config file name')
    argparser.add_argument('--default_config', type=str, default=None, help='default config file')
    argparser.add_argument('--nodekey', action='store_true', help='use node_key for admin command')
    argparser.add_argument('--no_nodekey', action='store_true', help='don\'t use node_key for admin command')
    argparser.add_argument('--domain0', action='store_true', help='connect to domain_global_0')
    argparser.add_argument('--ledgersubsystem', action='store_true', help='use ledger_subsystem')
    argparser.add_argument('--ip4addr', type=str, default=None, help='IPv4 address exposed to the external network')
    argparser.add_argument('--ip6addr', type=str, default=None, help='IPv6 address exposed to the external network')
    argparser.add_argument('-l', '--log', type=str, default="-", help='log filename/"-" means STDOUT')
    argparser.add_argument('-d', '--daemon', action='store_true', help='run in background')
    argparser.add_argument('-k', '--kill', action='store_true', help='kill the daemon')
    argparser.add_argument('-v', '--verbose_level', type=str, default="debug",
                           help='log level all/debug/info/warning/error/critical/none')
    args = argparser.parse_args()
    return args
57.62
116
0.701493
0
0
0
0
0
0
0
0
1,669
0.579313
4a4861b9f42f405c3f1bc83a1f33fe81d2ee9835
33,928
py
Python
main.py
cmcquinn/cmake-uvision-syncer
26f34b79b3102a326ced2b0bca2524a98b69abf4
[ "MIT" ]
null
null
null
main.py
cmcquinn/cmake-uvision-syncer
26f34b79b3102a326ced2b0bca2524a98b69abf4
[ "MIT" ]
null
null
null
main.py
cmcquinn/cmake-uvision-syncer
26f34b79b3102a326ced2b0bca2524a98b69abf4
[ "MIT" ]
1
2022-03-31T13:47:50.000Z
2022-03-31T13:47:50.000Z
""" Usage: main.py [<project>] Options: <project> Path to the .uvprojx file (Keil® µVision5 Project File). The .uvoptx file (Keil® µVision5 Project Options file) will be located automatically as it shall be adjacent to the .uvprojx file, having the same filename. If this is a directory, .uvprojx is found automatically (if multiple found then the latest changed is chosen). If not provided then the current working directory is chosen as a project directory. """ import enum import operator import os import warnings from collections import defaultdict from dataclasses import dataclass from os import DirEntry from pathlib import Path from typing import List, Optional, Union, Iterable, Collection, Set, Tuple, Callable, Dict, Iterator from docopt import docopt from lxml import etree __author__ = "Bojan Potočnik" UnknownInt = int UnknownBool = bool @enum.unique class Language(enum.Enum): ASM = "Assembler" C = "C" CPP = "C++" @enum.unique class FileType(enum.Enum): C_SOURCE = 1 """C Source file""" ASM_SOURCE = 2 """Assembly language file""" OBJECT = 3 """Object file""" LIBRARY = 4 """Library file""" TEXT_DOCUMENT = 5 """Text Document file""" CUSTOM = 7 """Custom file""" CPP_SOURCE = 8 """C++ Source file""" IMAGE = 9 """Image file""" # region XML data structures for Project File @dataclass class Target: @dataclass class Toolset: number: int name: str @dataclass class Compiler: cc: str ac6: bool @dataclass class Options: @dataclass class Common: device: str vendor: str pack_id: str pack_url: str cpu: str device_id: int register_file: str @dataclass class Properties: use_cpp_compiler: bool common: Common properties: Properties @dataclass class Build: @dataclass class Misc: @dataclass class Memory: @enum.unique class Type(enum.Enum): """TODO: Real meaning unknown.""" TYPE0 = 0 TYPE1 = 1 name: str type: Type start: int size: int cpu_type: str memories: List[Memory] @dataclass class C: optimization: int strict: bool c99: bool gnu: bool misc: List[str] defines: List[str] undefines: List[str] include_paths: List[str] @dataclass class Asm: misc: List[str] defines: List[str] undefines: List[str] include_paths: List[str] @dataclass class Linker: text_address_range: int data_address_range: int misc: List[str] misc: Misc c: C asm: Asm ld: Linker @dataclass class File: name: str type: FileType path: str include_in_build: bool """Whether this file is included in the build or ignored.""" always_build: bool """Whether to always build this file.""" @dataclass class Group: name: str files: List['Target.File'] name: str toolset: Toolset compiler: Compiler options: Options build: Build groups: List[Group] @dataclass class RTE: @dataclass class TargetInfo: @enum.unique class VersionMatchMode(enum.Enum): FIXED = "fixed" name: str version_match_mode: Optional[VersionMatchMode] @dataclass class Package: name: str url: str vendor: str version: str target_infos: List['RTE.TargetInfo'] @dataclass class Component: class_: str group: str vendor: str version: str condition: str package: 'RTE.Package' target_infos: List['RTE.TargetInfo'] @dataclass class File: @enum.unique class Attribute(enum.Enum): CONFIG = "config" @enum.unique class Category(enum.Enum): SOURCE = "source" attr: Attribute category: Category condition: Optional[str] name: str version: str instance: str component: 'RTE.Component' package: 'RTE.Package' target_infos: List['RTE.TargetInfo'] packages: List[Package] components: List[Component] files: List[File] # endregion XML data structures for Project File # region XML data structures for Project Options file 
@dataclass class File: group_number: int """Number of the :cls:`Group` this file belongs to.""" number: int """Number of the file (global across all groups).""" type: FileType """File type as selected in the Options for File ... -> Properties dialog""" expanded: bool """Whether the file is expanded (include file dependencies shown) in the Project Window file browser.""" include_in_build: bool """Whether this file is included in the build or ignored.""" always_build: bool """Whether to always build this file.""" tv_exp_opt_dlg: UnknownBool dave2: UnknownBool path: str filename: str rte_flag: bool """Whether this file is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only.""" shared: UnknownBool _project_file: Target.File = None """Reference to the instance of this file from the Project File.""" @dataclass class Group: name: str """Group name as shown in the Project Window file browser.""" expanded: bool """Whether the group is expanded (files shown) in the Project Window file browser.""" tv_exp_opt_dlg: UnknownBool cb_sel: UnknownBool rte_flag: bool """Whether this group is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only.""" files: List[File] """List of files in this group.""" _project_group: Target.Group = None """Reference to the instance of this group from the Project File.""" # endregion XML data structures for Project Options file # region XML parsing helper functions def text(element: etree.ElementBase, name: str, is_attribute: bool = False, nullable: bool = False) -> Optional[str]: if is_attribute: if nullable: return element.attrib.get(name) else: return element.attrib[name] value = element.xpath(name) if (not value) and nullable: return None if len(value) != 1: raise ValueError(f"Only one '{name}' tag per tree is supported, {len(value)} found") return value[0].text def strict_bool(element: etree.ElementBase, name: str, nullable: bool = False, *, false_value: str = "0", true_value: str = "1") -> Optional[bool]: value = text(element, name, nullable=nullable) if value == false_value: return False if value == true_value: return True if (value is None) and nullable: return None raise ValueError(f"'{value}' (of {name}) is not valid boolean value") def strict_hex(element: etree.ElementBase, name: str) -> int: value = text(element, name) if not value.startswith("0x"): raise ValueError(f"'{value}' (of {name}) is not valid hexadecimal value") return int(value, 16) # endregion XML parsing helper functions @dataclass class UVisionProject: project_file_path: str project_options_path: str # region Project File targets: List[Target] # endregion Project File # region Project Options groups: List[Group] """Groups of files, as shown in the Project Window file browser.""" # endregion Project Options @classmethod def new(cls, project_file_path: str) -> 'UVisionProject': fp_base = os.path.splitext(project_file_path)[0] project_file_path = fp_base + ".uvprojx" project_options_path = fp_base + ".uvoptx" with open(project_file_path) as f: # noinspection PyProtectedMember xproj: etree._Element = etree.parse(f).getroot() with open(project_options_path) as f: # noinspection PyProtectedMember xopt: etree._Element = etree.parse(f).getroot() # region Project File if xproj.tag != "Project": raise ValueError("Invalid uVision Project File XML file") # noinspection PyCallByClass,SpellCheckingInspection targets = [ Target( name=text(target, "TargetName"), toolset=Target.Toolset( number=strict_hex(target, "ToolsetNumber"), name=text(target, 
"ToolsetName") ), compiler=Target.Compiler( cc=text(target, "pCCUsed", nullable=True), ac6=strict_bool(target, "uAC6") ), options=next( # There is always only one package, but using generator is clean and # effective way of creating an inline local variable. Target.Options( common=next( Target.Options.Common( device=text(tco, "Device"), vendor=text(tco, "Vendor"), pack_id=text(tco, "PackID"), pack_url=text(tco, "PackURL"), cpu=text(tco, "Cpu"), device_id=text(tco, "DeviceId"), register_file=text(tco, "RegisterFile") ) for tco in to.xpath("TargetCommonOption") ), properties=next( Target.Options.Properties( use_cpp_compiler=strict_bool(tcp, "UseCPPCompiler"), ) for tcp in to.xpath("CommonProperty") ) ) for to in target.xpath("TargetOption") ), build=next( Target.Build( misc=Target.Build.Misc( cpu_type=text(to_taa, "ArmAdsMisc/AdsCpuType"), memories=[ Target.Build.Misc.Memory( name=memory.tag, type=Target.Build.Misc.Memory.Type(int(text(memory, "Type"))), start=strict_hex(memory, "StartAddress"), size=strict_hex(memory, "Size") ) for memory in to_taa.xpath("ArmAdsMisc/OnChipMemories/*") ] ), c=next( Target.Build.C( optimization=int(text(to_taa_c, "Optim")), strict=strict_bool(to_taa_c, "Strict"), c99=strict_bool(to_taa_c, "uC99"), gnu=strict_bool(to_taa_c, "uGnu"), misc=[ mc.strip() for mc in text(to_taa_c, "VariousControls/MiscControls").split(",") ], defines=[ mc.strip() for mc in text(to_taa_c, "VariousControls/Define").split(" ") ], undefines=[ mc.strip() for mc in (text(to_taa_c, "VariousControls/Undefine") or "").split(" ") ], include_paths=[ mc.strip() for mc in text(to_taa_c, "VariousControls/IncludePath").split(";") ] ) for to_taa_c in to_taa.xpath("Cads") ), asm=next( Target.Build.Asm( misc=[ mc.strip() for mc in (text(to_taa_a, "VariousControls/MiscControls") or "").split(",") ], defines=[ mc.strip() for mc in (text(to_taa_a, "VariousControls/Define") or "").split(" ") ], undefines=[ mc.strip() for mc in (text(to_taa_a, "VariousControls/Undefine") or "").split(" ") ], include_paths=[ mc.strip() for mc in (text(to_taa_a, "VariousControls/IncludePath") or "").split(";") ] ) for to_taa_a in to_taa.xpath("Aads") ), ld=next( Target.Build.Linker( text_address_range=strict_hex(to_taa_ld, "TextAddressRange"), data_address_range=strict_hex(to_taa_ld, "DataAddressRange"), misc=[ mc.strip() for mc in text(to_taa_ld, "Misc").split(",") # TODO: Delimiter unknown ] ) for to_taa_ld in to_taa.xpath("LDads") ) ) for to_taa in target.xpath("TargetOption/TargetArmAds") ), groups=[ Target.Group( name=text(group, "GroupName"), files=[ Target.File( name=text(file, "FileName"), type=FileType(int(text(file, "FileType"))), path=text(file, "FilePath"), include_in_build=strict_bool(file, "FileOption/CommonProperty/IncludeInBuild", nullable=True), always_build=strict_bool(file, "FileOption/CommonProperty/AlwaysBuild", nullable=True, true_value="2") ) for file in group.xpath("Files/File") ] ) for group in target.xpath("Groups/Group") ] ) for target in xproj.xpath("Targets/Target") ] # region RTE # noinspection PyCallByClass,PyTypeChecker rte = RTE( packages=[ RTE.Package( name=text(package, "name", True), url=text(package, "url", True), vendor=text(package, "vendor", True), version=text(package, "version", True), target_infos=[ RTE.TargetInfo( name=text(ti, "name", True), # Using generator and list only for local variable version_match_mode=next(RTE.TargetInfo.VersionMatchMode(vmm) if vmm else None for vmm in [text(ti, "versionMatchMode", True, True)]) ) for ti in package.xpath("targetInfos/targetInfo") ] ) 
for package in xproj.xpath("RTE/packages/package") ], components=[ RTE.Component( class_=text(component, "Cclass", True), group=text(component, "Cgroup", True), vendor=text(component, "Cvendor", True), version=text(component, "Cversion", True), condition=text(component, "condition", True), package=next( # There is always only one package, but using generator is clean and # effective way of creating an inline local variable. # This new instance of package will be replaced below with reference to an actual matching # instance of the package from rte.packages. RTE.Package( name=text(package, "name", True), url=text(package, "url", True), vendor=text(package, "vendor", True), version=text(package, "version", True), target_infos=None ) for package in component.xpath("package") ), target_infos=[ RTE.TargetInfo( name=text(ti, "name", True), # TODO: Handle nullable # RTE.TargetInfo.VersionMatchMode(text(ti, "versionMatchMode", True, True)) version_match_mode=None ) for ti in component.xpath("targetInfos/targetInfo") ] ) for component in xproj.xpath("RTE/components/component") ], files=[ RTE.File( attr=RTE.File.Attribute(text(file, "attr", True)), category=RTE.File.Category(text(file, "category", True)), condition=text(file, "condition", True, True), name=text(file, "name", True), version=text(file, "version", True), instance=text(file, "instance"), component=next( RTE.Component( class_=text(component, "Cclass", True), group=text(component, "Cgroup", True), vendor=text(component, "Cvendor", True), version=text(component, "Cversion", True), condition=text(component, "condition", True), package=None, target_infos=None ) for component in file.xpath("component") ), package=None, # TODO target_infos=None, # TODO ) for file in xproj.xpath("RTE/files/file") ] ) # TODO: Connect actual references of the rte.packages and rte.packages.target_infos for component in rte.components: cp = component.package component.package = None cp.target_infos = None for package in rte.packages: # Temporally remove target_infos to enable usage of equality operator. 
pti = package.target_infos package.target_infos = None if cp == package: component.package = package package.target_infos = pti break package.target_infos = pti # endregion RTE # endregion Project File # region Project Options if xopt.tag != "ProjectOpt": raise ValueError("Invalid uVision Project Options XML file") groups: List[Group] = [] for group in xopt.xpath("Group"): group_name = text(group, "GroupName") # Find this group in the Project File xproj_group = next(g for g in next(iter(targets)).groups if (g.name == group_name)) # Find all files in this group and also in the Project File files: List[File] = [] for file in group.xpath("File"): file_type = FileType(int(text(file, "FileType"))) file_name = text(file, "FilenameWithoutPath") xproj_file = next(f for f in xproj_group.files if (f.type == file_type and f.name == file_name)) files.append(File( group_number=int(text(file, "GroupNumber")), number=int(text(file, "FileNumber")), type=file_type, expanded=strict_bool(file, "tvExp"), include_in_build=xproj_file.include_in_build, always_build=xproj_file.always_build, tv_exp_opt_dlg=strict_bool(file, "tvExpOptDlg"), dave2=strict_bool(file, "bDave2"), path=text(file, "PathWithFileName"), filename=file_name, rte_flag=strict_bool(file, "RteFlg"), shared=strict_bool(file, "bShared") )) groups.append(Group( name=group_name, expanded=strict_bool(group, "tvExp"), tv_exp_opt_dlg=strict_bool(group, "tvExpOptDlg"), cb_sel=strict_bool(group, "cbSel"), rte_flag=strict_bool(group, "RteFlg"), files=files )) # There is no more *currently relevant* data in the Project Options file. # endregion Project Options # Add RTE files to the file groups to actually match the Project Window file browser. for file in rte.files: # Find the group to which this file belongs to (there shall be one and only one). group = None group_number = 1 for group_number, group in enumerate(groups, 1): if group.files and group.files[0].group_number != group_number: warnings.warn(f"Inconsistent group number {group.files[0].group_number} for group {group.name}" f" (expected to be {group_number})") if group.rte_flag and group.name.strip(":") == file.component.class_: break filename = os.path.basename(file.instance) # Detect file type (this information is not provided for RTE files) if filename.endswith(".s"): file_type = FileType.ASM_SOURCE elif filename.endswith(".c"): file_type = FileType.C_SOURCE elif filename.endswith(".cpp"): file_type = FileType.CPP_SOURCE elif filename.endswith(".h"): file_type = FileType.TEXT_DOCUMENT else: warnings.warn(f"Unknown RTE file type '{file.instance}': {file}") continue group.files.append(File( group_number=group_number, number=max(f.number for g in groups for f in g.files) + 1, type=file_type, expanded=False, include_in_build=True, # TODO: This information is available for RTE files always_build=None, tv_exp_opt_dlg=False, # TODO dave2=False, # TODO path=file.instance, filename=os.path.basename(file.instance), rte_flag=True, shared=False )) return cls( project_file_path=project_file_path, project_options_path=project_options_path, targets=targets, groups=groups ) def source_files(self) -> Iterator[Tuple[File, Optional[Language], Optional[str]]]: """ Get all files grouped by the file type with group names as a comments. """ # Add source files for group in self.groups: comment = group.name if group.rte_flag: # RTE groups start with double colon (::). comment = "RTE" + comment # Group files by type and add one comment for every file type as they are in the separate sections. 
files: Dict[Union[Language, None], List[File]] = defaultdict(list) for file in group.files: if file.type == FileType.ASM_SOURCE: lang = Language.ASM elif file.type == FileType.C_SOURCE: lang = Language.C elif file.type == FileType.TEXT_DOCUMENT: lang = None else: warnings.warn(f"Unsupported file type: {file.type} for {file}") continue files[lang].append(file) for lang, files in files.items(): comment_per_type = comment for file in files: yield file, lang, comment_per_type comment_per_type = None class CMake: @dataclass class String: value: str """The actual string value.""" languages: Set[Language] """Set of all build configs in which this value is present.""" common: bool = False comment: Optional[str] = None """Comment which will be added to the line before""" def __eq__(self, o: 'CMake.String') -> bool: if isinstance(o, type(self)): return self.value == o.value elif isinstance(o, str): return self.value == o return NotImplemented def __init__(self) -> None: self.include_paths: List[CMake.String] = [] self.defines: List[CMake.String] = [] self.undefines: List[CMake.String] = [] self.source_file_paths: List[CMake.String] = [] self.other_file_paths: List[CMake.String] = [] @classmethod def _get(cls, lst: List[String], obj: str) -> String: """Get existing object from the list or append a new one to the end.""" try: # noinspection PyTypeChecker itm = lst[lst.index(obj)] except ValueError: # noinspection PyCallByClass itm = cls.String(obj, set()) lst.append(itm) return itm @classmethod def _add_values(cls, where: List[String], values: Union[str, Iterable[str]], languages: Union[Language, Collection[Language], None], comment: Optional[str] = None) -> None: if isinstance(languages, Language): languages = [languages] for val in values: obj = cls._get(where, val) if comment is not None: # Add comment to the first value only obj.comment = comment comment = None if languages: obj.languages.update(languages) @staticmethod def _clean_paths(paths: Union[str, Iterable[str]]) -> List[str]: if isinstance(paths, (str, Path)): paths = [paths] return [Path(p).as_posix() for p in map(os.path.normpath, paths)] def add_include_paths(self, paths: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]], comment: str = None) -> None: self._add_values(self.include_paths, self._clean_paths(paths), languages, comment) def add_defines(self, defines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]], comment: str = None) -> None: self._add_values(self.defines, defines, languages, comment) def add_undefines(self, undefines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]], comment: str = None) -> None: self._add_values(self.undefines, undefines, languages, comment) def add_source_files(self, paths: Union[None, str, Iterable[str]], languages: Union[Language, Collection[Language], None], comment: str = None, include_in_build: bool = True) -> None: paths = self._clean_paths(paths) # If file is not included in the build, comment it if include_in_build is False: paths = ["# " + path for path in paths] self._add_values(self.source_file_paths if languages else self.other_file_paths, paths, languages, comment) def add_other_files(self, paths: Union[str, Iterable[str]], comment: str = None) -> None: self.add_source_files(paths, None, comment) def check_common(self) -> Set[Language]: """ Check which properties are common to all language configurations. 
:return: Set of all used languages (languages with at least one property) """ all_props = (self.include_paths, self.defines, self.undefines, self.source_file_paths) # Get all of the defined languages used languages = {lang for props in all_props for prop in props for lang in prop.languages} for props in all_props: for prop in props: prop.common = (prop.languages == languages) return languages def __str__(self) -> str: languages = sorted(self.check_common(), key=operator.attrgetter('value')) ret_str = [ "# Made with CMake <> uVision project file synchronizer" "# https://github.com/bojanpotocnik/cmake-uvision-syncer" ] # Set of the build properties prop_sets: List[Tuple[str, str, List[CMake.String], str]] = [ ("definitions", "DEFINES", self.defines, "-D"), ("un-defines", "UNDEFINES", self.undefines, ""), ("include directories", "INCLUDE_DIRS", self.include_paths, ""), ("source files", "SOURCES", self.source_file_paths, ""), ] # Set of the language configs per build property sub_prop_sets: List[Tuple[str, str, Callable[[CMake.String], bool]]] = [ ("Common", "COMMON", lambda prop: prop.common), *((lang.value + " specific", lang.name, lambda prop, lang_=lang: (not prop.common) and (lang_ in prop.languages)) for lang in languages) ] def _add_section_files(comment: str, var_name: str, value_iterator: Iterable[CMake.String], value_prefix: str = "") -> str: s = (f"# {comment}\n" f"set({var_name}") value_str = '' for value in value_iterator: if value.comment is not None: value_str += f"\n\t# {value.comment}" value_str += f"\n\t{value_prefix}{value.value}" if len(value_str) is not 0: return s + value_str + "\n)" else: return None for section_comment, section_var_prefix, section_props, val_prefix in prop_sets: ss_str = [] for prop_set_comment, var_suffix, filter_fun in sub_prop_sets: section_files = _add_section_files( comment=f"{prop_set_comment} {section_comment}", var_name=f"{section_var_prefix}_{var_suffix}", value_iterator=filter(filter_fun, section_props), value_prefix=val_prefix ) if section_files is not None: ss_str.append(section_files) ret_str.append("\n\n".join(ss_str)) other_files = _add_section_files( comment="Other files", var_name="OTHER_FILES", value_iterator=self.other_file_paths ) if other_files is not None: ret_str.append(other_files) return "\n\n\n".join(ret_str) def main() -> None: # region Parse arguments arguments = docopt(__doc__) project_path: str = arguments["<project>"] or "." if not os.path.isfile(project_path): with os.scandir(project_path) as dirs: # type: Iterator[DirEntry] projects = [de.path for de in dirs if (de.is_file() and (os.path.splitext(de.name)[1] == ".uvprojx"))] if not projects: raise FileNotFoundError(f"Could not find any .uvprojx file in '{project_path}'") elif len(projects) > 1: # Choose the latest file by modification time. 
project_path = max(projects, key=os.path.getmtime) else: project_path = projects[0] project_path = os.path.realpath(project_path) # endregion Parse arguments print(f"Using µVision5 Project File '{project_path}'") # Parse uVision project XML files uvp = UVisionProject.new(project_path) # Generate CMake file and populate it with information from uVision project cmake = CMake() # Add Assembler properties cmake.add_include_paths(uvp.targets[0].build.asm.include_paths, Language.ASM) cmake.add_defines(uvp.targets[0].build.asm.defines, Language.ASM) cmake.add_undefines(uvp.targets[0].build.asm.undefines, Language.ASM) # Add C properties cmake.add_include_paths(uvp.targets[0].build.c.include_paths, Language.C) cmake.add_defines(uvp.targets[0].build.c.defines, Language.C) cmake.add_undefines(uvp.targets[0].build.c.undefines, Language.C) # Add source and other files for file, lang, comment in uvp.source_files(): cmake.add_source_files(file.path, lang, comment, file.include_in_build) fp_proj_cmake = os.path.join(os.path.dirname(uvp.project_file_path), os.path.splitext(os.path.basename(uvp.project_file_path))[0] + ".cmake") with open(fp_proj_cmake, 'w') as f: print(cmake, file=f) print(f"Generated CMake file '{fp_proj_cmake}'") if __name__ == "__main__": main()
38.207207
122
0.528855
29,352
0.864973
1,316
0.038781
24,404
0.719161
0
0
7,650
0.225438
4a48ec3aeae99c16ed4de0cce8fcde590af1ac0c
3,434
py
Python
scipy/weave/base_spec.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
8
2015-10-07T00:37:32.000Z
2022-01-21T17:02:33.000Z
scipy/weave/base_spec.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
null
null
null
scipy/weave/base_spec.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
8
2015-05-09T14:23:57.000Z
2018-11-15T05:56:00.000Z
class base_converter(object):
    """
    Properties:
    headers -- list of strings that name the header files needed by this
               object.
    include_dirs -- list of directories where the header files can be found.
    libraries -- list of libraries needed to link to when compiling
                 extension.
    library_dirs -- list of directories to search for libraries.
    support_code -- list of strings.  Each string is a subroutine needed
                    by the type.  Functions that are used in the conversion
                    between Python and C++ files are examples of these.

    Methods:
    type_match(value) returns 1 if this class is used to represent type
                      specification for value.
    type_spec(name, value) returns a new object (of this class) that is
                           used to produce C++ code for value.
    declaration_code() returns C++ code fragment for type declaration and
                       conversion of python object to C++ object.
    cleanup_code() returns C++ code fragment for cleaning up after the
                   variable after main C++ code fragment has executed.
    """
    _build_information = []
    compiler = ''

    def set_compiler(self,compiler):
        self.compiler = compiler

    def type_match(self,value):
        raise NotImplementedError("You must override method in derived class")

    def build_information(self):
        return self._build_information

    def type_spec(self,name,value):
        pass

    def declaration_code(self,templatize = 0):
        return ""

    def local_dict_code(self):
        return ""

    def cleanup_code(self):
        return ""

    def retrieve_py_variable(self,inline=0):
        # this needs a little coordination in name choices with the
        # ext_inline_function class.
        if inline:
            vn = 'get_variable("%s",raw_locals,raw_globals)' % self.name
        else:
            vn = 'py_' + self.name
        return vn

    def py_reference(self):
        return "&py_" + self.name

    def py_pointer(self):
        return "*py_" + self.name

    def py_variable(self):
        return "py_" + self.name

    def reference(self):
        return "&" + self.name

    def pointer(self):
        return "*" + self.name

    def init_flag(self):
        return self.name + "_used"

    def variable(self):
        return self.name

    def variable_as_string(self):
        return '"' + self.name + '"'


import UserList
import base_info


class arg_spec_list(UserList.UserList):
    def build_information(self):
        all_info = base_info.info_list()
        for i in self:
            all_info.extend(i.build_information())
        return all_info

    def py_references(self):
        return map(lambda x: x.py_reference(),self)

    def py_pointers(self):
        return map(lambda x: x.py_pointer(),self)

    def py_variables(self):
        return map(lambda x: x.py_variable(),self)

    def references(self):
        return map(lambda x: x.py_reference(),self)

    def pointers(self):
        return map(lambda x: x.pointer(),self)

    def variables(self):
        return map(lambda x: x.variable(),self)

    def init_flags(self):
        return map(lambda x: x.init_flag(),self)

    def variable_as_strings(self):
        return map(lambda x: x.variable_as_string(),self)
35.040816
80
0.609785
3,397
0.989225
0
0
0
0
0
0
1,453
0.423122
4a48f5c7b324b298a0d8541fe2c9610bbecc1796
96
py
Python
xception/test.py
latentai/model-zoo-models
70a96e955b3b1245f8417613cd9debdae91b1d28
[ "Apache-2.0" ]
8
2020-05-16T20:14:27.000Z
2020-07-08T09:23:24.000Z
inceptionv3/test.py
latentai/model-zoo-models
70a96e955b3b1245f8417613cd9debdae91b1d28
[ "Apache-2.0" ]
9
2020-03-26T10:25:12.000Z
2022-02-28T19:54:14.000Z
audio_recognition/test.py
latentai/model-zoo-models
70a96e955b3b1245f8417613cd9debdae91b1d28
[ "Apache-2.0" ]
6
2020-03-19T20:52:09.000Z
2022-03-06T01:33:29.000Z
#!/usr/bin/env python3

from utils.model_config_helpers import run_model_test

run_model_test()
16
53
0.822917
0
0
0
0
0
0
0
0
22
0.229167
4a4b8d448257463b5f6347e3da0f24a94bac2394
10,816
py
Python
mpunet/bin/cv_split.py
alexsosn/MultiPlanarUNet
2d1cecdee391be8e9f72da95e33077ed82a2183a
[ "MIT" ]
null
null
null
mpunet/bin/cv_split.py
alexsosn/MultiPlanarUNet
2d1cecdee391be8e9f72da95e33077ed82a2183a
[ "MIT" ]
null
null
null
mpunet/bin/cv_split.py
alexsosn/MultiPlanarUNet
2d1cecdee391be8e9f72da95e33077ed82a2183a
[ "MIT" ]
1
2020-10-07T12:44:47.000Z
2020-10-07T12:44:47.000Z
from glob import glob import sys import os import numpy as np import random from mpunet.utils import create_folders import argparse def get_parser(): parser = argparse.ArgumentParser(description="Prepare a data folder for a" "CV experiment setup.") parser.add_argument("--data_dir", type=str, help="Path to data directory") parser.add_argument("--CV", type=int, default=5, help="Number of splits (default=5)") parser.add_argument("--out_dir", type=str, default="views", help="Directory to store CV subfolders " "(default=views") parser.add_argument("--im_sub_dir", type=str, default="images", help="Subfolder under 'data_dir' in which image are " "stored (default=images)") parser.add_argument("--lab_sub_dir", type=str, default="labels", help="Subfolder under 'data_dir' in which labels are " "stored (default=labels)") parser.add_argument("--copy", action="store_true", help="Copy files to CV-subfolders instead of " "symlinking (not recommended)") parser.add_argument("--file_list", action="store_true", help="Create text files with paths pointing to the " "images at the image and labels subdirs under " "each split instead of symlink/copying. This is" " usefull on systems were symlink is not " "supported, but the dataset size is too large to" " store in copies. NOTE: Only one of --copy and " "--file_list flags must be set.") parser.add_argument("--file_regex", type=str, default="*.nii*", help="Regex used to select files from the image " "and labels subdirs. (default='*.nii*')") parser.add_argument("--validation_fraction", type=float, default=0.20, help="Fraction of OVERALL data size used for " "validation in each split. In a 5-CV setting with " "N=100 and val_frac=0.20, each split will have " "N_train=60, N_val=20 and N_test=20 images") parser.add_argument("--test_fraction", type=float, default=0.20, help="Fraction of data size used for test if CV=1.") parser.add_argument("--common_prefix_length", type=int, required=False, default=0) return parser def assert_dir_structure(data_dir, im_dir, lab_dir, out_dir): for _dir in (data_dir, im_dir, lab_dir): if not os.path.exists(_dir): raise OSError("Invalid data directory '%s'. Does not exist." % data_dir) if os.path.exists(out_dir): raise OSError("Output directory at '%s' already exists." % out_dir) def create_view_folders(out_dir, n_splits): if not os.path.exists(out_dir): print("Creating directory at %s" % out_dir) os.makedirs(out_dir) if n_splits > 1: for i in range(n_splits): split_dir = os.path.join(out_dir, "split_%i" % i) print("Creating directory at %s" % split_dir) os.mkdir(split_dir) def pair_by_names(images, common_prefix_length): if common_prefix_length == 0: return images from collections import defaultdict names = [os.path.split(i)[-1][:common_prefix_length] for i in images] inds = defaultdict(list) for i, item in enumerate(names): inds[item].append(i) pairs = inds.values() return [tuple(np.array(images)[i]) for i in pairs] def add_images(images, im_folder_path, label_folder_path, im_dir, lab_dir, link_func=os.symlink): for image in images: if not isinstance(image, (list, tuple, np.ndarray)): image = (image,) for im in image: # Get file name file_name = os.path.split(im)[-1] # Get label path (OBS: filenames must match!) lab = im.replace(im_dir, lab_dir) if not os.path.exists(lab): raise OSError("No label file found at '%s'. OBS: image and " "label files must have exactly the same name. 
" "Images should be located at '%s' and labels at" " '%s'" % (lab, im_folder_path, label_folder_path)) # Get relative paths rel_image = os.path.relpath(im, im_folder_path) rel_label = os.path.relpath(lab, label_folder_path) # Symlink or copy link_func(rel_image, im_folder_path + "/%s" % file_name) link_func(rel_label, label_folder_path + "/%s" % file_name) def _add_to_file_list_fallback(rel_image_path, image_path, fname="LIST_OF_FILES.txt"): """ On some system synlinks are not supported, if --files_list flag is set, uses this function to add each absolute file path to a list at the final subfolder that is supposed to store images and label links or actual files At run-time, these files must be loaded by reading in the path from these files instead. """ # Get folder where list of files should be stored folder = os.path.split(image_path)[0] # Get absolute path to image # We change dir to get the correct abs path from the relative os.chdir(folder) abs_file_path = os.path.abspath(rel_image_path) # Get path to the list of files list_file_path = os.path.join(folder, fname) with open(list_file_path, "a") as out_f: out_f.write(abs_file_path + "\n") def entry_func(args=None): # Get parser parser = vars(get_parser().parse_args(args)) # Get arguments data_dir = os.path.abspath(parser["data_dir"]) n_splits = int(parser["CV"]) if n_splits > 1: out_dir = os.path.join(data_dir, parser["out_dir"], "%i_CV" % n_splits) else: out_dir = os.path.join(data_dir, parser["out_dir"], "fixed_split") im_dir = os.path.join(data_dir, parser["im_sub_dir"]) lab_dir = os.path.join(data_dir, parser["lab_sub_dir"]) copy = parser["copy"] file_list = parser["file_list"] regex = parser["file_regex"] val_frac = parser["validation_fraction"] test_frac = parser["test_fraction"] common_prefix_length = parser["common_prefix_length"] if n_splits == 1 and not test_frac: raise ValueError("Must specify --test_fraction with --CV=1.") if copy and file_list: raise ValueError("Only one of --copy and --file_list " "flags must be set.") # Assert suitable folders assert_dir_structure(data_dir, im_dir, lab_dir, out_dir) # Create sub-folders create_view_folders(out_dir, n_splits) # Get images and pair by subject identifier if common_prefix_length > 0 images = glob(os.path.join(im_dir, regex)) images = pair_by_names(images, common_prefix_length) print("-----") print("Found {} images".format(len(images))) # Get validation size N_total = len(images) if n_splits > 1: N_test = N_total // n_splits else: N_test = int(np.ceil(N_total * test_frac)) N_val = int(np.ceil(N_total * val_frac)) if N_val + N_test >= N_total: raise ValueError("Too large validation_fraction - " "No training samples left!") N_train = N_total - N_test - N_val print("Total images:".ljust(40), N_total) print("Train images pr. split:".ljust(40), N_train) print("Validation images pr. split:".ljust(40), N_val) print("Test images pr. 
split:".ljust(40), N_test) # Shuffle and split the images into CV parts random.shuffle(images) splits = np.array_split(images, n_splits) # Symlink / copy files for i, split in enumerate(splits): print(" Split %i/%i" % (i+1, n_splits), end="\r", flush=True) # Set root path to split folder if n_splits > 1: split_path = os.path.join(out_dir, "split_%i" % i) else: split_path = out_dir # Here we kind of hacky force the following code to work with CV=1 # Define a test set and overwrite the current split (which stores # add the data, as splits was never split with n_splits=1 split = splits[0][:N_test] # Overwrite the splits variable to a length 2 array with the # remaining data which will be used as val+train. The loop still # refers to the old split and thus will only execute once splits = [split, splits[0][N_test:]] # Define train, val and test sub-dirs train_path = os.path.join(split_path, "train") train_im_path = os.path.join(train_path, parser["im_sub_dir"]) train_label_path = os.path.join(train_path, parser["lab_sub_dir"]) if N_val: val_path = os.path.join(split_path, "val") val_im_path = os.path.join(val_path, parser["im_sub_dir"]) val_label_path = os.path.join(val_path, parser["lab_sub_dir"]) else: val_path, val_im_path, val_label_path = (None,) * 3 test_path = os.path.join(split_path, "test") test_im_path = os.path.join(test_path, parser["im_sub_dir"]) test_label_path = os.path.join(test_path, parser["lab_sub_dir"]) # Create folders if not existing create_folders([train_path, val_path, train_im_path, train_label_path, val_im_path, val_label_path, test_path, test_im_path, test_label_path]) # Copy or symlink? if copy: from shutil import copyfile move_func = copyfile elif file_list: move_func = _add_to_file_list_fallback else: move_func = os.symlink # Add test data to test folder add_images(split, test_im_path, test_label_path, im_dir, lab_dir, move_func) # Join remaining splits into train+val remaining = [x for ind, x in enumerate(splits) if ind != i] remaining = [item for sublist in remaining for item in sublist] # Extract validation data from the remaining random.shuffle(remaining) validation = remaining[:N_val] training = remaining[N_val:] # Add if validation: add_images(validation, val_im_path, val_label_path, im_dir, lab_dir, move_func) add_images(training, train_im_path, train_label_path, im_dir, lab_dir, move_func) if __name__ == "__main__": entry_func()
41.125475
91
0.606971
0
0
0
0
0
0
0
0
3,563
0.329419
4a4cc74674f055ddea956ccb55ba03b1e2719b21
1,964
py
Python
src/client/pydaos/raw/conversion.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
1
2021-12-04T14:57:48.000Z
2021-12-04T14:57:48.000Z
src/client/pydaos/raw/conversion.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
52
2019-12-04T05:47:10.000Z
2020-06-09T03:26:12.000Z
src/client/pydaos/raw/conversion.py
gczsjdy/daos
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
[ "Apache-2.0" ]
8
2019-12-04T08:26:00.000Z
2020-06-09T07:40:11.000Z
#!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform,
display, or disclose this software are subject to the terms of the Apache
License as provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import ctypes
import uuid


def c_uuid_to_str(uuid):
    """ utility function to convert a C uuid into a standard string format """
    uuid_str = '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}'\
               '{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(
                   uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
                   uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
                   uuid[12], uuid[13], uuid[14], uuid[15])
    return uuid_str


def c_uuid(p_uuid, c_uuid):
    """ utility function to create a UUID in C format from a python UUID """
    hexstr = p_uuid.hex
    for i in range(0, 31, 2):
        c_uuid[int(i/2)] = int(hexstr[i:i+2], 16)


def str_to_c_uuid(uuidstr):
    """ utility function to convert string format uuid to a C uuid """
    uuidstr2 = '{' + uuidstr + '}'
    puuid = uuid.UUID(uuidstr2)
    cuuid = (ctypes.c_ubyte * 16)()
    c_uuid(puuid, cuuid)
    return cuuid
40.081633
79
0.67057
0
0
0
0
0
0
0
0
1,326
0.675153
4a4d871b786cc8a162c159d5da63831c271b0be6
956
py
Python
experiments/nmt/utils/vocabulary_coverage.py
lvapeab/GroundHog_INMT
d5ad1d466eaf5040e99b9aaaa1b28c96402436ce
[ "BSD-3-Clause" ]
null
null
null
experiments/nmt/utils/vocabulary_coverage.py
lvapeab/GroundHog_INMT
d5ad1d466eaf5040e99b9aaaa1b28c96402436ce
[ "BSD-3-Clause" ]
null
null
null
experiments/nmt/utils/vocabulary_coverage.py
lvapeab/GroundHog_INMT
d5ad1d466eaf5040e99b9aaaa1b28c96402436ce
[ "BSD-3-Clause" ]
null
null
null
import cPickle
import argparse

parser = argparse.ArgumentParser(
    "Computes the coverage of a shortlist in a corpus file")
parser.add_argument("--vocab",
                    required=True, help="Vocabulary to use (.pkl)")
parser.add_argument("--text",
                    required=True, help="Beam size, turns on beam-search")
args = parser.parse_args()

with open(args.vocab, 'rb') as f:
    d = cPickle.load(f)

with open(args.text, 'rb') as f:
    text = f.read().splitlines()

n_words = 0
n_unks = 0
split_vocab = 0
split_vocabulary = {}

for line in text:
    for word in line.split():
        if split_vocabulary.get(word) is None:
            split_vocabulary[word] = split_vocab
            split_vocab += 1
            if d.get(word) is None:
                n_unks += 1
        n_words += 1

print "Coverage: %f (%d unknown words out of %d of a total of %d)"%((float)(split_vocab - n_unks)/split_vocab, n_unks, split_vocab, n_words)
28.117647
140
0.621339
0
0
0
0
0
0
0
0
199
0.208159
4a4d9078d162889cc7a0df9b67742f350806db8d
13,952
py
Python
stores/apps/inventory/migrations/0001_initial.py
diassor/CollectorCity-Market-Place
892ad220b8cf1c0fc7433f625213fe61729522b2
[ "Apache-2.0" ]
135
2015-03-19T13:28:18.000Z
2022-03-27T06:41:42.000Z
stores/apps/inventory/migrations/0001_initial.py
dfcoding/CollectorCity-Market-Place
e59acec3d600c049323397b17cae14fdcaaaec07
[ "Apache-2.0" ]
null
null
null
stores/apps/inventory/migrations/0001_initial.py
dfcoding/CollectorCity-Market-Place
e59acec3d600c049323397b17cae14fdcaaaec07
[ "Apache-2.0" ]
83
2015-01-30T01:00:15.000Z
2022-03-08T17:25:10.000Z
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'ProductType' db.create_table('inventory_producttype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal('inventory', ['ProductType']) # Adding model 'Product' db.create_table('inventory_product', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])), ('title', self.gf('django.db.models.fields.CharField')(max_length=200)), ('description', self.gf('django.db.models.fields.TextField')()), ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'])), ('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'])), ('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('weight', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=11, decimal_places=2)), ('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['inventory.ProductType'], null=True, blank=True)), )) db.send_create_signal('inventory', ['Product']) # Adding model 'Coin' db.create_table('inventory_coin', ( ('producttype_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['inventory.ProductType'], unique=True, primary_key=True)), ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'], null=True, blank=True)), ('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'], null=True, blank=True)), ('country_code', self.gf('django.db.models.fields.CharField')(default='us', max_length=2)), ('pcgs_number', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)), ('description', self.gf('django.db.models.fields.TextField')(default='', blank='')), ('year_issued', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')), ('actual_year', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')), ('denomination', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('major_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('die_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('suffix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('sort_order', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('heading', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('holder_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('holder_variety_2', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')), ('additional_data', self.gf('django.db.models.fields.TextField')(default='', blank='')), ('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('inventory', ['Coin']) def backwards(self, orm): # Deleting model 'ProductType' db.delete_table('inventory_producttype') # Deleting model 'Product' db.delete_table('inventory_product') # Deleting 
model 'Coin' db.delete_table('inventory_coin') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'inventory.coin': { 'Meta': {'object_name': 'Coin', '_ormbases': ['inventory.ProductType']}, 'actual_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}), 'additional_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']", 'null': 'True', 'blank': 'True'}), 'country_code': ('django.db.models.fields.CharField', [], {'default': "'us'", 'max_length': '2'}), 'denomination': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}), 'die_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'heading': 
('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'holder_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'holder_variety_2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'major_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'pcgs_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'producttype_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['inventory.ProductType']", 'unique': 'True', 'primary_key': 'True'}), 'sort_order': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}), 'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}), 'year_issued': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}) }, 'inventory.product': { 'Meta': {'object_name': 'Product'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']"}), 'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}), 'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.ProductType']", 'null': 'True', 'blank': 'True'}), 'weight': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}) }, 'inventory.producttype': { 'Meta': {'object_name': 'ProductType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'market.marketcategory': { 'Meta': {'object_name': 'MarketCategory'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}) }, 'market.marketplace': { 'Meta': {'object_name': 'MarketPlace'}, 'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}), 'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}), 'title': ('django.db.models.fields.CharField', [], 
{'max_length': '92'}) }, 'market.marketsubcategory': { 'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'}) }, 'shops.shop': { 'Meta': {'object_name': 'Shop'}, 'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}), 'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}), 'views': ('django.db.models.fields.IntegerField', [], {'default': '0'}) } } complete_apps = ['inventory']
76.240437
181
0.573825
13,826
0.990969
0
0
0
0
0
0
8,847
0.634103
4a4e581c499165152bc4c54e7fe90ad3b4939698
48,733
py
Python
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
vi4m/ralph
2af767ee23d89be9e6cec0a537350a1ce8840bd1
[ "Apache-2.0" ]
1
2018-09-01T14:14:08.000Z
2018-09-01T14:14:08.000Z
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
srikanth4372/sample
127b5742ae464d42909a14d71e3c10c241ec3a23
[ "Apache-2.0" ]
1
2019-08-14T10:03:45.000Z
2019-08-14T10:03:45.000Z
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
srikanth4372/sample
127b5742ae464d42909a14d71e3c10c241ec3a23
[ "Apache-2.0" ]
1
2019-08-14T09:59:42.000Z
2019-08-14T09:59:42.000Z
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'ArchivedDeployment.service' db.add_column('deployment_archiveddeployment', 'service', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL), keep_default=False) # Adding field 'ArchivedDeployment.device_environment' db.add_column('deployment_archiveddeployment', 'device_environment', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL), keep_default=False) # Adding field 'Deployment.service' db.add_column('deployment_deployment', 'service', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL), keep_default=False) # Adding field 'Deployment.device_environment' db.add_column('deployment_deployment', 'device_environment', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL), keep_default=False) def backwards(self, orm): # Deleting field 'ArchivedDeployment.service' db.delete_column('deployment_archiveddeployment', 'service_id') # Deleting field 'ArchivedDeployment.device_environment' db.delete_column('deployment_archiveddeployment', 'device_environment_id') # Deleting field 'Deployment.service' db.delete_column('deployment_deployment', 'service_id') # Deleting field 'Deployment.device_environment' db.delete_column('deployment_deployment', 'device_environment_id') models = { 'account.profile': { 'Meta': {'object_name': 'Profile'}, 'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}), 'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}), 'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}), 'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}), 'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': 
"orm['auth.User']", 'unique': 'True'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'business.businesssegment': { 'Meta': {'object_name': 'BusinessSegment'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'business.department': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'Department'}, 'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'business.profitcenter': { 'Meta': {'object_name': 'ProfitCenter'}, 'description': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'business.venture': { 'Meta': {'ordering': "(u'parent__symbol', u'symbol')", 'unique_together': "((u'parent', u'symbol'),)", 'object_name': 'Venture'}, 'business_segment': 
('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.BusinessSegment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}), 'department': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Department']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_infrastructure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.Venture']"}), 'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'profit_center': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.ProfitCenter']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'show_in_ralph': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'symbol': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}), 'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'business.venturerole': { 'Meta': {'ordering': "(u'parent__name', u'name')", 'unique_together': "((u'name', u'venture'),)", 'object_name': 'VentureRole'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.VentureRole']"}), 'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']"}) }, 'cmdb.ci': { 'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CI'}, 'added_manually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'barcode': ('django.db.models.fields.CharField', 
[], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'business_service': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CILayer']", 'symmetrical': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIOwner']", 'through': "orm['cmdb.CIOwnership']", 'symmetrical': 'False'}), 'pci_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CI']", 'through': "orm['cmdb.CIRelation']", 'symmetrical': 'False'}), 'state': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}), 'technical_service': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIType']"}), 'uid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'zabbix_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}) }, 'cmdb.cilayer': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'CILayer'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'connected_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, 'cmdb.ciowner': { 'Meta': {'object_name': 'CIOwner'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['account.Profile']", 'unique': 'True'}) }, 'cmdb.ciownership': { 'Meta': {'object_name': 'CIOwnership'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIOwner']"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'cmdb.cirelation': { 'Meta': {'unique_together': "((u'parent', u'child', u'type'),)", 'object_name': 'CIRelation'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child'", 'to': "orm['cmdb.CI']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent'", 'to': "orm['cmdb.CI']"}), 'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}) }, 'cmdb.citype': { 'Meta': {'object_name': 'CIType'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'icon_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'deployment.archiveddeployment': { 'Meta': {'ordering': "(u'-created',)", 'object_name': 'ArchivedDeployment'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}), 'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 
'False', 'db_index': 'False'}), 'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'}) }, 'deployment.deployment': { 'Meta': {'ordering': "(u'-created',)", 'object_name': 'Deployment'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}), 'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 'False', 'db_index': 'False'}), 'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'venture_role': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'}) }, 'deployment.deploymentpoll': { 'Meta': {'object_name': 'DeploymentPoll'}, 'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'deployment.massdeployment': { 'Meta': {'ordering': "(u'-created',)", 'object_name': 'MassDeployment'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}), 'csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'generated_csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}) }, 'deployment.preboot': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'Preboot'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deployment.PrebootFile']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'deployment.prebootfile': { 'Meta': {'object_name': 'PrebootFile'}, 'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}), 'ftype': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '101', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'raw_config': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'discovery.connection': { 'Meta': {'object_name': 'Connection'}, 'connection_type': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'inbound_connections'", 'on_delete': 'models.PROTECT', 'to': 
"orm['discovery.Device']"}), 'outbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'outbound_connections'", 'on_delete': 'models.PROTECT', 'to': "orm['discovery.Device']"}) }, 'discovery.datacenter': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'DataCenter'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'discovery.deprecationkind': { 'Meta': {'object_name': 'DeprecationKind'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}) }, 'discovery.device': { 'Meta': {'object_name': 'Device'}, 'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'boot_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'cached_cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'cached_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'chassis_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'connections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'through': "orm['discovery.Connection']", 'symmetrical': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dc': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'deprecation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deprecation_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.DeprecationKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'diag_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'hard_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'logical_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logicalchild_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': 
"orm['discovery.Device']", 'blank': 'True', 'null': 'True'}), 'management': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'managed_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.IPAddress']", 'blank': 'True', 'null': 'True'}), 'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'mgmt_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'model': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'device_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.DeviceModel']", 'blank': 'True', 'null': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'name2': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.Device']", 'blank': 'True', 'null': 'True'}), 'position': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'price': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'purchase_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'rack': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}), 'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'sn': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'support_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'support_kind': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'uptime_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'uptime_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'warranty_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'discovery.devicemodel': { 'Meta': {'object_name': 'DeviceModel'}, 'cache_version': ('django.db.models.fields.PositiveIntegerField', 
[], {'default': '0'}), 'chassis_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '401'}) }, 'discovery.discoveryqueue': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'DiscoveryQueue'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'discovery.environment': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'Environment'}, 'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']"}), 'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'hosts_naming_template': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'next_server': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}), 'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DiscoveryQueue']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'discovery.ipaddress': { 'Meta': {'object_name': 'IPAddress'}, 'address': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dead_ping_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Device']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'dns_info': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'http_family': ('django.db.models.fields.TextField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_buried': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_management': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_plugins': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'last_puppet': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_seen': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'network': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Network']", 'null': 'True', 'blank': 'True'}), 'number': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}), 'scan_summary': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scan.ScanSummary']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'snmp_community': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}), 'snmp_name': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'snmp_version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '5', 'null': 'True', 'blank': 'True'}), 'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}) }, 'discovery.marginkind': { 'Meta': {'object_name': 'MarginKind'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'margin': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}) }, 'discovery.network': { 'Meta': {'ordering': "(u'vlan',)", 'object_name': 'Network'}, 'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'custom_dns_servers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['dnsedit.DNSServer']", 'null': 'True', 'blank': 'True'}), 'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}), 'dhcp_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'dhcp_config': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'environment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'gateway': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}), 'gateway_as_int': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ignore_addresses': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.NetworkKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'last_scan': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'max_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'min_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 
'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}), 'racks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'symmetrical': 'False'}), 'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'reserved': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}), 'reserved_top_margin': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'terminators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.NetworkTerminator']", 'symmetrical': 'False'}), 'vlan': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}) }, 'discovery.networkkind': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkKind'}, 'icon': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'discovery.networkterminator': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkTerminator'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}) }, 'dnsedit.dnsserver': { 'Meta': {'object_name': 'DNSServer'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}) }, 'scan.scansummary': { 'Meta': {'object_name': 'ScanSummary'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'false_positive_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}), 'previous_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}) }, 'tags.tag': { 'Meta': {'object_name': 'Tag'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['account.Profile']"}), 'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'tags_tag_tags'", 'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'official': ('django.db.models.fields.BooleanField', [], {'default': 
'False'}), 'stem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'related_tags'", 'null': 'True', 'to': "orm['tags.TagStem']"}) }, 'tags.tagstem': { 'Meta': {'object_name': 'TagStem'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}), 'tag_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) } } complete_apps = ['deployment']
94.996101
239
0.572487
48,601
0.997291
0
0
0
0
0
0
34,533
0.708616
4a4f4bc06c12566c84246f7896cf490e49f35766
2,059
py
Python
SPH/sphbwr_example2.py
RLReed/unotran
b317107e1a39490dda732f86a731872f5207a167
[ "MIT" ]
null
null
null
SPH/sphbwr_example2.py
RLReed/unotran
b317107e1a39490dda732f86a731872f5207a167
[ "MIT" ]
null
null
null
SPH/sphbwr_example2.py
RLReed/unotran
b317107e1a39490dda732f86a731872f5207a167
[ "MIT" ]
3
2019-12-02T23:01:24.000Z
2022-01-26T04:48:41.000Z
import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')

from coarseBounds import computeBounds, Grouping
import pickle

from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm


def buildGEO(ass_map):
    fine_map = [1]
    coarse_map = [1.26]

    material_map = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]

    npins = len(ass_map)

    cm = [0.0]
    fm = []
    mm = []
    for i, ass in enumerate(ass_map):
        mm += material_map[ass]
        cm += coarse_map
        fm += fine_map
    cm = np.cumsum(cm)

    return npins, fm, cm, mm


def makeDGMXS(G, refXS, dgmstructure, basisType):
    if 'klt' in basisType:
        makeKLT(basisType, dgmstructure)
    else:
        makeDLP(dgmstructure)
    dgmstructure.fname = '{}_{}'.format(basisType, dgmstructure.fname)

    fname = '_homo.'.join(xs_name.split('.'))
    refXS.write_homogenized_XS(fname)

    nPin, fm, cm, mm = buildGEO(pin_map)

    dgm = sph_dgm.DGMSOLVER(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)
    pydgm.dgmsolver.initialize_dgmsolver()
    dgm.extractInfo()

    pydgm.dgmsolver.finalize_dgmsolver()
    pydgm.control.finalize_control()

    nCellPerPin = dgm.phi.shape[2] // dgm.npin

    return sph_dgm.XS(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)


if __name__ == '__main__':
    np.set_printoptions(precision=6)

    G = 44

    dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
    fname = dgmstructure.fname
    xs_name = 'XS/{}gXS.anlxs'.format(G)
    pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    data_path = 'data2'

    # Get the homogenized cross sections
    refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))

    for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
        dgmstructure.fname = fname
        XS = makeDGMXS(G, refXS, dgmstructure, basis)
        pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
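A minimal standalone sketch of the buildGEO step above, assuming only numpy; the pin map indexes the ten single-material pin definitions, and the function and variable names here are illustrative rather than part of the original module:

import numpy as np

def build_geo_sketch(ass_map):
    fine_map = [1]        # fine mesh cells per pin
    coarse_map = [1.26]   # pin pitch (cm)
    material_map = [[m] for m in range(1, 11)]
    cm, fm, mm = [0.0], [], []
    for ass in ass_map:
        mm += material_map[ass]   # one material id per pin
        cm += coarse_map          # accumulate pin pitches
        fm += fine_map            # fine cells per coarse region
    return len(ass_map), fm, np.cumsum(cm), mm

npins, fm, cm, mm = build_geo_sketch([0, 1, 2])
print(npins, fm, cm.tolist(), mm)
# -> 3 [1, 1, 1] [0.0, 1.26, 2.52, 3.78] [1, 2, 3]  (up to float rounding)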
27.453333
115
0.644973
0
0
0
0
0
0
0
0
246
0.119475
4a4fd2b57960e4af2acbb3603c634154bea6e80b
9,280
py
Python
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
CentroidChef/oci-python-sdk
fa406e27a52b40c70e220c20f52dfe2abe6236a3
[ "Apache-2.0", "BSD-3-Clause" ]
249
2017-09-11T22:06:05.000Z
2022-03-04T17:09:29.000Z
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
CentroidChef/oci-python-sdk
fa406e27a52b40c70e220c20f52dfe2abe6236a3
[ "Apache-2.0", "BSD-3-Clause" ]
228
2017-09-11T23:07:26.000Z
2022-03-23T10:58:50.000Z
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
CentroidChef/oci-python-sdk
fa406e27a52b40c70e220c20f52dfe2abe6236a3
[ "Apache-2.0", "BSD-3-Clause" ]
224
2017-09-27T07:32:43.000Z
2022-03-25T16:55:42.000Z
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ManagementAgentAggregationDimensions(object): """ The Aggregation of Management Agent Dimensions """ #: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "ACTIVE" AVAILABILITY_STATUS_ACTIVE = "ACTIVE" #: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "SILENT" AVAILABILITY_STATUS_SILENT = "SILENT" #: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "NOT_AVAILABLE" AVAILABILITY_STATUS_NOT_AVAILABLE = "NOT_AVAILABLE" #: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "LINUX" PLATFORM_TYPE_LINUX = "LINUX" #: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "WINDOWS" PLATFORM_TYPE_WINDOWS = "WINDOWS" #: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "AGENT" INSTALL_TYPE_AGENT = "AGENT" #: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "GATEWAY" INSTALL_TYPE_GATEWAY = "GATEWAY" def __init__(self, **kwargs): """ Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param availability_status: The value to assign to the availability_status property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type availability_status: str :param platform_type: The value to assign to the platform_type property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type platform_type: str :param version: The value to assign to the version property of this ManagementAgentAggregationDimensions. :type version: str :param has_plugins: The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions. :type has_plugins: bool :param install_type: The value to assign to the install_type property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. 
:type install_type: str """ self.swagger_types = { 'availability_status': 'str', 'platform_type': 'str', 'version': 'str', 'has_plugins': 'bool', 'install_type': 'str' } self.attribute_map = { 'availability_status': 'availabilityStatus', 'platform_type': 'platformType', 'version': 'version', 'has_plugins': 'hasPlugins', 'install_type': 'installType' } self._availability_status = None self._platform_type = None self._version = None self._has_plugins = None self._install_type = None @property def availability_status(self): """ Gets the availability_status of this ManagementAgentAggregationDimensions. The availability status of managementAgent Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The availability_status of this ManagementAgentAggregationDimensions. :rtype: str """ return self._availability_status @availability_status.setter def availability_status(self, availability_status): """ Sets the availability_status of this ManagementAgentAggregationDimensions. The availability status of managementAgent :param availability_status: The availability_status of this ManagementAgentAggregationDimensions. :type: str """ allowed_values = ["ACTIVE", "SILENT", "NOT_AVAILABLE"] if not value_allowed_none_or_none_sentinel(availability_status, allowed_values): availability_status = 'UNKNOWN_ENUM_VALUE' self._availability_status = availability_status @property def platform_type(self): """ Gets the platform_type of this ManagementAgentAggregationDimensions. Platform Type Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The platform_type of this ManagementAgentAggregationDimensions. :rtype: str """ return self._platform_type @platform_type.setter def platform_type(self, platform_type): """ Sets the platform_type of this ManagementAgentAggregationDimensions. Platform Type :param platform_type: The platform_type of this ManagementAgentAggregationDimensions. :type: str """ allowed_values = ["LINUX", "WINDOWS"] if not value_allowed_none_or_none_sentinel(platform_type, allowed_values): platform_type = 'UNKNOWN_ENUM_VALUE' self._platform_type = platform_type @property def version(self): """ Gets the version of this ManagementAgentAggregationDimensions. Agent image version :return: The version of this ManagementAgentAggregationDimensions. :rtype: str """ return self._version @version.setter def version(self, version): """ Sets the version of this ManagementAgentAggregationDimensions. Agent image version :param version: The version of this ManagementAgentAggregationDimensions. :type: str """ self._version = version @property def has_plugins(self): """ Gets the has_plugins of this ManagementAgentAggregationDimensions. Whether or not a managementAgent has at least one plugin :return: The has_plugins of this ManagementAgentAggregationDimensions. :rtype: bool """ return self._has_plugins @has_plugins.setter def has_plugins(self, has_plugins): """ Sets the has_plugins of this ManagementAgentAggregationDimensions. Whether or not a managementAgent has at least one plugin :param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions. :type: bool """ self._has_plugins = has_plugins @property def install_type(self): """ Gets the install_type of this ManagementAgentAggregationDimensions. 
The install type, either AGENT or GATEWAY Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The install_type of this ManagementAgentAggregationDimensions. :rtype: str """ return self._install_type @install_type.setter def install_type(self, install_type): """ Sets the install_type of this ManagementAgentAggregationDimensions. The install type, either AGENT or GATEWAY :param install_type: The install_type of this ManagementAgentAggregationDimensions. :type: str """ allowed_values = ["AGENT", "GATEWAY"] if not value_allowed_none_or_none_sentinel(install_type, allowed_values): install_type = 'UNKNOWN_ENUM_VALUE' self._install_type = install_type def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
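A hedged usage sketch of the model above; it assumes the oci package is installed and that the class is importable from oci.management_agent.models, as the file path suggests, and the field values are illustrative only:

from oci.management_agent.models import ManagementAgentAggregationDimensions

dims = ManagementAgentAggregationDimensions(
    availability_status="ACTIVE",   # ACTIVE / SILENT / NOT_AVAILABLE
    platform_type="LINUX",          # LINUX / WINDOWS
    version="210403.0101",          # illustrative agent image version
    has_plugins=True,
    install_type="AGENT",           # AGENT / GATEWAY
)
print(dims)                         # __repr__ delegates to formatted_flat_dict
dims.platform_type = "SOLARIS"      # unrecognized values are coerced by the setter
print(dims.platform_type)           # -> UNKNOWN_ENUM_VALUE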
37.57085
245
0.691487
8,740
0.94181
0
0
8,770
0.945043
0
0
6,546
0.705388
4a5059beb09af2b372b1d15c442329a32a505195
1,770
py
Python
py_buycoins/sending.py
Bashorun97/BuyCoins-Python-SDK
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
[ "MIT" ]
1
2021-02-16T14:26:30.000Z
2021-02-16T14:26:30.000Z
py_buycoins/sending.py
Bashorun97/BuyCoins-Python-SDK
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
[ "MIT" ]
null
null
null
py_buycoins/sending.py
Bashorun97/BuyCoins-Python-SDK
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
[ "MIT" ]
null
null
null
from .gcore.queries import GetNetworkFee, GetBalance
from .gcore.mutations import SendCoin
from typing import List, Optional
from .exc import SendLimitError, InvalidClientObject


class Send:
    def __init__(self, address: str, cryptocurrency: str, amount: float):
        self.address = address
        self.cryptocurrency = cryptocurrency
        self.amount = amount

    # Daily send limits per supported currency.
    limits = {
        "bitcoin": 1,
        "ethereum": 50,
        "litecoin": 50,
        "nairatoken": 2000000
    }

    def execute(self, client, response_fields):
        # response_fields is threaded through to send(); the original called
        # self.send() without the argument that send() requires.
        try:
            return client.execute(query=self.send(response_fields))
        except AttributeError:
            raise InvalidClientObject("<BuyCoinsClient> object expected received {} instead".format(type(client)))

    def get_network_fee(self, response_fields):
        _price = GetNetworkFee()
        return _price.queryObject(
            response_fields=response_fields,
            cryptocurrency=self.cryptocurrency,
            amount=self.amount
        )

    def check_limit(self):
        if Send.limits[self.cryptocurrency.lower()] < self.amount:
            return False
        else:
            return True

    def send(self, response_fields):
        if self.cryptocurrency.lower() in Send.limits.keys():
            if self.check_limit():  # check_limit takes no arguments; the original passed two
                return SendCoin().Mutate(
                    cryptocurrency=self.cryptocurrency,
                    response_fields=response_fields,
                    amount=self.amount,
                    address=self.address
                )
            else:
                raise SendLimitError("Maximum daily transaction amount exceeded")

    def balance(self, response_fields: List):
        # Instantiated to match the GetNetworkFee usage above; the original
        # called queryObject on the class itself.
        return GetBalance().queryObject(response_fields=response_fields)
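A hedged usage sketch of the Send class above; the import path assumes the py_buycoins package layout shown in the file path, and the address and response_fields values are illustrative only:

from py_buycoins.sending import Send

send = Send(address="1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",  # illustrative address
            cryptocurrency="bitcoin",
            amount=0.05)
if send.check_limit():
    query = send.send(response_fields=["id", "fee", "transaction"])  # illustrative fields
    # With an authenticated BuyCoinsClient instance:
    # result = send.execute(client, response_fields=["id", "fee", "transaction"])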
33.396226
114
0.627684
1,589
0.89774
0
0
0
0
0
0
138
0.077966
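A minimal usage sketch for the Send class above; the client object, its execute() call, and the GraphQL response field names are assumptions for illustration and are not part of the snippet.

from py_buycoins.sending import Send

tx = Send(address="example-destination-address", cryptocurrency="bitcoin", amount=0.5)
if tx.check_limit():
    # "id" and "fee" are placeholder response fields; real names depend on the BuyCoins schema.
    query = tx.send(response_fields=["id", "fee"])
    # result = client.execute(query)  # client: a configured BuyCoinsClient (assumed)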
4a51566e6f537d3c7defee7d9f6dd2e1ce52fbb6
2,190
py
Python
snippet/example/python/url.py
yp2800/snippet
054af596655007cbec81340bd166489e706fffe6
[ "MIT" ]
94
2016-09-22T09:13:19.000Z
2022-03-30T07:35:35.000Z
snippet/example/python/url.py
yp2800/snippet
054af596655007cbec81340bd166489e706fffe6
[ "MIT" ]
1
2020-11-22T03:05:05.000Z
2020-11-22T03:05:05.000Z
snippet/example/python/url.py
yp2800/snippet
054af596655007cbec81340bd166489e706fffe6
[ "MIT" ]
38
2017-06-11T22:03:04.000Z
2022-03-10T07:46:39.000Z
# -*- coding: utf-8 -*-

try:
    from urlparse import urlparse, urlunsplit
except ImportError:
    from urllib.parse import urlparse, urlunsplit


class URL(object):
    DEFAULT_SCHEME = ["http", "https"]

    def __init__(self, url, allowed_scheme=None):
        self._url = url
        self.url = urlparse(self._url)
        self._scheme = allowed_scheme if allowed_scheme else self.DEFAULT_SCHEME

    def geturl(self):
        scheme = self.scheme if self.scheme else self.url.scheme
        netloc = self.netloc if self.netloc else self.url.netloc
        url = self.path if self.path else self.url.path
        params = self.params if self.params else self.url.params
        query = self.query if self.query else self.url.query
        fragment = self.fragment if self.fragment else self.url.fragment

        if params:
            url = "%s;%s" % (url, params)

        return urlunsplit((scheme, netloc, url, query, fragment))

    def get_full_url(self, base=None):
        return self.s_get_full_url(self, base)

    @staticmethod
    def s_get_full_url(url, base=None):
        if not base:
            if url.scheme in url._scheme:
                return url.geturl()
            return None

        if not url.scheme:
            url.scheme = base.scheme

        if url.scheme not in url._scheme:
            return None

        if not url.netloc:
            url.netloc = base.netloc

            if len(url.path) == 1 and url.path == '/':
                return None

            if url.path[0] != '/':
                path = base.path.split('/')[:-1]
                path.append(url.path)
                url.path = '/'.join(path)

        return url.geturl()

    def __getattr__(self, name):
        if name == "path":
            path = getattr(self.url, name)
            if not path:
                return '/'
            return path
        return getattr(self.url, name)

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)

    def __repr__(self):
        s = "URL(scheme='%s', netloc='%s', path='%s', params='%s', query='%s', fragment='%s')"
        p = (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment)
        return s % p
31.73913
94
0.581279
2,041
0.931963
0
0
623
0.284475
0
0
146
0.066667
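A short usage sketch for the URL helper above, resolving a relative link against a base page; the example addresses are made up.

base = URL("https://example.com/docs/index.html")
link = URL("guide.html")
print(link.get_full_url(base))                     # https://example.com/docs/guide.html
print(URL("ftp://example.com/x").get_full_url())   # None: "ftp" is not an allowed scheme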
4a533004a2f846794254f71446a4268346a94d9f
550
py
Python
netvisor_api_client/services/dimension.py
tristen-tooming/netvisor-api-client
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
[ "MIT" ]
null
null
null
netvisor_api_client/services/dimension.py
tristen-tooming/netvisor-api-client
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
[ "MIT" ]
null
null
null
netvisor_api_client/services/dimension.py
tristen-tooming/netvisor-api-client
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
[ "MIT" ]
null
null
null
from .base import Service
from ..requests.dimension import CreateDimensionsRequest, DimensionsListRequest


class DimensionService(Service):
    def create(self, data):
        request = CreateDimensionsRequest(
            self.client,
            params={'method': 'add'},
            data=data
        )
        return request.make_request()

    def list(self, showhidden=None):
        request = DimensionsListRequest(self.client, params={'showhidden': showhidden})
        return request.make_request()
28.947368
79
0.616364
442
0.803636
0
0
0
0
0
0
25
0.045455
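A usage sketch for DimensionService above; the client object, how the service is constructed, and the payload shape passed to create() are assumptions, since the base Service and request classes are not shown.

service = DimensionService(client)           # client: a configured Netvisor API client (assumed)
service.create(data={"name": "Projects"})    # payload keys are illustrative only
dimensions = service.list(showhidden=1)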
4a54146d12e005b9045dcbb5b4f63178061f1a78
7,338
py
Python
cishouseholds/filter.py
ONS-SST/cis_households
e475df5929e6763a46cd05aff1f7e960ccbe8e21
[ "MIT" ]
null
null
null
cishouseholds/filter.py
ONS-SST/cis_households
e475df5929e6763a46cd05aff1f7e960ccbe8e21
[ "MIT" ]
252
2021-05-19T11:12:43.000Z
2022-03-02T10:39:10.000Z
cishouseholds/filter.py
ONS-SST/cis_households
e475df5929e6763a46cd05aff1f7e960ccbe8e21
[ "MIT" ]
null
null
null
from typing import List from typing import Union from pyspark.sql import DataFrame from pyspark.sql import functions as F from pyspark.sql.window import Window def filter_all_not_null(df: DataFrame, reference_columns: List[str]) -> DataFrame: """ Filter rows which have NULL values in all the specified columns. From households_aggregate_processes.xlsx, filter number 2. Parameters ---------- df reference_columns Columns to check for missing values in, all must be missing for the record to be dropped. """ return df.na.drop(how="all", subset=reference_columns) def filter_duplicates_by_time_and_threshold( df: DataFrame, first_reference_column: str, second_reference_column: str, third_reference_column: str, fourth_reference_column: str, time_threshold: float = 1.5, float_threshold: float = 0.00001, ) -> DataFrame: """ Drop duplicates based on two identitical column values if third and fourth column and not both within a threshold difference from the first duplicate record. From households_aggregate_processes.xlsx, filter number 4. Parameters ---------- df first_reference_column First column with duplicate value second_reference_column Second column with duplicate value third_reference_column Column used for time based threshold difference, timestamp fourth_reference_column Column used for numeric based threshold difference, float """ window = Window.partitionBy(first_reference_column, second_reference_column).orderBy(third_reference_column) df = df.withColumn("duplicate_id", F.row_number().over(window)) df = df.withColumn( "within_time_threshold", ( F.abs( F.first(third_reference_column).over(window).cast("long") - F.col(third_reference_column).cast("long") ) / (60 * 60) ) < time_threshold, ) df = df.withColumn( "within_float_threshold", F.abs(F.first(fourth_reference_column).over(window) - F.col(fourth_reference_column)) < float_threshold, ) df = df.filter((F.col("duplicate_id") == 1) | ~(F.col("within_time_threshold") & (F.col("within_float_threshold")))) return df.drop("duplicate_id", "within_time_threshold", "within_float_threshold") def filter_by_cq_diff( df: DataFrame, comparing_column: str, ordering_column: str, tolerance: float = 0.00001 ) -> DataFrame: """ This function works out what columns have a float value difference less than 10-^5 or 0.00001 (or any other tolerance value inputed) given all the other columns are the same and considers it to be the same dropping or deleting the repeated values and only keeping one entry. 
Parameters ---------- df comparing_column ordering_column tolerance """ column_list = df.columns column_list.remove(comparing_column) windowSpec = Window.partitionBy(column_list).orderBy(ordering_column) df = df.withColumn("first_value_in_duplicates", F.first(comparing_column).over(windowSpec)) df = df.withColumn( "duplicates_first_record", F.abs(F.col("first_value_in_duplicates") - F.col(comparing_column)) < tolerance ) difference_window = Window.partitionBy(column_list + ["duplicates_first_record"]).orderBy(ordering_column) df = df.withColumn("duplicate_number", F.row_number().over(difference_window)) df = df.filter(~(F.col("duplicates_first_record") & (F.col("duplicate_number") != 1))) df = df.drop("first_value_in_duplicates", "duplicates_first_record", "duplicate_number") return df def assign_date_interval_and_flag( df: DataFrame, column_name_inside_interval: str, column_name_time_interval: str, start_datetime_reference_column: str, end_datetime_reference_column: str, lower_interval: Union[int, float], upper_interval: Union[int, float], interval_format: str = "hours", ) -> DataFrame: """ This function gives the time interval in either hours (by default) or days in a column by given two date columns and says whether it is inside and upper and lower interval. If the difference of dates is within the upper and lower time intervals, the function will output None and an integer 1 if the difference in dates are outside of those intervals. Parameters ---------- df column_name_inside_interval Name of the column that returns whether the difference in dates are within the upper/lower limits if within, it will return None, if outside will return an integer 1. column_name_time_interval Name of the column that returns the difference between start and end date and adds at the end of the column name whether it is in hours or days start_datetime_reference_column Earliest date in string format yyyy-mm-dd hh:mm:ss. end_datetime_reference_column Latest date in string format yyyy-mm-dd hh:mm:ss. lower_interval Marks how much NEGATIVE time difference can have between end_datetime_reference_column and start_datetime_reference_column. Meaning how the end_datetime_reference_column can be earlier than start_datetime_reference_column upper_interval Marks how much POSITIVE time difference can have between end_datetime_reference_column and start_datetime_reference_column interval_format By default will be a string called 'hours' if upper and lower intervals are input as days, define interval_format to 'days'. These are the only two possible formats. 
Notes ----- Lower_interval should be a negative value if start_datetime_reference_column is after end_datetime_reference_column.""" # by default, Hours but if days, apply change factor if interval_format == "hours": # to convert hours to seconds conversion_factor = 3600 # 1h has 60s*60min seconds = 3600 seconds elif interval_format == "days": conversion_factor = 86400 # 1 day has 60s*60min*24h seconds = 86400 seconds column_name_time_interval = column_name_time_interval + "_" + interval_format # FORMULA: (end_datetime_reference_column - start_datetime_reference_column) in # seconds/conversion_factor in seconds df = df.withColumn( column_name_time_interval, ( F.to_timestamp(F.col(end_datetime_reference_column)).cast("long") - F.to_timestamp(F.col(start_datetime_reference_column)).cast("long") ) / conversion_factor, # 1 day has 60s*60min*24h seconds = 86400 seconds ) return df.withColumn( column_name_inside_interval, F.when(~F.col(column_name_time_interval).between(lower_interval, upper_interval), 1).otherwise(None), ) def file_exclude(df: DataFrame, source_file_col: str, files_to_exclude: list): """ Function to exclude specific files from pipeline processing Parameters -------- df source_file_column = Column in input dataframe which contains the source file files_to_exclude = List of files to exclude (feed in from config) """ for item in files_to_exclude: df = df.filter(~F.col(source_file_col).isin(item)) return df
38.020725
120
0.710139
0
0
0
0
0
0
0
0
4,191
0.571137
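A small local sketch of filter_all_not_null from the module above; it assumes a local Spark session and toy data, nothing project-specific.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame(
    [(1, None, None), (2, "a", None), (3, None, "b")],
    ["id", "col_a", "col_b"],
)
# Only the first row has NULL in both reference columns, so only it is dropped.
filter_all_not_null(df, ["col_a", "col_b"]).show()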
4a544c66c68a458b980a2174bdc25da63354dc6e
6,088
py
Python
cscs-checks/cuda/multi_gpu.py
hpc-unibe-ch/reframe
07f97e25cf4e7319782c37dd1923f7e70a368b99
[ "BSD-3-Clause" ]
null
null
null
cscs-checks/cuda/multi_gpu.py
hpc-unibe-ch/reframe
07f97e25cf4e7319782c37dd1923f7e70a368b99
[ "BSD-3-Clause" ]
null
null
null
cscs-checks/cuda/multi_gpu.py
hpc-unibe-ch/reframe
07f97e25cf4e7319782c37dd1923f7e70a368b99
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause import os import reframe.utility.sanity as sn import reframe as rfm @rfm.required_version('>=2.16-dev0') @rfm.simple_test class GpuBandwidthCheck(rfm.RegressionTest): def __init__(self): self.valid_systems = ['kesch:cn', 'daint:gpu', 'dom:gpu', 'tiger:gpu', 'arolla:cn', 'tsa:cn'] self.valid_prog_environs = ['PrgEnv-gnu'] if self.current_system.name in ['arolla', 'kesch', 'tsa']: self.valid_prog_environs = ['PrgEnv-gnu-nompi'] self.exclusive_access = True self.sourcesdir = os.path.join( self.current_system.resourcesdir, 'CUDA', 'essentials' ) self.build_system = 'SingleSource' # Set nvcc flags nvidia_sm = '60' if self.current_system.name == 'kesch': nvidia_sm = '37' elif self.current_system.name in ['arolla', 'tsa']: nvidia_sm = '70' self.build_system.cxxflags = ['-I.', '-m64', '-arch=sm_%s' % nvidia_sm] self.sourcepath = 'bandwidthtestflex.cu' self.executable = 'gpu_bandwidth_check.x' # Perform a single bandwidth test with a buffer size of 1024MB self.min_buffer_size = 1073741824 self.max_buffer_size = 1073741824 self.executable_opts = ['device', 'all', '--mode=range', '--start=%d' % self.min_buffer_size, '--increment=%d' % self.min_buffer_size, '--end=%d' % self.max_buffer_size, '--csv'] self.num_tasks = 0 self.num_tasks_per_node = 1 if self.current_system.name in ['daint', 'dom', 'tiger']: self.modules = ['craype-accel-nvidia60'] self.num_gpus_per_node = 1 elif self.current_system.name == 'kesch': self.modules = ['cudatoolkit/8.0.61'] self.num_gpus_per_node = 8 elif self.current_system.name in ['arolla', 'tsa']: self.modules = ['cuda/10.1.243'] self.num_gpus_per_node = 8 # perf_patterns and reference will be set by the sanity check function self.sanity_patterns = self.do_sanity_check() self.perf_patterns = {} self.reference = {} self.__bwref = { # FIXME: reference values for Arolla and Tsa need to be updated # (sanity check fails if they are not defined) 'arolla:cn:h2d': (7583, -0.1, None, 'MB/s'), 'arolla:cn:d2h': (7584, -0.1, None, 'MB/s'), 'arolla:cn:d2d': (137408, -0.1, None, 'MB/s'), 'daint:gpu:h2d': (11881, -0.1, None, 'MB/s'), 'daint:gpu:d2h': (12571, -0.1, None, 'MB/s'), 'daint:gpu:d2d': (499000, -0.1, None, 'MB/s'), 'dom:gpu:h2d': (11881, -0.1, None, 'MB/s'), 'dom:gpu:d2h': (12571, -0.1, None, 'MB/s'), 'dom:gpu:d2d': (499000, -0.1, None, 'MB/s'), 'kesch:cn:h2d': (7583, -0.1, None, 'MB/s'), 'kesch:cn:d2h': (7584, -0.1, None, 'MB/s'), 'kesch:cn:d2d': (137408, -0.1, None, 'MB/s'), 'tiger:gpu:h2d': (0, None, None, 'MB/s'), 'tiger:gpu:d2h': (0, None, None, 'MB/s'), 'tiger:gpu:d2d': (0, None, None, 'MB/s'), 'tsa:cn:h2d': (7583, -0.1, None, 'MB/s'), 'tsa:cn:d2h': (7584, -0.1, None, 'MB/s'), 'tsa:cn:d2d': (137408, -0.1, None, 'MB/s'), } self.tags = {'diagnostic', 'benchmark', 'mch', 'craype', 'external-resources'} self.maintainers = ['AJ', 'SK'] def _xfer_pattern(self, xfer_kind, devno, nodename): '''generates search pattern for performance analysis''' if xfer_kind == 'h2d': first_part = 'bandwidthTest-H2D-Pinned' elif xfer_kind == 'd2h': first_part = 'bandwidthTest-D2H-Pinned' else: first_part = 'bandwidthTest-D2D' # Extract the bandwidth corresponding to the maximum buffer size return (r'^%s[^,]*,\s*%s[^,]*,\s*Bandwidth\s*=\s*(\S+)\s*MB/s([^,]*,)' r'{2}\s*Size\s*=\s*%d\s*bytes[^,]*,\s*DeviceNo\s*=\s*-1' r':%s' % (nodename, first_part, self.max_buffer_size, devno)) @sn.sanity_function def 
do_sanity_check(self): failures = [] devices_found = set(sn.extractall( r'^\s*([^,]*),\s*Detected devices: %s' % self.num_gpus_per_node, self.stdout, 1 )) sn.evaluate(sn.assert_eq( self.job.num_tasks, len(devices_found), msg='requested {0} node(s), got {1} (nodelist: %s)' % ','.join(sorted(devices_found)))) good_nodes = set(sn.extractall( r'^\s*([^,]*),\s*NID\s*=\s*\S+\s+Result = PASS', self.stdout, 1 )) sn.evaluate(sn.assert_eq( devices_found, good_nodes, msg='check failed on the following node(s): %s' % ','.join(sorted(devices_found - good_nodes))) ) # Sanity is fine, fill in the perf. patterns based on the exact node id for nodename in devices_found: for xfer_kind in ('h2d', 'd2h', 'd2d'): for devno in range(self.num_gpus_per_node): perfvar = '%s_gpu_%s_%s_bw' % (nodename, devno, xfer_kind) perfvar = 'bw_%s_%s_gpu_%s' % (xfer_kind, nodename, devno) self.perf_patterns[perfvar] = sn.extractsingle( self._xfer_pattern(xfer_kind, devno, nodename), self.stdout, 1, float, 0 ) partname = self.current_partition.fullname refkey = '%s:%s' % (partname, perfvar) bwkey = '%s:%s' % (partname, xfer_kind) self.reference[refkey] = self.__bwref[bwkey] return True
42.873239
79
0.53433
5,768
0.947438
0
0
5,822
0.956307
0
0
1,941
0.318824
4a548d3916f1d9f7cfe21d9195722cae0fa08812
5,094
py
Python
sympy/series/tests/test_demidovich.py
msgoff/sympy
1e7daef7514902f5e89718fa957b7b36c6669a10
[ "BSD-3-Clause" ]
null
null
null
sympy/series/tests/test_demidovich.py
msgoff/sympy
1e7daef7514902f5e89718fa957b7b36c6669a10
[ "BSD-3-Clause" ]
null
null
null
sympy/series/tests/test_demidovich.py
msgoff/sympy
1e7daef7514902f5e89718fa957b7b36c6669a10
[ "BSD-3-Clause" ]
null
null
null
from sympy import ( limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S, ) # Numbers listed with the tests refer to problem numbers in the book # "Anti-demidovich, problemas resueltos, Ed. URSS" x = Symbol("x") def test_leadterm(): assert (3 + 2 * x ** (log(3) / log(2) - 1)).leadterm(x) == (3, 0) def root3(x): return root(x, 3) def root4(x): return root(x, 4) def test_Limits_simple_0(): assert limit((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x), x, oo) == 3 # 175 def test_Limits_simple_1(): assert limit((x + 1) * (x + 2) * (x + 3) / x ** 3, x, oo) == 1 # 172 assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0 # 179 assert ( limit((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1), x, oo) == 8 ) # Primjer 1 assert limit(x / root3(x ** 3 + 10), x, oo) == 1 # Primjer 2 assert limit((x + 1) ** 2 / (x ** 2 + 1), x, oo) == 1 # 181 def test_Limits_simple_2(): assert limit(1000 * x / (x ** 2 - 1), x, oo) == 0 # 182 assert limit((x ** 2 - 5 * x + 1) / (3 * x + 7), x, oo) is oo # 183 assert limit((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5), x, oo) == 0 # 184 assert limit((2 * x ** 2 - 3 * x - 4) / sqrt(x ** 4 + 1), x, oo) == 2 # 186 assert limit((2 * x + 3) / (x + root3(x)), x, oo) == 2 # 187 assert limit(x ** 2 / (10 + x * sqrt(x)), x, oo) is oo # 188 assert limit(root3(x ** 2 + 1) / (x + 1), x, oo) == 0 # 189 assert limit(sqrt(x) / sqrt(x + sqrt(x + sqrt(x))), x, oo) == 1 # 190 def test_Limits_simple_3a(): a = Symbol("a") # issue 3513 assert together(limit((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3), x, a)) == ( a - 1 ) / ( 3 * a ** 2 ) # 196 def test_Limits_simple_3b(): h = Symbol("h") assert limit(((x + h) ** 3 - x ** 3) / h, h, 0) == 3 * x ** 2 # 197 assert limit((1 / (1 - x) - 3 / (1 - x ** 3)), x, 1) == -1 # 198 assert ( limit((sqrt(1 + x) - 1) / (root3(1 + x) - 1), x, 0) == Rational(3) / 2 ) # Primer 4 assert limit((sqrt(x) - 1) / (x - 1), x, 1) == Rational(1) / 2 # 199 assert limit((sqrt(x) - 8) / (root3(x) - 4), x, 64) == 3 # 200 assert limit((root3(x) - 1) / (root4(x) - 1), x, 1) == Rational(4) / 3 # 201 assert ( limit((root3(x ** 2) - 2 * root3(x) + 1) / (x - 1) ** 2, x, 1) == Rational(1) / 9 ) # 202 def test_Limits_simple_4a(): a = Symbol("a") assert limit((sqrt(x) - sqrt(a)) / (x - a), x, a) == 1 / (2 * sqrt(a)) # Primer 5 assert limit((sqrt(x) - 1) / (root3(x) - 1), x, 1) == Rational(3, 2) # 205 assert limit((sqrt(1 + x) - sqrt(1 - x)) / x, x, 0) == 1 # 207 assert limit(sqrt(x ** 2 - 5 * x + 6) - x, x, oo) == Rational(-5, 2) # 213 def test_limits_simple_4aa(): assert limit(x * (sqrt(x ** 2 + 1) - x), x, oo) == Rational(1) / 2 # 214 def test_Limits_simple_4b(): # issue 3511 assert limit(x - root3(x ** 3 - 1), x, oo) == 0 # 215 def test_Limits_simple_4c(): assert limit(log(1 + exp(x)) / x, x, -oo) == 0 # 267a assert limit(log(1 + exp(x)) / x, x, oo) == 1 # 267b def test_bounded(): assert limit(sin(x) / x, x, oo) == 0 # 216b assert limit(x * sin(1 / x), x, 0) == 0 # 227a def test_f1a(): # issue 3508: assert limit((sin(2 * x) / x) ** (1 + x), x, 0) == 2 # Primer 7 def test_f1a2(): # issue 3509: assert limit(((x - 1) / (x + 1)) ** x, x, oo) == exp(-2) # Primer 9 def test_f1b(): m = Symbol("m") n = Symbol("n") h = Symbol("h") a = Symbol("a") assert limit(sin(x) / x, x, 2) == sin(2) / 2 # 216a assert limit(sin(3 * x) / x, x, 0) == 3 # 217 assert limit(sin(5 * x) / sin(2 * x), x, 0) == Rational(5, 2) # 218 assert limit(sin(pi * x) / sin(3 * pi * x), x, 0) == Rational(1, 3) # 219 assert limit(x * sin(pi / x), x, oo) == pi # 220 assert limit((1 
- cos(x)) / x ** 2, x, 0) == S.Half # 221 assert limit(x * sin(1 / x), x, oo) == 1 # 227b assert limit((cos(m * x) - cos(n * x)) / x ** 2, x, 0) == ( (n ** 2 - m ** 2) / 2 ) # 232 assert limit((tan(x) - sin(x)) / x ** 3, x, 0) == S.Half # 233 assert limit((x - sin(2 * x)) / (x + sin(3 * x)), x, 0) == -Rational(1, 4) # 237 assert limit((1 - sqrt(cos(x))) / x ** 2, x, 0) == Rational(1, 4) # 239 assert limit((sqrt(1 + sin(x)) - sqrt(1 - sin(x))) / x, x, 0) == 1 # 240 assert limit((1 + h / x) ** x, x, oo) == exp(h) # Primer 9 assert limit((sin(x) - sin(a)) / (x - a), x, a) == cos(a) # 222, *176 assert limit((cos(x) - cos(a)) / (x - a), x, a) == -sin(a) # 223 assert limit((sin(x + h) - sin(x)) / h, h, 0) == cos(x) # 225 def test_f2a(): assert limit(((x + 1) / (2 * x + 1)) ** (x ** 2), x, oo) == 0 # Primer 8 def test_f2(): assert limit((sqrt(cos(x)) - root3(cos(x))) / (sin(x) ** 2), x, 0) == -Rational( 1, 12 ) # *184 def test_f3(): a = Symbol("a") # issue 3504 assert limit(asin(a * x) / x, x, 0) == a
30.686747
86
0.458186
0
0
0
0
0
0
0
0
522
0.102473
4a54b5369073023cda9e88293fbf883952f8a99e
493
py
Python
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
1
2022-01-19T22:24:35.000Z
2022-01-19T22:24:35.000Z
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
4
2021-12-28T05:15:49.000Z
2021-12-28T05:18:25.000Z
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
null
null
null
from notion.client import NotionClient
from notion.settings import Settings


class Context:
    def __init__(self):
        self.settings = Settings.from_file()
        self._client = None

    def get_client(self):
        if not self._client:
            self.settings.validate()
            self._client = NotionClient(token_v2=self.settings.token, monitor=False)
        return self._client

    def update_settings(self, **kwargs):
        self.settings = self.settings.update(**kwargs)
27.388889
84
0.6714
414
0.839757
0
0
0
0
0
0
0
0
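A usage sketch for the Context wrapper above; the settings field name passed to update_settings() is an assumption inferred from get_client(), since the Settings class itself is not shown.

ctx = Context()                                    # loads Settings.from_file()
ctx.update_settings(token="token_v2-value-here")   # "token" key is assumed from get_client()
client = ctx.get_client()                          # lazily builds and caches a NotionClient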
4a5505f918153846b19b1a912cedc52b11e1b4e9
1,552
py
Python
setup.py
rgooler/bootstrap-pip
34eaa648c81e3f8213b97cd33bda23b50743122a
[ "Unlicense" ]
null
null
null
setup.py
rgooler/bootstrap-pip
34eaa648c81e3f8213b97cd33bda23b50743122a
[ "Unlicense" ]
null
null
null
setup.py
rgooler/bootstrap-pip
34eaa648c81e3f8213b97cd33bda23b50743122a
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python

import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def read(*paths):
    """Build a file path from *paths* and return the contents."""
    with open(os.path.join(*paths), 'r') as f:
        return f.read()


install_requires = []
# install_requires = ['requests >= 2.1.0']

# For SNI support in Python 2, must install the following packages
# if sys.version_info[0] == 2:
#     install_requires.append('pyOpenSSL >= 0.14')
#     install_requires.append('ndg-httpsclient >= 0.3.3')
#     install_requires.append('pyasn1 >= 0.1.7')

setup(
    name='mymodule',
    packages=['mymodule'],
    version='0.1',
    description='Desc',
    long_description=(read('README.rst') + '\n\n' +
                      read('HISTORY.rst') + '\n\n' +
                      read('AUTHORS.rst')),
    url='http://github.com/rgooler/bootstrap-pip/',
    license='MIT',
    author='Ryan Gooler',
    author_email='[email protected]',
    py_modules=['mymodule'],
    install_requires=install_requires,
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
30.431373
71
0.614046
0
0
0
0
0
0
0
0
893
0.575387
4a553a81b1d7bdf7e54e2eefdce19b67fef643fd
138
py
Python
cfdata/tabular/converters/__init__.py
carefree0910/carefree-data
ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19
[ "MIT" ]
9
2020-10-25T11:52:34.000Z
2022-01-23T02:45:41.000Z
cfdata/tabular/converters/__init__.py
carefree0910/carefree-data
ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19
[ "MIT" ]
2
2020-08-02T01:58:48.000Z
2021-02-26T11:24:19.000Z
cfdata/tabular/converters/__init__.py
carefree0910/carefree-data
ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19
[ "MIT" ]
1
2021-11-04T14:34:13.000Z
2021-11-04T14:34:13.000Z
from .base import *
from .string import *
from .categorical import *
from .numerical import *

__all__ = ["Converter", "converter_dict"]
17.25
41
0.724638
0
0
0
0
0
0
0
0
27
0.195652
4a579bbd92a904600d51867b452bca5458bcdea4
114
py
Python
hello_world.py
BronWang/first_github
9cdd40458014a448a5121268ebca907e3cba1eee
[ "MIT" ]
null
null
null
hello_world.py
BronWang/first_github
9cdd40458014a448a5121268ebca907e3cba1eee
[ "MIT" ]
null
null
null
hello_world.py
BronWang/first_github
9cdd40458014a448a5121268ebca907e3cba1eee
[ "MIT" ]
null
null
null
def hello_world():
    """Print "Hello world"."""
    message = 'hello world'
    print(message.title())


hello_world()
14.25
27
0.622807
0
0
0
0
0
0
0
0
36
0.305085
4a5c703189126174bfc5a0bc0302603a5b45186d
583
py
Python
Python/Samples/Observer/UtObserver.py
plasroom46/DesignPattern.Sample
86c05c5ae356cb01f3d075f248c45da3e6534d07
[ "MIT" ]
9
2019-03-14T01:54:31.000Z
2021-11-26T13:00:32.000Z
Python/Samples/Observer/UtObserver.py
plasroom46/DesignPattern.Sample
86c05c5ae356cb01f3d075f248c45da3e6534d07
[ "MIT" ]
null
null
null
Python/Samples/Observer/UtObserver.py
plasroom46/DesignPattern.Sample
86c05c5ae356cb01f3d075f248c45da3e6534d07
[ "MIT" ]
2
2019-08-19T06:00:04.000Z
2021-07-15T01:23:52.000Z
import unittest

from Observers import Observer, ObserverMailServer, ObserverPbx
from Subjects import Subject, SubjectEflow


class UtVisitor(unittest.TestCase):

    def test_observer(self):
        # Create observers
        pbx = ObserverPbx()
        ms = ObserverMailServer()

        # Create subject
        subject = SubjectEflow()
        subject.attach(pbx)
        subject.attach(ms)

        # Notify when JB is on leave of absence
        subject.notify("JB", "Hachi")

        self.assertTrue(True)


if __name__ == '__main__':
    unittest.main()
21.592593
63
0.626072
408
0.699828
0
0
0
0
0
0
91
0.156089
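The Observers and Subjects modules imported by the test above are not shown; a minimal sketch of the interfaces the test relies on could look like this (attach and notify are taken from the test, the update() signature is an assumption).

class Observer:
    def update(self, name, reason):
        raise NotImplementedError


class Subject:
    def __init__(self):
        self._observers = []

    def attach(self, observer):
        # Register an observer to be called on every notify().
        self._observers.append(observer)

    def notify(self, name, reason):
        # Push the event to every registered observer.
        for observer in self._observers:
            observer.update(name, reason)


class ObserverPbx(Observer):
    def update(self, name, reason):
        print("PBX: route calls for %s to %s" % (name, reason))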
4a5d879c71ea4b0d47b4f6335a7e75debaa68573
1,368
py
Python
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
def can_build(env, platform):
    return True


def configure(env):
    pass


def get_doc_classes():
    return [
        "WorldArea",
        "VoxelLight",
        "VoxelLightNode",
        "VoxelLevelGenerator",
        "VoxelLevelGeneratorFlat",
        "VoxelSurfaceMerger",
        "VoxelSurfaceSimple",
        "VoxelSurface",
        "VoxelLibraryMerger",
        "VoxelLibrarySimple",
        "VoxelLibrary",
        "VoxelLibraryMergerPCM",
        "VoxelMaterialCache",
        "VoxelMaterialCachePCM",
        "VoxelCubePoints",
        "VoxelMesherCubic",
        "VoxelMeshData",
        "MarchingCubesCellData",
        "VoxelMesherMarchingCubes",
        "VoxelMesher",
        "EnvironmentData",
        "VoxelChunk",
        "VoxelChunkDefault",
        "VoxelStructure",
        "BlockVoxelStructure",
        "VoxelWorld",
        "VoxelMesherBlocky",
        "VoxelWorldBlocky",
        "VoxelChunkBlocky",
        "VoxelMesherLiquidBlocky",
        "VoxelWorldMarchingCubes",
        "VoxelChunkMarchingCubes",
        "VoxelMesherCubic",
        "VoxelWorldCubic",
        "VoxelChunkCubic",
        "VoxelMesherDefault",
        "VoxelWorldDefault",
        "VoxelJob",
        "VoxelTerrainJob",
        "VoxelLightJob",
        "VoxelPropJob",
        "VoxelMesherJobStep",
    ]


def get_doc_path():
    return "doc_classes"
18.739726
35
0.576754
0
0
0
0
0
0
0
0
778
0.568713
4a5f2654f1609f5c5550084dae95f8a37c34d9e6
4,247
py
Python
Python/2021/day_04/day_04.py
JonoRicci/Advent-Of-Code
1c092410d6ece195f4689788af4b1091acf10fbb
[ "MIT" ]
null
null
null
Python/2021/day_04/day_04.py
JonoRicci/Advent-Of-Code
1c092410d6ece195f4689788af4b1091acf10fbb
[ "MIT" ]
null
null
null
Python/2021/day_04/day_04.py
JonoRicci/Advent-Of-Code
1c092410d6ece195f4689788af4b1091acf10fbb
[ "MIT" ]
null
null
null
""" Day 04 """ from logger import logger def main() -> None: """ Import the puzzle input, process and display the results. """ puzzle_input = import_list() logger.debug(puzzle_input) final_score = play_bingo(puzzle_input) for result in final_score: logger.info(f"The final score is: {result}.") def import_list() -> list: """ Import the puzzle input and return a list. :return: Puzzle input text file as list :rtype: list """ file = open("puzzle-input", "r") string_list = file.read().splitlines() file.close() return string_list def play_bingo(bingo_cards: list) -> list: """ Extract winning numbers, bingo boards from input. Make a separate 2D list tracking wins. For each winning number, check every board row and column for a match. Add matches to the 2D list tracking wins. Once done, check 2D list for winning columns / rows. Add winning boards to new list along with winning number. Multiply to get score. :param bingo_cards: puzzle input where each line is a string :return: First and last winning board score :rtype: list """ winning_numbers = [int(x) for x in bingo_cards[0].split(",")] logger.debug(f" Winning numbers: {winning_numbers}") single_board = [] all_boards = [] final_score_list = [] # Get Bingo Boards for line in range(len(bingo_cards)): if "," not in bingo_cards[line]: row = [int(x) for x in bingo_cards[line].split()] if row: logger.debug(row) single_board.append(row) elif single_board: all_boards.append(single_board) single_board = [] # Set up separate 2D list tracking matches to winning numbers. unmarked_tracker = [] for board in all_boards: assert len(board) == 5 and len(board[0]) == 5 unmarked_tracker.append([[False for _ in range(5)] for _ in range(5)]) # Set up list to track winning boards. winning_board = [False for _ in range(len(all_boards))] for number in winning_numbers: for index, board in enumerate(all_boards): logger.debug(f"Checking board: {index} for {number}") # Check for winning numbers. for row in range(5): for column in range(5): if board[row][column] == number: logger.debug(f"{unmarked_tracker[index][row][column]} " f"is True.") unmarked_tracker[index][row][column] = True # Check for 5 in a row. won = False for row in range(5): ok = True for column in range(5): if not unmarked_tracker[index][row][column]: ok = False if ok: won = True # Check for 5 in a column. for column in range(5): ok = True for row in range(5): if not unmarked_tracker[index][row][column]: ok = False if ok: won = True # Check for each winning board. if won and not winning_board[index]: winning_board[index] = True winning_boards_count = len([j for j in range(len(all_boards)) if winning_board[j]]) # If first or last board. if winning_boards_count == 1 or winning_boards_count == \ len(all_boards): # Calculate all unmarked. unmarked = 0 for row in range(5): for column in range(5): if not unmarked_tracker[index][row][column]: unmarked += board[row][column] final_score_list.append(unmarked * number) logger.debug(f"The final score is: {final_score_list[-1]}, " f"which is {unmarked} * {number}.") return final_score_list if __name__ == "__main__": main()
32.419847
80
0.53732
0
0
0
0
0
0
0
0
1,267
0.298328
4a612749e70c643dade9a21e3ef7dab25d3f46e9
1,982
py
Python
timeeval_experiments/algorithms/eif.py
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
2
2022-01-29T03:46:31.000Z
2022-02-14T14:06:35.000Z
timeeval_experiments/algorithms/eif.py
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
null
null
null
timeeval_experiments/algorithms/eif.py
HPI-Information-Systems/TimeEval
9b2717b89decd57dd09e04ad94c120f13132d7b8
[ "MIT" ]
null
null
null
from durations import Duration
from typing import Any, Dict, Optional

from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig


_eif_parameters: Dict[str, Dict[str, Any]] = {
    "extension_level": {
        "defaultValue": None,
        "description": "Extension level 0 resembles standard isolation forest. If unspecified (`None`), then `extension_level=X.shape[1] - 1`.",
        "name": "extension_level",
        "type": "int"
    },
    "limit": {
        "defaultValue": None,
        "description": "The maximum allowed tree depth. This is by default set to average length of unsucessful search in a binary tree.",
        "name": "limit",
        "type": "int"
    },
    "max_samples": {
        "defaultValue": None,
        "description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`None`), then `max_samples=min(256, X.shape[0])`.",
        "name": "max_samples",
        "type": "float"
    },
    "n_trees": {
        "defaultValue": 200,
        "description": "The number of decision trees (base estimators) in the forest (ensemble).",
        "name": "n_trees",
        "type": "int"
    },
    "random_state": {
        "defaultValue": 42,
        "description": "Seed for random number generation.",
        "name": "random_state",
        "type": "int"
    }
}


def eif(params: ParameterConfig = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
    return Algorithm(
        name="Extended Isolation Forest (EIF)",
        main=DockerAdapter(
            image_name="registry.gitlab.hpi.de/akita/i/eif",
            skip_pull=skip_pull,
            timeout=timeout,
            group_privileges="akita",
        ),
        preprocess=None,
        postprocess=None,
        param_schema=_eif_parameters,
        param_config=params or ParameterConfig.defaults(),
        data_as_file=True,
        training_type=TrainingType.UNSUPERVISED,
        input_dimensionality=InputDimensionality("multivariate")
    )
33.033333
180
0.676085
0
0
0
0
0
0
0
0
938
0.473259
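A usage sketch for the eif() factory above; it only exercises what the snippet itself defines and leaves every other argument at its default.

algorithm = eif(skip_pull=True)           # params=None falls back to ParameterConfig.defaults()
print(algorithm.name)                     # Extended Isolation Forest (EIF)
print(sorted(_eif_parameters.keys()))     # the tunable parameters documented above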
4a614519b633b8e43e30737c32c7066d2365e9ab
5,548
py
Python
deepchem/models/tf_new_models/graph_models.py
KEHANG/deepchem
367bea14cab47b1093bf106e0c196bb02d55c755
[ "MIT" ]
null
null
null
deepchem/models/tf_new_models/graph_models.py
KEHANG/deepchem
367bea14cab47b1093bf106e0c196bb02d55c755
[ "MIT" ]
null
null
null
deepchem/models/tf_new_models/graph_models.py
KEHANG/deepchem
367bea14cab47b1093bf106e0c196bb02d55c755
[ "MIT" ]
1
2021-07-09T19:58:54.000Z
2021-07-09T19:58:54.000Z
""" Convenience classes for assembling graph models. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals __author__ = "Han Altae-Tran and Bharath Ramsundar" __copyright__ = "Copyright 2016, Stanford University" __license__ = "MIT" import tensorflow as tf from deepchem.nn.layers import GraphGather from deepchem.models.tf_new_models.graph_topology import GraphTopology class SequentialGraph(object): """An analog of Keras Sequential class for Graph data. Like the Sequential class from Keras, but automatically passes topology placeholders from GraphTopology to each graph layer (from layers) added to the network. Non graph layers don't get the extra placeholders. """ def __init__(self, n_feat): """ Parameters ---------- n_feat: int Number of features per atom. """ self.graph = tf.Graph() with self.graph.as_default(): self.graph_topology = GraphTopology(n_feat) self.output = self.graph_topology.get_atom_features_placeholder() # Keep track of the layers self.layers = [] def add(self, layer): """Adds a new layer to model.""" with self.graph.as_default(): ############################################# DEBUG #print("start - add()") #print("self.output") #print(self.output) ############################################# DEBUG # For graphical layers, add connectivity placeholders if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']: if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")): assert self.layers[-1].__name__ != "GraphGather", \ 'Cannot use GraphConv or GraphGather layers after a GraphGather' self.output = layer([self.output] + self.graph_topology.get_topology_placeholders()) else: self.output = layer(self.output) ############################################# DEBUG #print("end- add()") #print("self.output") #print(self.output) ############################################# DEBUG # Add layer to the layer list self.layers.append(layer) def get_graph_topology(self): return self.graph_topology def get_num_output_features(self): """Gets the output shape of the featurization layers of the network""" return self.layers[-1].output_shape[1] def return_outputs(self): return self.output def return_inputs(self): return self.graph_topology.get_input_placeholders() def get_layer(self, layer_id): return self.layers[layer_id] class SequentialSupportGraph(object): """An analog of Keras Sequential model for test/support models.""" def __init__(self, n_feat): """ Parameters ---------- n_feat: int Number of atomic features. """ self.graph = tf.Graph() with self.graph.as_default(): # Create graph topology and x self.test_graph_topology = GraphTopology(n_feat, name='test') self.support_graph_topology = GraphTopology(n_feat, name='support') self.test = self.test_graph_topology.get_atom_features_placeholder() self.support = self.support_graph_topology.get_atom_features_placeholder() # Keep track of the layers self.layers = [] # Whether or not we have used the GraphGather layer yet self.bool_pre_gather = True def add(self, layer): """Adds a layer to both test/support stacks. Note that the layer transformation is performed independently on the test/support tensors. """ with self.graph.as_default(): self.layers.append(layer) # Update new value of x if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']: assert self.bool_pre_gather, "Cannot apply graphical layers after gather." 
self.test = layer([self.test] + self.test_graph_topology.topology) self.support = layer([self.support] + self.support_graph_topology.topology) else: self.test = layer(self.test) self.support = layer(self.support) if type(layer).__name__ == 'GraphGather': self.bool_pre_gather = False # Set flag to stop adding topology def add_test(self, layer): """Adds a layer to test.""" with self.graph.as_default(): self.layers.append(layer) # Update new value of x if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']: self.test = layer([self.test] + self.test_graph_topology.topology) else: self.test = layer(self.test) def add_support(self, layer): """Adds a layer to support.""" with self.graph.as_default(): self.layers.append(layer) # Update new value of x if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']: self.support = layer([self.support] + self.support_graph_topology.topology) else: self.support = layer(self.support) def join(self, layer): """Joins test and support to a two input two output layer""" with self.graph.as_default(): self.layers.append(layer) self.test, self.support = layer([self.test, self.support]) def get_test_output(self): return self.test def get_support_output(self): return self.support def return_outputs(self): return [self.test] + [self.support] def return_inputs(self): return (self.test_graph_topology.get_inputs() + self.support_graph_topology.get_inputs())
32.635294
82
0.6469
5,109
0.920872
0
0
0
0
0
0
1,983
0.357426
4a618ed57cbfdde42c612f538425cdaf22f7923a
20,082
py
Python
yandex/cloud/access/access_pb2.py
IIKovalenko/python-sdk
980e2c5d848eadb42799132b35a9f58ab7b27157
[ "MIT" ]
1
2019-06-07T10:45:58.000Z
2019-06-07T10:45:58.000Z
yandex/cloud/access/access_pb2.py
IIKovalenko/python-sdk
980e2c5d848eadb42799132b35a9f58ab7b27157
[ "MIT" ]
null
null
null
yandex/cloud/access/access_pb2.py
IIKovalenko/python-sdk
980e2c5d848eadb42799132b35a9f58ab7b27157
[ "MIT" ]
null
null
null
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: yandex/cloud/access/access.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='yandex/cloud/access/access.proto', package='yandex.cloud.access', syntax='proto3', serialized_options=_b('Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;access'), serialized_pb=_b('\n yandex/cloud/access/access.proto\x12\x13yandex.cloud.access\x1a\x1dyandex/cloud/validation.proto\"-\n\x07Subject\x12\x14\n\x02id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x0c\n\x04type\x18\x02 \x01(\t\"_\n\rAccessBinding\x12\x19\n\x07role_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x33\n\x07subject\x18\x02 \x01(\x0b\x32\x1c.yandex.cloud.access.SubjectB\x04\xe8\xc7\x31\x01\"|\n\x19ListAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"r\n\x1aListAccessBindingsResponse\x12;\n\x0f\x61\x63\x63\x65ss_bindings\x18\x01 \x03(\x0b\x32\".yandex.cloud.access.AccessBinding\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x80\x01\n\x18SetAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x41\n\x0f\x61\x63\x63\x65ss_bindings\x18\x02 \x03(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01\"0\n\x19SetAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x8e\x01\n\x1bUpdateAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12L\n\x15\x61\x63\x63\x65ss_binding_deltas\x18\x02 \x03(\x0b\x32\'.yandex.cloud.access.AccessBindingDeltaB\x04\xe8\xc7\x31\x01\"3\n\x1cUpdateAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x96\x01\n\x12\x41\x63\x63\x65ssBindingDelta\x12>\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32(.yandex.cloud.access.AccessBindingActionB\x04\xe8\xc7\x31\x01\x12@\n\x0e\x61\x63\x63\x65ss_binding\x18\x02 \x01(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01*Q\n\x13\x41\x63\x63\x65ssBindingAction\x12%\n!ACCESS_BINDING_ACTION_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02\x42@Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;accessb\x06proto3') , dependencies=[yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,]) _ACCESSBINDINGACTION = _descriptor.EnumDescriptor( name='AccessBindingAction', full_name='yandex.cloud.access.AccessBindingAction', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='ACCESS_BINDING_ACTION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='ADD', index=1, number=1, serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REMOVE', index=2, number=2, serialized_options=None, type=None), ], containing_type=None, serialized_options=None, serialized_start=1006, serialized_end=1087, ) _sym_db.RegisterEnumDescriptor(_ACCESSBINDINGACTION) AccessBindingAction = 
enum_type_wrapper.EnumTypeWrapper(_ACCESSBINDINGACTION) ACCESS_BINDING_ACTION_UNSPECIFIED = 0 ADD = 1 REMOVE = 2 _SUBJECT = _descriptor.Descriptor( name='Subject', full_name='yandex.cloud.access.Subject', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='yandex.cloud.access.Subject.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='yandex.cloud.access.Subject.type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=88, serialized_end=133, ) _ACCESSBINDING = _descriptor.Descriptor( name='AccessBinding', full_name='yandex.cloud.access.AccessBinding', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='role_id', full_name='yandex.cloud.access.AccessBinding.role_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='subject', full_name='yandex.cloud.access.AccessBinding.subject', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001'), file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=135, serialized_end=230, ) _LISTACCESSBINDINGSREQUEST = _descriptor.Descriptor( name='ListAccessBindingsRequest', full_name='yandex.cloud.access.ListAccessBindingsRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_id', full_name='yandex.cloud.access.ListAccessBindingsRequest.resource_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_size', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_size', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\372\3071\006<=1000'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='page_token', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=_b('\212\3101\005<=100'), file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=232, serialized_end=356, ) _LISTACCESSBINDINGSRESPONSE = _descriptor.Descriptor( name='ListAccessBindingsResponse', full_name='yandex.cloud.access.ListAccessBindingsResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='access_bindings', full_name='yandex.cloud.access.ListAccessBindingsResponse.access_bindings', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='next_page_token', full_name='yandex.cloud.access.ListAccessBindingsResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=358, serialized_end=472, ) _SETACCESSBINDINGSREQUEST = _descriptor.Descriptor( name='SetAccessBindingsRequest', full_name='yandex.cloud.access.SetAccessBindingsRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsRequest.resource_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='access_bindings', full_name='yandex.cloud.access.SetAccessBindingsRequest.access_bindings', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001'), file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=475, serialized_end=603, ) _SETACCESSBINDINGSMETADATA = _descriptor.Descriptor( name='SetAccessBindingsMetadata', full_name='yandex.cloud.access.SetAccessBindingsMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsMetadata.resource_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=605, serialized_end=653, ) _UPDATEACCESSBINDINGSREQUEST = _descriptor.Descriptor( name='UpdateAccessBindingsRequest', full_name='yandex.cloud.access.UpdateAccessBindingsRequest', filename=None, 
file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.resource_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='access_binding_deltas', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.access_binding_deltas', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001'), file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=656, serialized_end=798, ) _UPDATEACCESSBINDINGSMETADATA = _descriptor.Descriptor( name='UpdateAccessBindingsMetadata', full_name='yandex.cloud.access.UpdateAccessBindingsMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsMetadata.resource_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=800, serialized_end=851, ) _ACCESSBINDINGDELTA = _descriptor.Descriptor( name='AccessBindingDelta', full_name='yandex.cloud.access.AccessBindingDelta', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='action', full_name='yandex.cloud.access.AccessBindingDelta.action', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='access_binding', full_name='yandex.cloud.access.AccessBindingDelta.access_binding', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=_b('\350\3071\001'), file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=854, serialized_end=1004, ) _ACCESSBINDING.fields_by_name['subject'].message_type = _SUBJECT _LISTACCESSBINDINGSRESPONSE.fields_by_name['access_bindings'].message_type = _ACCESSBINDING _SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings'].message_type = _ACCESSBINDING _UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas'].message_type = _ACCESSBINDINGDELTA _ACCESSBINDINGDELTA.fields_by_name['action'].enum_type = _ACCESSBINDINGACTION _ACCESSBINDINGDELTA.fields_by_name['access_binding'].message_type = _ACCESSBINDING DESCRIPTOR.message_types_by_name['Subject'] = _SUBJECT DESCRIPTOR.message_types_by_name['AccessBinding'] = 
_ACCESSBINDING DESCRIPTOR.message_types_by_name['ListAccessBindingsRequest'] = _LISTACCESSBINDINGSREQUEST DESCRIPTOR.message_types_by_name['ListAccessBindingsResponse'] = _LISTACCESSBINDINGSRESPONSE DESCRIPTOR.message_types_by_name['SetAccessBindingsRequest'] = _SETACCESSBINDINGSREQUEST DESCRIPTOR.message_types_by_name['SetAccessBindingsMetadata'] = _SETACCESSBINDINGSMETADATA DESCRIPTOR.message_types_by_name['UpdateAccessBindingsRequest'] = _UPDATEACCESSBINDINGSREQUEST DESCRIPTOR.message_types_by_name['UpdateAccessBindingsMetadata'] = _UPDATEACCESSBINDINGSMETADATA DESCRIPTOR.message_types_by_name['AccessBindingDelta'] = _ACCESSBINDINGDELTA DESCRIPTOR.enum_types_by_name['AccessBindingAction'] = _ACCESSBINDINGACTION _sym_db.RegisterFileDescriptor(DESCRIPTOR) Subject = _reflection.GeneratedProtocolMessageType('Subject', (_message.Message,), dict( DESCRIPTOR = _SUBJECT, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.Subject) )) _sym_db.RegisterMessage(Subject) AccessBinding = _reflection.GeneratedProtocolMessageType('AccessBinding', (_message.Message,), dict( DESCRIPTOR = _ACCESSBINDING, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBinding) )) _sym_db.RegisterMessage(AccessBinding) ListAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('ListAccessBindingsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTACCESSBINDINGSREQUEST, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsRequest) )) _sym_db.RegisterMessage(ListAccessBindingsRequest) ListAccessBindingsResponse = _reflection.GeneratedProtocolMessageType('ListAccessBindingsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTACCESSBINDINGSRESPONSE, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsResponse) )) _sym_db.RegisterMessage(ListAccessBindingsResponse) SetAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('SetAccessBindingsRequest', (_message.Message,), dict( DESCRIPTOR = _SETACCESSBINDINGSREQUEST, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsRequest) )) _sym_db.RegisterMessage(SetAccessBindingsRequest) SetAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('SetAccessBindingsMetadata', (_message.Message,), dict( DESCRIPTOR = _SETACCESSBINDINGSMETADATA, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsMetadata) )) _sym_db.RegisterMessage(SetAccessBindingsMetadata) UpdateAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsRequest', (_message.Message,), dict( DESCRIPTOR = _UPDATEACCESSBINDINGSREQUEST, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsRequest) )) _sym_db.RegisterMessage(UpdateAccessBindingsRequest) UpdateAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsMetadata', (_message.Message,), dict( DESCRIPTOR = _UPDATEACCESSBINDINGSMETADATA, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsMetadata) )) _sym_db.RegisterMessage(UpdateAccessBindingsMetadata) AccessBindingDelta = _reflection.GeneratedProtocolMessageType('AccessBindingDelta', 
(_message.Message,), dict( DESCRIPTOR = _ACCESSBINDINGDELTA, __module__ = 'yandex.cloud.access.access_pb2' # @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBindingDelta) )) _sym_db.RegisterMessage(AccessBindingDelta) DESCRIPTOR._options = None _SUBJECT.fields_by_name['id']._options = None _ACCESSBINDING.fields_by_name['role_id']._options = None _ACCESSBINDING.fields_by_name['subject']._options = None _LISTACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None _LISTACCESSBINDINGSREQUEST.fields_by_name['page_size']._options = None _LISTACCESSBINDINGSREQUEST.fields_by_name['page_token']._options = None _SETACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None _SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings']._options = None _UPDATEACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None _UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas']._options = None _ACCESSBINDINGDELTA.fields_by_name['action']._options = None _ACCESSBINDINGDELTA.fields_by_name['access_binding']._options = None # @@protoc_insertion_point(module_scope)
40.900204
1,963
0.7684
0
0
0
0
0
0
0
0
6,224
0.309929
4a629d479574b8f27c92b3a96ac0d80522d6e255
992
py
Python
questionbank/users/urls.py
SyafiqTermizi/questionbank
33e58db1a1610a85bd30a85d2f52e819bc27058b
[ "MIT" ]
1
2018-04-17T23:58:46.000Z
2018-04-17T23:58:46.000Z
questionbank/users/urls.py
SyafiqTermizi/questionbank
33e58db1a1610a85bd30a85d2f52e819bc27058b
[ "MIT" ]
8
2019-12-04T23:08:00.000Z
2022-02-13T22:48:26.000Z
questionbank/users/urls.py
SyafiqTermizi/questionbank
33e58db1a1610a85bd30a85d2f52e819bc27058b
[ "MIT" ]
null
null
null
from django.urls import path

from .views import (
    UserListView, UserUpdateView, UserProfileView, UserDeleteView,
    AcceptInvitationView, SpecialtyListView, SpecialtyCreateView,
    SpecialtyUpdateView, SpecialtyDeleteView
)

app_name = 'users'

urlpatterns = [
    path('', UserListView.as_view(), name='list'),
    path('<int:pk>/', UserUpdateView.as_view(), name='update'),
    path('<int:pk>/delete/', UserDeleteView.as_view(), name='delete'),
    path('profile/', UserProfileView.as_view(), name='profile'),
    path(
        'invite/<str:token>/', AcceptInvitationView.as_view(),
        name='accept_invite'
    ),
    path('specialties/', SpecialtyListView.as_view(), name='specialty_list'),
    path('specialties/create/', SpecialtyCreateView.as_view(), name='specialty_create'),
    path('specialties/<int:pk>/update/', SpecialtyUpdateView.as_view(), name='specialty_update'),
    path('specialties/<int:pk>/delete/', SpecialtyDeleteView.as_view(), name='specialty_delete')
]
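A minimal usage sketch for the urlconf above: with `app_name = 'users'`, these routes are reversed by namespaced name. The `users/` include prefix is an assumption; it depends on how the project-level urls.py mounts this module.

# Hypothetical usage, assuming the project urls.py contains something like
# path('users/', include('questionbank.users.urls')).
from django.urls import reverse

edit_url = reverse('users:update', kwargs={'pk': 42})            # e.g. '/users/42/'
invite_url = reverse('users:accept_invite', args=['some-token'])  # e.g. '/users/invite/some-token/'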
39.68
97
0.708669
0
0
0
0
0
0
0
0
280
0.282258
4a6439ff07d926ead0739ddd1b337b6e86927570
8,197
py
Python
qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
Zoufalc/qiskit-machine-learning
aae3941214cd9667a53b643f229d11d0bff32c60
[ "Apache-2.0" ]
1
2021-07-07T21:23:38.000Z
2021-07-07T21:23:38.000Z
qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
Zoufalc/qiskit-machine-learning
aae3941214cd9667a53b643f229d11d0bff32c60
[ "Apache-2.0" ]
null
null
null
qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
Zoufalc/qiskit-machine-learning
aae3941214cd9667a53b643f229d11d0bff32c60
[ "Apache-2.0" ]
1
2021-04-11T14:30:32.000Z
2021-04-11T14:30:32.000Z
# This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Neural network regressor """ from typing import Union import numpy as np from qiskit.algorithms.optimizers import Optimizer from ...exceptions import QiskitMachineLearningError from ...neural_networks import NeuralNetwork from ...utils.loss_functions import (Loss, L1Loss, L2Loss, CrossEntropyLoss, CrossEntropySigmoidLoss) class NeuralNetworkRegressor: """ Quantum neural network regressor""" def __init__(self, neural_network: NeuralNetwork, loss: Union[str, Loss] = 'l2', optimizer: Optimizer = None, warm_start: bool = False): """ Args: neural_network: An instance of an quantum neural network. If the neural network has a one-dimensional output, i.e., `neural_network.output_shape=(1,)`, then it is expected to return values in [-1, +1] and it can only be used for binary classification. If the output is multi-dimensional, it is assumed that the result is a probability distribution, i.e., that the entries are non-negative and sum up to one. Then there are two options, either one-hot encoding or not. In case of one-hot encoding, each probability vector resulting a neural network is considered as one sample and the loss function is applied to the whole vector. Otherwise, each entry of the probability vector is considered as an individual sample and the loss function is applied to the index and weighted with the corresponding probability. loss: A target loss function to be used in training. Default is `l2`, i.e. L2 loss. Can be given either as a string for 'l1', 'l2', 'cross_entropy', 'cross_entropy_sigmoid', or as a loss function implementing the Loss interface. optimizer: An instance of an optimizer to be used in training. warm_start: Use weights from previous fit to start next fit. Raises: QiskitMachineLearningError: unknown loss, invalid neural network """ self._neural_network = neural_network if len(neural_network.output_shape) > 1: raise QiskitMachineLearningError('Invalid neural network output shape!') if isinstance(loss, Loss): self._loss = loss else: if loss.lower() == 'l1': self._loss = L1Loss() elif loss.lower() == 'l2': self._loss = L2Loss() elif loss.lower() == 'cross_entropy': self._loss = CrossEntropyLoss() elif loss.lower() == 'cross_entropy_sigmoid': self._loss = CrossEntropySigmoidLoss() else: raise QiskitMachineLearningError(f'Unknown loss {loss}!') self._optimizer = optimizer self._warm_start = warm_start self._fit_result = None @property def neural_network(self): """ Returns the underlying neural network.""" return self._neural_network @property def loss(self): """ Returns the underlying neural network.""" return self._loss @property def warm_start(self) -> bool: """ Returns the warm start flag.""" return self._warm_start @warm_start.setter def warm_start(self, warm_start: bool) -> None: """ Sets the warm start flag.""" self._warm_start = warm_start def fit(self, X: np.ndarray, y: np.ndarray): # pylint: disable=invalid-name """ Fit the model to data matrix X and target(s) y. Args: X: The input data. y: The target values. Returns: self: returns a trained classifier. 
Raises: QiskitMachineLearningError: In case of invalid data (e.g. incompatible with network) """ if self._neural_network.output_shape == (1,): # TODO: we should add some reasonable compatibility checks and raise meaningful errors. def objective(w): predict = self._neural_network.forward(X, w) target = np.array(y).reshape(predict.shape) value = np.sum(self._loss(predict, target)) return value def objective_grad(w): # TODO should store output from forward pass (implement loss interface?) # TODO: need to be able to turn off input grads if not needed. output = self._neural_network.forward(X, w) _, weights_grad = self._neural_network.backward(X, w) grad = np.zeros((1, self._neural_network.num_weights)) for i in range(len(X)): grad += self._loss.gradient(output[i][0], y[i]) * weights_grad[i] return grad else: def objective(w): val = 0.0 probs = self._neural_network.forward(X, w) for i in range(len(X)): for y_predict, prob in enumerate(probs[i]): val += prob * self._loss(y_predict, y[i]) return val def objective_grad(w): num_classes = self._neural_network.output_shape[0] grad = np.zeros((1, self._neural_network.num_weights)) for x, y_target in zip(X, y): # TODO: do batch eval _, weight_prob_grad = self._neural_network.backward(x, w) for i in range(num_classes): grad += weight_prob_grad[ 0, i, :].reshape(grad.shape) * self._loss(i, y_target) return grad if self._warm_start and self._fit_result is not None: initial_point = self._fit_result[0] else: initial_point = np.random.rand(self._neural_network.num_weights) self._fit_result = self._optimizer.optimize(self._neural_network.num_weights, objective, objective_grad, initial_point=initial_point) return self def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name """ Predict using the network specified to the regression. Args: X: The input data. Raises: QiskitMachineLearningError: Model needs to be fit to some training data first Returns: The predicted values. """ if self._fit_result is None: raise QiskitMachineLearningError('Model needs to be fit to some training data first!') # TODO: proper handling of batching return self._neural_network.forward(X, self._fit_result[0]) def score(self, X: np.ndarray, y: np.ndarray) -> int: # pylint: disable=invalid-name """ Return R-squared on the given test data and targeted values. Args: X: Test samples. y: True target values given `X`. Raises: QiskitMachineLearningError: Model needs to be fit to some training data first Returns: R-squared value. """ if self._fit_result is None: raise QiskitMachineLearningError('Model needs to be fit to some training data first!') predict = self.predict(X) # Compute R2 for score ss_res = sum(map(lambda k: (k[0] - k[1]) ** 2, zip(y, predict))) ss_tot = sum([(k - np.mean(y)) ** 2 for k in y]) score = 1 - (ss_res / ss_tot) if len(np.array(score).shape) > 0: return score[0] else: return score
39.791262
99
0.595218
7,349
0.896548
0
0
506
0.06173
0
0
3,746
0.456996
4a655d791ecdecd8d04559095721de06fb34dc2a
2,380
py
Python
residuals.py
fbob/mplFOAM
90c9a970ba9975ce115ef5a66eb22fc463b54003
[ "MIT" ]
8
2016-11-01T05:43:48.000Z
2022-01-27T02:12:29.000Z
residuals.py
fbob/mplFOAM
90c9a970ba9975ce115ef5a66eb22fc463b54003
[ "MIT" ]
null
null
null
residuals.py
fbob/mplFOAM
90c9a970ba9975ce115ef5a66eb22fc463b54003
[ "MIT" ]
3
2016-11-01T05:44:01.000Z
2019-05-15T04:04:57.000Z
#!/usr/bin/env python
# encoding: utf-8

import sys
import getopt
import re
import os
import pylab as plt
import numpy as np

# Define the variables for which the residuals will be plotted
variables = ["Ux", "Uy", "T", "p_rgh", "k", "epsilon"]

# Get the arguments of the script
def usage():
    print("Usage: residuals.py -l logfile\nPlot the residuals versus Time/Iteration")

try:
    options, args = getopt.getopt(sys.argv[1:], 'l:h', ['help', 'logfile='])
except getopt.GetoptError:
    usage()
    sys.exit(2)

for opt, arg in options:
    if opt in ("-l", "--logfile"):
        log_file = arg
    elif opt in ("-h", "--help"):
        usage()
        sys.exit(1)

# Get the lines of the logfile 'log_file'
lines = open(log_file, "r").readlines()

# Get the time and continuity values
time = []        # Time(s) or iterations counter
continuity = []  # Continuity values

for line in lines:
    if re.search(r"^Time = ", line):  # Search for string 'Time' at the begining of the line in file
        start = 'Time = '
        value = line.split(start)[1]    # Take the Time value as the string just after start
        time.append(np.float(value))    # Transform the string in a float value
    elif re.search(r"continuity errors :", line):  # Search for string 'continuity' in the lines of file 'log_file'
        start = 'sum local = '
        end = ', global'
        value = line.split(start)[1].split(end)[0]  # Take the continuity value as string between start and end
        continuity.append(np.float(value))          # Transform the string in a float value

# Get the residual values for each variable
for variable in variables:
    data = []
    for line in lines:
        if re.search(r"Solving for " + variable, line):  # Search for string variable in line of file 'log_file'
            start = 'Final residual = '
            end = ', No Iterations'
            value = line.split(start)[1].split(end)[0]
            data.append(np.float(value))
    plt.plot(np.array(time), np.array(data), label=variable)  # Plot the residual values of variable

plt.plot(np.array(time), np.array(continuity), label="Continuity")  # Plot the continuity values

# Plot
plt.title("Residuals plot:\n * logfile: " + log_file + "\n * case dir: " + os.getcwd().split('/')[-1], loc='left')
plt.xlabel("Time(s)/Iterations")
plt.ylabel("Residuals (Log Scale)")
plt.yscale('log')
plt.legend()
plt.grid()
plt.show()
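The extraction in this script hinges on fixed marker strings in the solver log. A small, self-contained sketch of that same split-based parsing, using made-up OpenFOAM-style log lines (the exact log format varies by solver, so these lines are assumptions):

# Standalone sketch of the extraction logic above, with hypothetical log lines.
sample_lines = [
    "Time = 0.005",
    "smoothSolver:  Solving for Ux, Initial residual = 0.1, Final residual = 1.2e-06, No Iterations 3",
    "time step continuity errors : sum local = 2.9e-09, global = 1.1e-10, cumulative = 1.1e-10",
]

time_value = float(sample_lines[0].split("Time = ")[1])                              # 0.005
ux_residual = float(sample_lines[1].split("Final residual = ")[1].split(", No Iterations")[0])  # 1.2e-06
continuity = float(sample_lines[2].split("sum local = ")[1].split(", global")[0])    # 2.9e-09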
34.492754
114
0.64958
0
0
0
0
0
0
0
0
1,135
0.476891
4a6725140b49d63b56d6ce94163eb9cfc057133e
4,295
py
Python
content_generator/vitae.py
empiricalstateofmind/personal_website
cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c
[ "MIT" ]
null
null
null
content_generator/vitae.py
empiricalstateofmind/personal_website
cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c
[ "MIT" ]
3
2015-09-10T09:26:29.000Z
2015-10-30T10:47:33.000Z
content_generator/vitae.py
empiricalstateofmind/personal_website
cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c
[ "MIT" ]
null
null
null
# Generate the vitae.json file used to populate the Vitae section of the website. import pandas as pd import re from datetime import datetime from collections import defaultdict import json # Publications def create_publications(filepath): publications = pd.read_excel(filepath, sheet_name='publications', endcoding='utf-8') publications = publications.fillna('') publication_store = defaultdict(list) for ix, pub in publications.iterrows(): date = pub.publication_date.strftime('%Y') entry = {'title': pub.title, 'authors': pub.authors, 'arxiv': pub.arxiv_link, 'abstract':pub.abstract, 'date': date} if pub.journal_link != '': entry['link'] = pub.journal_link if pub.journal != '': entry['journal'] = pub.journal publication_store[pub.type].append(entry) return publication_store def create_conferences(filepath): conferences = pd.read_excel(filepath, sheet_name='conferences', endcoding='utf-8') conferences = conferences.fillna('') categories = [('invited', 'Invited Talks \& Posters'), ('contributed', 'Contributed Talks \& Posters'), ('attended', 'Attended'), ('school', 'Schools')] conference_store = {} for key, subtitle in categories: data = conferences[conferences.type == key] collection = [] if len(data) > 0: for ix, conf in data.iterrows(): if conf.include=='no': continue date = conf.timestamp.strftime('%b. %Y') if key in ['attended', 'school']: contribution = 'Attendee' else: contribution = "{} {}".format(conf.type.capitalize(), conf.medium.capitalize()) entry = {'title':conf.title, 'location':conf.location, 'date':date, 'contribution': contribution, } if conf.link != '': entry['link'] = conf.link if (conf.presentation_title != '') & (conf.presentation_authors != ''): entry['presentation_authors'] = conf.presentation_authors entry['presentation_title'] = conf.presentation_title collection.append(entry) conference_store[key] = collection return conference_store def create_teaching(filepath): teaching = pd.read_excel(filepath, sheet_name='teaching', endcoding='utf-8') teaching = teaching.fillna('') teaching_store = [] for ix, teach in teaching.sort_values(by='type').iterrows(): if teach['type'] == 'supervision': entry = { 'date': teach.date, 'project_award': teach.program, 'title': teach.title, 'student': teach.student_name, 'institution': teach.location } teaching_store.append(entry) return teaching_store def create_reviewing(filepath): reviewing = pd.read_excel(filepath, sheet_name='journals', endcoding='utf-8') reviewing = reviewing.fillna('') review_store = [] for ix, review in reviewing.iterrows(): entry = {'name': review.journal_name, 'short_name': review.journal_shortname} review_store.append(entry) return review_store if __name__ == "__main__": # FILEPATH = "D:/Dropbox/projects/personal_cv/vitae.xlsx" # We can pass this as an argument later FILEPATH = "../../../Projects/personal_cv/vitae.xlsx" vitae = {'publications':create_publications(FILEPATH), 'conferences':create_conferences(FILEPATH), 'teaching':create_teaching(FILEPATH), 'reviewing':create_reviewing(FILEPATH)} with open('../app/mod_home/static/vitae.json', 'w') as file: json.dump(vitae, file, sort_keys=True, indent=4) with open('../app/static/vitae.json', 'w') as file: json.dump(vitae, file, sort_keys=True, indent=4)
33.818898
102
0.563213
0
0
0
0
0
0
0
0
853
0.198603
4a6776593c88474050fcd17038b16a7c7bc8d4c6
7,509
py
Python
cement/ext/ext_generate.py
tomekr/cement
fece8629c48bcd598fd61d8aa7457a5df4c4f831
[ "BSD-3-Clause" ]
826
2015-01-09T13:23:35.000Z
2022-03-18T01:19:40.000Z
cement/ext/ext_generate.py
tomekr/cement
fece8629c48bcd598fd61d8aa7457a5df4c4f831
[ "BSD-3-Clause" ]
316
2015-01-14T10:35:22.000Z
2022-03-08T17:18:10.000Z
cement/ext/ext_generate.py
tomekr/cement
fece8629c48bcd598fd61d8aa7457a5df4c4f831
[ "BSD-3-Clause" ]
112
2015-01-10T15:04:26.000Z
2022-03-16T08:11:58.000Z
""" Cement generate extension module. """ import re import os import inspect import yaml import shutil from .. import Controller, minimal_logger, shell from ..utils.version import VERSION, get_version LOG = minimal_logger(__name__) class GenerateTemplateAbstractBase(Controller): class Meta: pass def _generate(self, source, dest): msg = 'Generating %s %s in %s' % ( self.app._meta.label, self._meta.label, dest ) self.app.log.info(msg) data = {} # builtin vars maj_min = float('%s.%s' % (VERSION[0], VERSION[1])) data['cement'] = {} data['cement']['version'] = get_version() data['cement']['major_version'] = VERSION[0] data['cement']['minor_version'] = VERSION[1] data['cement']['major_minor_version'] = maj_min f = open(os.path.join(source, '.generate.yml')) yaml_load = yaml.full_load if hasattr(yaml, 'full_load') else yaml.load g_config = yaml_load(f) f.close() vars = g_config.get('variables', {}) exclude_list = g_config.get('exclude', []) ignore_list = g_config.get('ignore', []) # default ignore the .generate.yml config g_config_yml = r'^(.*)[\/\\\\]%s[\/\\\\]\.generate\.yml$' % \ self._meta.label ignore_list.append(g_config_yml) var_defaults = { 'name': None, 'prompt': None, 'validate': None, 'case': None, 'default': None, } for defined_var in vars: var = var_defaults.copy() var.update(defined_var) for key in ['name', 'prompt']: assert var[key] is not None, \ "Required generate config key missing: %s" % key val = None if var['default'] is not None and self.app.pargs.defaults: val = var['default'] elif var['default'] is not None: default_text = ' [%s]' % var['default'] else: default_text = '' # pragma: nocover if val is None: class MyPrompt(shell.Prompt): class Meta: text = "%s%s:" % (var['prompt'], default_text) default = var.get('default', None) p = MyPrompt() val = p.prompt() # pragma: nocover if var['case'] in ['lower', 'upper', 'title']: val = getattr(val, var['case'])() elif var['case'] is not None: self.app.log.warning( "Invalid configuration for variable " + "'%s': " % var['name'] + "case must be one of lower, upper, or title." 
) if var['validate'] is not None: assert re.match(var['validate'], val), \ "Invalid Response (must match: '%s')" % var['validate'] data[var['name']] = val try: self.app.template.copy(source, dest, data, force=self.app.pargs.force, ignore=ignore_list, exclude=exclude_list) except AssertionError as e: if re.match('(.*)already exists(.*)', e.args[0]): raise AssertionError(e.args[0] + ' (try: --force)') else: raise # pragma: nocover def _clone(self, source, dest): msg = 'Cloning %s %s template to %s' % ( self.app._meta.label, self._meta.label, dest ) self.app.log.info(msg) if os.path.exists(dest) and self.app.pargs.force is True: shutil.rmtree(dest) elif os.path.exists(dest): msg = "Destination path already exists: %s (try: --force)" % dest raise AssertionError(msg) shutil.copytree(source, dest) def _default(self): source = self._meta.source_path dest = self.app.pargs.dest if self.app.pargs.clone is True: self._clone(source, dest) else: self._generate(source, dest) def setup_template_items(app): template_dirs = [] template_items = [] # look in app template dirs for path in app._meta.template_dirs: subpath = os.path.join(path, 'generate') if os.path.exists(subpath) and subpath not in template_dirs: template_dirs.append(subpath) # use app template module, find it's path on filesystem if app._meta.template_module is not None: mod_parts = app._meta.template_module.split('.') mod = mod_parts.pop() try: mod = app.__import__(mod, from_module='.'.join(mod_parts)) mod_path = os.path.dirname(inspect.getfile(mod)) subpath = os.path.join(mod_path, 'generate') if os.path.exists(subpath) and subpath not in template_dirs: template_dirs.append(subpath) # FIXME: not exactly sure how to test for this so not covering except AttributeError: # pragma: nocover msg = 'unable to load template module' + \ '%s from %s' % (mod, '.'.join(mod_parts)) # pragma: nocover app.log.debug(msg) # pragma: nocover for path in template_dirs: for item in os.listdir(path): if item not in template_items: template_items.append(item) class GenerateTemplate(GenerateTemplateAbstractBase): class Meta: label = item stacked_on = 'generate' stacked_type = 'nested' help = 'generate %s from template' % item arguments = [ # ------------------------------------------------------ (['dest'], {'help': 'destination directory path'}), # ------------------------------------------------------ (['-f', '--force'], {'help': 'force operation if destination exists', 'dest': 'force', 'action': 'store_true'}), # ------------------------------------------------------ (['-D', '--defaults'], {'help': 'use all default variable values', 'dest': 'defaults', 'action': 'store_true'}), # ------------------------------------------------------ (['--clone'], {'help': 'clone this template to destination path', 'dest': 'clone', 'action': 'store_true'}), ] source_path = os.path.join(path, item) app.handler.register(GenerateTemplate) class Generate(Controller): class Meta: label = 'generate' stacked_on = 'base' stacked_type = 'nested' config_section = 'generate' def _setup(self, app): super(Generate, self)._setup(app) def _default(self): self._parser.print_help() def load(app): app.handler.register(Generate) app.hook.register('pre_run', setup_template_items)
35.088785
80
0.481555
5,742
0.764682
0
0
0
0
0
0
1,760
0.234385
4a681f9f92ee718dd3a1a15638701370f778139a
169
py
Python
ditto/core/__init__.py
Kvoti/ditto
eb4efb241e54bf679222d14afeb71d9d5441c122
[ "BSD-3-Clause" ]
null
null
null
ditto/core/__init__.py
Kvoti/ditto
eb4efb241e54bf679222d14afeb71d9d5441c122
[ "BSD-3-Clause" ]
9
2015-11-10T15:17:22.000Z
2015-11-12T11:07:02.000Z
ditto/core/__init__.py
Kvoti/ditto
eb4efb241e54bf679222d14afeb71d9d5441c122
[ "BSD-3-Clause" ]
null
null
null
from . import forms
from . import views

ADMIN_ROLE = "Administrator"
MEMBER_ROLE = "Member"
GUEST_ROLE = "Guest"

DEFAULT_ROLES = [ADMIN_ROLE, MEMBER_ROLE, GUEST_ROLE]
18.777778
53
0.763314
0
0
0
0
0
0
0
0
30
0.177515
4a6a1474e56bbc2b491bd544f9d2c60a78d79285
1,216
py
Python
training_stats/hrm.py
salwator/training_stats
3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
[ "MIT" ]
4
2018-01-02T01:10:03.000Z
2019-02-09T23:37:13.000Z
training_stats/hrm.py
salwator/training_stats
3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
[ "MIT" ]
4
2018-01-05T16:46:35.000Z
2019-03-19T22:10:36.000Z
training_stats/hrm.py
salwator/training_stats
3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
[ "MIT" ]
2
2016-12-09T22:36:58.000Z
2018-07-22T12:58:06.000Z
from .gpxfile import get_hr_measurements
from .utils import interpolate
from operator import itemgetter


def __calculate_moving_sums(points, window):
    """ Calculates hr moving sums of the window len """
    time, hrs = zip(*points)
    moving_sum = sum(hrs[0:window])
    sums = [(time[0], moving_sum)]
    for i, t in enumerate(time[1:-1 * window]):
        moving_sum += hrs[i + window] - hrs[i]
        sums.append((t, moving_sum))
    return sums


def calculate_lactate_threshold(hrdata):
    """ Given list of (time, hr), returns lactate threshold and selected data"""
    test_period = 60 * 30      # test time
    measured_period = 60 * 20  # measured period in seconds
    hrs = interpolate(hrdata)
    time_stamp, max_sum = max(__calculate_moving_sums(hrs, test_period),
                              key=itemgetter(1))
    # your lactate threshold is average of last 20 in 30 minutes of tempo run
    start_measure = time_stamp + (test_period - measured_period)
    stop_measure = start_measure + measured_period
    measured_time, measured_hrs = zip(*hrs[start_measure:stop_measure])
    lactate_thr = round(sum(measured_hrs) / measured_period)
    return (lactate_thr, measured_time, measured_hrs)
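A hedged usage sketch: calculate_lactate_threshold expects (time_in_seconds, heart_rate) pairs covering at least 30 minutes, and interpolate (defined elsewhere in the package) is assumed to resample them to one sample per second. The synthetic data below is invented purely for illustration.

# Hypothetical call from within the training_stats package; the fake ride below
# is 45 minutes of 5-second samples with a slowly rising heart rate.
synthetic = [(t, 150 + (t // 600)) for t in range(0, 60 * 45, 5)]
lt_hr, times, hrs = calculate_lactate_threshold(synthetic)
print(f"Estimated lactate threshold: {lt_hr} bpm over {len(hrs)} samples")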
39.225806
80
0.693257
0
0
0
0
0
0
0
0
239
0.196546
4a6b2e5b7cf0173afb424be4c44105af0dae9900
7,577
py
Python
scripts/utils/import_languages.py
mozilla-releng/staging-mozilla-vpn-client
f31d3762a607ccf2d7c6a016f7b800305fbf0113
[ "Apache-2.0" ]
null
null
null
scripts/utils/import_languages.py
mozilla-releng/staging-mozilla-vpn-client
f31d3762a607ccf2d7c6a016f7b800305fbf0113
[ "Apache-2.0" ]
null
null
null
scripts/utils/import_languages.py
mozilla-releng/staging-mozilla-vpn-client
f31d3762a607ccf2d7c6a016f7b800305fbf0113
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python3 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import argparse import xml.etree.ElementTree as ET import os import sys import shutil import atexit import subprocess # Use the project root as the working directory prevdir = os.getcwd() workdir = os.path.join(os.path.dirname(__file__), '..', '..') os.chdir(workdir) atexit.register(os.chdir, prevdir) # Include only locales above this threshold (e.g. 70%) in production l10n_threshold = 0.70 parser = argparse.ArgumentParser() parser.add_argument( '-m', '--macos', default=False, action="store_true", dest="ismacos", help='Include the MacOS bundle data') parser.add_argument( '-q', '--qt_path', default=None, dest="qtpath", help='The QT binary path. If not set, we try to guess.') args = parser.parse_args() stepnum = 1 def title(text): global stepnum print(f"\033[96m\033[1mStep {stepnum}\033[0m: \033[97m{text}\033[0m") stepnum = stepnum+1 # Step 0 title("Find the Qt localization tools...") def qtquery(qmake, propname): try: qtquery = os.popen(f'{qmake} -query {propname}') qtpath = qtquery.read().strip() if len(qtpath) > 0: return qtpath finally: pass return None qtbinpath = args.qtpath if qtbinpath is None: qtbinpath = qtquery('qmake', 'QT_INSTALL_BINS') if qtbinpath is None: qtbinpath = qtquery('qmake6', 'QT_INSTALL_BINS') if qtbinpath is None: qtbinpath = qtquery('qmake5', 'QT_INSTALL_BINS') if qtbinpath is None: qtbinpath = qtquery('qmake-qt5', 'QT_INSTALL_BINS') if qtbinpath is None: print('Unable to locate qmake tool.') sys.exit(1) if not os.path.isdir(qtbinpath): print(f"QT path is not a diretory: {qtbinpath}") sys.exit(1) lupdate = os.path.join(qtbinpath, 'lupdate') lconvert = os.path.join(qtbinpath, 'lconvert') lrelease = os.path.join(qtbinpath, 'lrelease') # Step 0 # Let's update the i18n repo os.system(f"git submodule init") os.system(f"git submodule update --remote --depth 1 i18n") # Step 1 # Go through the i18n repo, check each XLIFF file and take # note which locale is complete above the minimum threshold. # Adds path of .xliff and .ts to l10n_files. title("Validate the XLIFF file...") l10n_files = [] for locale in os.listdir('i18n'): # Skip non folders if not os.path.isdir(os.path.join('i18n', locale)): continue # Skip hidden folders if locale.startswith('.'): continue xliff_path = os.path.join('i18n', locale, 'mozillavpn.xliff') # If it's the source locale (en), ignore parsing for completeness and # add it to the list. 
if locale == 'en': print(f'OK\t- en added (reference locale)') l10n_files.append({ 'locale': 'en', 'ts': os.path.join('translations', 'generated', 'mozillavpn_en.ts'), 'xliff': xliff_path }) continue tree = ET.parse(xliff_path) root = tree.getroot() sources = 0 translations = 0 for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}source'): sources += 1 for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}target'): translations += 1 completeness = translations/(sources*1.0) # Ignore locale with less than 70% of completeness if completeness < l10n_threshold: print(f'KO\t- {locale} is translated at {round(completeness*100, 2)}%, at least {l10n_threshold*100}% is needed') continue # Not enough translations next file please print(f'OK\t- {locale} added ({round(completeness*100, 2)}% translated)') l10n_files.append({ 'locale': locale, 'ts': os.path.join('translations', 'generated', f'mozillavpn_{locale}.ts'), 'xliff': xliff_path }) # Step 2 title("Create folders and localization files for the languages...") for file in l10n_files: locdirectory = os.path.join('translations', 'generated', file['locale']) os.makedirs(locdirectory, exist_ok=True) locversion = os.path.join(locdirectory, f'locversion.plist') with open(locversion, 'w') as locversion_file: locversion_file.write(f"""<?xml version=\"1.0\" encoding=\"UTF-8\"?> <!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"> <plist version=\"1.0\"> <dict> <key>LprojCompatibleVersion</key> <string>123</string> <key>LprojLocale</key> <string>{file['locale']}</string> <key>LprojRevisionLevel</key> <string>1</string> <key>LprojVersion</key> <string>123</string> </dict> </plist>""") with open(os.path.join('translations', 'generated', 'macos.pri'), 'w') as macospri: macospri.write('### AUTOGENERATED! DO NOT EDIT!! ###\n') for file in l10n_files: macospri.write(f"LANGUAGES_FILES_{file['locale']}.files += $$PWD/{file['locale']}/locversion.plist\n") macospri.write(f"LANGUAGES_FILES_{file['locale']}.path = Contents/Resources/{file['locale']}.lproj\n") macospri.write(f"QMAKE_BUNDLE_DATA += LANGUAGES_FILES_{file['locale']}\n\n") # Step 3 title("Write resource file to import the locales that are ready...") with open('translations/generated/translations.qrc', 'w') as qrcfile: qrcfile.write('<!-- AUTOGENERATED! DO NOT EDIT!! -->\n') qrcfile.write('<RCC>\n') qrcfile.write(' <qresource prefix="/i18n">\n') for file in l10n_files: qrcfile.write(f' <file>mozillavpn_{file["locale"]}.qm</file>\n') qrcfile.write(' </qresource>\n') qrcfile.write('</RCC>\n') # Step 4 title("Generate the Js/C++ string definitions...") try: subprocess.call([sys.executable, os.path.join('scripts', 'utils', 'generate_strings.py'), '-o', os.path.join('translations', 'generated'), os.path.join('translations', 'strings.yaml')]) except Exception as e: print("generate_strings.py failed. Try with:\n\tpip3 install -r requirements.txt --user") print(e) exit(1) # Build a dummy project to glob together everything that might contain strings. title("Scanning for new strings...") def scan_sources(projfile, dirpath): projfile.write(f"HEADERS += $$files({dirpath}/*.h, true)\n") projfile.write(f"SOURCES += $$files({dirpath}/*.cpp, true)\n") projfile.write(f"RESOURCES += $$files({dirpath}/*.qrc, true)\n\n") with open('translations/generated/dummy.pro', 'w') as dummyproj: dummyproj.write('### AUTOGENERATED! DO NOT EDIT!! 
###\n') dummyproj.write(f"HEADERS += l18nstrings.h\n") dummyproj.write(f"SOURCES += l18nstrings_p.cpp\n") dummyproj.write(f"SOURCES += ../l18nstrings.cpp\n\n") for l10n_file in l10n_files: dummyproj.write(f"TRANSLATIONS += {os.path.basename(l10n_file['ts'])}\n") dummyproj.write("\n") scan_sources(dummyproj, '../../src') scan_sources(dummyproj, '../../nebula') # Step 5 title("Generate translation resources...") for l10n_file in l10n_files: os.system(f"{lconvert} -if xlf -i {l10n_file['xliff']} -o {l10n_file['ts']}") os.system(f"{lupdate} translations/generated/dummy.pro") for l10n_file in l10n_files: os.system(f"{lrelease} -idbased {l10n_file['ts']}") print(f'Imported {len(l10n_files)} locales') git = os.popen(f'git submodule status i18n') git_commit_hash = git.read().strip().replace("+","").split(' ')[0] print(f'Current commit: https://github.com/mozilla-l10n/mozilla-vpn-client-l10n/commit/{git_commit_hash}')
35.57277
121
0.665171
0
0
0
0
0
0
0
0
4,088
0.539528
4a6e93c38ff63c100497bb656432f8f40340791b
1,026
py
Python
cogs/filter.py
Velgaster/Discord-User-Vote
4aacc0bf01a11b948fa5355a3775ef8c7ae9751e
[ "MIT" ]
null
null
null
cogs/filter.py
Velgaster/Discord-User-Vote
4aacc0bf01a11b948fa5355a3775ef8c7ae9751e
[ "MIT" ]
null
null
null
cogs/filter.py
Velgaster/Discord-User-Vote
4aacc0bf01a11b948fa5355a3775ef8c7ae9751e
[ "MIT" ]
null
null
null
from discord.ext import commands
import discord


def setup(client):
    client.add_cog(KeyWordFilter(client))


class KeyWordFilter(commands.Cog):
    def __init__(self, client):
        self.client = client
        self.log_ch = self.client.get_channel(int(self.client.SETTINGS.LOG_CHANNEL))

    @commands.Cog.listener()
    async def on_message(self, msg):
        if any(x in msg.content.split() for x in self.client.SETTINGS.BLACKLIST):
            ctx = await self.client.get_context(msg)
            await self.event_log(ctx, msg, "A blacklisted phrase was used!")
            await msg.delete()

    async def event_log(self, ctx, msg, event):
        embed = discord.Embed()
        embed.colour = discord.Colour.red()
        embed.title = event
        embed.add_field(name='User', value=msg.author, inline=True)
        embed.add_field(name='Channel', value=msg.channel.name, inline=True)
        embed.add_field(name='Message', value=f"> {msg.content}", inline=False)
        await self.log_ch.send(embed=embed)
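A sketch of how a host bot might wire this cog in. The cog reads LOG_CHANNEL and BLACKLIST from a SETTINGS object attached to the client; that object, the token handling, and the loading code below are all assumptions. The synchronous setup() suggests discord.py 1.x; version 2.x would additionally require intents and an async setup/load_extension.

# Hypothetical bot bootstrap (discord.py 1.x style); SETTINGS is assumed to be
# provided by the host application before the extension is loaded.
from types import SimpleNamespace
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.SETTINGS = SimpleNamespace(LOG_CHANNEL="123456789012345678", BLACKLIST=["badword"])
bot.load_extension("cogs.filter")  # calls setup(), which registers KeyWordFilter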
35.37931
84
0.665692
912
0.888889
0
0
304
0.296296
691
0.673489
74
0.072125
4a6fe4cb292136ed5cb190cbef1dbace08d2c9c3
1,975
py
Python
api/app.py
sai-krishna-msk/KickAssist
7fb256e3ef4beff231332f6491ebb975f3fe4b43
[ "MIT" ]
null
null
null
api/app.py
sai-krishna-msk/KickAssist
7fb256e3ef4beff231332f6491ebb975f3fe4b43
[ "MIT" ]
7
2021-06-08T21:18:49.000Z
2022-03-12T00:24:33.000Z
api/app.py
sai-krishna-msk/KickAssist
7fb256e3ef4beff231332f6491ebb975f3fe4b43
[ "MIT" ]
null
null
null
from ml_model.model import KickModel

import numpy as np
import pandas as pd
import eli5
import joblib

import flask
from flask import Flask, render_template, request, jsonify

app = Flask(__name__)

model_oh = joblib.load('ml_model/estimators/model_oh.sav')
model_hel = joblib.load('ml_model/estimators/model_hel.sav')
encoder_oh = joblib.load('ml_model/estimators/encoder_oh.sav')
encoder_hel = joblib.load('ml_model/estimators/encoder_hel.sav')
encoder_label = joblib.load('ml_model/estimators/encoder_label.sav')


def get_predict(launch_date, deadline_date, goal, subcategory, category, currency, country, description, rewards):
    pred_dict = {
        "launched_at": launch_date,
        "deadline": deadline_date,
        "goal": int(goal),
        "sub_category": subcategory,
        "category": category,
        "currency": currency,
        "location_country": country,
        "blurb": description,
        "rewards": []
    }
    try:
        for reward in rewards.split(","):
            pred_dict["rewards"].append(int(reward))
    except Exception as e:
        raise Exception(f"Error sanatizing rewards with {e} error")

    return pred_dict


@app.route('/predict/<launch_date>/<deadline_date>/<goal>/<subcategory>/<category>/<currency>/<country>/<description>/<rewards>')
def GetURL(launch_date, deadline_date, goal, subcategory, category, currency, country, description, rewards):
    pred_dict = get_predict(launch_date, deadline_date, goal, subcategory, category, currency, country, description, rewards)

    obj = KickModel(model_oh, model_hel, encoder_oh, encoder_hel, encoder_label)
    obj.load_data(pred_dict)
    obj.pred()

    oh_pred = float(obj.pred_oh[0][1])
    hel_pred = float(obj.pred_hel[0][1])

    response = {
        "prediction_oh": oh_pred,
        "prediction_hel": hel_pred,
        "prediction_oh_df": obj.pred_oh_intr.to_dict(),
        "prediction_hel_intr": obj.pred_hel_intr.to_dict()
    }

    return response


if __name__ == "__main__":
    app.run(debug=True)
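A hedged client-side example of calling this endpoint once the Flask app is running. The host/port and every field value below are assumptions chosen only to match the nine path segments the route expects; the description segment is URL-encoded because it contains spaces.

# Hypothetical client call, assuming the app is served on localhost:5000.
import requests

url = ("http://localhost:5000/predict/"
       "2020-01-01/2020-02-01/5000/Web/Technology/USD/US/"
       "A%20small%20hardware%20project/10,25,50")
resp = requests.get(url)
data = resp.json()
print(data["prediction_oh"], data["prediction_hel"])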
33.474576
132
0.716456
0
0
0
0
799
0.404557
0
0
529
0.267848
4a70669d9d055da240cf688e557bf0a87257569e
2,810
py
Python
snowddl/resolver/primary_key.py
littleK0i/SnowDDL
b24cb3676e41fec8876d61a101ba242e7272a18f
[ "Apache-2.0" ]
21
2022-02-10T16:52:03.000Z
2022-03-18T15:27:18.000Z
snowddl/resolver/primary_key.py
littleK0i/SnowDDL
b24cb3676e41fec8876d61a101ba242e7272a18f
[ "Apache-2.0" ]
null
null
null
snowddl/resolver/primary_key.py
littleK0i/SnowDDL
b24cb3676e41fec8876d61a101ba242e7272a18f
[ "Apache-2.0" ]
1
2022-03-05T11:02:42.000Z
2022-03-05T11:02:42.000Z
from snowddl.blueprint import PrimaryKeyBlueprint from snowddl.resolver.abc_schema_object_resolver import AbstractSchemaObjectResolver, ResolveResult, ObjectType class PrimaryKeyResolver(AbstractSchemaObjectResolver): def get_object_type(self) -> ObjectType: return ObjectType.PRIMARY_KEY def get_existing_objects_in_schema(self, schema: dict): existing_objects = {} constraints_by_name = {} cur = self.engine.execute_meta("SHOW PRIMARY KEYS IN SCHEMA {database:i}.{schema:i}", { "database": schema['database'], "schema": schema['schema'], }) for r in cur: if r['constraint_name'] not in constraints_by_name: constraints_by_name[r['constraint_name']] = { "database": r['database_name'], "schema": r['schema_name'], "table": r['table_name'], "columns": {r['key_sequence']: r['column_name']} } else: constraints_by_name[r['constraint_name']]['columns'][r['key_sequence']] = r['column_name'] for c in constraints_by_name.values(): columns_list = [c['columns'][k] for k in sorted(c['columns'])] full_name = f"{c['database']}.{c['schema']}.{c['table']}({','.join(columns_list)})" existing_objects[full_name] = { "database": c['database'], "schema": c['schema'], "table": c['table'], "columns": columns_list, } return existing_objects def get_blueprints(self): return self.config.get_blueprints_by_type(PrimaryKeyBlueprint) def create_object(self, bp: PrimaryKeyBlueprint): self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", { "table_name": bp.table_name, "columns": bp.columns, }) return ResolveResult.CREATE def compare_object(self, bp: PrimaryKeyBlueprint, row: dict): if [str(c) for c in bp.columns] == row['columns']: return ResolveResult.NOCHANGE self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} DROP PRIMARY KEY", { "table_name": bp.table_name, }) self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", { "table_name": bp.table_name, "columns": bp.columns, }) return ResolveResult.ALTER def drop_object(self, row: dict): self.engine.execute_safe_ddl("ALTER TABLE {database:i}.{schema:i}.{table:i} DROP PRIMARY KEY", { "database": row['database'], "schema": row['schema'], "table": row['table'], }) return ResolveResult.DROP
36.973684
111
0.5879
2,645
0.941281
0
0
0
0
0
0
763
0.27153
4a713700e9c156f74125bcaeca0299290201d914
675
py
Python
modules/module0/02_datastructures_and_geometry/datastructures_2b.py
tetov/ITA19
1af68a8885caf83acd98f4136d0286539ccbe63b
[ "MIT" ]
7
2019-11-13T20:29:54.000Z
2020-02-26T14:30:54.000Z
modules/module0/02_datastructures_and_geometry/datastructures_2b.py
GeneKao/ITA19
c4b10dc183599eed4ed60d922b6ef5922d173bdb
[ "MIT" ]
4
2019-11-07T20:57:51.000Z
2020-03-04T11:43:18.000Z
modules/module0/02_datastructures_and_geometry/datastructures_2b.py
GeneKao/ITA19
c4b10dc183599eed4ed60d922b6ef5922d173bdb
[ "MIT" ]
6
2019-10-30T13:25:54.000Z
2020-02-14T14:06:09.000Z
import os

import compas
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist

HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')

mesh = Mesh.from_obj(FILE)

artist = MeshArtist(mesh, layer="Mesh")

artist.draw_vertices(
    color={key: (255, 0, 0) for key in mesh.vertices_on_boundary()})

artist.draw_vertexlabels(
    text={key: str(mesh.vertex_degree(key)) for key in mesh.vertices()})

artist.draw_edges(
    keys=list(mesh.edges_on_boundary()),
    color=(255, 0, 0))

artist.draw_faces(
    color={key: (150, 255, 150) for key in mesh.faces() if not mesh.is_face_on_boundary(key)})
25
94
0.722963
0
0
0
0
0
0
0
0
23
0.034074
4a7152ca8736c0b2b62e12278fe928d5690e8c0b
461
py
Python
OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py
tankishev/Python
60e511fc901f136b88c681f77f209fe2f8c46447
[ "MIT" ]
2
2022-03-04T11:39:03.000Z
2022-03-13T07:13:23.000Z
OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py
tankishev/Python
60e511fc901f136b88c681f77f209fe2f8c46447
[ "MIT" ]
null
null
null
OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py
tankishev/Python
60e511fc901f136b88c681f77f209fe2f8c46447
[ "MIT" ]
null
null
null
# The Pokemon class should receive a name (string) and health (int) upon initialization.
# It should also have a method called pokemon_details that returns the information about the pokemon:
# "{pokemon_name} with health {pokemon_health}"


class Pokemon:
    def __init__(self, name: str, health: int) -> None:
        self.name = name
        self.health = health

    def pokemon_details(self) -> str:
        return f"{self.name} with health {self.health}"
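A minimal usage example matching the behaviour described in the comment at the top of the file; the name and health values are arbitrary.

pikachu = Pokemon("Pikachu", 90)
print(pikachu.pokemon_details())  # -> "Pikachu with health 90"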
35.461538
101
0.70282
220
0.477223
0
0
0
0
0
0
276
0.598698
4a71705f7aaede9643300a7a698cb26841f08adc
1,936
py
Python
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
null
null
null
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
null
null
null
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
1
2022-03-17T08:03:17.000Z
2022-03-17T08:03:17.000Z
"""Tests for the pandas helpers in the pd_helpers.py module.""" import pytest from pandas.testing import assert_frame_equal from tests.conftest import create_dataframe from ons_utils.pandas import * def test_nested_dict_to_df(): """Test for nested_dict_to_df.""" input_d = { 'bones': { 'femur': {'tendons': 24}, 'humerus': {'tendons': 14}, }, 'muscles': { 'gluteus_maximus': {'tendons': 18}, }, 'cars': 7, } actual = nested_dict_to_df( input_d, columns=['number'], level_names=('a', 'b', 'c'), ) expected = create_dataframe([ ('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', 24), ('bones', 'humerus', 'tendons', 14), ('cars', None, None, 7), ('muscles', 'gluteus_maximus', 'tendons', 18), ]) assert_frame_equal( # Sort values as dict order not preserved. actual.sort_values(['a', 'b']), # Set index because function returns a MultiIndex. expected.set_index(['a', 'b', 'c']) ) class TestStacker: """Group of tests for Stacker.""" @pytest.mark.skip(reason="test shell") def test_Stacker(self): """Test for Stacker.""" pass @pytest.mark.skip(reason="test shell") def test_convert_level_to_datetime(): """Test for this.""" pass class TestMultiIndexSlicer: """Group of tests for MultiIndexSlicer.""" @pytest.mark.skip(reason="test shell") def test_MultiIndexSlicer(self): """Test for MultiIndexSlicer.""" pass @pytest.mark.skip(reason="test shell") def test_get_index_level_values(): """Test for this.""" pass @pytest.mark.skip(reason="test shell") def test_shifted_within_year_apply(): """Test for this.""" pass @pytest.mark.skip(reason="test shell") def test_shifted_within_year_ffill(): """Test for this.""" pass
22.776471
63
0.591426
382
0.197314
0
0
677
0.34969
0
0
690
0.356405
4a71f720f8188e39f1b7b64f6e15744bd236efe6
72
py
Python
lsf_ibutils/ibsub/__init__.py
seanfisk/lsf-ibutils
a22c738376d656ab38f4bfa3572d4693288098cb
[ "MIT" ]
null
null
null
lsf_ibutils/ibsub/__init__.py
seanfisk/lsf-ibutils
a22c738376d656ab38f4bfa3572d4693288098cb
[ "MIT" ]
null
null
null
lsf_ibutils/ibsub/__init__.py
seanfisk/lsf-ibutils
a22c738376d656ab38f4bfa3572d4693288098cb
[ "MIT" ]
1
2021-06-03T22:32:54.000Z
2021-06-03T22:32:54.000Z
""":mod:`lsf_ibutils.ibsub` -- Interactive batch submission utility """
24
67
0.722222
0
0
0
0
0
0
0
0
71
0.986111
4a72355337ea53a1937c776fab78aa381734b4c1
193
py
Python
build/lib/configger/fishes/__init__.py
PaperDevil/pyconfigger
75c6e3f74e6e70d8ec9565397e2be9ae8815d44e
[ "MIT" ]
2
2021-02-04T14:29:19.000Z
2021-03-04T12:56:58.000Z
build/lib/configger/fishes/__init__.py
PaperDevil/pyconfigger
75c6e3f74e6e70d8ec9565397e2be9ae8815d44e
[ "MIT" ]
null
null
null
build/lib/configger/fishes/__init__.py
PaperDevil/pyconfigger
75c6e3f74e6e70d8ec9565397e2be9ae8815d44e
[ "MIT" ]
2
2020-08-19T21:50:30.000Z
2020-11-04T03:51:33.000Z
import os

splited_path = os.path.realpath(__file__).split('\\')[:-1]
fish_path = '\\'.join(splited_path)

fish_json_name = "fish.json"
fish_json_path = os.path.join(fish_path, fish_json_name)
24.125
58
0.735751
0
0
0
0
0
0
0
0
19
0.098446
4a73c0e8a1979c239e091749b325602ad4a40468
5,620
py
Python
setup.py
IntuitionEngineeringTeam/RedBlackPy
99630408153bea7494415c402eb2d9881f3168ee
[ "Apache-2.0" ]
12
2018-08-24T20:46:38.000Z
2022-01-20T16:25:23.000Z
setup.py
IntuitionEngineeringTeam/RedBlackPy
99630408153bea7494415c402eb2d9881f3168ee
[ "Apache-2.0" ]
1
2019-04-02T04:19:58.000Z
2019-04-02T04:19:58.000Z
setup.py
IntuitionEngineeringTeam/RedBlackPy
99630408153bea7494415c402eb2d9881f3168ee
[ "Apache-2.0" ]
3
2018-07-05T22:47:27.000Z
2019-05-25T06:40:40.000Z
# # Created by Soldoskikh Kirill. # Copyright 2018 Intuition. All rights reserved. # import os import platform from setuptools import setup from setuptools.command.build_ext import build_ext from distutils.extension import Extension from Cython.Build import cythonize from rbp_setup_tools.code_generation import generate_from_cython_src from rbp_setup_tools.types import TYPES if platform.system() == 'Darwin': compile_opts = [ '-std=c++11', '-mmacosx-version-min={:}'.format( platform.mac_ver()[0] ), '-Ofast' ] elif platform.system() == 'Linux': compile_opts = [ '-std=c++11', '-Ofast' ] elif platform.system() == 'Windows': compile_opts = [ '-std=c++11', '-Ofast' ] else: raise EnvironmentError( 'Not supported platform: {plat}'.format(plat=platform.system()) ) #-------------------------------------------------------------------------------------------- # Generate cython code for all supporting types #-------------------------------------------------------------------------------------------- src_1 = './redblackpy/cython_source/__dtype_tree_processing.pxi' src_2 = './redblackpy/cython_source/__tree_series_dtype.pxi' src_3 = './redblackpy/cython_source/__interpolation.pxi' src_4 = './redblackpy/cython_source/__arithmetic.pxi' src_1 = open(src_1, 'r') src_2 = open(src_2, 'r') src_3 = open(src_3, 'r') src_4 = open(src_4, 'r') output_1 = open('./redblackpy/cython_source/dtype_tree_processing.pxi', 'w') output_2 = open('./redblackpy/cython_source/tree_series_dtype.pxi', 'w') output_3 = open('./redblackpy/cython_source/interpolation.pxi', 'w') output_4 = open('./redblackpy/cython_source/arithmetic.pxi', 'w') generate_from_cython_src(src_1, output_1, TYPES[:-1], 0) generate_from_cython_src(src_2, output_2, TYPES, 14) generate_from_cython_src(src_3, output_3, TYPES, 0) generate_from_cython_src(src_4, output_4, TYPES, 0) src_1.close() src_2.close() src_3.close() src_4.close() output_1.close() output_2.close() output_3.close() output_4.close() #-------------------------------------------------------------------------------------------- ext_modules=[ Extension( "redblackpy.series.tree_series", sources=["redblackpy/series/tree_series.pyx"], extra_compile_args=compile_opts, extra_link_args=compile_opts[:-1], language = "c++", include_dirs=['./redblackpy'], depends=[ 'core/tree/tree.hpp', 'core/tree/rb_tree.tpp' 'core/tree/rb_node.tpp', 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp', 'core/trees_iterator/iterator.tpp' ], ), Extension( "redblackpy.series.series_iterator", sources=["redblackpy/series/series_iterator.pyx"], extra_compile_args=compile_opts, extra_link_args=compile_opts[:-1], language = "c++", include_dirs=['./redblackpy'], depends=[ 'core/tree/tree.hpp', 'core/tree/rb_tree.tpp' 'core/tree/rb_node.tpp', 'core/tree/rb_node_valued.tpp', 'core/trees_iterator/iterator.hpp', 'core/trees_iterator/iterator.tpp' ], ), Extension( "redblackpy.benchmark.timer", sources=["redblackpy/benchmark/timer.pyx"], extra_compile_args=compile_opts, extra_link_args=compile_opts[:-1], language = "c++", include_dirs=['./redblackpy'] ) ] setup( name='redblackpy', ext_modules = cythonize(ext_modules), version='0.1.3.0', author='Solodskikh Kirill', author_email='[email protected]', maintainer='Intuition', maintainer_email='[email protected]', install_requires=['cython'], description='Data structures based on red-black trees.', url='https://intuitionengineeringteam.github.io/RedBlackPy/', download_url='https://github.com/IntuitionEngineeringTeam/RedBlackPy/archive/master.zip', zip_safe=False, packages=[ 
'redblackpy', 'redblackpy.series', 'redblackpy.benchmark', 'redblackpy.tree_cython_api'], package_data={'redblackpy.series': ['*.pxd']}, include_package_data=True, license='Apache License 2.0', long_description='RedBlackPy is a light Python library that provides data structures \ aimed to fast insertion, removal and self sorting to manipulating ordered data in efficient way.\ The core part of the library had been written on C++ and then was wrapped in Cython. \ Hope that many would find the primary data structures of this library very handy in working \ with time series. One of the main feature of this structures is an access by arbitrary \ key using interpolation, what makes processing of multiple non synchronized time series very simple.\ All data structures based on red black trees.', classifiers = [ 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3' ] )
44.251969
108
0.57242
0
0
0
0
0
0
0
0
2,673
0.475623
4a73d46ee78874a78fab6b3b0aaa918a453b1649
8,296
py
Python
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
null
null
null
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
5
2020-02-11T22:31:59.000Z
2021-06-10T17:45:14.000Z
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
null
null
null
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.views import PasswordResetView as BasePasswordResetView, SuccessURLAllowedHostsMixin from django.shortcuts import get_object_or_404, resolve_url from django.utils.crypto import get_random_string from django.utils.decorators import method_decorator from django.utils.http import is_safe_url from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_protect from django.views.decorators.debug import sensitive_post_parameters from django.utils.translation import gettext_lazy as _ from django.views.generic import RedirectView from django.views.generic.edit import FormView from django.conf import settings from .utils import ( get_login_form, send_activation_email, get_password_reset_form, send_reset_password_email, send_activation_change_email, is_username_disabled, get_resend_ac_form ) from .forms import SignUpForm, ProfileEditForm, ChangeEmailForm from .models import Activation UserModel = get_user_model() class SuccessRedirectView(SuccessURLAllowedHostsMixin, FormView): redirect_field_name = REDIRECT_FIELD_NAME def get_success_url(self): url = self.get_redirect_url() return url or resolve_url(settings.LOGIN_REDIRECT_URL) def get_redirect_url(self): redirect_to = self.request.POST.get( self.redirect_field_name, self.request.GET.get(self.redirect_field_name, '') ) url_is_safe = is_safe_url( url=redirect_to, allowed_hosts=self.get_success_url_allowed_hosts(), require_https=self.request.is_secure(), ) return redirect_to if url_is_safe else '' def get_form_kwargs(self): kwargs = super().get_form_kwargs() kwargs['request'] = self.request return kwargs class SignInView(SuccessRedirectView): template_name = 'accounts/login.html' form_class = get_login_form() success_url = '/' @method_decorator(sensitive_post_parameters('password')) @method_decorator(csrf_protect) @method_decorator(never_cache) def dispatch(self, request, *args, **kwargs): # Sets a test cookie to make sure the user has cookies enabled request.session.set_test_cookie() return super(SignInView, self).dispatch(request, *args, **kwargs) def form_valid(self, form): # If the test cookie worked, go ahead and # delete it since its no longer needed if self.request.session.test_cookie_worked(): self.request.session.delete_test_cookie() login(self.request, form.get_user()) return super(SignInView, self).form_valid(form) class SignUpView(FormView): template_name = 'accounts/register.html' form_class = SignUpForm success_url = '/' def form_valid(self, form): user = form.save(commit=False) if is_username_disabled(): # Set temporary username user.username = get_random_string() else: user.username = form.cleaned_data.get('username') if settings.ENABLE_USER_ACTIVATION: user.is_active = False user.save() # Change the username to "user_ID" form if is_username_disabled(): user.username = 'user_{}'.format(user.id) user.save() if settings.ENABLE_USER_ACTIVATION: send_activation_email(self.request, user) messages.add_message(self.request, messages.SUCCESS, _('You are registered. 
To activate the account, follow the link sent to the mail.')) else: raw_password = form.cleaned_data.get('password1') user = authenticate(username=user.username, password=raw_password) login(self.request, user) messages.add_message(self.request, messages.SUCCESS, _('You are successfully registered!')) return super(SignUpView, self).form_valid(form) class ActivateView(RedirectView): permanent = False query_string = True pattern_name = 'index' def get_redirect_url(self, *args, **kwargs): assert 'code' in kwargs act = get_object_or_404(Activation, code=kwargs['code']) # Activate user's profile user = act.user user.is_active = True user.save() # Remove activation record, it is unneeded act.delete() messages.add_message(self.request, messages.SUCCESS, _('You have successfully activated your account!')) login(self.request, user) return super(ActivateView, self).get_redirect_url() class ReSendActivationCodeView(SuccessRedirectView): template_name = 'accounts/resend_activation_code.html' form_class = get_resend_ac_form() success_url = '/' def form_valid(self, form): user = form.get_user() activation = user.activation_set.get() activation.delete() send_activation_email(self.request, user) messages.add_message(self.request, messages.SUCCESS, _('A new activation code has been sent to your e-mail.')) return super(ReSendActivationCodeView, self).form_valid(form) class PasswordResetView(BasePasswordResetView): form_class = get_password_reset_form() def form_valid(self, form): send_reset_password_email(self.request, form.get_user()) return super(PasswordResetView, self).form_valid(form) class ProfileEditView(LoginRequiredMixin, FormView): template_name = 'accounts/profile/edit.html' form_class = ProfileEditForm success_url = '/accounts/profile/edit/' def get_initial(self): initial = super(ProfileEditView, self).get_initial() user = self.request.user initial['first_name'] = user.first_name initial['last_name'] = user.last_name return initial def form_valid(self, form): user = self.request.user user.first_name = form.cleaned_data.get('first_name') user.last_name = form.cleaned_data.get('last_name') user.save() messages.add_message(self.request, messages.SUCCESS, _('Profile data has been successfully updated.')) return super(ProfileEditView, self).form_valid(form) class ChangeEmailView(LoginRequiredMixin, FormView): template_name = 'accounts/profile/change_email.html' form_class = ChangeEmailForm success_url = '/accounts/change/email/' def get_form_kwargs(self): kwargs = super(ChangeEmailView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def get_initial(self): initial = super(ChangeEmailView, self).get_initial() user = self.request.user initial['email'] = user.email return initial def form_valid(self, form): user = self.request.user email = form.cleaned_data.get('email') email = email.lower() if hasattr(settings, 'EMAIL_ACTIVATION_AFTER_CHANGING') and settings.EMAIL_ACTIVATION_AFTER_CHANGING: send_activation_change_email(self.request, user, email) messages.add_message(self.request, messages.SUCCESS, _('To complete the change of mail, click on the link sent to it.')) else: user.email = email user.save() messages.add_message(self.request, messages.SUCCESS, _('Email successfully changed.')) return super(ChangeEmailView, self).form_valid(form) class ChangeEmailActivateView(RedirectView): permanent = False query_string = True pattern_name = 'change_email' def get_redirect_url(self, *args, **kwargs): assert 'code' in kwargs act = get_object_or_404(Activation, code=kwargs['code']) # 
Change user's email user = act.user user.email = act.email user.save() # Remove activation record, it is unneeded act.delete() messages.add_message(self.request, messages.SUCCESS, _('You have successfully changed your email!')) return super(ChangeEmailActivateView, self).get_redirect_url()
32.155039
118
0.690333
7,095
0.855231
0
0
365
0.043997
0
0
1,131
0.136331
4a7405fc354c53785ef8307b7ce20355175f5c8f
7,320
py
Python
conversationkg/kgs/writers.py
INDElab/conversationkg
8bfe09b0afb4954f633a9287f723c61dcd21ce46
[ "Apache-2.0" ]
3
2021-01-18T10:07:44.000Z
2021-05-27T07:39:35.000Z
conversationkg/kgs/writers.py
INDElab/conversationkg
8bfe09b0afb4954f633a9287f723c61dcd21ce46
[ "Apache-2.0" ]
3
2020-12-09T23:20:27.000Z
2021-03-06T11:08:24.000Z
conversationkg/kgs/writers.py
INDElab/conversationkg
8bfe09b0afb4954f633a9287f723c61dcd21ce46
[ "Apache-2.0" ]
1
2021-02-19T12:10:11.000Z
2021-02-19T12:10:11.000Z
from ..conversations.corpus import Conversation
from ..conversations.emails import Email
from collections import Counter
import matplotlib
import pandas as pd
import json

# Note: Person, WholePerson, KG and conversations_modules are expected to be
# provided by the surrounding package; they are not defined in this file.


class JSONWriter:
    def __init__(self, kg):
        self.kg = kg
        self.entities = kg.entities()
        self.triples = kg.triples
        self.provenances = kg.provenances

    def store(self, name, save_mapping=True):
        with open(f"{name}.json", "w") as handle:
            json.dump(self.translated, handle)
        with open(f"{name}.provenances.json", "w") as handle:
            json.dump(self.provenances, handle)
        if save_mapping:
            reversed_d = self.reverse_mapping(self.entity2ind)
            json_d = {i: e.to_json() for i, e in reversed_d.items()}
            with open(f"{name}.ind2entity.json", "w") as handle:
                json.dump(json_d, handle)
            reverse_d = self.reverse_mapping(self.pred2ind)
            with open(f"{name}.ind2pred.json", "w") as handle:
                json.dump(reverse_d, handle)

    @classmethod
    def restore(cls, name, load_mapping_of=None):
        def get_class(cls_name):
            for mod in conversations_modules:
                try:
                    cls = getattr(mod, cls_name)
                    return cls
                except AttributeError:
                    pass
            raise AttributeError(f"{cls_name} could not be found in any of the modules!")

        def json_to_entity(json_dict):
            try:
                json_dict["class"]
            except KeyError:
                print(json_dict.keys())
                raise
            cls_name = json_dict["class"]
            cls = get_class(cls_name)
            return cls.from_json(json_dict)

        if load_mapping_of is None:
            load_mapping_of = name

        with open(f"{load_mapping_of}.ind2entity.json") as handle:
            loaded_entity_mapping = {int(i): d for i, d in json.load(handle).items()}
        ind2entity = {i: json_to_entity(d) for i, d in loaded_entity_mapping.items()}
        ind2entity = {i: (Person(x) if type(x) is WholePerson else x)
                      for i, x in ind2entity.items()}

        with open(f"{load_mapping_of}.ind2pred.json") as handle:
            ind2pred = {int(i): d for i, d in json.load(handle).items()}

        with open(f"{name}.json") as handle:
            loaded = json.load(handle)
        restored_triples = [(ind2entity[s], ind2pred[p], ind2entity[o]) for s, p, o in loaded]

        with open(f"{name}.provenances.json") as handle:
            provenances = json.load(handle)

        kg = KG(restored_triples, provenances)
        kg.translated = loaded
        kg.entity2ind = kg.reverse_mapping(ind2entity)
        kg.pred2ind = kg.reverse_mapping(ind2pred)
        return kg

    @staticmethod
    def reverse_mapping(d):
        rev_d = {}
        for k, v in d.items():
            if not v in rev_d:
                rev_d[v] = k
            else:
                print("duplicate:", v)
                if not type(v) is Person:
                    raise ValueError("Non-bijective mapping!")
        return rev_d


class CSVWriter:
    def __init__(self, kg):
        self.kg = kg
        self.entities = kg.entities()
        self.triples = kg.triples
        self.provenances = kg.provenances

    def get_node_df(self):
        records = []
        sorted_ents = sorted(self.entities, key=lambda x: (str(type(x)), str(x)))
        for i, e in enumerate(sorted_ents):
            node_id = i  # hash(e)
            node_t = str(e)
            node_type = type(e).__name__
            node_u = f"icons/{node_type.lower()}.png"
            type_ = "LinkChart" if i == 0 else "0"
            if type(e) in {Conversation, Email}:
                node_dtopic = e.topic.topic.index
                node_dtopic_rate = round(e.topic.score, 5)
            else:
                node_dtopic = -1
                node_dtopic_rate = 1.0
            lat = lng = 0.0
            records.append(
                (
                    type_, node_type, node_id, node_u, node_t,
                    node_dtopic, node_dtopic_rate, lat, lng
                )
            )
        return pd.DataFrame.from_records(
            records,
            columns=['type', 'node_type', 'node_id', 'node_u', 'node_t',
                     'node_dtopic', 'node_dtopic_rate', 'lat', 'lng']
        )

    def get_link_df(self):
        link_types = {p for s, p, o in self.triples}
        link_counts = Counter(self.triples)
        colours = dict(zip(link_types, list(matplotlib.colors.cnames.values())))
        sorted_ents = dict(zip(sorted(self.entities, key=str), range(len(self.entities))))
        records = []
        for i, ((s, p, o), prov) in enumerate(zip(self.triples, self.provenances)):
            linkId = i  # hash((s, p, o))  # s.time.timestamp()
            end1 = sorted_ents[s]  # hash(s)
            end2 = sorted_ents[o]  # hash(o)
            linkcount = link_counts[(s, p, o)]
            linkcolor = colours[p]
            linktype = p
            itemID = prov
            rec = [linkId, end1, end2, linkcount, linkcolor, itemID, linktype]
            records.append(rec)
        return pd.DataFrame.from_records(
            records,
            columns=['linkId', 'end1', 'end2', 'linkcount', 'linkcolor', 'itemID', 'linktype'])

    def to_csv(self, save_path):
        node_df = self.get_node_df()
        link_df = self.get_link_df()
        node_df.to_csv(save_path + ".nodes.csv", index=False)
        link_df.to_csv(save_path + ".links.csv", index=False)


from neo4j import GraphDatabase


class Neo4jWriter:
    def __init__(self, kg):
        self.kg = kg

    def to_neo4j(self):
        pass

    def run(self, clear=True):
        self.driver = GraphDatabase.driver("bolt://localhost:7687",
                                           auth=("neo4j", "pwd"), encrypted=False)
        if clear:
            # The original referenced an undefined `tx`; open a session to run the query.
            with self.driver.session() as session:
                session.run("""MATCH (x) DETACH DELETE x""")
30.247934
124
0.454645
7,053
0.963525
0
0
2,344
0.320219
0
0
676
0.09235
4a74f67398645a5ea142cd4ebc8cc51cbdd14233
590
py
Python
model-test.py
shikew/Handwriting-calculator
5e0da9f8ceac6dcc815139c6855dfc6fb5af909f
[ "Apache-2.0" ]
null
null
null
model-test.py
shikew/Handwriting-calculator
5e0da9f8ceac6dcc815139c6855dfc6fb5af909f
[ "Apache-2.0" ]
null
null
null
model-test.py
shikew/Handwriting-calculator
5e0da9f8ceac6dcc815139c6855dfc6fb5af909f
[ "Apache-2.0" ]
1
2019-09-11T11:48:47.000Z
2019-09-11T11:48:47.000Z
import numpy as np
from PIL import Image
from keras.models import load_model

# Load the handwritten digit image and flatten it into a single feature vector
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('shape of the prepared image:', number.flatten().shape)
print('original number:', number)
number = number.astype('float32')
number = number / 255  # normalize
number = number.flatten()
print('number.shape after processing:', number.shape)

# Load the trained MNIST model and predict the digit class
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
#     return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
# print('test label:', test_target[8000])
28.095238
48
0.749153
0
0
0
0
0
0
0
0
268
0.422713
4a752e0adb3dfdb8832eacdb68f81c47021fa651
378
gyp
Python
deps/libgdal/gyp-formats/ogr_mem.gyp
khrushjing/node-gdal-async
6546b0c8690f2db677d5385b40b407523503b314
[ "Apache-2.0" ]
42
2021-03-26T17:34:52.000Z
2022-03-18T14:15:31.000Z
deps/libgdal/gyp-formats/ogr_mem.gyp
khrushjing/node-gdal-async
6546b0c8690f2db677d5385b40b407523503b314
[ "Apache-2.0" ]
29
2021-06-03T14:24:01.000Z
2022-03-23T15:43:58.000Z
deps/libgdal/gyp-formats/ogr_mem.gyp
khrushjing/node-gdal-async
6546b0c8690f2db677d5385b40b407523503b314
[ "Apache-2.0" ]
8
2021-05-14T19:26:37.000Z
2022-03-21T13:44:42.000Z
{
	"includes": [
		"../common.gypi"
	],
	"targets": [
		{
			"target_name": "libgdal_ogr_mem_frmt",
			"type": "static_library",
			"sources": [
				"../gdal/ogr/ogrsf_frmts/mem/ogrmemdatasource.cpp",
				"../gdal/ogr/ogrsf_frmts/mem/ogrmemlayer.cpp",
				"../gdal/ogr/ogrsf_frmts/mem/ogrmemdriver.cpp"
			],
			"include_dirs": [
				"../gdal/ogr/ogrsf_frmts/mem"
			]
		}
	]
}
18.9
55
0.595238
0
0
0
0
0
0
0
0
285
0.753968
4a75b7b70277fd3cd807924be5321a95f06ea318
72,121
py
Python
iblviewer/volume.py
nantille/iblviewer
a5dad67e8f4b99a535297ba0803caf07b1107ca1
[ "MIT" ]
null
null
null
iblviewer/volume.py
nantille/iblviewer
a5dad67e8f4b99a535297ba0803caf07b1107ca1
[ "MIT" ]
null
null
null
iblviewer/volume.py
nantille/iblviewer
a5dad67e8f4b99a535297ba0803caf07b1107ca1
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field from typing import Mapping, List, Any from datetime import datetime import logging import pandas as pd import glob import numpy as np import logging import os from collections import OrderedDict import nrrd import vtk import vedo from vtk.util.numpy_support import numpy_to_vtk from iblviewer.collection import Collection import iblviewer.objects as obj import iblviewer.utils as utils @dataclass class VolumeModel: RAW = 'raw' SEGMENTED = 'segmented' NORMALIZED_SUFFIX = '_norm' DATA_TYPE = {RAW:0, SEGMENTED:1} PREFIX = 'Volume' __count = 0 def unique_name(): VolumeModel.__count += 1 return f'{VolumeModel.PREFIX}_{VolumeModel.__count}' name: str = field(default_factory=unique_name) file_path: str = None scalars: Collection = field(default_factory=Collection) axes: List = field(default_factory=lambda: [1, 1, 1]) data_min: float = None data_max: float = None data_map_step: float = 1.0 data: np.ndarray = None data_type: str = RAW resolution: int = 1 # Default units are microns. units: float = 1e-06 base_color_map: Any = None # At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings mapping_name: str = None lateralized: bool = False # Mapping function. If None, the volume will be given as it is. mapping: Any = None luts: Collection = field(default_factory=Collection) slicers: Collection = field(default_factory=Collection) isosurfaces: Collection = field(default_factory=Collection) interactive_subsampling: bool = True volume_visible: bool = True slices_visible: bool = True transpose_shape: Any = None dimensions: np.ndarray = np.zeros(3).astype(float) center: np.ndarray = np.zeros(3).astype(float) def compute_size(self): """ Compute volume size """ if self.data is None: return self.dimensions = np.array(self.data.shape)[:3] if self.resolution is None: return self.resolution = int(self.resolution) # TODO: move this to constructor or init self.dimensions *= self.resolution self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2 def compute_range(self, force=False): """ Compute min and max range in the volume :return: Min and max values """ if self.data_min is not None and self.data_max is not None and not force: return self.data_min, self.data_max self.data_min = np.min(self.data) self.data_max = np.max(self.data) #print('Volume min-max', self.data_min, self.data_max) return self.data_min, self.data_max def guess_volume_type(self): """ Infer the volume type when it was not specified by the user. We assume here that typical values between -1 and 1 are raw volumes. """ if self.data_type is None: if self.data_min is None or self.data_max is None: self.compute_range() if self.data_min >= -1 and self.data_max <= 1: guess = VolumeModel.RAW else: guess = VolumeModel.SEGMENTED self.data_type = guess def is_segmented(self, auto_guess=True): """ Get whether current volume/image is segmented :return: Boolean """ if self.data_type is None and auto_guess: self.guess_volume_type() return self.data_type == VolumeModel.SEGMENTED def read_volume(self, file_path): """ Read local volume. Downloads the file first if it's remote. 
:param file_path: Volume path :return: 3D array """ if file_path.startswith('http') or file_path.startswith('ftp'): downloaded_temp_file_path = vedo.download(file_path, verbose=False) if file_path.endswith('nrrd'): data, header = nrrd.read(downloaded_temp_file_path) else: data = vedo.loadImageData(downloaded_temp_file_path) else: if file_path.endswith('nrrd'): data, header = nrrd.read(file_path, index_order='C') else: data = vedo.loadImageData(file_path) return data def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True): """ Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK :param file_path: Volume file path. Could support other file types easily. :param remap_scalars: Whether scalar values in the volume are replaced by their row id from a mapping that stores. This is necessary in the case of segmented volumes with regions that have a discontinuous id. :param mapping: Pandas Series or a Dictionary :param make_current: Set the volume data as the current one :return: 3D array """ data = None if not remap_scalars or mapping is None: data = self.import_volume(file_path) else: time = datetime.now() new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX) if os.path.exists(new_file_path): data = self.import_volume(new_file_path) else: data = self.import_volume(file_path) data, mapping = self.remap_slow(data, mapping, new_file_path) logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's') ''' if volume is not None: logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's') min_value, max_value = np.amin(data), np.amax(data) logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value)) else: logging.error('Failed to open atlas ' + new_file_path) ''' if make_current and data is not None: self.data = data return data, mapping def transpose(self, shape=None): """ Transpose the volume for visualization in VTK :param shape: The new shape. If None, will default to self.transpose_shape """ if shape is None: shape = self.transpose_shape if shape is None: return self.data = np.transpose(self.data, shape) def remap_slow(self, data, mapping=None, write_path=None): """ Reassign volume values (slow on large volumes!) 
so that they're continuous :param data: Volume ndarray :param write_path: Where the modified volume will be stored (to spare going through this method next time) :param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones :return: Modified volume data """ logging.info('\nBuilding appropriate volume from Allen data source...') #volume = np.vectorize(self.f)(data) labels = np.sort(np.unique(data)) num_labels = len(labels) if mapping is None: mapping = pd.Series(labels) logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas') logging.info('Reassigning ' + str(num_labels) + ' scalar values...') for iter_id in range(num_labels): label = labels[iter_id] ids = mapping.index[mapping == label].to_list() if len(ids) < 1: continue # On a large volume, this takes a long time data[data == label] = ids[0] if num_labels > 10000 and iter_id % 10 == 0: logging.info(' Progress: ' + str(int(iter_id/num_labels)*100) + '%') if write_path is not None: logging.info('Saving volume data under ' + write_path) nrrd.write(write_path, data, index_order='C') return data, mapping def build_lut(self, scalar_map=None, scalar_range=None, color_map=None, alpha_map=None, zero_is_transparent=True, noise_amount=0.0, nan_rgba=None, make_active=True): """ Build a look-up table (LUT, sometimes known as transfer function) for the volume :param scalar_map: A 2D list with values in first column from the volume itself and values from the second column being your scalar values that correspond to such region :param scalar_range: Min and max values in a list :param color_map: Color map name to apply :param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that says how transparent a scalar value should be :param zero_is_transparent: Whether zero values are made transparent, True by default :param noise_amount: Whether a noise value is applied on the colors :param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values :param make_active: Whether this one is made active (you still have to update the views after that) :return: LUTModel """ lut_model = LUTModel() lut_model.build(scalar_map, scalar_range, color_map, alpha_map, zero_is_transparent, noise_amount, nan_rgba) self.luts.store(lut_model, set_current=make_active) return lut_model def blend_maps(map1, map2, time, total_time): """ Blend color maps """ weight1 = max(0.0, total_time - time) weight2 = max(0.0, time) return map1 * weight1 + map2 * weight2 class Volume(vedo.Volume): """ Overwriting of vedo.Volume constructor that is ill-designed as it transposes the given numpy array without us knowing about it, not giving us the option to choose about that. 
""" def __init__(self, inputobj=None, c='RdBu_r', alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0), alphaGradient=None, alphaUnit=1, mode=0, shade=False, spacing=None, dims=None, origin=None, mapper='smart'): vtk.vtkVolume.__init__(self) vedo.BaseGrid.__init__(self) self.axes = [1, 1, 1] ################### if isinstance(inputobj, str): if "https://" in inputobj: from vedo.io import download inputobj = download(inputobj, verbose=False) # fpath elif os.path.isfile(inputobj): pass else: inputobj = sorted(glob.glob(inputobj)) ################### if 'gpu' in mapper: self._mapper = vtk.vtkGPUVolumeRayCastMapper() elif 'opengl_gpu' in mapper: self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper() elif 'smart' in mapper: self._mapper = vtk.vtkSmartVolumeMapper() elif 'fixed' in mapper: self._mapper = vtk.vtkFixedPointVolumeRayCastMapper() elif isinstance(mapper, vtk.vtkMapper): self._mapper = mapper else: print("Error unknown mapper type", [mapper]) raise RuntimeError() self.SetMapper(self._mapper) ################### inputtype = str(type(inputobj)) #colors.printc('Volume inputtype', inputtype) if inputobj is None: img = vtk.vtkImageData() elif vedo.utils.isSequence(inputobj): if isinstance(inputobj[0], str): # scan sequence of BMP files ima = vtk.vtkImageAppend() ima.SetAppendAxis(2) pb = vedo.utils.ProgressBar(0, len(inputobj)) for i in pb.range(): f = inputobj[i] picr = vtk.vtkBMPReader() picr.SetFileName(f) picr.Update() mgf = vtk.vtkImageMagnitude() mgf.SetInputData(picr.GetOutput()) mgf.Update() ima.AddInputData(mgf.GetOutput()) pb.print('loading...') ima.Update() img = ima.GetOutput() else: if "ndarray" not in inputtype: inputobj = np.array(inputobj) if len(inputobj.shape)==1: varr = vedo.numpy2vtk(inputobj, dtype=np.float) else: # ------------------------------ Nasty lines commented here #if len(inputobj.shape)>2: #inputobj = np.transpose(inputobj, axes=[2, 1, 0]) varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=np.float) varr.SetName('input_scalars') img = vtk.vtkImageData() if dims is not None: img.SetDimensions(dims) else: if len(inputobj.shape)==1: vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r') raise RuntimeError() img.SetDimensions(inputobj.shape) img.GetPointData().SetScalars(varr) #to convert rgb to numpy # img_scalar = data.GetPointData().GetScalars() # dims = data.GetDimensions() # n_comp = img_scalar.GetNumberOfComponents() # temp = utils.vtk2numpy(img_scalar) # numpy_data = temp.reshape(dims[1],dims[0],n_comp) # numpy_data = numpy_data.transpose(0,1,2) # numpy_data = np.flipud(numpy_data) elif "ImageData" in inputtype: img = inputobj elif isinstance(inputobj, vedo.Volume): img = inputobj.GetMapper().GetInput() elif "UniformGrid" in inputtype: img = inputobj elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata if hasattr(inputobj, "Update"): inputobj.Update() img = inputobj.GetOutput() elif isinstance(inputobj, str): from vedo.io import loadImageData, download if "https://" in inputobj: inputobj = download(inputobj, verbose=False) img = loadImageData(inputobj) else: vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r') return if dims is not None: img.SetDimensions(dims) if origin is not None: img.SetOrigin(origin) ### DIFFERENT from volume.origin()! 
if spacing is not None: img.SetSpacing(spacing) self._data = img self._mapper.SetInputData(img) self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient) self.GetProperty().SetShade(True) self.GetProperty().SetInterpolationType(1) self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit) # remember stuff: self._mode = mode self._color = c self._alpha = alpha self._alphaGrad = alphaGradient self._alphaUnit = alphaUnit @dataclass class LUTModel: """ This class might look slightly convoluted but it's actually simple. We use double mapping here in order to enable live/interactive visualization of volumetric data. Instead of replacing values in a 3D volume, we only replace the colors in the 1D LUT list. The point is that it's too slow to update a given data, like a segmented volume with custom values. Instead, we map such custom values to a 1D array (our LUT) that maps colors to raw volume values. This is much faster in terms of rendering and it enables interactive visualization. The scalar_lut is the original LUT for the given scalars (custom values) and the mapped_lut is the LUT assigned to the surfaces (like slices) that have copied data from the volume. The volume is given color_map and alpha_map through vedo methods. You might say "ok for double mapping, it's the only way for interactive rendering of a volume, but what about color_map and mapped_lut? Aren't they the same?". The answer is: they're the same but VTK does not accept a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction and a vtkPiecewiseFunction for alpha. There's no way around it. The color_map will be computed as a vtkColorTransferFunction and the alpha_map as the vtkPiecewiseFunction. """ name: str = NotImplementedError color_map_function: Any = None scalar_map: np.ndarray = None scalar_min: float = 0.0 scalar_max: float = 1.0 scalar_lut: vtk.vtkLookupTable = None mapped_lut: vtk.vtkLookupTable = None color_map: np.ndarray = None alpha_map: np.ndarray = None base_color_map: np.ndarray = None def build(self, scalar_map=None, scalar_range=None, color_map=None, alpha_map=None, zero_is_transparent=True, noise_amount=0.0, nan_rgba=None): """ Build several look-up tables (LUT, sometimes known as transfer function) for the volume. This is where double-mapping occurs for segmented volumes that have values from 0 to n where each value defines a sub-volume or region. If we want to assign values (say from another model) to these regions, we'd have to change the volume values and it would be too slow iterating over each voxel in 3D. Instead we define colors that represent these values and assign them to segmented regions in a 1D list. 
:param scalar_map: A 2D list with values in first column from the volume itself and values from the second column being your scalar values that correspond to such region :param scalar_range: Min and max values in a list :param color_map: Color map name to apply :param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that says how transparent a scalar value should be :param zero_is_transparent: Whether zero values are made transparent, True by default :param noise_amount: Whether a noise value is applied on the colors :param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values :return: LUTModel """ if color_map is None: return if nan_rgba is None: nan_rgba = [0.0, 0.0, 0.0, 0.0] if self.base_color_map is None: self.base_color_map = color_map colors = [] alphas = [] lut = vtk.vtkLookupTable() scalar_lut = vtk.vtkLookupTable() # Use the number of values in the volume num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map) num_steps = 2655 s_min = 0 s_max = num_steps if scalar_map is None: if color_map is None and self.base_color_map is not None: color_map = self.base_color_map loop = range(num_steps) noise = None if isinstance(noise_amount, float) and noise_amount > 0: noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2 # Vedo works with nested lists: # [region_id, [r, g, b]] for color, and [region_id, a] for alpha if scalar_map is None: # Standard volume that is not segmented lut.SetRange(s_min, s_max) lut.SetNumberOfTableValues(num_steps) scalar_lut.SetRange(s_min, s_max) scalar_lut.SetNumberOfTableValues(num_steps) for r_id in loop: color = vedo.colors.getColor(color_map[r_id]) color = np.array(color) if noise is not None: color = color + noise[r_id] color = np.maximum(color, 0.0) color = np.minimum(color, 1.0) colors.append([r_id, color]) alpha = 1.0 if alpha_map is None else alpha_map[r_id] if r_id == 0 and zero_is_transparent: alpha = 0.0 alphas.append([r_id, alpha]) lut.SetTableValue(r_id, *color, alpha) scalar_lut.SetTableValue(r_id, *color, alpha) #scalar_map[r_id] = color_map[r_id] else: # Segmented volume s_min, s_max = scalar_range lut.SetRange(0, num_steps) lut.SetNumberOfTableValues(num_steps) color = None for r_id in range(num_steps): try: value = scalar_map[r_id] except Exception: value = None if value is None:# or s_min > value or s_max < value: color = nan_rgba[:3] alpha = nan_rgba[3] else: color = vedo.colorMap(value, color_map, s_min, s_max) alpha = 1.0 if alpha_map is None else alpha_map[r_id] if value == 0 and zero_is_transparent: alpha = 0.0 colors.append([r_id, color]) alphas.append([r_id, alpha]) lut.SetTableValue(r_id, *color, alpha) # Real scalar LUT, mainly as a reference for the user # Here the colors resulting from the given scalar min to max # are assigned to segmented values in the volume mock_values = np.linspace(s_min, s_max, num_steps) scalar_lut.SetRange(s_min, s_max) scalar_lut.SetNumberOfTableValues(len(mock_values)) for r_id in range(len(mock_values)): color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max)) alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0 scalar_lut.SetTableValue(r_id, *color, 1.0) lut.Build() scalar_lut.Build() # Just to avoid confusion: the user can give a string as a color map, like 'viridis' # but the real color map object is stored in self.color_map. 
The name 'viridis' # is stored under self.color_map_function (if needed later on) self.color_map_function = color_map self.color_map = colors self.alpha_map = alphas self.scalar_map = scalar_map self.mapped_lut = lut self.scalar_lut = scalar_lut def get_sorted_scalars(self): """ Get a numpy 2D array of key-value pairs sorted by value :return: 2D array """ sorted_scalars = np.zeros((len(self.scalar_map), 2)) values = list(self.scalar_map.values()) keys = list(self.scalar_map.keys()) sorted_scalars[:, 0] = keys sorted_scalars[:, 1] = values sorted_mask = sorted_scalars[:, 1].argsort() sorted_scalars = sorted_scalars[sorted_mask] return sorted_scalars class VolumeController(): """ Wrapper class that handles both the volume and its slices """ def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True, center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True): """ Constructor :param plot: Plot instance :param model: VolumeModel instance :param initialize: Whether the initalization :param clipping: Whether clipping is enabled at init time :param slicer_box: Whether the slicer box is enabled at init :param center_on_edges: Whether the volume is offest by half a voxel or not :param alpha_unit_upper_offset: The offset to apply to alpha unit computation. If greater than 0, the volume will be less opaque :param add_to_scene: Whether the volume is added to scene after init """ self.plot = plot self.model = model self.actor = None self.picker = None self.scalars = None self.mask = None self.bounding_mesh = None self.alpha_unit_upper_offset = alpha_unit_upper_offset self.alpha_factor = 0.001 # * self.model.resolution self.clipping_planes = None self.enable_volume_clipping = True self.clipping_axes = [] self.slicers = OrderedDict() self.slicers_selectable = False self.scalar_bar = None if initialize: self.initialize(clipping, slicer_box, center_on_edges, add_to_scene) #msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos()) #logging.info(msg) def get_related_actors(self): """ Get all 3D actors related to this view (for registering it in the application) :return: List of VTK objects """ actors = [] for slicer_id in self.slicers: actor = self.slicers[slicer_id].actor if actor is not None: actors.append(actor) for iso_id in self.model.isosurfaces: actors.append(self.model.isosurfaces[iso_id]) actors.append(self.actor) return actors def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True): """ Set the volume actor for visualization in VTK :param clipping: Whether clipping is enabled :param slicer_box: Whether the slicer box mode is enabled (6 clipping planes) :param center_on_edges: Whether the volume's center is aligned to its edges rather than the voxel center :param add_to_scene: Whether the object is added to the scene """ self.build_actor(center_on_edges, add_to_scene) self.initialize_picker() if slicer_box: self.initialize_slicer_box() self.initialize_clipping_planes() self.set_volume_clipping(clipping) self.set_color_map() ''' if use_mask: self.mask = self.actor.clone() self.mask.threshold(1, replace=1, replaceOut=0) self.actor.mapper().SetMaskTypeToBinary() self.actor.mapper().SetMaskInput(self.mask) ''' def set_volume_visibility(self, on=True): """ Set volume visibility :param on: Visibility boolean """ if self.actor is not None: self.actor.SetVisibility(on) def set_slices_visibility(self, on=True): """ Set the visibility of slices :param on: Visibility boolean """ for slicer_id in 
self.slicers: slicer_view = self.slicers.get(slicer_id) slicer_view.actor.SetVisibility(on) def get_slices_opacity(self): """ Get the opacity of slices (should be the same value for all slices) A mean calculation is performed on all slices alpha, just in case :return: Alpha value """ value = 0 num_values = 0 for slicer_id in self.slicers: slicer = self.slicers[slicer_id] if slicer.actor is not None: slice_alpha = slicer.actor.GetProperty().GetOpacity() if slice_alpha is None: continue value += slice_alpha num_values += 1 if num_values == 0 or value == 0: return None return value / num_values def set_slices_opacity(self, value): """ Set the opacity of slices :param value: Alpha value """ for slicer_id in self.slicers: slicer = self.slicers[slicer_id] if slicer.actor is not None: slicer.actor.alpha(value) def get_opacity(self): """ Get the relative opacity unit :return: Float """ return self.get_relative_opacity_unit() def get_relative_opacity_unit(self): """ Get the alpha unit relative value :return: Float """ alpha_unit = self.actor.alphaUnit() r = self.model.resolution # Inverse function of set_opacity_unit() value = 1.1 - (alpha_unit / r)**0.5 return value def set_opacity(self, value): """ Set the opacity of the volume like in set_opacity_unit() :param value: Opacity value between 0.0 and 1.0 :return: Resulting alpha unit """ self.set_opacity_unit(value) def set_opacity_unit(self, value): """ Set the opacity of the volume by modifying its alpha unit (a VTK thing). The alpha unit defines how much a voxel is transparent to incoming ray. This method normalizes the range between 0.0 and 1.0 as it depends on the resolution of the volume :param value: Opacity value between 0.0 and 1.0 :return: Resulting alpha unit """ r = self.model.resolution # 1 is chosen and not 1.0 because when value == 1.0, that would # mean that the volume is fully opaque and this yields artifacts with VTK alpha_unit = (1 + self.alpha_unit_upper_offset - value)**2 * r # vedo calls it "alpha" unit, vtk "opacity" unit. same-same! self.actor.alphaUnit(alpha_unit) return alpha_unit def get_spacing(self): """ Get the spacing/resolution of the volume """ res = self.model.resolution spacing = None if isinstance(res, int) or isinstance(res, float): spacing = np.array([res]*3) elif len(res) == 3: spacing = res else: raise ValueError(f'Given volume resolution {self.model.resolution} is invalid') return spacing def build_actor(self, center_on_edges=False, add_to_scene=True): #[1, 2] """ Set the volume actor for visualization in VTK :param center_on_edges: Whether alignment by one voxel is applied :param add_to_scene: Whether the object is added to the scene """ spacing = self.get_spacing() self.actor = Volume(self.model.data, spacing=spacing, mapper='smart') self.scalars = self.actor._data.GetPointData().GetScalars() self.actor.name = self.model.name self.actor.shade(False) self.actor.mode(0) self.actor.pickable(True) self.set_interactive_subsampling(False) if center_on_edges: # Moving the volume by one voxel. This is possibly due the use of custom spacing. 
self.actor.pos(self.actor.pos() + spacing) center = np.array(self.actor.pos()) + self.actor.center() if np.linalg.norm(center - self.model.center) > 0: #print('Adjusting volume center from', self.model.center, 'to', center) self.model.center = center self.set_opacity_unit(0.9) self.actor.jittering(True) #self.actor._mapper.AutoAdjustSampleDistancesOn() #self.actor._mapper.SetBlendModeToAverageIntensity() #self.actor._mapper.SetSampleDistance(100) if add_to_scene: self.plot.add(self.actor, render=False) def set_position(self, position): """ Set the position of the volume """ self.actor.pos(position) # TODO: we're entering in unstable things when we move the volume # because there is not yet a guaranteed support for updating the slices # with the correct position self.reset_clipping_planes() def mirror_volume(self, axes): """ Mirror the volume on given axes :param mirror_axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which the volume will be mirrored. Optional """ if axes is None or self.actor is None: return axes_str = ['x', 'y', 'z'] for axis in axes: if isinstance(axis, int) and 0 <= axis <= 2: axis = axes_str[axis] if isinstance(axis, str) and len(axis) == 1: self.actor.mirror(axis=axis.lower()) def initialize_picker(self, opacity_iso_value=0.0001): """ Initialize the volume picker :param opacity_iso_value: Threshold that defines at what accumulated opacity the picker hits the volume. In the case of a segmented volume, you want to keep this value very low as the default one. """ # As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars # https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point picker = vtk.vtkVolumePicker() picker.PickCroppingPlanesOn() picker.UseVolumeGradientOpacityOff() picker.SetTolerance(opacity_iso_value) # A low OpacityIsoValue is necessary in the case of segmented volumes picker.SetVolumeOpacityIsovalue(opacity_iso_value) picker.AddPickList(self.actor) picker.PickFromListOn() self.picker = picker def initialize_slicer_box(self): """ Initialize 6 slicing planes as a box. """ for axis_id in range(6): slicer_model = SlicerModel(axis=axis_id) slicer_model.align_to_axis(axis_id, self.model.dimensions) self.model.slicers.store(slicer_model) # It's important in this case to have standalone=False self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False) def update_slicer(self, slicer_id, value=None, normal=None): """ Update a given slicer with the given value :param slicer_id: SlicerView id :param value: Value or 3D point :param normal: Normal """ slicer_view = self.slicers.get(slicer_id) if slicer_view is None: return # This is an important part where the slicing plane is itself sliced by other planes slicer_model = slicer_view.model slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis) # Use given value (or point) and normal to guide the below code result = slicer_model.update(value, normal) if not result: return # Update slicing image slicer_view.update() def initialize_clipping_planes(self): """ Initialize X, Y and Z clipping planes with two planes per axis for positive and negative slicing """ self.clipping_planes = vtk.vtkPlaneCollection() slicer_models = self.model.slicers for slicer_id in slicer_models: self.clipping_planes.AddItem(vtk.vtkPlane()) self.reset_clipping_planes() return def get_clipping_planes(self, except_axis=None): """ Get the current clipping planes except the ones on the given axis :param except_axis: Axis id to ignore. 
If None, all clipping planes will be returned :return: vtkPlaneCollection """ if not isinstance(except_axis, int): return self.clipping_planes exceptions = [except_axis * 2, except_axis * 2 + 1] planes = vtk.vtkPlaneCollection() for plane_id in range(self.clipping_planes.GetNumberOfItems()): if plane_id in exceptions: continue plane = self.clipping_planes.GetItem(plane_id) planes.AddItem(plane) return planes def reset_clipping_planes(self): """ Reset clipping planes """ slicer_models = self.model.slicers for slicer_id in slicer_models: slicer_model = slicer_models[slicer_id] plane_id = slicer_model.get_box_plane_id() plane = self.clipping_planes.GetItem(plane_id) plane.SetOrigin(slicer_model.origin + self.actor.pos()) plane.SetNormal(slicer_model.normal) def clip_on_axis(self, position=None, axis=None, normal=None): """ Apply clipping on a single axis :param position: Position :param axis: Clipping axis, defaults to 0 (X axis) :param thickness: Whether a thickness (so two clipping planes) are applied """ axis_offset = 0 # This should already be sorted in the model but in case it isn't, we double check here if normal is not None and normal[axis] < 0: # This means that the given axis has two # clipping planes and we take the negative one axis_offset += 1 #position = self.model.dimensions - position axis_storage_id = axis * 2 + axis_offset plane = self.clipping_planes.GetItem(axis_storage_id) plane.SetOrigin(position) plane.SetNormal(normal) def set_volume_clipping(self, on=None): """ Set volume clipping on or off. :param on: Whether clipping is enabled or disabled. If None, then the state is toggled. """ if on is None: self.enable_volume_clipping = not self.enable_volume_clipping else: self.enable_volume_clipping = on if self.enable_volume_clipping: self.actor.mapper().SetClippingPlanes(self.clipping_planes) else: self.actor.mapper().SetClippingPlanes(None) def clip_to_bounds(self, bounds): """ Clip the volume and move the slicing planes according to 6 boundary points :param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax) """ planes = vtk.vtkPlanes() planes.SetBounds(bounds) # Normals are reversed with the above code # so we fix that here with flip_normals=True self.set_clipping_planes(planes, flip_normals=True) def box_widget_update(self, widget=None, event=None): """ Clip the volume with the current box widget :param widget: vtkBoxCutter :param event: vtkEvent """ if widget is None: return planes = vtk.vtkPlanes() widget.GetPlanes(planes) self.set_clipping_planes(planes) def set_clipping_planes(self, planes, flip_normals=False): """ Clip the volume and move the slicing planes according the given planes :param planes: vtkPlanes """ vtk_n = planes.GetNormals() vtk_pts = planes.GetPoints() num_pts = vtk_pts.GetNumberOfPoints() for plane_id in range(num_pts): normal = vtk_n.GetTuple(plane_id) origin = vtk_pts.GetPoint(plane_id) plane = self.clipping_planes.GetItem(plane_id) current_origin = np.array(plane.GetOrigin()) # We don't need to check the normal because # we prevent box cutter rotation in our case if np.linalg.norm(current_origin - origin) < 0.1: continue plane.SetOrigin(origin) if flip_normals: normal = np.array(normal)*-1 plane.SetNormal(normal) self.update_slicer(plane_id, origin, normal) self.clipping_planes.Modified() self.actor.GetMapper().Update() def set_alpha_map(self, alpha_map, alpha_factor=None): """ Set alpha map to the volume view :param alpha_map: 2D list of scalar values and alpha values :param alpha_factor: Alpha factor """ if alpha_map is None: if 
self.model.luts.current is None: return alpha_map = self.model.luts.current.alpha_map if alpha_factor is None: alpha_factor = self.alpha_factor if len(np.array(alpha_map).shape) > 1: volume_alpha_map = np.ones_like(alpha_map).astype(float) volume_alpha_map[:] = alpha_map[:] volume_alpha_map[:, 1] *= alpha_factor self.actor.alpha(volume_alpha_map) else: self.actor.alpha(np.array(alpha_map) * alpha_factor) def set_color_map(self, color_map=None, alpha_map=None): """ Set the color and alpha map to the view objects :param color_map: Nested list of scalar values and rgb colors like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...] :param alpha_map: 2D list of scalar values and alpha values """ lut = self.model.luts.current if color_map is None and lut is not None: color_map = lut.color_map if alpha_map is None and lut is not None: alpha_map = lut.alpha_map if color_map is None: return self.actor.cmap(color_map) self.set_alpha_map(alpha_map) if lut is not None: for surface in self.model.isosurfaces: surface._mapper.SetLookupTable(lut.opaque_lut) for slicer_id in self.slicers: slicer = self.slicers[slicer_id] slicer.apply_lut(lut.mapped_lut) else: for slicer_id in self.slicers: slicer = self.slicers[slicer_id] slicer.set_color_map(color_map, alpha_map) def disable_shading(self): """ Disable volume shading """ volumeProperty = self.actor.GetProperty() volumeProperty.ShadeOff() self.actor.SetProperty(volumeProperty) def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9): """ Enable volume shading TODO: See if this method is useful """ volumeProperty = self.actor.GetProperty() volumeProperty.SetInterpolationTypeToLinear() volumeProperty.ShadeOn() volumeProperty.SetAmbient(ambient) volumeProperty.SetDiffuse(diffuse) volumeProperty.SetSpecular(specular) volumeProperty.SetScalarOpacityUnitDistance(1) self.actor.SetProperty(volumeProperty) def toggle_slices_visibility(self): """ Toggle slices visibility """ self.model.slices_visible = not self.model.slices_visible for slicer_id in self.slicers: slicer = self.slicers[slicer_id] self.update_slicer(slicer) if slicer.actor is not None: slicer.actor.SetVisibility(self.model.slices_visible) def toggle_hollow(self): """ Toggle hollow mode for volume rendering. This is intended to work only on segmented (annotated) volumes. """ volume_property = self.actor.GetProperty() # Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff... disabled = bool(volume_property.GetDisableGradientOpacity()) if disabled: volume_property.DisableGradientOpacityOff() alpha_gradient = vtk.vtkPiecewiseFunction() alpha_gradient.AddPoint(0, 0.0) alpha_gradient.AddPoint(1, 0.75) alpha_gradient.AddPoint(2, 1.0) volume_property.SetGradientOpacity(alpha_gradient) else: volume_property.DisableGradientOpacityOn() return not disabled def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False): """ Get a scalar value from the volume with respect to XYZ coordinates and a optionally a normal step, that is the normal on which to probe multiplied by the distance you want to travel further into the volume to pick a correct value. Often the "surface point" on a volume with non uniform transparency is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts. So you need to go further into the "cloud" so to speak, in order to find the values you want. 
:param position: 3D array :param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields bad or unwanted results :param avoid_values: Try and find other values than this :param cast_to_int: Whether the value should be cast to integer :return: Scalar value """ if isinstance(avoid_values, int) or isinstance(avoid_values, float): avoid_values = [avoid_values] # TODO: see if this is faster? To be tested # ijk_result = [0.0, 0.0, 0.0] # volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result) # volume_actor._data.GetPoint(ijk_result) pt_id = self.actor._data.FindPoint(*position) valid_id = 0 < pt_id < self.scalars.GetNumberOfValues() value = self.scalars.GetValue(pt_id) if valid_id else None if not valid_id or (value in avoid_values): if normal_step is not None: position += normal_step pt_id = self.actor._data.FindPoint(*position) valid_id = 0 < pt_id < self.scalars.GetNumberOfValues() value = self.scalars.GetValue(pt_id) if valid_id else None if cast_to_int and value is not None: value = int(value) if value is None and none_as_zero: value = 0 return value def raycast(self, origin, screen_position): """ Shorthand for pick() method """ return self.pick(origin, screen_position) def pick(self, origin, screen_position): """ Find the nearest intersection – even on sliced volume – with the ray formed by an origin and a screen-space position (given by VTK when you click on an actor) :param origin: Origin of the vector :param screen_position: 2D position on screen. This is given by vtk events like MouseRelease :return: The nearest position and its related value queried in the volume image """ self.picker.Pick(*screen_position[:2], 0, self.plot.renderer) position = np.array(self.picker.GetPickPosition()) ray = position - origin distance = np.linalg.norm(ray) normal = ray / distance # Go half a voxel further to make sure we don't hit "void" vol_position = position # + normal * self.model.resolution / 2 probe_position = position + normal * self.model.resolution * 10 closest_dist = distance slice_position = None # See if the line hits any of the slicers (that are image planes) for slicer_id in self.slicers: slicer = self.slicers[slicer_id] if slicer.got_slice: hits = slicer.actor.intersectWithLine(origin, probe_position) if len(hits) != 1: continue new_dist = np.linalg.norm(position - hits[0]) if new_dist < closest_dist and new_dist < self.model.resolution * 2: closest_dist = new_dist slice_position = hits[0] if slice_position is None: position = vol_position else: position = slice_position value = self.get_value_from_xyz(position, normal * self.model.resolution * 4) return position, value def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None, screen_space=True, min_v=None, max_v=None, add_to_scene=True): """ Add a series of points along a line probe :param origin: Probe origin :param destination: Probe destination point :param resolution: Number of (equidistant) points that will be probed along that line :param radius: Radius of the points :param color_map: Scalars color map :param screen_space: Whether the points are screen space or spheres :param min_v: Min scalar value :param max_v: Max scalar value :param add_to_scene: Whether the new probe is added to scene :return: Points """ if color_map is None: color_map = self.model.luts.current.color_map positions, values = self.probe(origin, destination, resolution) points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space, 
color_map=color_map, min_v=min_v, max_v=max_v) points_obj.origin = origin points_obj.destination = destination # Dynamic properties assignment points_obj.target = self.actor points_obj.target_controller = self if add_to_scene: self.plot.add(points_obj) return points_obj def update_probe(self, origin, destination, points_obj): """ Update a probe with given start and end points :param origin: Start point :param destination: End point :param points_obj: Points object """ resolution = points_obj._polydata.GetPoints().GetNumberOfPoints() positions, values = self.probe(origin, destination, resolution) points_obj.update_data(positions, values) def probe(self, origin, destination, resolution=40): """ Probe a volume with a line :param origin: Origin of the line probe :param destination: Destination of the line probe :param resolution: Number of point samples along the probe :return: Positions and values """ origin = np.array(origin) destination = np.array(destination) distance = np.linalg.norm(destination - origin) ray = destination - origin ray_norm = ray / distance step = distance / resolution positions = [origin + ray_norm * p_id * step for p_id in range(resolution)] values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions]) return positions, values def set_interactive_subsampling(self, on=False): """ Set volume subsampling on or off. This is enabled by default in VTK and we disable it by default in IBLViewer :param on: Whether volume subsampling in interactive mode is on or off """ #self.plot.window.SetDesiredUpdateRate(0) #self.actor._mapper.SetInteractiveUpdateRate(0) self.model.interactive_subsampling = on self.actor._mapper.SetAutoAdjustSampleDistances(on) if on: self.actor._mapper.InteractiveAdjustSampleDistancesOn() else: self.actor._mapper.InteractiveAdjustSampleDistancesOff() def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True): """ Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value. Unlike general isosurfacing, this method extracts only the surface mesh of the desired region/label/segmentation, not of all values from 0 to label. 
:param label: Label (scalar) value found in the volume :param exceptions: If the label is found in the exceptions list, isosurfacing will not occur :param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label :param set_current: Whether the label is set as the current one in the model :param to_int: Whether the label is cast to integer :param split_meshes: Whether we split meshes when multiple ones are found :return: A list of all manifold meshes for the given label """ if label is None or label in exceptions: return if to_int: label = int(label) existing_meshes = self.model.isosurfaces.get(label) if existing_meshes is not None and not force_rebuild: return existing_meshes lut = self.model.luts.current simple_lut = vtk.vtkLookupTable() simple_lut.SetNumberOfColors(1) simple_lut.SetTableRange(0, 1) simple_lut.SetScaleToLinear() simple_lut.SetTableValue(0, 0, 0, 0, 0) simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label)) simple_lut.Build() # Generate object boundaries from labelled volume discrete = vtk.vtkDiscreteMarchingCubes() discrete.SetInputData(self.actor.imagedata()) discrete.GenerateValues(1, label, label) smoothing_iterations = 15 pass_band = 0.001 feature_angle = 120.0 smoother = vtk.vtkWindowedSincPolyDataFilter() smoother.SetInputConnection(discrete.GetOutputPort()) smoother.SetNumberOfIterations(smoothing_iterations) smoother.BoundarySmoothingOff() smoother.FeatureEdgeSmoothingOff() smoother.SetFeatureAngle(feature_angle) smoother.SetPassBand(pass_band) smoother.NonManifoldSmoothingOn() smoother.NormalizeCoordinatesOn() smoother.Update() self.model.isosurfaces[label] = [] #splitter = vtk.vtkExtractPolyDataGeometry() if split_meshes: splitter = vtk.vtkPolyDataConnectivityFilter() splitter.SetInputConnection(smoother.GetOutputPort()) splitter.SetExtractionModeToAllRegions() splitter.ColorRegionsOn() splitter.Update() for region_id in range(splitter.GetNumberOfExtractedRegions()): #splitter.AddSpecifiedRegion(region_id) #splitter.Update() #poly = vtk.vtkPolyData() #poly.ShallowCopy(splitter.GetOutput()) threshold = vtk.vtkThreshold() threshold.SetInputConnection(splitter.GetOutputPort()) threshold.ThresholdBetween(region_id, region_id) threshold.Update() actor = vedo.Mesh(threshold.GetOutput()) #actor._mapper.SetScalarRange(min_value, lut.scalar_max) #actor._mapper.SetUseLookupTableScalarRange(True) actor._mapper.SetLookupTable(simple_lut) actor._mapper.ScalarVisibilityOn() actor.name = 'Isosurface_' + str(label) self.model.isosurfaces[label].append(actor) #actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label) else: poly = smoother.GetOutput() actor = vedo.Mesh(poly) actor._mapper.SetLookupTable(simple_lut) actor._mapper.ScalarVisibilityOn() actor.name = 'Isosurface_' + str(label) self.model.isosurfaces[label].append(actor) ''' pdnorm = vtk.vtkPolyDataNormals() pdnorm.SetInputData(smoother.GetOutput()) pdnorm.ComputePointNormalsOn() pdnorm.ComputeCellNormalsOn() pdnorm.FlipNormalsOff() pdnorm.ConsistencyOn() pdnorm.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(smoother.GetOutputPort()) mapper.SetLookupTable(lut.scalar_lut) mapper.SetScalarRange(min_value, lut.scalar_max) ''' if set_current: self.model.isosurfaces.set_current(label) return self.model.isosurfaces[label] @dataclass class SlicerModel: PREFIX = '[Slicer]_' MIN_SLAB_THICKNESS = 1.0 #um __count = 0 def unique_name(): SlicerModel.__count += 1 return f'{SlicerModel.PREFIX}_{SlicerModel.__count}' name: str = 
field(default_factory=unique_name) # 0, 1 or 2. See the normal for axis orientation axis: int = None value: float = 0.0 bounds: np.ndarray = None #thickness: float = 0.0 origin: np.ndarray = np.array([0.0, 0.0, 0.0]) normal: np.ndarray = np.array([1.0, 0.0, 0.0]) clipping_planes: vtk.vtkPlaneCollection = None def get_box_plane_id(self): """ Get the plane id :return: Int """ if self.axis is None: return offset = 0 if self.normal[self.axis] < 0 else 1 return self.axis * 2 + offset def get_axis_aligned_info(self, vtk_axis): """ VTK stores box clipping planes in the order: -X to +X: 0, 1 -Y to +Y: 2, 3 -Z to +Z: 4, 5 This method retrieves what is the XYZ axis (0, 1 or 2) and its orientation sign :return: Int axis and float orientation """ orientation = -1.0 if vtk_axis % 2 == 0 else 1.0 axis = (vtk_axis - vtk_axis % 2) // 2 return axis, orientation def align_to_axis(self, axis, dimensions=None): """ Set the axis of the slicer :param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info() :param dimensions: Dimensions of the volume """ if not isinstance(axis, int): return normal = np.zeros(3).astype(float) xyz_axis, orientation = self.get_axis_aligned_info(axis) normal[xyz_axis] = orientation self.axis = xyz_axis if dimensions is not None and orientation < 0: self.origin = np.zeros(3) self.origin[xyz_axis] = dimensions[xyz_axis] self.normal = normal def flip_normal(self): """ Flip the normal of the slicer """ self.normal *= -1.0 self.check_normal() if isinstance(self.axis, int): self.axis *= -1 def check_normal(self): """ Check if the normal is axis-aligned. If not, the axis is set to None. """ zeros = self.normal == 0 if len(self.normal[zeros]) >= 2: self.axis = 0 def update(self, value=None, normal=None, axis=None): """ Update slicer :param value: Origin of the slicing plane :param normal: Normal of the slicing plane :param axis: Axis, if the plane is axis-aligned :return: True if model changed, False if it didn't """ if not(isinstance(value, int) or isinstance(value, float)): if normal is None: normal = self.normal if normal is None: return False if normal[1] == 0 and normal[2] == 0: axis = 0 #if normal[0] > 0 else 1 elif normal[0] == 0 and normal[2] == 0: axis = 1 #if normal[1] > 0 else 1 elif normal[0] == 0 and normal[1] == 0: axis = 2 #if normal[2] > 0 else 1 if axis is not None: value = value[axis] if axis is None: axis = self.axis if self.value == value: return False if axis is not None: self.value = value self.origin = np.array(normal) * value else: self.value = None self.origin = value self.normal = normal self.axis = axis return True class SlicerView(): slices = {} def __init__(self, plot, volume_view, slicer_model, standalone=True): """ Constructor :param plot: Plot instance :param volume_view: VolumeView instance :param slicer_model: SlicerModel instance :param standalone: Whether the slice is a standalone actor that can be clicked. Set this to False if you want to use transparency, at the expense that because of a VTK bug, you won't be able to click on it anymore, requiring you to code another way of detecting where the user clicked. 
See more in initialize_mapper() """ self.plot = plot self.volume_view = volume_view self.model = slicer_model self.actor = None self.filter = None self.filter = None self.actor = None self.reslice = None self.slice_type = -1 self.depth_peeling_enabled = None self.standalone = standalone self.got_slice = False self.color_map = None self.alpha_map = None self.initialize() def initialize(self, render=False): """ Initialize the slicer object """ if self.filter is None: self.filter = vtk.vtkImageDataGeometryFilter() if self.actor is None: self.actor = vedo.Mesh(self.filter.GetOutput()) # Adding empty actor so that it's updated later on self.plot.add(self.actor, render=render) self.actor.lighting('off') self.actor.name = self.model.name self.initialize_mapper() def initialize_mapper(self): """ Initialize the object mapper """ mapper = self.actor._mapper mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData mapper.SetColorModeToMapScalars() mapper.ScalarVisibilityOn() # We operate on static volumes thanks to the double LUT mapping implemented here mapper.SetStatic(True) # Without using scalar range, the mapping will be off mapper.SetUseLookupTableScalarRange(True) # We prevent this actor from being pickable as a result of the bug described below # when we want to use transparency on the slice. self.actor.pickable(self.standalone) if self.standalone: # There is a bug in VTK 9 that prevents clicking on transparent objects # as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291 # The "Force opaque fix" below should be gone with the next VTK update hopefully. # In the meantime, we use this. # TODO: remove this when this bug is fixed in VTK self.actor.ForceOpaqueOn() else: # We bypass the transparent selection bug when a VolumeView has multiple slicers # like in box mode because the click detection occurs on the volume and we perform # an additional test to see if a slicer yields a nearby result. If it does, # the result is like clicking on the slice and we get transparency for free. pass # Make sure we have depth peeling activated, otherwise transparency with volumes # will look weird and in the wrong order self.plot.renderer.UseDepthPeelingOn() self.plot.renderer.UseDepthPeelingForVolumesOn() segmented = self.volume_view.model.is_segmented() if segmented: # This very line below will mess up the entire slice coloring if: # - you have a segmented volume and this is set to True # - you have a non-segmented (like raw MRI, CT) volume and this is set to False mapper.SetInterpolateScalarsBeforeMapping(not segmented) mapper.Update() def set_color_map(self, color_map, alpha_map=None): """ Set a color map to the slice :param color_map: Color map, can be a string, a list of colors or more. See vedo documentation. """ self.color_map = color_map if alpha_map is not None: self.alpha_map = alpha_map if self.got_slice and color_map is not None: self.actor.cmap(self.color_map, alpha=self.alpha_map) def set_slice_type(self, slice_type): """ Set the slice type. 0 for axial, 1 for free slicing :param slice_type: Int value """ if slice_type == 0 and self.slice_type != slice_type: self.slice_type = slice_type self.filter.SetInputData(self.volume_view.actor.imagedata()) elif slice_type == 1 and self.slice_type != slice_type: self.slice_type = slice_type self.filter.SetInputData(self.reslice.GetOutput()) def slice_on_normal(self, origin, normal): """ Slice a volume with a plane oriented by the given normal. This allows slicing in all directions. 
:param origin: Origin of the slicing plane :param normal: Normal of the slicing plane :return: Mesh object with the slice as an image texture """ ''' mapper = vtk.vtkImageResliceMapper() mapper.SetInputData(self.volume_view.actor._data) mapper.SliceFacesCameraOff() mapper.SliceAtFocalPointOff() mapper.JumpToNearestSliceOn() mapper.SetImageSampleFactor(2) mapper.BorderOn() mapper.BackgroundOff() mapper.UpdateInformation() mapper.GetSlicePlane().SetOrigin(*origin) mapper.GetSlicePlane().SetNormal(*normal) mapper.GetSlicePlane().Modified() mapper.Modified() mapper.Update() self.actor = vtk.vtkImageSlice() self.actor.SetMapper(mapper) prop = vtk.vtkImageProperty() if True: prop.SetInterpolationTypeToLinear() else: prop.SetInterpolationTypeToNearest() self.actor.SetProperty(prop) return ''' if self.reslice is None: reslice = vtk.vtkImageReslice() reslice.SetInputData(self.volume_view.actor._data) #reslice.SetInputData(image) reslice.SetOutputDimensionality(2) reslice.SetAutoCropOutput(False) #reslice.SetInterpolationModeToLinear() reslice.SetInterpolationModeToNearestNeighbor() reslice.SetSlabNumberOfSlices(1) reslice.SetOutputSpacing(self.volume_view.get_spacing()) reslice.ReleaseDataFlagOn() self.reslice = reslice self.set_slice_type(1) M, T = utils.get_transformation_matrix(origin, normal) self.reslice.SetResliceAxes(M) self.reslice.Update() self.filter.Update() if self.actor is None: self.actor = vedo.Mesh(self.filter.GetOutput()) self.initialize_mapper() else: self.actor._update(self.filter.GetOutput()) self.initialize_mapper() self.actor.SetOrientation(T.GetOrientation()) self.actor.SetPosition(origin) self.got_slice = True return self.actor def x_slice(self, i): """ Extract the slice at index `i` of volume along x-axis. :param i: I index """ self.set_slice_type(0) nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions() if i <= 1 or i > nx - 1: return False self.filter.SetExtent(i, i, 0, ny, 0, nz) self.filter.Update() if self.actor is not None: self.actor._update(self.filter.GetOutput()) else: self.actor = vedo.Mesh(self.filter.GetOutput()) self.initialize_mapper() self.got_slice = True return True def y_slice(self, j): """ Extract the slice at index `j` of volume along y-axis. :param j: J index """ self.set_slice_type(0) #nx, ny, nz = self.volume_view.model.dimensions / resolution nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions() if j <= 1 or j > ny - 1: return False self.filter.SetExtent(0, nx, j, j, 0, nz) self.filter.Update() if self.actor is not None: self.actor._update(self.filter.GetOutput()) else: self.actor = vedo.Mesh(self.filter.GetOutput()) self.initialize_mapper() self.got_slice = True return True def z_slice(self, k): """ Extract the slice at index `k` of volume along z-axis. 
:param k: K index """ self.set_slice_type(0) nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions() if k <= 1 or k > nz - 1: return False self.filter.SetExtent(0, nx, 0, ny, k, k) self.filter.Update() if self.actor is not None: self.actor._update(self.filter.GetOutput()) else: self.actor = vedo.Mesh(self.filter.GetOutput()) self.initialize_mapper() self.got_slice = True return True def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False): """ Slice on standard X, Y or Z axis :param value: Value on the given axis :param normal: Axis normal, can be either +1.0 or -1.0 along that axis :param axis: Axis integer, 0 for X, 1 for Y, 2 for Z :param use_reslice: if True, this enables vtkImageReslice which is useful when the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper. This is why the default is False. :return: Result boolean, whether slice occured or not """ resolution = self.volume_view.model.resolution volume_dimensions = self.volume_view.model.dimensions ''' if normal[axis] < 0: if value > 0: # Make value consistent with given normal. value *= normal[axis] value = volume_dimensions[axis] + value ''' in_volume_slice = int(value) // resolution if use_reslice: self.slice_on_normal(normal * value, normal) return if axis == 0: result = self.x_slice(in_volume_slice) elif axis == 1: result = self.y_slice(in_volume_slice) elif axis == 2: result = self.z_slice(in_volume_slice) return result def update(self): """ Update slice object according to data in the model """ had_slice = self.got_slice result = True if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2: result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis) else: self.slice_on_normal(self.model.origin, self.model.normal) if not result: self.plot.remove(self.actor) self.got_slice = False return #self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos())) lut = self.volume_view.model.luts.current if lut is not None: ''' This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha) So we have to put a color map, alpha map and a vtkLookupTable built from both maps in a LUTModel. Alternatively, we could update the LUT with alpha values but it's a pain. ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction() lut = vedo.utils.ctf2lut(self.volume_view.actor) otf = self.volume_view.actor.GetProperty().GetScalarOpacity # using "ctf" would work only for colors, not for transparency! self.apply_lut(ctf) ''' self.apply_lut(lut.mapped_lut) else: if self.alpha_map is None: self.actor.cmap(self.color_map) else: self.actor.cmap(self.color_map, alpha=self.alpha_map) if self.model.clipping_planes is not None: self.actor.mapper().SetClippingPlanes(self.model.clipping_planes) if not had_slice: self.plot.add(self.actor, render=True) def apply_lut(self, lut=None): """ Apply a LUT to the volume :param lut: vtkLookupTable :param actor: The actor to receive this """ if self.actor is None or lut is None: return mapper = self.actor._mapper mapper.SetLookupTable(lut)
39.867883
119
0.609892
71,468
0.990589
0
0
20,772
0.287912
0
0
27,541
0.381735
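Not part of the record above: a minimal NumPy-only sketch of the same axis-aligned slicing idea that the x_slice/y_slice/z_slice methods implement with vtkExtractVOI-style extents. The array, axis numbers, and bounds check mirror the record only loosely; a dense 3-D array stands in for the vtkImageData volume.

import numpy as np

def axis_slice(volume, axis, index):
    # Clamp the index to the valid range, mirroring the bounds checks in
    # x_slice/y_slice/z_slice, which return False when the index is out of range.
    if index <= 1 or index > volume.shape[axis] - 1:
        return None
    # Build a slicer like (slice(None), slice(None), index) for axis=2.
    slicer = [slice(None)] * volume.ndim
    slicer[axis] = index
    return volume[tuple(slicer)]

volume = np.random.rand(64, 64, 64)
plane = axis_slice(volume, axis=2, index=10)   # analogous to z_slice(10)
print(None if plane is None else plane.shape)  # (64, 64)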
4a76ff4e7600c0692264f843891e33f896e8b3a4
12,670
py
Python
modeling/dataset.py
LaudateCorpus1/ml-cread
b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae
[ "AML" ]
18
2021-05-25T17:06:46.000Z
2021-11-08T09:47:48.000Z
modeling/dataset.py
LaudateCorpus1/ml-cread
b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae
[ "AML" ]
null
null
null
modeling/dataset.py
LaudateCorpus1/ml-cread
b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae
[ "AML" ]
6
2021-06-03T21:29:34.000Z
2022-03-26T11:38:37.000Z
# # For licensing see accompanying LICENSE file. # Copyright (C) 2021 Apple Inc. All Rights Reserved. # ''' Dataset file ''' import sys import time import json import copy from itertools import chain from tqdm import tqdm, trange import torch from torch.utils.data import DataLoader, RandomSampler SPECIAL_TOKENS = { "bos_token": "<BOS>", "eos_token": "<EOS>", "pad_token": "<PAD>", "sep_token": "<SEP>", "additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"] } SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"] class Dataset(torch.utils.data.Dataset): def __init__(self, args, tokenizer, data_type, generation, data_size): assert data_type in ['train', 'dev', 'test'] self.args = args self.data_size = data_size self.tokenizer = tokenizer self.data_type = data_type self.generation = generation self._get_special_token_ids() self._create_examples() def _get_special_token_ids(self): self.SPECIAL_TOKENS = SPECIAL_TOKENS self.SPECIAL_TOKENS_VALUES = SPECIAL_TOKENS_VALUES self.bos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["bos_token"]) self.eos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["eos_token"]) self.pad_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["pad_token"]) self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["sep_token"]) # mention detection vocab self.mc_cl2idx = {'<N>': 0, '<M>': 1, '</M>': 2} # <N>: none, <M>: start of mention, "</M>": end of mention self.mc_idx2cl = {v: k for k, v in self.mc_cl2idx.items()} def prepare_reference_label(self, word_label_index, wordId2tokenId, input_ids): ''' record the index of start/end of mention and refernece in the input otterance this info will be used as attention signal in reference resolution step ''' reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(input_ids) reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence] token_label_index = [] for start_end_link in word_label_index: for link_meta in start_end_link: attention_word_idx, mention_word_idx = link_meta['attention_idx'], link_meta['mention_idx'] if link_meta['mention_type'] == 'start': attention_token_idx = wordId2tokenId[attention_word_idx][0] else: # end attention_token_idx = wordId2tokenId[attention_word_idx][-1] for mention_token_idx in wordId2tokenId[mention_word_idx]: link = {} link['mention_token_idx'] = mention_token_idx link['attention_token_idx'] = attention_token_idx assert reconstruct_sentence[mention_token_idx] in link_meta['mention_word'] assert reconstruct_sentence[attention_token_idx] in link_meta['attention_word'] token_label_index.append(link) return token_label_index def prepare_binary_label(self, input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx): ''' only the start of rewriting token receives binary signal ''' binary_label = [-100] * len(input_ids) assert isinstance(binary_rewrite, bool) if binary_rewrite == True: binary_label[curr_end_token_idx] = 1 # rewrite else: binary_label[curr_end_token_idx] = 0 # not rewrite return binary_label def prepare_mention_label(self, input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx): ''' get label index for mention detection only the parts of current utterance receive signal, everwhere else will get -100 ''' mention_label = [-100] * len(input_ids) curr_start_idx = wordId2tokenId[curr_start_idx][0] curr_end_idx = wordId2tokenId[curr_end_idx-1][-1] + 1 # align class <N> (none) to everywehere in 
current utterance first mention_label[curr_start_idx: curr_end_idx] = [ self.mc_cl2idx['<N>'] ] * (curr_end_idx-curr_start_idx) for start_end_link in word_label_index: # iterate over links in one example for link_meta in start_end_link: # iterate over start and end of a link idx = link_meta['mention_idx'] if link_meta['mention_type'] == 'start': # align class <M> (start of mention) for idx in wordId2tokenId[idx]: mention_label[idx] = self.mc_cl2idx['<M>'] else: # # align class </M> (end of mention) idx = wordId2tokenId[idx][-1] mention_label[idx] = self.mc_cl2idx['</M>'] return mention_label, curr_start_idx, curr_end_idx def _check_label_index(self, whole_input, links): ''' sanity check for index correctness ''' seq = whole_input.split() for link in links: for start_or_end in link: for word_type in ['mention', 'attention']: assert seq[start_or_end['{}_idx'.format(word_type)]] == start_or_end['{}_word'.format(word_type)] def _create_examples(self): if self.data_type == 'train': data_file = self.args.train_file elif self.data_type == 'dev': data_file = self.args.dev_file else: data_file = self.args.test_file with open(data_file) as f: data = json.load(f) self.examples = [] for example_num, example in enumerate(tqdm(data, disable=self.args.disable_display)): if self.data_size != -1 and example_num == self.data_size: break # get data context = example['dialogue context'] # context, list of str curr_utt = example['current utterance'] # current utterance, str rewt_utt = example['rewrite utterance'] # rewrite utterance, str word_label_index = example['link index'] # index of mention/reference span binary_rewrite = example['rewrite happen'] # binary label for rewrite or not, bool # prepare input sequence to model whole_input = copy.deepcopy(context) whole_input.append(curr_utt) curr_start_idx = sum([len(s.split()) for s in context]) # the (word) start idx of current utt curr_end_idx = curr_start_idx + len(curr_utt.split()) whole_input = " ".join(whole_input) self._check_label_index(whole_input, word_label_index) input_ids, wordId2tokenId, tokenId2wordId = self.tokenize_with_map(whole_input) if rewt_utt == "": rewt_utt_ids = [] else: rewt_utt_ids = self.tokenizer(rewt_utt)['input_ids'] # list target_utt_ids = rewt_utt_ids target_utt_len = len(target_utt_ids) if not self.generation: # input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS> input_ids = input_ids + [self.sep_id] + target_utt_ids + [self.eos_id] # mention detection signal mention_label, curr_start_token_idx, curr_end_token_idx = \ self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx) # reference resolution signal reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids) # binary classification of rewriting signal binary_label = self.prepare_binary_label(input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx) # rewriting singal ignore_len = len(input_ids) - target_utt_len - 1 # eos_id label_ids = [-100] * ignore_len + target_utt_ids + [self.eos_id] assert len(input_ids) == len(label_ids) else: # generation # <sep> is given at first step during decoding input_ids = input_ids label_ids = None mention_label, curr_start_token_idx, curr_end_token_idx = \ self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx) reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids) binary_label = None self.examples.append({ 'input_ids': input_ids, # list of ids 
'label_ids': label_ids, # list of ids 'mention_label_ids': mention_label, 'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, 'reference_label': reference_label_index, 'wordId2tokenId': wordId2tokenId, 'tokenId2wordId': tokenId2wordId, 'context': context, 'curr_utt': curr_utt, 'whole_input': whole_input, 'rewt_utt': rewt_utt, 'example_id': example['example index'], 'spk': example['speaker'], 'coref_label': word_label_index, 'binary_label_ids': binary_label, 'binary_rewrite': binary_rewrite }) print('Data Statistics: {} -> {} examples'.format(self.data_type, len(self.examples))) def _pad(self, sentences, pad_id): ''' sentences: a list of list with ids ''' max_len = max((map(len, sentences))) attention_mask = [] sentences_pad = [] for sent in sentences: pad_len = max_len - len(sent) sentences_pad.append( sent + [pad_id]*pad_len ) attention_mask.append( [1]*len(sent) + [0]*pad_len) return sentences_pad, attention_mask def __len__(self): return len(self.examples) def __getitem__(self, index): return self.examples[index] def collate_fn(self, batch): input_ids = [example['input_ids'] for example in batch] input_ids, attention_mask = self._pad(input_ids, self.pad_id) input_ids, attention_mask = torch.tensor(input_ids).long().to(self.args.device), torch.tensor(attention_mask).long().to(self.args.device) if not self.generation: label_ids = [example['label_ids'] for example in batch] label_ids, _ = self._pad(label_ids, -100) label_ids = torch.tensor(label_ids).long().to(self.args.device) mention_label_ids = [example['mention_label_ids'] for example in batch] mention_label_ids, _ = self._pad(mention_label_ids, -100) mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device) binary_label_ids = [example['binary_label_ids'] for example in batch] binary_label_ids, _ = self._pad(binary_label_ids, -100) binary_label_ids = torch.tensor(binary_label_ids).long().to(self.args.device) else: label_ids = None mention_label_ids = [example['mention_label_ids'] for example in batch] mention_label_ids, _ = self._pad(mention_label_ids, -100) mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device) binary_label_ids = None token_type_ids = None # TODO: not sure if this makes any effect to gpt2 # record info context = [example['context'] for example in batch] curr_utt = [example['curr_utt'] for example in batch] rewt_utt = [example['rewt_utt'] for example in batch] example_ids = [example['example_id'] for example in batch] # record the example idx in batch curr_start_token_idx = [example['curr_start_token_idx'] for example in batch] curr_end_token_idx = [example['curr_end_token_idx'] for example in batch] reference_label = [example['reference_label'] for example in batch] wordId2tokenId = [example['wordId2tokenId'] for example in batch] tokenId2wordId = [example['tokenId2wordId'] for example in batch] whole_input = [example['whole_input'] for example in batch] spk = [example['spk'] for example in batch] coref_label = [example['coref_label'] for example in batch] binary_rewrite = [example['binary_rewrite'] for example in batch] return {'input_ids': input_ids, 'attention_mask': attention_mask, \ 'token_type_ids': token_type_ids, 'label_ids': label_ids, \ 'context': context, 'curr_utt': curr_utt, 'rewt_utt': rewt_utt, \ 'example_ids': example_ids, 'spk': spk, 'mention_label_ids': mention_label_ids, \ 'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, \ 'reference_label': reference_label, 'wordId2tokenId': 
wordId2tokenId, \ 'tokenId2wordId': tokenId2wordId, 'whole_input': whole_input, \ 'coref_label': coref_label, 'binary_label_ids': binary_label_ids, \ 'binary_rewrite': binary_rewrite} def tokenize_with_map(self, sentence): ''' Build the mapping of indexes before/after tokenizer to handel BPE Input: sentence: a natural sentence, str Returns: wordId2tokenId, a 1-to-many map tokenId2wordId, a many-to-1 map ''' assert isinstance(sentence, str) token_ids = self.tokenizer(sentence)['input_ids'] reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(token_ids) reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence] sentence = sentence.split() wordId2tokenId = {} tokenId = 0 for wordId, word in enumerate(sentence): wordId2tokenId[wordId] = [] token = "" while word != token: wordId2tokenId[wordId].append(tokenId) token += reconstruct_sentence[tokenId] tokenId += 1 tokenId2wordId = {} for wordId, tokenIds in wordId2tokenId.items(): for tokenId in tokenIds: assert tokenId not in tokenId2wordId tokenId2wordId[tokenId] = wordId assert len(wordId2tokenId) == len(sentence) assert len(tokenId2wordId) == len(reconstruct_sentence) return token_ids, wordId2tokenId, tokenId2wordId if __name__ == '__main__': pass
38.510638
139
0.728808
12,013
0.947996
0
0
0
0
0
0
3,178
0.250789
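Not part of the record above: a toy illustration of the wordId2tokenId / tokenId2wordId maps that Dataset.tokenize_with_map builds to bridge word-level labels and subword tokens. A fake splitter stands in for the GPT-2 BPE tokenizer, so the function name toy_subwords and the split rule are purely illustrative.

def toy_subwords(word):
    # Pretend every word longer than 4 characters is split into two subword pieces.
    return [word] if len(word) <= 4 else [word[:4], word[4:]]

sentence = "please rewrite this utterance"
tokens, wordId2tokenId, tokenId2wordId = [], {}, {}
for word_id, word in enumerate(sentence.split()):
    wordId2tokenId[word_id] = []
    for piece in toy_subwords(word):
        wordId2tokenId[word_id].append(len(tokens))
        tokenId2wordId[len(tokens)] = word_id
        tokens.append(piece)

print(tokens)          # ['plea', 'se', 'rewr', 'ite', 'this', 'utte', 'rance']
print(wordId2tokenId)  # {0: [0, 1], 1: [2, 3], 2: [4], 3: [5, 6]}
print(tokenId2wordId)  # {0: 0, 1: 0, 2: 1, 3: 1, 4: 2, 5: 3, 6: 3}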
4a770f589bb75a8f2ce9da24f74f5b68103d69bf
2,431
py
Python
hy/lex/lexer.py
schuster-rainer/hy
d969ed63d67c4a9070fd41a8fbff35da845e0619
[ "MIT" ]
12
2015-01-01T21:21:31.000Z
2021-06-14T19:51:59.000Z
hy/lex/lexer.py
schuster-rainer/hy
d969ed63d67c4a9070fd41a8fbff35da845e0619
[ "MIT" ]
null
null
null
hy/lex/lexer.py
schuster-rainer/hy
d969ed63d67c4a9070fd41a8fbff35da845e0619
[ "MIT" ]
2
2016-01-17T21:59:29.000Z
2016-09-06T20:56:41.000Z
# Copyright (c) 2013 Nicolas Dandrimont <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from rply import LexerGenerator lg = LexerGenerator() # A regexp for something that should end a quoting/unquoting operator # i.e. a space or a closing brace/paren/curly end_quote = r'(?![\s\)\]\}])' lg.add('LPAREN', r'\(') lg.add('RPAREN', r'\)') lg.add('LBRACKET', r'\[') lg.add('RBRACKET', r'\]') lg.add('LCURLY', r'\{') lg.add('RCURLY', r'\}') lg.add('HLCURLY', r'#\{') lg.add('QUOTE', r'\'%s' % end_quote) lg.add('QUASIQUOTE', r'`%s' % end_quote) lg.add('UNQUOTESPLICE', r'~@%s' % end_quote) lg.add('UNQUOTE', r'~%s' % end_quote) lg.add('HASHBANG', r'#!.*[^\r\n]') lg.add('HASHREADER', r'#[^{]') # A regexp which matches incomplete strings, used to support # multi-line strings in the interpreter partial_string = r'''(?x) (?:u|r|ur|ru)? # prefix " # start string (?: | [^"\\] # non-quote or backslash | \\(.|\n) # or escaped single character or newline | \\x[0-9a-fA-F]{2} # or escaped raw character | \\u[0-9a-fA-F]{4} # or unicode escape | \\U[0-9a-fA-F]{8} # or long unicode escape )* # one or more times ''' lg.add('STRING', r'%s"' % partial_string) lg.add('PARTIAL_STRING', partial_string) lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+') lg.ignore(r';.*(?=\r|\n|$)') lg.ignore(r'\s+') lexer = lg.build()
34.239437
76
0.667626
0
0
0
0
0
0
0
0
2,011
0.827232
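Not part of the record above: a small standard-library demo of the negative-lookahead idea behind the end_quote regexp in the lexer record, using Python's re module rather than rply, so it runs on its own.

import re

# The QUOTE rule is a literal quote followed by a negative lookahead that
# rejects whitespace and closing braces/parens/brackets.
end_quote = r'(?![\s\)\]\}])'
quote_re = re.compile(r'\'%s' % end_quote)

print(bool(quote_re.match("'foo")))  # True  - quote starts a quoted form
print(bool(quote_re.match("' )")))   # False - quote followed by whitespace
print(bool(quote_re.match("')")))    # False - quote followed by a closing paren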
4a77208eebfdf92ef53ffabde97b664e8625e12d
1,319
py
Python
week6/shuffle.py
solideveloper/afs-210
2ba0bb7c7617cd3169907458f657696a6987689d
[ "Apache-2.0" ]
1
2022-01-06T01:22:17.000Z
2022-01-06T01:22:17.000Z
week6/shuffle.py
solideveloper/afs-210
2ba0bb7c7617cd3169907458f657696a6987689d
[ "Apache-2.0" ]
null
null
null
week6/shuffle.py
solideveloper/afs-210
2ba0bb7c7617cd3169907458f657696a6987689d
[ "Apache-2.0" ]
null
null
null
# Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.

import random

data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)


def shuffleAlgorithm(data, ndata):
    for i in range(ndata - 1, 0, -1):
        r = random.randint(0, i)
        data[i], data[r] = data[r], data[i]
    return data


print(data)
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))

# Fisher-Yates algorithm - O(n) time complexity because it does not create a copy of the list to shuffle.
# Instead the list is modified in place (constant extra space), keeping it O(n):
# swap the last item with a randomly chosen, not previously selected, item and repeat until every item in the list has been selected.
54.958333
217
0.749052
0
0
0
0
0
0
0
0
909
0.689158
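Not part of the record above: a quick sanity check that the Fisher-Yates swap loop from the shuffle record produces each permutation of a small list with roughly equal frequency. The helper copies the list only so the demo can be repeated; the original shuffles in place.

import random
from collections import Counter

def fisher_yates(seq):
    seq = list(seq)  # copy for the demo; the original mutates the input list
    for i in range(len(seq) - 1, 0, -1):
        r = random.randint(0, i)
        seq[i], seq[r] = seq[r], seq[i]
    return seq

# Each of the 6 permutations of [1, 2, 3] should appear close to 10000 times.
counts = Counter(tuple(fisher_yates([1, 2, 3])) for _ in range(60000))
for perm, n in sorted(counts.items()):
    print(perm, n)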
4a78cf1db1ffe2307d7c782737a9e5d96a2685ca
1,254
py
Python
workbox/workbox/lib/helpers.py
pr3sto/workbox
558147a1a387dcfbe03be0fbc366d1d793364da6
[ "MIT" ]
null
null
null
workbox/workbox/lib/helpers.py
pr3sto/workbox
558147a1a387dcfbe03be0fbc366d1d793364da6
[ "MIT" ]
null
null
null
workbox/workbox/lib/helpers.py
pr3sto/workbox
558147a1a387dcfbe03be0fbc366d1d793364da6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""

import logging
import socket
from datetime import datetime

from markupsafe import Markup
import psutil
import tg

log = logging.getLogger(__name__)


def current_year():
    """ Return current year. """
    now = datetime.now()
    return now.strftime('%Y')


def is_docker_enabled():
    """ Detect if docker service is started. """
    for proc in psutil.process_iter():
        if 'docker' in proc.name():
            return True
    return False


def get_server_load_value():
    """ Get server load value. """
    return psutil.virtual_memory().percent


def get_free_port():
    """ Find and returns free port number. """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(("", 0))
    free_port = soc.getsockname()[1]
    soc.close()
    return free_port


def get_vagrantfiles_base_folder():
    """ Return base folder for vagrantfiles. """
    return tg.config.get('workbox.vagrantfiles.basefolder')


def get_hostname():
    """ Return hostname. """
    return tg.config.get('workbox.hostname')


try:
    from webhelpers2 import date, html, number, misc, text
except SyntaxError:
    log.error("WebHelpers2 helpers not available with this Python Version")
22.392857
75
0.679426
0
0
0
0
0
0
0
0
398
0.317384
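Not part of the record above: a short standalone sketch of the bind-to-port-0 trick used by get_free_port() in the workbox helpers, followed by a minimal use of the returned port. Binding to port 0 asks the OS for any free port; note that another process could in principle grab the port between the two bind calls.

import socket

def get_free_port():
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(("", 0))          # port 0 means "pick any free port"
    port = soc.getsockname()[1]
    soc.close()
    return port

port = get_free_port()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", port))  # usually still free, though not guaranteed
server.listen(1)
print("listening on", port)
server.close()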
4a79466df9295fa5ad7c3a62c359310229ec684a
5,647
py
Python
tadataka/dataset/new_tsukuba.py
IshitaTakeshi/Tadataka
852c7afb904503005e51884408e1492ef0be836f
[ "Apache-2.0" ]
54
2019-11-15T16:30:34.000Z
2022-01-13T15:18:54.000Z
tadataka/dataset/new_tsukuba.py
IshitaTakeshi/Tadataka
852c7afb904503005e51884408e1492ef0be836f
[ "Apache-2.0" ]
11
2019-02-28T08:28:24.000Z
2020-04-07T04:47:12.000Z
tadataka/dataset/new_tsukuba.py
IshitaTakeshi/Tadataka
852c7afb904503005e51884408e1492ef0be836f
[ "Apache-2.0" ]
1
2020-02-26T13:59:40.000Z
2020-02-26T13:59:40.000Z
import csv import os from pathlib import Path from xml.etree import ElementTree as ET from tqdm import tqdm from scipy.spatial.transform import Rotation from skimage.io import imread import numpy as np from tadataka.camera import CameraModel, CameraParameters, FOV from tadataka.dataset.frame import Frame from tadataka.dataset.base import BaseDataset from tadataka.pose import Pose def load_depth(path): tree = ET.parse(path) root = tree.getroot() rows_node, cols_node, dt_node, data_node = root[0] height, width = int(rows_node.text), int(cols_node.text) depth_text = data_node.text depth_text = depth_text.replace('\n', '').strip() depth_map = np.fromstring(depth_text, sep=' ') return depth_map.reshape(height, width) def generate_cache(src_dir, cache_dir, src_extension, loader): def generate_(subdir): os.makedirs(str(Path(cache_dir, subdir))) print(f"Generating cache from {subdir}") paths = Path(src_dir, subdir).glob("*" + src_extension) for path in tqdm(list(paths)): filename = path.name.replace(src_extension, ".npy") cache_path = Path(cache_dir, subdir, filename) array = loader(path) np.save(str(cache_path), array) generate_("left") generate_("right") def generate_image_cache(image_dir, cache_dir): print("Generating image cache") generate_cache(image_dir, cache_dir, ".png", imread) def generate_depth_cache(depth_dir, cache_dir): print("Generating depth cache") generate_cache(depth_dir, cache_dir, ".xml", load_depth) def align_coordinate_system(positions, euler_angles): # Camera coordinate system and world coordinate system are not aligned # # Usually camera coordinate system is represented in the format that # x: right y: down z: forward # however, in 'camera_track.txt', they are written in # x: right y: up z: backward # # This means the camera coordinate system is # rotated 180 degrees around the x-axis from the world coordinate system # rotate 180 degrees around the x-axis R = Rotation.from_rotvec([np.pi, 0, 0]).as_matrix() positions = np.dot(R, positions.T).T # Reverse rotations around y and z because axes are flipped # (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z) euler_angles[:, 1:3] = -euler_angles[:, 1:3] return positions, euler_angles def load_poses(pose_path): poses = np.loadtxt(pose_path, delimiter=',') positions, euler_angles = poses[:, 0:3], poses[:, 3:6] positions, euler_angles = align_coordinate_system(positions, euler_angles) rotations = Rotation.from_euler('xyz', euler_angles, degrees=True) return rotations, positions def discard_alpha(image): return image[:, :, 0:3] def calc_baseline_offset(rotation, baseline_length): local_offset = np.array([baseline_length, 0, 0]) R = rotation.as_matrix() return np.dot(R, local_offset) # TODO download and set dataset_root automatically class NewTsukubaDataset(BaseDataset): def __init__(self, dataset_root, condition="daylight"): self.camera_model = CameraModel( CameraParameters(focal_length=[615, 615], offset=[320, 240]), distortion_model=None ) groundtruth_dir = Path(dataset_root, "groundtruth") illumination_dir = Path(dataset_root, "illumination") pose_path = Path(groundtruth_dir, "camera_track.txt") self.baseline_length = 10.0 self.rotations, self.positions = load_poses(pose_path) depth_dir = Path(groundtruth_dir, "depth_maps") depth_cache_dir = Path(groundtruth_dir, "depth_cache") if not depth_cache_dir.exists(): generate_depth_cache(depth_dir, depth_cache_dir) self.depth_L_paths = sorted(Path(depth_cache_dir, "left").glob("*.npy")) self.depth_R_paths = sorted(Path(depth_cache_dir, "right").glob("*.npy")) image_dir = 
Path(illumination_dir, condition) image_cache_dir = Path(illumination_dir, condition + "_cache") if not image_cache_dir.exists(): generate_image_cache(image_dir, image_cache_dir) self.image_L_paths = sorted(Path(image_cache_dir, "left").glob("*.npy")) self.image_R_paths = sorted(Path(image_cache_dir, "right").glob("*.npy")) assert((len(self.depth_L_paths) == len(self.depth_R_paths) == len(self.image_L_paths) == len(self.image_R_paths) == len(self.rotations) == len(self.positions))) for i in range(len(self.positions)): DL = self.depth_L_paths[i].name DR = self.depth_R_paths[i].name IL = self.image_L_paths[i].name IR = self.image_R_paths[i].name assert(DL[-8:] == DR[-8:] == IL[-8:] == IR[-8:]) def __len__(self): return len(self.positions) def load(self, index): image_l = np.load(self.image_L_paths[index]) image_r = np.load(self.image_R_paths[index]) image_l = discard_alpha(image_l) image_r = discard_alpha(image_r) depth_l = np.load(self.depth_L_paths[index]) depth_r = np.load(self.depth_R_paths[index]) position_center = self.positions[index] rotation = self.rotations[index] offset = calc_baseline_offset(rotation, self.baseline_length) pose_wl = Pose(rotation, position_center - offset / 2.0) pose_wr = Pose(rotation, position_center + offset / 2.0) return ( Frame(self.camera_model, pose_wl, image_l, depth_l), Frame(self.camera_model, pose_wr, image_r, depth_r) )
34.644172
81
0.673278
2,637
0.466974
0
0
0
0
0
0
843
0.149283
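Not part of the record above: a tiny sketch of the text-to-array step inside load_depth() from the New Tsukuba loader, which reads an OpenCV-style XML block of rows, cols, and whitespace-separated floats. The sample text and shape here are made up; splitting on whitespace gives the same result as the np.fromstring(..., sep=' ') call in the record without the DeprecationWarning newer NumPy versions emit.

import numpy as np

rows, cols = 2, 3
depth_text = "1.0 2.0 3.0\n 4.0 5.0 6.0"

depth_map = np.array(depth_text.split(), dtype=np.float64).reshape(rows, cols)
print(depth_map)
# [[1. 2. 3.]
#  [4. 5. 6.]]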
4a798e4f49354ed1b300d7ffad5bbb4e1e929e1a
2,015
py
Python
krogon/maybe.py
enamrik/krogon
a41a10ed346b7198509929ed9ba1e9fcf778dc78
[ "MIT" ]
1
2020-03-02T14:17:02.000Z
2020-03-02T14:17:02.000Z
krogon/maybe.py
enamrik/krogon
a41a10ed346b7198509929ed9ba1e9fcf778dc78
[ "MIT" ]
null
null
null
krogon/maybe.py
enamrik/krogon
a41a10ed346b7198509929ed9ba1e9fcf778dc78
[ "MIT" ]
null
null
null
from typing import Callable, TypeVar, Union, Tuple

from krogon.infix import Infix

A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')

Maybe = Union[Tuple['just', A], Tuple['nothing']]


def just(value=None):
    return "just", value


def nothing():
    return "nothing", None


def from_value(value) -> Maybe[B]:
    return _cast_to_maybe(value)


def from_value_or_default(value, default) -> Maybe[B]:
    return from_maybe(
        _cast_to_maybe(value),
        dict(if_just=lambda x: just(x),
             if_nothing=lambda: _cast_to_maybe(default)))


@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
    if maybe[0] == "just":
        return _cast_to_maybe(func(maybe[1]))
    elif maybe[0] == "nothing":
        return maybe


@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
    if maybe[0] == "nothing":
        return _cast_to_maybe(func())
    elif maybe[0] == "just":
        return maybe


@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
    if maybe[0] == "just":
        return just(mapper(maybe[1]))
    elif maybe[0] == "nothing":
        return maybe


@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
    return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))


@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
    if_just: Callable = dict_args['if_just']
    if_nothing: Callable = dict_args['if_nothing']

    if maybe[0] == "just" and if_just is not None:
        return if_just(maybe[1])
    elif maybe[0] == "nothing" and if_nothing is not None:
        return if_nothing()
    else:
        raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))


def _cast_to_maybe(result):
    if result is None:
        return nothing()

    if isinstance(result, tuple) and len(result) == 2:
        maybe_type, value = result
        if maybe_type == "just" or maybe_type == "nothing":
            return result

    return just(result)
24.573171
93
0.629777
0
0
0
0
1,163
0.577171
0
0
158
0.078412
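Not part of the record above: a self-contained miniature of the same Maybe pattern (tuples tagged "just"/"nothing"), written with plain function calls instead of the krogon Infix operator so it runs without that dependency. The helper names here are simplified stand-ins, not the library's API.

def just(v=None):
    return ("just", v)

def nothing():
    return ("nothing", None)

def then(maybe, func):
    # Apply func only when a value is present, otherwise propagate nothing().
    return func(maybe[1]) if maybe[0] == "just" else maybe

def value_or(maybe, default):
    return maybe[1] if maybe[0] == "just" else default

parse_int = lambda s: just(int(s)) if s.isdigit() else nothing()

print(value_or(then(just("42"), parse_int), 0))    # 42
print(value_or(then(just("oops"), parse_int), 0))  # 0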
4a7b9c4a8cadc7353c88a38e25dd9423d5d9fd02
1,224
py
Python
Python (desafios)/desafio 009.py
EbersonDias/html-css
b05ec122dc7649656bcfce92dc92ded127bbb2cf
[ "MIT" ]
null
null
null
Python (desafios)/desafio 009.py
EbersonDias/html-css
b05ec122dc7649656bcfce92dc92ded127bbb2cf
[ "MIT" ]
null
null
null
Python (desafios)/desafio 009.py
EbersonDias/html-css
b05ec122dc7649656bcfce92dc92ded127bbb2cf
[ "MIT" ]
null
null
null
# Desafio 009
# Write a program that reads any integer
# and shows its multiplication table on the screen.
n = int(input('digite um numero. '))
r1 = n * 1
r2 = (n * 2)
r3 = (n * 3)
r4 = (n * 4)
r5 = (n * 5)
r6 = (n * 6)
r7 = (n * 7)
r8 = (n * 8)
r9 = (n * 9)
r10 = (n * 10)
print('A Tabuada de {} é'.format(n))
print ('{} x 1 = {}'.format(n,r1))
print ('{} x 2 = {}'.format(n,r2))
print ('{} x 3 = {}'.format(n,r3))
print ('{} x 4 = {}'.format(n,r4))
print ('{} x 5 = {}'.format(n,r5))
print ('{} x 6 = {}'.format(n,r6))
print ('{} x 7 = {}'.format(n,r7))
print ('{} x 8 = {}'.format(n,r8))
print ('{} x 9 = {}'.format(n,r9))
print ('{} x 10 = {}'.format(n,r10))

# Another way to do it
n = int(input('Quanto é a Tabuada de '))
print('A Tabuada de {} é'.format(n))
print('-'*12)
print ('{} x {:2} = {}'.format(n, 1, n*1))
print ('{} x {:2} = {}'.format(n, 2, n*2))
print ('{} x {:2} = {}'.format(n, 3, n*3))
print ('{} x {:2} = {}'.format(n, 4, n*4))
print ('{} x {:2} = {}'.format(n, 5, n*5))
print ('{} x {:2} = {}'.format(n, 6, n*6))
print ('{} x {:2} = {}'.format(n, 7, n*7))
print ('{} x {:2} = {}'.format(n, 8, n*8))
print ('{} x {:2} = {}'.format(n, 9, n*9))
print ('{} x {:2} = {}'.format(n, 10, n*10))
print('-'*12)
29.142857
54
0.476307
0
0
0
0
0
0
0
0
508
0.413681
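Not part of the record above: the same multiplication table written with a loop, which removes the ten hand-written print calls and the r1..r10 variables while producing identical output. The prompt strings are kept from the record.

n = int(input('Quanto é a Tabuada de '))
print('A Tabuada de {} é'.format(n))
print('-' * 12)
for i in range(1, 11):
    print('{} x {:2} = {}'.format(n, i, n * i))
print('-' * 12)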
4a7be356f01ce20843ac2c23c55739f318ee8ab2
110
py
Python
tools/__init__.py
supercatex/TelloEdu
8f434dbc9866be3025cb119175c40f1d2d7fb5f3
[ "MIT" ]
1
2019-12-04T04:30:06.000Z
2019-12-04T04:30:06.000Z
tools/__init__.py
supercatex/TelloEdu
8f434dbc9866be3025cb119175c40f1d2d7fb5f3
[ "MIT" ]
null
null
null
tools/__init__.py
supercatex/TelloEdu
8f434dbc9866be3025cb119175c40f1d2d7fb5f3
[ "MIT" ]
null
null
null
from tools.TelloEdu import TelloEdu
from tools.Controller import *
from tools.SocketObject import SocketClient
36.666667
43
0.863636
0
0
0
0
0
0
0
0
0
0
4a7c28f2d0e401facd4b7a43c6ef059a3a83d500
1,193
py
Python
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
1,080
2015-01-04T08:35:00.000Z
2022-03-27T09:15:52.000Z
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
24
2015-02-21T01:48:28.000Z
2021-11-26T02:38:56.000Z
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
1,241
2015-01-02T10:47:10.000Z
2022-03-27T09:42:23.000Z
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from oslo_config import cfg

from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers


agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)

enable_connection_uri = functools.partial(
    priv_helpers.enable_connection_uri,
    log_fail_as_error=False, check_exit_code=False,
    timeout=cfg.CONF.OVS.ovsdb_timeout,
    inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
37.28125
78
0.776194
0
0
0
0
0
0
0
0
596
0.499581
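Not part of the record above: a minimal functools.partial sketch of the pattern the neutron helper uses to pre-bind timeout-style keyword arguments onto a callable. The connect function below is a made-up stand-in for priv_helpers.enable_connection_uri.

import functools

def connect(uri, timeout=10, inactivity_probe=0):
    # Stand-in for the privileged helper; just reports what it was called with.
    return "connect {} timeout={} probe={}".format(uri, timeout, inactivity_probe)

enable_connection_uri = functools.partial(connect, timeout=30, inactivity_probe=5000)
print(enable_connection_uri("tcp:127.0.0.1:6640"))
# connect tcp:127.0.0.1:6640 timeout=30 probe=5000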
4a7c2f9da4b8409fc6aa35e9cf22595f9dcc870b
302
py
Python
conlo/serializer/json_serializer.py
kira607/config_loader
024f33d48fee1635dfa9ed286f84bb96f22c134a
[ "MIT" ]
null
null
null
conlo/serializer/json_serializer.py
kira607/config_loader
024f33d48fee1635dfa9ed286f84bb96f22c134a
[ "MIT" ]
null
null
null
conlo/serializer/json_serializer.py
kira607/config_loader
024f33d48fee1635dfa9ed286f84bb96f22c134a
[ "MIT" ]
null
null
null
import json

from .base_serializer import BaseSerializer


class JsonSerializer(BaseSerializer):
    '''Json serializer.'''

    def _serialize(self, data: dict, **kwargs) -> str:
        return json.dumps(data)

    def _deserialize(self, data: str, **kwargs) -> dict:
        return json.loads(data)
21.571429
56
0.675497
242
0.801325
0
0
0
0
0
0
22
0.072848
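Not part of the record above: a round-trip sketch of the JsonSerializer hooks. BaseSerializer is not shown in the record, so this standalone version drops it and calls the underscore methods directly; in the real class they are presumably invoked through the base class's public serialize/deserialize interface.

import json

class JsonSerializer:
    def _serialize(self, data: dict, **kwargs) -> str:
        return json.dumps(data)

    def _deserialize(self, data: str, **kwargs) -> dict:
        return json.loads(data)

s = JsonSerializer()
payload = s._serialize({"retries": 3, "debug": True})
print(payload)                  # {"retries": 3, "debug": true}
print(s._deserialize(payload))  # {'retries': 3, 'debug': True}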
4a7c6a7695f0b0415525906b878d73cc448533e5
264
py
Python
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
import requests

locations = ['Лондон', 'Шереметьево', 'Череповец']
payload = {'mnTq': '', 'lang': 'ru'}

for location in locations:
    response = requests.get(f'http://wttr.in/{location}', params=payload)
    response.raise_for_status()
    print(response.text)
26.4
73
0.681818
0
0
0
0
0
0
0
0
104
0.358621
4a7c6e1277408f69b722e24dda7d218cc70dda0f
1,192
py
Python
migrations/versions/576712576c48_added_model_for_photo_comments.py
Torniojaws/vortech-backend
f775a97eeae089fa720088d86fe92d40bc5d65bc
[ "MIT" ]
null
null
null
migrations/versions/576712576c48_added_model_for_photo_comments.py
Torniojaws/vortech-backend
f775a97eeae089fa720088d86fe92d40bc5d65bc
[ "MIT" ]
93
2017-09-01T22:24:10.000Z
2021-12-22T14:07:06.000Z
migrations/versions/576712576c48_added_model_for_photo_comments.py
Torniojaws/vortech-backend
f775a97eeae089fa720088d86fe92d40bc5d65bc
[ "MIT" ]
null
null
null
"""Added model for photo comments Revision ID: 576712576c48 Revises: 75bb906df167 Create Date: 2018-03-30 02:06:22.877079 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '576712576c48' down_revision = '75bb906df167' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('CommentsPhotos', sa.Column('CommentID', sa.Integer(), nullable=False), sa.Column('PhotoID', sa.Integer(), nullable=False), sa.Column('Comment', sa.Text(), nullable=False), sa.Column('UserID', sa.Integer(), nullable=False), sa.Column('Created', sa.DateTime(), nullable=True), sa.Column('Updated', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['PhotoID'], ['Photos.PhotoID'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['UserID'], ['Users.UserID'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('CommentID') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('CommentsPhotos') # ### end Alembic commands ###
30.564103
81
0.672819
0
0
0
0
0
0
0
0
549
0.46057
4a7c8678af28d04fe1e6fb14eef66f905c9017b0
164
py
Python
__init__.py
m3sserschmitt/basic-http
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
[ "MIT" ]
null
null
null
__init__.py
m3sserschmitt/basic-http
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
[ "MIT" ]
null
null
null
__init__.py
m3sserschmitt/basic-http
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
[ "MIT" ]
null
null
null
import basic_http.session

basic_http.session.LIB_VERSION = 'v0.0.4-beta'
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
32.8
89
0.810976
0
0
0
0
0
0
0
0
34
0.207317
4a7f99985562db134bffd977ed750d635522a7a2
12,364
py
Python
usaspending_api/etl/helpers.py
truthiswill/usaspending-api
bd7d915442e2ec94cc830c480ceeffd4479be6c0
[ "CC0-1.0" ]
null
null
null
usaspending_api/etl/helpers.py
truthiswill/usaspending-api
bd7d915442e2ec94cc830c480ceeffd4479be6c0
[ "CC0-1.0" ]
3
2020-02-12T01:16:46.000Z
2021-06-10T20:36:57.000Z
usaspending_api/etl/helpers.py
truthiswill/usaspending-api
bd7d915442e2ec94cc830c480ceeffd4479be6c0
[ "CC0-1.0" ]
null
null
null
from datetime import datetime import warnings import logging from django.db.models import Q, Case, Value, When from django.core.cache import caches, CacheKeyWarning import django.apps from usaspending_api.references.models import Agency, Location, RefCountryCode from usaspending_api.references.helpers import canonicalize_location_dict from usaspending_api.submissions.models import SubmissionAttributes from usaspending_api.data.daims_maps import daims_maps warnings.simplefilter("ignore", CacheKeyWarning) def clear_caches(): for cache_name in ('default', 'locations', 'awards'): caches[cache_name].clear() def cleanse_values(row): """ Remove textual quirks from CSV values. """ row = {k: v.strip() for (k, v) in row.items()} row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()} return row def convert_date(date): if date == "": return None return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d') def get_subtier_agency_dict(): """Returns a dictionary with key = subtier agency code and value = agency id.""" # there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier # codes, the dictionary we return will reflect the most recently updated one agencies = Agency.objects.all().values( 'id', 'subtier_agency__subtier_code').order_by('subtier_agency__update_date') subtier_agency_dict = { a['subtier_agency__subtier_code']: a['id'] for a in agencies } return subtier_agency_dict def fetch_country_code(vendor_country_code): code_str = up2colon(vendor_country_code) if code_str == "": return None country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first() if not country_code: # We don't have an exact match on the name or the code, so we need to # chain filter on the name query_set = RefCountryCode.objects for word in code_str.split(): query_set = query_set.filter(country_name__icontains=word) country_code = query_set.first() return country_code location_cache = caches['locations'] def get_or_create_location(row, mapper): location_dict = mapper(row) # Country-specific adjustments if location_dict["location_country_code"] == "USA": # Apparently zip codes are optional... if location_dict["location_zip"]: location_dict.update( zip5=location_dict["location_zip"][:5], zip_last4=location_dict["location_zip"][5:]) location_dict.pop("location_zip") else: location_dict.update( foreign_postal_code=location_dict.pop("location_zip", None), foreign_province=location_dict.pop("state_code", None)) if "city_name" in location_dict: location_dict['foreign_city_name'] = location_dict.pop("city_name") location_dict = canonicalize_location_dict(location_dict) location_tup = tuple(location_dict.items()) location = location_cache.get(location_tup) if location: return location location = Location.objects.filter(**location_dict).first() if not location: location = Location.objects.create(**location_dict) location_cache.set(location_tup, location) return location def up2colon(input_string): 'Takes the part of a string before `:`, if any.' if input_string: return input_string.split(':')[0].strip() return '' def parse_numeric_value(string): try: return float(string) except Exception: return None def get_fiscal_quarter(fiscal_reporting_period): """ Return the fiscal quarter. Note: the reporting period being passed should already be in "federal fiscal format", where period 1 = Oct. and period 12 = Sept. 
""" if fiscal_reporting_period in [1, 2, 3]: return 1 elif fiscal_reporting_period in [4, 5, 6]: return 2 elif fiscal_reporting_period in [7, 8, 9]: return 3 elif fiscal_reporting_period in [10, 11, 12]: return 4 def get_previous_submission(cgac_code, fiscal_year, fiscal_period): """ For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the previous submission within the same fiscal year. """ previous_submission = SubmissionAttributes.objects \ .filter( cgac_code=cgac_code, reporting_fiscal_year=fiscal_year, reporting_fiscal_period__lt=fiscal_period, quarter_format_flag=True) \ .order_by('-reporting_fiscal_period') \ .first() return previous_submission def update_model_description_fields(): """ This method searches through every model Django has registered, checks if it belongs to a list of apps we should update, and updates all fields with '_description' at the end with their relevant information. Dictionaries for DAIMS definitions should be stored in: usaspending_api/data/daims_maps.py Each map should be <field_name>_map for discoverability. If there are conflicting maps (i.e., two models use type_description, but different enumerations) prepend the map name with the model name and a dot. For examples of these situations, see the documentation in daims_maps.py """ logger = logging.getLogger('console') # This is a list of apps whose models will be checked for description fields updatable_apps = [ "accounts", "awards", "common", "financial_activities", "references", "submissions" ] # This iterates over every model that Django has registered for model in django.apps.apps.get_models(): # This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps. # Thus, we'll skip any django admin apps, like auth, corsheaders, etc. if model._meta.app_label not in updatable_apps: continue if model.__name__[:10] == "Historical": continue model_fields = [f.name for f in model._meta.get_fields()] # This supports multi-case DAIMS # We must filter on the model level rather than add them to the when clauses, because if there is a FK in the # when clause Django is not guaranteed to join on that table properly. 
# # This is an array of tuples of the following format # (Q object of filter, field_names -> case objects map for this filter) # # It is initialized with a blank filter and empty list, which is where default updates are stored model_filtered_update_case_map = [(Q(), {})] desc_fields = [field for field in model_fields if field.split('_')[-1] == "description"[:len(field.split('_')[-1])]] non_desc_fields = [field for field in model_fields if field not in desc_fields] desc_fields_mapping = {} for desc_field in desc_fields: actual_field_short = "_".join(desc_field.split('_')[:-1]) actual_field = None for field in non_desc_fields: if actual_field_short == field: actual_field = field elif actual_field_short == field[:len(actual_field_short)]: actual_field = field desc_fields_mapping[desc_field] = actual_field # Loop through each of the models fields to construct a case for each applicable field for field in model_fields: # We're looking for field names ending in _description split_name = field.split("_") # If the last element in our split name isn't description, skip it if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]: continue source_field = "_".join(split_name[:-1]) destination_field = field # This is the map name, prefixed by model name for when there are non-unique description fields source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field model_map_name = "{}.{}_map".format(model.__name__, source_field) map_name = "{}_map".format(source_field) # This stores a direct reference to the enumeration mapping code_map = None # Validate we have the source field if source_field not in model_fields: logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.". format(destination_field, model.__name__, source_field)) continue # Validate we have a map # Prefer model_map_name over map_name if model_map_name in daims_maps.keys(): code_map = daims_maps[model_map_name] elif map_name in daims_maps.keys(): code_map = daims_maps[map_name] else: logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.". format(destination_field, model.__name__, model_map_name, map_name)) continue # Cases start from 1 case_number = 1 case_name = "case_1" case_map = "case_1_map" while case_name in code_map.keys(): case_object = create_case(code_map[case_map], source_field) # Construct a Q filter for this case case_filter = Q(**code_map[case_name]) # See if we already have a tuple for this filter case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter] if len(case_tuple) == 0: # We don't, so create the tuple temp_case_dict = {} temp_case_dict[field] = case_object model_filtered_update_case_map.append((case_filter, temp_case_dict)) else: # We do, so just add our case object to that dictionary case_tuple[0][1][field] = case_object # Check for the next case case_number += 1 case_name = "case_{}".format(case_number) case_map = "case_{}_map".format(case_number) # If our case number is still 1, then we didn't have any cases. Therefore, we perform the default if case_number == 1: case_object = create_case(code_map, source_field) # Grab the first tuple, which has no filters case_tuple = model_filtered_update_case_map[0] # Add it to our dictionary case_tuple[1][field] = case_object for filter_tuple in model_filtered_update_case_map: # For each filter tuple, check if the dictionary has any entries if len(filter_tuple[1].keys()) > 0: print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}". 
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys()))) try: model.objects.filter(filter_tuple[0]).update(**filter_tuple[1]) except django.db.utils.ProgrammingError as e: logger.warn(str(e)) logger.warn("(OK if invoked from a migration, when the table may not yet have been created)") # Utility method for update_model_description_fields, creates the Case object def create_case(code_map, source_field): when_list = [] default = None for code in code_map.keys(): when_args = {} when_args[source_field] = code when_args["then"] = Value(code_map[code]) # If our code is blank, change the comparison to "" if code == "_BLANK": when_args[source_field] = Value("") # We handle the default case later if code == "_DEFAULT": default = Value(code_map[code]) continue # Append a new when to our when-list when_list.append(When(**when_args)) return Case(*when_list, default=default)
38.397516
119
0.63604
0
0
0
0
0
0
0
0
4,248
0.343578
4a7ff589828eca63a17e67bce0eb8c34992e953a
158
py
Python
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py
DevAerial/flask-api-template
6d3f745f2dacb793c4bdc6aaaceb86eb472efe55
[ "MIT" ]
null
null
null
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py
DevAerial/flask-api-template
6d3f745f2dacb793c4bdc6aaaceb86eb472efe55
[ "MIT" ]
null
null
null
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py
DevAerial/flask-api-template
6d3f745f2dacb793c4bdc6aaaceb86eb472efe55
[ "MIT" ]
null
null
null
from flask_marshmallow import Marshmallow{% if cookiecutter.use_celery == 'yes'%}
from celery import Celery

celery = Celery(){% endif %}

ma = Marshmallow()
22.571429
81
0.740506
0
0
0
0
0
0
0
0
5
0.031646
4a80119456047b966a3757d7fd0f105dc0f5c4f6
9,193
py
Python
code/mapplot.py
young-astronomer/vlpy
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
[ "Apache-2.0" ]
null
null
null
code/mapplot.py
young-astronomer/vlpy
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
[ "Apache-2.0" ]
null
null
null
code/mapplot.py
young-astronomer/vlpy
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Oct 21 11:11:56 2020 This program is use to plot polarization map from vlbi fits image. You should specify the input fits images by -i or --infile, output file by -o or --output, contour levs by -l or --levs contour base by -c or --cmul polarization parameters by -p or --pol: "icut pcut inc scale" plot window by -w or --win restore beam position by -b or --bpos figsize by -f or --figsize Installation: 1. copy file chmod a+x mapplot.py cp mapplot.py ~/myapp 2. set envioment parameters Add the following line to ~/.bashrc export PATH=$PATH:/home/usename/myapp source ~/.bashrc Running like this: mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul> mapplot.py i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol> Examples: 1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5' 2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3 https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize @author: Li, Xiaofeng Shanghai Astronomical Observatory, Chinese Academy of Sciences E-mail: [email protected]; [email protected] """ import sys import getopt from astropy.io import fits from astropy.table import Table import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Ellipse import matplotlib.colors as mcolors def add_beam(ax, win, h, bpos=None, pad=2.0): if bpos==None : x = win[0] - pad * h['bmaj']*3.6E6 y = win[2] + pad * h['bmaj']*3.6E6 bpos = (x, y) bmaj = h['bmaj'] * 3.6E6 bmin = h['bmin'] * 3.6E6 bpa = 90 - h['bpa'] e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray') ax.add_artist(e) def annotate(ax, notefile=''): if notefile != '': tab = Table.read(notefile, format='csv') for t in tab: ax.text(t['x'], t['y'], t['text']) # ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction') # ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction') def cut_cmap(cmap, N_cut=0): # cmap = mcolors.Colormap(cmap) cmap = plt.get_cmap(cmap) x = np.arange(N_cut, 256) / 256.0 color_index = cmap(x) cmap = mcolors.ListedColormap(color_index) return cmap def get_normalize(args, vmin=0.0, vmax=1.0): if args == '': norm = mcolors.Normalize(vmin, vmax) args = args.split(' ') name = args[0] if name == 'linear': if len(args)==3: vmin, vmax = np.array(args[1:], dtype='f4') norm = mcolors.Normalize(vmin, vmax, True) elif name == 'power': if len(args)==1: gamma = 0.5 if len(args)==2: gamma = float(args[1]) elif len(args)==4: gamma, vmin, vmax = np.array(args[1:], dtype='f4') if gamma < 1.0 and vmin < 0.0: vmin = 0.0 norm = mcolors.PowerNorm(gamma, vmin, vmax, True) elif name == 'log': if len(args)==3: vmin, vmax = np.array(args[1:], dtype='f4') norm = mcolors.LogNorm(vmin, vmax) elif name == 'symlog': if len(args)==2: linthresh = float(args[1]) linscale = 1.0 elif len(args)==3: linthresh, linscale = np.array(args[1:], dtype='f4') elif len(args)==5: linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4') norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax) elif name == 'twoslope': if len(args)==2: vcenter = float(args[1]) elif len(args)==4: vcenter, vmin, vmax = np.array(args[1:], dtype='f4') norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax) return norm def add_annotation(ax, infile=''): if infile == '': return with open(infile, 'r') as f: for line in f.readlines(): row = 
line.split(',') row = [col.strip() for col in row] typ = row[0] args = row[1:] if typ == 'text': x, y, text = args x, y = float(x), float(y) ax.text(x, y, text) elif typ == 'arrow': x1, y1, x2, y2 = np.array(args, dtype='f4') ax.annotate("", xy=(x1, y1), xytext=(x2, y2), arrowprops=dict(arrowstyle="->", connectionstyle="arc3")) elif typ == 'annotation': x1, y1, x2, y2 = np.array(args[:-1], dtype='f4') text = args[-1] ax.annotate(text, xy=(x1, y1), xytext=(x2, y2), arrowprops=dict(arrowstyle="->", connectionstyle="arc3")) elif typ == 'ellipse': x, y, majax, minax, pa = np.array(args, dtype='f4') e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-') ax.add_artist(e) def set_axis(ax, w): ax.set_aspect('equal') ax.set_xlabel('Relative R.A. (mas)') ax.set_ylabel('Relative Dec. (mas)') ax.set_xlim(w[0],w[1]) ax.set_ylim(w[2],w[3]) ax.tick_params(which='both', direction='in', length=6, right=True, top=True) ax.tick_params(which='minor',length=4) ax.minorticks_on() def word2pix(w, h): if w == None: W = [0, h['naxis1'], 0, h['naxis2']] else: x0, x1, y0, y1 = w X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6) Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6) X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6) Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6) W = [int(X0), int(X1), int(Y0), int(Y1)] return W def pix2word(W, h): if W == None: W = [0, h['naxis1'], 0, h['naxis2']] X0, X1, Y0, Y1 = W x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1']) y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2']) x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1']) y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2']) w = [x0, x1, y0, y1] return w def savefig(outfile, dpi=100): if outfile.lower().endswith('.pdf') : plt.savefig(outfile) elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'): plt.savefig(outfile, dpi=dpi) elif outfile.lower().endswith('.png'): plt.savefig(outfile, dpi=dpi) def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None, figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0, norm='', fraction=0.05): hdul = fits.open(infile) h = hdul[0].header # img = hdul[0].data[0, 0, :, :] if levs==None: levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096]) # print(win) if figsize == None : figsize = (6, 6) if win == None: win = pix2word(None, h) W = word2pix(None, h) else: W = word2pix(win, h) img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]] if cmap == '': cmap = 'rainbow' cmap = cut_cmap(cmap, N_cut) vmin, vmax = np.min(img), np.max(img) if norm == '': norm = 'linear %.3f %.3f' % (vmin, vmax) norm = get_normalize(norm, vmin, vmax) fig, ax = plt.subplots() fig.set_size_inches(figsize) set_axis(ax, win) add_beam(ax, win, h, bpos=bpos) add_annotation(ax, annotationfile) ax.contour(img, levs, extent=win, linewidths=0.5, colors='k') pcm = ax.imshow(img, extent=win, origin='lower', interpolation='none', cmap=cmap, norm=norm) cbar = fig.colorbar(pcm, ax=ax, fraction=fraction) # cbar.ax.minorticks_off() cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both') cbar.ax.tick_params(axis='y', labelrotation=90) fig.tight_layout(pad=0.5) if outfile != '': savefig(outfile, dpi) hdul.close() def myhelp(): print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>') print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"') def main(argv): # infile = r'3c66a-calib/circe-beam.fits' infile = '' outfile = '' annotationfile = '' cmul = '' win = None levs = None bpos = None figsize = None dpi = 100 colormap = '' N_cut = 0 norm = '' fraction = 
0.05 try: opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:", ['help', 'infile=', 'cmul=', 'outfile=', 'win=', 'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=', 'N_cut=', 'norm=', 'fraction=']) except getopt.GetoptError: myhelp() sys.exit(2) for opt, arg in opts: if opt in ('-h', '--help'): myhelp() elif opt in ('-i', '--infile'): infile = arg elif opt in ('-c', '--cmul'): cmul = arg elif opt in ('-o', '--outfile'): outfile = arg elif opt in ('-w', '--win'): win = arg elif opt in ('-l', '--levs'): levs = np.array(arg.split(), dtype=np.float64).tolist() elif opt in ('-b', '--bpos'): bpos = np.array(arg.split(), dtype=np.float64).tolist() elif opt in ('-f', '--figsize'): figsize = np.array(arg.split(), dtype=np.float64).tolist() elif opt in ('-d', '--dpi'): dpi = int(arg) elif opt in ('-a', '--annotatefile'): annotationfile = arg elif opt in ('--colormap', ): colormap = arg elif opt in ('-N', '--N_cut'): N_cut = int(arg) elif opt in ('-n', '--norm'): norm = arg elif opt in ('--fraction',): fraction = float(arg) if infile=='' and len(args)==2: infile, cmul = args if infile=='' and len(args)==3: infile, outfile, cmul = args if infile=='' and len(args)==4: infile, outfile, cmul, win = args if outfile == '': outfile = infile.split('.')[0] + '.pdf' cmul = float(cmul) if type(win) == str: win = np.array(win.split(), dtype=np.float64).tolist() mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos, figsize=figsize, dpi=dpi, annotationfile=annotationfile, cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction) if __name__ == '__main__' : main(sys.argv[1:])
30.042484
101
0.617753
0
0
0
0
0
0
0
0
2,694
0.293049
4a81890c9e9eec4855a38a91238cf619244d9278
2,174
py
Python
umbrella/api/v1/router.py
pizhi/umbrella
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
[ "Apache-2.0" ]
1
2018-01-13T11:45:24.000Z
2018-01-13T11:45:24.000Z
umbrella/api/v1/router.py
pizhi/umbrella
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
[ "Apache-2.0" ]
null
null
null
umbrella/api/v1/router.py
pizhi/umbrella
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
[ "Apache-2.0" ]
2
2018-01-01T11:39:49.000Z
2018-08-07T07:16:45.000Z
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from umbrella.api.v1 import api from umbrella.common import wsgi class API(wsgi.Router): """WSGI router for Glance v1 API requests.""" def __init__(self, mapper): api_resource = api.create_resource() mapper.connect("/", controller=api_resource, action="index") mapper.connect("/images", controller=api_resource, action='index', conditions={'method': ['GET']}) mapper.connect("/images/{id}", controller=api_resource, action="show", conditions=dict(method=["GET"])) mapper.connect("/net/{instance_uuid}", controller=api_resource, action="get_net_sample", conditions=dict(method=["GET"])) mapper.connect("/cpu/{instance_uuid}", controller=api_resource, action="get_cpu_sample", conditions=dict(method=["GET"])) mapper.connect("/disk/{instance_uuid}", controller=api_resource, action="get_disk_sample", conditions=dict(method=["GET"])) mapper.connect("/mem/{instance_uuid}", controller=api_resource, action="get_mem_sample", conditions=dict(method=["GET"])) super(API, self).__init__(mapper)
36.233333
78
0.559798
1,469
0.675713
0
0
0
0
0
0
904
0.415823
4a8279873b5f73ab9eb14c009ec624c039c590a5
943
py
Python
exemples/test_thomson_simu.py
butala/TomograPy
a1da41f1e0b7406a1b770e56428789c54175de20
[ "CECILL-B" ]
7
2016-07-05T08:31:42.000Z
2022-03-31T20:24:13.000Z
exemples/test_thomson_simu.py
esoubrie/TomograPy
a1da41f1e0b7406a1b770e56428789c54175de20
[ "CECILL-B" ]
null
null
null
exemples/test_thomson_simu.py
esoubrie/TomograPy
a1da41f1e0b7406a1b770e56428789c54175de20
[ "CECILL-B" ]
4
2018-08-14T01:54:21.000Z
2022-03-10T19:44:43.000Z
#!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
30.419355
82
0.680806
0
0
0
0
0
0
0
0
206
0.218452
4a82ccd998802091de5e9ed946344d30c5ebeba5
8,124
py
Python
geopy/geocoders/google.py
ulope/geopy
605d0d84137a93949ad03820fa31dc2dab77f089
[ "MIT" ]
1
2021-03-12T15:31:30.000Z
2021-03-12T15:31:30.000Z
geopy/geocoders/google.py
ulope/geopy
605d0d84137a93949ad03820fa31dc2dab77f089
[ "MIT" ]
null
null
null
geopy/geocoders/google.py
ulope/geopy
605d0d84137a93949ad03820fa31dc2dab77f089
[ "MIT" ]
null
null
null
import logging from urllib import urlencode from urllib2 import urlopen import simplejson import xml from xml.parsers.expat import ExpatError from geopy.geocoders.base import Geocoder from geopy import Point, Location, util class Google(Geocoder): """Geocoder using the Google Maps API.""" def __init__(self, api_key=None, domain='maps.google.com', resource='maps/geo', format_string='%s', output_format='kml'): """Initialize a customized Google geocoder with location-specific address information and your Google Maps API key. ``api_key`` should be a valid Google Maps API key. It is required for the 'maps/geo' resource to work. ``domain`` should be a the Google Maps domain to connect to. The default is 'maps.google.com', but if you're geocoding address in the UK (for example), you may want to set it to 'maps.google.co.uk'. ``resource`` is the HTTP resource to give the query parameter. 'maps/geo' is the HTTP geocoder and is a documented API resource. 'maps' is the actual Google Maps interface and its use for just geocoding is undocumented. Anything else probably won't work. ``format_string`` is a string containing '%s' where the string to geocode should be interpolated before querying the geocoder. For example: '%s, Mountain View, CA'. The default is just '%s'. ``output_format`` can be 'json', 'xml', 'kml', 'csv', or 'js' and will control the output format of Google's response. The default is 'kml' since it is supported by both the 'maps' and 'maps/geo' resources. The 'js' format is the most likely to break since it parses Google's JavaScript, which could change. However, it currently returns the best results for restricted geocoder areas such as the UK. """ self.api_key = api_key self.domain = domain self.resource = resource self.format_string = format_string self.output_format = output_format @property def url(self): domain = self.domain.strip('/') resource = self.resource.strip('/') return "http://%(domain)s/%(resource)s?%%s" % locals() def geocode(self, string, exactly_one=True, language_code=None, sensor=False, viewport_center=None, viewport_span=None): params = {'q': self.format_string % string, 'output': self.output_format.lower(), 'sensor': str(sensor).lower(), } if language_code: params.update({'gl': language_code}) if viewport_center and viewport_span: params.update({ 'll': viewport_center, 'spn': viewport_span, }) if self.resource.rstrip('/').endswith('geo'): # An API key is only required for the HTTP geocoder. params['key'] = self.api_key url = self.url % urlencode(params) return self.geocode_url(url, exactly_one) def reverse(self, coord, exactly_one=True): (lat,lng) = coord params = {'q': self.format_string % lat+','+self.format_string % lng, 'output': self.output_format.lower() } if self.resource.rstrip('/').endswith('geo'): # An API key is only required for the HTTP geocoder. params['key'] = self.api_key url = self.url % urlencode(params) return self.geocode_url(url, exactly_one, reverse=True) def geocode_url(self, url, exactly_one=True, reverse=False): logging.getLogger().info("Fetching %s..." % url) page = urlopen(url) dispatch = getattr(self, 'parse_' + self.output_format) return dispatch(page, exactly_one, reverse) def parse_xml(self, page, exactly_one=True, reverse=False): """Parse a location name, latitude, and longitude from an XML response. 
""" if not isinstance(page, basestring): page = util.decode_page(page) try: doc = xml.dom.minidom.parseString(page) except ExpatError: places = [] else: places = doc.getElementsByTagName('Placemark') if (exactly_one and len(places) != 1) and (not reverse): raise ValueError("Didn't find exactly one placemark! " \ "(Found %d.)" % len(places)) def parse_place(place): location = util.get_first_text(place, ['address', 'name']) or None points = place.getElementsByTagName('Point') point = points and points[0] or None coords = util.get_first_text(point, 'coordinates') or None if coords: longitude, latitude = [float(f) for f in coords.split(',')[:2]] else: latitude = longitude = None _, (latitude, longitude) = self.geocode(location) return (location, (latitude, longitude)) if exactly_one: return parse_place(places[0]) else: return (parse_place(place) for place in places) def parse_csv(self, page, exactly_one=True, reverse=False): raise NotImplementedError def parse_kml(self, page, exactly_one=True, reverse=False): return self.parse_xml(page, exactly_one, reverse) def parse_json(self, page, exactly_one=True, reverse=False): if not isinstance(page, basestring): page = util.decode_page(page) json = simplejson.loads(page) places = json.get('Placemark', []) if (exactly_one and len(places) != 1) and (not reverse): raise ValueError("Didn't find exactly one placemark! " \ "(Found %d.)" % len(places)) def parse_place(place): location = place.get('address') longitude, latitude = place['Point']['coordinates'][:2] # Add support for pulling out the canonical name locality = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('Locality',{}).get('LocalityName') administrative = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('AdministrativeAreaName') return util.RichResult((location, (latitude, longitude)), locality=locality, administrative=administrative) if exactly_one: return parse_place(places[0]) else: return (parse_place(place) for place in places) def parse_js(self, page, exactly_one=True, reverse=False): """This parses JavaScript returned by queries the actual Google Maps interface and could thus break easily. However, this is desirable if the HTTP geocoder doesn't work for addresses in your country (the UK, for example). """ if not isinstance(page, basestring): page = util.decode_page(page) LATITUDE = r"[\s,]lat:\s*(?P<latitude>-?\d+\.\d+)" LONGITUDE = r"[\s,]lng:\s*(?P<longitude>-?\d+\.\d+)" LOCATION = r"[\s,]laddr:\s*'(?P<location>.*?)(?<!\\)'," ADDRESS = r"(?P<address>.*?)(?:(?: \(.*?@)|$)" MARKER = '.*?'.join([LATITUDE, LONGITUDE, LOCATION]) MARKERS = r"{markers: (?P<markers>\[.*?\]),\s*polylines:" def parse_marker(marker): latitude, longitude, location = marker location = re.match(ADDRESS, location).group('address') latitude, longitude = float(latitude), float(longitude) return (location, (latitude, longitude)) match = re.search(MARKERS, page) markers = match and match.group('markers') or '' markers = re.findall(MARKER, markers) if exactly_one: if len(markers) != 1 and (not reverse): raise ValueError("Didn't find exactly one marker! " \ "(Found %d.)" % len(markers)) marker = markers[0] return parse_marker(marker) else: return (parse_marker(marker) for marker in markers)
41.238579
139
0.601674
7,893
0.971566
0
0
175
0.021541
0
0
2,753
0.338872
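A hedged sketch of the intended call pattern for the legacy geocoder record above; the API key and address are placeholders. Note that the module's own imports look incomplete as stored: parse_js uses the re module, which the file never imports, and parse_xml relies on xml.dom.minidom being bound even though only the top-level xml package is imported, so treat this as an outline of the intended usage rather than verified working code.

# Python 2 era sketch (the module uses urllib2 and basestring); values are placeholders.
from geopy.geocoders.google import Google

geocoder = Google(api_key='YOUR-LEGACY-MAPS-KEY')
result = geocoder.geocode('1600 Amphitheatre Parkway, Mountain View, CA')
print result   # expected shape: (location, (latitude, longitude)) when exactly_one=True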
4a845cfff802e634071ade849b849c82adc47ef1
395
py
Python
interactive_grabcut/repo/drag2draw.py
hiankun/py_sandbox
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
[ "MIT" ]
null
null
null
interactive_grabcut/repo/drag2draw.py
hiankun/py_sandbox
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
[ "MIT" ]
null
null
null
interactive_grabcut/repo/drag2draw.py
hiankun/py_sandbox
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
[ "MIT" ]
null
null
null
# source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *

def paint(event):
    color = 'red'
    x1, y1 = (event.x-1), (event.y-1)
    x2, y2 = (event.x+1), (event.y+1)
    c.create_oval(x1,y1,x2,y2,fill=color,outline=color)

master = Tk()

c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)

master.mainloop()
21.944444
55
0.648101
0
0
0
0
0
0
0
0
78
0.197468
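A small variant sketch of the drag-to-draw demo above, showing how the same Canvas pattern can bind a second mouse button as an eraser. Nothing here comes from the original repository; it is purely illustrative.

# Illustrative extension: left-drag paints red, right-drag "erases" by painting the background color.
from tkinter import Tk, Canvas, BOTH

def draw(event, color):
    # Draw a small oval centered on the pointer, same trick as the original paint().
    c.create_oval(event.x - 1, event.y - 1, event.x + 1, event.y + 1,
                  fill=color, outline=color)

master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', lambda e: draw(e, 'red'))
c.bind('<B3-Motion>', lambda e: draw(e, 'white'))  # eraser on right-button drag
master.mainloop()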
4a85a5edb74a35f6879d8683f009ca6b7f10f18c
194
py
Python
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
126
2022-01-15T02:29:07.000Z
2022-03-30T09:57:40.000Z
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
18
2022-01-11T22:24:35.000Z
2022-03-16T00:13:01.000Z
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
25
2022-01-22T15:06:27.000Z
2022-03-01T04:34:19.000Z
""" insert default serverInfo """ from yoyo import step __depends__ = {'20220114_02_lHBKM-new-table-serverinfo'} steps = [ step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);") ]
16.166667
65
0.695876
0
0
0
0
0
0
0
0
128
0.659794
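For context on the yoyo API used in the migration above: step() also accepts a rollback statement as its second argument. A hedged sketch of a reversible variant follows; the DELETE statement is invented for illustration and is not part of the repository.

# Hypothetical reversible version of the migration above (sketch only).
from yoyo import step

__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}

steps = [
    step(
        "INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);",   # apply
        "DELETE FROM `serverInfo` WHERE `onlineMinute` = 0;"       # rollback (illustrative)
    )
]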
4a861f0810192c03917c1a4cb2de99fa5681f49e
14,913
py
Python
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
slawqo/python-neutronclient
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
[ "Apache-2.0" ]
120
2015-01-07T00:38:58.000Z
2021-12-26T13:05:53.000Z
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
slawqo/python-neutronclient
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
[ "Apache-2.0" ]
1
2021-08-11T18:42:30.000Z
2021-08-11T22:25:21.000Z
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
slawqo/python-neutronclient
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
[ "Apache-2.0" ]
153
2015-01-05T16:50:50.000Z
2021-09-13T12:01:23.000Z
# Copyright 2017 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from osc_lib.cli import format_columns from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import columns as column_util from oslo_log import log as logging from neutronclient._i18n import _ from neutronclient.common import utils as nc_utils from neutronclient.osc import utils as osc_utils from neutronclient.osc.v2.vpnaas import utils as vpn_utils LOG = logging.getLogger(__name__) _formatters = { 'peer_cidrs': format_columns.ListColumn } _attr_map = ( ('id', 'ID', column_util.LIST_BOTH), ('name', 'Name', column_util.LIST_BOTH), ('peer_address', 'Peer Address', column_util.LIST_BOTH), ('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH), ('status', 'Status', column_util.LIST_BOTH), ('tenant_id', 'Project', column_util.LIST_LONG_ONLY), ('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY), ('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY), ('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY), ('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY), ('mtu', 'MTU', column_util.LIST_LONG_ONLY), ('initiator', 'Initiator', column_util.LIST_LONG_ONLY), ('admin_state_up', 'State', column_util.LIST_LONG_ONLY), ('description', 'Description', column_util.LIST_LONG_ONLY), ('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY), ('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY), ('local_id', 'Local ID', column_util.LIST_LONG_ONLY), ('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY), ('local_ep_group_id', 'Local Endpoint Group ID', column_util.LIST_LONG_ONLY), ('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY), ) def _convert_to_lowercase(string): return string.lower() def _get_common_parser(parser, is_create=True): parser.add_argument( '--description', metavar='<description>', help=_('Description for the connection')) parser.add_argument( '--dpd', metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT", type=nc_utils.str2dict_type( optional_keys=['action', 'interval', 'timeout']), help=vpn_utils.dpd_help("IPsec connection")) parser.add_argument( '--mtu', help=_('MTU size for the connection')) parser.add_argument( '--initiator', choices=['bi-directional', 'response-only'], type=_convert_to_lowercase, help=_('Initiator state')) peer_group = parser.add_mutually_exclusive_group() peer_group.add_argument( '--peer-cidr', dest='peer_cidrs', help=_('Remote subnet(s) in CIDR format. ' 'Cannot be specified when using endpoint groups. 
Only ' 'applicable, if subnet provided for VPN service.') ) peer_group.add_argument( '--local-endpoint-group', help=_('Local endpoint group (name or ID) with subnet(s) ' 'for IPsec connection') ) parser.add_argument( '--peer-endpoint-group', help=_('Peer endpoint group (name or ID) with CIDR(s) for ' 'IPSec connection')) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', help=_("Enable IPSec site connection") ) admin_group.add_argument( '--disable', action='store_true', help=_("Disable IPSec site connection") ) parser.add_argument( '--local-id', help=_('An ID to be used instead of the external IP ' 'address for a virtual router')) return parser def _get_common_attrs(client_manager, parsed_args, is_create=True): attrs = {} if is_create: if 'project' in parsed_args and parsed_args.project is not None: attrs['tenant_id'] = osc_utils.find_project( client_manager.identity, parsed_args.project, parsed_args.project_domain, ).id if parsed_args.description: attrs['description'] = str(parsed_args.description) if parsed_args.mtu: attrs['mtu'] = parsed_args.mtu if parsed_args.enable: attrs['admin_state_up'] = True if parsed_args.disable: attrs['admin_state_up'] = False if parsed_args.initiator: attrs['initiator'] = parsed_args.initiator if parsed_args.dpd: vpn_utils.validate_dpd_dict(parsed_args.dpd) attrs['dpd'] = parsed_args.dpd if parsed_args.local_endpoint_group: _local_epg = client_manager.neutronclient.find_resource( 'endpoint_group', parsed_args.local_endpoint_group, cmd_resource='endpoint_group')['id'] attrs['local_ep_group_id'] = _local_epg if parsed_args.peer_endpoint_group: _peer_epg = client_manager.neutronclient.find_resource( 'endpoint_group', parsed_args.peer_endpoint_group, cmd_resource='endpoint_group')['id'] attrs['peer_ep_group_id'] = _peer_epg if parsed_args.peer_cidrs: attrs['peer_cidrs'] = parsed_args.peer_cidrs if parsed_args.local_id: attrs['local_id'] = parsed_args.local_id return attrs class CreateIPsecSiteConnection(command.ShowOne): _description = _("Create an IPsec site connection") def get_parser(self, prog_name): parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name) _get_common_parser(parser) parser.add_argument( '--peer-id', required=True, help=_('Peer router identity for authentication. 
Can be ' 'IPv4/IPv6 address, e-mail address, key id, or FQDN')) parser.add_argument( '--peer-address', required=True, help=_('Peer gateway public IPv4/IPv6 address or FQDN')) parser.add_argument( '--psk', required=True, help=_('Pre-shared key string.')) parser.add_argument( '--vpnservice', metavar='VPNSERVICE', required=True, help=_('VPN service instance associated with this ' 'connection (name or ID)')) parser.add_argument( '--ikepolicy', metavar='IKEPOLICY', required=True, help=_('IKE policy associated with this connection (name or ID)')) parser.add_argument( '--ipsecpolicy', metavar='IPSECPOLICY', required=True, help=_('IPsec policy associated with this connection ' '(name or ID)')) parser.add_argument( 'name', metavar='<name>', help=_('Set friendly name for the connection')) osc_utils.add_project_owner_option_to_parser(parser) return parser def take_action(self, parsed_args): client = self.app.client_manager.neutronclient attrs = _get_common_attrs(self.app.client_manager, parsed_args) if parsed_args.vpnservice: _vpnservice_id = client.find_resource( 'vpnservice', parsed_args.vpnservice, cmd_resource='vpnservice')['id'] attrs['vpnservice_id'] = _vpnservice_id if parsed_args.ikepolicy: _ikepolicy_id = client.find_resource( 'ikepolicy', parsed_args.ikepolicy, cmd_resource='ikepolicy')['id'] attrs['ikepolicy_id'] = _ikepolicy_id if parsed_args.ipsecpolicy: _ipsecpolicy_id = client.find_resource( 'ipsecpolicy', parsed_args.ipsecpolicy, cmd_resource='ipsecpolicy')['id'] attrs['ipsecpolicy_id'] = _ipsecpolicy_id if parsed_args.peer_id: attrs['peer_id'] = parsed_args.peer_id if parsed_args.peer_address: attrs['peer_address'] = parsed_args.peer_address if parsed_args.psk: attrs['psk'] = parsed_args.psk if parsed_args.name: attrs['name'] = parsed_args.name if (bool(parsed_args.local_endpoint_group) != bool(parsed_args.peer_endpoint_group)): message = _("You must specify both local and peer endpoint " "groups") raise exceptions.CommandError(message) if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group: message = _("You must specify endpoint groups or peer CIDR(s)") raise exceptions.CommandError(message) obj = client.create_ipsec_site_connection( {'ipsec_site_connection': attrs})['ipsec_site_connection'] columns, display_columns = column_util.get_columns(obj, _attr_map) data = utils.get_dict_properties(obj, columns, formatters=_formatters) return display_columns, data class DeleteIPsecSiteConnection(command.Command): _description = _("Delete IPsec site connection(s)") def get_parser(self, prog_name): parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name) parser.add_argument( 'ipsec_site_connection', metavar='<ipsec-site-connection>', nargs='+', help=_('IPsec site connection to delete (name or ID)')) return parser def take_action(self, parsed_args): client = self.app.client_manager.neutronclient result = 0 for ipsec_conn in parsed_args.ipsec_site_connection: try: ipsec_con_id = client.find_resource( 'ipsec_site_connection', ipsec_conn, cmd_resource='ipsec_site_connection')['id'] client.delete_ipsec_site_connection(ipsec_con_id) except Exception as e: result += 1 LOG.error(_("Failed to delete IPsec site connection with " "name or ID '%(ipsec_site_conn)s': %(e)s"), {'ipsec_site_conn': ipsec_conn, 'e': e}) if result > 0: total = len(parsed_args.ipsec_site_connection) msg = (_("%(result)s of %(total)s IPsec site connection failed " "to delete.") % {'result': result, 'total': total}) raise exceptions.CommandError(msg) class ListIPsecSiteConnection(command.Lister): 
_description = _("List IPsec site connections " "that belong to a given project") def get_parser(self, prog_name): parser = super(ListIPsecSiteConnection, self).get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, help=_("List additional fields in output") ) return parser def take_action(self, parsed_args): client = self.app.client_manager.neutronclient obj = client.list_ipsec_site_connections()['ipsec_site_connections'] headers, columns = column_util.get_column_definitions( _attr_map, long_listing=parsed_args.long) return (headers, (utils.get_dict_properties( s, columns, formatters=_formatters) for s in obj)) class SetIPsecSiteConnection(command.Command): _description = _("Set IPsec site connection properties") def get_parser(self, prog_name): parser = super(SetIPsecSiteConnection, self).get_parser(prog_name) _get_common_parser(parser) parser.add_argument( '--peer-id', help=_('Peer router identity for authentication. Can be ' 'IPv4/IPv6 address, e-mail address, key id, or FQDN')) parser.add_argument( '--peer-address', help=_('Peer gateway public IPv4/IPv6 address or FQDN')) parser.add_argument( '--name', metavar='<name>', help=_('Set friendly name for the connection')) parser.add_argument( 'ipsec_site_connection', metavar='<ipsec-site-connection>', help=_('IPsec site connection to set (name or ID)')) return parser def take_action(self, parsed_args): client = self.app.client_manager.neutronclient attrs = _get_common_attrs(self.app.client_manager, parsed_args, is_create=False) if parsed_args.peer_id: attrs['peer_id'] = parsed_args.peer_id if parsed_args.peer_address: attrs['peer_address'] = parsed_args.peer_address if parsed_args.name: attrs['name'] = parsed_args.name ipsec_conn_id = client.find_resource( 'ipsec_site_connection', parsed_args.ipsec_site_connection, cmd_resource='ipsec_site_connection')['id'] try: client.update_ipsec_site_connection( ipsec_conn_id, {'ipsec_site_connection': attrs}) except Exception as e: msg = (_("Failed to set IPsec site " "connection '%(ipsec_conn)s': %(e)s") % {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e}) raise exceptions.CommandError(msg) class ShowIPsecSiteConnection(command.ShowOne): _description = _("Show information of a given IPsec site connection") def get_parser(self, prog_name): parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name) parser.add_argument( 'ipsec_site_connection', metavar='<ipsec-site-connection>', help=_('IPsec site connection to display (name or ID)')) return parser def take_action(self, parsed_args): client = self.app.client_manager.neutronclient ipsec_site_id = client.find_resource( 'ipsec_site_connection', parsed_args.ipsec_site_connection, cmd_resource='ipsec_site_connection')['id'] obj = client.show_ipsec_site_connection( ipsec_site_id)['ipsec_site_connection'] columns, display_columns = column_util.get_columns(obj, _attr_map) data = utils.get_dict_properties(obj, columns, formatters=_formatters) return (display_columns, data)
39.768
79
0.632468
8,991
0.602897
0
0
0
0
0
0
4,278
0.286864
4a866ef89141cc4c966674193758ad5f52e83702
551
py
Python
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
null
null
null
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
1
2019-09-10T13:58:24.000Z
2019-09-10T13:58:24.000Z
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
null
null
null
TINY_WAIT = 1
SMALL_WAIT = 3
MEDIUM_WAIT = 5
BIG_WAIT = 10
SECURITY_WAIT = 15
BATTLE_FINISH_DETECT = 12
BATTLE_NONE_DETECT_TIME = 90
BATTLE_END_SIGNAL_MAX_EXECUTE_TIME = 15

# Offsets for key actions
FLAGS_START_BATTLE_BIAS = (50, 25)
FLAGS_ENSURE_TEAM_INFO_BIAS = (25, 50)

# Square click offsets
FLAGS_CLICK_BIAS_TINY = (3, 3)
FLAGS_CLICK_BIAS_SMALL = (5, 5)
FLAGS_CLICK_BIAS_MEDIUM = (10, 10)
FLAGS_CLICK_BIAS_BIG = (15, 15)
FLAGS_CLICK_BIAS_HUGE = (30, 30)

# Swipe offsets
# Offsets used for left/right swipes, i.e. applied to the swipe's starting coordinates
FLAGS_SWIPE_BIAS_TO_LEFT = ((1, 1), (1, 1))
FLAGS_SWIPE_BIAS_TO_RIGHT = ((1, 1), (1, 1))
21.192308
44
0.751361
0
0
0
0
0
0
0
0
116
0.186196
4a868fe7e98135f318566006794d9b95f620108a
3,229
py
Python
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
4
2021-05-31T19:34:27.000Z
2021-06-01T18:14:31.000Z
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
22
2021-05-15T00:01:49.000Z
2022-02-26T00:08:00.000Z
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
null
null
null
# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class ShutdownClient(NamespacedClient): @query_params() def delete_node(self, node_id, params=None, headers=None): """ Removes a node from the shutdown list `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_ .. warning:: This API is **experimental** so may include breaking changes or be removed in a future version :arg node_id: The node id of node to be removed from the shutdown state """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'node_id'.") return self.transport.perform_request( "DELETE", _make_path("_nodes", node_id, "shutdown"), params=params, headers=headers, ) @query_params() def get_node(self, node_id=None, params=None, headers=None): """ Retrieve status of a node or nodes that are currently marked as shutting down `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_ .. warning:: This API is **experimental** so may include breaking changes or be removed in a future version :arg node_id: Which node for which to retrieve the shutdown status """ return self.transport.perform_request( "GET", _make_path("_nodes", node_id, "shutdown"), params=params, headers=headers, ) @query_params() def put_node(self, node_id, body, params=None, headers=None): """ Adds a node to be shut down `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_ .. warning:: This API is **experimental** so may include breaking changes or be removed in a future version :arg node_id: The node id of node to be shut down :arg body: The shutdown type definition to register """ for param in (node_id, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "PUT", _make_path("_nodes", node_id, "shutdown"), params=params, headers=headers, body=body, )
33.989474
85
0.637039
2,362
0.731496
0
0
2,306
0.714153
0
0
2,116
0.655311
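A hedged sketch of how the namespaced ShutdownClient above is reached from a top-level Elasticsearch client. The host URL, node id, and body fields are placeholders, and the docstrings above mark the API as experimental, so this is an outline of the call pattern rather than a verified workflow.

# Sketch only: host, node id, and body values are invented placeholders.
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Register a node for shutdown, inspect its shutdown status, then clear the record.
es.shutdown.put_node("node-1", body={"type": "restart", "reason": "rolling restart"})
print(es.shutdown.get_node("node-1"))
es.shutdown.delete_node("node-1")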
4a893fcf944a3942d0a9e7e6cc93c141d9894e31
13,620
py
Python
sushichef.py
RechercheTech/sushi-chef-arvind-gupta-toys
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
[ "MIT" ]
1
2020-05-10T06:16:48.000Z
2020-05-10T06:16:48.000Z
sushichef.py
RechercheTech/sushi-chef-arvind-gupta-toys
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
[ "MIT" ]
5
2019-10-04T11:35:21.000Z
2020-05-25T14:19:41.000Z
sushichef.py
RechercheTech/sushi-chef-arvind-gupta-toys
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
[ "MIT" ]
3
2019-09-24T00:15:00.000Z
2020-02-06T16:25:36.000Z
#!/usr/bin/env python import os import requests import re import shutil from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR from bs4 import BeautifulSoup from bs4.element import NavigableString from ricecooker.chefs import SushiChef from ricecooker.classes.files import YouTubeVideoFile from ricecooker.classes.licenses import get_license from ricecooker.classes.nodes import VideoNode, TopicNode ARVIND = "Arvind Gupta Toys" ARVIND_URL = "http://www.arvindguptatoys.com/films.html" ROOT_DIR_PATH = os.getcwd() DOWNLOADS_PATH = os.path.join(ROOT_DIR_PATH, "downloads") DOWNLOADS_VIDEOS_PATH = os.path.join(DOWNLOADS_PATH, "videos/") SKIP_VIDEOS_PATH = os.path.join(ROOT_DIR_PATH, "skip_videos.txt") # These are the languages that has no sub topics on its videos. SINGLE_TOPIC_LANGUAGES = [ "bhojpuri; bajpuri; bhojapuri", # actual lang_obj.name in le-utils "bhojpuri", # future-proofing for upcoming lang_obj.name changes "nepali", "malayalam", "telugu", "bengali", "odiya", "punjabi", "marwari; marwadi", # actual lang_obj.name in le-utils "marwari", # future-proofing for upcoming lang_obj.name changes "assamese", "urdu", "spanish", "chinese", "indonesian", "sci_edu", "science/educational", ] # List of multiple languages on its topics MULTI_LANGUAGE_TOPIC = ["russian", "french",] # This are the estimate total count of arvind gupta toys language contents TOTAL_ARVIND_LANG = 23 SINGLE_TOPIC = "single" STANDARD_TOPIC = "standard" MULTI_LANGUAGE = "multi" YOUTUBE_DOMAINS = ["youtu.be", "youtube.com"] DEBUG_MODE = True # Print extra debug info durig the chef run (disable in prod) def clean_video_title(title, lang_obj): # Remove redundant and misleading words in the video title clean_title = title try: if title != None: clean_str = title.replace("-", " ").replace("MB", "").replace("|", "") clean_uplang = clean_str.replace(lang_obj.name.upper(), "") clean_lowlang = clean_uplang.replace(lang_obj.name.lower(), "") clean_caplang = clean_lowlang.replace(lang_obj.name.capitalize() , "") clean_format = clean_caplang.replace(".avi", "").replace(".wmv", "").strip() clean_extra_spaces = re.sub(" +", " ",clean_format) is_int = clean_extra_spaces[-2:] if is_int.isdigit(): clean_extra_spaces = clean_extra_spaces.replace(is_int, "") clean_title = clean_extra_spaces print("Cleaned video title ====> ", clean_title) except Exception as e: print('Error cleaning this video title: ', clean_title) return clean_title def include_video_topic(topic_node, video_data, lang_obj): # Include video details to the parent topic node video_id = video_data.uid video_source_id = 'arvind-video-{0}'.format(video_id) video_node = VideoNode( source_id=video_source_id, title=clean_video_title(video_data.title, lang_obj), description=video_data.description, author=ARVIND, thumbnail=video_data.thumbnail, license=get_license("CC BY-NC", copyright_holder=ARVIND), files=[ YouTubeVideoFile( youtube_id=video_id, language=video_data.language, high_resolution=False, ) ]) topic_node.add_child(video_node) def save_skip_videos(video, topic, lang_obj): # Compile skip videos into text file if not os.path.exists(SKIP_VIDEOS_PATH): open(SKIP_VIDEOS_PATH,"w+") text_file = open(SKIP_VIDEOS_PATH, "a") video_info = video.language + " - " + topic + " - " + video.url + " - " + video.license + "\n" text_file.write(video_info) text_file.close() def download_video_topics(data, topic, topic_node, lang_obj): """ Scrape, collect, and download the videos and their thumbnails. 
""" video_source_ids = [] for vinfo in data[topic]: try: video = ArvindVideo( url=vinfo['video_url'], title=vinfo['video_title'], language=lang_obj.code) if video.download_info(): if video.license_common: video_source_id = 'arvind-video-{0}'.format(video.uid) if video_source_id not in video_source_ids: include_video_topic(topic_node, video, lang_obj) video_source_ids.append(video_source_id) else: print('Skipping duplicate video: ' + str(vinfo['video_url'])) else: save_skip_videos(video, topic, lang_obj) else: save_skip_videos(video, topic, lang_obj) except Exception as e: print('Error downloading this video:', e) def generate_child_topics(arvind_contents, main_topic, lang_obj, topic_type): # Create a topic for each languages data = arvind_contents[lang_obj.name] for topic_index in data: topic_name = topic_index if topic_type == STANDARD_TOPIC: source_id = 'arvind-child-topic-{0}'.format(topic_name) topic_node = TopicNode(title=topic_name, source_id=source_id) download_video_topics(data, topic_name, topic_node, lang_obj) main_topic.add_child(topic_node) if topic_type == SINGLE_TOPIC: download_video_topics(data, topic_name, main_topic, lang_obj) return main_topic def create_language_data(lang_data, lang_obj): """ Process the list of elements in `lang_data` to extract video links. """ topic_contents = {} initial_topics = [] prev_topic = "" first_count = 1 total_loop = len(lang_data) lang_name = lang_obj.name.lower() for item in lang_data: total_loop -= 1 if isinstance(item, NavigableString) or item.name == 'br': continue # skip whitespace and <br/> tags try: title = item.text.rstrip().strip() video_link = "" try: video_a_tag = item.find('a') if video_a_tag: video_link = video_a_tag.get("href") # for videos else: video_link = "" # for headings topic_details = {} if any(ytd in video_link for ytd in YOUTUBE_DOMAINS): if lang_name in MULTI_LANGUAGE_TOPIC: current_lang = title.split()[0].lower() if first_count == 1: first_count = 0 prev_topic = current_lang topic_details['video_url'] = video_link topic_details['video_title'] = title if lang_name in MULTI_LANGUAGE_TOPIC: if prev_topic != current_lang: topic_contents[prev_topic] = initial_topics initial_topics = [] prev_topic = current_lang initial_topics.append(topic_details) except Exception as e: print('>> passing on', e) pass if first_count == 1: if ":" in title: first_count = 0 prev_topic = title.replace(":", "").strip() if video_link == "": if ":" in title: topic_contents[prev_topic] = initial_topics prev_topic = title.replace(":", "").strip() initial_topics = [] except Exception as e: print('>>> passing on', e) pass # This wasn't working (last topic in each standard language was missing) ... # if total_loop == 0: # topic_contents[prev_topic] = initial_topics # ... 
so changed to this: topic_contents[prev_topic] = initial_topics return topic_contents def scrape_arvind_page(): url = ARVIND_URL response = requests.get(url) page = BeautifulSoup(response.text, 'html5lib') content_divs = page.body.div list_divs = list(content_divs.children) languages_div_start = 5 languages_list = list(list_divs[languages_div_start].children) return languages_list def get_language_details(lang_name): video_lang = ArvindLanguage(name=lang_name) if video_lang.get_lang_obj(): return video_lang return None def create_language_topic(): arvind_languages = scrape_arvind_page() main_topic_list = [] if os.path.exists(SKIP_VIDEOS_PATH): os.remove(SKIP_VIDEOS_PATH) loop_max = TOTAL_ARVIND_LANG language_next_int = 7 loop_couter = 0 while (loop_couter != loop_max): try: lang_name = arvind_languages[language_next_int].get('id') lang_obj = get_language_details(lang_name.lower()) if lang_obj != None: lang_name = lang_obj.name lang_name_lower = lang_name.lower() print('== Processing ', lang_name, '='*60) language_source_id = 'arvind-parent-topic-{0}'.format(lang_name_lower) # print('language_source_id =', language_source_id) get_language_data = list(arvind_languages[language_next_int]) # print('len(get_language_data) = ', len(get_language_data)) data_contents = { lang_name: create_language_data(get_language_data, lang_obj) } # print('len(data_contents[lang_name])', len(data_contents[lang_name])) language_topic = TopicNode(title=lang_name.capitalize(), source_id=language_source_id) if lang_name_lower not in SINGLE_TOPIC_LANGUAGES and lang_name_lower not in MULTI_LANGUAGE_TOPIC: print("=======> This Language is in standard format", lang_name) topic_type = STANDARD_TOPIC generate_child_topics(data_contents, language_topic, lang_obj, topic_type) main_topic_list.append(language_topic) print("=====>finished", lang_name) if lang_name_lower in SINGLE_TOPIC_LANGUAGES: print("=====> This Language is in single topic format ", lang_name) topic_type = SINGLE_TOPIC generate_child_topics(data_contents, language_topic, lang_obj, topic_type) main_topic_list.append(language_topic) print("=====>finished", lang_name) if lang_name_lower in MULTI_LANGUAGE_TOPIC: print("=====> This Language is in multiple langauage topic format ", lang_name) lang_data = create_language_data(get_language_data, lang_obj) for lang in lang_data: current_lang = get_language_details(lang.lower()) if current_lang != None: parent_source_id = 'arvind-parent-topic-{0}'.format(current_lang.name) parent_topic = TopicNode(title=lang.capitalize(), source_id=parent_source_id) data_dic = {current_lang.name: {"": lang_data[lang]}} topic_type = SINGLE_TOPIC generate_child_topics(data_dic, parent_topic, current_lang, topic_type) main_topic_list.append(parent_topic) print("=====>finished ", lang) except Exception as e: print("===> error getting language topics: ", e) # raise(e) language_next_int += 4 loop_couter += 1 return main_topic_list class ArvindChef(SushiChef): channel_info = { "CHANNEL_TITLE": "Arvind Gupta Toys", "CHANNEL_SOURCE_DOMAIN": "arvindguptatoys.com", "CHANNEL_SOURCE_ID": "toys-from-trash", "CHANNEL_LANGUAGE": "mul", "CHANNEL_THUMBNAIL": 'chefdata/arvind_gupta_thumbnail.png', "CHANNEL_DESCRIPTION": "Math and science activities through low-cost " \ "materials all in the form of videos to provide various pathways for children to explore" \ " and deepen their understanding of concepts in low-resource contexts around the world." 
\ " Valuable resource library for teachers to incorporate in their lessons, for parents to" \ " work with children at home using readily available, simple, and low-cost materials.", } def pre_run(self, args, options): """This function will get called by ricecooker before the chef runs.""" if args['update']: # delete video info .json files cached in chefdata/youtubecache/ print('Deleting vinfo .json files in {}'.format(YOUTUBE_CACHE_DIR)) if os.path.exists(YOUTUBE_CACHE_DIR): shutil.rmtree(YOUTUBE_CACHE_DIR) os.makedirs(YOUTUBE_CACHE_DIR) def construct_channel(self, **kwargs): channel = self.get_channel(**kwargs) languages_topic = create_language_topic() for lang_topic in languages_topic: channel.add_child(lang_topic) return channel if __name__ == "__main__": """ Run this script on the command line using: python sushichef.py -v --reset --token=YOURTOKENHERE9139139f3a23232 """ chef = ArvindChef() chef.main()
37.01087
113
0.606608
1,510
0.110866
0
0
0
0
0
0
3,218
0.23627
4a89792f0a691e63a2efbaa3d996bdb8f827265c
1,170
py
Python
api/views/domain.py
lndba/apasa_backend
e0bb96e22a22f6e2a5a2826f225388113473e7e2
[ "Apache-2.0" ]
1
2019-08-06T07:31:40.000Z
2019-08-06T07:31:40.000Z
api/views/domain.py
lndba/apasa_backend
e0bb96e22a22f6e2a5a2826f225388113473e7e2
[ "Apache-2.0" ]
null
null
null
api/views/domain.py
lndba/apasa_backend
e0bb96e22a22f6e2a5a2826f225388113473e7e2
[ "Apache-2.0" ]
null
null
null
from rest_framework.viewsets import ModelViewSet,GenericViewSet
from rest_framework.response import Response
from api.serializers.domain import *
from api.pagination.page import MyPageNumberPagination
from api.models import *


class MDomainListViewSet(ModelViewSet):
    queryset = MasterDomainName.objects.all().order_by('id')
    pagination_class = MyPageNumberPagination
    serializer_class = MDomainListSerializers


class DnsListViewSet(GenericViewSet):
    def list(self, request, *args, **kwargs):
        res = {"count": 0, 'results': None}
        domain_id = request.query_params.get('domain')
        dns_list = Dns.objects.all().filter(master_domain_name=domain_id)
        dns_count = Dns.objects.all().filter(master_domain_name=domain_id).count()
        page = MyPageNumberPagination()
        page_dns_list = page.paginate_queryset(dns_list,request,self)
        ser = DnsListSerializers(instance=page_dns_list,many=True)
        res['results'] = ser.data
        res['count'] = dns_count
        return Response(res)


class DnsUpdataViewSet(ModelViewSet):
    queryset = Dns.objects.all().order_by('id')
    serializer_class = DnsUpdataSerializers
35.454545
82
0.737607
933
0.797436
0
0
0
0
0
0
48
0.041026
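A hedged sketch of how the viewsets in the record above might be exposed through DRF's DefaultRouter in a urls.py. The project's actual URL configuration is not part of the record, so the prefixes and basenames below are invented.

# Illustrative urls.py wiring (not from the repository); prefixes and basenames are made up.
from rest_framework.routers import DefaultRouter

from api.views.domain import MDomainListViewSet, DnsListViewSet, DnsUpdataViewSet

router = DefaultRouter()
router.register(r'mdomains', MDomainListViewSet, basename='mdomain')
router.register(r'dns-list', DnsListViewSet, basename='dns-list')   # GenericViewSet exposing only list()
router.register(r'dns', DnsUpdataViewSet, basename='dns')

urlpatterns = router.urls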
4a89890f028ab800ae7dcb96dcff01c0b7e8d98a
1,184
py
Python
90-subsets-ii.py
yuenliou/leetcode
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
[ "MIT" ]
null
null
null
90-subsets-ii.py
yuenliou/leetcode
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
[ "MIT" ]
null
null
null
90-subsets-ii.py
yuenliou/leetcode
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
[ "MIT" ]
null
null
null
#!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List


class Solution:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """
        Solution notes: https://leetcode-cn.com/problems/subsets/solution/c-zong-jie-liao-hui-su-wen-ti-lei-xing-dai-ni-gao-/
        """
        def backtrack(start, path):
            # Termination condition: none (every path is a valid subset)
            res.append(path[:])
            for i in range(start, len(nums)):
                # Skip numbers equal to the previous one at the same depth
                if i > start and nums[i] == nums[i - 1]:
                    continue
                # Make a choice
                path.append(nums[i])
                # Move on to the next decision level
                backtrack(i + 1, path)
                # Undo the choice
                path.pop()

        res = []
        nums.sort()
        backtrack(0, [])
        return res


def main():
    param = [1,2,2]
    solution = Solution()
    ret = solution.subsetsWithDup(param)
    print(ret)


'''90. Subsets II

Given an integer array nums that may contain duplicate elements, return all possible subsets (the power set).

Note: the solution set must not contain duplicate subsets.

Example:
Input: [1,2,2]
Output:
[
  [2],
  [1],
  [1,2,2],
  [2,2],
  [1,2],
  []
]

Source: LeetCode (leetcode-cn)
Link: https://leetcode-cn.com/problems/subsets-ii
Copyright belongs to LeetCode. For commercial reproduction please contact the official channel for authorization; for non-commercial reproduction please credit the source.
'''


if __name__ == '__main__':
    main()
19.733333
112
0.516047
767
0.528237
0
0
0
0
0
0
742
0.511019
4a8a19db97a47f9f1fc1395728868b9d716366fe
450
py
Python
tools/output_tool.py
climberwb/bert-pli
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
[ "MIT" ]
5
2020-12-24T01:46:40.000Z
2022-03-18T19:15:10.000Z
tools/output_tool.py
climberwb/bert-pli
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
[ "MIT" ]
1
2021-04-05T14:27:24.000Z
2021-04-05T14:27:24.000Z
tools/output_tool.py
climberwb/bert-pli
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
[ "MIT" ]
4
2020-12-28T09:20:13.000Z
2021-12-10T13:33:21.000Z
import json

from .accuracy_tool import gen_micro_macro_result


def null_output_function(data, config, *args, **params):
    return ""


def basic_output_function(data, config, *args, **params):
    which = config.get("output", "output_value").replace(" ", "").split(",")
    temp = gen_micro_macro_result(data)
    result = {}
    for name in which:
        result[name] = temp[name]

    return json.dumps(result, sort_keys=True)
25
77
0.653333
0
0
0
0
0
0
0
0
32
0.071111
4a8ae0336fc8e8f4551cb0d621a28672bac709c0
27,100
py
Python
python/drydock_provisioner/ingester/plugins/deckhand.py
Vjrx/airship-drydock
315fb9864e6d55a66d5266f76c160be55d22c98b
[ "Apache-2.0" ]
14
2018-05-19T11:58:22.000Z
2019-05-10T12:31:36.000Z
python/drydock_provisioner/ingester/plugins/deckhand.py
Vjrx/airship-drydock
315fb9864e6d55a66d5266f76c160be55d22c98b
[ "Apache-2.0" ]
10
2019-11-12T17:21:16.000Z
2021-11-10T18:16:06.000Z
python/drydock_provisioner/ingester/plugins/deckhand.py
Vjrx/airship-drydock
315fb9864e6d55a66d5266f76c160be55d22c98b
[ "Apache-2.0" ]
11
2018-06-05T16:21:18.000Z
2019-04-03T11:44:34.000Z
# Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This data ingester will consume YAML site topology documents.""" import yaml import logging import jsonschema import os import pkg_resources import copy import hashlib import drydock_provisioner.objects.fields as hd_fields from beaker.cache import CacheManager from beaker.util import parse_cache_config_options from drydock_provisioner import error as errors from drydock_provisioner import objects from drydock_provisioner.ingester.plugins import IngesterPlugin cache_opts = { 'cache.type': 'memory', 'expire': 1800, } cache = CacheManager(**parse_cache_config_options(cache_opts)) class DeckhandIngester(IngesterPlugin): def __init__(self): super().__init__() self.logger = logging.getLogger('drydock.ingester.deckhand') self.load_schemas() def get_name(self): return "deckhand" def ingest_data(self, **kwargs): """Parse and save design data. :param content: String of valid Deckhand YAML :returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects """ def local_parse(): return self.parse_docs(kwargs.get('content')) if 'content' in kwargs: try: # Hash the input to use as the cache key. This is not a security # related hash, so use cheap and fast MD5 hv = hashlib.md5(kwargs.get('content', b'')).hexdigest() local_cache = cache.get_cache('parsed_docs') results = local_cache.get(key=hv, createfunc=local_parse) parse_status, models = results except Exception as ex: self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex) raise ex else: raise ValueError('Missing parameter "content"') return parse_status, models def parse_docs(self, doc_blob): """Translate a YAML string into the internal Drydock model. 
Returns a tuple of a objects.TaskStatus instance to summarize all document processing and a list of models yielded by successful processing :param doc_blob: bytes representing a utf-8 encoded YAML string """ models = [] yaml_string = doc_blob.decode() self.logger.debug("yamlingester:parse_docs - Parsing YAML string.") try: parsed_data = yaml.safe_load_all(yaml_string) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark raise errors.IngesterError( "Error parsing YAML at (l:%s, c:%s): %s" % (mark.line + 1, mark.column + 1, err)) else: raise errors.IngesterError("Error parsing YAML: %s" % (err)) # tracking processing status to provide a complete summary of issues ps = objects.Validation() ps.set_status(hd_fields.ValidationResult.Success) for d in parsed_data: try: (schema_ns, doc_kind, doc_version) = d.get('schema', '').split('/') except ValueError as ex: self.logger.error( "Error with document structure.", exc_info=ex) self.logger.debug("Error document\n%s" % yaml.dump(d)) continue if schema_ns == 'drydock': try: doc_ref = objects.DocumentReference( doc_type=hd_fields.DocumentType.Deckhand, doc_schema=d.get('schema'), doc_name=d.get('metadata', {}).get('name', 'Unknown')) doc_errors = self.validate_drydock_document(d) if len(doc_errors) > 0: for e in doc_errors: ps.add_detail_msg( objects.ValidationMessage( msg="%s:%s schema validation error: %s" % (doc_kind, doc_version, e), name="DD001", docs=[doc_ref], error=True, level=hd_fields.MessageLevels.ERROR, diagnostic= "Invalid input file - see Drydock Troubleshooting Guide for DD001" )) ps.set_status(hd_fields.ActionResult.Failure) continue model = self.process_drydock_document(d) model.doc_ref = doc_ref models.append(model) except errors.IngesterError as ie: msg = "Error processing document: %s" % str(ie) self.logger.warning(msg) ps.add_detail_msg( objects.ValidationMessage( msg=msg, name="DD000", error=True, level=hd_fields.MessageLevels.ERROR, docs=[doc_ref], diagnostic="Exception during document processing " "- see Drydock Troubleshooting Guide " "for DD000")) ps.set_status(hd_fields.ActionResult.Failure) except Exception as ex: msg = "Unexpected error processing document: %s" % str(ex) self.logger.error(msg, exc_info=True) ps.add_detail_msg( objects.ValidationMessage( msg=msg, name="DD000", error=True, level=hd_fields.MessageLevels.ERROR, docs=[doc_ref], diagnostic="Unexpected exception during document " "processing - see Drydock Troubleshooting " "Guide for DD000")) ps.set_status(hd_fields.ActionResult.Failure) return (ps, models) def process_drydock_document(self, doc): """Process a parsed YAML document. :param doc: The dictionary from parsing the YAML document """ (schema_ns, kind, version) = doc.get('schema', '').split('/') if version == 'v1': doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None) else: doc_processor = None if doc_processor is None: raise errors.IngesterError( "Invalid document - Kind %s and Version %s" % (kind, version)) metadata = doc.get('metadata', {}) doc_name = metadata.get('name') return doc_processor(self, doc_name, doc.get('data', {})) def validate_drydock_document(self, doc): """Validate a parsed document via jsonschema. If a schema for a document Kind is not available, the document is considered valid. Schema is chosen by the doc['kind'] field. Returns a empty list for valid documents, otherwise returns a list of all found errors :param doc: dictionary of the parsed document. 
""" schemaname = doc.get('schema', '') (schema_ns, doc_kind, doc_version) = schemaname.split('/') errors_found = [] if doc_version == 'v1': if schemaname in self.v1_doc_schemas: validator = jsonschema.Draft4Validator( self.v1_doc_schemas.get(schemaname)) for error in validator.iter_errors(doc.get('data', [])): errors_found.append(error.message) return errors_found def process_drydock_region(self, name, data): """Process the data/spec section of a Region document. :param name: the document name attribute :param data: the dictionary of the data/spec section """ model = objects.Site() # Need to add validation logic, we'll assume the input is # valid for now model.name = name model.status = hd_fields.SiteStatus.Unknown model.source = hd_fields.ModelSource.Designed model.tag_definitions = objects.NodeTagDefinitionList() tag_defs = data.get('tag_definitions', []) for t in tag_defs: tag_model = objects.NodeTagDefinition() tag_model.tag = t.get('tag', '') tag_model.type = t.get('definition_type', '') tag_model.definition = t.get('definition', '') if tag_model.type not in ['lshw_xpath']: raise errors.IngesterError( 'Unknown definition_type in ' 'tag_definition instance: %s' % (t.definition_type)) model.tag_definitions.append(tag_model) auth_keys = data.get('authorized_keys', []) model.authorized_keys = [k for k in auth_keys] repos = data.get('repositories', None) if repos: model.repositories = self.process_drydock_region_repo_list(repos) return model def process_drydock_region_repo_list(self, data): """Process a package repository list. :param data: The data from the ``repositories`` key in a Region document """ model = objects.RepositoryList() for k, v in data.items(): if k == 'remove_unlisted': model.remove_unlisted = v else: model.append(objects.Repository(name=k, **v)) return model def process_drydock_rack(self, name, data): """Process the data/spec section of a Rack document. :param name: the document name attribute :param data: the dictionary of the data/spec section """ model = objects.Rack() model.source = hd_fields.ModelSource.Designed model.name = name model.tor_switches = objects.TorSwitchList() tors = data.get('tor_switches', {}) for k, v in tors.items(): tor = objects.TorSwitch() tor.switch_name = k tor.mgmt_ip = v.get('mgmt_ip', None) tor.sdn_api_uri = v.get('sdn_api_url', None) model.tor_switches.append(tor) model.location = copy.deepcopy(data.get('location', {})) model.local_networks = [n for n in data.get('local_networks', [])] return model def process_drydock_networklink(self, name, data): """Process the data/spec section of a NetworkLink document. 
:param name: the document name attribute :param data: the dictionary of the data/spec section """ model = objects.NetworkLink() model.source = hd_fields.ModelSource.Designed model.name = name model.metalabels = data.get('labels', {}) bonding = data.get('bonding', {}) model.bonding_mode = bonding.get( 'mode', hd_fields.NetworkLinkBondingMode.Disabled) if model.bonding_mode in \ (hd_fields.NetworkLinkBondingMode.LACP, hd_fields.NetworkLinkBondingMode.RoundRobin, hd_fields.NetworkLinkBondingMode.Standby): model.bonding_mon_rate = bonding.get('mon_rate', '100') model.bonding_up_delay = bonding.get('up_delay', '200') model.bonding_down_delay = bonding.get('down_delay', '200') if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP: model.bonding_xmit_hash = bonding.get('hash', 'layer3+4') model.bonding_peer_rate = bonding.get('peer_rate', 'fast') model.mtu = data.get('mtu', None) model.linkspeed = data.get('linkspeed', None) trunking = data.get('trunking', {}) model.trunk_mode = trunking.get( 'mode', hd_fields.NetworkLinkTrunkingMode.Disabled) model.native_network = trunking.get('default_network', None) model.allowed_networks = data.get('allowed_networks', None) return model def process_drydock_network(self, name, data): """Process the data/spec section of a Network document. :param name: the document name attribute :param data: the dictionary of the data/spec section """ model = objects.Network() model.source = hd_fields.ModelSource.Designed model.name = name model.metalabels = data.get('labels', {}) model.cidr = data.get('cidr', None) model.vlan_id = data.get('vlan', None) model.mtu = data.get('mtu', None) model.routedomain = data.get('routedomain', None) dns = data.get('dns', {}) model.dns_domain = dns.get('domain', 'local') model.dns_servers = dns.get('servers', None) ranges = data.get('ranges', []) model.ranges = [] for r in ranges: model.ranges.append({ 'type': r.get('type', None), 'start': r.get('start', None), 'end': r.get('end', None), }) routes = data.get('routes', []) model.routes = [] for r in routes: model.routes.append({ 'subnet': r.get('subnet', None), 'gateway': r.get('gateway', None), 'metric': r.get('metric', None), 'routedomain': r.get('routedomain', None), }) dhcp_relay = data.get('dhcp_relay', None) if dhcp_relay is not None: model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None) model.dhcp_relay_upstream_target = dhcp_relay.get( 'upstream_target', None) return model def process_drydock_hwprofile(self, name, data): """Process the data/spec section of a HardwareProfile document. 
        :param name: the document name attribute
        :param data: the dictionary of the data/spec section
        """
        model = objects.HardwareProfile()

        model.name = name
        model.source = hd_fields.ModelSource.Designed

        model.vendor = data.get('vendor', None)
        model.generation = data.get('generation', None)
        model.hw_version = data.get('hw_version', None)
        model.bios_version = data.get('bios_version', None)
        model.boot_mode = data.get('boot_mode', None)
        model.bootstrap_protocol = data.get('bootstrap_protocol', None)
        model.pxe_interface = data.get('pxe_interface', None)

        model.devices = objects.HardwareDeviceAliasList()

        device_aliases = data.get('device_aliases', {})

        for d, v in device_aliases.items():
            dev_model = objects.HardwareDeviceAlias()
            dev_model.source = hd_fields.ModelSource.Designed
            dev_model.alias = d
            dev_model.bus_type = v.get('bus_type', None)
            dev_model.dev_type = v.get('dev_type', None)
            dev_model.address = v.get('address', None)

            model.devices.append(dev_model)

        model.cpu_sets = data.get('cpu_sets', None) or dict()

        model.hugepages_confs = objects.HugepagesConfList()

        for c, d in data.get('hugepages', {}).items():
            conf = objects.HugepagesConf(
                name=c, size=d.get('size'), count=d.get('count'))
            model.hugepages_confs.append(conf)

        return model

    def process_drydock_hostprofile(self, name, data):
        """Process the data/spec section of a HostProfile document.

        :param name: the document name attribute
        :param data: the dictionary of the data/spec section
        """
        model = objects.HostProfile()
        model.name = name
        model.source = hd_fields.ModelSource.Designed

        self.process_host_common_fields(data, model)

        return model

    def process_drydock_bootaction(self, name, data):
        """Process the data/spec section of a BootAction document.

        :param name: the document name attribute
        :param data: the dictionary of the parsed data/spec section
        """
        model = objects.BootAction()
        model.name = name
        model.source = hd_fields.ModelSource.Designed

        assets = data.get('assets')

        model.asset_list = objects.BootActionAssetList()

        for a in assets:
            ba = self.process_bootaction_asset(a)
            model.asset_list.append(ba)

        node_filter = data.get('node_filter', None)

        if node_filter is not None:
            nfs = self.process_bootaction_nodefilter(node_filter)
            model.node_filter = nfs

        model.signaling = data.get('signaling', None)

        return model

    def process_bootaction_asset(self, asset_dict):
        """Process a dictionary representing a BootAction Data Asset.

        :param asset_dict: dictionary representing the bootaction asset
        """
        model = objects.BootActionAsset(**asset_dict)
        return model

    def process_bootaction_nodefilter(self, nf):
        """Process a dictionary representing a BootAction NodeFilter Set.

        :param nf: dictionary representing the bootaction nodefilter set.
        """
        model = objects.NodeFilterSet()
        model.filter_set_type = nf.get('filter_set_type', None)

        model.filter_set = []

        for nf in nf.get('filter_set', []):
            nf_model = objects.NodeFilter(**nf)
            model.filter_set.append(nf_model)

        return model

    def process_drydock_node(self, name, data):
        """Process the data/spec section of a BaremetalNode document.

        :param name: the document name attribute
        :param data: the dictionary of the data/spec section
        """
        model = objects.BaremetalNode()
        model.name = name
        model.source = hd_fields.ModelSource.Designed

        self.process_host_common_fields(data, model)

        node_metadata = data.get('metadata', {})
        model.boot_mac = node_metadata.get('boot_mac', None)

        addresses = data.get('addressing', [])

        if len(addresses) == 0:
            raise errors.IngesterError('BaremetalNode needs at least'
                                       ' 1 assigned address')

        model.addressing = objects.IpAddressAssignmentList()

        for a in addresses:
            assignment = objects.IpAddressAssignment()

            address = a.get('address', '')
            if address == 'dhcp':
                assignment.type = 'dhcp'
                assignment.address = None
                assignment.network = a.get('network')

                model.addressing.append(assignment)
            elif address != '':
                assignment.type = 'static'
                assignment.address = a.get('address')
                assignment.network = a.get('network')

                model.addressing.append(assignment)
            else:
                self.log.error("Invalid address assignment %s on Node %s" %
                               (address, name))

        return model

    def process_host_common_fields(self, data, model):
        """Process fields common to the host-based documents.

        Update the provided model with the values of fields common
        to BaremetalNode and HostProfile documents.

        :param data: dictionary from YAML parsing of the document data/spec section
        :param model: instance of objects.HostProfile or objects.BaremetalNode to update
        """
        model.parent_profile = data.get('host_profile', None)
        model.hardware_profile = data.get('hardware_profile', None)

        oob = data.get('oob', {})

        model.oob_parameters = {}
        for k, v in oob.items():
            if k == 'type':
                model.oob_type = oob.get('type', None)
            else:
                model.oob_parameters[k] = v

        (model.storage_devices,
         model.volume_groups) = self.process_node_storage(
             data.get('storage', {}))

        interfaces = data.get('interfaces', {})
        model.interfaces = objects.HostInterfaceList()

        for k, v in interfaces.items():
            int_model = objects.HostInterface()

            # A null value indicates this interface should be removed
            # from any parent profiles
            if v is None:
                int_model.device_name = '!' + k
                continue

            int_model.device_name = k
            int_model.network_link = v.get('device_link', None)

            int_model.hardware_slaves = []
            slaves = v.get('slaves', [])

            for s in slaves:
                int_model.hardware_slaves.append(s)

            int_model.networks = []
            networks = v.get('networks', [])

            for n in networks:
                int_model.networks.append(n)

            if 'sriov' in v:
                int_model.sriov = True
                int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
                int_model.trustedmode = v.get('sriov', {}).get(
                    'trustedmode', False)

            model.interfaces.append(int_model)

        platform = data.get('platform', {})

        model.image = platform.get('image', None)
        model.kernel = platform.get('kernel', None)

        model.kernel_params = {}
        for k, v in platform.get('kernel_params', {}).items():
            model.kernel_params[k] = v

        model.primary_network = data.get('primary_network', None)

        node_metadata = data.get('metadata', {})
        metadata_tags = node_metadata.get('tags', [])
        model.tags = metadata_tags

        owner_data = node_metadata.get('owner_data', {})
        model.owner_data = {}

        for k, v in owner_data.items():
            model.owner_data[k] = v

        model.rack = node_metadata.get('rack', None)

        return model

    def process_node_storage(self, storage):
        """Process the storage data for a node-based document.

        Return a tuple of two lists: the first is a StorageDeviceList, the
        second is a VolumeGroupList.

        :param storage: dictionary of the storage section of a document
        """
        phys_devs = storage.get('physical_devices', {})

        storage_devices = objects.HostStorageDeviceList()

        for k, v in phys_devs.items():
            sd = objects.HostStorageDevice(name=k)
            sd.source = hd_fields.ModelSource.Designed

            if 'labels' in v:
                sd.labels = v.get('labels').copy()

            if 'volume_group' in v:
                vg = v.get('volume_group')
                sd.volume_group = vg
            elif 'partitions' in v:
                sd.partitions = objects.HostPartitionList()
                for vv in v.get('partitions', []):
                    part_model = objects.HostPartition()

                    part_model.name = vv.get('name')
                    part_model.source = hd_fields.ModelSource.Designed
                    part_model.part_uuid = vv.get('part_uuid', None)
                    part_model.size = vv.get('size', None)

                    if 'labels' in vv:
                        part_model.labels = vv.get('labels').copy()

                    if 'volume_group' in vv:
                        part_model.volume_group = vv.get('vg')
                    elif 'filesystem' in vv:
                        fs_info = vv.get('filesystem', {})
                        part_model.mountpoint = fs_info.get('mountpoint', None)
                        part_model.fstype = fs_info.get('fstype', 'ext4')
                        part_model.mount_options = fs_info.get(
                            'mount_options', 'defaults')
                        part_model.fs_uuid = fs_info.get('fs_uuid', None)
                        part_model.fs_label = fs_info.get('fs_label', None)

                    sd.partitions.append(part_model)

            storage_devices.append(sd)

        volume_groups = objects.HostVolumeGroupList()
        vol_groups = storage.get('volume_groups', {})

        for k, v in vol_groups.items():
            vg = objects.HostVolumeGroup(name=k)
            vg.vg_uuid = v.get('vg_uuid', None)
            vg.logical_volumes = objects.HostVolumeList()

            volume_groups.append(vg)

            for vv in v.get('logical_volumes', []):
                lv = objects.HostVolume(name=vv.get('name'))
                lv.size = vv.get('size', None)
                lv.lv_uuid = vv.get('lv_uuid', None)

                if 'filesystem' in vv:
                    fs_info = vv.get('filesystem', {})
                    lv.mountpoint = fs_info.get('mountpoint', None)
                    lv.fstype = fs_info.get('fstype', 'ext4')
                    lv.mount_options = fs_info.get('mount_options', 'defaults')
                    lv.fs_uuid = fs_info.get('fs_uuid', None)
                    lv.fs_label = fs_info.get('fs_label', None)

                vg.logical_volumes.append(lv)

        return (storage_devices, volume_groups)

    def load_schemas(self):
        self.v1_doc_schemas = dict()
        schema_dir = self._get_schema_dir()

        for schema_file in os.listdir(schema_dir):
            f = open(os.path.join(schema_dir, schema_file), 'r')
            for schema in yaml.safe_load_all(f):
                schema_for = schema['metadata']['name']
                if schema_for in self.v1_doc_schemas:
                    self.logger.warning(
                        "Duplicate document schemas found for document kind %s."
                        % schema_for)
                self.logger.debug(
                    "Loaded schema for document kind %s." % schema_for)
                self.v1_doc_schemas[schema_for] = schema.get('data')
            f.close()

    def _get_schema_dir(self):
        return pkg_resources.resource_filename('drydock_provisioner',
                                               'schemas')

    # Mapping of handlers for different document kinds
    v1_doc_handlers = {
        'Region': process_drydock_region,
        'Rack': process_drydock_rack,
        'NetworkLink': process_drydock_networklink,
        'Network': process_drydock_network,
        'HardwareProfile': process_drydock_hwprofile,
        'HostProfile': process_drydock_hostprofile,
        'BaremetalNode': process_drydock_node,
        'BootAction': process_drydock_bootaction,
    }
37.225275
108
0.573579
25,878
0.954908
0
0
0
0
0
0
7,346
0.27107
4a8ca4ac28e4f99e7596ac67b54b694b5e38191d
5,517
py
Python
porting_tools/package_xml_porter.py
nreplogle/ros2-migration-tools
8e422731dea52df19da6de780319a17516f60f7c
[ "Apache-2.0" ]
92
2018-10-17T22:18:01.000Z
2022-03-19T22:03:16.000Z
porting_tools/package_xml_porter.py
nreplogle/ros2-migration-tools
8e422731dea52df19da6de780319a17516f60f7c
[ "Apache-2.0" ]
12
2019-02-21T22:29:15.000Z
2021-06-28T22:33:31.000Z
porting_tools/package_xml_porter.py
nreplogle/ros2-migration-tools
8e422731dea52df19da6de780319a17516f60f7c
[ "Apache-2.0" ]
19
2018-10-18T11:47:07.000Z
2022-02-04T18:41:03.000Z
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

""" Contains a class and method for porting a package.xml file from catkin to ament"""

import xml.etree.ElementTree as etree

from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with


def new_element(tag, text="", tail="\n", attrib=None):
    """ Helper function to make creating an element with a text and tail easier """
    if not attrib:
        attrib = {}
    element = etree.Element(tag, attrib=attrib)
    element.text = text
    element.tail = tail
    return element


def tag_order(tag):
    """ Returns integer to order tags """
    if tag in PACKAGE_XML_ELEMENT_ORDER:
        return PACKAGE_XML_ELEMENT_ORDER.index(tag)
    return float("inf")


class PackageXMLPorter:
    """A class for porting a package.xml file from catkin to ament"""

    @staticmethod
    def port(tree, extra_rules=[]):
        """ Ports package.xml from catkin to ament

        Arguments:
            tree - the xml tree representing the package.xml file
                   (output of etree.parse("package.xml"))
            extra_rules - a list of functions to apply to the xml tree
        Returns:
            The new xml tree
        """
        # Pulls out all methods in this class with name starting with "rule"
        rules = get_functions_with(
            criteria=lambda name: name.startswith("rule"),
            from_class=PackageXMLPorter)

        package_root = tree.getroot()

        for rule in rules + extra_rules:
            rule(package_root)

        # Make sure there's a final newline
        package_root.tail = "\n"

        # Reorder the elements
        package_root[:] = sorted(list(package_root),
                                 key=lambda elem: tag_order(elem.tag))

        # Correct indentation
        PackageXMLPorter.indent_tree(elem=package_root, level=0)

    #########################
    #         RULES         #
    #########################

    @staticmethod
    def rule_set_format(package_root):
        # ROS 2 supports formats 2,3
        package_root.set("format", "3")

    @staticmethod
    def rule_set_build_tool(package_root):
        for elem in package_root.findall("buildtool_depend"):
            if elem.text and elem.text.strip() == "catkin":
                package_root.remove(elem)
        package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))

    @staticmethod
    def rule_set_client_library(package_root):
        for elem in list(package_root):
            if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
                elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]

    @staticmethod
    def rule_add_export_build_type(package_root):
        build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
        export_elem = new_element(tag="export", text="\n ")
        export_elem.append(build_elem)
        package_root.append(export_elem)

    @staticmethod
    def rule_set_run_to_exec_depend(package_root):
        for elem in package_root.findall("run_depend"):
            elem.tag = "exec_depend"

    @staticmethod
    def rule_set_depend_to_run_exec(package_root):
        for elem in package_root.findall("depend"):
            elem.tag = "build_depend"
            package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))

    @staticmethod
    def rule_update_message_gen_dependency(package_root):
        message_generation_used = False
        for elem in list(package_root):
            if elem.text and elem.text == "message_generation" or elem.text == "message_runtime":
                package_root.remove(elem)
                message_generation_used = True
        if message_generation_used:
            package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
            package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
            package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
            package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
            package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))

    #########################
    #        HELPERS        #
    #########################

    @staticmethod
    def indent_tree(elem, level):
        if len(elem) > 0:  # element has children
            if elem.text is None or len(elem.text) == 0:
                elem.text = "\n" + (" "*(level+1))  # sets the indent for the children
            list(elem)[-1].tail = "\n" + " "*level
            for child in list(elem)[:-1]:
                child.tail = "\n" + (" "*(level+1))
                PackageXMLPorter.indent_tree(elem=child, level=level+1)


if __name__ == '__main__':
    tree = etree.parse("package.xml")
    PackageXMLPorter.port(tree=tree)
    tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
39.12766
104
0.649085
4,042
0.732645
0
0
3,714
0.673192
0
0
2,015
0.365235
4a8eaddf7ae51bc116bee8d180b8c5c1f2cfecaf
4,739
py
Python
endpoints/api/permission_models_interface.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
2,027
2019-11-12T18:05:48.000Z
2022-03-31T22:25:04.000Z
endpoints/api/permission_models_interface.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
496
2019-11-12T18:13:37.000Z
2022-03-31T10:43:45.000Z
endpoints/api/permission_models_interface.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
249
2019-11-12T18:02:27.000Z
2022-03-22T12:19:19.000Z
import sys

from abc import ABCMeta, abstractmethod
from collections import namedtuple

from six import add_metaclass


class SaveException(Exception):
    def __init__(self, other):
        self.traceback = sys.exc_info()
        super(SaveException, self).__init__(str(other))


class DeleteException(Exception):
    def __init__(self, other):
        self.traceback = sys.exc_info()
        super(DeleteException, self).__init__(str(other))


class Role(namedtuple("Role", ["role_name"])):
    def to_dict(self):
        return {
            "role": self.role_name,
        }


class UserPermission(
    namedtuple(
        "UserPermission",
        [
            "role_name",
            "username",
            "is_robot",
            "avatar",
            "is_org_member",
            "has_org",
        ],
    )
):
    def to_dict(self):
        perm_dict = {
            "role": self.role_name,
            "name": self.username,
            "is_robot": self.is_robot,
            "avatar": self.avatar,
        }

        if self.has_org:
            perm_dict["is_org_member"] = self.is_org_member

        return perm_dict


class RobotPermission(
    namedtuple(
        "RobotPermission",
        [
            "role_name",
            "username",
            "is_robot",
            "is_org_member",
        ],
    )
):
    def to_dict(self, user=None, team=None, org_members=None):
        return {
            "role": self.role_name,
            "name": self.username,
            "is_robot": True,
            "is_org_member": self.is_org_member,
        }


class TeamPermission(
    namedtuple(
        "TeamPermission",
        [
            "role_name",
            "team_name",
            "avatar",
        ],
    )
):
    def to_dict(self):
        return {
            "role": self.role_name,
            "name": self.team_name,
            "avatar": self.avatar,
        }


@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
    """
    Data interface used by permissions API.
    """

    @abstractmethod
    def get_repo_permissions_by_user(self, namespace_name, repository_name):
        """
        Args:
            namespace_name: string
            repository_name: string

        Returns:
            list(UserPermission)
        """

    @abstractmethod
    def get_repo_roles(self, username, namespace_name, repository_name):
        """
        Args:
            username: string
            namespace_name: string
            repository_name: string

        Returns:
            list(Role) or None
        """

    @abstractmethod
    def get_repo_permission_for_user(self, username, namespace_name, repository_name):
        """
        Args:
            username: string
            namespace_name: string
            repository_name: string

        Returns:
            UserPermission
        """

    @abstractmethod
    def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
        """
        Args:
            username: string
            namespace_name: string
            repository_name: string
            role_name: string

        Returns:
            UserPermission

        Raises:
            SaveException
        """

    @abstractmethod
    def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
        """
        Args:
            username: string
            namespace_name: string
            repository_name: string

        Returns:
            void

        Raises:
            DeleteException
        """

    @abstractmethod
    def get_repo_permissions_by_team(self, namespace_name, repository_name):
        """
        Args:
            namespace_name: string
            repository_name: string

        Returns:
            list(TeamPermission)
        """

    @abstractmethod
    def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
        """
        Args:
            team_name: string
            namespace_name: string
            repository_name: string

        Returns:
            Role
        """

    @abstractmethod
    def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
        """
        Args:
            team_name: string
            namespace_name: string
            repository_name: string
            permission: string

        Returns:
            TeamPermission

        Raises:
            SaveException
        """

    @abstractmethod
    def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
        """
        Args:
            team_name: string
            namespace_name: string
            repository_name: string

        Returns:
            TeamPermission

        Raises:
            DeleteException
        """
21.15625
99
0.556447
4,577
0.965816
0
0
2,840
0.599283
0
0
2,041
0.430682