hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
314ff3ce95a071becc9963a4bf86bce7daa74c66 | 2,465 | py | Python | integration-test/830-windmill-zoom.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
]
| null | null | null | integration-test/830-windmill-zoom.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
]
| null | null | null | integration-test/830-windmill-zoom.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
]
| null | null | null | # -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class WindmillZoom(FixtureTest):
def test_windmill_with_attraction(self):
        # windmills appear from zoom 15, or from zoom 14 when tagged tourism=attraction
# windmill with tourism = attraction
self.generate_fixtures(dsl.way(287921407, wkt_loads('POLYGON ((-122.50950954605 37.77044960028441, -122.509500293403 37.77047580268178, -122.509480889793 37.77049817057459, -122.509461037025 37.770509816078, -122.509468493042 37.77057535750521, -122.509461126856 37.770575996587, -122.509454030166 37.7705140056184, -122.509453311513 37.77051436066419, -122.509420523006 37.77052259772597, -122.50938611753 37.77052195864368, -122.509353868011 37.7705125144261, -122.509327277879 37.77049540121677, -122.50930913191 37.77047232323118, -122.50930149623 37.77044576578648, -122.509305089492 37.7704187822768, -122.509319642199 37.77039407105399, -122.509343537386 37.77037461451069, -122.509374080105 37.77036218788359, -122.509408126255 37.7703583533811, -122.50943723167 37.77036268494871, -122.509432290936 37.7702980664548, -122.509441274089 37.77029771140798, -122.509446484317 37.7703654543115, -122.509471726977 37.770376886808, -122.509494364522 37.77039726647121, -122.509507479925 37.77042247475761, -122.50950954605 37.77044960028441))'), {
u'building': u'yes', u'name': u'North Windmill', u'gnis:reviewed': u'no', u'wikipedia': u'en:Golden Gate Park windmills', u'way_area': u'433.646', u'man_made': u'windmill', u'addr:state': u'CA', u'height': u'13', u'source': u'openstreetmap.org', u'gnis:import_uuid': u'57871b70-0100-4405-bb30-88b2e001a944', u'gnis:feature_id': u'1655473', u'ele': u'11', u'tourism': u'attraction', u'gnis:county_name': u'San Francisco'}))
self.assert_has_feature(
14, 2616, 6333, 'pois',
{'id': 287921407, 'kind': 'windmill'})
def test_windmill_without_attraction(self):
# windmill without tourism = attraction
self.generate_fixtures(dsl.way(2304462088, wkt_loads(
'POINT (-121.222603731272 36.38242898668528)'), {u'source': u'openstreetmap.org', u'man_made': u'windmill'}))
self.assert_no_matching_feature(
14, 2675, 6412, 'pois',
{'id': 2304462088, 'kind': 'windmill'})
self.assert_has_feature(
15, 5350, 12824, 'pois',
{'id': 2304462088, 'kind': 'windmill'})
| 79.516129 | 1,057 | 0.714807 | 2,355 | 0.955375 | 0 | 0 | 0 | 0 | 0 | 0 | 1,689 | 0.685193 |
3150323baad4c964b663614ac688ae658dd6c3a5 | 1,129 | py | Python | tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
]
| 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
]
| 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
]
| 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.extractor import FrontExtractorOp
class ProposalFrontExtractor(FrontExtractorOp):
op = 'Proposal'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.proposal_param
update_attrs = {
'feat_stride': param.feat_stride,
'base_size': param.base_size,
'min_size': param.min_size,
'ratio': mo_array(param.ratio),
'scale': mo_array(param.scale),
'pre_nms_topn': param.pre_nms_topn,
'post_nms_topn': param.post_nms_topn,
'nms_thresh': param.nms_thresh
}
mapping_rule = merge_attrs(param, update_attrs)
# update the attributes of the node
ProposalOp.update_node_stat(node, mapping_rule)
return cls.enabled
| 34.212121 | 72 | 0.681134 | 781 | 0.691763 | 0 | 0 | 689 | 0.610275 | 0 | 0 | 214 | 0.189548 |
315044a27a790f45a0932ccbc6e97fab229aec69 | 667 | py | Python | mk42/apps/users/migrations/0003_auto_20170614_0038.py | vint21h/mk42 | 1574d1143ea829212203f2be0b11b44de1e7c722 | [
"WTFPL"
]
| 5 | 2017-06-18T17:04:49.000Z | 2017-11-02T11:44:36.000Z | mk42/apps/users/migrations/0003_auto_20170614_0038.py | vint21h/mk42 | 1574d1143ea829212203f2be0b11b44de1e7c722 | [
"WTFPL"
]
| 13 | 2017-07-05T06:35:42.000Z | 2017-09-06T02:04:04.000Z | mk42/apps/users/migrations/0003_auto_20170614_0038.py | vint21h/mk42 | 1574d1143ea829212203f2be0b11b44de1e7c722 | [
"WTFPL"
]
| 10 | 2017-06-29T05:31:52.000Z | 2017-10-27T09:31:32.000Z | # -*- coding: utf-8 -*-
# mk42
# mk42/apps/users/migrations/0003_auto_20170614_0038.py
# Generated by Django 1.11.2 on 2017-06-14 00:38
from __future__ import unicode_literals
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
("users", "0002_auto_20170613_2124"),
]
operations = [
migrations.AlterField(
model_name="user",
name="language",
field=models.CharField(choices=[("en", "English"), ("uk", "\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")], default="en", max_length=5, verbose_name="language"),
),
]
| 23 | 189 | 0.635682 | 430 | 0.644678 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.409295 |
315100585ad8fffd2754fc98108600f71764e4fa | 3,271 | py | Python | utils/datasets.py | LukasStruppek/Plug-and-Play-Attacks | f433f97531a5fb3e6f82965ecdde504e0eb1c4ab | [
"MIT"
]
| null | null | null | utils/datasets.py | LukasStruppek/Plug-and-Play-Attacks | f433f97531a5fb3e6f82965ecdde504e0eb1c4ab | [
"MIT"
]
| null | null | null | utils/datasets.py | LukasStruppek/Plug-and-Play-Attacks | f433f97531a5fb3e6f82965ecdde504e0eb1c4ab | [
"MIT"
]
| 1 | 2022-02-09T07:06:09.000Z | 2022-02-09T07:06:09.000Z | import pickle
import pandas as pd
import torch
import torch.nn as nn
import torchvision.transforms as T
from torch.utils import data
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from datasets.celeba import CelebA1000
from datasets.facescrub import FaceScrub
from datasets.stanford_dogs import StanfordDogs
def get_normalization():
normalization = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
return normalization
def get_train_val_split(data, split_ratio, seed=0):
validation_set_length = int(split_ratio * len(data))
training_set_length = len(data) - validation_set_length
torch.manual_seed(seed)
training_set, validation_set = random_split(
data, [training_set_length, validation_set_length])
return training_set, validation_set
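# Illustrative sketch (not part of the original module): a 90/10 split of a
# dummy torch Dataset using the helper above.
#
#   import torch
#   from torch.utils.data import TensorDataset
#
#   dummy = TensorDataset(torch.randn(100, 3), torch.randint(0, 10, (100,)))
#   train_set, val_set = get_train_val_split(dummy, split_ratio=0.1, seed=0)
#   # len(train_set) == 90, len(val_set) == 10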
def get_subsampled_dataset(dataset,
dataset_size=None,
proportion=None,
seed=0):
    # Resolve the target size first; comparing while dataset_size is still
    # None would raise a TypeError.
    if dataset_size is None:
        if proportion is None:
            raise ValueError('Neither dataset_size nor proportion specified')
        dataset_size = int(proportion * len(dataset))
    if dataset_size > len(dataset):
        raise ValueError(
            'Dataset size is smaller than specified subsample size')
torch.manual_seed(seed)
subsample, _ = random_split(
dataset, [dataset_size, len(dataset) - dataset_size])
return subsample
def get_facescrub_idx_to_class():
with open('utils/files/facescrub_idx_to_class.pkl', 'rb') as f:
idx_to_class = pickle.load(f)
return idx_to_class
def get_facescrub_class_to_idx():
with open('utils/files/facescrub_class_to_idx.pkl', 'rb') as f:
class_to_idx = pickle.load(f)
return class_to_idx
def get_celeba_idx_to_attr(list_attr_file='data/celeba/list_attr_celeba.txt'):
file = pd.read_csv(list_attr_file)
attributes = file.iloc[0].tolist()[0].split(' ')[:-1]
attr_dict = {idx: attributes[idx] for idx in range(len(attributes))}
return attr_dict
def get_celeba_attr_to_idx(list_attr_file='data/celeba/list_attr_celeba.txt'):
file = pd.read_csv(list_attr_file)
attributes = file.iloc[0].tolist()[0].split(' ')[:-1]
attr_dict = {attributes[idx]: idx for idx in range(len(attributes))}
return attr_dict
def get_stanford_dogs_idx_to_class():
with open('utils/files/stanford_dogs_idx_to_class.pkl', 'rb') as f:
idx_to_class = pickle.load(f)
return idx_to_class
def get_stanford_dogs_class_to_idx():
with open('utils/files/stanford_dogs_class_to_idx.pkl', 'rb') as f:
class_to_idx = pickle.load(f)
return class_to_idx
def create_target_dataset(dataset_name, transform):
if dataset_name.lower() == 'facescrub':
return FaceScrub(group='all',
train=True,
transform=transform)
elif dataset_name.lower() == 'celeba_identities':
return CelebA1000(train=True, transform=transform)
elif 'stanford_dogs' in dataset_name.lower():
return StanfordDogs(train=True, cropped=True, transform=transform)
else:
        print(f'{dataset_name} is not a valid dataset.')
| 33.040404 | 78 | 0.697035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.136961 |
31517240d14ea2064075bc5b5b6dfd061de1cb2d | 8,594 | py | Python | applications/MappingApplication/test_examples/Fluid_SubModelling/MainKratos.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
]
| 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/MappingApplication/test_examples/Fluid_SubModelling/MainKratos.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
]
| 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/MappingApplication/test_examples/Fluid_SubModelling/MainKratos.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
]
| 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.FluidDynamicsApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
from KratosMultiphysics.MeshingApplication import *
import KratosMultiphysics.MappingApplication as KratosMapping
# In this example two domains are solved, a coarse background mesh and a fine mesh around
# an obstacle. The fine domain receives the values from the coarse domain as input on its boundary
######################################################################################
######################################################################################
######################################################################################
##PARSING THE PARAMETERS
#import define_output
parameter_file_background = open("ProjectParameters_Background.json",'r')
Projectparameters_BG = Parameters( parameter_file_background.read())
parameter_file_bodyfitted = open("ProjectParameters_BodyFitted.json",'r')
Projectparameters_BF = Parameters( parameter_file_bodyfitted.read())
## Fluid model part definition
main_model_part_bg = ModelPart(Projectparameters_BG["problem_data"]["model_part_name"].GetString())
main_model_part_bg.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BG["problem_data"]["domain_size"].GetInt())
main_model_part_bf = ModelPart(Projectparameters_BF["problem_data"]["model_part_name"].GetString())
main_model_part_bf.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BF["problem_data"]["domain_size"].GetInt())
###TODO replace this "model" for real one once available
Model_BG = {Projectparameters_BG["problem_data"]["model_part_name"].GetString() : main_model_part_bg}
Model_BF = {Projectparameters_BF["problem_data"]["model_part_name"].GetString() : main_model_part_bf}
## Solver construction
solver_module = __import__(Projectparameters_BG["solver_settings"]["solver_type"].GetString())
solver_bg = solver_module.CreateSolver(main_model_part_bg, Projectparameters_BG["solver_settings"])
solver_bg.AddVariables()
solver_module = __import__(Projectparameters_BF["solver_settings"]["solver_type"].GetString())
solver_bf = solver_module.CreateSolver(main_model_part_bf, Projectparameters_BF["solver_settings"])
solver_bf.AddVariables()
## Read the model - note that SetBufferSize is done here
solver_bg.ImportModelPart()
solver_bf.ImportModelPart()
## Add AddDofs
solver_bg.AddDofs()
solver_bf.AddDofs()
## Initialize GiD I/O
from gid_output_process import GiDOutputProcess
gid_output_bg = GiDOutputProcess(solver_bg.GetComputingModelPart(),
Projectparameters_BG["problem_data"]["problem_name"].GetString() ,
Projectparameters_BG["output_configuration"])
gid_output_bg.ExecuteInitialize()
gid_output_bf = GiDOutputProcess(solver_bf.GetComputingModelPart(),
Projectparameters_BF["problem_data"]["problem_name"].GetString() ,
Projectparameters_BF["output_configuration"])
gid_output_bf.ExecuteInitialize()
##here all of the allocation of the strategies etc is done
solver_bg.Initialize()
solver_bf.Initialize()
##TODO: replace MODEL for the Kratos one ASAP
## Get the list of the skin submodel parts in the object Model
for i in range(Projectparameters_BG["solver_settings"]["skin_parts"].size()):
skin_part_name = Projectparameters_BG["solver_settings"]["skin_parts"][i].GetString()
Model_BG.update({skin_part_name: main_model_part_bg.GetSubModelPart(skin_part_name)})
for i in range(Projectparameters_BF["solver_settings"]["skin_parts"].size()):
skin_part_name = Projectparameters_BF["solver_settings"]["skin_parts"][i].GetString()
Model_BF.update({skin_part_name: main_model_part_bf.GetSubModelPart(skin_part_name)})
## Get the list of the initial conditions submodel parts in the object Model
for i in range(Projectparameters_BF["initial_conditions_process_list"].size()):
initial_cond_part_name = Projectparameters_BF["initial_conditions_process_list"][i]["Parameters"]["model_part_name"].GetString()
Model_BF.update({initial_cond_part_name: main_model_part_bf.GetSubModelPart(initial_cond_part_name)})
## Processes construction
import process_factory
# "list_of_processes_bg" contains all the processes already constructed (boundary conditions, initial conditions and gravity)
# Note that the conditions are firstly constructed. Otherwise, they may overwrite the BCs information.
list_of_processes_bg = process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["initial_conditions_process_list"] )
list_of_processes_bg += process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["boundary_conditions_process_list"] )
list_of_processes_bf = process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["initial_conditions_process_list"] )
list_of_processes_bf += process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["boundary_conditions_process_list"] )
## Processes initialization
for process in list_of_processes_bg:
process.ExecuteInitialize()
for process in list_of_processes_bf:
process.ExecuteInitialize()
# Mapper initialization
mapper_settings_file = open("MapperSettings.json",'r')
Projectparameters_Mapper = Parameters( mapper_settings_file.read())["mapper_settings"]
inlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[0])
sides_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[1])
outlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg,
main_model_part_bf,
Projectparameters_Mapper[2])
## Stepping and time settings
Dt = Projectparameters_BG["problem_data"]["time_step"].GetDouble()
end_time = Projectparameters_BG["problem_data"]["end_time"].GetDouble()
time = 0.0
step = 0
out = 0.0
gid_output_bg.ExecuteBeforeSolutionLoop()
gid_output_bf.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bg:
process.ExecuteBeforeSolutionLoop()
for process in list_of_processes_bf:
process.ExecuteBeforeSolutionLoop()
while(time <= end_time):
time = time + Dt
step = step + 1
main_model_part_bg.CloneTimeStep(time)
main_model_part_bf.CloneTimeStep(time)
print("STEP = ", step)
print("TIME = ", time)
if(step >= 3):
for process in list_of_processes_bg:
process.ExecuteInitializeSolutionStep()
for process in list_of_processes_bf:
process.ExecuteInitializeSolutionStep()
gid_output_bg.ExecuteInitializeSolutionStep()
gid_output_bf.ExecuteInitializeSolutionStep()
solver_bg.Solve()
inlet_mapper.Map(VELOCITY, VELOCITY)
sides_mapper.Map(VELOCITY, VELOCITY)
outlet_mapper.Map(VELOCITY, VELOCITY)
solver_bf.Solve()
for process in list_of_processes_bg:
process.ExecuteFinalizeSolutionStep()
for process in list_of_processes_bf:
process.ExecuteFinalizeSolutionStep()
gid_output_bg.ExecuteFinalizeSolutionStep()
gid_output_bf.ExecuteFinalizeSolutionStep()
#TODO: decide if it shall be done only when output is processed or not
for process in list_of_processes_bg:
process.ExecuteBeforeOutputStep()
for process in list_of_processes_bf:
process.ExecuteBeforeOutputStep()
if gid_output_bg.IsOutputStep():
gid_output_bg.PrintOutput()
gid_output_bf.PrintOutput()
for process in list_of_processes_bg:
process.ExecuteAfterOutputStep()
for process in list_of_processes_bf:
process.ExecuteAfterOutputStep()
out = out + Dt
for process in list_of_processes_bg:
process.ExecuteFinalize()
for process in list_of_processes_bf:
process.ExecuteFinalize()
gid_output_bg.ExecuteFinalize()
gid_output_bf.ExecuteFinalize()
| 40.729858 | 155 | 0.728764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,309 | 0.268676 |
3152a37b3680f114e554c550f939501a4b8c1547 | 4,475 | py | Python | src/ga4gh/vrs/extras/vcf_annotation.py | reece/vmc-python | 579e73063a6927f2a2734fa47b7efc7416ca2a25 | [
"Apache-2.0"
]
| 1 | 2017-01-10T18:47:54.000Z | 2017-01-10T18:47:54.000Z | src/ga4gh/vrs/extras/vcf_annotation.py | reece/vmc-python | 579e73063a6927f2a2734fa47b7efc7416ca2a25 | [
"Apache-2.0"
]
| null | null | null | src/ga4gh/vrs/extras/vcf_annotation.py | reece/vmc-python | 579e73063a6927f2a2734fa47b7efc7416ca2a25 | [
"Apache-2.0"
]
| null | null | null | """
Annotate VCF files with VRS
Input Format: VCF
Output Format: VCF
The user should pass arguments for the VCF input, VCF output, &
the vrs object file name.
ex. python3 src/ga4gh/vrs/extras/vcf_annotation.py input.vcf.gz --out
./output.vcf.gz --vrs-file ./vrs_objects.pkl
"""
import argparse
import sys
import pickle
import time
from biocommons.seqrepo import SeqRepo
import pysam
from ga4gh.vrs.dataproxy import SeqRepoDataProxy
from ga4gh.vrs.extras.translator import Translator
class VCFAnnotator:
"""
This class provides utility for annotating VCF's with VRS allele id's.
VCF's are read using pysam and stored as pysam objects.
Alleles are translated into vrs allele id's using VRS-Python Translator.
"""
def __init__(self, tlr) -> None:
"""
param: Translator tlr Valid translator object with a specified data proxy
"""
self.tlr = tlr
def annotate(self, inputfile, outputfile, vrsfile):
"""
Annotates an input VCF file with VRS allele ids & creates a
pickle file containing the vrs object information.
param: str inputfile The path and filename for the input VCF file
param: str outputfile The path and filename for the output VCF file
param: str vrsfile The path and filename for the output VRS object file
"""
INFO_FIELD_ID = "VRS_Allele"
vrs_data = {}
vcf_in = pysam.VariantFile(filename=inputfile)
vcf_in.header.info.add(INFO_FIELD_ID, "1", "String", "vrs")
vcf_out = pysam.VariantFile(outputfile, "w", header=vcf_in.header)
vrs_out = open(vrsfile, "wb") # For sending VRS data to the pickle file
for record in vcf_in:
ld = self._record_digests(record, vrs_data)
record.info[INFO_FIELD_ID] = ",".join(ld)
vcf_out.write(record)
pickle.dump(vrs_data, vrs_out)
vrs_out.close()
vcf_in.close()
vcf_out.close()
def _record_digests(self, record, vrs_data):
"""
Mutate vrs_data with vrs object information and returning a list of vrs allele ids
param: pysam.VariantRecord record A row in the vcf file
param: dict vrs_data Dictionary containing the VRS object information for the VCF
return: list vrs_allele_ids List containing the vrs allele id information
"""
gnomad_loc = f"{record.chrom}-{record.pos}"
alts = record.alts if record.alts else []
data = f"{record.chrom}\t{record.pos}\t{record.id}\t{record.ref}\t{record.alts}"
        # gnomad-style payloads look like '<chrom>-<pos>-<ref>-<alt>', e.g. '20-14369-G-A'
reference_allele = f"{gnomad_loc}-{record.ref}-{record.ref}"
vrs_ref_object = self.tlr.translate_from(reference_allele, "gnomad")
vrs_data[reference_allele] = str(vrs_ref_object.as_dict())
alleles = [f"{gnomad_loc}-{record.ref}-{a}" for a in [*alts]] # using gnomad format
vrs_allele_ids = [vrs_ref_object._id._value]
for allele in alleles:
if "*" in allele:
vrs_allele_ids.append("")
else:
vrs_object = self.tlr.translate_from(allele, "gnomad")
vrs_allele_ids.append(vrs_object._id._value)
vrs_data[data] = str(vrs_object.as_dict())
return vrs_allele_ids
def parse_args(argv):
"""
Parses arguments passed in by the user
param: list[str] argv Arguments passed by the user to specify file locations and names
return: argparse.Namespace Returns the options passed by the user to be assigned to proper variables
"""
ap = argparse.ArgumentParser()
ap.add_argument("VCF_IN")
ap.add_argument("--out", "-o", default="-")
ap.add_argument("--vrs-file", default="-")
opts = ap.parse_args(argv)
return opts
if __name__ == "__main__":
start_time = time.time()
options = parse_args(sys.argv[1:])
print(f"These are the options that you have selected: {options}\n")
data_proxy = SeqRepoDataProxy(SeqRepo("/usr/local/share/seqrepo/latest"))
tlr = Translator(data_proxy)
vcf_annotator = VCFAnnotator(tlr)
vcf_annotator.annotate(options.VCF_IN, options.out, options.vrs_file)
end_time = time.time()
total_time = (float(end_time) - float(start_time))
total_time_minutes = (total_time / 60)
print(f"This program took {total_time} seconds to run.")
print(f"This program took {total_time_minutes} minutes to run.")
| 36.382114 | 104 | 0.666592 | 2,848 | 0.636425 | 0 | 0 | 0 | 0 | 0 | 0 | 2,171 | 0.48514 |
3153b18dc5b355a77d44d559ba2a0ad4652444b2 | 11,438 | py | Python | ci/infra/testrunner/utils/utils.py | butsoleg/skuba | 1a363d4f3ca8fc33bfb0284043da26fe879c31a3 | [
"Apache-2.0"
]
| null | null | null | ci/infra/testrunner/utils/utils.py | butsoleg/skuba | 1a363d4f3ca8fc33bfb0284043da26fe879c31a3 | [
"Apache-2.0"
]
| null | null | null | ci/infra/testrunner/utils/utils.py | butsoleg/skuba | 1a363d4f3ca8fc33bfb0284043da26fe879c31a3 | [
"Apache-2.0"
]
| null | null | null | import glob
import hashlib
import logging
import os
import shutil
import subprocess
from functools import wraps
from tempfile import gettempdir
from threading import Thread
import requests
from timeout_decorator import timeout
from utils.constants import Constant
from utils.format import Format
logger = logging.getLogger('testrunner')
_stepdepth = 0
def step(f):
@wraps(f)
def wrapped(*args, **kwargs):
global _stepdepth
_stepdepth += 1
logger.debug("{} entering {} {}".format(Format.DOT * _stepdepth, f.__name__,
f.__doc__ or ""))
r = f(*args, **kwargs)
logger.debug("{} exiting {}".format(
Format.DOT_EXIT * _stepdepth, f.__name__))
_stepdepth -= 1
return r
return wrapped
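# Illustrative sketch (not part of the original file): any helper can be
# wrapped with @step to get the indented enter/exit debug logging shown above.
# The function name and argument below are hypothetical.
#
#   @step
#   def deploy_cluster(platform):
#       """Deploy a test cluster"""
#       ...
#
#   deploy_cluster("openstack")  # logs ". entering deploy_cluster Deploy a test cluster"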
class Utils:
def __init__(self, conf):
self.conf = conf
@staticmethod
def chmod_recursive(directory, permissions):
os.chmod(directory, permissions)
for file in glob.glob(os.path.join(directory, "**/*"), recursive=True):
try:
os.chmod(file, permissions)
except Exception as ex:
logger.debug(ex)
@staticmethod
def cleanup_file(file):
if os.path.exists(file):
logger.debug(f"Cleaning up {file}")
try:
try:
# Attempt to remove the file first, because a socket (e.g.
# ssh-agent) is not a file but has to be removed like one.
os.remove(file)
except IsADirectoryError:
shutil.rmtree(file)
except Exception as ex:
logger.debug(ex)
else:
logger.debug(f"Nothing to clean up for {file}")
@staticmethod
def cleanup_files(files):
"""Remove any files or dirs in a list if they exist"""
for file in files:
Utils.cleanup_file(file)
def ssh_cleanup(self):
"""Remove ssh sock files"""
# TODO: also kill ssh agent here? maybe move pkill to kill_ssh_agent()?
sock_file = self.ssh_sock_fn()
sock_dir = os.path.dirname(sock_file)
try:
Utils.cleanup_file(sock_file)
# also remove tempdir if it's empty afterwards
if 0 == len(os.listdir(sock_dir)):
os.rmdir(sock_dir)
else:
logger.warning(f"Dir {sock_dir} not empty; leaving it")
except FileNotFoundError:
pass
except OSError as ex:
logger.debug(ex)
def collect_remote_logs(self, ip_address, logs, store_path):
"""
Collect logs from a remote machine
:param ip_address: (str) IP of the machine to collect the logs from
:param logs: (dict: list) The different logs to collect {"files": [], "dirs": [], ""services": []}
:param store_path: (str) Path to copy the logs to
:return: (bool) True if there was an error while collecting the logs
"""
logging_errors = False
for log in logs.get("files", []):
try:
self.scp_file(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for log in logs.get("dirs", []):
try:
self.rsync(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for service in logs.get("services", []):
try:
self.ssh_run(
ip_address, f"sudo journalctl -xeu {service} > {service}.log")
self.scp_file(ip_address, f"{service}.log", store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {service}.log from {ip_address}\n {ex}")
logging_errors = True
return logging_errors
def authorized_keys(self):
public_key_path = self.conf.terraform.ssh_key + ".pub"
os.chmod(self.conf.terraform.ssh_key, 0o400)
with open(public_key_path) as f:
pubkey = f.read().strip()
return pubkey
def ssh_run(self, ipaddr, cmd):
key_fn = self.conf.terraform.ssh_key
cmd = "ssh " + Constant.SSH_OPTS + " -i {key_fn} {username}@{ip} -- '{cmd}'".format(
key_fn=key_fn, ip=ipaddr, cmd=cmd, username=self.conf.terraform.nodeuser)
return self.runshellcommand(cmd)
def scp_file(self, ip_address, remote_file_path, local_file_path):
"""
Copies a remote file from the given ip to the give path
:param ip_address: (str) IP address of the node to copy from
:param remote_file_path: (str) Path of the file to be copied
:param local_file_path: (str) Path where to store the log
:return:
"""
cmd = (f"scp {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}"
f" {self.conf.terraform.nodeuser}@{ip_address}:{remote_file_path} {local_file_path}")
self.runshellcommand(cmd)
def rsync(self, ip_address, remote_dir_path, local_dir_path):
"""
Copies a remote dir from the given ip to the give path
:param ip_address: (str) IP address of the node to copy from
:param remote_dir_path: (str) Path of the dir to be copied
:param local_dir_path: (str) Path where to store the dir
:return:
"""
cmd = (f'rsync -avz --no-owner --no-perms -e "ssh {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}" '
f'--rsync-path="sudo rsync" --ignore-missing-args {self.conf.terraform.nodeuser}@{ip_address}:{remote_dir_path} '
f'{local_dir_path}')
self.runshellcommand(cmd)
def runshellcommand(self, cmd, cwd=None, env={}, ignore_errors=False, stdin=None):
"""Running shell command in {workspace} if cwd == None
Eg) cwd is "skuba", cmd will run shell in {workspace}/skuba/
cwd is None, cmd will run in {workspace}
cwd is abs path, cmd will run in cwd
Keyword arguments:
cmd -- command to run
cwd -- dir to run the cmd
env -- environment variables
ignore_errors -- don't raise exception if command fails
stdin -- standard input for the command in bytes
"""
if not cwd:
cwd = self.conf.workspace
if not os.path.isabs(cwd):
cwd = os.path.join(self.conf.workspace, cwd)
if not os.path.exists(cwd):
raise FileNotFoundError(Format.alert("Directory {} does not exists".format(cwd)))
if logging.DEBUG >= logger.level:
logger.debug("Executing command\n"
" cwd: {} \n"
" env: {}\n"
" cmd: {}".format(cwd, str(env) if env else "{}", cmd))
else:
logger.info("Executing command {}".format(cmd))
stdout, stderr = [], []
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd,
stdin=subprocess.PIPE if stdin else None, shell=True, env=env
)
if stdin:
p.stdin.write(stdin)
p.stdin.close()
stdoutStreamer = Thread(target = self.read_fd, args = (p, p.stdout, logger.debug, stdout))
stderrStreamer = Thread(target = self.read_fd, args = (p, p.stderr, logger.error, stderr))
stdoutStreamer.start()
stderrStreamer.start()
stdoutStreamer.join()
stderrStreamer.join()
# this is redundant, at this point threads were joined and they waited for the subprocess
# to exit, however it should not hurt to explicitly wait for it again (no-op).
p.wait()
stdout, stderr = "".join(stdout), "".join(stderr)
if p.returncode != 0:
if not ignore_errors:
raise RuntimeError("Error executing command {}".format(cmd))
else:
return stderr
return stdout
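    # Illustrative sketch (not part of the original file) of how callers use
    # runshellcommand; the commands and cwd below are hypothetical.
    #
    #   utils = Utils(conf)
    #   utils.runshellcommand("terraform version", cwd="skuba")
    #   utils.runshellcommand("ls -la", env={"LC_ALL": "C"}, ignore_errors=True)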
def ssh_sock_fn(self):
"""generate path to ssh socket
A socket path can't be over 107 chars on Linux, so generate a short
hash of the workspace and use that in $TMPDIR (usually /tmp) so we have
a predictable, test-unique, fixed-length path.
"""
path = os.path.join(
gettempdir(),
hashlib.md5(self.conf.workspace.encode()).hexdigest(),
"ssh-agent-sock"
)
maxl = 107
if len(path) > maxl:
raise Exception(f"Socket path '{path}' len {len(path)} > {maxl}")
return path
def read_fd(self, proc, fd, logger_func, output):
"""Read from fd, logging using logger_func
Read from fd, until proc is finished. All contents will
also be appended onto output."""
while True:
contents = fd.readline().decode()
if contents == '' and proc.poll() is not None:
return
if contents:
output.append(contents)
logger_func(contents.strip())
@timeout(60)
@step
def setup_ssh(self):
os.chmod(self.conf.terraform.ssh_key, 0o400)
# use a dedicated agent to minimize stateful components
sock_fn = self.ssh_sock_fn()
# be sure directory containing socket exists and socket doesn't exist
if os.path.exists(sock_fn):
try:
if os.path.isdir(sock_fn):
os.path.rmdir(sock_fn) # rmdir only removes an empty dir
else:
os.remove(sock_fn)
except FileNotFoundError:
pass
try:
os.mkdir(os.path.dirname(sock_fn), mode=0o700)
except FileExistsError:
if os.path.isdir(os.path.dirname(sock_fn)):
pass
else:
raise
# clean up old ssh agent process(es)
try:
self.runshellcommand("pkill -f 'ssh-agent -a {}'".format(sock_fn))
logger.warning("Killed previous instance of ssh-agent")
        except Exception:
pass
self.runshellcommand("ssh-agent -a {}".format(sock_fn))
self.runshellcommand(
"ssh-add " + self.conf.terraform.ssh_key, env={"SSH_AUTH_SOCK": sock_fn})
@timeout(30)
@step
def info(self):
"""Node info"""
info_lines = "Env vars: {}\n".format(sorted(os.environ))
info_lines += self.runshellcommand('ip a')
info_lines += self.runshellcommand('ip r')
info_lines += self.runshellcommand('cat /etc/resolv.conf')
# TODO: the logic for retrieving external is platform depedant and should be
# moved to the corresponding platform
try:
r = requests.get(
'http://169.254.169.254/2009-04-04/meta-data/public-ipv4', timeout=2)
r.raise_for_status()
except (requests.HTTPError, requests.Timeout) as err:
logger.warning(
f'Meta Data service unavailable could not get external IP addr{err}')
else:
info_lines += 'External IP addr: {}'.format(r.text)
return info_lines
| 37.016181 | 128 | 0.573439 | 10,622 | 0.928659 | 0 | 0 | 3,568 | 0.311943 | 0 | 0 | 4,035 | 0.352771 |
3154dea931b787a54d8711fa79545f3f1aea8641 | 1,076 | py | Python | nonebot/command/argfilter/controllers.py | EVAyo/nonebot | 2780ea4e8a1b6bb961f95a86e7e6b3983badf75f | [
"MIT"
]
| 676 | 2018-12-28T09:53:51.000Z | 2020-04-10T16:28:57.000Z | nonebot/command/argfilter/controllers.py | EVAyo/nonebot | 2780ea4e8a1b6bb961f95a86e7e6b3983badf75f | [
"MIT"
]
| 133 | 2018-12-27T16:01:35.000Z | 2020-04-11T04:49:01.000Z | nonebot/command/argfilter/controllers.py | EVAyo/nonebot | 2780ea4e8a1b6bb961f95a86e7e6b3983badf75f | [
"MIT"
]
| 164 | 2019-01-02T07:40:33.000Z | 2020-04-11T06:54:30.000Z | """
提供几种常用的控制器。
这些验证器通常需要提供一些参数进行一次调用,返回的结果才是真正的验证器,其中的技巧在于通过闭包使要控制的对象能够被内部函数访问。
版本: 1.3.0+
"""
import re
from nonebot import CommandSession
from nonebot.helpers import render_expression
def handle_cancellation(session: CommandSession):
"""
在用户发送 `算了`、`不用了`、`取消吧`、`停` 之类的话的时候,结束当前传入的命令会话(调用 `session.finish()`),并发送配置项 `SESSION_CANCEL_EXPRESSION` 所填的内容。
如果不是上述取消指令,则将输入原样输出。
参数:
session: 要控制的命令会话
"""
def control(value):
if _is_cancellation(value) is True:
session.finish(
render_expression(session.bot.config.SESSION_CANCEL_EXPRESSION))
return value
return control
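# Illustrative sketch (not part of the original module), assuming the usual
# nonebot v1 pattern of passing controllers to ``session.get()`` via
# ``arg_filters`` inside a command handler:
#
#   from nonebot.command.argfilter import controllers
#
#   city = session.get(
#       'city',
#       prompt='Which city?',
#       arg_filters=[controllers.handle_cancellation(session)],
#   )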
def _is_cancellation(sentence: str) -> bool:
for kw in ('算', '别', '不', '停', '取消'):
if kw in sentence:
# a keyword matches
break
else:
# no keyword matches
return False
if re.match(r'^那?[算别不停]\w{0,3}了?吧?$', sentence) or \
re.match(r'^那?(?:[给帮]我)?取消了?吧?$', sentence):
return True
return False
__all__ = [
'handle_cancellation',
]
| 20.692308 | 115 | 0.621747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.535466 |
31580ef618e975209c707f821f01726f7b8d5bf6 | 21,726 | py | Python | hnn_core/dipole.py | chenghuzi/hnn-core | b974fed4317b9b03ec732850829d9c17841c6ee2 | [
"BSD-3-Clause"
]
| null | null | null | hnn_core/dipole.py | chenghuzi/hnn-core | b974fed4317b9b03ec732850829d9c17841c6ee2 | [
"BSD-3-Clause"
]
| null | null | null | hnn_core/dipole.py | chenghuzi/hnn-core | b974fed4317b9b03ec732850829d9c17841c6ee2 | [
"BSD-3-Clause"
]
| null | null | null | """Class to handle the dipoles."""
# Authors: Mainak Jas <[email protected]>
# Sam Neymotin <[email protected]>
import warnings
import numpy as np
from copy import deepcopy
from .viz import plot_dipole, plot_psd, plot_tfr_morlet
def simulate_dipole(net, tstop, dt=0.025, n_trials=None, record_vsoma=False,
record_isoma=False, postproc=False):
"""Simulate a dipole given the experiment parameters.
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
n_trials : int | None
The number of trials to simulate. If None, the 'N_trials' value
of the ``params`` used to create ``net`` is used (must be >0)
record_vsoma : bool
Option to record somatic voltages from cells
record_isoma : bool
Option to record somatic currents from cells
postproc : bool
If True, smoothing (``dipole_smooth_win``) and scaling
(``dipole_scalefctr``) values are read from the parameter file, and
applied to the dipole objects before returning. Note that this setting
only affects the dipole waveforms, and not somatic voltages, possible
extracellular recordings etc. The preferred way is to use the
:meth:`~hnn_core.dipole.Dipole.smooth` and
:meth:`~hnn_core.dipole.Dipole.scale` methods instead. Default: False.
Returns
-------
dpls: list
List of dipole objects for each trials
"""
from .parallel_backends import _BACKEND, JoblibBackend
if _BACKEND is None:
_BACKEND = JoblibBackend(n_jobs=1)
if n_trials is None:
n_trials = net._params['N_trials']
if n_trials < 1:
raise ValueError("Invalid number of simulations: %d" % n_trials)
if not net.connectivity:
warnings.warn('No connections instantiated in network. Consider using '
'net = jones_2009_model() or net = law_2021_model() to '
'create a predefined network from published models.',
UserWarning)
for drive_name, drive in net.external_drives.items():
if 'tstop' in drive['dynamics']:
if drive['dynamics']['tstop'] is None:
drive['dynamics']['tstop'] = tstop
for bias_name, bias in net.external_biases.items():
for cell_type, bias_cell_type in bias.items():
if bias_cell_type['tstop'] is None:
bias_cell_type['tstop'] = tstop
if bias_cell_type['tstop'] < 0.:
raise ValueError('End time of tonic input cannot be negative')
duration = bias_cell_type['tstop'] - bias_cell_type['t0']
if duration < 0.:
raise ValueError('Duration of tonic input cannot be negative')
net._instantiate_drives(n_trials=n_trials, tstop=tstop)
net._reset_rec_arrays()
if isinstance(record_vsoma, bool):
net._params['record_vsoma'] = record_vsoma
else:
raise TypeError("record_vsoma must be bool, got %s"
% type(record_vsoma).__name__)
if isinstance(record_isoma, bool):
net._params['record_isoma'] = record_isoma
else:
raise TypeError("record_isoma must be bool, got %s"
% type(record_isoma).__name__)
if postproc:
warnings.warn('The postproc-argument is deprecated and will be removed'
' in a future release of hnn-core. Please define '
'smoothing and scaling explicitly using Dipole methods.',
DeprecationWarning)
dpls = _BACKEND.simulate(net, tstop, dt, n_trials, postproc)
return dpls
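# Illustrative sketch (not part of the original module): a typical simulation,
# assuming a network built with hnn_core.jones_2009_model().
#
#   from hnn_core import jones_2009_model
#
#   net = jones_2009_model()
#   dpls = simulate_dipole(net, tstop=170., n_trials=1)
#   dpls[0].smooth(window_len=30).scale(3000)  # explicit post-processing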
def read_dipole(fname):
"""Read dipole values from a file and create a Dipole instance.
Parameters
----------
fname : str
Full path to the input file (.txt)
Returns
-------
dpl : Dipole
The instance of Dipole class
"""
dpl_data = np.loadtxt(fname, dtype=float)
dpl = Dipole(dpl_data[:, 0], dpl_data[:, 1:])
return dpl
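# Illustrative sketch (not part of the original module): Dipole.write() and
# read_dipole() round-trip through a plain-text file (the path is hypothetical).
#
#   dpl.write('dpl_0.txt')
#   dpl_loaded = read_dipole('dpl_0.txt')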
def average_dipoles(dpls):
"""Compute dipole averages over a list of Dipole objects.
Parameters
----------
dpls: list of Dipole objects
Contains list of dipole objects, each with a `data` member containing
'L2', 'L5' and 'agg' components
Returns
-------
dpl: instance of Dipole
A new dipole object with each component of `dpl.data` representing the
average over the same components in the input list
"""
scale_applied = dpls[0].scale_applied
for dpl_idx, dpl in enumerate(dpls):
if dpl.scale_applied != scale_applied:
raise RuntimeError('All dipoles must be scaled equally!')
if not isinstance(dpl, Dipole):
raise ValueError(
f"All elements in the list should be instances of "
f"Dipole. Got {type(dpl)}")
if dpl.nave > 1:
raise ValueError("Dipole at index %d was already an average of %d"
" trials. Cannot reaverage" %
(dpl_idx, dpl.nave))
avg_data = list()
layers = dpl.data.keys()
for layer in layers:
avg_data.append(
np.mean(np.array([dpl.data[layer] for dpl in dpls]), axis=0)
)
avg_data = np.c_[avg_data].T
avg_dpl = Dipole(dpls[0].times, avg_data)
# The averaged scale should equal all scals in the input dpl list.
avg_dpl.scale_applied = scale_applied
# set nave to the number of trials averaged in this dipole
avg_dpl.nave = len(dpls)
return avg_dpl
def _rmse(dpl, exp_dpl, tstart=0.0, tstop=0.0, weights=None):
""" Calculates RMSE between data in dpl and exp_dpl
Parameters
----------
dpl: instance of Dipole
A dipole object with simulated data
exp_dpl: instance of Dipole
A dipole object with experimental data
tstart | None: float
Time at beginning of range over which to calculate RMSE
tstop | None: float
Time at end of range over which to calculate RMSE
weights | None: array
An array of weights to be applied to each point in
simulated dpl. Must have length >= dpl.data
If None, weights will be replaced with 1's for typical RMSE
calculation.
Returns
-------
err: float
Weighted RMSE between data in dpl and exp_dpl
"""
from scipy import signal
exp_times = exp_dpl.times
sim_times = dpl.times
# do tstart and tstop fall within both datasets?
# if not, use the closest data point as the new tstop/tstart
for tseries in [exp_times, sim_times]:
if tstart < tseries[0]:
tstart = tseries[0]
if tstop > tseries[-1]:
tstop = tseries[-1]
# make sure start and end times are valid for both dipoles
exp_start_index = (np.abs(exp_times - tstart)).argmin()
exp_end_index = (np.abs(exp_times - tstop)).argmin()
exp_length = exp_end_index - exp_start_index
sim_start_index = (np.abs(sim_times - tstart)).argmin()
sim_end_index = (np.abs(sim_times - tstop)).argmin()
sim_length = sim_end_index - sim_start_index
if weights is None:
# weighted RMSE with weights of all 1's is equivalent to
# normal RMSE
weights = np.ones(len(sim_times[0:sim_end_index]))
weights = weights[sim_start_index:sim_end_index]
dpl1 = dpl.data['agg'][sim_start_index:sim_end_index]
dpl2 = exp_dpl.data['agg'][exp_start_index:exp_end_index]
if (sim_length > exp_length):
# downsample simulation timeseries to match exp data
dpl1 = signal.resample(dpl1, exp_length)
weights = signal.resample(weights, exp_length)
indices = np.where(weights < 1e-4)
weights[indices] = 0
elif (sim_length < exp_length):
# downsample exp timeseries to match simulation data
dpl2 = signal.resample(dpl2, sim_length)
return np.sqrt((weights * ((dpl1 - dpl2) ** 2)).sum() / weights.sum())
class Dipole(object):
"""Dipole class.
An instance of the ``Dipole``-class contains the simulated dipole moment
timecourses for L2 and L5 pyramidal cells, as well as their aggregate
(``'agg'``). The units of the dipole moment are in ``nAm``
(1e-9 Ampere-meters).
Parameters
----------
times : array (n_times,)
The time vector (in ms)
data : array, shape (n_times x n_layers)
The data. The first column represents 'agg' (the total diple),
the second 'L2' layer and the last one 'L5' layer. For experimental
data, it can contain only one column.
nave : int
Number of trials that were averaged to produce this Dipole. Defaults
to 1
Attributes
----------
times : array-like
The time vector (in ms)
sfreq : float
The sampling frequency (in Hz)
data : dict of array
Dipole moment timecourse arrays with keys 'agg', 'L2' and 'L5'
nave : int
Number of trials that were averaged to produce this Dipole
scale_applied : int or float
The total factor by which the dipole has been scaled (using
:meth:`~hnn_core.dipole.Dipole.scale`).
"""
def __init__(self, times, data, nave=1): # noqa: D102
self.times = np.array(times)
if data.ndim == 1:
data = data[:, None]
if data.shape[1] == 3:
self.data = {'agg': data[:, 0], 'L2': data[:, 1], 'L5': data[:, 2]}
elif data.shape[1] == 1:
self.data = {'agg': data[:, 0]}
self.nave = nave
self.sfreq = 1000. / (times[1] - times[0]) # NB assumes len > 1
self.scale_applied = 1 # for visualisation
def copy(self):
"""Return a copy of the Dipole instance
Returns
-------
dpl_copy : instance of Dipole
A copy of the Dipole instance.
"""
return deepcopy(self)
def _post_proc(self, window_len, fctr):
"""Apply scaling and smoothing from param-files (DEPRECATE)
Parameters
----------
window_len : int
Smoothing window in ms
fctr : int
Scaling factor
"""
self.scale(fctr)
if window_len > 0: # this is to allow param-files with len==0
self.smooth(window_len)
def _convert_fAm_to_nAm(self):
"""The NEURON simulator output is in fAm, convert to nAm
NB! Must be run `after` :meth:`Dipole.baseline_renormalization`
"""
for key in self.data.keys():
self.data[key] *= 1e-6
def scale(self, factor):
"""Scale (multiply) the dipole moment by a fixed factor
The attribute ``Dipole.scale_applied`` is updated to reflect factors
applied and displayed in plots.
Parameters
----------
factor : int
Scaling factor, applied to the data in-place.
"""
for key in self.data.keys():
self.data[key] *= factor
self.scale_applied *= factor
return self
def smooth(self, window_len):
"""Smooth the dipole waveform using Hamming-windowed convolution
Note that this method operates in-place, i.e., it will alter the data.
If you prefer a filtered copy, consider using the
:meth:`~hnn_core.dipole.Dipole.copy`-method.
Parameters
----------
window_len : float
The length (in ms) of a `~numpy.hamming` window to convolve the
data with.
Returns
-------
dpl_copy : instance of Dipole
A copy of the modified Dipole instance.
"""
from .utils import smooth_waveform
for key in self.data.keys():
self.data[key] = smooth_waveform(self.data[key], window_len,
self.sfreq)
return self
def savgol_filter(self, h_freq):
"""Smooth the dipole waveform using Savitzky-Golay filtering
Note that this method operates in-place, i.e., it will alter the data.
If you prefer a filtered copy, consider using the
:meth:`~hnn_core.dipole.Dipole.copy`-method. The high-frequency cutoff
value of a Savitzky-Golay filter is approximate; see the SciPy
reference: :func:`~scipy.signal.savgol_filter`.
Parameters
----------
h_freq : float or None
Approximate high cutoff frequency in Hz. Note that this
is not an exact cutoff, since Savitzky-Golay filtering
is done using polynomial fits
instead of FIR/IIR filtering. This parameter is thus used to
determine the length of the window over which a 5th-order
polynomial smoothing is applied.
Returns
-------
dpl_copy : instance of Dipole
A copy of the modified Dipole instance.
"""
from .utils import _savgol_filter
if h_freq < 0:
raise ValueError('h_freq cannot be negative')
elif h_freq > 0.5 * self.sfreq:
raise ValueError(
'h_freq must be less than half the sample rate')
for key in self.data.keys():
self.data[key] = _savgol_filter(self.data[key],
h_freq,
self.sfreq)
return self
def plot(self, tmin=None, tmax=None, layer='agg', decim=None, ax=None,
color='k', show=True):
"""Simple layer-specific plot function.
Parameters
----------
tmin : float or None
Start time of plot (in ms). If None, plot entire simulation.
tmax : float or None
End time of plot (in ms). If None, plot entire simulation.
layer : str
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
decimate : int
Factor by which to decimate the raw dipole traces (optional)
ax : instance of matplotlib figure | None
The matplotlib axis
color : tuple of float
RGBA value to use for plotting. By default, 'k' (black)
show : bool
If True, show the figure
Returns
-------
fig : instance of plt.fig
The matplotlib figure handle.
"""
return plot_dipole(self, tmin=tmin, tmax=tmax, ax=ax, layer=layer,
decim=decim, color=color, show=show)
def plot_psd(self, fmin=0, fmax=None, tmin=None, tmax=None, layer='agg',
ax=None, show=True):
"""Plot power spectral density (PSD) of dipole time course
Applies `~scipy.signal.periodogram` from SciPy with
``window='hamming'``.
Note that no spectral averaging is applied across time, as most
``hnn_core`` simulations are short-duration. However, passing a list of
`Dipole` instances will plot their average (Hamming-windowed) power,
which resembles the `Welch`-method applied over time.
Parameters
----------
dpl : instance of Dipole | list of Dipole instances
The Dipole object.
fmin : float
Minimum frequency to plot (in Hz). Default: 0 Hz
fmax : float
Maximum frequency to plot (in Hz). Default: None (plot up to
Nyquist)
tmin : float or None
Start time of data to include (in ms). If None, use entire
simulation.
tmax : float or None
End time of data to include (in ms). If None, use entire
simulation.
layer : str, default 'agg'
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
ax : instance of matplotlib figure | None
The matplotlib axis.
show : bool
If True, show the figure
Returns
-------
fig : instance of matplotlib Figure
The matplotlib figure handle.
"""
return plot_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax,
layer=layer, ax=ax, show=show)
def plot_tfr_morlet(self, freqs, n_cycles=7., tmin=None, tmax=None,
layer='agg', decim=None, padding='zeros', ax=None,
colormap='inferno', colorbar=True, show=True):
"""Plot Morlet time-frequency representation of dipole time course
NB: Calls `~mne.time_frequency.tfr_array_morlet`, so ``mne`` must be
installed.
Parameters
----------
dpl : instance of Dipole | list of Dipole instances
The Dipole object. If a list of dipoles is given, the power is
calculated separately for each trial, then averaged.
freqs : array
Frequency range of interest.
n_cycles : float or array of float, default 7.0
Number of cycles. Fixed number or one per frequency.
tmin : float or None
Start time of plot in milliseconds. If None, plot entire
simulation.
tmax : float or None
End time of plot in milliseconds. If None, plot entire simulation.
layer : str, default 'agg'
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
decim : int or list of int or None (default)
Optional (integer) factor by which to decimate the raw dipole
traces. The SciPy function :func:`~scipy.signal.decimate` is used,
which recommends values <13. To achieve higher decimation factors,
a list of ints can be provided. These are applied successively.
padding : str or None
Optional padding of the dipole time course beyond the plotting
limits. Possible values are: 'zeros' for padding with 0's
(default), 'mirror' for mirror-image padding.
ax : instance of matplotlib figure | None
The matplotlib axis
colormap : str
The name of a matplotlib colormap, e.g., 'viridis'. Default:
'inferno'
colorbar : bool
If True (default), adjust figure to include colorbar.
show : bool
If True, show the figure
Returns
-------
fig : instance of matplotlib Figure
The matplotlib figure handle.
"""
return plot_tfr_morlet(
self, freqs, n_cycles=n_cycles, tmin=tmin, tmax=tmax,
layer=layer, decim=decim, padding=padding, ax=ax,
colormap=colormap, colorbar=colorbar, show=show)
def _baseline_renormalize(self, N_pyr_x, N_pyr_y):
"""Only baseline renormalize if the units are fAm.
Parameters
----------
N_pyr_x : int
Nr of cells (x)
N_pyr_y : int
Nr of cells (y)
"""
# N_pyr cells in grid. This is PER LAYER
N_pyr = N_pyr_x * N_pyr_y
# dipole offset calculation: increasing number of pyr
# cells (L2 and L5, simultaneously)
# with no inputs resulted in an aggregate dipole over the
# interval [50., 1000.] ms that
# eventually plateaus at -48 fAm. The range over this interval
# is something like 3 fAm
# so the resultant correction is here, per dipole
# dpl_offset = N_pyr * 50.207
dpl_offset = {
# these values will be subtracted
'L2': N_pyr * 0.0443,
'L5': N_pyr * -49.0502
# 'L5': N_pyr * -48.3642,
# will be calculated next, this is a placeholder
# 'agg': None,
}
# L2 dipole offset can be roughly baseline shifted over
# the entire range of t
self.data['L2'] -= dpl_offset['L2']
# L5 dipole offset should be different for interval [50., 500.]
# and then it can be offset
# slope (m) and intercept (b) params for L5 dipole offset
# uncorrected for N_cells
# these values were fit over the range [37., 750.)
m = 3.4770508e-3
b = -51.231085
# these values were fit over the range [750., 5000]
t1 = 750.
m1 = 1.01e-4
b1 = -48.412078
# piecewise normalization
self.data['L5'][self.times <= 37.] -= dpl_offset['L5']
self.data['L5'][(self.times > 37.) & (self.times < t1)] -= N_pyr * \
(m * self.times[(self.times > 37.) & (self.times < t1)] + b)
self.data['L5'][self.times >= t1] -= N_pyr * \
(m1 * self.times[self.times >= t1] + b1)
# recalculate the aggregate dipole based on the baseline
# normalized ones
self.data['agg'] = self.data['L2'] + self.data['L5']
def write(self, fname):
"""Write dipole values to a file.
Parameters
----------
fname : str
Full path to the output file (.txt)
Outputs
-------
A tab separatd txt file where rows correspond
to samples and columns correspond to
1) time (s),
2) aggregate current dipole (scaled nAm),
3) L2/3 current dipole (scaled nAm), and
4) L5 current dipole (scaled nAm)
"""
if self.nave > 1:
warnings.warn("Saving Dipole to file that is an average of %d"
" trials" % self.nave)
X = [self.times]
fmt = ['%3.3f']
for data in self.data.values():
X.append(data)
fmt.append('%5.4f')
X = np.r_[X].T
np.savetxt(fname, X, fmt=fmt, delimiter='\t')
| 36.270451 | 79 | 0.589708 | 13,552 | 0.623769 | 0 | 0 | 0 | 0 | 0 | 0 | 13,748 | 0.63279 |
315848ad3b388abc47c5004cde951c763a8f0cc9 | 9,728 | py | Python | openstates/openstates-master/openstates/ga/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | openstates/openstates-master/openstates/ga/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | openstates/openstates-master/openstates/ga/bills.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from collections import defaultdict
from .util import get_client, get_url, backoff
# Methods (7):
# GetLegislationDetail(xs:int LegislationId, )
#
# GetLegislationDetailByDescription(ns2:DocumentType DocumentType,
# xs:int Number, xs:int SessionId)
#
# GetLegislationForSession(xs:int SessionId, )
#
# GetLegislationRange(ns2:LegislationIndexRangeSet Range, )
#
# GetLegislationRanges(xs:int SessionId,
# ns2:DocumentType DocumentType, xs:int RangeSize, )
#
# GetLegislationSearchResultsPaged(ns2:LegislationSearchConstraints
# Constraints, xs:int PageSize,
# xs:int StartIndex, )
# GetTitles()
member_cache = {}
SOURCE_URL = "http://www.legis.ga.gov/Legislation/en-US/display/{session}/{bid}"
class GABillScraper(BillScraper):
jurisdiction = 'ga'
lservice = get_client("Legislation").service
vservice = get_client("Votes").service
mservice = get_client("Members").service
lsource = get_url("Legislation")
msource = get_url("Members")
vsource = get_url("Votes")
def get_member(self, member_id):
if member_id in member_cache:
return member_cache[member_id]
mem = backoff(self.mservice.GetMember, member_id)
member_cache[member_id] = mem
return mem
def scrape(self, session, chambers):
sid = self.metadata['session_details'][session]['_guid']
legislation = backoff(
self.lservice.GetLegislationForSession,
sid
)['LegislationIndex']
for leg in legislation:
lid = leg['Id']
instrument = backoff(self.lservice.GetLegislationDetail, lid)
history = [x for x in instrument['StatusHistory'][0]]
actions = reversed([{
"code": x['Code'],
"action": x['Description'],
"_guid": x['Id'],
"date": x['Date']
} for x in history])
guid = instrument['Id']
bill_type = instrument['DocumentType']
chamber = {
"H": "lower",
"S": "upper",
"J": "joint"
}[bill_type[0]] # XXX: This is a bit of a hack.
bill_id = "%s %s" % (
bill_type,
instrument['Number'],
)
if instrument['Suffix']:
bill_id += instrument['Suffix']
title = instrument['Caption']
description = instrument['Summary']
if title is None:
continue
bill = Bill(
session,
chamber,
bill_id,
title,
description=description,
_guid=guid
)
if instrument['Votes']:
for vote_ in instrument['Votes']:
_, vote_ = vote_
vote_ = backoff(self.vservice.GetVote, vote_[0]['VoteId'])
vote = Vote(
{"House": "lower", "Senate": "upper"}[vote_['Branch']],
vote_['Date'],
vote_['Caption'] or "Vote on Bill",
(vote_['Yeas'] > vote_['Nays']),
vote_['Yeas'],
vote_['Nays'],
(vote_['Excused'] + vote_['NotVoting']),
session=session,
bill_id=bill_id,
bill_chamber=chamber)
vote.add_source(self.vsource)
methods = {"Yea": vote.yes, "Nay": vote.no,}
for vdetail in vote_['Votes'][0]:
whom = vdetail['Member']
how = vdetail['MemberVoted']
try:
m = methods[how]
except KeyError:
m = vote.other
m(whom['Name'])
bill.add_vote(vote)
types = {
"HI": ["other"],
"SI": ["other"],
"HH": ["other"],
"SH": ["other"],
"HPF": ["bill:introduced"],
"HDSAS": ["other"],
"SPF": ["bill:introduced"],
"HSR": ["bill:reading:2"],
"SSR": ["bill:reading:2"],
"HFR": ["bill:reading:1"],
"SFR": ["bill:reading:1"],
"HRECM": ["bill:withdrawn", "committee:referred"],
"SRECM": ["bill:withdrawn", "committee:referred"],
"SW&C": ["bill:withdrawn", "committee:referred"],
"HW&C": ["bill:withdrawn", "committee:referred"],
"HRA": ["bill:passed"],
"SRA": ["bill:passed"],
"HPA": ["bill:passed"],
"HRECO": ["other"],
"SPA": ["bill:passed"],
"HTABL": ["other"], # "House Tabled" - what is this?
"SDHAS": ["other"],
"HCFR": ["committee:passed:favorable"],
"SCFR": ["committee:passed:favorable"],
"HRAR": ["committee:referred"],
"SRAR": ["committee:referred"],
"STR": ["bill:reading:3"],
"SAHAS": ["other"],
"SE": ["bill:passed"],
"SR": ["committee:referred"],
"HTRL": ["bill:reading:3", "bill:failed"],
"HTR": ["bill:reading:3"],
"S3RLT": ["bill:reading:3", "bill:failed"],
"HASAS": ["other"],
"S3RPP": ["other"],
"STAB": ["other"],
"SRECO": ["other"],
"SAPPT": ["other"],
"HCA": ["other"],
"HNOM": ["other"],
"HTT": ["other"],
"STT": ["other"],
"SRECP": ["other"],
"SCRA": ["other"],
"SNOM": ["other"],
"S2R": ["bill:reading:2"],
"H2R": ["bill:reading:2"],
"SENG": ["bill:passed"],
"HENG": ["bill:passed"],
"HPOST": ["other"],
"HCAP": ["other"],
"SDSG": ["governor:signed"],
"SSG": ["governor:received"],
"Signed Gov": ["governor:signed"],
"HDSG": ["governor:signed"],
"HSG": ["governor:received"],
"EFF": ["other"],
"HRP": ["other"],
"STH": ["other"],
"HTS": ["other"],
}
ccommittees = defaultdict(list)
committees = instrument['Committees']
if committees:
for committee in committees[0]:
ccommittees[{
"House": "lower",
"Senate": "upper",
}[committee['Type']]].append(committee['Name'])
for action in actions:
chamber = {
"H": "lower",
"S": "upper",
"E": "other", # Effective Date
}[action['code'][0]]
try:
_types = types[action['code']]
except KeyError:
self.debug(action)
_types = ["other"]
committees = []
if any(('committee' in x for x in _types)):
committees = [str(x) for x in ccommittees.get(chamber, [])]
bill.add_action(chamber, action['action'], action['date'], _types,
committees=committees,
_code=action['code'],
_code_id=action['_guid'])
sponsors = []
if instrument['Authors']:
sponsors = instrument['Authors']['Sponsorship']
if 'Sponsors' in instrument and instrument['Sponsors']:
sponsors += instrument['Sponsors']['Sponsorship']
sponsors = [
(x['Type'], self.get_member(x['MemberId'])) for x in sponsors
]
for typ, sponsor in sponsors:
name = "{First} {Last}".format(**dict(sponsor['Name']))
bill.add_sponsor(
'primary' if 'Author' in typ else 'secondary',
name
)
for version in instrument['Versions']['DocumentDescription']:
name, url, doc_id, version_id = [
version[x] for x in [
'Description',
'Url',
'Id',
'Version'
]
]
bill.add_version(
name,
url,
mimetype='application/pdf',
_internal_document_id=doc_id,
_version_id=version_id
)
versions = sorted(
bill['versions'],
key=lambda x: x['_internal_document_id']
)
bill['versions'] = versions
bill.add_source(self.msource)
bill.add_source(self.lsource)
bill.add_source(SOURCE_URL.format(**{
"session": session,
"bid": guid,
}))
self.save_bill(bill)
| 36.298507 | 82 | 0.42722 | 8,695 | 0.893812 | 0 | 0 | 0 | 0 | 0 | 0 | 2,950 | 0.303248 |
31586b0e8440ac0fb7f47895645a135f6339d512 | 15,014 | py | Python | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
]
| null | null | null | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
]
| null | null | null | data/external/repositories_2to3/267667/kaggle-heart-master/generate_roi_pkl.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
]
| 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import argparse
import numpy as np
import glob
import re
from log import print_to_file
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import pickle as pickle
from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH
from paths import TEST_DATA_PATH
def orthogonal_projection_on_slice(percentual_coordinate, source_metadata, target_metadata):
point = np.array([[percentual_coordinate[0]],
[percentual_coordinate[1]],
[0],
[1]])
image_size = [source_metadata["Rows"], source_metadata["Columns"]]
point = np.dot(np.array( [[image_size[0],0,0,0],
[0,image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = source_metadata["PixelSpacing"]
point = np.dot(np.array( [[pixel_spacing[0],0,0,0],
[0,pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
Fa = np.array(source_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
posa = source_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[Fa[0,0],Fa[1,0],0,posa[0]],
[Fa[0,1],Fa[1,1],0,posa[1]],
[Fa[0,2],Fa[1,2],0,posa[2]],
[0,0,0,1]]), point)
posb = target_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[1,0,0,-posb[0]],
[0,1,0,-posb[1]],
[0,0,1,-posb[2]],
[0,0,0,1]]), point)
Fb = np.array(target_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
ff0 = np.sqrt(np.sum(Fb[0,:]*Fb[0,:]))
ff1 = np.sqrt(np.sum(Fb[1,:]*Fb[1,:]))
point = np.dot(np.array( [[Fb[0,0]/ff0,Fb[0,1]/ff0,Fb[0,2]/ff0,0],
[Fb[1,0]/ff1,Fb[1,1]/ff1,Fb[1,2]/ff1,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = target_metadata["PixelSpacing"]
point = np.dot(np.array( [[1./pixel_spacing[0],0,0,0],
[0,1./pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
image_size = [target_metadata["Rows"], target_metadata["Columns"]]
point = np.dot(np.array( [[1./image_size[0],0,0,0],
[0,1./image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
return point[:2,0] # percentual coordinate as well
#joni
minradius = 15
maxradius = 65
kernel_width = 5
center_margin = 8
num_peaks = 10
num_circles = 10 # 20
radstep = 2
#ira
minradius_mm=25
maxradius_mm=45
kernel_width=5
center_margin=8
num_peaks=10
num_circles=20
radstep=2
def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10,
num_circles=10, radstep=2):
"""
Returns center and radii of ROI region in (i,j) format
"""
# radius of the smallest and largest circles in mm estimated from the train set
# convert to pixel counts
minradius = int(minradius_mm / pixel_spacing)
maxradius = int(maxradius_mm / pixel_spacing)
ximagesize = data[0]['data'].shape[1]
yimagesize = data[0]['data'].shape[2]
xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T
ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1))
lsurface = np.zeros((ximagesize, yimagesize))
allcenters = []
allaccums = []
allradii = []
for dslice in data:
ff1 = fftn(dslice['data'])
fh = np.absolute(ifftn(ff1[1, :, :]))
fh[fh < 0.1 * np.max(fh)] = 0.0
image = 1. * fh / np.max(fh)
# find hough circles and detect two radii
edges = canny(image, sigma=3)
hough_radii = np.arange(minradius, maxradius, radstep)
hough_res = hough_circle(edges, hough_radii)
if hough_res.any():
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract num_peaks circles
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
# Keep the most prominent num_circles circles
sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles]
for idx in sorted_circles_idxs:
center_x, center_y = centers[idx]
allcenters.append(centers[idx])
allradii.append(radii[idx])
allaccums.append(accums[idx])
brightness = accums[idx]
lsurface = lsurface + brightness * np.exp(
-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2)
lsurface = lsurface / lsurface.max()
# select most likely ROI center
roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape)
# determine ROI radius
roi_x_radius = 0
roi_y_radius = 0
for idx in range(len(allcenters)):
xshift = np.abs(allcenters[idx][0] - roi_center[0])
yshift = np.abs(allcenters[idx][1] - roi_center[1])
if (xshift <= center_margin) & (yshift <= center_margin):
roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift))
roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift))
if roi_x_radius > 0 and roi_y_radius > 0:
roi_radii = roi_x_radius, roi_y_radius
else:
roi_radii = None
return roi_center, roi_radii
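# --- Illustrative usage sketch (not part of the original pipeline) ---
# The directory layout below is an assumption for illustration: a patient study
# directory such as '<data_path>/123/study' containing sax_*.pkl slices. Pixel
# spacing is taken from the first sorted slice, mirroring get_slice2roi further down.
def _example_extract_roi_for_patient(patient_study_path):
    """Sketch: compute an ROI per orientation group for one patient directory."""
    slices = sort_slices(get_patient_data(patient_study_path))
    pixel_spacing = slices[0]['metadata']['PixelSpacing'][0]
    for slice_group in group_slices(slices):
        roi_center, roi_radii = extract_roi(slice_group, pixel_spacing)
        print('ROI center %s, radii %s' % (roi_center, roi_radii))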
def read_slice(path):
return pickle.load(open(path, 'rb'))['data']
def read_metadata(path):
d = pickle.load(open(path, 'rb'))['metadata'][0]
metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation',
'PatientSex', 'PatientAge', 'Rows', 'Columns']}
metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing'])
metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient'])
metadata['SliceLocation'] = np.float32(metadata['SliceLocation'])
metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient'])
metadata['PatientSex'] = 1 if metadata['PatientSex'] == 'F' else 0
metadata['PatientAge'] = int(metadata['PatientAge'][1:3])
metadata['Rows'] = int(metadata['Rows'])
metadata['Columns'] = int(metadata['Columns'])
return metadata
def get_patient_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/sax_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(sax_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def get_patient_ch_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/*ch_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(\d+ch_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def sort_slices(slices):
nslices = len(slices)
positions = np.zeros((nslices,))
for i in range(nslices):
positions[i] = slices[i]['metadata']['SliceLocation']
sorted_slices = [s for pos, s in sorted(zip(positions.tolist(), slices),
key=lambda x: x[0], reverse=True)]
return sorted_slices
def group_slices(slice_stack):
"""
Groups slices into stacks with the same image orientation
:param slice_stack:
:return: list of slice stacks
"""
img_orientations = []
for s in slice_stack:
img_orientations.append(tuple(s['metadata']['ImageOrientationPatient']))
img_orientations = list(set(img_orientations))
if len(img_orientations) == 1:
return [slice_stack]
else:
slice_groups = [[] for _ in range(len(img_orientations))]
for s in slice_stack:
group = img_orientations.index(tuple(s['metadata']['ImageOrientationPatient']))
slice_groups[group].append(s)
return slice_groups
def plot_roi(slice_group, roi_center, roi_radii):
x_roi_center, y_roi_center = roi_center[0], roi_center[1]
x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1]
print('nslices', len(slice_group))
for dslice in [slice_group[len(slice_group) // 2]]:
outdata = dslice['data']
# print dslice['slice_id']
# print dslice['metadata']['SliceLocation']
# print dslice['metadata']['ImageOrientationPatient']
# print dslice['metadata']['PixelSpacing']
# print dslice['data'].shape
# print '--------------------------------------'
roi_mask = np.zeros_like(outdata[0])
roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius,
y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
fig = plt.figure(1)
fig.canvas.set_window_title(dslice['patient_id'] + dslice['slice_id'])
def init_out():
im.set_data(outdata[0])
def animate_out(i):
im.set_data(outdata[i])
return im
im = fig.gca().imshow(outdata[0], cmap='gist_gray_r', vmin=0, vmax=255)
anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=30, interval=50)
plt.show()
def get_slice2roi(data_path, plot=False):
patient_paths = sorted(glob.glob(data_path + '*/study'))
slice2roi = {}
for p in patient_paths:
patient_data = get_patient_data(p)
sorted_slices = sort_slices(patient_data)
grouped_slices = group_slices(sorted_slices)
ch_data = get_patient_ch_data(p)
ch4, ch2 = None,None
for data in ch_data:
if data['slice_id'].startswith("4"):
ch4 = data
elif data['slice_id'].startswith("2"):
ch2 = data
# init patient dict
pid = sorted_slices[0]['patient_id']
print("processing patient %s" % pid)
# print pid
slice2roi[pid] = {}
# pixel spacing doesn't change within one patient
pixel_spacing = sorted_slices[0]['metadata']['PixelSpacing'][0]
for slice_group in grouped_slices:
try:
roi_center, roi_radii = extract_roi(slice_group, pixel_spacing)
except:
print('Could not find ROI')
roi_center, roi_radii = None, None
print(roi_center, roi_radii)
if plot and roi_center and roi_radii:
pass
#plot_roi(slice_group, roi_center, roi_radii)
for s in slice_group:
sid = s['slice_id']
slice2roi[pid][sid] = {'roi_center': roi_center, 'roi_radii': roi_radii}
# project found roi_centers on the 4ch and 2ch slice
ch4_centers = []
ch2_centers = []
for slice in sorted_slices:
sid = slice['slice_id']
roi_center = slice2roi[pid][sid]['roi_center']
metadata_source = slice['metadata']
hough_roi_center = (float(roi_center[0]) / metadata_source['Rows'],
float(roi_center[1]) / metadata_source['Columns'])
if ch4 is not None:
metadata_target = ch4['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch4_centers.append(ch_roi_center)
if ch2 is not None:
metadata_target = ch2['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch2_centers.append(ch_roi_center)
if ch4 is not None:
centers = np.array(ch4_centers)
ch4_result_center = np.mean(centers, axis=0)
ch4_result_radius = np.max(np.sqrt((centers - ch4_result_center)**2))
sid = ch4['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch4_result_center), 'roi_radii': (ch4_result_radius, ch4_result_radius)}
if ch2 is not None:
centers = np.array(ch2_centers)
ch2_result_center = np.mean(centers, axis=0)
ch2_result_radius = np.max(np.sqrt((centers - ch2_result_center)**2))
sid = ch2['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch2_result_center), 'roi_radii': (ch2_result_radius, ch2_result_radius)}
filename = data_path.split('/')[-1] + '_slice2roi_joni.pkl'
with open(filename, 'wb') as f:
pickle.dump(slice2roi, f)
print('saved to ', filename)
return slice2roi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
#required.add_argument('-c', '--config',
# help='configuration to run',
# required=True)
args = parser.parse_args()
data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH]
log_path = LOGS_PATH + "generate_roi.log"
with print_to_file(log_path):
for d in data_paths:
get_slice2roi(d, plot=True)
print("log saved to '%s'" % log_path)
| 39.719577 | 128 | 0.566405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,413 | 0.160717 |
31594d1cfb6364ea7147487913cd57829792bf34 | 2,530 | py | Python | scrapers/scrapsfbos.py | ndd365/showup | fae0cdc52d306e6bbb538e8f66afe1d7a51006b8 | [
"MIT"
]
| 48 | 2016-12-12T13:59:30.000Z | 2021-01-22T01:34:39.000Z | scrapers/scrapsfbos.py | ndd365/showup | fae0cdc52d306e6bbb538e8f66afe1d7a51006b8 | [
"MIT"
]
| null | null | null | scrapers/scrapsfbos.py | ndd365/showup | fae0cdc52d306e6bbb538e8f66afe1d7a51006b8 | [
"MIT"
]
| 4 | 2017-02-02T16:59:47.000Z | 2017-08-23T11:05:47.000Z | import feedparser
from bs4 import BeautifulSoup
from dateutil.parser import parse
from datetime import timedelta
import pytz
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from oauth2client.service_account import ServiceAccountCredentials
scopes = 'https://www.googleapis.com/auth/calendar'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'client_secret.json', scopes)
http_auth = credentials.authorize(Http())
CAL = build('calendar', 'v3', http=credentials.authorize(Http()))
class Event(object):
def __init__(self, name, start_date, end_date):
self.name = name
self.start_date = start_date
self.end_date = end_date
def __repr__(self):
return self.name
def get_calendar_data():
events = []
url = "http://sfbos.org/events/feed"
feed = feedparser.parse(url)
for item in feed["items"]:
event_name = item["title"]
event_details=item["summary_detail"]["value"]
soup = BeautifulSoup(event_details, 'html.parser')
start_date_unaware = parse(soup.span.string)
start_date = start_date_unaware.replace(tzinfo=pytz.UTC)
end_date = start_date + timedelta(hours=1)
event = Event(event_name, start_date, end_date)
print event
events.append(event)
return events
def sync_to_google_calendar(events):
for event in events:
GMT_OFF = '-07:00' # PDT/MST/GMT-7
start_date = event.start_date.isoformat()
end_date = event.end_date.isoformat()
gcal_event = {
'summary': event.name,
'start': {'dateTime': start_date},
'end': {'dateTime': end_date},
'attendees': [
# {'email': '[email protected]'},
# {'email': '[email protected]'},
],
}
print gcal_event
e = CAL.events().insert(calendarId='[email protected]',
sendNotifications=True, body=gcal_event).execute()
print e
def print_calendars():
page_token = None
while True:
calendar_list = CAL.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
print calendar_list_entry
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
events = get_calendar_data()
sync_to_google_calendar(events)
| 24.326923 | 98 | 0.650988 | 218 | 0.086166 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.149012 |
315a3b06d331b60c879edde3bb3f8225ff91970d | 2,280 | py | Python | app/api/v2/models/product.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
]
| null | null | null | app/api/v2/models/product.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
]
| 21 | 2018-10-16T09:29:03.000Z | 2022-03-11T23:31:35.000Z | app/api/v2/models/product.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
]
| null | null | null | import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_book(book):
"""Formats the results to a dictionary"""
book = {
"id": book[0],
"title": book[1],
"description": book[2],
"category": book[3],
"price": book[4],
"quantity": book[5],
"minimum": book[6],
"image_url": book[7],
"created_by": book[8],
"updated_by": book[9],
"created_at": str(book[10])
}
return book
class ProductModel(Db):
"""Product Model. Books stuff here"""
def get_all_books(self):
"""Gets all books from the db"""
booklist = []
for book in Db().get_query('books'):
details = format_book(book)
booklist.append(details)
return booklist
def get_single_book(self, param, this_row):
"""Gets a single book"""
books = [row for row in Db().get_query(
'books') if row[this_row] == param]
if books:
book = books[0]
return format_book(book)
def add_new_book(self, book):
"""Adds a new book to the db"""
try:
Db().db_query(f"""
INSERT INTO books (title, description, category, price, quantity, minimum, image_url, created_by)
VALUES ('{book[0]}', '{book[1]}', '{book[2]}', {book[3]}, {book[4]}, {book[5]}, '{book[6]}', {book[7]});
""")
except Exception:
return "Failed to add", 500
def edit_book(self, book_id, book):
"""Updates a book's details"""
Db().db_query(f"""UPDATE books SET title = '{book[0]}', description = '{book[1]}', category = '{book[2]}', price = {book[3]}, quantity = {book[4]}, minimum = {book[5]}, image_url = '{book[6]}', updated_by = {book[7]} WHERE id = {book_id};""")
def sell_book(self, book_id, quantity):
"""Updates a book's quantity"""
Db().db_query(f"""UPDATE books SET quantity = quantity - {quantity} WHERE id = {book_id};""")
def delete_book(self, book_id):
"""Deletes a book"""
try:
Db().db_query(f"""DELETE FROM books WHERE id = {book_id};""")
except Exception:
return "Failed", 500
| 29.230769 | 251 | 0.542105 | 1,694 | 0.742982 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.432895 |
315b4dedab79caa0f06b6a0165468d87637edc35 | 13,809 | py | Python | pbq/pbq.py | amirdor/pbq | 7e83a461c10d1d7284ccfb0ad278285be57d859d | [
"MIT"
]
| null | null | null | pbq/pbq.py | amirdor/pbq | 7e83a461c10d1d7284ccfb0ad278285be57d859d | [
"MIT"
]
| null | null | null | pbq/pbq.py | amirdor/pbq | 7e83a461c10d1d7284ccfb0ad278285be57d859d | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Main module."""
import os
from google.cloud import bigquery
from pbq.query import Query
from google.cloud import bigquery_storage_v1beta1
from google.cloud.exceptions import NotFound
from google.api_core.exceptions import BadRequest
import pandas as pd
import datetime
class PBQ(object):
"""
bigquery driver using the google official API
Attributes
------
query : str
the query
query_obj : Query
pbq.Query object
client : Client
the client object for bigquery
bqstorage_client : BigQueryStorageClient
the google storage client object
Methods
------
to_dataframe(save_query=False, **params)
return the query results as data frame
to_csv(filename, sep=',', save_query=False, **params)
save the query results to a csv file
save_to_table(table, dataset, project=None, replace=True, partition=None)
save query to table
run_query()
simply execute your query
table_details(table, dataset, project)
get the information about the table
Static Methods
------
save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0,
replace=True, partition=None)
save file to table, it can be partitioned and it can append to existing table.
the supported formats are CSV or PARQUET
save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True,
partition=None)
same as save_file_to_table, just with a pandas DataFrame
table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference)
check if table exists - if True - table exists else not exists
Examples
------
getting query to dataframe
>>> from pbq import Query, PBQ
>>> query = Query("select * from table")
>>> print("the query price:", query.price)
>>> if not query.validate():
>>> raise RuntimeError("table not valid")
>>> pbq = PBQ(query)
>>> pbq.to_dataframe()
saving query to csv
>>> from pbq import Query, PBQ
>>> query = Query("select * from table")
>>> pbq = PBQ(query)
>>> pbq.to_csv()
saving dataframe to table
>>> import pandas as pd
>>> from pbq import Query, PBQ
>>> df = pd.DataFrame()
>>> PBQ.save_dataframe_to_table(df, 'table', 'dataset', 'project_id', partition='20191013', replace=False)
"""
def __init__(self, query: Query, project=None):
"""
bigquery driver using the google official API
:param query: Query object
:param project: str
the BQ project
"""
self.query = query.query
self.query_obj = query
self.project = project
if project:
self.client = bigquery.Client(project=project)
else:
self.client = bigquery.Client()
self.bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient()
def to_dataframe(self, save_query=False, **params):
"""
return the query results as data frame
in order to save the query to a table as well as getting the dataframe, send a dict as params with:
- table
- dataset
it will save to the same project
:param save_query: boolean
if to save the query to a table also
:param params: dict
when `save_query` flag is on you need to give the relevant params
:return: pd.DataFrame
the query results
"""
job_config = bigquery.QueryJobConfig()
if save_query:
table_ref = self.client.dataset(params['dataset']).table(params['table'])
job_config.destination = table_ref
query_job = self.client.query(query=self.query, job_config=job_config)
query_job_res = query_job.result()
df = query_job_res.to_dataframe(bqstorage_client=self.bqstorage_client)
return df
def to_csv(self, filename, sep=',', save_query=False, **params):
"""
save the query results to a csv file
in order to save the query to a table as well as getting the dataframe, send a dict as params with:
- table
- dataset
it will save to the same project
:param filename: str
with the path to save the file
:param sep: str
separator to the csv file
:param save_query: boolean
if to save the query to a table also
:param params: dict
when `save_query` flag is on you need to give the relevant params
"""
df = self.to_dataframe(save_query, **params)
df.to_csv(filename, sep=sep, index=False)
def run_query(self):
"""
execute your query
"""
# Set the destination table
client = self.client
query_job = client.query(self.query)
query_job.result()
print('Done running your amazing query')
def save_to_table(self, table, dataset, project=None, replace=True, partition=None):
"""
save query to table
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param replace: boolean
if set as true - it will replace the table, else append to table (default: True)
:param partition: str
partition in YYYYMMDD format, e.g. '20191013' (default: None)
"""
job_config = bigquery.QueryJobConfig()
# Set the destination table
client = self.client
if partition:
table = '{0}${1}'.format(table, partition)
table_ref = client.dataset(dataset).table(table.split('$')[0])
exists_ok = PBQ._writing_disposition(job_config, replace)
if project:
table_ref = client.dataset(dataset, project=project).table(table)
PBQ._create_table(client, exists_ok, partition, replace, table_ref)
job_config.destination = table_ref
query_job = client.query(self.query, job_config=job_config)
query_job.result()
print('Query results loaded to table {}'.format(table_ref.path))
@staticmethod
def _writing_disposition(job_config: bigquery.QueryJobConfig, replace):
exists_ok = False
if replace:
exists_ok = True
job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
else:
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
return exists_ok
@staticmethod
def _create_table(client: bigquery.Client, exists_ok, partition, replace, table_ref):
if (partition and not PBQ.table_exists(client, table_ref)) or (not partition and replace):
bq_table = bigquery.Table(table_ref)
if partition:
time_partitioning = bigquery.TimePartitioning()
bq_table.time_partitioning = time_partitioning
client.create_table(bq_table, exists_ok=exists_ok)
@staticmethod
def save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0,
replace=True, partition=None):
"""
save file to table, it can be partitioned and it can append to existing table.
the supported formats are CSV or PARQUET
:param filename: str
with the path to save the file
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param file_format: str
possible file format (CSV, PARQUET) (default: CSV)
:param max_bad_records: int
number of bad records allowed in file (default: 0)
:param replace: boolean
if set as true - it will replace the table, else append to table (default: True)
:param partition: str
partition in YYYYMMDD format, e.g. '20191013' (default: None)
"""
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset)
table_ref = dataset_ref.table(table)
job_config = bigquery.LoadJobConfig()
job_config.max_bad_records = max_bad_records
job_config.source_format = file_format
exists_ok = PBQ._writing_disposition(job_config, replace)
if file_format == bigquery.SourceFormat.CSV:
job_config.skip_leading_rows = 1
job_config.autodetect = True
PBQ._create_table(client, exists_ok, partition, replace, table_ref)
if not partition:
with open(filename, "rb") as source_file:
job = client.load_table_from_file(source_file, table_ref, job_config=job_config)
job.result() # Waits for table load to complete.
print("Loaded {} rows into {}:{}.".format(job.output_rows, dataset, table))
else:
print('fallback loading by CMD command due to missing api feature for partition')
table = '{0}${1}'.format(table, partition)
cmd = "bq load"
if replace:
cmd = "{} --replace".format(cmd)
cmd = "{cmd} --source_format={file_format} '{project}:{dataset}.{tbl_name}' {filename}". \
format(cmd=cmd, tbl_name=table, filename=filename, project=project, dataset=dataset,
file_format=file_format)
os.system(cmd)
@staticmethod
def save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True,
partition=None, validate_params=False):
"""
save pd.DataFrame object to table
:param df: pd.DataFrame
the dataframe you want to save
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:param max_bad_records: int
number of bad records allowed in file (default: 0)
:param replace: boolean
if set as true - it will replace the table, else append to table (default: True)
:param partition: str
partition in YYYYMMDD format, e.g. '20191013' (default: None)
:param validate_params: boolean
validate the schema of the table to the dataframe object (default: False)
"""
now = datetime.datetime.now()
random_string = '{}'.format(now.strftime('%y%m%d%H%M%S'))
input_path = "/tmp/tmp-{}.parquet".format(random_string)
schema = None
if validate_params:  # because of the CLI fallback, the dataframe columns need to be cast to match the table schema
table_details = PBQ.table_details(table, dataset, project)
if 'schema' in table_details:
schema = table_details['schema']
PBQ._save_df_to_parquet(df, input_path, schema=schema)
PBQ.save_file_to_table(input_path, table, dataset, project, file_format=bigquery.SourceFormat.PARQUET,
max_bad_records=max_bad_records, replace=replace, partition=partition)
@staticmethod
def _save_df_to_parquet(df, input_path, index=False, schema=None):
if schema:
for s in schema:
if s['field_type'] == 'STRING':
s['field_type'] = 'str'
if s['field_type'] == 'INTEGER':
s['field_type'] = 'int'
if s['field_type'] == 'TIMESTAMP':
df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce')
continue
if s['field_type'] == 'DATE':
df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce')
df[s['column']] = df[s['column']].dt.date
continue
df.columns = ["{}".format(col) for col in df.columns]
df.to_parquet(input_path, index=index)
@staticmethod
def table_details(table, dataset, project):
"""
return a dict object with some details about the table
:param table: str
table name
:param dataset: str
data set name
:param project: str
project name
:return: dict
with some table information like, last_modified_time, num_bytes, num_rows, and creation_time
"""
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset, project=project)
table_ref = dataset_ref.table(table)
try:
table = client.get_table(table_ref)
except NotFound as error:
return {}
schema = []
for s in table.schema:
schema.append({'column': s.name, 'field_type': s.field_type})
res = {'last_modified_time': table.modified, 'num_bytes': table.num_bytes, 'num_rows': table.num_rows,
'creation_time': table.created, 'schema': schema}
return res
@staticmethod
def table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference):
"""
check if table exists - if True - table exists else not exists
:param client: bigquery.Client object
:param table_ref: bigquery.table.TableReference object
with the table name and dataset
:return: boolean
True if table exists
False if table not exists
"""
try:
table = client.get_table(table_ref)
if table:
return True
except NotFound as error:
return False
except BadRequest as error:
return True
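# --- Illustrative usage sketch (not part of the original module) ---
# Project, dataset and table names below are placeholders, and credentials are
# assumed to come from the usual google-cloud auth environment.
def _example_partitioned_load(df):
    """Sketch: append a DataFrame to a day-partitioned table, then query it."""
    PBQ.save_dataframe_to_table(df, 'events', 'analytics', 'my-project',
                                partition='20191013', replace=False)
    query = Query("select count(*) as n from `my-project.analytics.events`")
    return PBQ(query, project='my-project').to_dataframe()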
| 33.194712 | 119 | 0.608444 | 13,509 | 0.978275 | 0 | 0 | 7,483 | 0.541893 | 0 | 0 | 6,930 | 0.501847 |
315b6ac655327cef9390472518effc0e9ec5a90d | 14,829 | py | Python | appvalidator/specprocessor.py | mstriemer/app-validator | ceaa373965192e3e08e7b38476cca09d44b345e7 | [
"BSD-3-Clause"
]
| 20 | 2015-01-16T21:35:27.000Z | 2021-11-11T00:22:43.000Z | appvalidator/specprocessor.py | mstriemer/app-validator | ceaa373965192e3e08e7b38476cca09d44b345e7 | [
"BSD-3-Clause"
]
| 14 | 2015-01-15T21:26:33.000Z | 2016-01-18T16:47:15.000Z | appvalidator/specprocessor.py | mstriemer/app-validator | ceaa373965192e3e08e7b38476cca09d44b345e7 | [
"BSD-3-Clause"
]
| 14 | 2015-02-14T22:42:40.000Z | 2021-11-11T00:22:33.000Z | import re
import types
from functools import partial
LITERAL_TYPE = types.StringTypes + (int, float, long, bool, )
class Spec(object):
"""
This object, when overridden with an object that implements a file format
specification, will perform validation on a given parsed version of the
format input.
SPEC Node Documentation:
========================
expected_type:
A type object whose type the object should match.
required_nodes:
A list of nodes that are required for the current node.
required_nodes_when:
A dict of node name/lambda pairs. If the lambda evaluates to True, a
node whose name corresponds to the node name is required.
The current node is passed as a parameter to the lambda as the only
argument.
disallowed_nodes:
A list of nodes that explicitly are disallowed in the current node.
allowed_once_nodes:
A list of nodes that are allowed only once.
allowed_nodes:
A list of nodes that are allowed multiple times.
unknown_node_level:
The message type to return when an unknown node is encountered.
child_nodes:
A dict of node definitions for nodes that can exist within this node.
max_length:
For sequence values only. An integer describing the maximum length of
the string.
not_empty:
A boolean value describing whether the string/list/dict can be empty.
values:
A list of possible values for the node. Only applies to lists and
literal nodes.
value_matches:
If `values` is not set, the value must match this regex. Only applies
to string nodes.
process:
A lambda function that returns a function to process the node. The
lambda accepts one parameter (self) and should return a function that
accepts two parameters (self, node).
child_process:
A lambda function (similar to `process` that returns a function to
process a child node. The lambda accepts one parameter (self) and
should return a function that accepts three parameters (self, node_name,
node).
If this is set, no further testing will take place on child nodes.
"""
SPEC_NAME = "Specification"
MORE_INFO = "You can find more info online."
SPEC = None
def __init__(self, data, err):
self.data = self.parse(data)
self.err = err
self.error = partial(self._err_message, self.err.error)
self.warning = partial(self._err_message, self.err.warning)
self.notice = partial(self._err_message, self.err.notice)
self.err_map = {"error": self.error,
"warning": self.warning,
"notice": self.notice}
self.path = []
def _err_message(self, func, *args, **kwargs):
if self.path:
nodepath = "Node: %s" % self._get_path()
if isinstance(kwargs["description"], list):
kwargs["description"].append(nodepath)
else:
kwargs["description"] = [
kwargs["description"], nodepath]
func(*args, **kwargs)
def _message(self, type_, *args, **kwargs):
kwargs[type_] = kwargs.pop("message")
self.err_map[type_](*args, **kwargs)
def validate(self):
# Validate the root node.
root_name, root_node = self.get_root_node(self.data)
root_val_result = self.validate_root_node(root_node)
if root_val_result == False:
return
# Iterate the tree and validate as we go.
self.iterate(root_name, root_node, self.SPEC)
def parse(self, data): pass
def validate_root_node(self, node): pass
def get_root_node(self, data):
"""
We expect this function to return a tuple:
("Root Node Name", root_node)
"""
def has_attribute(self, node, key): pass
def get_attribute(self, node, key): pass
def has_child(self, node, child_name): pass
def get_children(self, node):
"""
This function should return a list of (child_name, child)-form tuples.
"""
def iterate(self, branch_name, branch, spec_branch):
self.path.append(branch_name)
self._iterate(branch_name, branch, spec_branch)
self.path.pop()
def _get_path(self):
return ' > '.join(self.path)
def _iterate(self, branch_name, branch, spec_branch):
"""Iterate the tree of nodes and validate as we go."""
# Check that the node is of the proper type. If it isn't, then we need
# to stop iterating at this point.
exp_type = spec_branch.get("expected_type")
if (exp_type and
not isinstance(branch, exp_type) or
# Handle `isinstance(True, int) == True` :(
(isinstance(branch, bool) and
(exp_type == int if isinstance(exp_type, type) else
bool not in exp_type))):
self.error(
err_id=("spec", "iterate", "bad_type"),
error="%s's `%s` was of an unexpected type." %
(self.SPEC_NAME, branch_name),
description=["While validating a %s, a `%s` was encountered "
"which is of an improper type." %
(self.SPEC_NAME, branch_name),
"Found: %s" % repr(branch),
self.MORE_INFO])
return
# Handle any generic processing.
if "process" in spec_branch:
# Let the spec processor resolve the processor and then run the
# processor.
spec_branch["process"](self)(branch)
if "not_empty" in spec_branch and not branch:
self.error(
err_id=("spec", "iterate", "empty"),
error="`%s` is empty." % branch_name,
description=["A value was expected for `%s`, but one wasn't "
"found." % branch_name,
self.MORE_INFO])
# If the node isn't an object...
if not isinstance(branch, dict):
if "values" in spec_branch and branch not in spec_branch["values"]:
self.error(
err_id=("spec", "iterate", "bad_value"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating a "
"`%s` containing the value '%s'. This value "
"is not appropriate for this type of "
"element." %
(branch_name, self.SPEC_NAME, branch),
self.MORE_INFO])
elif ("value_matches" in spec_branch and
isinstance(branch, types.StringTypes)):
raw_pattern = spec_branch["value_matches"]
if not re.match(raw_pattern, branch):
self.error(
err_id=("spec", "iterate", "value_pattern_fail"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating "
"a `%s`. Its value does not match the "
"pattern required for `%s`s." %
(branch_name, self.SPEC_NAME,
branch_name),
"Found value: %s" % branch,
"Pattern: %s" % raw_pattern,
self.MORE_INFO])
if ("max_length" in spec_branch and
len(branch) > spec_branch["max_length"]):
self.error(
err_id=("spec", "iterate", "max_length"),
error="`%s` has exceeded its maximum length." % branch_name,
description=["`%s` has a maximum length (%d), which has "
"been exceeded (%d)." %
(branch_name, spec_branch["max_length"],
len(branch)),
self.MORE_INFO])
# The rest of the tests are for child items.
if not isinstance(branch, (list, tuple)):
return
if "child_nodes" in spec_branch:
for child in branch:
self.iterate(branch_name + " descendant", child,
spec_branch["child_nodes"])
# We've got nothing else to do with lists.
return
# If we need to process the child nodes individually, do that now.
if "child_process" in spec_branch:
processor = spec_branch["child_process"](self)
for child_name, child in self.get_children(branch):
processor(child_name, child)
# If there's nothing else to do, don't go down that path.
if ("required_nodes" not in spec_branch and
"required_nodes_when" not in spec_branch and
"disallowed_nodes" not in spec_branch):
return
considered_nodes = set()
# Check that all required node as present.
if "required_nodes" in spec_branch:
considered_nodes.update(spec_branch["required_nodes"])
for req_node in [n for n in spec_branch["required_nodes"] if
not self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "missing_req"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node of the %s expects a `%s` "
"element, which was not found." %
(branch_name, self.SPEC_NAME, req_node),
self.MORE_INFO])
# Check that conditionally required nodes are present.
if "required_nodes_when" in spec_branch:
considered_nodes.update(spec_branch["required_nodes_when"].keys())
for req_node in [name for name, cond in
spec_branch["required_nodes_when"].items() if
cond(branch) and not self.has_child(branch, name)]:
self.error(
err_id=("spec", "iterate", "missing_req_cond"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node, under the current "
"circumstances, is missing a `%s` element. "
"This is a required condition of a %s." %
(branch_name, req_node, self.SPEC_NAME),
self.MORE_INFO])
# Check that there are no disallowed nodes.
if "disallowed_nodes" in spec_branch:
disallowed_nodes = spec_branch["disallowed_nodes"]
considered_nodes.update(disallowed_nodes)
for dnode in [n for n in disallowed_nodes if
self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "disallowed"),
error="%s found `%s`, which is not allowed." %
(self.SPEC_NAME, dnode),
description=["The '%s' node contains `%s`, which is a "
"disallowed element. It should be removed." %
(branch_name, dnode),
self.MORE_INFO])
if ("allowed_nodes" not in spec_branch and
"allowed_once_nodes" not in spec_branch):
return
# Check that allowed nodes are obeyed.
allowed_nodes = set(spec_branch.setdefault("allowed_nodes", []))
allowed_once_nodes = spec_branch.setdefault("allowed_once_nodes", [])
allowed_nodes.update(allowed_once_nodes)
child_node_specs = spec_branch.setdefault("child_nodes", {})
seen_nodes = set()
warned_nodes = set()
for child_name, child in self.get_children(branch):
cspec_branch = None
# Process the node first.
if child_name in child_node_specs:
cspec_branch = child_node_specs[child_name]
elif "*" in child_node_specs:
cspec_branch = child_node_specs["*"]
if cspec_branch is not None:
# If it's a lazily evaluated branch, evaluate it now.
if isinstance(cspec_branch, types.LambdaType):
cspec_branch = cspec_branch(self)
# Iterate the node.
self.iterate(child_name, child, cspec_branch)
# If we've seen a node before that's only supposed to be seen a
# single time, warn about it.
if child_name in allowed_once_nodes and child_name in seen_nodes:
# Don't warn about the same node multiple times.
if child_name in warned_nodes:
continue
self.error(
err_id=("spec", "iterate", "allow_once_multiple"),
error="%s found `%s` more than once." %
(self.SPEC_NAME, child_name),
description=["%ss may only contain a single `%s` element, "
"however, it was encountered multiple times." %
(self.SPEC_NAME, child_name),
self.MORE_INFO])
continue
# Remember that we've seen this node.
seen_nodes.add(child_name)
if child_name in considered_nodes:
continue
# If the child isn't allowed, throw an error.
if child_name not in allowed_nodes and "*" not in allowed_nodes:
self._message(
spec_branch.get("unknown_node_level", "warning"),
err_id=("spec", "iterate", "not_allowed"),
message="`%s` is not a recognized element within a %s" %
(child_name, self.SPEC_NAME),
description=["While iterating a %s, a `%s` was found "
"within a %s, which is not valid." %
(self.SPEC_NAME, child_name, branch_name),
self.MORE_INFO])
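# --- Illustrative SPEC sketch (not taken from a real validator) ---
# The node names and constraints below are invented to show how the keys described
# in Spec's docstring fit together; a concrete subclass still has to implement
# parse(), get_root_node(), get_children() and friends for its file format.
EXAMPLE_SPEC = {
    "expected_type": dict,
    "required_nodes": ["name"],
    "allowed_once_nodes": ["version"],
    "allowed_nodes": ["description"],
    "unknown_node_level": "warning",
    "child_nodes": {
        "name": {"expected_type": types.StringTypes,
                 "not_empty": True,
                 "max_length": 128},
        "version": {"expected_type": types.StringTypes,
                    "value_matches": r"^\d+(\.\d+)*$"},
    },
}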
| 43.872781 | 80 | 0.534763 | 14,709 | 0.991908 | 0 | 0 | 0 | 0 | 0 | 0 | 5,607 | 0.37811 |
315b8d418921022e63456c61eb0e983243286ff8 | 245 | py | Python | head_first_v2/ch4/modules/setup.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
]
| null | null | null | head_first_v2/ch4/modules/setup.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
]
| null | null | null | head_first_v2/ch4/modules/setup.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
]
| null | null | null | from setuptools import setup
setup(
name='lsearch',
version='1.0',
description='The Head First Python Search Tools',
author='HF Python 2e',
author_email='[email protected]',
url='headfirstlabs.com',
py_modules=['lsearch'],
)
| 24.5 | 109 | 0.685714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.44898 |
315b93d96e58679d201ae083e0a5ae302d382049 | 1,519 | py | Python | desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
]
| 219 | 2017-12-12T09:42:46.000Z | 2022-03-13T08:25:13.000Z | desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
]
| 11 | 2017-12-14T08:14:51.000Z | 2021-08-09T18:37:45.000Z | desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
]
| 45 | 2017-12-14T07:26:36.000Z | 2022-03-11T09:36:56.000Z | from desktop_local_tests.public_ip_during_disruption import PublicIPDuringDisruptionTestCase
from desktop_local_tests.windows.windows_reorder_adapters_disrupter import WindowsReorderAdaptersDisrupter
class TestWindowsPublicIPDisruptReorderAdapters(PublicIPDuringDisruptionTestCase):
'''Summary:
Tests whether traffic leaving the user's device has the public IP hidden when the adapter order
is changed.
Details:
This test will connect to VPN then swap the priority of the primary and secondary network
adapters. The test then queries a webpage to detect it's public IP.
Discussion:
It's not 100% clear if, in the real world, adapters can change their order without user
involvement. It is still however a good stress test of the application.
On Windows adapter order is determined by the interface metric. It can be manually set but
otherwise it is determined by the system by deciding how "good" an adapter is, e.g. what is the
throughput. In theory that means metrics can change dynamically.
Weaknesses:
The time taken to perform each IP request is relatively long. Tests using IPResponder should be
preferred over these tests.
Scenarios:
Requires two active adapters.
TODO:
Consider a variant which changes the network "Location". This is much more likely to be
something a user might do.
'''
def __init__(self, devices, parameters):
super().__init__(WindowsReorderAdaptersDisrupter, devices, parameters)
| 36.166667 | 106 | 0.768269 | 1,317 | 0.867018 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.726794 |
315c5ece992cd3a2685eecad7131fd0199252e5a | 16,739 | py | Python | rotkehlchen/api/server.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
]
| 137 | 2018-03-05T11:53:29.000Z | 2019-11-03T16:38:42.000Z | rotkehlchen/api/server.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
]
| 385 | 2018-03-08T12:43:41.000Z | 2019-11-10T09:15:36.000Z | rotkehlchen/api/server.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
]
| 59 | 2018-03-08T10:08:27.000Z | 2019-10-26T11:30:44.000Z | import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import werkzeug
from flask import Blueprint, Flask, Response, abort, jsonify
from flask.views import MethodView
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket import Resource as WebsocketResource, WebSocketServer
from marshmallow import Schema
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import NotFound
from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result
from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser
from rotkehlchen.api.v1.resources import (
AaveBalancesResource,
AaveHistoryResource,
AccountingReportDataResource,
AccountingReportsResource,
AdexBalancesResource,
AdexHistoryResource,
AllAssetsResource,
AllBalancesResource,
AssetIconsResource,
AssetMovementsResource,
AssetsReplaceResource,
AssetsTypesResource,
AssetUpdatesResource,
AssociatedLocations,
AsyncTasksResource,
AvalancheTransactionsResource,
BalancerBalancesResource,
BalancerEventsHistoryResource,
BalancerTradesHistoryResource,
BinanceAvailableMarkets,
BinanceUserMarkets,
BlockchainBalancesResource,
BlockchainsAccountsResource,
BTCXpubResource,
CompoundBalancesResource,
CompoundHistoryResource,
CounterpartiesResource,
CurrentAssetsPriceResource,
DatabaseBackupsResource,
DatabaseInfoResource,
DataImportResource,
DBSnapshotDeletingResource,
DBSnapshotDownloadingResource,
DBSnapshotExportingResource,
DBSnapshotImportingResource,
DefiBalancesResource,
ERC20TokenInfo,
ERC20TokenInfoAVAX,
Eth2DailyStatsResource,
Eth2StakeDepositsResource,
Eth2StakeDetailsResource,
Eth2ValidatorsResource,
EthereumAirdropsResource,
EthereumAssetsResource,
EthereumModuleDataResource,
EthereumModuleResource,
EthereumTransactionsResource,
ExchangeBalancesResource,
ExchangeRatesResource,
ExchangesDataResource,
ExchangesResource,
ExternalServicesResource,
HistoricalAssetsPriceResource,
HistoryActionableItemsResource,
HistoryBaseEntryResource,
HistoryDownloadingResource,
HistoryExportingResource,
HistoryProcessingResource,
HistoryStatusResource,
IgnoredActionsResource,
IgnoredAssetsResource,
InfoResource,
LedgerActionsResource,
LiquityStakingHistoryResource,
LiquityStakingResource,
LiquityTrovesHistoryResource,
LiquityTrovesResource,
LoopringBalancesResource,
MakerdaoDSRBalanceResource,
MakerdaoDSRHistoryResource,
MakerdaoVaultDetailsResource,
MakerdaoVaultsResource,
ManuallyTrackedBalancesResource,
MessagesResource,
NamedEthereumModuleDataResource,
NamedOracleCacheResource,
NFTSBalanceResource,
NFTSResource,
OraclesResource,
OwnedAssetsResource,
PeriodicDataResource,
PickleDillResource,
PingResource,
QueriedAddressesResource,
ReverseEnsResource,
SettingsResource,
StakingResource,
StatisticsAssetBalanceResource,
StatisticsNetvalueResource,
StatisticsRendererResource,
StatisticsValueDistributionResource,
SushiswapBalancesResource,
SushiswapEventsHistoryResource,
SushiswapTradesHistoryResource,
TagsResource,
TradesResource,
UniswapBalancesResource,
UniswapEventsHistoryResource,
UniswapTradesHistoryResource,
UserAssetsResource,
UserPasswordChangeResource,
UserPremiumKeyResource,
UserPremiumSyncResource,
UsersByNameResource,
UsersResource,
WatchersResource,
YearnVaultsBalancesResource,
YearnVaultsHistoryResource,
YearnVaultsV2BalancesResource,
YearnVaultsV2HistoryResource,
create_blueprint,
)
from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp
from rotkehlchen.logging import RotkehlchenLogsAdapter
URLS = List[
Union[
Tuple[str, Type[MethodView]],
Tuple[str, Type[MethodView], str],
]
]
URLS_V1: URLS = [
('/users', UsersResource),
('/watchers', WatchersResource),
('/users/<string:name>', UsersByNameResource),
('/users/<string:name>/password', UserPasswordChangeResource),
('/premium', UserPremiumKeyResource),
('/premium/sync', UserPremiumSyncResource),
('/settings', SettingsResource),
('/tasks/', AsyncTasksResource),
('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'),
('/exchange_rates', ExchangeRatesResource),
('/external_services/', ExternalServicesResource),
('/oracles', OraclesResource),
('/oracles/<string:oracle>/cache', NamedOracleCacheResource),
('/exchanges', ExchangesResource),
('/exchanges/balances', ExchangeBalancesResource),
(
'/exchanges/balances/<string:location>',
ExchangeBalancesResource,
'named_exchanges_balances_resource',
),
('/assets/<string:asset>/icon', AssetIconsResource),
('/trades', TradesResource),
('/ledgeractions', LedgerActionsResource),
('/asset_movements', AssetMovementsResource),
('/tags', TagsResource),
('/exchanges/binance/pairs', BinanceAvailableMarkets),
('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets),
('/exchanges/data/', ExchangesDataResource),
('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'),
('/balances/blockchains', BlockchainBalancesResource),
(
'/balances/blockchains/<string:blockchain>',
BlockchainBalancesResource,
'named_blockchain_balances_resource',
),
('/balances/', AllBalancesResource),
('/balances/manual', ManuallyTrackedBalancesResource),
('/statistics/netvalue', StatisticsNetvalueResource),
('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource),
('/statistics/value_distribution', StatisticsValueDistributionResource),
('/statistics/renderer', StatisticsRendererResource),
('/messages/', MessagesResource),
('/periodic/', PeriodicDataResource),
('/history/', HistoryProcessingResource),
('/history/status', HistoryStatusResource),
('/history/export/', HistoryExportingResource),
('/history/download/', HistoryDownloadingResource),
('/history/events', HistoryBaseEntryResource),
('/history/actionable_items', HistoryActionableItemsResource),
('/reports/', AccountingReportsResource),
(
'/reports/<int:report_id>',
AccountingReportsResource,
'per_report_resource',
),
(
'/reports/<int:report_id>/data',
AccountingReportDataResource,
'per_report_data_resource',
),
('/queried_addresses', QueriedAddressesResource),
('/blockchains/ETH/transactions', EthereumTransactionsResource),
(
'/blockchains/ETH/transactions/<string:address>',
EthereumTransactionsResource,
'per_address_ethereum_transactions_resource',
),
('/blockchains/ETH2/validators', Eth2ValidatorsResource),
('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource),
('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource),
('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource),
('/blockchains/ETH/defi', DefiBalancesResource),
('/blockchains/ETH/airdrops', EthereumAirdropsResource),
('/blockchains/ETH/erc20details/', ERC20TokenInfo),
('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource),
('/blockchains/ETH/modules/data', EthereumModuleDataResource),
('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource),
('/blockchains/ETH/modules/', EthereumModuleResource),
('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource),
('/blockchains/ETH/modules/makerdao/dsrhistory', MakerdaoDSRHistoryResource),
('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
('/blockchains/BTC/xpub', BTCXpubResource),
('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
(
'/blockchains/AVAX/transactions/<string:address>',
AvalancheTransactionsResource,
'per_address_avalanche_transactions_resource',
),
('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
('/assets', OwnedAssetsResource),
('/assets/types', AssetsTypesResource),
('/assets/replace', AssetsReplaceResource),
('/assets/all', AllAssetsResource),
('/assets/ethereum', EthereumAssetsResource),
('/assets/prices/current', CurrentAssetsPriceResource),
('/assets/prices/historical', HistoricalAssetsPriceResource),
('/assets/ignored', IgnoredAssetsResource),
('/assets/updates', AssetUpdatesResource),
('/assets/user', UserAssetsResource),
('/actions/ignored', IgnoredActionsResource),
('/info', InfoResource),
('/ping', PingResource),
('/import', DataImportResource),
('/nfts', NFTSResource),
('/nfts/balances', NFTSBalanceResource),
('/database/info', DatabaseInfoResource),
('/database/backups', DatabaseBackupsResource),
('/locations/associated', AssociatedLocations),
('/staking/kraken', StakingResource),
('/snapshot/download', DBSnapshotDownloadingResource),
('/snapshot/export', DBSnapshotExportingResource),
('/snapshot/import', DBSnapshotImportingResource),
('/snapshot/delete', DBSnapshotDeletingResource),
('/ens/reverse', ReverseEnsResource),
]
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def setup_urls(
rest_api: RestAPI,
blueprint: Blueprint,
urls: URLS,
) -> None:
for url_tuple in urls:
if len(url_tuple) == 2:
route, resource_cls = url_tuple # type: ignore
endpoint = resource_cls.__name__.lower()
elif len(url_tuple) == 3:
route, resource_cls, endpoint = url_tuple # type: ignore
else:
raise ValueError(f"Invalid URL format: {url_tuple!r}")
blueprint.add_url_rule(
route,
view_func=resource_cls.as_view(endpoint, rest_api_object=rest_api),
)
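# A minimal sketch of the two URL-tuple shapes accepted by setup_urls above, using entries
# that already appear in URLS_V1:
#
#     ('/ping', PingResource)                          # endpoint name derived from the class name
#     (
#         '/blockchains/AVAX/transactions/<string:address>',
#         AvalancheTransactionsResource,
#         'per_address_avalanche_transactions_resource',
#     )                                                # explicit endpoint name for a second route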
def endpoint_not_found(e: NotFound) -> Response:
msg = 'invalid endpoint'
# The isinstance check is because I am not sure if `e` is always going to
# be a "NotFound" error here
if isinstance(e, NotFound):
msg = e.description
return api_response(wrap_in_fail_result(msg), HTTPStatus.NOT_FOUND)
@parser.error_handler # type: ignore
@resource_parser.error_handler
@ignore_kwarg_parser.error_handler
def handle_request_parsing_error(
err: ValidationError,
_request: werkzeug.local.LocalProxy,
_schema: Schema,
error_status_code: Optional[int], # pylint: disable=unused-argument
error_headers: Optional[Dict], # pylint: disable=unused-argument
) -> None:
""" This handles request parsing errors generated for example by schema
field validation failing."""
msg = str(err)
if isinstance(err.messages, dict):
# first key is just the location. Ignore
key = list(err.messages.keys())[0]
msg = json.dumps(err.messages[key])
elif isinstance(err.messages, list):
msg = ','.join(err.messages)
err_response = jsonify(result=None, message=msg)
err_response.status_code = HTTPStatus.BAD_REQUEST
abort(err_response)
class APIServer():
_api_prefix = '/api/1'
def __init__(
self,
rest_api: RestAPI,
ws_notifier: RotkiNotifier,
            cors_domain_list: Optional[List[str]] = None,
) -> None:
flask_app = Flask(__name__)
if cors_domain_list:
CORS(flask_app, origins=cors_domain_list)
blueprint = create_blueprint(self._api_prefix)
setup_urls(
blueprint=blueprint,
rest_api=rest_api,
urls=URLS_V1,
)
self.rest_api = rest_api
self.rotki_notifier = ws_notifier
self.flask_app = flask_app
self.blueprint = blueprint
self.wsgiserver: Optional[WSGIServer] = None
self.flask_app.register_blueprint(self.blueprint)
self.ws_server: Optional[WebSocketServer] = None
self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)
self.flask_app.register_error_handler(Exception, self.unhandled_exception)
@staticmethod
def unhandled_exception(exception: Exception) -> Response:
""" Flask.errorhandler when an exception wasn't correctly handled """
log.critical(
'Unhandled exception when processing endpoint request',
exc_info=True,
exception=str(exception),
)
return api_response(wrap_in_fail_result(str(exception)), HTTPStatus.INTERNAL_SERVER_ERROR)
def run(self, host: str = '127.0.0.1', port: int = 5042, **kwargs: Any) -> None:
"""This is only used for the data faker and not used in production"""
self.flask_app.run(host=host, port=port, **kwargs)
def start(
self,
host: str = '127.0.0.1',
rest_port: int = 5042,
websockets_port: int = 5043,
) -> None:
"""This is used to start the API server in production"""
wsgi_logger = logging.getLogger(__name__ + '.pywsgi')
self.wsgiserver = WSGIServer(
listener=(host, rest_port),
application=self.flask_app,
log=wsgi_logger,
error_log=wsgi_logger,
)
msg = f'rotki REST API server is running at: {host}:{rest_port}'
print(msg)
log.info(msg)
self.wsgiserver.start()
self.ws_server = WebSocketServer(
listener=(host, websockets_port),
application=WebsocketResource([
('^/', RotkiWSApp),
]),
debug=False,
environ={'rotki_notifier': self.rotki_notifier},
)
msg = f'rotki Websockets API server is running at: {host}:{websockets_port}'
print(msg)
log.info(msg)
self.ws_server.start()
def stop(self, timeout: int = 5) -> None:
"""Stops the API server. If handlers are running after timeout they are killed"""
if self.wsgiserver is not None:
self.wsgiserver.stop(timeout)
self.wsgiserver = None
if self.ws_server is not None:
self.ws_server.stop(timeout)
            self.ws_server = None
self.rest_api.stop()
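# A minimal usage sketch, assuming `rest_api` and `ws_notifier` are already-built RestAPI and
# RotkiNotifier instances (their construction is application specific and elided) and that the
# CORS origin below is only an example value:
#
#     server = APIServer(rest_api=rest_api, ws_notifier=ws_notifier,
#                        cors_domain_list=['http://localhost:8080'])
#     server.start(host='127.0.0.1', rest_port=5042, websockets_port=5043)
#     ...
#     server.stop(timeout=5)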
| 38.837587 | 98 | 0.714798 | 3,098 | 0.185077 | 0 | 0 | 1,330 | 0.079455 | 0 | 0 | 4,449 | 0.265786 |
315c82c7ef0cdf26d1ed8dc3787880aa67476f2f | 1,329 | py | Python | stability/stairs_contacts.py | haudren/stability-polygon | b5e42bbd6eda2426a2c0d70716fbf956ce63f290 | [
"MIT"
]
| null | null | null | stability/stairs_contacts.py | haudren/stability-polygon | b5e42bbd6eda2426a2c0d70716fbf956ce63f290 | [
"MIT"
]
| null | null | null | stability/stairs_contacts.py | haudren/stability-polygon | b5e42bbd6eda2426a2c0d70716fbf956ce63f290 | [
"MIT"
]
| null | null | null | import numpy as np
pos = []
normals = []
p = [[-0.4722227, -0.24517583, -0.6370031]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.2549828, -0.24587737, -0.63704705]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.25787751, -0.38255749, -0.63705089]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.47206733, -0.38317576, -0.6370076]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
#Contact lgripper/handrail
#Left
p = [[0.3651077, 0.33419711, 0.63609439]]
n = [[-3.39491173e-05, 9.99999875e-01, 4.99472000e-04]]
pos.append(p)
normals.append(n)
#Right
#p = [[0.36510907, 0.29419711, 0.63607441]]
#p = [[0.3651077, 0.33419711, 0.63609439]]
#n = [[3.44761855e-05, -9.99999874e-01, -5.00077386e-04]]
#pos.append(p)
#normals.append(n)
#Bottom
#p = [[0.34212609, 0.31418314, 0.66248165]]
#n = [[-6.56636734e-01, -3.99160434e-04, 7.54206895e-01]]
#pos.append(p)
#normals.append(n)
#
##Top
p = [[0.38480749, 0.31420908, 0.61345819]]
n = [[6.56636734e-01, 4.00439950e-04, -7.54206894e-01]]
pos.append(p)
normals.append(n)
pos = [np.array(px).T for px in pos]
#for p in pos:
# p[2, 0] = 0.0
normals = [np.array(nx).T for nx in normals]
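# Illustrative sketch: each entry above is a 3x1 column vector, so the lists stack into
# (3, N) arrays with one column per contact (six contacts are defined above).
if __name__ == "__main__":
    P = np.hstack(pos)       # contact positions, shape (3, 6)
    N = np.hstack(normals)   # contact normals, shape (3, 6)
    print(P.shape, N.shape)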
| 23.315789 | 57 | 0.653123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.290444 |
315dcc7fa72c3932ae805d3e9b9008f00630dab8 | 15,428 | py | Python | generator/framework/analyser/analyser.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
]
| null | null | null | generator/framework/analyser/analyser.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
]
| null | null | null | generator/framework/analyser/analyser.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
]
| null | null | null | import inspect
import re
import types
from collections import namedtuple
from typing import List, Union, Dict
from flask_restplus import fields
from ...common import MetaData, Entry, Arg, ArgSource, RpcType,\
type_def, rpc_doc_args_key, rpc_doc_resp_key, rpc_impl_rename
from ...common.web.namespace import get_namespace, NamespaceInfo
function_type = frozenset([staticmethod, classmethod, types.FunctionType])
func_obj_types = frozenset([staticmethod, classmethod])
method_reg = re.compile(r"^[\s\S]+?(?=:param|:return:|$)")
class Analyser(object):
"""
    Parser that analyses the api document information bound to the given service classes.
"""
@staticmethod
def analyse(service_classes, service_impl_classes, need_impl: bool = True) -> List[MetaData]:
"""
        Analyse the given service_classes and return their metadata; classes that carry no
        metadata are not added to the returned list.
        service_impl_classes are the matching implementation classes of the services; the two
        lists are expected to correspond one to one.
        need_impl indicates whether a concrete implementation has to be found for each service class.
:param service_classes:
:param service_impl_classes
:param need_impl:
:return:
"""
meta_data = []
for c in service_classes:
methods = extract_methods(c)
if not methods:
continue
            # check whether a custom implementation name was provided
            impl_name = getattr(c, rpc_impl_rename, c.__name__)
            # find the matching implementation class
impl = list(filter(lambda i: i.__name__ ==
impl_name, service_impl_classes))
if not impl and need_impl:
raise Exception(
"found service %s definition without implement code" % c.__name__)
meta = MetaData(c.__name__, c, methods,
impl_type=impl and impl[0] or None)
meta_data.append(meta)
return sorted(meta_data, key=lambda m: m.name.lower())
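# A minimal usage sketch; ServiceA is a hypothetical class whose methods carry the
# __apidoc__ / rpc doc attributes handled by extract_methods below. An implementation class
# must share the service's name (or be pointed to via the rpc_impl_rename attribute), so
# need_impl=False is passed here to skip that lookup:
#
#     metas = Analyser.analyse([ServiceA], [], need_impl=False)
#     for meta in metas:
#         print(meta.name)   # service names, sorted case-insensitively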
def extract_methods(cls):
"""
    Analyse a class and collect all of its methods that define an api doc.
:param cls:
:return:
"""
    # process cls's apidoc if it exists
base_entries_arg: List[Arg] = process_cls_args(cls)
entries = []
for (attr_name, attr) in cls.__dict__.items():
attr_type = type(attr)
if attr_name.startswith('_') or attr_type not in function_type:
continue
rpc_doc_args: type_def.Dict = getattr(attr, rpc_doc_args_key, None)
rpc_doc_resp: type_def.RpcType = getattr(attr, rpc_doc_resp_key, None)
if attr_type in func_obj_types:
# extract real method from method object
attr = getattr(attr, "__func__", None)
api_doc = getattr(attr, '__apidoc__', None)
        # TODO: merging multiple configuration styles is not supported for now; may be added later
entry = None
if api_doc:
entry = analyse_doc(cls, attr, attr_name, api_doc, base_entries_arg)
entry.args = base_entries_arg + entry.args
args = list(base_entries_arg)
result = type_def.Void()
if rpc_doc_args:
            # args is always the model's RpcType.Dict type
            # choose the ArgSource based on attr_name; for a non-http method the chosen
            # source has no effect whatever it is set to
for name, value in rpc_doc_args.get_elem_info().items():
source = get_source_type(attr_name, value)
args.append(Arg(name, value, value.default_value,
value.description, value.required, source))
if rpc_doc_resp:
result = rpc_doc_resp
if not entry:
raw_doc = inspect.getdoc(attr) or ""
method_doc = method_reg.search(raw_doc)
if method_doc:
method_doc = method_doc.group(0)
args = sorted(args, key=lambda a: a.name.lower())
entry = Entry(attr_name, args, result, method_doc)
entries.append(entry)
return sorted(entries, key=lambda e: e.name.lower())
def get_source_type(method_name: str, field: RpcType) -> ArgSource:
"""
    Determine where the field value comes from. The HTTP method name is checked first:
    get maps to the query params and post maps to the request body.
    If the field explicitly sets a source, the field's own source is used instead.
"""
source = ArgSource.UNKNOWN
if method_name == "get":
source = ArgSource.PARAMS
elif method_name == "post":
source = ArgSource.BODY
if field.source != ArgSource.UNKNOWN:
source = field.get_source()
return source
def process_cls_args(cls) -> List[Arg]:
"""
    Extract argument information from the api_doc defined on cls.
"""
cls_api_doc = getattr(cls, "__apidoc__", {})
params: dict = cls_api_doc.get("params", {})
args: List[Arg] = []
for key, value in params.items():
field_type = switch_type(value.get("type", "str"), value)
source_in = value.get("in", "path")
source = ArgSource.PARAMS
if source_in == "path":
source = ArgSource.PATH
elif source_in == "body":
source = ArgSource.BODY
elif source_in == "header":
source = ArgSource.HEADER
args.append(Arg(key, field_type, field_type.default_value, source=source, description=field_type.description))
    # after the Flask definitions are handled, the CommonBase definitions also need processing
ns_info: Union[NamespaceInfo, None] = get_namespace(cls)
if ns_info is not None:
for arg_name, arg_value in ns_info.params.items():
args.append(
Arg(
arg_name,
arg_value,
arg_value.default_value,
description=arg_value.description,
required=arg_value.required,
source=ArgSource.PATH
)
)
return args
def analyse_doc(cls, method, name, api_doc, class_args: List[Arg]) -> Entry:
"""
    Analyse the api_doc information of a method defined on cls and convert it to the local format for later analysis.
:param cls
:param method
:param name:
:param api_doc:
:param class_args:
:return:
"""
    # first look up the parameters named in the function signature
    # unwrap the function's wrappers to get the callable that is actually invoked
while hasattr(method, "__wrapped__"):
method = getattr(method, "__wrapped__")
    # try to fetch the entry's docstring
method_doc_raw = inspect.getdoc(method) or ""
method_doc = method_reg.search(method_doc_raw) or ""
if method_doc:
method_doc = method_doc.group(0)
args = analyse_args(cls, method, method_doc_raw, api_doc, class_args)
entry = Entry(name, args, type_def.Void(), method_doc)
status_codes = api_doc.get("responses", {}).keys()
for status_code in status_codes:
result = analyse_result(api_doc, status_code)
if result:
entry.set_result(status_code, result)
return entry
def analyse_result(api_doc, status_code: int) -> type_def.RpcType:
"""
    Extract the return value information from the api_doc.
:param api_doc:
:param status_code:
:return:
"""
(desc, data_meta) = api_doc.get("responses", {}).get(status_code, (None, None))
if not desc and not data_meta:
        return type_def.Void()  # this endpoint returns an empty type
if isinstance(data_meta, dict):
        # composite type
result = type_def.fields.Dict(required=True)
for (key, type_info) in data_meta.items():
result.add_field(key, switch_type(type_info))
else:
        # primitive type
result = switch_type(data_meta)
return result
def analyse_args(cls, method, method_doc_raw, api_doc, class_args: List[Arg]) -> List[Arg]:
"""
    cls is the module being analysed, method is the corresponding method of that module and
    api_doc is the description attached to that method; use them to work out the function's arguments.
:param cls:
:param method:
:param method_doc_raw:
:param api_doc:
:param class_args:
:return:
"""
frame_info = inspect.getfullargspec(method)
method_args = frame_info.args
if len(frame_info.args) > 0 and frame_info.args[0] == "self":
method_args = method_args[1:]
if len(method_args) > len(frame_info.annotations):
        # required parameter type annotations are missing
        raise Exception(
            "module %s: function %s has %s parameters, but only %s of them have type annotations.\n"
            "Please add type information for the parameters missing annotations %s, e.g. for id:\n\t"
            "def hello(id: int): pass" %
(
cls.__name__,
method.__name__,
len(frame_info.args),
len(frame_info.annotations),
frame_info.args
)
)
params: List[Arg] = []
params_dict = api_doc.get("params", {})
params.extend(analyse_flask_args(method, params_dict, False) or [])
# post
expect_list = api_doc.get("expect", [])
for expect in expect_list:
params.extend(analyse_flask_args(method, expect, True) or [])
    # the parameters declared on the function itself are processed last
func_params: List[Arg] = []
for (index, arg) in enumerate(method_args):
arg_type = switch_type(frame_info.annotations[arg])
if isinstance(arg_type, (type_def.Void, )):
continue
# try to extract documentation from doc
arg_doc = re.search(
r":param %s:(?P<doc>[\s\S]+?)(?=:param|:return|$)" % arg, method_doc_raw)
if arg_doc:
arg_doc = arg_doc.group("doc")
arg_info = Arg(arg, arg_type, None, arg_doc or "")
func_params.append(arg_info)
    # defaults belong to the trailing parameters, so assign them starting from the end
    # (the original indexing called len() on an int and would raise a TypeError)
    last_index = len(func_params) - 1
    for (index, default) in enumerate(reversed(frame_info.defaults or [])):
        func_params[last_index - index].default = default
    # drop duplicated parameters
for p in func_params:
is_dup: bool = False
for pp in params:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
for pp in class_args:
if p.name == pp.name:
is_dup = True
break
if not is_dup:
params.append(p)
return params
def analyse_flask_args(method, type_dict, in_body: bool) -> List[Arg]:
"""
    Analyse the information in type_dict and convert it into type info that has no dependency on flask.
:param method
:param type_dict:
    :param in_body: if True the arguments come from the request body; otherwise the "in" field
    decides: "header" means the argument comes from the headers, anything else is a GET query parameter
:return:
"""
params = []
for (key, value) in type_dict.items():
if isinstance(value, dict):
            # may be a param definition or a flask doc description
attr_type = value.get("type", None)
attr_type = switch_type(attr_type, value)
if not attr_type or isinstance(attr_type, type_def.Void):
                raise Exception("%s: parameter %s has no type definition" % (method, key))
if in_body:
source = ArgSource.BODY
else:
if value.get("in", "params") == "params":
source = ArgSource.PARAMS
else:
source = ArgSource.HEADER
arg = Arg(key, attr_type, default=value.get("default", None),
description=value.get("description", ""), source=source,
required=attr_type.required)
params.append(arg)
else:
attr_type = switch_type(value)
if attr_type:
if in_body:
source = ArgSource.BODY
else:
source = ArgSource.PARAMS
required = True
if value.required is not None:
required = not not value.required
arg = Arg(key, attr_type, attr_type.default_value, value.description,
required=required, source=source)
params.append(arg)
return params
str_literal = ["str", "string"]
number_literal = ["int", "integer"]
base_mapping_fields = {
"default": None,
"required": True,
"default_value": None,
"maximum": None,
"minimal": None,
}
sm = namedtuple(
"DefaultMapping",
[
"description", "required", "min_length",
"max_length", "min_items", "max_items",
"default_value", "must_true",
"must_false", "minimum", "maximum"
]
)(
("description", ""),
("required", True),
("min_length", None),
("max_length", None),
("min_items", None),
("max_items", None),
("default_value", None),
("must_true", None),
("must_false", None),
("minimum", None),
("maximum", None)
)
field_adaptor = {
"minimum": "min",
"maximum": "max",
"default_value": "default"
}
base_sm = [sm.description, sm.default_value, sm.required]
def build_sm(*args, need_base: bool = True):
sm_list = (need_base and base_sm or []) + list(args)
def wrap(addition: Union[Dict, object, None]):
def get_method(_key, _default):
pass
if isinstance(addition, dict):
def get_method(key, default_value):
return addition.get(key, default_value)
elif isinstance(addition, object):
def get_method(key, default_value):
return getattr(addition, key, default_value)
d = {}
for (k, v) in sm_list:
d[k] = v
if not addition:
continue
v = get_method(k, None)
if v is None:
adapt_key = field_adaptor.get(k, None)
if adapt_key is not None:
v = get_method(adapt_key, None)
if v is not None:
d[k] = v
return d
return wrap
number_sm = build_sm(sm.minimum, sm.maximum)
str_sm = build_sm(sm.min_length, sm.max_length)
bool_sm = build_sm(sm.must_true, sm.must_false)
list_sm = build_sm(sm.description, sm.min_items, sm.max_items, need_base=False)
type_switch_mapping = {
"int": number_sm,
int: number_sm,
"integer": number_sm,
fields.Integer: number_sm,
"float": number_sm,
float: number_sm,
fields.Float: number_sm,
"str": str_sm,
"string": str_sm,
str: str_sm,
fields.String: str_sm,
"bool": bool_sm,
bool: bool_sm,
fields.Boolean: bool_sm,
"list": list_sm,
fields.List: list_sm,
}
type_convert_mapping = {
int: type_def.fields.Integer,
str: type_def.fields.String,
float: type_def.fields.Float,
bool: type_def.fields.Bool,
fields.Integer: type_def.fields.Integer,
fields.String: type_def.fields.String,
fields.Boolean: type_def.fields.Boolean,
fields.Float: type_def.fields.Float
}
def switch_type(from_type, addition: Union[dict, None] = None) -> type_def.RpcType:
"""
    Convert a type definition: translate the third-party definition into the local type system.
    addition carries extra flask type information, such as maximum, default or max_items.
:param from_type:
:param addition
:return:
"""
map_func = type_switch_mapping.get(from_type, None)
if map_func is None:
map_func = type_switch_mapping.get(type(from_type), lambda _: {})
kwargs = map_func(addition)
if isinstance(from_type, str):
if from_type in str_literal:
return type_def.fields.String(**kwargs)
elif from_type in number_literal:
return type_def.fields.Integer(**kwargs)
    # primitive types, and flask's primitive field types, can be constructed directly
from_type_constructor = type_convert_mapping.get(from_type, None)
if not from_type_constructor:
from_type_constructor = type_convert_mapping.get(type(from_type), None)
if from_type_constructor:
return from_type_constructor(**kwargs)
if isinstance(from_type, fields.List):
elem_type = switch_type(from_type.container)
return type_def.fields.List(
elem_type, **kwargs)
elif isinstance(from_type, fields.Nested):
field_dict = {}
for (field, field_value) in from_type.model.items():
field_dict[field] = switch_type(field_value, field_value)
return type_def.fields.Dict(field_dict, from_type.description, from_type.required)
else:
return type_def.Void()
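# A rough sketch of how switch_type maps external definitions onto the local type_def fields
# (only the field kind is shown; the constructor kwargs depend on `addition`):
#
#     switch_type(int)             -> type_def.fields.Integer(...)
#     switch_type("str")           -> type_def.fields.String(...)
#     switch_type(fields.Boolean)  -> type_def.fields.Boolean(...)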
| 29.899225 | 118 | 0.598004 | 1,463 | 0.08809 | 0 | 0 | 1,353 | 0.081467 | 0 | 0 | 4,070 | 0.245063 |
315e0a58614b3057e6b0218dbfb71a1a5db2f377 | 5,306 | py | Python | bot/helper/mirror_utils/download_utils/telegram_downloader.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
]
| null | null | null | bot/helper/mirror_utils/download_utils/telegram_downloader.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
]
| null | null | null | bot/helper/mirror_utils/download_utils/telegram_downloader.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | [
"Apache-2.0"
]
| null | null | null | import logging
import random
from time import time
from threading import RLock, Lock, Thread
from bot import LOGGER, download_dict, download_dict_lock, app, STOP_DUPLICATE, STORAGE_THRESHOLD
from bot.helper.ext_utils.bot_utils import get_readable_file_size
from ..status_utils.telegram_download_status import TelegramDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendMessage, sendStatusMessage
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.fs_utils import check_storage_threshold
global_lock = Lock()
GLOBAL_GID = set()
logging.getLogger("pyrogram").setLevel(logging.WARNING)
class TelegramDownloadHelper:
def __init__(self, listener):
self.name = ""
self.size = 0
self.progress = 0
self.downloaded_bytes = 0
self.__start_time = time()
self.__listener = listener
self.__id = ""
self.__is_cancelled = False
self.__resource_lock = RLock()
@property
def download_speed(self):
with self.__resource_lock:
return self.downloaded_bytes / (time() - self.__start_time)
def __onDownloadStart(self, name, size, file_id):
with global_lock:
GLOBAL_GID.add(file_id)
with self.__resource_lock:
self.name = name
self.size = size
self.__id = file_id
gid = ''.join(random.choices(file_id, k=12))
with download_dict_lock:
download_dict[self.__listener.uid] = TelegramDownloadStatus(self, self.__listener, gid)
sendStatusMessage(self.__listener.message, self.__listener.bot)
def __onDownloadProgress(self, current, total):
if self.__is_cancelled:
self.__onDownloadError('Cancelled by user!')
app.stop_transmission()
return
with self.__resource_lock:
self.downloaded_bytes = current
try:
self.progress = current / self.size * 100
except ZeroDivisionError:
pass
def __onDownloadError(self, error):
with global_lock:
try:
GLOBAL_GID.remove(self.__id)
except KeyError:
pass
self.__listener.onDownloadError(error)
def __onDownloadComplete(self):
with global_lock:
GLOBAL_GID.remove(self.__id)
self.__listener.onDownloadComplete()
def __download(self, message, path):
try:
download = app.download_media(message,
progress = self.__onDownloadProgress,
file_name = path
)
except Exception as e:
LOGGER.error(str(e))
return self.__onDownloadError(str(e))
if download is not None:
self.__onDownloadComplete()
elif not self.__is_cancelled:
self.__onDownloadError('Internal error occurred')
def add_download(self, message, path, filename):
_dmsg = app.get_messages(message.chat.id, reply_to_message_ids=message.message_id)
media = None
media_array = [_dmsg.document, _dmsg.video, _dmsg.audio]
for i in media_array:
if i is not None:
media = i
break
if media is not None:
with global_lock:
# For avoiding locking the thread lock for long time unnecessarily
download = media.file_id not in GLOBAL_GID
if filename == "":
name = media.file_name
else:
name = filename
path = path + name
if download:
size = media.file_size
if STOP_DUPLICATE and not self.__listener.isLeech:
LOGGER.info('Checking File/Folder if already in Drive...')
smsg, button = GoogleDriveHelper().drive_list(name, True, True)
if smsg:
msg = "File/Folder is already available in Drive.\nHere are the search results:"
return sendMarkup(msg, self.__listener.bot, self.__listener.message, button)
if STORAGE_THRESHOLD is not None:
arch = any([self.__listener.isZip, self.__listener.extract])
acpt = check_storage_threshold(size, arch)
if not acpt:
msg = f'You must leave {STORAGE_THRESHOLD}GB free storage.'
msg += f'\nYour File/Folder size is {get_readable_file_size(size)}'
return sendMessage(msg, self.__listener.bot, self.__listener.message)
self.__onDownloadStart(name, size, media.file_id)
LOGGER.info(f'Downloading Telegram file with id: {media.file_id}')
Thread(target=self.__download, args=(_dmsg, path)).start()
else:
self.__onDownloadError('File already being downloaded!')
else:
self.__onDownloadError('No document in the replied message')
def cancel_download(self):
LOGGER.info(f'Cancelling download on user request: {self.__id}')
self.__is_cancelled = True
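# A minimal usage sketch; `listener` is assumed to be the bot's mirror listener object
# (providing the uid/bot/message/isLeech attributes and onDownloadComplete/onDownloadError
# callbacks used above) and `path` any download directory ending with '/':
#
#     helper = TelegramDownloadHelper(listener)
#     helper.add_download(message, path, '')
#     ...
#     helper.cancel_download()   # e.g. when the user aborts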
| 40.503817 | 104 | 0.604222 | 4,631 | 0.872786 | 0 | 0 | 146 | 0.027516 | 0 | 0 | 533 | 0.100452 |
315f8e1d96aaa1b3755c0089f370b7d2dae3e33b | 646 | py | Python | setup.py | goofmint/qualityforward-py | 299c11cb4769fb8c42bfd2d553a5c1c1f042d2de | [
"MIT"
]
| null | null | null | setup.py | goofmint/qualityforward-py | 299c11cb4769fb8c42bfd2d553a5c1c1f042d2de | [
"MIT"
]
| null | null | null | setup.py | goofmint/qualityforward-py | 299c11cb4769fb8c42bfd2d553a5c1c1f042d2de | [
"MIT"
]
| null | null | null | import setuptools
setuptools.setup(
name="qualityforward",
version="1.1",
author="Atsushi Nakatsugawa",
author_email="[email protected]",
description="Python library for QualityForward API",
long_description="This is python library for QualityForward API. QualityForward is cloud based test management service.",
long_description_content_type="text/markdown",
url="https://cloud.veriserve.co.jp/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| 34 | 125 | 0.695046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.568111 |
31615431c1549d43bb6f82b1e2019a1850b2c3a4 | 1,945 | py | Python | backend/trips/models.py | repeating/PoputchikiInno | 54b60cfd3c40a25357667c4044fd477f3b6b9152 | [
"CC-BY-4.0"
]
| 20 | 2021-09-23T16:33:34.000Z | 2022-01-08T08:56:10.000Z | backend/trips/models.py | repeating/PoputchikiInno | 54b60cfd3c40a25357667c4044fd477f3b6b9152 | [
"CC-BY-4.0"
]
| null | null | null | backend/trips/models.py | repeating/PoputchikiInno | 54b60cfd3c40a25357667c4044fd477f3b6b9152 | [
"CC-BY-4.0"
]
| 2 | 2021-09-23T16:31:39.000Z | 2021-12-17T01:02:01.000Z | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, AbstractUser
from django.utils import timezone
from django.utils.translation import gettext as _
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
from datetime import datetime
class CarTrip(models.Model):
class Meta:
verbose_name = _('carTrip')
verbose_name_plural = _('cartrips')
def __str__(self):
return f'{self.driver_name} Car Trip'
driver_name = models.CharField(max_length=200)
destination = models.CharField(max_length=200)
number_of_seats = models.IntegerField('number of seats')
trip_date = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
@classmethod
def create(cls , driver_name, destination, number_of_seats, trip_date):
trip = cls(driver_name= driver_name,
destination=destination,
number_of_seats=number_of_seats,
trip_date=trip_date,
pub_date=datetime.now()
)
return trip
def was_published_recently(self):
now = timezone.now()
        return now - timedelta(days=1) <= self.pub_date <= now
class Relation(models.Model):
class Meta:
verbose_name = _('relation')
verbose_name_plural = _('relation')
trip_number = models.IntegerField('trip_number')
hiker_name = models.CharField(max_length=200)
def __str__(self ):
return f'{self.hiker_name} going on trip id = {self.trip_number}'
@classmethod
def create(cls , trip_number, hiker_name):
rel = cls(trip_number=trip_number,
hiker_name=hiker_name,
)
return rel
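# A minimal usage sketch (assumes a configured Django project; the values are placeholders).
# Note that the create() classmethods only build unsaved instances, so save() must be called:
#
#     trip = CarTrip.create("Alice", "Innopolis", 3, "2021-09-25")
#     trip.save()
#     rel = Relation.create(trip_number=trip.id, hiker_name="Bob")
#     rel.save()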
| 32.966102 | 87 | 0.684833 | 1,480 | 0.760925 | 0 | 0 | 538 | 0.276607 | 0 | 0 | 173 | 0.088946 |
31617ded12576d7c4174e3e0dd3d96d3659501e5 | 6,957 | py | Python | jcs/jcs_main.py | orenmel/lexsub | 4197fccf489670fdc4e5510971f4288b9d0b1625 | [
"Apache-2.0"
]
| 26 | 2016-07-28T03:05:07.000Z | 2021-05-14T05:02:38.000Z | jcs/jcs_main.py | afcarl/lexsub | 4197fccf489670fdc4e5510971f4288b9d0b1625 | [
"Apache-2.0"
]
| 11 | 2018-09-14T12:20:25.000Z | 2021-05-03T18:54:42.000Z | jcs/jcs_main.py | afcarl/lexsub | 4197fccf489670fdc4e5510971f4288b9d0b1625 | [
"Apache-2.0"
]
| 18 | 2016-11-15T14:15:23.000Z | 2022-03-15T23:41:57.000Z | '''
Run lexical substitution experiments
'''
import sys
import time
import argparse
import re
import numpy
from jcs.jcs_io import extract_word_weight
from jcs.data.context_instance import ContextInstance
from jcs.jcs_io import vec_to_str
from jcs.jcs_io import vec_to_str_generated
from jcs.cs_embedding_inferrer import CsEmbeddingInferrer
from jcs.context2vec_inferrer import Context2vecInferrer
target_re = re.compile(".*__(.*)__.*")
def read_candidates(candidates_file):
target2candidates = {}
# finally.r::eventually;ultimately
with open(candidates_file, 'r') as f:
for line in f:
segments = line.split('::')
target = segments[0]
candidates = set(segments[1].strip().split(';'))
target2candidates[target] = candidates
return target2candidates
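# Sketch of the mapping built above: a candidates line such as
#     finally.r::eventually;ultimately
# becomes
#     target2candidates['finally.r'] == set(['eventually', 'ultimately'])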
def run_test(inferrer):
if args.candidatesfile != None:
target2candidates = read_candidates(args.candidatesfile)
else:
target2candidates = None
tfi = open(args.testfile, 'r')
tfo = open(args.resultsfile, 'w')
tfo_ranked = open(args.resultsfile+'.ranked', 'w')
tfo_generated_oot = open(args.resultsfile+'.generated.oot', 'w')
tfo_generated_best = open(args.resultsfile+'.generated.best', 'w')
lines = 0
while True:
context_line = tfi.readline()
if not context_line:
break;
lst_instance = ContextInstance(context_line, args.no_pos)
lines += 1
if (args.debug == True):
tfo.write("\nTest context:\n")
tfo.write("***************\n")
tfo.write(lst_instance.decorate_context())
result_vec = inferrer.find_inferred(lst_instance, tfo)
generated_results = inferrer.generate_inferred(result_vec, lst_instance.target, lst_instance.target_lemma, lst_instance.pos)
tfo.write("\nGenerated lemmatized results\n")
tfo.write("***************\n")
tfo.write("GENERATED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " ::: " + vec_to_str_generated(generated_results.iteritems(), args.topgenerated)+"\n")
tfo_generated_oot.write(' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " ::: " + vec_to_str_generated(generated_results.iteritems(), args.topgenerated)+"\n")
tfo_generated_best.write(' '.join([lst_instance.full_target_key, lst_instance.target_id]) + " :: " + vec_to_str_generated(generated_results.iteritems(), 1)+"\n")
filtered_results = inferrer.filter_inferred(result_vec, target2candidates[lst_instance.target_key], lst_instance.pos)
tfo.write("\nFiltered results\n")
tfo.write("***************\n")
tfo.write("RANKED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + "\t" + vec_to_str(filtered_results.iteritems(), len(filtered_results))+"\n")
tfo_ranked.write("RANKED\t" + ' '.join([lst_instance.full_target_key, lst_instance.target_id]) + "\t" + vec_to_str(filtered_results.iteritems(), len(filtered_results))+"\n")
# print "end %f" % time.time()
if lines % 10 == 0:
print "Read %d lines" % lines
print "Read %d lines in total" % lines
print "Time per word: %f msec" % inferrer.msec_per_word()
tfi.close()
tfo.close()
tfo_ranked.close()
tfo_generated_oot.close()
tfo_generated_best.close()
def run(args):
print time.asctime(time.localtime(time.time()))
if args.inferrer == 'emb':
inferrer = CsEmbeddingInferrer(args.vocabfile, args.ignoretarget, args.contextmath, args.embeddingpath, args.embeddingpathc, args.testfileconll, args.bow_size, 10)
print "Using CsEmbeddingInferrer"
elif args.inferrer == 'lstm':
inferrer = Context2vecInferrer(args.lstm_config, args.ignoretarget, args.contextmath, 10)
print "Using Context2vecInferrer"
else:
raise Exception("Unknown inferrer type: " + args.inferrer)
print time.asctime(time.localtime(time.time()))
run_test(inferrer)
print "Finished"
print time.asctime(time.localtime(time.time()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='JCS utility')
parser.add_argument('--inferrer', choices=['lstm', 'emb'],
default='lstm',
help='context type ("lstm", "emb")')
# Only for Context2vecInferrer
parser.add_argument('-lstm_config', action="store", dest="lstm_config", default=None, help="config file of lstm context model and respective word embeddings")
# Only for CsEmbeddingInferrer
parser.add_argument('-embeddingpath', action="store", dest="embeddingpath", default=None, help="prefix to files containing word embeddings")
parser.add_argument('-embeddingpathc', action="store", dest="embeddingpathc", default=None, help="prefix to files containing context word embeddings")
parser.add_argument('-vocabfile', action="store", dest="vocabfile")
parser.add_argument('-bow',action='store',dest='bow_size', default=-1, type=int, help="context bag-of-words window size. 0 means entire sentence. -1 means syntactic dependency contexts.")
# Common
parser.add_argument('-targetsfile', action="store", dest="targetsfile", default=None)
parser.add_argument('-testfile', action="store", dest="testfile")
parser.add_argument('-testfileconll', action="store", dest="testfileconll", default=None, help="test file with sentences parsed in conll format")
parser.add_argument('-candidatesfile', action="store", dest="candidatesfile", default=None)
parser.add_argument('-resultsfile', action="store", dest="resultsfile")
parser.add_argument('-contextmath', action="store", dest="contextmath", default=None, help="arithmetics used to consider context [add|mult|geomean|none]")
parser.add_argument('--ignoretarget', action="store_true", dest="ignoretarget", default=False, help="ignore lhs target. compute only context compatibility.")
parser.add_argument('--nopos',action='store_true',dest='no_pos', default=False, help="ignore part-of-speech of target word")
parser.add_argument('-topgenerated', action="store", dest="topgenerated", type=int, default=10, help="top entries to print in generated parvecs")
parser.add_argument('--debug',action='store_true',dest='debug')
args = parser.parse_args(sys.argv[1:])
config_file_name = args.resultsfile + ".CONFIG"
cf = open(config_file_name, 'w')
cf.write(' '.join(sys.argv)+'\n')
cf.close()
numpy.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
run(args)
| 44.883871 | 193 | 0.655455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,817 | 0.261176 |
3162214c539ed97be79534accd9664fa733e424a | 151 | py | Python | src/django_richenum/__init__.py | adepue/django-richenum | 20cacb6ee1c8f2ad9877dd647c21a3b3f37a4333 | [
"MIT"
]
| null | null | null | src/django_richenum/__init__.py | adepue/django-richenum | 20cacb6ee1c8f2ad9877dd647c21a3b3f37a4333 | [
"MIT"
]
| null | null | null | src/django_richenum/__init__.py | adepue/django-richenum | 20cacb6ee1c8f2ad9877dd647c21a3b3f37a4333 | [
"MIT"
]
| null | null | null | __version__ = 'unknown'
try:
__version__ = __import__('pkg_resources').get_distribution('django_richenum').version
except Exception as e:
pass
| 25.166667 | 89 | 0.761589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.271523 |
31642639391c00a85e1a2f3c4f186e8a38649689 | 5,572 | py | Python | scripts/v.py | NatashaChopper/stage | b33654b6bfaa7d291e80327d83cd6849271cc656 | [
"BSD-2-Clause"
]
| null | null | null | scripts/v.py | NatashaChopper/stage | b33654b6bfaa7d291e80327d83cd6849271cc656 | [
"BSD-2-Clause"
]
| null | null | null | scripts/v.py | NatashaChopper/stage | b33654b6bfaa7d291e80327d83cd6849271cc656 | [
"BSD-2-Clause"
]
| null | null | null | import numpy
with open ("dic.txt", "w", encoding="utf-8") as dic:
for x in range(5, 790, 1):
if 92 < x <= 113:
dic.write('"'+str(x)+'"'+":"+ '"'+'1'+'",')
elif 113 < x <= 133:
dic.write('"'+str(x)+'"'+":"+ '"'+'2'+'",')
elif 133 < x <= 153:
dic.write('"'+str(x)+'"'+":"+ '"'+'3'+'",')
elif 153 < x <= 173:
dic.write('"'+str(x)+'"'+":"+ '"'+'4'+'",')
elif 173 < x <= 193:
dic.write('"'+str(x)+'"'+":"+ '"'+'5'+'",')
elif 193 < x <= 213:
dic.write('"'+str(x)+'"'+":"+ '"'+'6'+'",')
elif 213 < x <= 233:
dic.write('"'+str(x)+'"'+":"+ '"'+'7'+'",')
elif 233 < x <= 253:
dic.write('"'+str(x)+'"'+":"+ '"'+'8'+'",')
elif 253 < x <= 273:
dic.write('"'+str(x)+'"'+":"+ '"'+'9'+'",')
elif 273 < x <= 293:
dic.write('"'+str(x)+'"'+":"+ '"'+'10'+'",')
elif 293 < x <= 313:
dic.write('"'+str(x)+'"'+":"+ '"'+'11'+'",')
elif 313 < x <= 333:
dic.write('"'+str(x)+'"'+":"+ '"'+'12'+'",')
elif 333 < x <= 353:
dic.write('"'+str(x)+'"'+":"+ '"'+'13'+'",')
elif 353 < x <= 373:
dic.write('"'+str(x)+'"'+":"+ '"'+'14'+'",')
elif 373 < x <= 393:
dic.write('"'+str(x)+'"'+":"+ '"'+'15'+'",')
elif 393 < x <= 413:
dic.write('"'+str(x)+'"'+":"+ '"'+'16'+'",')
elif 413 < x <= 433:
dic.write('"'+str(x)+'"'+":"+ '"'+'17'+'",')
elif 433 < x <= 453:
dic.write('"'+str(x)+'"'+":"+ '"'+'18'+'",')
elif 453 < x <= 473:
dic.write('"'+str(x)+'"'+":"+ '"'+'19'+'",')
elif 473 < x <= 493:
dic.write('"'+str(x)+'"'+":"+ '"'+'20'+'",')
elif 493 < x <= 513:
dic.write('"'+str(x)+'"'+":"+ '"'+'21'+'",')
elif 513 < x <= 533:
dic.write('"'+str(x)+'"'+":"+ '"'+'22'+'",')
elif 533 < x <= 553:
dic.write('"'+str(x)+'"'+":"+ '"'+'23'+'",')
elif 553 < x <= 573:
dic.write('"'+str(x)+'"'+":"+ '"'+'24'+'",')
elif 573 < x <= 593:
dic.write('"'+str(x)+'"'+":"+ '"'+'25'+'",')
elif 593 < x <= 613:
dic.write('"'+str(x)+'"'+":"+ '"'+'26'+'",')
elif 613 < x <= 633:
dic.write('"'+str(x)+'"'+":"+ '"'+'27'+'",')
elif 633 < x <= 653:
dic.write('"'+str(x)+'"'+":"+ '"'+'28'+'",')
elif 653 < x <= 673:
dic.write('"'+str(x)+'"'+":"+ '"'+'29'+'",')
elif 673 < x <= 693:
dic.write('"'+str(x)+'"'+":"+ '"'+'30'+'",')
elif 693 < x <= 713:
dic.write('"'+str(x)+'"'+":"+ '"'+'31'+'",')
elif 713 < x <= 733:
dic.write('"'+str(x)+'"'+":"+ '"'+'32'+'",')
elif 733 < x <= 753:
dic.write('"'+str(x)+'"'+":"+ '"'+'33'+'",')
elif 753 < x <= 773:
dic.write('"'+str(x)+'"'+":"+ '"'+'34'+'",')
elif 773 < x <= 793:
dic.write('"'+str(x)+'"'+":"+ '"'+'35'+'",')
elif 4 < x <= 15:
dic.write('"'+str(x)+'"'+":"+ '"'+'36'+'",')
elif 15 < x <= 25:
dic.write('"'+str(x)+'"'+":"+ '"'+'37'+'",')
elif 25 < x <= 35:
dic.write('"'+str(x)+'"'+":"+ '"'+'38'+'",')
elif 35 < x <= 45:
dic.write('"'+str(x)+'"'+":"+ '"'+'39'+'",')
elif 45 < x <= 55:
dic.write('"'+str(x)+'"'+":"+ '"'+'40'+'",')
elif 55 < x <= 65:
dic.write('"'+str(x)+'"'+":"+ '"'+'41'+'",')
elif 65 < x <= 75:
dic.write('"'+str(x)+'"'+":"+ '"'+'42'+'",')
elif 75 < x <= 85:
dic.write('"'+str(x)+'"'+":"+ '"'+'43'+'",')
elif 85 < x <= 92:
dic.write('"'+str(x)+'"'+":"+ '"'+'44'+'",')
with open ("time.txt", "w", encoding="utf-8") as duree:
for y in numpy.arange(0, 1.7, 0.01):
if 0 < y <= 0.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'80'+'",')
elif 0.1 < y <= 0.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'81'+'",')
elif 0.2 < y <= 0.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'82'+'",')
elif 0.3 < y <= 0.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'83'+'",')
elif 0.4 < y <= 0.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'84'+'",')
elif 0.5 < y <= 0.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'85'+'",')
elif 0.6 < y <= 0.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'86'+'",')
elif 0.7 < y <= 0.8:
duree.write('"'+str(y)+'"'+":"+ '"'+'87'+'",')
elif 0.8 < y <= 0.9:
duree.write('"'+str(y)+'"'+":"+ '"'+'88'+'",')
elif 0.9 < y <= 1:
duree.write('"'+str(y)+'"'+":"+ '"'+'89'+'",')
elif 1 < y <= 1.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'90'+'",')
elif 1.1 < y <= 1.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'91'+'",')
elif 1.2 < y <= 1.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'92'+'",')
elif 1.3 < y <= 1.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'93'+'",')
elif 1.4 < y <= 1.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'94'+'",')
elif 1.5 < y <= 1.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'95'+'",')
elif 1.6 < y <= 1.7:
            duree.write('"'+str(y)+'"'+":"+ '"'+'96'+'",')
| 43.193798 | 59 | 0.288227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.224336 |
316518dbc6fa99ee74801a883d9600ed91d0c3e7 | 1,101 | py | Python | scrim/globals.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
]
| 4 | 2018-06-21T20:14:11.000Z | 2021-04-28T20:34:43.000Z | scrim/globals.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
]
| null | null | null | scrim/globals.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
'''
=============
scrim.globals
=============
Defines variables passed into the python script via Environment Variables by
scrim scripts. If SCRIM_SHELL is None, then the python script was not executed
by a scrim script.
SHELLS (list): list of available shells
SCRIM_SHELL (str): Parent shell, one of the above SHELLS
SCRIM_PATH (str): Path to output shell script
SCRIM_AUTO_WRITE (bool): Write to SCRIM_PATH when python exits?
SCRIM_SCRIPT (str): Path to the scrim script that invoked python
SCRIM_DEBUG (bool): Is scrim script running in debug mode?
'''
from __future__ import absolute_import
import os
__all__ = [
'SHELLS', 'SCRIM_SHELL', 'SCRIM_PATH', 'SCRIM_AUTO_WRITE',
'SCRIM_SCRIPT', 'SCRIM_DEBUG'
]
SHELLS = [
'powershell.exe',
'cmd.exe',
'bash'
]
SCRIM_SHELL = os.environ.get('SCRIM_SHELL', None)
SCRIM_PATH = os.environ.get('SCRIM_PATH', None)
SCRIM_AUTO_WRITE = bool(os.environ.get('SCRIM_AUTO_WRITE', False))
SCRIM_SCRIPT = os.environ.get('SCRIM_SCRIPT', None)
SCRIM_DEBUG = bool(os.environ.get('SCRIM_DEBUG', False))
| 32.382353 | 78 | 0.710263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 782 | 0.710263 |
31658cd3302ac85eb923cde2f7d6f6b205979a8e | 675 | py | Python | examples/Api/channels.py | asheshambasta/csound-expression | 290567231c1d658e07ba882b1d1c726c96af67ce | [
"BSD-3-Clause"
]
| null | null | null | examples/Api/channels.py | asheshambasta/csound-expression | 290567231c1d658e07ba882b1d1c726c96af67ce | [
"BSD-3-Clause"
]
| null | null | null | examples/Api/channels.py | asheshambasta/csound-expression | 290567231c1d658e07ba882b1d1c726c96af67ce | [
"BSD-3-Clause"
]
| null | null | null | import csnd6
class Control:
def __init__(self, volume, frequency):
engine = csnd6.Csound()
engine.SetOption("-odac")
engine.Compile("osc.csd")
thread = csnd6.CsoundPerformanceThread(engine)
thread.Play()
self.engine = engine
self.thread = thread
self.set_volume(volume)
self.set_frequency(frequency)
def set_volume(self, volume):
self.engine.SetChannel("volume", volume)
def set_frequency(self, frequency):
self.engine.SetChannel("frequency", frequency)
def close(self):
self.thread.Stop()
self.thread.Join()
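# A minimal usage sketch; it assumes Csound with the csnd6 Python bindings is installed and
# that osc.csd reads the "volume" and "frequency" control channels set above.
if __name__ == "__main__":
    import time
    ctl = Control(volume=0.5, frequency=440)
    time.sleep(2)
    ctl.set_frequency(880)   # jump up an octave
    time.sleep(2)
    ctl.close()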
| 25 | 55 | 0.58963 | 645 | 0.955556 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.051852 |
31679637922287de633bd72c77660e49f2f5bfb1 | 2,693 | py | Python | virtual/lib/python3.6/site-packages/isbnlib/_goob.py | david12-wq/PITCHE_APP | 258e88a91a3f77fc316ece49edee6aa11eaaf2e0 | [
"MIT"
]
| null | null | null | virtual/lib/python3.6/site-packages/isbnlib/_goob.py | david12-wq/PITCHE_APP | 258e88a91a3f77fc316ece49edee6aa11eaaf2e0 | [
"MIT"
]
| null | null | null | virtual/lib/python3.6/site-packages/isbnlib/_goob.py | david12-wq/PITCHE_APP | 258e88a91a3f77fc316ece49edee6aa11eaaf2e0 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Query the Google Books (JSON API v1) service for metadata."""
import logging
from .dev import stdmeta
from .dev._bouth23 import u
from .dev._exceptions import ISBNNotConsistentError, RecordMappingError
from .dev.webquery import query as wquery
UA = 'isbnlib (gzip)'
SERVICE_URL = (
'https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}'
'&fields=items/volumeInfo(title,subtitle,authors,publisher,publishedDate,'
'language,industryIdentifiers)&maxResults=1')
LOGGER = logging.getLogger(__name__)
# pylint: disable=broad-except
def _mapper(isbn, records):
"""Mapp: canonical <- records."""
# canonical: ISBN-13, Title, Authors, Publisher, Year, Language
try:
canonical = {}
canonical['ISBN-13'] = u(isbn)
title = records.get('title', u('')).replace(' :', ':')
subtitle = records.get('subtitle', u(''))
title = title + ' - ' + subtitle if subtitle else title
canonical['Title'] = title
canonical['Authors'] = records.get('authors', [u('')])
# see issue #64
canonical['Publisher'] = records.get('publisher', u('')).strip('"')
if 'publishedDate' in records and len(records['publishedDate']) >= 4:
canonical['Year'] = records['publishedDate'][0:4]
else: # pragma: no cover
canonical['Year'] = u('')
canonical['Language'] = records.get('language', u(''))
except Exception: # pragma: no cover
LOGGER.debug('RecordMappingError for %s with data %s', isbn, records)
raise RecordMappingError(isbn)
# call stdmeta for extra cleanning and validation
return stdmeta(canonical)
def _records(isbn, data):
"""Classify (canonically) the parsed data."""
# put the selected data in records
try:
recs = data['items'][0]['volumeInfo']
except Exception: # pragma: no cover
# don't raise exception!
LOGGER.debug('No data from "goob" for isbn %s', isbn)
return {}
# consistency check (isbn request = isbn response)
if recs:
ids = recs.get('industryIdentifiers', '')
if u('ISBN_13') in repr(ids) and isbn not in repr(
ids): # pragma: no cover
LOGGER.debug('ISBNNotConsistentError for %s (%s)', isbn, repr(ids))
raise ISBNNotConsistentError('{0} not in {1}'.format(
isbn, repr(ids)))
else:
return {} # pragma: no cover
# map canonical <- records
return _mapper(isbn, recs)
def query(isbn):
"""Query the Google Books (JSON API v1) service for metadata."""
data = wquery(SERVICE_URL.format(isbn=isbn), user_agent=UA)
return _records(isbn, data)
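# A minimal usage sketch; query() performs a live HTTP request, so the returned values depend
# on the Google Books service. The canonical keys produced by _mapper/stdmeta are
# 'ISBN-13', 'Title', 'Authors', 'Publisher', 'Year' and 'Language':
#
#     meta = query('9780596002817')   # any valid ISBN-13 string
#     print(meta['Title'], meta['Year'])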
| 37.402778 | 79 | 0.625325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,156 | 0.429261 |
3169b8c988e95bc8f6880f27dbdb8bd8e3429cbb | 1,949 | py | Python | gdxpds/test/conftest.py | cdgaete/gdx-pandas | 2b9b00a177268227bce189939cdab081e09cb0dc | [
"BSD-3-Clause"
]
| 42 | 2016-02-24T04:23:24.000Z | 2022-02-02T09:42:18.000Z | gdxpds/test/conftest.py | cdgaete/gdx-pandas | 2b9b00a177268227bce189939cdab081e09cb0dc | [
"BSD-3-Clause"
]
| 68 | 2015-08-06T14:25:22.000Z | 2022-01-03T13:38:51.000Z | gdxpds/test/conftest.py | cdgaete/gdx-pandas | 2b9b00a177268227bce189939cdab081e09cb0dc | [
"BSD-3-Clause"
]
| 21 | 2017-04-25T06:21:42.000Z | 2021-08-24T14:01:27.000Z | # [LICENSE]
# Copyright (c) 2020, Alliance for Sustainable Energy.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or
# promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# [/LICENSE]
import pytest
def pytest_addoption(parser):
parser.addoption(
"--no-clean-up", action="store_true", default=False,
help="Pass this option to leave test outputs in place"
)
@pytest.fixture(scope="session",autouse=True)
def clean_up(request):
return (not request.config.getoption('--no-clean-up'))
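# Sketch of how a test can consume the fixture; clean_up is session-scoped and autouse, so a
# test only needs to request it explicitly when it wants to branch on its value:
#
#     def test_writes_outputs(clean_up, tmp_path):
#         out = tmp_path / 'output.gdx'
#         ...  # produce test outputs
#         if clean_up:
#             out.unlink()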
| 39.77551 | 69 | 0.75372 | 0 | 0 | 0 | 0 | 127 | 0.065162 | 0 | 0 | 1,681 | 0.862494 |
3169daa74af1845e0439f54da803485adef48067 | 11,534 | py | Python | jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py | Taekyoon/executors | 567f12c4193bb7be814f84540ea31585cd35b344 | [
"Apache-2.0"
]
| 29 | 2021-07-26T07:16:38.000Z | 2022-03-27T15:10:34.000Z | jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py | Taekyoon/executors | 567f12c4193bb7be814f84540ea31585cd35b344 | [
"Apache-2.0"
]
| 176 | 2021-07-23T08:30:21.000Z | 2022-03-14T12:29:06.000Z | jinahub/indexers/storage/PostgreSQLStorage/tests/test_postgres_dbms.py | Taekyoon/executors | 567f12c4193bb7be814f84540ea31585cd35b344 | [
"Apache-2.0"
]
| 16 | 2021-07-26T20:55:40.000Z | 2022-03-18T15:32:17.000Z | import os
import time
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.logging.profile import TimeContext
from jina_commons.indexers.dump import import_metas, import_vectors
from ..postgres_indexer import PostgreSQLStorage
from ..postgreshandler import doc_without_embedding
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d "
f"--remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down "
f"--remove-orphans"
)
d_embedding = np.array([1, 1, 1, 1, 1, 1, 1])
c_embedding = np.array([2, 2, 2, 2, 2, 2, 2])
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise Exception('no port available')
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
def get_documents(chunks, same_content, nr=10, index_start=0, same_tag_content=None):
next_chunk_id = nr + index_start
for i in range(index_start, nr + index_start):
d = Document()
d.id = i
if same_content:
d.text = 'hello world'
d.embedding = np.random.random(d_embedding.shape)
else:
d.text = f'hello world {i}'
d.embedding = np.random.random(d_embedding.shape)
if same_tag_content:
d.tags['field'] = 'tag data'
elif same_tag_content is False:
d.tags['field'] = f'tag data {i}'
for j in range(chunks):
c = Document()
c.id = next_chunk_id
if same_content:
c.text = 'hello world from chunk'
c.embedding = np.random.random(c_embedding.shape)
else:
c.text = f'hello world from chunk {j}'
c.embedding = np.random.random(c_embedding.shape)
if same_tag_content:
c.tags['field'] = 'tag data'
elif same_tag_content is False:
c.tags['field'] = f'tag data {next_chunk_id}'
next_chunk_id += 1
d.chunks.append(c)
yield d
def validate_db_side(postgres_indexer, expected_data):
ids, vecs, metas = zip(*expected_data)
with postgres_indexer.handler as handler:
cursor = handler.connection.cursor()
cursor.execute(
f'SELECT doc_id, embedding, doc from {postgres_indexer.table} ORDER BY '
f'doc_id::int'
)
record = cursor.fetchall()
for i in range(len(expected_data)):
np.testing.assert_equal(ids[i], str(record[i][0]))
embedding = np.frombuffer(record[i][1], dtype=postgres_indexer.dump_dtype)
np.testing.assert_equal(vecs[i], embedding)
np.testing.assert_equal(metas[i], bytes(record[i][2]))
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[1] / 'config.yml'), override_with={'dry_run': True}
)
assert ex.username == 'postgres'
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_postgres(tmpdir, docker_compose):
postgres_indexer = PostgreSQLStorage()
NR_DOCS = 10000
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
postgres_indexer.delete(original_docs, {})
with TimeContext(f'### indexing {len(original_docs)} docs'):
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
info_original_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in original_docs
]
validate_db_side(postgres_indexer, info_original_docs)
new_docs = DocumentArray(
list(get_documents(chunks=False, nr=10, same_content=True))
)
postgres_indexer.update(new_docs, {})
info_new_docs = [
(doc.id, doc.embedding, doc_without_embedding(doc)) for doc in new_docs
]
ids, vecs, metas = zip(*info_new_docs)
expected_info = [(ids[0], vecs[0], metas[0])]
validate_db_side(postgres_indexer, expected_info)
postgres_indexer.delete(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, len(original_docs) - len(new_docs))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mwu_empty_dump(tmpdir, docker_compose):
f = Flow().add(uses=PostgreSQLStorage)
with f:
resp = f.post(
on='/index', inputs=DocumentArray([Document()]), return_results=True
)
print(f'{resp}')
dump_path = os.path.join(tmpdir, 'dump')
with f:
f.post(
on='/dump',
parameters={'dump_path': os.path.join(tmpdir, 'dump'), 'shards': 1},
)
# assert dump contents
ids, vecs = import_vectors(dump_path, pea_id='0')
assert ids is not None
ids, metas = import_metas(dump_path, pea_id='0')
assert vecs is not None
assert metas is not None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_return_embeddings(docker_compose):
indexer = PostgreSQLStorage()
doc = Document(embedding=np.random.random(10))
da = DocumentArray([doc])
query1 = DocumentArray([Document(id=doc.id)])
indexer.add(da, parameters={})
indexer.search(query1, parameters={})
assert query1[0].embedding is not None
assert query1[0].embedding.shape == (10,)
query2 = DocumentArray([Document(id=doc.id)])
indexer.search(query2, parameters={"return_embeddings": False})
assert query2[0].embedding is None
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_get_documents(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.delete(docs)
assert len(list(indexer.get_document_iterator())) == 0
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_clear(docker_compose):
indexer = PostgreSQLStorage()
NR = 10
docs = DocumentArray(
list(
get_documents(
nr=NR,
chunks=0,
same_content=False,
)
)
)
indexer.add(docs)
assert len(list(indexer.get_document_iterator())) == NR
indexer.clear()
assert indexer.size == 0
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
@pytest.mark.parametrize('psql_virtual_shards', [44, 128])
@pytest.mark.parametrize('real_shards', [1, 5])
def test_snapshot(docker_compose, psql_virtual_shards, real_shards):
postgres_indexer = PostgreSQLStorage(virtual_shards=psql_virtual_shards)
def _assert_snapshot_shard_distribution(func, nr_shards, total_docs_expected):
total_docs = 0
for i in range(nr_shards):
data = func(shard_id=i, total_shards=nr_shards)
docs_this_shard = len(list(data))
assert docs_this_shard >= postgres_indexer.virtual_shards // real_shards
total_docs += docs_this_shard
np.testing.assert_equal(total_docs, total_docs_expected)
NR_SHARDS = real_shards
NR_DOCS = postgres_indexer.virtual_shards * 2 + 3
original_docs = DocumentArray(
list(get_documents(nr=NR_DOCS, chunks=0, same_content=False))
)
NR_NEW_DOCS = 30
new_docs = DocumentArray(
list(
get_documents(
nr=NR_NEW_DOCS, index_start=NR_DOCS, chunks=0, same_content=False
)
)
)
# make sure to cleanup if the PSQL instance is kept running
postgres_indexer.delete(original_docs, {})
postgres_indexer.delete(new_docs, {})
# indexing the documents
postgres_indexer.add(original_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS)
# create a snapshot
postgres_indexer.snapshot()
# data added the snapshot will not be part of the export
postgres_indexer.add(new_docs, {})
np.testing.assert_equal(postgres_indexer.size, NR_DOCS + NR_NEW_DOCS)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS)
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot, NR_SHARDS, NR_DOCS
)
# create another snapshot
postgres_indexer.snapshot()
timestamp = postgres_indexer.last_snapshot_timestamp
# docs for the delta resolving
NR_DOCS_DELTA = 33
docs_delta = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA,
index_start=NR_DOCS + NR_NEW_DOCS,
chunks=0,
same_content=False,
)
)
)
time.sleep(3)
postgres_indexer.add(docs_delta, {})
np.testing.assert_equal(
postgres_indexer.size, NR_DOCS + NR_NEW_DOCS + NR_DOCS_DELTA
)
np.testing.assert_equal(postgres_indexer.snapshot_size, NR_DOCS + NR_NEW_DOCS)
NR_DOCS_DELTA_DELETED = 10
docs_delta_deleted = DocumentArray(
list(
get_documents(
nr=NR_DOCS_DELTA_DELETED, index_start=0, chunks=0, same_content=False
)
)
)
postgres_indexer.delete(docs_delta_deleted, {'soft_delete': True})
_assert_snapshot_shard_distribution(
postgres_indexer.get_snapshot,
NR_SHARDS,
NR_DOCS + NR_NEW_DOCS,
)
# we use total_shards=1 in order to guarantee getting all the data in the delta
deltas = postgres_indexer.get_delta_updates(
shard_id=0, total_shards=1, timestamp=timestamp
)
deltas = list(deltas)
np.testing.assert_equal(len(deltas), NR_DOCS_DELTA + NR_DOCS_DELTA_DELETED)
def test_postgres_shard_distribution():
assert ['0'] == PostgreSQLStorage._vshards_to_get(0, 3, 5)
assert ['1'] == PostgreSQLStorage._vshards_to_get(1, 3, 5)
assert ['2', '3', '4'] == PostgreSQLStorage._vshards_to_get(2, 3, 5)
assert [str(s) for s in range(5)] == PostgreSQLStorage._vshards_to_get(0, 1, 5)
with pytest.raises(ValueError):
PostgreSQLStorage._vshards_to_get(1, 1, 5)
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_save_get_trained_model(docker_compose):
postgres_indexer = PostgreSQLStorage()
model = np.random.random((100, 5)).tobytes()
postgres_indexer.save_trained_model(model, None)
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum is None
postgres_indexer.save_trained_model(model, 'sha256:hello')
trained_model, trained_model_checksum = postgres_indexer.get_trained_model()
assert trained_model == model
assert trained_model_checksum == 'sha256:hello'
| 32.398876 | 86 | 0.660135 | 0 | 0 | 1,555 | 0.134819 | 8,400 | 0.728282 | 0 | 0 | 1,290 | 0.111843 |
3169e4d8e7a2dd174944cc17f672b8a7919b6ebd | 1,928 | py | Python | targhe/models.py | luca772005/studio | 8d19d28f13f400aa4dde84c36e44cf5891d18ddd | [
"MIT"
]
| null | null | null | targhe/models.py | luca772005/studio | 8d19d28f13f400aa4dde84c36e44cf5891d18ddd | [
"MIT"
]
| null | null | null | targhe/models.py | luca772005/studio | 8d19d28f13f400aa4dde84c36e44cf5891d18ddd | [
"MIT"
]
| null | null | null | from django.db import models
# Create your models here.
class Tipo(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Tipi'
class Marca(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Marche'
class Modello(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
marca = models.ForeignKey(Marca, null=False, blank=False)
tipo = models.ForeignKey(Tipo, null=False, blank=False)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Modelli'
class Alimentazione(models.Model):
descrizione = models.CharField(blank=False, null=False, max_length=128)
def __unicode__(self):
return "{}".format(self.descrizione)
class Meta:
verbose_name_plural = 'Alimentazioni'
class Mezzo(models.Model):
telaio = models.CharField(blank=False, null=False, max_length=128)
colore = models.CharField(blank=False, null=False, max_length=128)
alimentazione = models.ForeignKey(Alimentazione, null=False, blank=False)
modello = models.ForeignKey(Modello, null=False, blank=False)
def __unicode__(self):
return "{} {}".format(self.telaio, self.modello)
class Meta:
verbose_name_plural = 'Mezzi'
class Targa(models.Model):
numero = models.CharField(null=False, blank=False, max_length=16)
dal = models.DateField()
al = models.DateField()
mezzo = models.ForeignKey(Mezzo, null=False, blank=False)
def __unicode__(self):
return "{}".format(self.numero)
class Meta:
verbose_name_plural = 'Targhe'
| 26.410959 | 77 | 0.688278 | 1,852 | 0.960581 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.054979 |
316ab7186375a4578bd0e98836f8f14461a4cb68 | 42 | py | Python | WhatSender/__init__.py | Shauryasamant/whatsender | 057a3f971667ea8992d6645645854604c3c4d071 | [
"Unlicense"
]
| 1 | 2022-01-04T10:48:06.000Z | 2022-01-04T10:48:06.000Z | WhatSender/__init__.py | Shauryasamant/whatsender | 057a3f971667ea8992d6645645854604c3c4d071 | [
"Unlicense"
]
| null | null | null | WhatSender/__init__.py | Shauryasamant/whatsender | 057a3f971667ea8992d6645645854604c3c4d071 | [
"Unlicense"
]
| 2 | 2022-01-05T04:14:28.000Z | 2022-02-12T07:49:15.000Z | from WhatSender.sender import SendMessage
| 21 | 41 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
316b197711d08876516f43059dd4d069e90c4efc | 9,605 | py | Python | scenarios/sync_sheets_and_groups.py | Ragnaruk/api_integration | a91b955ff09830a29829f9b820355045e724f83e | [
"MIT"
]
| null | null | null | scenarios/sync_sheets_and_groups.py | Ragnaruk/api_integration | a91b955ff09830a29829f9b820355045e724f83e | [
"MIT"
]
| null | null | null | scenarios/sync_sheets_and_groups.py | Ragnaruk/api_integration | a91b955ff09830a29829f9b820355045e724f83e | [
"MIT"
]
| null | null | null | import pickle
from time import sleep
import googleapiclient.errors
from transliterate import translit
from logs.logging import get_logger
from api_google.google_api_sheets import get_sheets_service, get_multiple_ranges
from api_google.google_api_directory import get_directory_service, get_users_for_domain, \
get_groups_for_domain, create_group, add_user_to_group
from api_google.google_api_groupsettings import get_groupsettings_service, \
get_group_settings, update_group_settings
from config.config import sync_sheets_and_groups, path_data_directory
def main():
logger = get_logger('sync_sheets_and_groups', sync_sheets_and_groups['logging_level'])
data_path = path_data_directory / 'sync_sheets_and_groups'
data_path.mkdir(parents=True, exist_ok=True)
synced_users_path = data_path / 'synced_users.pickle'
while True:
# number_of_registered_users = 0
# synced_users_dictionary_creation = False
#
# # Getting a list of users who have already been synced
# if synced_users_path.exists():
# logger.debug('Reading synced users from: %s', synced_users_path)
# with open(synced_users_path, 'rb') as f:
# synced_users = pickle.load(f)
# else:
# logger.info('Creating synced users dictionary')
# synced_users = dict()
# synced_users_dictionary_creation = True
try:
service_directory = get_directory_service()
service_sheets = get_sheets_service()
# ranges = get_multiple_ranges(
# service_sheets,
# sync_sheets_and_groups['spreadsheet_id'],
# sync_sheets_and_groups['range_names']
# )
#
# with open(data_path / 'ranges.pickle', 'wb') as file:
# pickle.dump(ranges, file)
with open(data_path / 'ranges.pickle', 'rb') as file:
ranges = pickle.load(file)
#
# [logger.debug(x) for x in ranges]
# group_results = []
# for group in ranges[0]['values']:
# group_name = group[0].split(" ", 1)[0]
#
# email = (translit(group_name, "ru", reversed=True)).lower() \
# + "@" \
# + sync_sheets_and_groups['google_domain']
#
# try:
# group_results.append(create_group(service_directory, email, group_name, ""))
# except googleapiclient.errors.HttpError as exception:
# # If group already exists among other things
# logger.error(exception, exc_info=False)
#
# logger.debug(group_name, email)
#
# group_results.sort(key=lambda x: x['name'])
# with open(data_path / 'group_results.pickle', 'wb') as file:
# pickle.dump(group_results, file)
with open(data_path / 'group_results.pickle', 'rb') as file:
group_results = pickle.load(file)
#
# [logger.debug(x) for x in group_results]
created_group_names = [x['name'] for x in group_results]
[logger.debug(x) for x in created_group_names]
# # A client should wait 1 minute before adding users or sending messages to a new group
# sleep(60)
students = dict(zip(
[i[0] if i else "" for i in ranges[1]['values']],
[i[0] if i else "" for i in ranges[2]['values']]
))
logger.debug(students.items())
leaders = dict(zip(
[i[0] if i else "" for i in ranges[3]['values']],
[i[0] if i else "" for i in ranges[4]['values']]
))
logger.debug(leaders.items())
group_users = {}
for group in ranges[0]['values']:
id = group[0].split(" ", 1)[0]
if id not in created_group_names:
logger.debug("Skipping group: ", id)
continue
else:
logger.debug("Adding users to group: ", id)
group_users[id] = []
# Leader email
group_users[id].append(
[leaders[group[1]], 'MEMBER']
)
# Member emails
for i in range(2, len(group)):
group_users[id].append(
[students[group[i]], 'MEMBER']
)
# Mandatory user
group_users[id] += sync_sheets_and_groups['mandatory_members']
with open(data_path / 'group_users.pickle', 'wb') as file:
pickle.dump(group_users, file)
with open(data_path / 'group_users.pickle', 'rb') as file:
group_users = pickle.load(file)
[logger.debug(x) for x in group_users]
# # Add users to groups
# user_results = []
# for group in group_users:
# for group_user in group_users[group]:
# user_results.append(
# add_user_to_group(service, group, group_user[0], group_user[1])
# )
#
# with open(data_path / 'user_results.pickle', 'wb') as file:
# pickle.dump(user_results, file)
# with open(data_path / 'user_results.pickle', 'rb') as file:
# user_results = pickle.load(file)
#
# [logger.debug(x) for x in user_results]
# students = dict(zip(
# [i[0] if i else "" for i in ranges[1]['values']],
# [i[0] if i else "" for i in ranges[2]['values']]
# ))
#
# leaders = dict(zip(
# [i[0] if i else "" for i in ranges[3]['values']],
# [i[0] if i else "" for i in ranges[4]['values']]
# ))
# if id not in synced_users:
# synced_users[id] = set()
#
# member_emails = set()
#
# # Leader email
# member_emails.add(
# leaders[group[1]]
# )
#
# # Member emails
# for i in range(2, len(group)):
# member_emails.add(
# students[group[i]]
# )
#
# # Mandatory emails
# member_emails |= set(sync_sheets_and_groups['mandatory_members'])
#
# # Synced users
# member_emails -= synced_users[id]
# synced_users[id] |= member_emails
#
# member_emails = list(member_emails)
#
# logger.debug('Name: %s - Description: %s - Users: %s',
# name, description, member_emails)
#
# if not synced_users_dictionary_creation:
# # TODO
# number_of_registered_users += len(member_emails)
#
# logger.debug('Result: %s', result)
# # -----
# # Might need rework
# # -----
#
# service = get_groupsettings_service()
#
# group_emails = []
# for group_name in group_names:
# group_emails.append(
# (translit(group_name, "ru", reversed=True)).lower() \
# + "@" \
# + create_google_groups['google_domain']
# )
#
# with open(data_path / 'group_emails.pickle', 'wb') as file:
# pickle.dump(group_emails, file)
# with open(data_path / 'group_emails.pickle', 'rb') as file:
# group_emails = pickle.load(file)
#
# [logger.debug(x) for x in group_emails]
#
# settings_results = []
# for group_email in group_emails:
# settings_results.append(
# update_group_settings(
# service,
# group_email,
# {
# "whoCanJoin": "INVITED_CAN_JOIN",
# "whoCanViewMembership": "ALL_IN_DOMAIN_CAN_VIEW",
# "whoCanViewGroup": "ALL_IN_DOMAIN_CAN_VIEW",
# "whoCanPostMessage": "ALL_IN_DOMAIN_CAN_POST",
# "isArchived": "true"
# }
# )
# )
#
# with open(data_path / 'settings_results.pickle', 'wb') as file:
# pickle.dump(settings_results, file)
# with open(data_path / 'settings_results.pickle', 'rb') as file:
# settings_results = pickle.load(file)
#
# [logger.debug(x) for x in settings_results]
except Exception as exception:
logger.error(exception, exc_info=True)
# logger.debug('Writing synced users to: %s', synced_users_path)
# with open(synced_users_path, 'wb') as f:
# pickle.dump(synced_users, f)
#
# logger.info('Update finished. Registered %s users. Sleeping for %s seconds.',
# number_of_registered_users, sync_sheets_and_groups['sleep_time'])
sleep(sync_sheets_and_groups['sleep_time'])
if __name__ == '__main__':
main()
| 38.42 | 100 | 0.504112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,001 | 0.520666 |
316b518ce4f1c79a069f5d1878febd49adb00f4b | 15,524 | py | Python | src/GridCal/Gui/TowerBuilder/gui.py | SanPen/GridCal | fd3c24afff91325b8b682b03cab2b8b8edcdda57 | [
"BSD-3-Clause"
]
| 284 | 2016-01-31T03:20:44.000Z | 2022-03-17T21:16:52.000Z | src/GridCal/Gui/TowerBuilder/gui.py | SanPen/GridCal | fd3c24afff91325b8b682b03cab2b8b8edcdda57 | [
"BSD-3-Clause"
]
| 94 | 2016-01-14T13:37:40.000Z | 2022-03-28T03:13:56.000Z | src/GridCal/Gui/TowerBuilder/gui.py | SanPen/GridCal | fd3c24afff91325b8b682b03cab2b8b8edcdda57 | [
"BSD-3-Clause"
]
| 84 | 2016-03-29T10:43:04.000Z | 2022-02-22T16:26:55.000Z | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'gui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from .matplotlibwidget import MatplotlibWidget
from .icons_rc import *
class Ui_Dialog(object):
def setupUi(self, Dialog):
if not Dialog.objectName():
Dialog.setObjectName(u"Dialog")
Dialog.resize(1183, 675)
self.gridLayout = QGridLayout(Dialog)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(1, 1, 1, 1)
self.tabWidget = QTabWidget(Dialog)
self.tabWidget.setObjectName(u"tabWidget")
self.tab_2 = QWidget()
self.tab_2.setObjectName(u"tab_2")
self.verticalLayout_6 = QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName(u"verticalLayout_6")
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.main_splitter = QSplitter(self.tab_2)
self.main_splitter.setObjectName(u"main_splitter")
self.main_splitter.setOrientation(Qt.Horizontal)
self.frame_8 = QFrame(self.main_splitter)
self.frame_8.setObjectName(u"frame_8")
self.frame_8.setFrameShape(QFrame.NoFrame)
self.frame_8.setFrameShadow(QFrame.Raised)
self.verticalLayout_5 = QVBoxLayout(self.frame_8)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.frame_5 = QFrame(self.frame_8)
self.frame_5.setObjectName(u"frame_5")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())
self.frame_5.setSizePolicy(sizePolicy)
self.frame_5.setFrameShape(QFrame.NoFrame)
self.frame_5.setFrameShadow(QFrame.Raised)
self.horizontalLayout = QHBoxLayout(self.frame_5)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.label_9 = QLabel(self.frame_5)
self.label_9.setObjectName(u"label_9")
self.horizontalLayout.addWidget(self.label_9)
self.name_lineEdit = QLineEdit(self.frame_5)
self.name_lineEdit.setObjectName(u"name_lineEdit")
self.horizontalLayout.addWidget(self.name_lineEdit)
self.verticalLayout_5.addWidget(self.frame_5)
self.frame_6 = QFrame(self.frame_8)
self.frame_6.setObjectName(u"frame_6")
sizePolicy.setHeightForWidth(self.frame_6.sizePolicy().hasHeightForWidth())
self.frame_6.setSizePolicy(sizePolicy)
self.frame_6.setFrameShape(QFrame.NoFrame)
self.frame_6.setFrameShadow(QFrame.Raised)
self.horizontalLayout_3 = QHBoxLayout(self.frame_6)
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer_2)
self.label_8 = QLabel(self.frame_6)
self.label_8.setObjectName(u"label_8")
self.horizontalLayout_3.addWidget(self.label_8)
self.frequency_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.frequency_doubleSpinBox.setObjectName(u"frequency_doubleSpinBox")
self.frequency_doubleSpinBox.setDecimals(0)
self.frequency_doubleSpinBox.setValue(50.000000000000000)
self.horizontalLayout_3.addWidget(self.frequency_doubleSpinBox)
self.label_11 = QLabel(self.frame_6)
self.label_11.setObjectName(u"label_11")
self.horizontalLayout_3.addWidget(self.label_11)
self.rho_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.rho_doubleSpinBox.setObjectName(u"rho_doubleSpinBox")
self.rho_doubleSpinBox.setMaximum(9999999.000000000000000)
self.rho_doubleSpinBox.setValue(100.000000000000000)
self.horizontalLayout_3.addWidget(self.rho_doubleSpinBox)
self.verticalLayout_5.addWidget(self.frame_6)
self.splitter = QSplitter(self.frame_8)
self.splitter.setObjectName(u"splitter")
self.splitter.setMaximumSize(QSize(16777215, 16777215))
self.splitter.setOrientation(Qt.Vertical)
self.frame_3 = QFrame(self.splitter)
self.frame_3.setObjectName(u"frame_3")
self.frame_3.setFrameShape(QFrame.NoFrame)
self.frame_3.setFrameShadow(QFrame.Raised)
self.verticalLayout_8 = QVBoxLayout(self.frame_3)
self.verticalLayout_8.setObjectName(u"verticalLayout_8")
self.label_12 = QLabel(self.frame_3)
self.label_12.setObjectName(u"label_12")
self.verticalLayout_8.addWidget(self.label_12)
self.wires_tableView = QTableView(self.frame_3)
self.wires_tableView.setObjectName(u"wires_tableView")
self.verticalLayout_8.addWidget(self.wires_tableView)
self.frame_7 = QFrame(self.frame_3)
self.frame_7.setObjectName(u"frame_7")
self.frame_7.setFrameShape(QFrame.StyledPanel)
self.frame_7.setFrameShadow(QFrame.Raised)
self.horizontalLayout_4 = QHBoxLayout(self.frame_7)
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.add_to_tower_pushButton = QPushButton(self.frame_7)
self.add_to_tower_pushButton.setObjectName(u"add_to_tower_pushButton")
icon = QIcon()
icon.addFile(u":/Icons/icons/plus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.add_to_tower_pushButton.setIcon(icon)
self.horizontalLayout_4.addWidget(self.add_to_tower_pushButton)
self.horizontalSpacer_3 = QSpacerItem(990, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer_3)
self.verticalLayout_8.addWidget(self.frame_7)
self.splitter.addWidget(self.frame_3)
self.frame_4 = QFrame(self.splitter)
self.frame_4.setObjectName(u"frame_4")
self.frame_4.setFrameShape(QFrame.NoFrame)
self.frame_4.setFrameShadow(QFrame.Raised)
self.verticalLayout_4 = QVBoxLayout(self.frame_4)
self.verticalLayout_4.setObjectName(u"verticalLayout_4")
self.verticalLayout_4.setContentsMargins(9, 9, 9, 9)
self.label_10 = QLabel(self.frame_4)
self.label_10.setObjectName(u"label_10")
self.verticalLayout_4.addWidget(self.label_10)
self.tower_tableView = QTableView(self.frame_4)
self.tower_tableView.setObjectName(u"tower_tableView")
self.verticalLayout_4.addWidget(self.tower_tableView)
self.frame = QFrame(self.frame_4)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.NoFrame)
self.frame.setFrameShadow(QFrame.Raised)
self.horizontalLayout_2 = QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.delete_from_tower_pushButton = QPushButton(self.frame)
self.delete_from_tower_pushButton.setObjectName(u"delete_from_tower_pushButton")
icon1 = QIcon()
icon1.addFile(u":/Icons/icons/minus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.delete_from_tower_pushButton.setIcon(icon1)
self.horizontalLayout_2.addWidget(self.delete_from_tower_pushButton)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.horizontalSpacer)
self.compute_pushButton = QPushButton(self.frame)
self.compute_pushButton.setObjectName(u"compute_pushButton")
icon2 = QIcon()
icon2.addFile(u":/Icons/icons/calc.svg", QSize(), QIcon.Normal, QIcon.Off)
self.compute_pushButton.setIcon(icon2)
self.compute_pushButton.setIconSize(QSize(16, 16))
self.horizontalLayout_2.addWidget(self.compute_pushButton)
self.verticalLayout_4.addWidget(self.frame)
self.splitter.addWidget(self.frame_4)
self.verticalLayout_5.addWidget(self.splitter)
self.main_splitter.addWidget(self.frame_8)
self.PlotFrame = QFrame(self.main_splitter)
self.PlotFrame.setObjectName(u"PlotFrame")
self.PlotFrame.setFrameShape(QFrame.NoFrame)
self.PlotFrame.setFrameShadow(QFrame.Raised)
self.verticalLayout_7 = QVBoxLayout(self.PlotFrame)
self.verticalLayout_7.setObjectName(u"verticalLayout_7")
self.verticalLayout_7.setContentsMargins(9, 9, 9, 9)
self.label_4 = QLabel(self.PlotFrame)
self.label_4.setObjectName(u"label_4")
self.verticalLayout_7.addWidget(self.label_4)
self.plotwidget = MatplotlibWidget(self.PlotFrame)
self.plotwidget.setObjectName(u"plotwidget")
self.verticalLayout_7.addWidget(self.plotwidget)
self.frame_9 = QFrame(self.PlotFrame)
self.frame_9.setObjectName(u"frame_9")
self.frame_9.setMaximumSize(QSize(16777215, 24))
self.frame_9.setFrameShape(QFrame.StyledPanel)
self.frame_9.setFrameShadow(QFrame.Raised)
self.horizontalLayout_5 = QHBoxLayout(self.frame_9)
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalSpacer_4 = QSpacerItem(19, 19, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(self.horizontalSpacer_4)
self.acceptButton = QPushButton(self.frame_9)
self.acceptButton.setObjectName(u"acceptButton")
self.horizontalLayout_5.addWidget(self.acceptButton)
self.verticalLayout_7.addWidget(self.frame_9)
self.main_splitter.addWidget(self.PlotFrame)
self.verticalLayout_6.addWidget(self.main_splitter)
self.tabWidget.addTab(self.tab_2, "")
self.tab = QWidget()
self.tab.setObjectName(u"tab")
self.verticalLayout_3 = QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.frame_10 = QFrame(self.tab)
self.frame_10.setObjectName(u"frame_10")
self.frame_10.setFrameShape(QFrame.StyledPanel)
self.frame_10.setFrameShadow(QFrame.Raised)
self.gridLayout_2 = QGridLayout(self.frame_10)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.label_2 = QLabel(self.frame_10)
self.label_2.setObjectName(u"label_2")
self.gridLayout_2.addWidget(self.label_2, 0, 1, 1, 1)
self.label_6 = QLabel(self.frame_10)
self.label_6.setObjectName(u"label_6")
self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 1)
self.z_tableView_abcn = QTableView(self.frame_10)
self.z_tableView_abcn.setObjectName(u"z_tableView_abcn")
self.gridLayout_2.addWidget(self.z_tableView_abcn, 1, 0, 1, 1)
self.y_tableView_abcn = QTableView(self.frame_10)
self.y_tableView_abcn.setObjectName(u"y_tableView_abcn")
self.gridLayout_2.addWidget(self.y_tableView_abcn, 1, 1, 1, 1)
self.label_7 = QLabel(self.frame_10)
self.label_7.setObjectName(u"label_7")
self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
self.z_tableView_abc = QTableView(self.frame_10)
self.z_tableView_abc.setObjectName(u"z_tableView_abc")
self.gridLayout_2.addWidget(self.z_tableView_abc, 3, 0, 1, 1)
self.label = QLabel(self.frame_10)
self.label.setObjectName(u"label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.z_tableView_seq = QTableView(self.frame_10)
self.z_tableView_seq.setObjectName(u"z_tableView_seq")
self.gridLayout_2.addWidget(self.z_tableView_seq, 5, 0, 1, 1)
self.label_3 = QLabel(self.frame_10)
self.label_3.setObjectName(u"label_3")
self.gridLayout_2.addWidget(self.label_3, 2, 1, 1, 1)
self.y_tableView_abc = QTableView(self.frame_10)
self.y_tableView_abc.setObjectName(u"y_tableView_abc")
self.gridLayout_2.addWidget(self.y_tableView_abc, 3, 1, 1, 1)
self.label_5 = QLabel(self.frame_10)
self.label_5.setObjectName(u"label_5")
self.gridLayout_2.addWidget(self.label_5, 4, 1, 1, 1)
self.y_tableView_seq = QTableView(self.frame_10)
self.y_tableView_seq.setObjectName(u"y_tableView_seq")
self.gridLayout_2.addWidget(self.y_tableView_seq, 5, 1, 1, 1)
self.verticalLayout_3.addWidget(self.frame_10)
self.tabWidget.addTab(self.tab, "")
self.gridLayout.addWidget(self.tabWidget, 4, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(Dialog)
# setupUi
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"Tower creation", None))
self.label_9.setText(QCoreApplication.translate("Dialog", u"Name", None))
self.label_8.setText(QCoreApplication.translate("Dialog", u"Frequency (Hz)", None))
self.label_11.setText(QCoreApplication.translate("Dialog", u"Earth resistivity (Ohm/m^3)", None))
self.label_12.setText(QCoreApplication.translate("Dialog", u"Wire catalogue", None))
#if QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Add wire", None))
#endif // QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setText("")
        self.label_10.setText(QCoreApplication.translate("Dialog", u"Wire composition", None))
#if QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Delete wire", None))
#endif // QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setText("")
#if QT_CONFIG(tooltip)
self.compute_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Compute matrices", None))
#endif // QT_CONFIG(tooltip)
self.compute_pushButton.setText("")
self.label_4.setText(QCoreApplication.translate("Dialog", u"Tower", None))
self.acceptButton.setText(QCoreApplication.translate("Dialog", u"Accept", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QCoreApplication.translate("Dialog", u"Tower designer", None))
self.label_2.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABCN", None))
self.label_6.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABC", None))
self.label_7.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) in sequence components", None))
self.label.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABCN", None))
self.label_3.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABC", None))
self.label_5.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for the sequence components", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QCoreApplication.translate("Dialog", u"Impedance matrices", None))
# retranslateUi
| 42.883978 | 134 | 0.706648 | 14,979 | 0.964893 | 0 | 0 | 0 | 0 | 0 | 0 | 2,060 | 0.132698 |
316d44c48b3f5f89c962c9581225af2c135bfd22 | 15,800 | py | Python | PythonAPI/util/check_lidar_bb.py | inverted-ai/carla | 4233fe931073fdd47ee72cc841c4de15aba68386 | [
"MIT"
]
| null | null | null | PythonAPI/util/check_lidar_bb.py | inverted-ai/carla | 4233fe931073fdd47ee72cc841c4de15aba68386 | [
"MIT"
]
| null | null | null | PythonAPI/util/check_lidar_bb.py | inverted-ai/carla | 4233fe931073fdd47ee72cc841c4de15aba68386 | [
"MIT"
]
| 1 | 2020-03-09T20:34:51.000Z | 2020-03-09T20:34:51.000Z | #!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Lidar/BB check for CARLA
This script obtains the LiDAR's point cloud corresponding to all the vehicles
of the scene and makes sure that they are inside the bounding box of the
corresponding actor.
This is done on a predefined route in Town03 with high speed and several aggressive
turns.
In a nutshell, the script has a queue that is filled in each frame with a lidar point
cloud and a structure for storing the Bounding Boxes. The latter is emulated as a
sensor filling the queue in the on_tick callback of the carla.world. In this way, we make
sure that we are correctly synchronizing the lidar point cloud and BB/actor transformations.
Then, we select the points corresponding to each actor (car) in the scene and check that they
are inside the bounding boxes of that actor, all in each vehicle's frame of reference.
Important Data structure description:
+ Lidar data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'semlidar'
- [2] Point cloud in the form of a numpy dictionary with all semantic lidar information
- [3] Global transformation of the sensor
+ Bounding box data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'bb'
- [2] List of actor information: each a tuple with:
- [0] Actor id
- [1] Actor type (blueprint's name)
      - [2] Actor's global transformation
      - [3] Actor's bounding box
  + ActorTrace class: Takes the Lidar data structure and the information of one actor and
    checks whether all the data points related to this actor are inside its BB.
This is done in the local coordinate frame of the actor and should be done like:
trace = ActorTrace(actor_info, lidar_data)
trace.process()
trace.check_lidar_data()
"""
import glob
import os
import sys
import numpy as np
from queue import Queue
from queue import Empty
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
class ActorTrace(object):
"""Class that store and process information about an actor at certain moment."""
def __init__(self, actor, lidar):
self.set_lidar(lidar)
self.set_actor(actor)
self._lidar_pc_local = np.array([])
self._bb_vertices = np.array([])
self._bb_minlimits = [0, 0, 0]
self._bb_maxlimits = [0, 0, 0]
def set_lidar(self, lidar):
self._frame = lidar[0]
self._lidar_data = lidar[2]
self._lidar_transf = lidar[3]
def set_actor(self, actor):
self._actor_id = actor[0]
self._actor_type = actor[1]
self._actor_transf = actor[2]
self._actor_bb = actor[3]
def process(self):
# Filter lidar points that correspond to my actor id
data_actor = self._lidar_data[self._lidar_data['ObjIdx'] == self._actor_id]
# Take the xyz point cloud data and transform it to actor's frame
points = np.array([data_actor['x'], data_actor['y'], data_actor['z']]).T
points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
points = np.dot(self._lidar_transf.get_matrix(), points.T).T # sensor -> world
points = np.dot(self._actor_transf.get_inverse_matrix(), points.T).T # world -> actor
points = points[:, :-1]
# Saving the points in 'local' coordinates
self._lidar_pc_local = points
# We compute the limits in the local frame of reference using the
# vertices of the bounding box
vertices = self._actor_bb.get_local_vertices()
ver_py = []
for v in vertices:
ver_py.append([v.x, v.y, v.z])
ver_np = np.array(ver_py)
self._bb_vertices = ver_np
self._bb_minlimits = ver_np.min(axis=0) - 0.001
self._bb_maxlimits = ver_np.max(axis=0) + 0.001
def print(self, print_if_empty = False):
if self._lidar_pc_local.shape[0] > 0 or print_if_empty:
np.savetxt("veh_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._lidar_pc_local)
np.savetxt("bb_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._bb_vertices)
def lidar_is_outside_bb(self, check_axis = [True, True, True]):
lidar_pc = self._lidar_pc_local
if check_axis[0]:
xmin = self._bb_minlimits[0]
xmax = self._bb_maxlimits[0]
out = np.any((lidar_pc[:,0] > xmax) | (lidar_pc[:,0] < xmin))
if out:
print("Problem with x axis")
return True
if check_axis[1]:
ymin = self._bb_minlimits[1]
ymax = self._bb_maxlimits[1]
out = np.any((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))
if out:
print("Problem with y axis")
return True
if check_axis[2]:
zmin = self._bb_minlimits[2]
zmax = self._bb_maxlimits[2]
out = np.any((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))
if out:
print("Problem with z axis")
return True
return False
def check_lidar_data(self):
if self.lidar_is_outside_bb():
print("Error!!! Points of lidar point cloud are outside its BB for car %d: %s " % (self._actor_id, self._actor_type))
self.print()
return False
else:
return True
def wait(world, frames=100, queue = None, slist = None):
for i in range(0, frames):
world.tick()
if queue != None and slist != None:
try:
for _i in range (0, len(slist)):
s_frame = queue.get(True, 1.0)
except Empty:
print(" Some of the sensor information is missed")
# Sensor callback.
# This is where you receive the sensor data and
# process it as you like; the important part is that,
# at the end, it puts an element into the sensor queue.
def lidar_callback(sensor_data, sensor_queue, sensor_name):
sensor_pc_local = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
('x', np.float32), ('y', np.float32), ('z', np.float32),
('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
sensor_transf = sensor_data.transform
sensor_queue.put((sensor_data.frame, sensor_name, sensor_pc_local, sensor_transf))
def bb_callback(snapshot, world, sensor_queue, sensor_name):
data_array = []
vehicles = world.get_actors().filter('vehicle.*')
for actor in vehicles:
data_array.append((actor.id, actor.type_id, actor.get_transform(), actor.bounding_box))
sensor_queue.put((snapshot.frame, sensor_name, data_array))
def move_spectator(world, actor):
actor_tr = actor.get_transform()
spectator_transform = carla.Transform(actor_tr.location, actor_tr.rotation)
spectator_transform.location -= actor_tr.get_forward_vector() * 5
spectator_transform.location -= actor_tr.get_up_vector() * 3
spectator = world.get_spectator()
spectator.set_transform(spectator_transform)
def world_callback(snapshot, world, sensor_queue, sensor_name, actor):
move_spectator(world, actor)
bb_callback(snapshot, world, sensor_queue, sensor_name)
def process_sensors(w_frame, sensor_queue, sensor_number):
if sensor_number != 2:
print("Error!!! Sensor number should be two")
sl_data = None
bb_data = None
try:
for i in range (0, sensor_number):
s_frame = sensor_queue.get(True, 1.0)
while s_frame[0] != w_frame:
print("Warning! Missmatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
print("This could be due to accumulated data for previous steps")
s_frame = sensor_queue.get(True, 1.0)
if s_frame[1] == "semlidar":
sl_data = s_frame
elif s_frame[1] == "bb":
bb_data = s_frame
#print(" Frame: %d Sensor: %s Len: %d " % (s_frame[0], s_frame[1], len(s_frame[2])))
except Empty:
print("Error!!! The needeinformation is not here!!!")
return
    if sl_data is None or bb_data is None:
        print("Error!!! Not all the sensor data was received for frame %d" % w_frame)
        return
for actor_data in bb_data[2]:
trace_vehicle = ActorTrace(actor_data, sl_data)
trace_vehicle.process()
trace_vehicle.check_lidar_data()
class SpawnCar(object):
def __init__(self, location, rotation, filter="vehicle.*", autopilot = False, velocity = None):
self._filter = filter
self._transform = carla.Transform(location, rotation)
self._autopilot = autopilot
self._velocity = velocity
self._actor = None
self._world = None
def spawn(self, world):
self._world = world
actor_BP = world.get_blueprint_library().filter(self._filter)[0]
self._actor = world.spawn_actor(actor_BP, self._transform)
self._actor.set_autopilot(True)
return self._actor
def destroy(self):
if self._actor != None:
self._actor.destroy()
CarPropList = [
SpawnCar(carla.Location(x=83, y= -40, z=5), carla.Rotation(yaw=-90), filter= "*lincoln*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -30, z=3), carla.Rotation(yaw=-90), filter= "*ambulance*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -20, z=3), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=120, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=100, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=140, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=160, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*impala*", autopilot=False),
SpawnCar(carla.Location(x=180, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=60, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=80, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=100, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=120, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=140, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*impala*", autopilot=True),
SpawnCar(carla.Location(x=160, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*prius*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +20,z=2), carla.Rotation(yaw=+90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +40,z=2), carla.Rotation(yaw=+90), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +80,z=2), carla.Rotation(yaw=+90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -40,z=2), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -20,z=2), carla.Rotation(yaw=-90), filter= "*mkz2017*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +00,z=2), carla.Rotation(yaw=-90), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +20,z=2), carla.Rotation(yaw=-90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +40,z=2), carla.Rotation(yaw=-90), filter= "*charger2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +60,z=2), carla.Rotation(yaw=-90), filter= "*lincoln2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +80,z=2), carla.Rotation(yaw=-90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+100,z=2), carla.Rotation(yaw=-90), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+120,z=2), carla.Rotation(yaw=-90), filter= "*wrangler_rubicon*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+140,z=2), carla.Rotation(yaw=-90), filter= "*c3*", autopilot=True)
]
def spawn_prop_vehicles(world):
for car in CarPropList:
car.spawn(world)
def destroy_prop_vehicles():
for car in CarPropList:
car.destroy()
def main():
# We start creating the client
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
world = client.get_world()
try:
# We need to save the settings to be able to recover them at the end
# of the script to leave the server in the same state that we found it.
original_settings = world.get_settings()
settings = world.get_settings()
        # We set CARLA synchronous mode
settings.fixed_delta_seconds = 0.05
settings.synchronous_mode = True
world.apply_settings(settings)
traffic_manager = client.get_trafficmanager(8000)
traffic_manager.set_synchronous_mode(True)
# We create the sensor queue in which we keep track of the information
# already received. This structure is thread safe and can be
        # accessed by all the sensor callbacks concurrently without problems.
sensor_queue = Queue()
# Spawning ego vehicle
actor_BP = world.get_blueprint_library().filter("vehicle.lincoln.mkz2017")[0]
car_tr = carla.Transform(carla.Location(x=239, y=125, z=0.9), carla.Rotation(yaw=-88.5))
actor = world.spawn_actor(actor_BP, car_tr)
world.tick()
move_spectator(world, actor)
spawn_prop_vehicles(world)
wait(world, 10)
# We create all the sensors and keep them in a list for convenience.
sensor_list = []
lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
lidar_bp.set_attribute('channels', '64')
lidar_bp.set_attribute('points_per_second', '500000')
lidar_bp.set_attribute('range', '300')
lidar_bp.set_attribute('upper_fov', '10.0')
lidar_bp.set_attribute('lower_fov', '-90.0')
lidar_tr = carla.Transform(carla.Location(z=3), carla.Rotation(yaw=0))
lidar = world.spawn_actor(lidar_bp, lidar_tr, attach_to=actor)
lidar.listen(lambda data: lidar_callback(data, sensor_queue, "semlidar"))
world.on_tick(lambda snapshot: world_callback(snapshot, world, sensor_queue, "bb", actor))
sensor_list.append(lidar)
sensor_list.append(actor) # actor acts as a 'sensor' to simplify bb-lidar data comparison
# Set autopilot for main vehicle
actor.enable_constant_velocity(carla.Vector3D(20, 0, 0))
for _i in range(0, 100):
# Tick the server
world.tick()
w_frame = world.get_snapshot().frame
process_sensors(w_frame, sensor_queue, len(sensor_list))
actor.disable_constant_velocity()
finally:
world.apply_settings(original_settings)
# Destroy all the actors
destroy_prop_vehicles()
for sensor in sensor_list:
sensor.destroy()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print(' - Exited by user.')
| 42.473118 | 131 | 0.641582 | 4,138 | 0.261899 | 0 | 0 | 0 | 0 | 0 | 0 | 4,431 | 0.280443 |
316eeacabbf87d7b1bf94354ea4d069d44ec787b | 1,797 | py | Python | claim.py | bukovyn/claim | 506bee564368e991c316e728e211d81cf7cba870 | [
"MIT"
]
| null | null | null | claim.py | bukovyn/claim | 506bee564368e991c316e728e211d81cf7cba870 | [
"MIT"
]
| null | null | null | claim.py | bukovyn/claim | 506bee564368e991c316e728e211d81cf7cba870 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
""" Text files created on DOS/Windows machines have different line endings than
files created on Unix/Linux. DOS uses carriage return and new line ("\r\n")
as a line ending, while Unix uses just new line ("\n"). The purpose of this
script is to have a quick, on the go, shell friendly solution to convert one
to the other.
"""
import sys
import argparse
def main():
""" Removes error traceback clutter and converts files specified.
"""
sys.tracebacklimit = 0
args = commands()
for filename in args.filenames:
convert(filename, args.dos)
def commands():
""" Sets up command line arguments and improper argument error handling.
Returns:
parser (object)
"""
parser = argparse.ArgumentParser()
parser.add_argument('-dos', action='store_true',
help="converts file to DOS")
parser.add_argument('filenames', metavar='filename',
type=str, nargs='+', help="file to be converted")
return parser.parse_args()
def convert(filename, flag):
""" Converts the file's line endings appropriately.
Args:
filename (string): the file being converted
flag (bool): defaults to UNIX. If flag is true, converts line endings to DOS
"""
unix, dos = '\n', '\r\n'
style = 'UNIX'
with open(filename, 'rb') as f:
content = f.read().decode('UTF-8')
if flag:
style = 'DOS'
content = content.replace(unix, dos)
else:
content = content.replace(dos, unix)
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("converting file '{}' to {} ...".format(filename, style))
if __name__ == '__main__':
main()
| 26.820896 | 88 | 0.608236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 957 | 0.532554 |
316f081d38d5db97224f8daf18cc8e5e6ca27466 | 138 | py | Python | reference/Python/media/moviepy/audio/extract_audio.py | steadylearner/code | ba6df6c38a6e25b7ea996f4df905921e27760e04 | [
"MIT"
]
| 4 | 2019-07-17T14:43:32.000Z | 2022-03-27T21:38:01.000Z | reference/Python/media/moviepy/audio/extract_audio.py | steadylearner/code | ba6df6c38a6e25b7ea996f4df905921e27760e04 | [
"MIT"
]
| 39 | 2020-09-04T03:31:16.000Z | 2022-03-08T22:54:03.000Z | reference/Python/media/moviepy/audio/extract_audio.py | steadylearner/code | ba6df6c38a6e25b7ea996f4df905921e27760e04 | [
"MIT"
]
| 1 | 2021-03-03T13:04:28.000Z | 2021-03-03T13:04:28.000Z | import sys
from moviepy.editor import *
clip = VideoFileClip(sys.argv[1])
audioclip = clip.audio
audioclip.write_audiofile(sys.argv[2])
| 17.25 | 38 | 0.775362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
316f355464f64ade9dfc879dc379fa3194ccd5a6 | 363 | py | Python | objettoqt/mixins.py | brunonicko/objettoqt | 1a91ef58d4540b7f377e405492d35ccd222d71d5 | [
"MIT"
]
| null | null | null | objettoqt/mixins.py | brunonicko/objettoqt | 1a91ef58d4540b7f377e405492d35ccd222d71d5 | [
"MIT"
]
| null | null | null | objettoqt/mixins.py | brunonicko/objettoqt | 1a91ef58d4540b7f377e405492d35ccd222d71d5 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Mix-in classes for `Qt` types."""
from ._mixins import (
OQAbstractItemModelMixin,
OQAbstractItemViewMixin,
OQObjectMixin,
OQWidgetMixin,
)
from ._views import OQListViewMixin
__all__ = [
"OQObjectMixin",
"OQWidgetMixin",
"OQAbstractItemModelMixin",
"OQAbstractItemViewMixin",
"OQListViewMixin",
]
| 19.105263 | 36 | 0.683196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.432507 |
316f3a2d1cd58368907db8ec0798ba08d6c7d1c7 | 527 | py | Python | util/mccLog.py | ccchooko/webControlClient | f12cf76d364c5270166c99a508d08999c7ed920c | [
"Apache-2.0"
]
| null | null | null | util/mccLog.py | ccchooko/webControlClient | f12cf76d364c5270166c99a508d08999c7ed920c | [
"Apache-2.0"
]
| null | null | null | util/mccLog.py | ccchooko/webControlClient | f12cf76d364c5270166c99a508d08999c7ed920c | [
"Apache-2.0"
]
| null | null | null | #-*-coding:utf8-*-
import logging
from datetime import datetime
class mccLog(object):
def __init__(self):
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename= datetime.now().strftime("%Y%m%d%H%M%S") + '.log',
filemode='a')
def mccWriteLog(self, logContent):
logging.info(logContent)
def mccError(self, errorContent):
logging.error(errorContent)
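# Minimal usage sketch (assumed): constructing mccLog() configures logging to append
# to a timestamped file (e.g. "20210217093000.log") in the current working directory.
#   log = mccLog()
#   log.mccWriteLog("job started")
#   log.mccError("job failed")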
| 29.277778 | 75 | 0.580645 | 461 | 0.874763 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.187856 |
31705467ec2a06cfd21ab38f7b5f8cfa554d22cc | 1,066 | py | Python | Learning-Python/Jumble-Solver/jumble_solver.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
]
| null | null | null | Learning-Python/Jumble-Solver/jumble_solver.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
]
| null | null | null | Learning-Python/Jumble-Solver/jumble_solver.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
]
| null | null | null | # Oliver Keen
# Software Engineering 001
# jumble_solver.py
# 2/17/2021
# Assignment:
# Consider the game "Jumble"
# https://www.sandiegouniontribune.com/sd-jumble-daily-htmlstory.html
# Create a Python program to find the individual words in Jumble puzzles such
# that INJURE prints after entering the following: solve("JNUIER")
from PyDictionary import PyDictionary # Installation: pip install PyDictionary
from math import factorial
from random import shuffle
from collections import Counter
def solve(jumble):
combos = []
chars = list(jumble.upper())
dict = PyDictionary()
    # Maximum number of unique arrangements of chars (repeated letters are divided
    # out so the while loop below can terminate even when no valid word is found)
    limit = factorial(len(chars))
    for count in Counter(chars).values():
        limit //= factorial(count)
while len(combos) < limit:
# Generates random string from chars
shuffle(chars)
tmp = "".join(chars)
# Appends tmp to combos list only if it is unique
if tmp not in combos:
combos.append(tmp)
# Prints tmp only if it returns an English meaning
if dict.meaning(tmp, disable_errors = True):
print(tmp)
break
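# Example from the assignment above (solve() is defined but never invoked here, so
# call it yourself, e.g. from an interactive session):
#   >>> solve("JNUIER")
#   INJURE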
| 28.810811 | 78 | 0.673546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 545 | 0.511257 |
317065682fd309f044a68646decdf80a32e30ac3 | 19,137 | py | Python | msm/skill_entry.py | luca-vercelli/mycroft-skills-manager | e8ac06132ca5d5fe8a56efdd28ee3968958d3323 | [
"Apache-2.0"
]
| null | null | null | msm/skill_entry.py | luca-vercelli/mycroft-skills-manager | e8ac06132ca5d5fe8a56efdd28ee3968958d3323 | [
"Apache-2.0"
]
| null | null | null | msm/skill_entry.py | luca-vercelli/mycroft-skills-manager | e8ac06132ca5d5fe8a56efdd28ee3968958d3323 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MycroftAI/mycroft-skills-manager).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import shutil
import subprocess
import yaml
from contextlib import contextmanager
from difflib import SequenceMatcher
from functools import wraps
from git import Repo, GitError
from git.exc import GitCommandError
from lazy import lazy
from os.path import exists, join, basename, dirname, isfile
from shutil import rmtree, move
from subprocess import PIPE, Popen
from tempfile import mktemp, gettempdir
from threading import Lock
from typing import Callable
from pako import PakoManager
from msm import SkillRequirementsException, git_to_msm_exceptions
from msm.exceptions import PipRequirementsException, \
SystemRequirementsException, AlreadyInstalled, SkillModified, \
AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException
from msm.util import cached_property, Git
LOG = logging.getLogger(__name__)
# Branches which can be switched from when updating
# TODO Make this configurable
SWITCHABLE_BRANCHES = ['master']
# default constraints to use if no are given
DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt'
FIVE_MINUTES = 300
@contextmanager
def work_dir(directory):
old_dir = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(old_dir)
def _backup_previous_version(func: Callable = None):
"""Private decorator to back up previous skill folder"""
@wraps(func)
def wrapper(self, *args, **kwargs):
self.old_path = None
if self.is_local:
self.old_path = join(gettempdir(), self.name)
if exists(self.old_path):
rmtree(self.old_path)
shutil.copytree(self.path, self.old_path)
try:
func(self, *args, **kwargs)
# Modified skill or GitError should not restore working copy
except (SkillModified, GitError, GitException):
raise
except Exception:
LOG.info('Problem performing action. Restoring skill to '
'previous state...')
if exists(self.path):
rmtree(self.path)
if self.old_path and exists(self.old_path):
shutil.copytree(self.old_path, self.path)
self.is_local = exists(self.path)
raise
finally:
# Remove temporary path if needed
if self.old_path and exists(self.old_path):
rmtree(self.old_path)
return wrapper
class SkillEntry(object):
pip_lock = Lock()
manifest_yml_format = {
'dependencies': {
'system': {},
'exes': [],
'skill': [],
'python': []
}
}
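    # Illustrative manifest.yml matching the format above (the package and skill
    # names are made-up examples, not part of the original source):
    #
    #   dependencies:
    #     system:
    #       all: vlc
    #     exes:
    #       - vlc
    #     skill:
    #       - example-skill.example-author
    #     python:
    #       - requests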
def __init__(self, name, path, url='', sha='', msm=None):
url = url.rstrip('/')
url = url[:-len('.git')] if url.endswith('.git') else url
self.path = path
self.url = url
self.sha = sha
self.msm = msm
if msm:
u = url.lower()
self.meta_info = msm.repo.skills_meta_info.get(u, {})
else:
self.meta_info = {}
if name is not None:
self.name = name
elif 'name' in self.meta_info:
self.name = self.meta_info['name']
else:
self.name = basename(path)
# TODO: Handle git:// urls as well
from_github = False
if url.startswith('https://'):
url_tokens = url.rstrip("/").split("/")
from_github = url_tokens[-3] == 'github.com' if url else False
self.author = self.extract_author(url) if from_github else ''
self.id = self.extract_repo_id(url) if from_github else self.name
self.is_local = exists(path)
self.old_path = None # Path of previous version while upgrading
@property
def is_beta(self):
return not self.sha or self.sha == 'HEAD'
@property
def is_dirty(self):
"""True if different from the version in the mycroft-skills repo.
Considers a skill dirty if
- the checkout sha doesn't match the mycroft-skills repo
- the skill doesn't exist in the mycroft-skills repo
- the skill is not a git repo
- has local modifications
"""
if not exists(self.path):
return False
try:
checkout = Git(self.path)
mod = checkout.status(porcelain=True, untracked_files='no') != ''
current_sha = checkout.rev_parse('HEAD')
except GitCommandError: # Not a git checkout
return True
skill_shas = {d[0]: d[3] for d in self.msm.repo.get_skill_data()}
return (self.name not in skill_shas or
current_sha != skill_shas[self.name] or
mod)
@cached_property(ttl=FIVE_MINUTES)
def skill_gid(self):
"""Format skill gid for the skill.
This property does some Git gymnastics to determine its return value.
When a device boots, each skill accesses this property several times.
To reduce the amount of boot time, cache the value returned by this
property. Cache expires five minutes after it is generated.
"""
LOG.debug('Generating skill_gid for ' + self.name)
gid = ''
if self.is_dirty:
gid += '@|'
if self.meta_info != {}:
gid += self.meta_info['skill_gid']
else:
name = self.name.split('.')[0]
gid += name
return gid
def __str__(self):
return self.name
def attach(self, remote_entry):
"""Attach a remote entry to a local entry"""
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self
@classmethod
def from_folder(cls, path, msm=None, use_cache=True):
"""Find or create skill entry from folder path.
Arguments:
path: path of skill folder
msm: msm instance to use for caching and extended information
retrieval.
use_cache: Enable/Disable cache usage. defaults to True
"""
if msm and use_cache:
skills = {skill.path: skill for skill in msm.local_skills.values()}
if path in skills:
return skills[path]
return cls(None, path, cls.find_git_url(path), msm=msm)
@classmethod
def create_path(cls, folder, url, name=''):
return join(folder, '{}.{}'.format(
name or cls.extract_repo_name(url), cls.extract_author(url)
).lower())
@staticmethod
def extract_repo_name(url):
s = url.rstrip('/').split("/")[-1]
a, b, c = s.rpartition('.git')
if not c:
return a
return s
@staticmethod
def extract_author(url):
return url.rstrip('/').split("/")[-2].split(':')[-1]
@classmethod
def extract_repo_id(cls, url):
return '{}:{}'.format(cls.extract_author(url).lower(),
cls.extract_repo_name(url)).lower()
@staticmethod
def _tokenize(x):
return x.replace('-', ' ').split()
@staticmethod
def _extract_tokens(s, tokens):
s = s.lower().replace('-', ' ')
extracted = []
for token in tokens:
extracted += [token] * s.count(token)
s = s.replace(token, '')
s = ' '.join(i for i in s.split(' ') if i)
tokens = [i for i in s.split(' ') if i]
return s, tokens, extracted
@classmethod
def _compare(cls, a, b):
return SequenceMatcher(a=a, b=b).ratio()
def match(self, query, author=None):
search, search_tokens, search_common = self._extract_tokens(
query, ['skill', 'fallback', 'mycroft']
)
name, name_tokens, name_common = self._extract_tokens(
self.name, ['skill', 'fallback', 'mycroft']
)
weights = [
(9, self._compare(name, search)),
(9, self._compare(name.split(' '), search_tokens)),
(2, self._compare(name_common, search_common)),
]
if author:
author_weight = self._compare(self.author, author)
weights.append((5, author_weight))
author_weight = author_weight
else:
author_weight = 1.0
return author_weight * (
sum(weight * val for weight, val in weights) /
sum(weight for weight, val in weights)
)
def run_pip(self, constraints=None):
if not self.dependent_python_packages:
return False
# Use constraints to limit the installed versions
if constraints and not exists(constraints):
LOG.error('Couldn\'t find the constraints file')
return False
elif exists(DEFAULT_CONSTRAINTS):
constraints = DEFAULT_CONSTRAINTS
LOG.info('Installing requirements.txt for ' + self.name)
can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
pip_args = [sys.executable, '-m', 'pip', 'install']
if constraints:
pip_args += ['-c', constraints]
if not can_pip:
pip_args = ['sudo', '-n'] + pip_args
with self.pip_lock:
"""
Iterate over the individual Python packages and
install them one by one to enforce the order specified
in the manifest.
"""
for dependent_python_package in self.dependent_python_packages:
pip_command = pip_args + [dependent_python_package]
proc = Popen(pip_command, stdout=PIPE, stderr=PIPE)
pip_code = proc.wait()
if pip_code != 0:
stderr = proc.stderr.read().decode()
if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
raise PipRequirementsException(
2, '', 'Permission denied while installing pip '
'dependencies. Please run in virtualenv or use sudo'
)
raise PipRequirementsException(
pip_code, proc.stdout.read().decode(), stderr
)
return True
def install_system_deps(self):
self.run_requirements_sh()
system_packages = {
exe: (packages or '').split()
for exe, packages in self.dependent_system_packages.items()
}
LOG.info('Installing system requirements...')
all_deps = system_packages.pop('all', [])
try:
manager = PakoManager()
success = manager.install(all_deps, overrides=system_packages)
except RuntimeError as e:
LOG.warning('Failed to launch package manager: {}'.format(e))
success = False
missing_exes = [
exe for exe in self.dependencies.get('exes') or []
if not shutil.which(exe)
]
if missing_exes:
if not success:
LOG.warning('Failed to install dependencies.')
if all_deps:
LOG.warning('Please install manually: {}'.format(
' '.join(all_deps)
))
raise SkillRequirementsException('Could not find exes: {}'.format(
', '.join(missing_exes)
))
return success
def run_requirements_sh(self):
setup_script = join(self.path, "requirements.sh")
if not exists(setup_script):
return False
with work_dir(self.path):
rc = subprocess.call(["bash", setup_script])
if rc != 0:
LOG.error("Requirements.sh failed with error code: " + str(rc))
raise SystemRequirementsException(rc)
LOG.info("Successfully ran requirements.sh for " + self.name)
return True
def run_skill_requirements(self):
if not self.msm:
raise ValueError('Pass msm to SkillEntry to install skill deps')
try:
for skill_dep in self.dependent_skills:
LOG.info("Installing skill dependency: {}".format(skill_dep))
try:
self.msm.install(skill_dep)
except AlreadyInstalled:
pass
except Exception as e:
raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
if not info:
return
if not isinstance(info, type(fmt)):
LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
self.name, type(info)
))
return
if not isinstance(info, dict) or not fmt:
return
for key in info:
if key not in fmt:
LOG.warning('Unknown key in manifest.yml for {}: {}'.format(
self.name, key
))
continue
self.verify_info(info[key], fmt[key])
@lazy
def skill_info(self):
yml_path = join(self.path, 'manifest.yml')
if exists(yml_path):
LOG.info('Reading from manifest.yml')
with open(yml_path) as f:
info = yaml.safe_load(f)
self.verify_info(info, self.manifest_yml_format)
return info or {}
return {}
@lazy
def dependencies(self):
return self.skill_info.get('dependencies') or {}
@lazy
def dependent_skills(self):
skills = set()
reqs = join(self.path, "skill_requirements.txt")
if exists(reqs):
with open(reqs, "r") as f:
for i in f.readlines():
skill = i.strip()
if skill:
skills.add(skill)
for i in self.dependencies.get('skill') or []:
skills.add(i)
return list(skills)
@lazy
def dependent_python_packages(self):
reqs = join(self.path, "requirements.txt")
req_lines = []
if exists(reqs):
with open(reqs, "r") as f:
req_lines += f.readlines()
req_lines += self.dependencies.get('python') or []
# Strip comments
req_lines = [l.split('#')[0].strip() for l in req_lines]
return [i for i in req_lines if i] # Strip empty lines
@lazy
def dependent_system_packages(self):
return self.dependencies.get('system') or {}
def remove(self):
if not self.is_local:
raise AlreadyRemoved(self.name)
try:
rmtree(self.path)
self.is_local = False
except OSError as e:
raise RemoveException(str(e))
LOG.info('Successfully removed ' + self.name)
@_backup_previous_version
def install(self, constraints=None):
if self.is_local:
raise AlreadyInstalled(self.name)
LOG.info("Downloading skill: " + self.url)
try:
tmp_location = mktemp()
Repo.clone_from(self.url, tmp_location)
self.is_local = True
Git(tmp_location).reset(self.sha or 'HEAD', hard=True)
except GitCommandError as e:
raise CloneException(e.stderr)
if isfile(join(tmp_location, '__init__.py')):
move(join(tmp_location, '__init__.py'),
join(tmp_location, '__init__'))
try:
move(tmp_location, self.path)
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
finally:
if isfile(join(self.path, '__init__')):
move(join(self.path, '__init__'),
join(self.path, '__init__.py'))
LOG.info('Successfully installed ' + self.name)
def update_deps(self, constraints=None):
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
def _find_sha_branch(self):
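        """Return the name of a branch that contains ``self.sha``, with any
        remote prefixes stripped off.
        """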
git = Git(self.path)
sha_branches = git.branch(
contains=self.sha, all=True
).split('\n')
sha_branch = [b for b in sha_branches if ' -> ' not in b][0]
sha_branch = sha_branch.strip('* \n').replace('remotes/', '')
for remote in git.remote().split('\n'):
sha_branch = sha_branch.replace(remote + '/', '')
return sha_branch
@_backup_previous_version
def update(self):
if not self.is_local:
raise NotInstalled('{} is not installed'.format(self.name))
git = Git(self.path)
with git_to_msm_exceptions():
sha_before = git.rev_parse('HEAD')
modified_files = git.status(porcelain=True, untracked='no')
if modified_files != '':
raise SkillModified('Uncommitted changes:\n' + modified_files)
git.fetch()
current_branch = git.rev_parse('--abbrev-ref', 'HEAD').strip()
if self.sha and current_branch in SWITCHABLE_BRANCHES:
# Check out correct branch
git.checkout(self._find_sha_branch())
git.merge(self.sha or 'origin/HEAD', ff_only=True)
sha_after = git.rev_parse('HEAD')
if sha_before != sha_after:
self.update_deps()
LOG.info('Updated ' + self.name)
# Trigger reload by modifying the timestamp
os.utime(join(self.path, '__init__.py'))
return True
else:
LOG.info('Nothing new for ' + self.name)
return False
@staticmethod
def find_git_url(path):
"""Get the git url from a folder"""
try:
LOG.debug(
'Attempting to retrieve the remote origin URL config for '
'skill in path ' + path
)
return Git(path).config('remote.origin.url')
except GitError:
return ''
def __repr__(self):
return '<SkillEntry {}>'.format(' '.join(
'{}={}'.format(attr, self.__dict__[attr])
for attr in ['name', 'author', 'is_local']
))
| 34.234347 | 87 | 0.576684 | 15,766 | 0.823849 | 136 | 0.007107 | 8,680 | 0.453572 | 0 | 0 | 4,297 | 0.224539 |
3170ca6ee7a6eb3f4bee950684186e4a99de1a8e | 1,356 | py | Python | sleekxmpp/plugins/xep_0027/stanza.py | elrond79/SleekXMPP | 62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f | [
"BSD-3-Clause"
]
| 3 | 2019-02-01T06:50:08.000Z | 2020-03-24T00:45:31.000Z | sleekxmpp/plugins/xep_0027/stanza.py | elrond79/SleekXMPP | 62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f | [
"BSD-3-Clause"
]
| 1 | 2017-11-07T13:03:48.000Z | 2017-11-07T13:03:48.000Z | sleekxmpp/plugins/xep_0027/stanza.py | elrond79/SleekXMPP | 62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f | [
"BSD-3-Clause"
]
| null | null | null | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase
class Signed(ElementBase):
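    """Stanza extension for 'jabber:x:signed' payloads (XEP-0027)."""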
name = 'x'
namespace = 'jabber:x:signed'
plugin_attrib = 'signed'
interfaces = set(['signed'])
is_extension = True
def set_signed(self, value):
parent = self.parent()
xmpp = parent.stream
data = xmpp['xep_0027'].sign(value, parent['from'])
if data:
self.xml.text = data
else:
del parent['signed']
def get_signed(self):
return self.xml.text
class Encrypted(ElementBase):
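    """Stanza extension for 'jabber:x:encrypted' payloads (XEP-0027)."""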
name = 'x'
namespace = 'jabber:x:encrypted'
plugin_attrib = 'encrypted'
interfaces = set(['encrypted'])
is_extension = True
def set_encrypted(self, value):
parent = self.parent()
xmpp = parent.stream
data = xmpp['xep_0027'].encrypt(value, parent['to'].bare)
if data:
self.xml.text = data
else:
del parent['encrypted']
def get_encrypted(self):
parent = self.parent()
xmpp = parent.stream
if self.xml.text:
return xmpp['xep_0027'].decrypt(self.xml.text, parent['to'])
return None
| 25.111111 | 72 | 0.597345 | 1,113 | 0.820796 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.24705 |
31717266252a0d2d31a5a6a58dab2e9b98e48596 | 821 | py | Python | tcptofpc.py | catenacyber/fuzzpcap | 100db6cefd77238f623b5127a3efed01c7333cad | [
"MIT"
]
| 6 | 2021-04-09T03:13:39.000Z | 2022-01-26T14:49:31.000Z | tcptofpc.py | catenacyber/fuzzpcap | 100db6cefd77238f623b5127a3efed01c7333cad | [
"MIT"
]
| null | null | null | tcptofpc.py | catenacyber/fuzzpcap | 100db6cefd77238f623b5127a3efed01c7333cad | [
"MIT"
]
| null | null | null | #tshark -r input.pcap -qz "follow,tcp,raw,0"
import struct
import sys
import binascii
import subprocess
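# Converts the first TCP stream of a pcap (as dumped by the tshark command in
# the comment above) into the fuzzpcap byte format: a magic header, the source
# and destination ports, then one (direction, payload) record per follow line,
# closed by an end marker.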
result = subprocess.Popen( ["tshark", "-r", sys.argv[1], "-qz", "follow,tcp,raw,0"],
stdout=subprocess.PIPE)
sys.stdout.buffer.write(b"FPC\x80")
for i in range(4):
result.stdout.readline()
dp=result.stdout.readline().split(b":")[2]
sp=result.stdout.readline().split(b":")[2]
sys.stdout.buffer.write(struct.pack('>H', int(sp)))
sys.stdout.buffer.write(struct.pack('>H', int(dp)))
for l in result.stdout.readlines():
s2c = 0
if l[0] == 9:
l = l[1:]
s2c = 1
try:
r = binascii.unhexlify(l[:-1])
except:
continue
sys.stdout.buffer.write(struct.pack('>B', int(s2c)))
sys.stdout.buffer.write(r)
sys.stdout.buffer.write(b"FPC0")
| 27.366667 | 84 | 0.615104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.141291 |
3171f6a5227dcae1608234024716446ac61ead63 | 4,588 | py | Python | AIJ Filter Collection/AIJ_Night_Filters.py | kjkoeller/BSU-Code | 0b4016d0e3f823c9f8fe224a09a80c1421eedfdb | [
"MIT"
]
| 1 | 2022-03-12T01:04:15.000Z | 2022-03-12T01:04:15.000Z | AIJ Filter Collection/AIJ_Night_Filters.py | kjkoeller/BSU-Code | 0b4016d0e3f823c9f8fe224a09a80c1421eedfdb | [
"MIT"
]
| null | null | null | AIJ Filter Collection/AIJ_Night_Filters.py | kjkoeller/BSU-Code | 0b4016d0e3f823c9f8fe224a09a80c1421eedfdb | [
"MIT"
]
| null | null | null | """
Created: November 11, 2020
Author: Kyle Koeller
Python Version 3.9
This program is meant to make the process of collecting the different filters from AIJ excel spreadsheets faster.
The user enters however many nights they have and the program goes through and checks those text files for the
different columns for,HJD, Amag, and Amag error for the B and V filters.
The program will also calculate the R magnitude from the rel flux of T1.
There are error catching statements within the program so if the user mistypes, the program will not crash and
close on them.
"""
import pandas as pd
from os import path
def main(c):
# warning prompts for the user to read to make sure this program works correctly
if c == 0:
# warning prompts for the user to read to make sure this program works correctly
print()
print("Make sure you have turned the output xls files from AIJ into tab delimited text files. "
"Since these xls files are corrupt for reading directly from.")
print("You will also need to go into each night and filter and "
"make the HJD column 6 decimals instead of the output of 3 within Excel.")
print()
else:
print()
while True:
# checks to see whether you have entered a number and a correct filter letter
try:
num = int(input("Number of nights you have: "))
filter_name = input("Which filter are these nights in (B, V, R): ")
if filter_name.upper() == "B" or filter_name.upper() == "V" or filter_name.upper() == "R":
break
else:
print("Please enter B, V, or R for your filter.")
print()
continue
except ValueError:
print("You have entered an invalid number for your number of nights. Please enter a number.")
print("")
get_filters(num)
def get_filters(n):
"""
Takes a number of nights for a given filter and takes out the HJD, either A_Mag1 or T1_flux, and
error for mag or flux
:param n: Number of observation nights
:return: the output text files for each night in a given filter
"""
total_hjd = []
total_amag = []
total_error = []
# checks for either the b, v, r filter as either upper or lowercase will work
for i in range(n):
while True:
# makes sure the file pathway is real and points to some file
# (does not check if that file is the correct one though)
try:
# an example pathway for the files
# E:\Research\Data\NSVS_254037\2018.10.12-reduced\Check\V\2018.10.12.APASS.V_measurements.txt
file = input("Enter night %d file path: " % (i+1))
if path.exists(file):
break
else:
continue
except FileNotFoundError:
print("Please enter a correct file path")
# noinspection PyUnboundLocalVariable
df = pd.read_csv(file, delimiter="\t")
# set parameters to lists from the file by the column header
hjd = []
amag = []
amag_error = []
try:
hjd = list(df["HJD"])
amag = list(df["Source_AMag_T1"])
amag_error = list(df["Source_AMag_Err_T1"])
except KeyError:
print("The file you entered does not have the columns of HJD, Source_AMag_T1, or Source_AMag_Err_T1. "
"Please re-enter the file path and make sure its the correct file.")
c = 1
main(c)
total_hjd.append(hjd)
total_amag.append(amag)
total_error.append(amag_error)
# converts the Dataframe embedded lists into a normal flat list
new_hjd = [item for elem in total_hjd for item in elem]
new_amag = [item for elem in total_amag for item in elem]
new_error = [item for elem in total_error for item in elem]
# outputs the new file to dataframe and then into a text file for use in Peranso or PHOEBE
data = pd.DataFrame({
"HJD": new_hjd,
"AMag": new_amag,
"AMag Error": new_error
})
print("")
output = input("What is the file output name (with file extension .txt): ")
data.to_csv(output, index=False, header=False, sep='\t')
print("")
print("Fished saving the file to the same location as this program.")
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
count = 0
main(count)
| 37.300813 | 114 | 0.623147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,661 | 0.579991 |
3172a2c22ec1638c70e59d5c5b2388f4aa10d1ba | 788 | py | Python | backend/songwriter/migrations/0006_auto_20170902_0723.py | giliam/turbo-songwriter | 3d100b08dc19c60d7a1324120e06bd9f971eea5a | [
"MIT"
]
| null | null | null | backend/songwriter/migrations/0006_auto_20170902_0723.py | giliam/turbo-songwriter | 3d100b08dc19c60d7a1324120e06bd9f971eea5a | [
"MIT"
]
| 12 | 2017-09-25T20:13:29.000Z | 2020-02-12T00:12:41.000Z | backend/songwriter/migrations/0006_auto_20170902_0723.py | giliam/turbo-songwriter | 3d100b08dc19c60d7a1324120e06bd9f971eea5a | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-02 05:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('songwriter', '0005_auto_20170824_1726'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'ordering': ['lastname', 'firstname']},
),
migrations.AlterModelOptions(
name='chord',
options={'ordering': ['note']},
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='theme',
options={'ordering': ['name']},
),
]
| 24.625 | 60 | 0.548223 | 638 | 0.809645 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.275381 |
3174581bfbf5c193b24ab07d26b208bd2900562c | 1,073 | py | Python | util/textbox_utils.py | yannl35133/sslib | 4c58487989aa06490a90cbcd0aa4c760937a9278 | [
"MIT"
]
| null | null | null | util/textbox_utils.py | yannl35133/sslib | 4c58487989aa06490a90cbcd0aa4c760937a9278 | [
"MIT"
]
| null | null | null | util/textbox_utils.py | yannl35133/sslib | 4c58487989aa06490a90cbcd0aa4c760937a9278 | [
"MIT"
]
| null | null | null | CHARACTERS_PER_LINE = 39
def break_lines(text):
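    """Insert newlines so that no line exceeds CHARACTERS_PER_LINE,
    breaking at the nearest space where possible.
    """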
chars_in_line = 1
final_text = ''
skip = False
for char in text:
if chars_in_line >= CHARACTERS_PER_LINE:
if char == ' ':
                # we happen to be on a space, so we can just break here
final_text += '\n'
skip = True
else:
# work backwards to find the space to break on
for i in range(len(final_text) - 1, 0, -1):
if final_text[i] == ' ':
final_text = final_text[:i] + '\n' + final_text[i + 1:]
break
chars_in_line = 0
chars_in_line += 1
if not skip:
final_text += char
skip = False
return final_text
if __name__ == '__main__':
print(break_lines('The <y<Spirit of the Sword>> guides the goddess\' chosen hero to <r<Skyloft Village>>'))
print(break_lines('Hey, you look like you have a Questions?'))
print(break_lines('Skyloft Peater/Peatrice\'s Crystals has Bug Net'))
| 33.53125 | 111 | 0.538677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.28425 |
31749eb3567f7fa78e149effb86fecf4648c3519 | 1,501 | py | Python | tools/isolate/data/isolate/with_flag.py | Scopetta197/chromium | b7bf8e39baadfd9089de2ebdc0c5d982de4a9820 | [
"BSD-3-Clause"
]
| 212 | 2015-01-31T11:55:58.000Z | 2022-02-22T06:35:11.000Z | tools/isolate/data/isolate/with_flag.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
]
| 5 | 2015-03-27T14:29:23.000Z | 2019-09-25T13:23:12.000Z | tools/isolate/data/isolate/with_flag.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
]
| 221 | 2015-01-07T06:21:24.000Z | 2022-02-11T02:51:12.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def main():
print 'with_flag: Verify the test data files were mapped properly'
assert len(sys.argv) == 2
mode = sys.argv[1]
assert mode in ('run', 'trace')
files = sorted(os.listdir('files1'))
tree = {
'test_file1.txt': 'Foo\n',
'test_file2.txt': 'Bar\n',
}
# Ignore .svn directory which happens to be there with --mode=trace
# from a svn checkout. The file shouldn't be there when --mode=run is used.
if mode == 'trace' and '.svn' in files:
files.remove('.svn')
if files != sorted(tree):
print '%s != %s' % (files, sorted(tree))
return 2
for k, v in tree.iteritems():
content = open(os.path.join('files1', k), 'rb').read()
if v != content:
print '%s: %r != %r' % (k, v, content)
return 3
root_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir, base = os.path.split(root_dir)
if mode == 'trace':
# Verify the parent directory.
parent_dir, base2 = os.path.split(parent_dir)
if base != 'isolate' or base2 != 'data':
print 'mode trace: Invalid root dir %s' % root_dir
return 4
else:
# Verify that we are not inside a checkout.
if base == 'data':
print 'mode run: Invalid root dir %s' % root_dir
return 5
return 0
if __name__ == '__main__':
sys.exit(main())
| 28.320755 | 77 | 0.632911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 682 | 0.454364 |
31759cd8846fcd05727a01352dd32131306245ac | 8,957 | py | Python | tensorflow/load_mnist.py | stone-zeng/ising | f71ebdba0b91d1912620fe2af8fd211aa6075846 | [
"MIT"
]
| 1 | 2019-12-06T13:57:43.000Z | 2019-12-06T13:57:43.000Z | tensorflow/load_mnist.py | stone-zeng/ising | f71ebdba0b91d1912620fe2af8fd211aa6075846 | [
"MIT"
]
| 1 | 2019-08-01T15:56:28.000Z | 2019-08-01T15:56:28.000Z | tensorflow/load_mnist.py | stone-zeng/ising | f71ebdba0b91d1912620fe2af8fd211aa6075846 | [
"MIT"
]
| 2 | 2020-07-21T15:25:13.000Z | 2021-07-18T18:39:59.000Z | """Loading MNIST dataset.
"""
import struct
import numpy as np
class MNIST:
"""
Loading MNIST dataset.
In the directory of MNIST dataset, there should be the following files:
- Training set:
- train-images-idx3-ubyte
- train-labels-idx1-ubyte
- Test set:
- t10k-images-idx3-ubyte
- t10k-labels-idx1-ubyte
Functions
---------
next_batch()
image_pair(index: int)
sample_batch(batch_index: int)
to_ndarray()
Attributes
----------
data_type: Can be either `"test"` or `"train"`.
path: Path for MNIST data.
data_size: Size of the dataset. Default value `None` means using all data in MNIST.
batch_size: Size of the mini-batch. Default value `None` means using the whole dataset as
a mini-batch.
binarize: Whether to binarize the images (using 0 and 1 values). Default value is True.
reshape: Whether to reshape the images into 2D arrays. Default value is False.
one_hot: whether to use one-hot encoding for labels (e.g. using vector
`[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]` for 0). Default value is False.
"""
IMAGE_SIZE = 784
LABEL_SIZE = 1
_IMAGE_SIZE_FMT = ">784B"
_LABEL_SIZE_FMT = ">B"
IMAGE_SHAPE = (28, 28)
batch_index = 0
def __init__(self, data_type: str, path: str,
data_size: int = None,
batch_size: int = None,
binarize=True,
reshape=False,
one_hot=False):
self.data_type = data_type
self.path = path
# Options
self.binarize = binarize
self.reshape = reshape
self.one_hot = one_hot
# Data buffer
# `data_size` will be updated according to the actual data
image_buf, label_buf = self._read_file()
# Size
if data_size is None:
            # `len(image_buf)` may not be exactly divisible by 784
self.data_size = len(image_buf) // self.IMAGE_SIZE
else:
self.data_size = data_size
if batch_size is None:
self.batch_size = self.data_size
else:
if batch_size <= self.data_size:
self.batch_size = batch_size
else:
raise ValueError("batch size larger than data size")
self.batch_num = self.data_size // self.batch_size
# Data
self._images = self._get_image(image_buf)
self._labels = self._get_label(label_buf)
def _read_file(self):
if self.data_type == "test":
image_file_name = self.path + "t10k-images-idx3-ubyte"
label_file_name = self.path + "t10k-labels-idx1-ubyte"
elif self.data_type == "train":
image_file_name = self.path + "train-images-idx3-ubyte"
label_file_name = self.path + "train-labels-idx1-ubyte"
else:
raise ValueError("only type \"test\" and \"train\" are available")
# "rb" means reading + binary mode
with open(image_file_name, "rb") as image_file:
image_buf = image_file.read()
with open(label_file_name, "rb") as label_file:
label_buf = label_file.read()
return image_buf, label_buf
def _get_image(self, image_buf):
"""Get an image array from `image_buf`.
This is the structure of the image file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
"""
image_buf_len = self.data_size * self.IMAGE_SIZE + 16
image_offset = 16
image_arr = []
while image_offset < image_buf_len:
temp = struct.unpack_from(self._IMAGE_SIZE_FMT, image_buf, image_offset)
if self.binarize:
temp = np.vectorize(lambda x: 0 if x <= 127 else 1)(temp)
if self.reshape:
temp = np.reshape(temp, self.IMAGE_SHAPE)
image_arr.append(temp)
image_offset += self.IMAGE_SIZE
return image_arr
def _get_label(self, label_buf):
"""Get an label array from `label_buf`.
This is the structure of the label file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
"""
label_buf_len = self.data_size * self.LABEL_SIZE + 8
label_offset = 8
label_arr = []
while label_offset < label_buf_len:
temp = struct.unpack_from(self._LABEL_SIZE_FMT, label_buf, label_offset)[0]
if self.one_hot:
vec = np.zeros(10)
vec[temp] = 1
label_arr.append(vec)
else:
label_arr.append(temp)
label_offset += self.LABEL_SIZE
return label_arr
def next_batch(self):
"""Increase `batch_index` by 1, then return a mini-batch of (image, label) tuples."""
this_batch = self.batch(self.batch_index)
self.batch_index = (self.batch_index + 1) % self.batch_num
return this_batch
def image_pair(self, index: int):
"""Return a (image, label) tuple at `index`."""
if index < self.data_size:
return self._images[index], self._labels[index]
raise IndexError("image index out of range")
def batch(self, batch_index: int):
"""Return a mini-batch of (image, label) tuples at `batch_index`."""
if batch_index < self.batch_num:
begin = batch_index * self.batch_size
end = (batch_index + 1) * self.batch_size
return self._images[begin:end], self._labels[begin:end]
raise IndexError("batch index out of range")
def to_ndarray(self):
"""Return the raw data tuple `(images, labels)` as `np.ndarray`.
"""
images = []
labels = []
for i in range(self.batch_num):
image, label = self.batch(i)
images.append(image)
labels.append(label)
return np.asarray(images), np.asarray(labels)
def _test():
data = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=True, one_hot=False, binarize=False)
print("Meta-data:")
print("\tDataset size:", data.data_size)
print("\tBatch size:", data.batch_size)
col_num = 4
row_num = data.batch_size // col_num + 1
_test_random_images(data, col_num, row_num)
_test_random_batch(data, col_num, row_num)
_test_next_batch(data, col_num, row_num)
def _test_random_images(data, col_num, row_num):
images = []
labels = []
for _ in range(10):
index = random.randrange(data.data_size)
image, label = data.image_pair(index)
images.append(image)
labels.append(label)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_random_batch(data, col_num, row_num):
index = random.randrange(data.batch_num)
images, labels = data.batch(index)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_next_batch(data, col_num, row_num):
for _ in range(3):
images, labels = data.next_batch()
_plot(images, labels, col_num=col_num, row_num=row_num)
def _plot(images, labels, col_num, row_num):
for i, (image, label) in enumerate(zip(images, labels)):
plt.subplot(row_num, col_num, i + 1)
plt.imshow(image, cmap="gray")
plt.axis('off')
plt.title(str(label))
plt.show()
def _test_numpy():
images, labels = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=False, one_hot=False, binarize=False).to_ndarray()
print(images.shape) # shape = (num_batches, batch_size, num_visible)
print(np.moveaxis(images, 0, -1).shape) # shape = (batch_size, num_visible, num_batches)
print(labels.shape) # shape = (num_batches, batch_size)
if __name__ == "__main__":
import random
import matplotlib.pyplot as plt
# Local MNIST data
MNIST_PATH = "../../machine-learning/data/mnist/"
_test()
_test_numpy()
| 35.543651 | 97 | 0.577984 | 6,808 | 0.760076 | 0 | 0 | 0 | 0 | 0 | 0 | 3,336 | 0.372446 |
3175aae311124c706359c0501c4ffc334907f123 | 24,640 | py | Python | sarnet_td3/common/gpu_multithread.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
]
| 16 | 2020-11-04T10:12:09.000Z | 2022-03-26T13:25:16.000Z | sarnet_td3/common/gpu_multithread.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
]
| 5 | 2020-11-18T13:07:11.000Z | 2022-03-06T08:40:01.000Z | sarnet_td3/common/gpu_multithread.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
]
| 5 | 2020-11-26T09:17:23.000Z | 2022-03-06T08:40:53.000Z | import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
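    """Worker thread that serves TD3 trainer requests arriving on
    ``input_queue`` as (action, agent_index, data) tuples; results such as
    actions, critic states and losses are pushed back on ``output_queue``.
    """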
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_qdebug":
out = self.get_qdebug(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
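    """Worker thread analogous to MultiTrainTD3, but serving the REINFORCE
    (vanilla policy gradient) trainers.
    """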
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "add_to_buffer_reinforce":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[-1][i].append(info_n[0]['n'])
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
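    """Spawn ``args.num_gpu_threads`` worker threads (TD3 or REINFORCE,
    depending on ``args.policy_grad``), each with its own input/output queue
    and sharing the default TensorFlow session.
    """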
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
| 48.888889 | 159 | 0.544562 | 23,501 | 0.953774 | 0 | 0 | 0 | 0 | 0 | 0 | 2,986 | 0.121185 |
3175e1912bfa26c6bcd08777e69b55a9f3b60c38 | 823 | py | Python | Scripts/Miscellaneous/Fake_news_web/app.py | valterm/Python_and_the_Web | a51b97870576dde8e8b7e78144e3b7ef8edebeac | [
"MIT"
]
| 1 | 2020-10-19T16:08:47.000Z | 2020-10-19T16:08:47.000Z | Scripts/Miscellaneous/Fake_news_web/app.py | valterm/Python_and_the_Web | a51b97870576dde8e8b7e78144e3b7ef8edebeac | [
"MIT"
]
| null | null | null | Scripts/Miscellaneous/Fake_news_web/app.py | valterm/Python_and_the_Web | a51b97870576dde8e8b7e78144e3b7ef8edebeac | [
"MIT"
]
| null | null | null | from flask import Flask, request, render_template
from sklearn.externals import joblib
from feature import *
pipeline = joblib.load('pipeline.sav')
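# 'pipeline.sav' is assumed to be a pre-trained scikit-learn pipeline produced
# by a separate training script (not included here).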
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/api',methods=['POST'])
def get_delay():
result=request.form
query_title = result['title']
query_author = result['author']
query_text = result['maintext']
print(query_text)
query = get_all_query(query_title, query_author, query_text)
##user_input = {'query':query}
pred = pipeline.predict(query)
print(pred)
dic = {1:'real',0:'fake'}
return f'<html><body><h1>{dic[pred[0]]}</h1> <form action="/"> <button type="submit">back </button> </form></body></html>'
if __name__ == '__main__':
app.run(port=8080, debug=True)
| 24.939394 | 126 | 0.668287 | 0 | 0 | 0 | 0 | 581 | 0.705954 | 0 | 0 | 233 | 0.283111 |
3176815070b07c8d8d7495f2cec6b2d4034159cf | 534 | py | Python | 1019.next-greater-node-in-linked-list.py | elfgzp/leetCode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
]
| 3 | 2019-04-12T06:22:56.000Z | 2019-05-04T04:25:01.000Z | 1019.next-greater-node-in-linked-list.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
]
| null | null | null | 1019.next-greater-node-in-linked-list.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
]
| null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def nextLargerNodes(self, head: ListNode) -> List[int]:
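        # Monotonic-stack approach: copy the list values into an array, keep a
        # stack of indices whose next greater value is still unknown, and
        # resolve them whenever a larger value appears.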
nums = []
while head:
nums.append(head.val)
head = head.next
stack = []
res = [0] * len(nums)
for i, n in enumerate(nums):
while stack and nums[stack[-1]] < n:
res[stack.pop()] = n
stack.append(i)
return res
| 23.217391 | 59 | 0.5 | 396 | 0.741573 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.241573 |
3176a8260273b12af11896b419d23af210fafed0 | 268 | py | Python | WXApi/WXApi/__init__.py | KEDYY/pyweipi | 1bf2ecfb0d680b9ce35fe26b34d1025a8d942aab | [
"MIT"
]
| 1 | 2018-05-22T15:10:14.000Z | 2018-05-22T15:10:14.000Z | WXApi/WXApi/__init__.py | KEDYY/pyweipi | 1bf2ecfb0d680b9ce35fe26b34d1025a8d942aab | [
"MIT"
]
| null | null | null | WXApi/WXApi/__init__.py | KEDYY/pyweipi | 1bf2ecfb0d680b9ce35fe26b34d1025a8d942aab | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Create: 2014/5/20
Update: 2017/11/22
"""
from .WXError import *
from .WXMenu import *
from .WXUtils import *
from .event import *
from .request import MPCenter
__date__ = '2017/3/12'
__version__ = '1.0.1'
__license__ = 'The MIT License'
| 15.764706 | 31 | 0.671642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.380597 |
31772010bf6f4955a22aabae06749a3423e3904b | 1,315 | py | Python | evaluator_package/Parsing_tools.py | MONICA-Project/GOST-tools | 08f0b5bc47e5ab683c6dffe90e8cfdff4305eab4 | [
"BSD-2-Clause"
]
| null | null | null | evaluator_package/Parsing_tools.py | MONICA-Project/GOST-tools | 08f0b5bc47e5ab683c6dffe90e8cfdff4305eab4 | [
"BSD-2-Clause"
]
| null | null | null | evaluator_package/Parsing_tools.py | MONICA-Project/GOST-tools | 08f0b5bc47e5ab683c6dffe90e8cfdff4305eab4 | [
"BSD-2-Clause"
]
| null | null | null | def is_field(token):
"""Checks if the token is a valid ogc type field
"""
return token in ["name", "description", "encodingType", "location", "properties", "metadata",
"definition", "phenomenonTime", "resultTime", "observedArea", "result", "id", "@iot.id",
"resultQuality", "validTime", "time", "parameters", "feature"]
def tokenize_parentheses(tokens):
""" Finds non parsed parentheses in tokens (ex.: ['x(y']['z)'] -> ['x']['(']['y']['z'][')']
:param tokens: a list of tokens
    :return: the list with unchecked parentheses tokenized
"""
for index, token in enumerate(tokens):
if ("(" in token or ")" in token) and len(token) > 1:
parenthesis_index = token.find("(")
parenthesis = "("
if parenthesis_index < 0:
parenthesis_index = token.find(")")
parenthesis = ")"
left_side = token[:parenthesis_index]
right_side = token[parenthesis_index + 1:]
del tokens[index]
if bool(left_side):
tokens.insert(index, left_side)
index += 1
tokens.insert(index, parenthesis)
if bool(right_side):
index += 1
tokens.insert(index, right_side)
| 38.676471 | 109 | 0.543726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.35057 |
31776c8bf86532d4f839128681a9e6532c17cb04 | 1,830 | py | Python | salt/runners/mine.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
]
| 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/runners/mine.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
]
| 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/runners/mine.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
]
| 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
A runner to access data from the salt mine
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import logging
# Import salt libs
import salt.utils.minions
log = logging.getLogger(__name__)
def get(tgt, fun, tgt_type='glob'):
'''
Gathers the data from the specified minions' mine, pass in the target,
function to look up and the target type
CLI Example:
.. code-block:: bash
salt-run mine.get '*' network.interfaces
'''
ret = salt.utils.minions.mine_get(tgt, fun, tgt_type, __opts__)
return ret
def update(tgt,
tgt_type='glob',
clear=False,
mine_functions=None):
'''
.. versionadded:: 2017.7.0
Update the mine data on a certain group of minions.
tgt
Which minions to target for the execution.
tgt_type: ``glob``
The type of ``tgt``.
clear: ``False``
Boolean flag specifying whether updating will clear the existing
mines, or will update. Default: ``False`` (update).
mine_functions
Update the mine data on certain functions only.
This feature can be used when updating the mine for functions
that require refresh at different intervals than the rest of
the functions specified under ``mine_functions`` in the
minion/master config or pillar.
CLI Example:
.. code-block:: bash
salt-run mine.update '*'
salt-run mine.update 'juniper-edges' tgt_type='nodegroup'
'''
ret = __salt__['salt.execute'](tgt,
'mine.update',
tgt_type=tgt_type,
clear=clear,
mine_functions=mine_functions)
return ret
| 26.142857 | 74 | 0.60765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,204 | 0.657923 |
3177cd1e5b203f5ec64cb02bb1434a9783422e6d | 312 | py | Python | app.py | rhedgeco/test_plaid_webapp | e821e354796da9f35689eb386df9366407e2907b | [
"MIT"
]
| null | null | null | app.py | rhedgeco/test_plaid_webapp | e821e354796da9f35689eb386df9366407e2907b | [
"MIT"
]
| null | null | null | app.py | rhedgeco/test_plaid_webapp | e821e354796da9f35689eb386df9366407e2907b | [
"MIT"
]
| null | null | null | from plaid import Client
from backend.link_token import LinkToken
from general_falcon_webserver import WebApp
client = Client(client_id='5e2e3527dd6924001167e8e8', secret='0b89f518880456b6f60020f481b3d7', environment='sandbox')
app = WebApp()
app.add_route('link', LinkToken(client))
app.launch_webserver()
| 24 | 117 | 0.817308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.233974 |
31789285a83d618d1865746d4dfadf7a34d38be3 | 403 | py | Python | examples/circuitplayground_light_plotter.py | sommersoft/Adafruit_CircuitPython_CircuitPlayground | 418ca982b34759b2804c2e816cdb505b2b818135 | [
"MIT"
]
| null | null | null | examples/circuitplayground_light_plotter.py | sommersoft/Adafruit_CircuitPython_CircuitPlayground | 418ca982b34759b2804c2e816cdb505b2b818135 | [
"MIT"
]
| null | null | null | examples/circuitplayground_light_plotter.py | sommersoft/Adafruit_CircuitPython_CircuitPlayground | 418ca982b34759b2804c2e816cdb505b2b818135 | [
"MIT"
]
| null | null | null | """If you're using Mu, this example will plot the light levels from the light sensor (located next
to the eye) on your Circuit Playground. Try shining a flashlight on your Circuit Playground, or
covering the light sensor to see the plot increase and decrease."""
import time
from adafruit_circuitplayground import cp
while True:
print("Light:", cp.light)
print((cp.light,))
time.sleep(0.1)
| 36.636364 | 98 | 0.751861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.669975 |
31793cdf4264c68e2f922f5ab5df2e6e45071db9 | 539 | py | Python | kickeststats/exceptions.py | antimaLinux/kickscarper | c2607847f1c7ad8bc30014ab4e62f0976ace5f0f | [
"MIT"
]
| null | null | null | kickeststats/exceptions.py | antimaLinux/kickscarper | c2607847f1c7ad8bc30014ab4e62f0976ace5f0f | [
"MIT"
]
| 1 | 2020-10-14T06:44:12.000Z | 2020-10-14T06:44:12.000Z | kickeststats/exceptions.py | antimaLinux/kickscraper | c2607847f1c7ad8bc30014ab4e62f0976ace5f0f | [
"MIT"
]
| null | null | null | """Exception utilities."""
class ParsingException(Exception):
pass
class EnvVariableNotSet(Exception):
def __init__(self, varname: str) -> None:
super(EnvVariableNotSet, self).__init__(f"Env variable [{varname}] not set.")
class InvalidLineUp(Exception):
pass
class UnsupportedLineUp(Exception):
def __init__(self, line_up_name: str) -> None:
super(UnsupportedLineUp, self).__init__(
f"Line-up [{line_up_name}] is not supported."
)
class InvalidTeamLineup(Exception):
pass
| 20.730769 | 85 | 0.684601 | 497 | 0.922078 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.198516 |
3179df3995795589d0d2f77e4b98c9ca2097dc0f | 6,431 | py | Python | helpers/HurstEstimationNumerics.py | Baozhen-Li/SurfaceTopography | 37c70a4020c74dc56a4509969e760259ba93ec61 | [
"MIT"
]
| null | null | null | helpers/HurstEstimationNumerics.py | Baozhen-Li/SurfaceTopography | 37c70a4020c74dc56a4509969e760259ba93ec61 | [
"MIT"
]
| null | null | null | helpers/HurstEstimationNumerics.py | Baozhen-Li/SurfaceTopography | 37c70a4020c74dc56a4509969e760259ba93ec61 | [
"MIT"
]
| null | null | null | #
# Copyright 2018, 2020 Lars Pastewka
# 2019-2020 Antoine Sanner
# 2015-2016 Till Junge
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Tests to understand the difficulties in extracting the Hurst exponent from noisy data
"""
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
import PyCo.Tools as Tools
import SurfaceTopography as Surf
def plot_naive(surface, lam_max):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q = surf.q
C = surf.C
H, alpha = surf.estimate_hurst_naive(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("Naive: H={:.2f}, h_rms={:.2e}".format(H, np.sqrt((surface.heights() ** 2).mean())))
a, b = np.polyfit(np.log(q), np.log(C), 1)
ax.plot(q, q**(-2-2*H)*alpha, label="{}, H={:.2f}".format('fit', H))
ax.legend(loc='best')
def plot_grad_C0(surface, H_in, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]
C = surf.C[sl]
dim = 2
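    # The model being fitted is a self-affine PSD, C(q) ~ C0 * q**(-2 - 2H);
    # C0_of_H picks a prefactor for a fixed H and `objective` measures the
    # weighted residual of that fit.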
def C0_of_H(H):
return ((q**(-3-2*H)).sum() /
(q**(-5-4*H)/C).sum())
def objective(H, C0):
return ((1 - C0*q**(-2*H-2)/C)**2 /
q**(dim-1)).sum()
C0 = C0_of_H(H_in)
O0 = objective(H_in, C0)
c_s = np.linspace(0, 2*C0, 51)
o_s = np.zeros_like(c_s)
for i, c in enumerate(c_s):
o_s[i] = objective(H_in, c)
fig = plt.figure()
ax=fig.add_subplot(111)
fig.suptitle('grad(C0)')
ax.plot(c_s, o_s, marker= '+')
ax.scatter(C0, O0, marker='x', label = 'root', c='r')
ax.grid(True)
print("C0 = {}, obj0 = {}".format(C0, O0))
return C0
def plot_grad_H(surface, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]# np.array(surf.q[sl][0], surf.q[sl][-1])
C = surf.C[sl]# np.array(surf.C[sl][0], surf.C[sl][-1])
dim = 2
def C0_of_H(H):
return ((C**2/q**(-5-dim-4*H)).sum() /
(C/q**(-3-dim-2*H)).sum())
def grad_h(H, C0):
return (4*C0/C*np.log(q)*q**(-1-2*H-dim)*(1 - C0*q**(-2-2*H)/C)).sum()
def objective(H, C0):
return ((c/q**(-2*H-2) - C0)**2 /
q**(dim-1)).sum()
def full_obj(H):
C0 = C0_of_H(H)
return ((1 - C0/C*q**(-2*H-2))**2 /
q**(dim-1)).sum()
h_s = np.linspace(.0, 2., 51)
o_s = np.zeros_like(h_s)
g_s = np.zeros_like(h_s)
for i, h in enumerate(h_s):
c = C0_of_H(h)
o_s[i] = objective(h, c)
g_s[i] = grad_h(h, c)
H_opt, obj_opt, err, nfeq = scipy.optimize.fminbound(full_obj, 0, 2, full_output=True)
if err != 0:
raise Exception()
fig = plt.figure()
ax=fig.add_subplot(211)
ax.set_xlim(h_s[0], h_s[-1])
fig.suptitle('grad(H)')
ax.plot(h_s, o_s, marker= '+')
ax.grid(True)
ax.scatter(H_opt, obj_opt, marker='x', label = 'root', c='r')
ax=fig.add_subplot(212)
ax.set_xlim(h_s[0], h_s[-1])
ax.plot(h_s, g_s, marker= '+')
grad_opt = grad_h(H_opt, C0_of_H(H_opt))
ax.scatter(H_opt, grad_opt, marker='x', label = 'root', c='r')
#res = scipy.optimize.fmin
#print("H_out = {}, obj0 = {}".format(C0, O0))
ax.grid(True)
return H_opt, C0_of_H(H_opt)
def compare_to_PyPy(surface, lam_max, H_ref, C0_ref):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q
C = surf.C
H, alpha, res = surf.estimate_hurst_alt(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("New: H_pypy={:.2f}, H_ref = {:.2f}, h_rms={:.2e}".format(H, H_ref, np.sqrt((surface.heights() ** 2).mean())))
ax.plot(q[sl], q[sl]**(-2-2*H)*alpha, label="{}, H={:.4f}".format('fit', H), lw = 3)
ax.plot(q[sl], q[sl]**(-2-2*H_ref)*C0_ref, label="{}, H={:.4f}".format('ref_fit', H_ref), lw = 3)
ax.legend(loc='best')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog(q[sl], C[sl]/(q[sl]**(-2-2*H_ref)*C0_ref), alpha=.1)
ax.errorbar(q_g, mean/(q_g**(-2-2*H_ref)*C0_ref), yerr=err/(q_g**(-2-2*H_ref)*C0_ref))
def main():
siz = 2000e-9
lam_max = .2*siz
size = (siz, siz)
hurst = .75
h_rms = 3.24e-8
res = 128
nb_grid_pts = (res, res)
seed = 2
surface = Tools.RandomSurfaceGaussian(
nb_grid_pts, size, hurst, h_rms, lambda_max=lam_max, seed=seed).get_surface()
plot_naive(surface, lam_max)
plot_grad_C0(surface, hurst, lam_max)
H, C0 = plot_grad_H(surface, lam_max)
print("H_ref = {}, C0_ref = {}".format(H, C0))
compare_to_PyPy(surface, lam_max, H, C0)
if __name__ == "__main__":
main()
plt.show()
| 30.770335 | 127 | 0.605038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,730 | 0.269009 |
317a0b761a00f16ebd6df1752f32454650a3de35 | 2,668 | py | Python | test/serverless_mock_test.py | zhangyuan/serverless-mock-python | f1a1b84fcade2d090501914b4a1f6b4fdbbccfb4 | [
"MIT"
]
| 5 | 2017-03-03T10:36:51.000Z | 2019-01-14T05:12:29.000Z | test/serverless_mock_test.py | zhangyuan/serverless-mock-python | f1a1b84fcade2d090501914b4a1f6b4fdbbccfb4 | [
"MIT"
]
| 1 | 2017-03-30T07:18:57.000Z | 2017-03-30T07:18:57.000Z | test/serverless_mock_test.py | zhangyuan/serverless-mock-python | f1a1b84fcade2d090501914b4a1f6b4fdbbccfb4 | [
"MIT"
]
| null | null | null | import threading
import requests
import json
import os
from nose.tools import *
from server import Httpd
app_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")
class TestServerlessMock(object):
def test_ok(self):
ok_(True)
def setUp(self):
self.httpd = Httpd(app_path, 0)
thread = threading.Thread(target=self.httpd.serve, args=())
thread.daemon = True
thread.start()
self.prefix = "http://localhost:%d" % self.httpd.port
def tearDown(self):
self.httpd.shutdown()
def test_return_hello_world(self):
response = requests.get(self.url(""))
eq_("Hello World", response.text)
def test_simple_get(self):
response = requests.get(self.url("/simple_get"))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
body = json.loads(data.get("body"))
eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))
def test_simple_get_and_ignore_query_string(self):
response = requests.get(self.url("/simple_get?status=unknown"))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
body = json.loads(data.get("body"))
eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))
def test_simple_post(self):
response = requests.post(self.url("/simple_post"))
eq_(200, response.status_code)
data = response.json()
eq_(201, data.get("statusCode"))
def test_post_with_payload(self):
response = requests.post(self.url("/post_with_payload"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"id" : 123}, data.get("body"))
def test_post_with_payload_and_template(self):
response = requests.post(self.url("/post_with_payload_and_template"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"body" : {"id" : 123}}, data.get("body"))
def test_post_with_payload_and_template_without_any_function(self):
response = requests.post(self.url("/post_with_payload_and_template_without_any_function"), data=json.dumps({"id" : 123}))
eq_(200, response.status_code)
data = response.json()
eq_(200, data.get("statusCode"))
eq_({"action" : "trigger"}, data.get("body"))
def url(self, path):
return "%s%s" % (self.prefix, path)
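# These tests use nose-style assertions (ok_/eq_), so they are normally run with
# a nose-compatible runner, e.g. `nosetests test/serverless_mock_test.py` (the
# runner choice is an assumption); the mock app definitions live under test/app/.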
| 31.388235 | 129 | 0.635682 | 2,483 | 0.93066 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.182909 |
317a91916b77974fe06fa6db1fb05dbc3398d9cf | 1,262 | py | Python | setup.py | holoyan/python-data-validation | e928c4131072c53cb8ace1fbaa83216f06ab6bfe | [
"MIT"
]
| 3 | 2021-03-16T05:47:46.000Z | 2021-03-23T17:43:55.000Z | setup.py | holoyan/python-data-validation | e928c4131072c53cb8ace1fbaa83216f06ab6bfe | [
"MIT"
]
| null | null | null | setup.py | holoyan/python-data-validation | e928c4131072c53cb8ace1fbaa83216f06ab6bfe | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyva',
packages=find_packages(),
version='0.4.1',
license='MIT',
description='Simple and flexible python data validation library',
long_description=long_description,
long_description_content_type='text/markdown',
author='Artak',
author_email='[email protected]',
url='https://github.com/holoyan/python-data-validation',
keywords=['data', 'validation', 'validator', 'data validator'],
    install_requires=[  # runtime dependencies
'python-dateutil',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
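# For local development the package can be installed in editable mode with
# `pip install -e .` (standard pip usage, not specific to this project).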
| 35.055556 | 73 | 0.652139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 660 | 0.522979 |
317b9439891e4fcd8a172759ee5646bbfda6a0f1 | 721 | py | Python | bus_system/apps/trip/migrations/0007_auto_20210624_1812.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | bus_system/apps/trip/migrations/0007_auto_20210624_1812.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | bus_system/apps/trip/migrations/0007_auto_20210624_1812.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.12 on 2021-06-24 18:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('trip', '0006_remove_travelmodel_driver'),
]
operations = [
migrations.AddField(
model_name='tripmodel',
name='tickets_sold',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='travelmodel',
name='trip',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='travel_trip_set', related_query_name='travel_trip_set', to='trip.tripmodel'),
),
]
| 28.84 | 172 | 0.646325 | 594 | 0.823856 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.249653 |
317c49fa981bf39fb8fbc6c8497db1b320c837f1 | 29,766 | py | Python | geoscilabs/dcip/DCWidgetPlate2_5D.py | lheagy/geosci-labs | a946feb0f2576821a232355d539bc71a5f5b0d0c | [
"MIT"
]
| 1 | 2019-01-04T10:06:44.000Z | 2019-01-04T10:06:44.000Z | geoscilabs/dcip/DCWidgetPlate2_5D.py | victortocantins/geosci-labs | 0963c5766477e59af6625954036f580481cfaf82 | [
"MIT"
]
| 1 | 2018-12-30T20:09:25.000Z | 2018-12-30T20:09:25.000Z | geoscilabs/dcip/DCWidgetPlate2_5D.py | victortocantins/geosci-labs | 0963c5766477e59af6625954036f580481cfaf82 | [
"MIT"
]
| null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from scipy.constants import epsilon_0
from scipy.ndimage.measurements import center_of_mass
from ipywidgets import IntSlider, FloatSlider, FloatText, ToggleButtons
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from pymatsolver import Pardiso
from discretize import TensorMesh
from SimPEG import maps, SolverLU, utils
from SimPEG.utils import ExtractCoreMesh
from SimPEG.electromagnetics.static import resistivity as DC
from ..base import widgetify
# Mesh and mapping can be globals
npad = 15
growrate = 2.0
cs = 0.5
hx = [(cs, npad, -growrate), (cs, 200), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 100)]
mesh = TensorMesh([hx, hy], "CN")
expmap = maps.ExpMap(mesh)
mapping = expmap
dx = 5
xr = np.arange(-40, 41, dx)
dxr = np.diff(xr)
xmin = -40.0
xmax = 40.0
ymin = -40.0
ymax = 8.0
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (
(mesh.gridFx[:, 0] >= xmin)
& (mesh.gridFx[:, 0] <= xmax)
& (mesh.gridFx[:, 1] >= ymin)
& (mesh.gridFx[:, 1] <= ymax)
)
indy = (
(mesh.gridFy[:, 0] >= xmin)
& (mesh.gridFy[:, 0] <= xmax)
& (mesh.gridFy[:, 1] >= ymin)
& (mesh.gridFy[:, 1] <= ymax)
)
indF = np.concatenate((indx, indy))
_cache = {
"A": None,
"B": None,
"dx": None,
"dz": None,
"xc": None,
"zc": None,
"rotAng": None,
"sigplate": None,
"sighalf": None,
}
def plate_fields(A, B, dx, dz, xc, zc, rotAng, sigplate, sighalf):
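    """Solve the 2.5D DC problem for the plate-in-halfspace model (total field)
    and for the halfspace alone (primary field), re-using cached solutions when
    the source and plate parameters are unchanged."""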
re_run = (
_cache["A"] != A
or _cache["B"] != B
or _cache["dx"] != dx
or _cache["dz"] != dz
or _cache["xc"] != xc
or _cache["zc"] != zc
or _cache["rotAng"] != rotAng
or _cache["sigplate"] != sigplate
or _cache["sighalf"] != sighalf
)
if re_run:
# Create halfspace model
mhalf = np.log(sighalf * np.ones([mesh.nC]))
# Create true model with plate
mtrue = createPlateMod(xc, zc, dx, dz, rotAng, sigplate, sighalf)
if B == []:
src = DC.sources.Pole([], np.r_[A, 0.0])
else:
src = DC.sources.Dipole([], np.r_[A, 0.0], np.r_[B, 0.0])
survey = DC.survey.Survey([src])
problem = DC.Simulation2DCellCentered(
mesh, survey=survey, sigmaMap=mapping, solver=Pardiso, bc_type='Dirichlet'
)
problem_prim = DC.Simulation2DCellCentered(
mesh, survey=survey, sigmaMap=mapping, solver=Pardiso, bc_type='Dirichlet'
)
total_field = problem.fields(mtrue)
primary_field = problem_prim.fields(mhalf)
_cache["A"] = A
_cache["B"] = B
_cache["dx"] = dx
_cache["dz"] = dz
_cache["xc"] = xc
_cache["zc"] = zc
_cache["rotAng"] = rotAng
_cache["sigplate"] = sigplate
_cache["sighalf"] = sighalf
_cache["mtrue"] = mtrue
_cache["mhalf"] = mhalf
_cache["src"] = src
_cache["primary_field"] = primary_field
_cache["total_field"] = total_field
else:
mtrue = _cache["mtrue"]
mhalf = _cache["mhalf"]
src = _cache["src"]
primary_field = _cache["primary_field"]
total_field = _cache["total_field"]
return mtrue, mhalf, src, primary_field, total_field
def getPlateCorners(xc, zc, dx, dz, rotAng):
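    """Return the four corner coordinates of a dx-by-dz plate centred at
    (xc, zc) and rotated by rotAng degrees."""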
    # Form rotation matrix
rotMat = np.array(
[
[np.cos(rotAng * (np.pi / 180.0)), -np.sin(rotAng * (np.pi / 180.0))],
[np.sin(rotAng * (np.pi / 180.0)), np.cos(rotAng * (np.pi / 180.0))],
]
)
originCorners = np.array(
[
[-0.5 * dx, 0.5 * dz],
[0.5 * dx, 0.5 * dz],
[-0.5 * dx, -0.5 * dz],
[0.5 * dx, -0.5 * dz],
]
)
rotPlateCorners = np.dot(originCorners, rotMat)
plateCorners = rotPlateCorners + np.hstack(
[np.repeat(xc, 4).reshape([4, 1]), np.repeat(zc, 4).reshape([4, 1])]
)
return plateCorners
def createPlateMod(xc, zc, dx, dz, rotAng, sigplate, sighalf):
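    """Return the log-conductivity model: sighalf everywhere, sigplate inside
    the plate polygon."""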
# use matplotlib paths to find CC inside of polygon
plateCorners = getPlateCorners(xc, zc, dx, dz, rotAng)
verts = [
(plateCorners[0, :]), # left, top
(plateCorners[1, :]), # right, top
(plateCorners[3, :]), # right, bottom
(plateCorners[2, :]), # left, bottom
(plateCorners[0, :]), # left, top (closes polygon)
]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
CCLocs = mesh.gridCC
insideInd = np.where(path.contains_points(CCLocs))
# Check selected cell centers by plotting
# print insideInd
# fig = plt.figure()
# ax = fig.add_subplot(111)
# patch = patches.PathPatch(path, facecolor='none', lw=2)
# ax.add_patch(patch)
# plt.scatter(CCLocs[insideInd,0],CCLocs[insideInd,1])
# ax.set_xlim(-10,10)
# ax.set_ylim(-20,0)
# plt.axes().set_aspect('equal')
# plt.show()
mtrue = sighalf * np.ones([mesh.nC])
mtrue[insideInd] = sigplate
mtrue = np.log(mtrue)
return mtrue
def get_Surface_Potentials(survey, src, field_obj):
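    """Extract the potential along the surface (top row of cell centres),
    re-referencing it to a distant point for pole-source surveys."""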
phi = field_obj[src, "phi"]
CCLoc = mesh.gridCC
zsurfaceLoc = np.max(CCLoc[:, 1])
surfaceInd = np.where(CCLoc[:, 1] == zsurfaceLoc)
xSurface = CCLoc[surfaceInd, 0].T
phiSurface = phi[surfaceInd]
phiScale = 0.0
if survey == "Pole-Dipole" or survey == "Pole-Pole":
refInd = utils.closestPoints(mesh, [xmax + 60.0, 0.0], gridLoc="CC")
# refPoint = CCLoc[refInd]
# refSurfaceInd = np.where(xSurface == refPoint[0])
# phiScale = np.median(phiSurface)
phiScale = phi[refInd]
phiSurface = phiSurface - phiScale
return xSurface, phiSurface, phiScale
def sumPlateCharges(xc, zc, dx, dz, rotAng, qSecondary):
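    """Sum the positive and negative secondary charges in a region slightly
    larger than the plate and return the totals together with their
    charge-weighted average locations."""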
# plateCorners = getPlateCorners(xc,zc,dx,dz,rotAng)
chargeRegionCorners = getPlateCorners(xc, zc, dx + 1.0, dz + 1.0, rotAng)
# plateVerts = [
# (plateCorners[0,:]), # left, top
# (plateCorners[1,:]), # right, top
# (plateCorners[3,:]), # right, bottom
# (plateCorners[2,:]), # left, bottom
# (plateCorners[0,:]), # left, top (closes polygon)
# ]
chargeRegionVerts = [
(chargeRegionCorners[0, :]), # left, top
(chargeRegionCorners[1, :]), # right, top
(chargeRegionCorners[3, :]), # right, bottom
(chargeRegionCorners[2, :]), # left, bottom
(chargeRegionCorners[0, :]), # left, top (closes polygon)
]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
# platePath = Path(plateVerts, codes)
chargeRegionPath = Path(chargeRegionVerts, codes)
CCLocs = mesh.gridCC
# plateInsideInd = np.where(platePath.contains_points(CCLocs))
chargeRegionInsideInd = np.where(chargeRegionPath.contains_points(CCLocs))
plateChargeLocs = CCLocs[chargeRegionInsideInd]
plateCharge = qSecondary[chargeRegionInsideInd]
posInd = np.where(plateCharge >= 0)
negInd = np.where(plateCharge < 0)
qPos = utils.mkvc(plateCharge[posInd])
qNeg = utils.mkvc(plateCharge[negInd])
qPosLoc = plateChargeLocs[posInd, :][0]
qNegLoc = plateChargeLocs[negInd, :][0]
# qPosData = np.vstack([qPosLoc[:, 0], qPosLoc[:, 1], qPos]).T
# qNegData = np.vstack([qNegLoc[:, 0], qNegLoc[:, 1], qNeg]).T
if qNeg.shape == (0,) or qPos.shape == (0,):
qNegAvgLoc = np.r_[-10, -10]
qPosAvgLoc = np.r_[+10, -10]
else:
qNegAvgLoc = np.average(qNegLoc, axis=0, weights=qNeg)
qPosAvgLoc = np.average(qPosLoc, axis=0, weights=qPos)
qPosSum = np.sum(qPos)
qNegSum = np.sum(qNeg)
# # Check things by plotting
# fig = plt.figure()
# ax = fig.add_subplot(111)
# platePatch = patches.PathPatch(platePath, facecolor='none', lw=2)
# ax.add_patch(platePatch)
# chargeRegionPatch = patches.PathPatch(chargeRegionPath, facecolor='none', lw=2)
# ax.add_patch(chargeRegionPatch)
# plt.scatter(qNegAvgLoc[0],qNegAvgLoc[1],color='b')
# plt.scatter(qPosAvgLoc[0],qPosAvgLoc[1],color='r')
# ax.set_xlim(-15,5)
# ax.set_ylim(-25,-5)
# plt.axes().set_aspect('equal')
# plt.show()
return qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc
# The only thing we need to make it work is a 2.5D field object in SimPEG
def getSensitivity(survey, A, B, M, N, model):
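    """Return the sensitivity (row of the Jacobian) of the A-B-M-N measurement
    with respect to the log-conductivity model."""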
src_type, rx_type = survey.split("-")
if rx_type == "Pole":
rx = DC.receivers.Pole(np.r_[M, 0.0])
else:
rx = DC.receivers.Dipole(np.r_[M, 0.0], np.r_[N, 0.0])
if src_type == "Pole":
src = DC.sources.Pole([rx], np.r_[A, 0.0])
else:
src = DC.sources.Dipole([rx], np.r_[A, 0.0], np.r_[B, 0.0])
# Model mappings
expmap = maps.ExpMap(mesh)
mapping = expmap
survey = DC.Survey([src])
sim = DC.Simulation3DCellCentered(
mesh, sigmaMap=mapping, solver=Pardiso, survey=survey
)
J = sim.getJ(model)[0]
return J
def calculateRhoA(survey, VM, VN, A, B, M, N):
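    """Apparent resistivity: 2*pi*G times the measured voltage, with the
    geometric factor G determined by the electrode configuration."""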
eps = 1e-9 # to stabilize division
if survey == "Dipole-Dipole":
G = 1.0 / (
1.0 / (np.abs(A - M) + eps)
- 1.0 / (np.abs(M - B) + eps)
- 1.0 / (np.abs(N - A) + eps)
+ 1.0 / (np.abs(N - B) + eps)
)
rho_a = (VM - VN) * 2.0 * np.pi * G
elif survey == "Pole-Dipole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps) - 1.0 / (np.abs(N - A) + eps))
rho_a = (VM - VN) * 2.0 * np.pi * G
elif survey == "Dipole-Pole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps) - 1.0 / (np.abs(M - B) + eps))
rho_a = (VM) * 2.0 * np.pi * G
elif survey == "Pole-Pole":
G = 1.0 / (1.0 / (np.abs(A - M) + eps))
rho_a = (VM) * 2.0 * np.pi * G
return rho_a
def PLOT(
survey, A, B, M, N, dx, dz, xc, zc, rotAng, rhohalf, rhoplate, Field, Type, Scale
):
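    """Draw the surface potentials and apparent resistivity (top panel) and the
    selected Field (Model, Potential, E, J, Charge or Sensitivity) on the core
    mesh (bottom panel)."""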
labelsize = 16.0
ticksize = 16.0
sigplate = 1.0 / rhoplate
sighalf = 1.0 / rhohalf
if survey == "Pole-Dipole" or survey == "Pole-Pole":
B = []
mtrue, mhalf, src, primary_field, total_field = plate_fields(
A, B, dx, dz, xc, zc, rotAng, sigplate, sighalf
)
fig, ax = plt.subplots(2, 1, figsize=(9 * 1.5, 9 * 1.8), sharex=True)
fig.subplots_adjust(right=0.8, wspace=0.05, hspace=0.05)
xSurface, phiTotalSurface, phiScaleTotal = get_Surface_Potentials(
survey, src, total_field
)
xSurface, phiPrimSurface, phiScalePrim = get_Surface_Potentials(
survey, src, primary_field
)
ylim = np.r_[-1.0, 1.0] * np.max(np.abs(phiTotalSurface))
xlim = np.array([-40, 40])
if survey == "Dipole-Pole" or survey == "Pole-Pole":
MInd = np.where(xSurface == M)
N = []
VM = phiTotalSurface[MInd[0]]
VN = 0.0
VMprim = phiPrimSurface[MInd[0]]
VNprim = 0.0
else:
MInd = np.where(xSurface == M)
NInd = np.where(xSurface == N)
VM = phiTotalSurface[MInd[0]]
VN = phiTotalSurface[NInd[0]]
VMprim = phiPrimSurface[MInd[0]]
VNprim = phiPrimSurface[NInd[0]]
# 2D geometric factor
G2D = rhohalf / (calculateRhoA(survey, VMprim, VNprim, A, B, M, N))
# Subplot 1: Full set of surface potentials
ax[0].plot(xSurface, phiTotalSurface, color=[0.1, 0.5, 0.1], linewidth=2)
ax[0].plot(xSurface, phiPrimSurface, linestyle="dashed", linewidth=0.5, color="k")
ax[0].grid(
which="both", linestyle="-", linewidth=0.5, color=[0.2, 0.2, 0.2], alpha=0.5
)
if survey == "Pole-Dipole" or survey == "Pole-Pole":
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
else:
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
ax[0].plot(B, 0, "_", markersize=12, markeredgewidth=3, color=[0.0, 0.0, 1.0])
ax[0].set_ylabel("Potential, (V)", fontsize=labelsize)
ax[0].set_xlabel("x (m)", fontsize=labelsize)
ax[0].set_xlim(xlim)
ax[0].set_ylim(ylim)
if survey == "Dipole-Pole" or survey == "Pole-Pole":
ax[0].plot(M, VM, "o", color="k")
xytextM = (M + 0.5, max(min(VM, ylim.max()), ylim.min()) + 10)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
else:
ax[0].plot(M, VM, "o", color="k")
ax[0].plot(N, VN, "o", color="k")
xytextM = (M + 0.5, max(min(VM, ylim.max()), ylim.min()) + 10)
xytextN = (N + 0.5, max(min(VN, ylim.max()), ylim.min()) + 10)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
ax[0].annotate("%2.1e" % (VN), xy=xytextN, xytext=xytextN, fontsize=labelsize)
ax[0].tick_params(axis="both", which="major", labelsize=ticksize)
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].text(
xlim.max() + 1,
ylim.max() - 0.1 * ylim.max(),
"$\\rho_a$ = %2.2f" % (G2D * calculateRhoA(survey, VM, VN, A, B, M, N)),
verticalalignment="bottom",
bbox=props,
fontsize=labelsize,
)
ax[0].legend(["Model Potential", "Half-Space Potential"], loc=3, fontsize=labelsize)
# # Subplot 2: Surface potentials with gaps around current electrodes
# # Select points more than 5m from Tx electrodes of plotting
# xSurface_AInd = np.where(np.abs(xSurface - A) >= 5.)[0]
# xSurface_BInd = np.where(np.abs(xSurface - B) >= 5.)[0]
# xSurfaceTxGapInd = list(set(xSurface_AInd).intersection(xSurface_BInd))
# xSurface_TxGap = xSurface[xSurfaceTxGapInd]
# phiTotalSurface_TxGap = phiTotalSurface[xSurfaceTxGapInd]
# phiPrimSurface_TxGap = phiPrimSurface[xSurfaceTxGapInd]
# ylim = np.r_[-1., 1.]*(np.max(np.abs(phiTotalSurface_TxGap)) - 0.05*np.max(np.abs(phiTotalSurface_TxGap)))
# ax[1].plot(xSurface_TxGap,phiTotalSurface_TxGap ,color=[0.1,0.5,0.1],linewidth=2)
# ax[1].plot(xSurface_TxGap,phiPrimSurface_TxGap ,linestyle='dashed',linewidth=0.5,color='k')
# ax[1].grid(which='both',linestyle='-',linewidth=0.5,color=[0.2,0.2,0.2],alpha=0.5)
# ax[1].plot(A,0,'+',markersize = 12, markeredgewidth = 3, color=[1.,0.,0])
# ax[1].plot(B,0,'_',markersize = 12, markeredgewidth = 3, color=[0.,0.,1.])
# ax[1].set_ylabel('Potential, (V)',fontsize = labelsize)
# ax[1].set_xlabel('x (m)',fontsize = labelsize)
# ax[1].set_xlim(xlim)
# ax[1].set_ylim(ylim)
# ax[1].plot(M,VM,'o',color='k')
# ax[1].plot(N,VN,'o',color='k')
# ax[1].annotate('%2.1e'%(VM), xy=xytextM, xytext=xytextM,fontsize = labelsize)
# ax[1].annotate('%2.1e'%(VN), xy=xytextN, xytext=xytextN,fontsize = labelsize)
# ax[1].tick_params(axis='both', which='major', labelsize=ticksize)
# props = dict(boxstyle='round', facecolor='grey', alpha=0.4)
# ax[1].text(xlim.max()+1,ylim.max()-0.1*ylim.max(),'$\\rho_a$ = %2.2f'%(G2D*rho_a(VM,VN,A,B,M,N)),
# verticalalignment='bottom', bbox=props, fontsize = labelsize)
# ax[1].legend(['Model Potential','Half-Space Potential'], loc=3, fontsize = labelsize)
if Field == "Model":
        label = "Resistivity (ohm-m)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "jet_r"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "jet_r"}
if Type == "Total":
u = 1.0 / (mapping * mtrue)
elif Type == "Primary":
u = 1.0 / (mapping * mhalf)
elif Type == "Secondary":
u = 1.0 / (mapping * mtrue) - 1.0 / (mapping * mhalf)
if Scale == "Log":
linthresh = 10.0
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(
linthresh=linthresh, linscale=0.2
),
"cmap": "jet_r",
}
elif Field == "Potential":
label = "Potential (V)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
linthresh = 10.0
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "viridis",
}
if Type == "Total":
# formatter = LogFormatter(10, labelOnlyBase=False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
u = total_field[src, "phi"] - phiScaleTotal
elif Type == "Primary":
# formatter = LogFormatter(10, labelOnlyBase=False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
u = primary_field[src, "phi"] - phiScalePrim
elif Type == "Secondary":
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
uTotal = total_field[src, "phi"] - phiScaleTotal
uPrim = primary_field[src, "phi"] - phiScalePrim
u = uTotal - uPrim
elif Field == "E":
label = "Electric Field (V/m)"
xtype = "F"
view = "vec"
streamOpts = {"color": "w"}
ind = indF
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "e"]
elif Type == "Primary":
u = primary_field[src, "e"]
elif Type == "Secondary":
uTotal = total_field[src, "e"]
uPrim = primary_field[src, "e"]
u = uTotal - uPrim
elif Field == "J":
label = "Current density ($A/m^2$)"
xtype = "F"
view = "vec"
streamOpts = {"color": "w"}
ind = indF
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
pcolorOpts = {"norm": matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "j"]
elif Type == "Primary":
u = primary_field[src, "j"]
elif Type == "Secondary":
uTotal = total_field[src, "j"]
uPrim = primary_field[src, "j"]
u = uTotal - uPrim
elif Field == "Charge":
label = "Charge Density ($C/m^2$)"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "RdBu_r"}
if Scale == "Log":
linthresh = 1e-12
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "RdBu_r",
}
formatter = "%.1e"
if Type == "Total":
u = total_field[src, "charge"]
elif Type == "Primary":
u = primary_field[src, "charge"]
elif Type == "Secondary":
uTotal = total_field[src, "charge"]
uPrim = primary_field[src, "charge"]
u = uTotal - uPrim
elif Field == "Sensitivity":
label = "Sensitivity"
xtype = "CC"
view = "real"
streamOpts = None
ind = indCC
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
# formatter = LogFormatter(10, labelOnlyBase=False)
pcolorOpts = {"cmap": "viridis"}
if Scale == "Log":
linthresh = 1e-4
pcolorOpts = {
"norm": matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2),
"cmap": "viridis",
}
# formatter = formatter = "$10^{%.1f}$"
formatter = "%.1e"
if Type == "Total":
u = getSensitivity(survey, A, B, M, N, mtrue)
elif Type == "Primary":
u = getSensitivity(survey, A, B, M, N, mhalf)
elif Type == "Secondary":
uTotal = getSensitivity(survey, A, B, M, N, mtrue)
uPrim = getSensitivity(survey, A, B, M, N, mhalf)
u = uTotal - uPrim
# u = np.log10(abs(u))
if Scale == "Log":
eps = 1e-16
else:
eps = 0.0
dat = meshcore.plotImage(
u[ind] + eps,
v_type=xtype,
ax=ax[1],
grid=False,
view=view,
stream_opts=streamOpts,
pcolor_opts=pcolorOpts,
) # gridOpts={'color':'k', 'alpha':0.5}
# Get plate corners
plateCorners = getPlateCorners(xc, zc, dx, dz, rotAng)
if rhoplate != rhohalf:
# plot top of plate outline
ax[1].plot(
plateCorners[[0, 1], 0],
plateCorners[[0, 1], 1],
linestyle="dashed",
color="k",
)
# plot east side of plate outline
ax[1].plot(
plateCorners[[1, 3], 0],
plateCorners[[1, 3], 1],
linestyle="dashed",
color="k",
)
# plot bottom of plate outline
ax[1].plot(
plateCorners[[2, 3], 0],
plateCorners[[2, 3], 1],
linestyle="dashed",
color="k",
)
# plot west side of plate outline
ax[1].plot(
plateCorners[[0, 2], 0],
plateCorners[[0, 2], 1],
linestyle="dashed",
color="k",
)
if (Field == "Charge") and (Type != "Primary") and (Type != "Total"):
qTotal = total_field[src, "charge"]
qPrim = primary_field[src, "charge"]
qSecondary = qTotal - qPrim
qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc = sumPlateCharges(
xc, zc, dx, dz, rotAng, qSecondary
)
ax[1].plot(
qPosAvgLoc[0],
qPosAvgLoc[1],
marker=".",
color="black",
markersize=labelsize,
)
ax[1].plot(
qNegAvgLoc[0],
qNegAvgLoc[1],
marker=".",
color="black",
markersize=labelsize,
)
if qPosAvgLoc[0] > qNegAvgLoc[0]:
xytext_qPos = (qPosAvgLoc[0] + 1.0, qPosAvgLoc[1] - 1)
xytext_qNeg = (qNegAvgLoc[0] - 15.0, qNegAvgLoc[1] - 1)
else:
xytext_qPos = (qPosAvgLoc[0] - 15.0, qPosAvgLoc[1] - 1)
xytext_qNeg = (qNegAvgLoc[0] + 1.0, qNegAvgLoc[1] - 1)
ax[1].annotate(
"+Q = %2.1e" % (qPosSum),
xy=xytext_qPos,
xytext=xytext_qPos,
fontsize=labelsize,
)
ax[1].annotate(
"-Q = %2.1e" % (qNegSum),
xy=xytext_qNeg,
xytext=xytext_qNeg,
fontsize=labelsize,
)
ax[1].set_xlabel("x (m)", fontsize=labelsize)
ax[1].set_ylabel("z (m)", fontsize=labelsize)
if survey == "Dipole-Dipole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(B, 1.0, marker="v", color="blue", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
ax[1].plot(N, 1.0, marker="^", color="green", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextB1 = (B - 0.5, 3)
xytextM1 = (M - 0.5, 3)
xytextN1 = (N - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("B", xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate("N", xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif survey == "Pole-Dipole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
ax[1].plot(N, 1.0, marker="^", color="green", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextM1 = (M - 0.5, 3)
xytextN1 = (N - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate("N", xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif survey == "Dipole-Pole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(B, 1.0, marker="v", color="blue", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextB1 = (B - 0.5, 3)
xytextM1 = (M - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("B", xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
elif survey == "Pole-Pole":
ax[1].plot(A, 1.0, marker="v", color="red", markersize=labelsize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=labelsize)
xytextA1 = (A - 0.5, 3)
xytextM1 = (M - 0.5, 3)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].tick_params(axis="both", which="major", labelsize=ticksize)
cbar_ax = fig.add_axes([0.8, 0.05, 0.08, 0.5])
cbar_ax.axis("off")
vmin, vmax = dat[0].get_clim()
if Scale == "Log":
if (Field == "E") or (Field == "J"):
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5),
)
elif Field == "Model":
if Type == "Secondary":
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[np.minimum(0.0, vmin), np.maximum(0.0, vmax)],
)
else:
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5),
)
else:
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[
-1.0
* np.logspace(np.log10(-vmin - eps), np.log10(linthresh), 3)[:-1],
0.0,
np.logspace(np.log10(linthresh), np.log10(vmax), 3)[1:],
],
)
else:
if (Field == "Model") and (Type == "Secondary"):
cb = plt.colorbar(
dat[0],
ax=cbar_ax,
format=formatter,
ticks=np.r_[np.minimum(0.0, vmin), np.maximum(0.0, vmax)],
)
else:
cb = plt.colorbar(
dat[0], ax=cbar_ax, format=formatter, ticks=np.linspace(vmin, vmax, 5)
)
cb.ax.tick_params(labelsize=ticksize)
cb.set_label(label, fontsize=labelsize)
ax[1].set_xlim([-40.0, 40.0])
ax[1].set_ylim([-40.0, 8.0])
# ax[1].set_aspect('equal')
plt.show()
# return fig, ax
def plate_app():
app = widgetify(
PLOT,
survey=ToggleButtons(
options=["Dipole-Dipole", "Dipole-Pole", "Pole-Dipole", "Pole-Pole"],
value="Dipole-Dipole",
),
dx=FloatSlider(
min=1.0, max=1000.0, step=1.0, value=10.0, continuous_update=False
),
dz=FloatSlider(
min=1.0, max=200.0, step=1.0, value=10.0, continuous_update=False
),
xc=FloatSlider(
min=-30.0, max=30.0, step=1.0, value=0.0, continuous_update=False
),
zc=FloatSlider(
min=-30.0, max=0.0, step=1.0, value=-10.0, continuous_update=False
),
rotAng=FloatSlider(
min=-90.0,
max=90.0,
step=1.0,
value=0.0,
continuous_update=False,
description="$\\theta$",
),
rhoplate=FloatText(
min=1e-8,
max=1e8,
value=500.0,
continuous_update=False,
description="$\\rho_2$",
),
rhohalf=FloatText(
min=1e-8,
max=1e8,
value=500.0,
continuous_update=False,
description="$\\rho_1$",
),
A=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=-30.25, continuous_update=False
),
B=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=30.25, continuous_update=False
),
M=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=-10.25, continuous_update=False
),
N=FloatSlider(
min=-30.25, max=30.25, step=0.5, value=10.25, continuous_update=False
),
Field=ToggleButtons(
options=["Model", "Potential", "E", "J", "Charge", "Sensitivity"],
value="Model",
),
Type=ToggleButtons(options=["Total", "Primary", "Secondary"], value="Total"),
Scale=ToggleButtons(options=["Linear", "Log"], value="Linear"),
)
return app
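# Usage sketch (assumes a Jupyter notebook with ipywidgets enabled):
#   from geoscilabs.dcip.DCWidgetPlate2_5D import plate_app
#   plate_app()  # returns the interactive widget app built by widgetify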
| 33 | 112 | 0.543876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,576 | 0.220923 |
317ed9d058dadae1ec740f0850b99974ce2e984d | 293 | py | Python | retratodefases/phase_diagrams/__init__.py | Loracio/retrato-de-fases | a2d870a69b911af3b78288708cb569c957506940 | [
"MIT"
]
| 3 | 2021-03-22T00:07:28.000Z | 2021-03-22T12:11:18.000Z | retratodefases/phase_diagrams/__init__.py | Loracio/retrato-de-fases | a2d870a69b911af3b78288708cb569c957506940 | [
"MIT"
]
| null | null | null | retratodefases/phase_diagrams/__init__.py | Loracio/retrato-de-fases | a2d870a69b911af3b78288708cb569c957506940 | [
"MIT"
]
| 2 | 2021-03-20T19:00:53.000Z | 2021-03-22T12:19:52.000Z | try:
__PHASE_DIAGRAMS_IMPORTED__
except NameError:
    __PHASE_DIAGRAMS_IMPORTED__ = False
if not __PHASE_DIAGRAMS_IMPORTED__:
from .phase_portrait import PhasePortrait
from .funcion1D import Funcion1D
from .nullclines import Nullcline2D
__PHASE_DIAGRAMS_IMPORTED__ = True | 26.636364 | 45 | 0.805461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3180304baa69ab409e5d9574d22f4fd3b34b2b27 | 17,714 | py | Python | P13pt/spectrumfitter/spectrumfitter.py | green-mercury/P13pt | 38c4f079ec071b5b2651415d7bcb03be7a8a1dab | [
"MIT"
]
| 3 | 2019-08-27T16:32:29.000Z | 2019-09-28T06:56:14.000Z | P13pt/spectrumfitter/spectrumfitter.py | DamienFruleux/P13pt | a2cd74e97b8b2e5fb518886237575ae769229e49 | [
"MIT"
]
| 4 | 2018-10-29T22:06:54.000Z | 2019-02-28T12:39:03.000Z | P13pt/spectrumfitter/spectrumfitter.py | DamienFruleux/P13pt | a2cd74e97b8b2e5fb518886237575ae769229e49 | [
"MIT"
]
| 1 | 2019-02-13T13:29:18.000Z | 2019-02-13T13:29:18.000Z | #!/usr/bin/python
import sys
import os
import shutil
from glob import glob
from PyQt5.QtCore import (Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg,
QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl)
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtWidgets import (QApplication, QMessageBox, QMainWindow, QDockWidget, QAction,
QFileDialog, QProgressDialog)
from P13pt.spectrumfitter.dataloader import DataLoader
from P13pt.spectrumfitter.navigator import Navigator
from P13pt.spectrumfitter.fitter import Fitter
from P13pt.spectrumfitter.plotter import Plotter
from P13pt.spectrumfitter.load_fitresults import load_fitresults
from P13pt.params_from_filename import params_from_filename
class MainWindow(QMainWindow):
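    """Main window: wires the data loader, navigator, plotter and fitter docks
    together and manages saving/loading of fit-result sessions."""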
session_file = None
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.settings = QSettings("Mercury", "SpectrumFitter")
# set up data loading area
self.dock_loader = QDockWidget('Data loading', self)
self.dock_loader.setObjectName('loader')
self.loader = DataLoader()
self.dock_loader.setWidget(self.loader)
# set up data navigator
self.dock_navigator = QDockWidget('Data navigation', self)
self.dock_navigator.setObjectName('navigator')
self.navigator = Navigator()
self.dock_navigator.setWidget(self.navigator)
# set up plotter
self.plotter = Plotter()
self.setCentralWidget(self.plotter)
# set up fitter
self.dock_fitter = QDockWidget('Fitting', self)
self.dock_fitter.setObjectName('fitter')
self.fitter = Fitter()
self.dock_fitter.setWidget(self.fitter)
# set up the dock positions
self.addDockWidget(Qt.TopDockWidgetArea, self.dock_loader)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dock_navigator)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock_fitter)
# set up menus
fileMenu = self.menuBar().addMenu('File')
self.act_new_session = QAction('New session', self)
self.act_load_session = QAction('Load session', self)
self.act_save_session = QAction('Save session', self)
self.act_save_session_as = QAction('Save session as...', self)
for a in [self.act_new_session, self.act_load_session, self.act_save_session, self.act_save_session_as]:
fileMenu.addAction(a)
self.recent_menu = fileMenu.addMenu('Recent sessions')
self.update_recent_list()
fileMenu.addSeparator()
self.act_save_image = QAction('Save spectrum as image', self)
self.act_save_allimages = QAction('Save all spectra as images', self)
for a in [self.act_save_image, self.act_save_allimages]:
fileMenu.addAction(a)
viewMenu = self.menuBar().addMenu('View')
for w in [self.dock_loader, self.dock_navigator, self.dock_fitter]:
viewMenu.addAction(w.toggleViewAction())
self.act_restore_default_view = QAction('Restore default', self)
viewMenu.addAction(self.act_restore_default_view)
self.act_toggle_display_style = QAction('Toggle display style', self)
self.act_toggle_display_style.setShortcut(Qt.Key_F8)
viewMenu.addAction(self.act_toggle_display_style)
toolsMenu = self.menuBar().addMenu('Tools')
self.act_install_builtin_models = QAction('Install built-in models', self)
toolsMenu.addAction(self.act_install_builtin_models)
self.act_open_model_folder = QAction('Open model folder', self)
toolsMenu.addAction(self.act_open_model_folder)
# make connections
self.loader.dataset_changed.connect(self.dataset_changed)
self.loader.new_file_in_dataset.connect(self.navigator.new_file_in_dataset)
self.loader.deembedding_changed.connect(self.deembedding_changed)
self.navigator.selection_changed.connect(self.selection_changed)
self.fitter.fit_changed.connect(lambda: self.plotter.plot_fit(self.fitter.model))
self.fitter.fitted_param_changed.connect(self.plotter.fitted_param_changed)
self.fitter.btn_fitall.clicked.connect(self.fit_all)
self.act_new_session.triggered.connect(self.new_session)
self.act_load_session.triggered.connect(self.load_session)
self.act_save_session.triggered.connect(self.save_session)
self.act_save_session_as.triggered.connect(self.save_session_as)
self.act_save_image.triggered.connect(self.save_image)
self.act_save_allimages.triggered.connect(self.save_all_images)
self.act_restore_default_view.triggered.connect(lambda: self.restoreState(self.default_state))
self.act_toggle_display_style.triggered.connect(self.toggle_display_style)
self.act_install_builtin_models.triggered.connect(self.install_builtin_models)
self.act_open_model_folder.triggered.connect(self.open_model_folder)
# set up fitted parameter (this has to be done after making connections, so that fitter and plotter sync)
self.fitter.fitted_param = '-Y12' # default value
# create new session
self.new_session()
# show window
self.show()
self.default_state = self.saveState()
# restore layout from config (this has to be done AFTER self.show())
if self.settings.contains('geometry'):
self.restoreGeometry(self.settings.value("geometry"))
if self.settings.contains('windowState'):
self.restoreState(self.settings.value("windowState"))
def closeEvent(self, event):
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
super(MainWindow, self).closeEvent(event)
def dataset_changed(self):
self.fitter.empty_cache()
self.navigator.update_file_list(self.loader.dut_files)
for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]:
a.setEnabled(True)
def toggle_display_style(self):
if self.plotter.display_style == 'MP':
self.plotter.display_style = 'RI'
else:
self.plotter.display_style = 'MP'
self.deembedding_changed() # TODO: rename and/or remove redundancies, c.f. deembedding_changed()
def deembedding_changed(self):
# TODO: reduce redundancy with selection_changed()
i = self.navigator.file_list.currentRow()
spectrum = self.loader.get_spectrum(i)
if spectrum is not None:
#TODO: show parameters on plot
self.plotter.plot(spectrum, {})
else:
self.plotter.clear()
self.fitter.update_network(spectrum, self.loader.dut_files[i])
def selection_changed(self, i):
if i < 0: # when file_list is cleared:
return
QApplication.setOverrideCursor(Qt.WaitCursor)
# TODO: the argument here should be a filename, not the index
spectrum = self.loader.get_spectrum(i)
if spectrum is not None:
self.plotter.plot(spectrum, params_from_filename(self.loader.dut_files[i]))
else:
self.plotter.clear()
self.fitter.update_network(spectrum, self.loader.dut_files[i])
QApplication.restoreOverrideCursor()
def new_session(self):
self.session_file = None
self.setWindowTitle('Spectrum Fitter - New session')
self.fitter.unload_model()
self.loader.clear()
self.navigator.clear()
self.plotter.clear()
for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]:
a.setEnabled(False)
@pyqtSlot()
def save_session_as(self, res_file=None):
if not res_file:
res_file, filter = QFileDialog.getSaveFileName(self, 'Fit results file', filter='*.txt')
if not res_file:
return
res_folder = os.path.dirname(res_file)
try:
with open(res_file, 'w') as f:
# write the header
f.write('# fitting results generated by P13pt spectrum fitter\n')
if len(self.loader.dut_files) == 1:
f.write('# dut: ' +
os.path.join(
os.path.relpath(self.loader.dut_folder, res_folder),
self.loader.dut_files[0]
).replace('\\', '/') + '\n')
else:
f.write('# dut: ' + os.path.relpath(self.loader.dut_folder, res_folder).replace('\\', '/') + '\n')
if self.loader.thru and self.loader.thru_toggle_status:
f.write('# thru: ' + os.path.relpath(self.loader.thru_file, res_folder).replace('\\', '/') + '\n')
if self.loader.dummy and self.loader.dummy_toggle_status:
f.write('# dummy: ' + os.path.relpath(self.loader.dummy_file, res_folder).replace('\\', '/') + '\n')
f.write('# fitted_param: ' + self.plotter.fitted_param + '\n')
try:
ra = float(self.loader.txt_ra.text())
except:
ra = 0.
if not ra == 0:
f.write('# ra: ' + str(ra) + '\n')
if self.fitter.model:
f.write('# model: ' + os.path.basename(self.fitter.model_file).replace('\\', '/') + '\n')
f.write('# model_func: ' + self.fitter.cmb_modelfunc.currentText() + '\n')
# TODO: this all could clearly be done in a more elegant way
if self.fitter.cmb_fitmethod.currentText() != 'No fit methods found':
f.write('# fit_method: ' + self.fitter.cmb_fitmethod.currentText() + '\n')
# determine columns
f.write('# filename\t')
for p in params_from_filename(self.loader.dut_files[0]):
f.write(p + '\t')
f.write('\t'.join([p for p in self.fitter.model.params]))
f.write('\n')
# write data
filelist = sorted([filename for filename in self.fitter.model_params])
for filename in filelist:
f.write(filename + '\t')
# TODO: what if some filenames do not contain all parameters? should catch exceptions
for p in params_from_filename(self.loader.dut_files[0]):
f.write(str(params_from_filename(filename)[p]) + '\t')
f.write('\t'.join([str(self.fitter.model_params[filename][p]) for p in self.fitter.model.params]))
f.write('\n')
except EnvironmentError as e:
QMessageBox.critical(self, 'Error', 'Could not save session: '+str(e))
return
self.update_recent_list(res_file)
self.setWindowTitle('Spectrum Fitter - '+res_file)
self.session_file = res_file
def save_session(self):
self.save_session_as(self.session_file)
@pyqtSlot()
def load_session(self, res_file=None):
if not res_file:
res_file, filter = QFileDialog.getOpenFileName(self, 'Fit results file', filter='*.txt')
if not res_file:
return
res_folder = os.path.dirname(res_file)
self.new_session()
# read the data
try:
data, dut, thru, dummy, ra, fitter_info = load_fitresults(res_file, readfilenameparams=False, extrainfo=True)
except IOError as e:
QMessageBox.warning(self, 'Error', 'Could not load data: '+str(e))
return
# using os.path.realpath to get rid of relative path remainders ("..")
self.loader.load_dataset(dut=os.path.realpath(os.path.join(res_folder, dut)) if dut else None,
thru=os.path.realpath(os.path.join(res_folder, thru)) if thru else None,
dummy=os.path.realpath(os.path.join(res_folder, dummy)) if dummy else None,
ra=ra if ra else None)
# if a fitted_param was provided in the session file, set it up
if 'fitted_param' in fitter_info:
self.fitter.fitted_param = fitter_info['fitted_param']
# if a model was provided in the session file, load this model and the provided data
if 'model' in fitter_info:
self.fitter.load_model(filename=fitter_info['model'],
info=fitter_info,
data=data if data else None)
# update the fitter with the first spectrum in the list
self.fitter.update_network(self.loader.get_spectrum(0), self.loader.dut_files[0])
self.update_recent_list(res_file)
self.setWindowTitle('Spectrum Fitter - '+res_file)
self.session_file = res_file
#TODO: this is not really in the right place
@pyqtSlot()
def fit_all(self):
totalnum = len(self.loader.dut_files)
progressdialog = QProgressDialog('Fitting all spectra...', 'Cancel', 0, totalnum-1, self)
progressdialog.setWindowTitle('Progress')
progressdialog.setModal(True)
progressdialog.setAutoClose(True)
progressdialog.show()
for i in range(totalnum):
QApplication.processEvents()
if progressdialog.wasCanceled():
break
self.navigator.file_list.setCurrentRow(i)
self.fitter.fit_model()
progressdialog.setValue(i)
def save_image(self):
basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()])
filename, filter = QFileDialog.getSaveFileName(self, 'Choose file',
os.path.join(self.loader.dut_folder, basename+'.png'),
filter='*.png;;*.jpg;;*.eps')
if filename:
self.plotter.save_fig(filename)
def save_all_images(self):
foldername = QFileDialog.getExistingDirectory(self, 'Choose folder',
self.loader.dut_folder)
totalnum = len(self.loader.dut_files)
progressdialog = QProgressDialog('Saving all images...', 'Cancel', 0, totalnum - 1, self)
progressdialog.setWindowTitle('Progress')
progressdialog.setModal(True)
progressdialog.setAutoClose(True)
progressdialog.show()
for i in range(totalnum):
QApplication.processEvents()
if progressdialog.wasCanceled():
break
self.navigator.file_list.setCurrentRow(i)
basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()])
self.plotter.save_fig(os.path.join(foldername, basename+'.png'))
progressdialog.setValue(i)
def load_recent(self):
action = self.sender()
self.load_session(action.text())
def update_recent_list(self, filename=None):
recentlist = list(self.settings.value('recentSessions')) if self.settings.contains('recentSessions') \
else []
if filename:
if filename in recentlist:
recentlist.remove(filename)
recentlist.insert(0, filename)
recentlist = recentlist[0:5]
self.settings.setValue('recentSessions', recentlist)
self.recent_menu.clear()
for r in recentlist:
a = QAction(r, self)
self.recent_menu.addAction(a)
a.triggered.connect(self.load_recent)
def install_builtin_models(self):
builtin_folder = os.path.join(os.path.dirname(__file__), 'models')
for filename in sorted(glob(os.path.join(builtin_folder, '*.py'))):
# check if the file already exists in the models folder
if os.path.exists(os.path.join(self.fitter.models_dir, os.path.basename(filename))):
answer = QMessageBox.question(self, 'File already exists', 'The file: '+os.path.basename(filename)+
'already exists in your models folder. Would you like to replace it?')
if answer != QMessageBox.Yes:
continue
# if file does not exist or user does not mind replacing it, let's copy:
shutil.copyfile(filename, os.path.join(self.fitter.models_dir, os.path.basename(filename)))
def open_model_folder(self):
QDesktopServices.openUrl(QUrl.fromLocalFile(self.fitter.models_dir))
def msghandler(type, context, message):
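    """Route Qt log messages to message boxes instead of the console."""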
if type == QtInfoMsg:
QMessageBox.information(None, 'Info', message)
elif type == QtDebugMsg:
QMessageBox.information(None, 'Debug', message)
elif type == QtCriticalMsg:
QMessageBox.critical(None, 'Critical', message)
elif type == QtWarningMsg:
QMessageBox.warning(None, 'Warning', message)
elif type == QtFatalMsg:
QMessageBox.critical(None, 'Fatal error', message)
def main():
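    """Entry point: install the Qt message handler and start the application."""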
qInstallMessageHandler(msghandler)
# CD into directory where this script is saved
d = os.path.dirname(__file__)
if d != '': os.chdir(d)
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('audacity.png'))
mainwindow = MainWindow()
# Start the main loop.
ret = app.exec_()
sys.exit(ret)
if __name__ == '__main__':
main()
| 44.732323 | 122 | 0.628429 | 16,069 | 0.907136 | 0 | 0 | 5,819 | 0.328497 | 0 | 0 | 2,652 | 0.149712 |
31816e6d8bed3855148fe582fbc591405a02824d | 78,388 | py | Python | subversion/tests/cmdline/lock_tests.py | centic9/subversion-ppa | f65f38f4d8821f6225ba14b50a81973ad893fc02 | [
"Apache-2.0"
]
| null | null | null | subversion/tests/cmdline/lock_tests.py | centic9/subversion-ppa | f65f38f4d8821f6225ba14b50a81973ad893fc02 | [
"Apache-2.0"
]
| null | null | null | subversion/tests/cmdline/lock_tests.py | centic9/subversion-ppa | f65f38f4d8821f6225ba14b50a81973ad893fc02 | [
"Apache-2.0"
]
| 1 | 2020-11-04T07:19:37.000Z | 2020-11-04T07:19:37.000Z | #!/usr/bin/env python
# encoding=utf-8
#
# lock_tests.py: testing lock operations
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import re, os, stat, logging
logger = logging.getLogger()
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helpers
def check_writability(path, writable):
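  "Raise if the writability of PATH does not match WRITABLE."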
bits = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
mode = os.stat(path)[0]
if bool(mode & bits) != writable:
raise svntest.Failure("path '%s' is unexpectedly %s (mode %o)"
% (path, ["writable", "read-only"][writable], mode))
def is_writable(path):
"Raise if PATH is not writable."
check_writability(path, True)
def is_readonly(path):
"Raise if PATH is not readonly."
check_writability(path, False)
######################################################################
# Tests
#----------------------------------------------------------------------
# Each test refers to a section in
# notes/locking/locking-functional-spec.txt
# II.A.2, II.C.2.a: Lock a file in wc A as user FOO and make sure we
# have a representation of it. Checkout wc B as user BAR. Verify
# that user BAR cannot commit changes to the file nor its properties.
def lock_file(sbox):
"lock a file and verify that it's locked"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039: User '?jconstant'? does not own lock on path.*iota')"
svntest.main.run_svn(None, 'update', wc_b)
# -- Try to change a file --
# change the locked file
svntest.main.file_append(file_path_b, "Covert tweak\n")
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
# Revert our change that we failed to commit
svntest.main.run_svn(None, 'revert', file_path_b)
# -- Try to change a property --
# change the locked file's properties
svntest.main.run_svn(None, 'propset', 'sneakyuser', 'Sally', file_path_b)
err_re = "(svn\: E195022\: File '.*iota' is locked in another)|" + \
"(svn\: E160039\: User '?jconstant'? does not own lock on path)"
# attempt (and fail) to commit as user Sally
svntest.actions.run_and_verify_commit(wc_b, None, None, err_re,
'--username',
svntest.main.wc_author2,
'-m', '', file_path_b)
#----------------------------------------------------------------------
# II.C.2.b.[12]: Lock a file and commit using the lock. Make sure the
# lock is released. Repeat, but request that the lock not be
# released. Make sure the lock is retained.
def commit_file_keep_lock(sbox):
"commit a file and keep lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock 'A/mu' as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a change and commit it, holding lock
sbox.simple_append('A/mu', 'Tweak!\n')
svntest.main.run_svn(None, 'commit', '-m', '', '--no-unlock',
sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2, writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def commit_file_unlock(sbox):
"commit a file and release lock"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu and iota as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'),
sbox.ospath('iota'))
# make a change and commit it, allowing lock to be released
sbox.simple_append('A/mu', 'Tweak!\n')
sbox.simple_commit()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
expected_status.tweak('iota', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def commit_propchange(sbox):
"commit a locked file with a prop change"
sbox.build()
wc_dir = sbox.wc_dir
# lock A/mu as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment',
sbox.ospath('A/mu'))
# make a property change and commit it, allowing lock to be released
sbox.simple_propset('blue', 'azul', 'A/mu')
sbox.simple_commit('A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', wc_rev=2)
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in same wc as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.c: Lock a file in wc A as user FOO. Attempt to unlock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def break_lock(sbox):
"lock a file and verify lock breaking behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to unlock file
# This should give a "iota' is not locked in this working copy" error
svntest.actions.run_and_verify_svn(None, None, ".*not locked",
'unlock',
file_path_b)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [],
'unlock', '--force',
file_path_b)
#----------------------------------------------------------------------
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user BAR. Should fail.
#
# Attempt again with --force. Should succeed.
#
# II.C.2.d: Lock a file in wc A as user FOO. Attempt to lock same
# file in wc B as user FOO. Should fail.
#
# Attempt again with --force. Should succeed.
def steal_lock(sbox):
"lock a file and verify lock stealing behavior"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
file_path = sbox.ospath('iota')
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.main.run_svn(None, 'update', wc_b)
# attempt (and fail) to lock file
# This should give a "iota' is already locked... error, but exits 0.
svntest.actions.run_and_verify_svn2(None, None,
".*already locked", 0,
'lock',
'-m', 'trying to break', file_path_b)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', '--force',
'-m', 'trying to break', file_path_b)
#----------------------------------------------------------------------
# II.B.2, II.C.2.e: Lock a file in wc A. Query wc for the
# lock and verify that all lock fields are present and correct.
def examine_lock(sbox):
"examine the fields of a lockfile for correctness"
sbox.build()
# lock a file as wc_author
svntest.actions.run_and_validate_lock(sbox.ospath('iota'),
svntest.main.wc_author)
#----------------------------------------------------------------------
# II.C.1: Lock a file in wc A. Check out wc B. Break the lock in wc
# B. Verify that wc A gracefully cleans up the lock via update as
# well as via commit.
def handle_defunct_lock(sbox):
"verify behavior when a lock in a wc is defunct"
sbox.build()
wc_dir = sbox.wc_dir
# set up our expected status
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# lock the file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', sbox.ospath('iota'))
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
file_path_b = sbox.ospath('iota', wc_dir=wc_b)
# --- Meanwhile, in our other working copy... ---
# Try unlocking the file in the second wc.
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_path_b)
# update the 1st wc, which should clear the lock there
sbox.simple_update()
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# II.B.1: Set "svn:needs-lock" property on file in wc A. Checkout wc
# B and verify that the file is set as read-only.
#
# Tests propset, propdel, lock, and unlock
def enforce_lock(sbox):
"verify svn:needs-lock read-only behavior"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
lambda_path = sbox.ospath('A/B/lambda')
mu_path = sbox.ospath('A/mu')
# svn:needs-lock value should be forced to a '*'
svntest.actions.set_prop('svn:needs-lock', 'foo', iota_path)
svntest.actions.set_prop('svn:needs-lock', '*', lambda_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.set_prop('svn:needs-lock', ' ', mu_path, expected_err)
# Check svn:needs-lock
svntest.actions.check_prop('svn:needs-lock', iota_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', lambda_path, ['*'])
svntest.actions.check_prop('svn:needs-lock', mu_path, ['*'])
svntest.main.run_svn(None, 'commit',
'-m', '', iota_path, lambda_path, mu_path)
# Now make sure that the perms were flipped on all files
if os.name == 'posix':
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
if ((os.stat(iota_path)[0] & mode)
or (os.stat(lambda_path)[0] & mode)
or (os.stat(mu_path)[0] & mode)):
logger.warn("Setting 'svn:needs-lock' property on a file failed to set")
logger.warn("file mode to read-only.")
raise svntest.Failure
    # obtain a lock on one of these files...
    svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
                                       '-m', '', iota_path)
    # ...and verify that the write bit gets set...
    if not (os.stat(iota_path)[0] & mode):
      logger.warn("Locking a file with 'svn:needs-lock' failed to set write bit.")
      raise svntest.Failure
    # ...and unlock it...
    svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
                                       iota_path)
    # ...and verify that the write bit gets unset
    if (os.stat(iota_path)[0] & mode):
      logger.warn("Unlocking a file with 'svn:needs-lock' failed to unset write bit.")
      raise svntest.Failure
    # Verify that removing the property restores the file to read-write
    svntest.main.run_svn(None, 'propdel', 'svn:needs-lock', iota_path)
    if not (os.stat(iota_path)[0] & mode):
      logger.warn("Deleting 'svn:needs-lock' failed to set write bit.")
      raise svntest.Failure
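# A minimal sketch (not used by the tests) of the permission check done
# inline above; the is_writable()/is_readonly() helpers used by cp_isnt_ro
# further down are the suite's own versions of the same idea.  The mask is
# the one enforce_lock uses: any owner/group/other write bit counts as
# writable.
def example_has_write_bit(path):
  mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
  return bool(os.stat(path)[0] & mode)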
#----------------------------------------------------------------------
# Test that updating a file with the "svn:needs-lock" property works,
# especially on Windows, where renaming A to B fails if B already
# exists and has its read-only bit set. See also issue #2278.
@Issue(2278)
def update_while_needing_lock(sbox):
"update handles svn:needs-lock correctly"
sbox.build()
sbox.simple_propset('svn:needs-lock', 'foo', 'iota')
sbox.simple_commit('iota')
sbox.simple_update()
# Lock, modify, commit, unlock, to create r3.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', sbox.ospath('iota'))
sbox.simple_append('iota', 'This line added in r2.\n')
sbox.simple_commit('iota') # auto-unlocks
# Backdate to r2.
sbox.simple_update(revision=2)
# Try updating forward to r3 again. This is where the bug happened.
sbox.simple_update(revision=3)
#----------------------------------------------------------------------
# Tests update / checkout with changing props
def defunct_lock(sbox):
"verify svn:needs-lock behavior with defunct lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
iota_path = sbox.ospath('iota')
iota_path_b = sbox.ospath('iota', wc_dir=wc_b)
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
# Set the prop in wc a
sbox.simple_propset('svn:needs-lock', 'foo', 'iota')
# commit r2
sbox.simple_commit('iota')
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# lock iota in wc_b
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path_b)
# break the lock iota in wc a
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock', '--force',
'-m', '', iota_path)
# update wc_b
svntest.main.run_svn(None, 'update', wc_b)
# make sure that iota got set to read-only
if (os.stat(iota_path_b)[0] & mode):
logger.warn("Upon removal of a defunct lock, a file with 'svn:needs-lock'")
logger.warn("was not set back to read-only")
raise svntest.Failure
#----------------------------------------------------------------------
# Tests dealing with a lock on a deleted path
def deleted_path_lock(sbox):
"verify lock removal on a deleted path"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
iota_url = sbox.repo_url + '/iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
sbox.simple_rm('iota')
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', iota_path)
# Now make sure that we can delete the lock from iota via a URL
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
iota_url)
#----------------------------------------------------------------------
# Tests dealing with locking and unlocking
def lock_unlock(sbox):
"lock and unlock some files"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_status.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', writelocked=None)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
pi_path, rho_path, tau_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Tests dealing with directory deletion and locks
def deleted_dir_lock(sbox):
"verify removal of a directory with locks inside"
sbox.build()
wc_dir = sbox.wc_dir
pi_path = sbox.ospath('A/D/G/pi')
rho_path = sbox.ospath('A/D/G/rho')
tau_path = sbox.ospath('A/D/G/tau')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path, rho_path, tau_path)
sbox.simple_rm('A/D/G') # the parent directory
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'--no-unlock',
'-m', '', sbox.ospath('A/D/G'))
#----------------------------------------------------------------------
# III.c : Lock a file and check the output of 'svn stat' from the same
# working copy and another.
def lock_status(sbox):
"verify status of lock in working copy"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
sbox.simple_append('iota', "This is a spreadsheet\n")
sbox.simple_commit('iota')
svntest.main.run_svn(None, 'lock', '-m', '', sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', wc_rev=2, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status again after modifying the file
sbox.simple_append('iota', 'check stat output after mod')
expected_status.tweak('iota', status='M ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Verify status of lock from another working copy
svntest.main.run_svn(None, 'update', wc_b)
expected_status = svntest.actions.get_virginal_state(wc_b, 2)
expected_status.tweak('iota', writelocked='O')
svntest.actions.run_and_verify_status(wc_b, expected_status)
#----------------------------------------------------------------------
# III.c : Steal lock on a file from another working copy with 'svn lock
# --force', and check the status of lock in the repository from the
# working copy in which the file was initially locked.
def stolen_lock_status(sbox):
"verify status of stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
svntest.main.file_append(file_path, "This is a spreadsheet\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.main.run_svn(None, 'lock',
'-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, wc_rev=2)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Forcibly lock same file (steal lock) from another working copy
svntest.main.run_svn(None, 'update', wc_b)
svntest.main.run_svn(None, 'lock',
'-m', '', '--force', file_path_b)
# Verify status from working copy where file was initially locked
expected_status.tweak(fname, writelocked='T')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# III.c : Break lock from another working copy with 'svn unlock --force'
# and verify the status of the lock in the repository with 'svn stat -u'
# from the working copy in which the file was initially locked
def broken_lock_status(sbox):
"verify status of broken lock"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
svntest.main.file_append(file_path, "This is a spreadsheet\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.main.run_svn(None, 'lock',
'-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, wc_rev=2)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Forcibly unlock the same file (break lock) from another working copy
svntest.main.run_svn(None, 'update', wc_b)
svntest.main.run_svn(None, 'unlock',
'--force', file_path_b)
# Verify status from working copy where file was initially locked
expected_status.tweak(fname, writelocked='B')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
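# Summary of the one-character lock column exercised by the three status
# tests above:
#   'K' - a lock token for the path is present in this working copy
#   'O' - the path is locked in the repository, but not in this working copy
#   'T' - sTolen: this working copy still holds a token, but the repository
#         lock has been taken over by someone else
#   'B' - Broken: this working copy still holds a token, but the repository
#         lock no longer exists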
#----------------------------------------------------------------------
# Invalid input test - lock non-existent file
def lock_non_existent_file(sbox):
"verify error on locking non-existent file"
sbox.build()
fname = 'A/foo'
file_path = os.path.join(sbox.wc_dir, fname)
exit_code, output, error = svntest.main.run_svn(1, 'lock',
'-m', '', file_path)
error_msg = "The node '%s' was not found." % os.path.abspath(file_path)
for line in error:
if line.find(error_msg) != -1:
break
else:
logger.warn("Error: %s : not found in: %s" % (error_msg, error))
raise svntest.Failure
#----------------------------------------------------------------------
# Check that locking an out-of-date file fails.
def out_of_date(sbox):
"lock an out-of-date file and ensure failure"
sbox.build()
wc_dir = sbox.wc_dir
# Make a second copy of the working copy
wc_b = sbox.add_wc_path('_b')
svntest.actions.duplicate_dir(wc_dir, wc_b)
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_path_b = os.path.join(wc_b, fname)
# Make a new revision of the file in the first WC.
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
# --- Meanwhile, in our other working copy... ---
svntest.actions.run_and_verify_svn2(None, None,
".*newer version of '/iota' exists", 0,
'lock',
'--username', svntest.main.wc_author2,
'-m', '', file_path_b)
#----------------------------------------------------------------------
# Tests reverting a svn:needs-lock file
def revert_lock(sbox):
"verify svn:needs-lock behavior with revert"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
mode = stat.S_IWGRP | stat.S_IWOTH | stat.S_IWRITE
# set the prop in wc
svntest.actions.run_and_verify_svn(None, None, [], 'propset',
'svn:needs-lock', 'foo', iota_path)
# commit r2
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', iota_path)
# make sure that iota got set to read-only
if (os.stat(iota_path)[0] & mode):
logger.warn("Committing a file with 'svn:needs-lock'")
logger.warn("did not set the file to read-only")
raise svntest.Failure
# verify status is as we expect
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', wc_rev=2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# remove read-only-ness
svntest.actions.run_and_verify_svn(None, None, [], 'propdel',
'svn:needs-lock', iota_path)
# make sure that iota got read-only-ness removed
if (os.stat(iota_path)[0] & mode == 0):
logger.warn("Deleting the 'svn:needs-lock' property ")
logger.warn("did not remove read-only-ness")
raise svntest.Failure
# revert the change
svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
# make sure that iota got set back to read-only
if (os.stat(iota_path)[0] & mode):
logger.warn("Reverting a file with 'svn:needs-lock'")
logger.warn("did not set the file back to read-only")
raise svntest.Failure
# try propdel and revert from a different directory so
# full filenames are used
extra_name = 'xx'
# now lock the file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', iota_path)
# modify it
svntest.main.file_append(iota_path, "This line added\n")
expected_status.tweak(wc_rev=1)
expected_status.tweak('iota', wc_rev=2)
expected_status.tweak('iota', status='M ', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# revert it
svntest.actions.run_and_verify_svn(None, None, [], 'revert', iota_path)
# make sure it is still writable since we have the lock
if (os.stat(iota_path)[0] & mode == 0):
logger.warn("Reverting a 'svn:needs-lock' file (with lock in wc) ")
logger.warn("did not leave the file writable")
raise svntest.Failure
#----------------------------------------------------------------------
def examine_lock_via_url(sbox):
"examine the fields of a lock from a URL"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'iota'
comment = 'This is a lock test.'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + '/' + fname
# lock the file url and check the contents of lock
svntest.actions.run_and_validate_lock(file_url,
svntest.main.wc_author2)
#----------------------------------------------------------------------
def lock_several_files(sbox):
"lock/unlock several files in one go"
sbox.build()
wc_dir = sbox.wc_dir
# Deliberately have no direct child of A as a target
iota_path = os.path.join(sbox.wc_dir, 'iota')
lambda_path = os.path.join(sbox.wc_dir, 'A', 'B', 'lambda')
alpha_path = os.path.join(sbox.wc_dir, 'A', 'B', 'E', 'alpha')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', 'lock several',
iota_path, lambda_path, alpha_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
'--username', svntest.main.wc_author2,
iota_path, lambda_path, alpha_path)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_switched_files(sbox):
"lock/unlock switched files"
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
lambda_path = sbox.ospath('A/B/lambda')
iota_URL = sbox.repo_url + '/iota'
alpha_URL = sbox.repo_url + '/A/B/E/alpha'
svntest.actions.run_and_verify_svn(None, None, [], 'switch',
iota_URL, gamma_path,
'--ignore-ancestry')
svntest.actions.run_and_verify_svn(None, None, [], 'switch',
alpha_URL, lambda_path,
'--ignore-ancestry')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/gamma', 'A/B/lambda', switched='S')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'lock several',
gamma_path, lambda_path)
expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked='K')
# In WC-NG locks are kept per working copy, not per file
expected_status.tweak('A/B/E/alpha', 'iota', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
gamma_path, lambda_path)
expected_status.tweak('A/D/gamma', 'A/B/lambda', writelocked=None)
expected_status.tweak('A/B/E/alpha', 'iota', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
def lock_uri_encoded(sbox):
"lock and unlock a file with an URI-unsafe name"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'amazing space'
file_path = sbox.ospath(fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
expected_output = svntest.wc.State(wc_dir, {
fname : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname: Item(wc_rev=2, status=' ') })
# Commit the file.
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_path)
# Make sure that the file was locked.
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_path)
# Make sure it was successfully unlocked again.
expected_status.tweak(fname, writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# And now the URL case.
file_url = sbox.repo_url + '/' + fname
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', file_url)
# Make sure that the file was locked.
expected_status.tweak(fname, writelocked='O')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
file_url)
# Make sure it was successfully unlocked again.
expected_status.tweak(fname, writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# A regression test for a bug when svn:needs-lock and svn:executable
# interact badly. The bug was fixed in trunk @ r854933.
@SkipUnless(svntest.main.is_posix_os)
def lock_and_exebit1(sbox):
"svn:needs-lock and svn:executable, part I"
mode_w = stat.S_IWUSR
mode_x = stat.S_IXUSR
mode_r = stat.S_IRUSR
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:needs-lock', ' ', gamma_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:executable property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:executable', ' ', gamma_path)
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Committing a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# lock
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', gamma_path)
# mode should be +r, +w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or not gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Locking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-write, executable")
raise svntest.Failure
# modify
svntest.main.file_append(gamma_path, "check stat output after mod & unlock")
# unlock
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
gamma_path)
# Mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Unlocking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# ci
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# Mode should be still +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Commiting a file with 'svn:needs-lock, svn:executable'")
logger.warn("after unlocking modified file's permissions")
raise svntest.Failure
#----------------------------------------------------------------------
# A variant of lock_and_exebit1: same test without unlock
@SkipUnless(svntest.main.is_posix_os)
def lock_and_exebit2(sbox):
"svn:needs-lock and svn:executable, part II"
mode_w = stat.S_IWUSR
mode_x = stat.S_IXUSR
mode_r = stat.S_IRUSR
sbox.build()
wc_dir = sbox.wc_dir
gamma_path = sbox.ospath('A/D/gamma')
expected_err = ".*svn: warning: W125005: To turn off the svn:needs-lock property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:needs-lock', ' ', gamma_path)
expected_err = ".*svn: warning: W125005: To turn off the svn:executable property,.*"
svntest.actions.run_and_verify_svn2(None, None, expected_err, 0,
'ps', 'svn:executable', ' ', gamma_path)
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Committing a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
# lock
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', gamma_path)
# mode should be +r, +w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or not gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Locking a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-write, executable")
raise svntest.Failure
# modify
svntest.main.file_append(gamma_path, "check stat output after mod & unlock")
# commit
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', '', gamma_path)
# Mode should be +r, -w, +x
gamma_stat = os.stat(gamma_path)[0]
if (not gamma_stat & mode_r
or gamma_stat & mode_w
or not gamma_stat & mode_x):
logger.warn("Commiting a file with 'svn:needs-lock, svn:executable'")
logger.warn("did not set the file to read-only, executable")
raise svntest.Failure
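# Owner permission bits expected by the two svn:needs-lock/svn:executable
# tests above:
#   after the initial commit:   +r -w +x  (read-only, executable)
#   while the lock is held:     +r +w +x  (writable for editing)
#   after unlock and/or commit: +r -w +x  (read-only again, still executable)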
def commit_xml_unsafe_file_unlock(sbox):
"commit file with xml-unsafe name and release lock"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'foo & bar'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "Initial data.\n")
svntest.main.run_svn(None, 'add', file_path)
svntest.main.run_svn(None,
'commit', '-m', '', file_path)
# lock fname as wc_author
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'some lock comment', file_path)
# make a change and commit it, allowing lock to be released
svntest.main.file_append(file_path, "Followup data.\n")
svntest.main.run_svn(None,
'commit', '-m', '', file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname : Item(status=' ', wc_rev=3), })
# Make sure the file is unlocked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def repos_lock_with_info(sbox):
"verify info path@X or path -rY return repos lock"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'iota'
comment = 'This is a lock test.'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + '/' + fname
# lock wc file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', comment, file_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak(fname, writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Steal lock on wc file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'--force',
'-m', comment, file_url)
expected_status.tweak(fname, writelocked='T')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Get repository lock token
repos_lock_token \
= svntest.actions.run_and_parse_info(file_url)[0]['Lock Token']
# info with revision option
expected_infos = [
{ 'Lock Token' : repos_lock_token },
]
svntest.actions.run_and_verify_info(expected_infos, file_path, '-r1')
# info with peg revision
svntest.actions.run_and_verify_info(expected_infos, file_path + '@1')
#----------------------------------------------------------------------
@Issue(4126)
def unlock_already_unlocked_files(sbox):
"(un)lock set of files, one already (un)locked"
sbox.build()
wc_dir = sbox.wc_dir
# Deliberately have no direct child of A as a target
iota_path = sbox.ospath('iota')
lambda_path = sbox.ospath('A/B/lambda')
alpha_path = sbox.ospath('A/B/E/alpha')
gamma_path = sbox.ospath('A/D/gamma')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', svntest.main.wc_author2,
'-m', 'lock several',
iota_path, lambda_path, alpha_path)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
error_msg = ".*Path '/A/B/E/alpha' is already locked by user '" + \
svntest.main.wc_author2 + "'.*"
svntest.actions.run_and_verify_svn2(None, None, error_msg, 0,
'lock',
'--username', svntest.main.wc_author2,
alpha_path, gamma_path)
expected_status.tweak('A/D/gamma', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, ".*unlocked", [], 'unlock',
'--username', svntest.main.wc_author2,
lambda_path)
expected_status.tweak('A/B/lambda', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
error_msg = "(.*No lock on path '/A/B/lambda'.*)" + \
"|(.*'A/B/lambda' is not locked.*)"
svntest.actions.run_and_verify_svn2(None, None, error_msg, 0,
'unlock',
'--username', svntest.main.wc_author2,
'--force',
iota_path, lambda_path, alpha_path)
expected_status.tweak('iota', 'A/B/E/alpha', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def info_moved_path(sbox):
"show correct lock info on moved path"
sbox.build()
wc_dir = sbox.wc_dir
fname = sbox.ospath("iota")
fname2 = sbox.ospath("iota2")
# Move iota, creating r2.
svntest.actions.run_and_verify_svn(None, None, [],
"mv", fname, fname2)
expected_output = svntest.wc.State(wc_dir, {
'iota2' : Item(verb='Adding'),
'iota' : Item(verb='Deleting'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
"iota2" : Item(status=' ', wc_rev=2)
})
expected_status.remove("iota")
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Create a new, unrelated iota, creating r3.
svntest.main.file_append(fname, "Another iota")
svntest.actions.run_and_verify_svn(None, None, [],
"add", fname)
expected_output = svntest.wc.State(wc_dir, {
'iota' : Item(verb='Adding'),
})
expected_status.add({
"iota" : Item(status=' ', wc_rev=3)
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Lock the new iota.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
"lock", fname)
expected_status.tweak("iota", writelocked="K")
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Get info for old iota at r1. This shouldn't give us any lock info.
expected_infos = [
{ 'URL' : '.*' ,
'Lock Token' : None },
]
svntest.actions.run_and_verify_info(expected_infos, fname2, '-r1')
#----------------------------------------------------------------------
def ls_url_encoded(sbox):
"ls locked path needing URL encoding"
sbox.build()
wc_dir = sbox.wc_dir
dirname = sbox.ospath("space dir")
fname = os.path.join(dirname, "f")
# Create a dir with a space in its name and a file therein.
svntest.actions.run_and_verify_svn(None, None, [],
"mkdir", dirname)
svntest.main.file_append(fname, "someone was here")
svntest.actions.run_and_verify_svn(None, None, [],
"add", fname)
expected_output = svntest.wc.State(wc_dir, {
'space dir' : Item(verb='Adding'),
'space dir/f' : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
"space dir" : Item(status=' ', wc_rev=2),
"space dir/f" : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir)
# Lock the file.
svntest.actions.run_and_verify_svn("Lock space dir/f", ".*locked by user",
[], "lock", fname)
# Make sure ls shows it being locked.
expected_output = " +2 " + re.escape(svntest.main.wc_author) + " +O .+f|" \
" +2 " + re.escape(svntest.main.wc_author) + " .+\./"
svntest.actions.run_and_verify_svn("List space dir",
expected_output, [],
"list", "-v", dirname)
#----------------------------------------------------------------------
# Make sure unlocking a path with the wrong lock token fails.
@Issue(3794)
def unlock_wrong_token(sbox):
"verify unlocking with wrong lock token"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
file_url = sbox.repo_url + "/iota"
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
file_path)
# Steal the lock as the same author, but using a URL to keep the old token
# in the WC.
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
"--force", file_url)
# Then, unlocking the WC path should fail.
### The error message returned is actually this, but let's worry about that
### another day...
svntest.actions.run_and_verify_svn2(
None, None, ".*((No lock on path)|(400 Bad Request))", 0,
'unlock', file_path)
#----------------------------------------------------------------------
# Verify that info shows lock info for locked files with URI-unsafe names
# when run in recursive mode.
def examine_lock_encoded_recurse(sbox):
"verify recursive info shows lock info"
sbox.build()
wc_dir = sbox.wc_dir
fname = 'A/B/F/one iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.actions.run_and_verify_svn(None, None, [], "add", file_path)
expected_output = svntest.wc.State(wc_dir, {
fname : Item(verb='Adding'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({ fname: Item(wc_rev=2, status=' ') })
# Commit the file.
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
file_path)
# lock the file and validate the contents
svntest.actions.run_and_validate_lock(file_path,
svntest.main.wc_author)
# Trying to unlock someone else's lock with --force should fail.
@Issue(3801)
def unlocked_lock_of_other_user(sbox):
"unlock file locked by other user"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file with user jrandom
pi_path = sbox.ospath('A/D/G/pi')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# now try to unlock with user jconstant, should fail but exit 0.
if sbox.repo_url.startswith("http"):
expected_err = ".*403 Forbidden.*"
else:
expected_err = "svn: warning: W160039: User '%s' is trying to use a lock owned by "\
"'%s'.*" % (svntest.main.wc_author2, svntest.main.wc_author)
svntest.actions.run_and_verify_svn2(None, [], expected_err, 0,
'unlock',
'--username', svntest.main.wc_author2,
pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_funky_comment_chars(sbox):
"lock a file using a comment with xml special chars"
sbox.build()
wc_dir = sbox.wc_dir
# lock a file as wc_author
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.main.file_append(file_path, "This represents a binary file\n")
svntest.main.run_svn(None, 'commit',
'-m', '', file_path)
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', 'lock & load', file_path)
#----------------------------------------------------------------------
# Check that the svn:needs-lock usage applies to a specific location
# in a working copy, not to the working copy overall.
def lock_twice_in_one_wc(sbox):
"try to lock a file twice in one working copy"
sbox.build()
wc_dir = sbox.wc_dir
mu_path = sbox.ospath('A/mu')
mu2_path = sbox.ospath('A/B/mu')
# Create a needs-lock file
svntest.actions.set_prop('svn:needs-lock', '*', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'commit', wc_dir, '-m', '')
# Mark the file readonly
svntest.actions.run_and_verify_svn(None, None, [],
'update', wc_dir)
# Switch a second location for the same file in the same working copy
svntest.actions.run_and_verify_svn(None, None, [],
'switch', sbox.repo_url + '/A',
sbox.ospath('A/B'),
'--ignore-ancestry')
# Lock location 1
svntest.actions.run_and_verify_svn(None, None, [],
'lock', mu_path, '-m', 'Locked here')
# Locking in location 2 should fail ### Currently returns exitcode 0
svntest.actions.run_and_verify_svn2(None, None, ".*is already locked.*", 0,
'lock', '-m', '', mu2_path)
# Change the file anyway
os.chmod(mu2_path, 0700)
svntest.main.file_append(mu2_path, "Updated text")
# Commit will just succeed as the DB owns the lock. It's a user decision
# to commit the other target instead of the one originally locked
svntest.actions.run_and_verify_svn(None, None, [],
'commit', mu2_path, '-m', '')
#----------------------------------------------------------------------
# Test for issue #3524 'Locking path via ra_serf which doesn't exist in
# HEAD triggers assert'
@Issue(3524)
def lock_path_not_in_head(sbox):
"lock path that does not exist in HEAD"
sbox.build()
wc_dir = sbox.wc_dir
D_path = sbox.ospath('A/D')
lambda_path = sbox.ospath('A/B/lambda')
# Commit deletion of A/D and A/B/lambda as r2, then update the WC
# back to r1. Then attempt to lock some paths that no longer exist
# in HEAD. These should fail gracefully.
svntest.actions.run_and_verify_svn(None, None, [],
'delete', lambda_path, D_path)
svntest.actions.run_and_verify_svn(None, None, [], 'commit',
'-m', 'Some deletions', wc_dir)
svntest.actions.run_and_verify_svn(None, None, [], 'up', '-r1', wc_dir)
expected_lock_fail_err_re = "svn: warning: W160042: " \
"((Path .* doesn't exist in HEAD revision)" \
"|(L(ock|OCK) request (on '.*' )?failed: 405 Method Not Allowed))"
  # Issue #3524 These lock attempts were triggering an assert over ra_serf:
#
# working_copies\lock_tests-37>svn lock A\D
# ..\..\..\subversion\libsvn_client\ra.c:275: (apr_err=235000)
# svn: In file '..\..\..\subversion\libsvn_ra_serf\util.c' line 1120:
# assertion failed (ctx->status_code)
#
# working_copies\lock_tests-37>svn lock A\B\lambda
# ..\..\..\subversion\libsvn_client\ra.c:275: (apr_err=235000)
# svn: In file '..\..\..\subversion\libsvn_ra_serf\util.c' line 1120:
# assertion failed (ctx->status_code)
svntest.actions.run_and_verify_svn2(None, None, expected_lock_fail_err_re,
0, 'lock', lambda_path)
expected_err = 'svn: E155008: The node \'.*D\' is not a file'
svntest.actions.run_and_verify_svn(None, None, expected_err,
'lock', D_path)
#----------------------------------------------------------------------
def verify_path_escaping(sbox):
"verify escaping of lock paths"
sbox.build()
wc_dir = sbox.wc_dir
# Add test paths using two characters that need escaping in a url, but
# are within the normal ascii range
file1 = sbox.ospath('file #1')
file2 = sbox.ospath('file #2')
file3 = sbox.ospath('file #3')
svntest.main.file_write(file1, 'File 1')
svntest.main.file_write(file2, 'File 2')
svntest.main.file_write(file3, 'File 3')
svntest.main.run_svn(None, 'add', file1, file2, file3)
svntest.main.run_svn(None, 'ci', '-m', 'commit', wc_dir)
svntest.main.run_svn(None, 'lock', '-m', 'lock 1', file1)
svntest.main.run_svn(None, 'lock', '-m', 'lock 2', sbox.repo_url + '/file%20%232')
svntest.main.run_svn(None, 'lock', '-m', 'lock 3', file3)
svntest.main.run_svn(None, 'unlock', sbox.repo_url + '/file%20%233')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add(
{
'file #1' : Item(status=' ', writelocked='K', wc_rev='2'),
'file #2' : Item(status=' ', writelocked='O', wc_rev='2'),
'file #3' : Item(status=' ', writelocked='B', wc_rev='2')
})
# Make sure the file locking is reported correctly
svntest.actions.run_and_verify_status(wc_dir, expected_status)
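# A minimal illustrative helper (not used by the test above): the escaped
# URLs passed to 'svn lock'/'svn unlock' in verify_path_escaping can be
# derived with the Python 2 standard library, e.g.
# urllib.quote('file #2') == 'file%20%232' -- the space becomes %20, '#'
# becomes %23 and '/' is left alone.
def example_escape_repo_path(repo_url, relpath):
  import urllib
  return repo_url + '/' + urllib.quote(relpath)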
#----------------------------------------------------------------------
# Issue #3674: Replace + propset of locked file fails over DAV
@Issue(3674)
def replace_and_propset_locked_path(sbox):
"test replace + propset of locked file"
sbox.build()
wc_dir = sbox.wc_dir
mu_path = sbox.ospath('A/mu')
G_path = sbox.ospath('A/D/G')
rho_path = sbox.ospath('A/D/G/rho')
# Lock mu and A/D/G/rho.
svntest.actions.run_and_verify_svn(None, None, [],
'lock', mu_path, rho_path,
'-m', 'Locked')
# Now replace and propset on mu.
svntest.actions.run_and_verify_svn(None, None, [],
'rm', '--keep-local', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'add', mu_path)
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'foo', 'bar', mu_path)
# Commit mu.
svntest.actions.run_and_verify_svn(None, None, [],
'commit', '-m', '', mu_path)
# Let's try this again where directories are involved, shall we?
# Replace A/D/G and A/D/G/rho, propset on A/D/G/rho.
svntest.actions.run_and_verify_svn(None, None, [],
'rm', G_path)
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', G_path)
svntest.main.file_append(rho_path, "This is the new file 'rho'.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'add', rho_path)
svntest.actions.run_and_verify_svn(None, None, [],
'propset', 'foo', 'bar', rho_path)
# And commit G.
svntest.actions.run_and_verify_svn(None, None, [],
'commit', '-m', '', G_path)
#----------------------------------------------------------------------
def cp_isnt_ro(sbox):
"uncommitted svn:needs-lock add/cp not read-only"
sbox.build()
wc_dir = sbox.wc_dir
mu_URL = sbox.repo_url + '/A/mu'
mu_path = sbox.ospath('A/mu')
mu2_path = sbox.ospath('A/mu2')
mu3_path = sbox.ospath('A/mu3')
kappa_path = sbox.ospath('kappa')
open(kappa_path, 'w').write("This is the file 'kappa'.\n")
## added file
sbox.simple_add('kappa')
svntest.actions.set_prop('svn:needs-lock', 'yes', kappa_path)
is_writable(kappa_path)
sbox.simple_commit('kappa')
is_readonly(kappa_path)
## versioned file
svntest.actions.set_prop('svn:needs-lock', 'yes', mu_path)
is_writable(mu_path)
sbox.simple_commit('A/mu')
is_readonly(mu_path)
# At this point, mu has 'svn:needs-lock' set
## wc->wc copied file
svntest.main.run_svn(None, 'copy', mu_path, mu2_path)
is_writable(mu2_path)
sbox.simple_commit('A/mu2')
is_readonly(mu2_path)
## URL->wc copied file
svntest.main.run_svn(None, 'copy', mu_URL, mu3_path)
is_writable(mu3_path)
sbox.simple_commit('A/mu3')
is_readonly(mu3_path)
#----------------------------------------------------------------------
# Issue #3525: Locked file which is scheduled for delete causes tree
# conflict
@Issue(3525)
def update_locked_deleted(sbox):
"updating locked scheduled-for-delete file"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
mu_path = sbox.ospath('A/mu')
alpha_path = sbox.ospath('A/B/E/alpha')
svntest.main.run_svn(None, 'lock', '-m', 'locked', mu_path, iota_path,
alpha_path)
sbox.simple_rm('iota')
sbox.simple_rm('A/mu')
sbox.simple_rm('A/B/E')
# Create expected output tree for an update.
expected_output = svntest.wc.State(wc_dir, {
})
# Create expected status tree for the update.
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/E', status='D ')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='K')
expected_status.tweak('A/B/E/beta', status='D ')
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
# Now we steal the lock of iota and A/mu via URL and retry
svntest.main.run_svn(None, 'lock', '-m', 'locked', sbox.repo_url + '/iota',
'--force', sbox.repo_url + '/A/mu',
sbox.repo_url + '/A/B/E/alpha')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='O')
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(status='B '),
'A/B/E/alpha' : Item(status='B '),
'iota' : Item(status='B '),
})
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
#----------------------------------------------------------------------
def block_unlock_if_pre_unlock_hook_fails(sbox):
"block unlock operation if pre-unlock hook fails"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
svntest.actions.create_failing_hook(repo_dir, "pre-unlock", "error text")
# lock a file.
pi_path = sbox.ospath('A/D/G/pi')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Make sure the unlock operation fails as pre-unlock hook blocks it.
expected_unlock_fail_err_re = ".*error text|.*500 Internal Server Error"
svntest.actions.run_and_verify_svn2(None, None, expected_unlock_fail_err_re,
1, 'unlock', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_invalid_token(sbox):
"verify pre-lock hook returning invalid token"
sbox.build()
hook_path = os.path.join(sbox.repo_dir, 'hooks', 'pre-lock')
svntest.main.create_python_hook_script(hook_path,
'# encoding=utf-8\n'
'import sys\n'
'sys.stdout.write("тест")\n'
'sys.exit(0)\n')
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.actions.run_and_verify_svn2(None, None,
"svn: warning: W160037: " \
".*scheme.*'opaquelocktoken'", 0,
'lock', '-m', '', file_path)
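# Background for the test above: a pre-lock hook may print a lock token on
# stdout for the server to use, but the token must be a URI in the
# 'opaquelocktoken' scheme; the arbitrary text written by this hook is
# therefore rejected with the W160037 warning matched above.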
@Issue(3105)
def lock_multi_wc(sbox):
"obtain locks in multiple working copies in one go"
sbox.build()
sbox2 = sbox.clone_dependent(copy_wc=True)
wc_name = os.path.basename(sbox.wc_dir)
wc2_name = os.path.basename(sbox2.wc_dir)
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' unlocked.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' unlocked.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
@Issue(3378)
def locks_stick_over_switch(sbox):
"locks are kept alive over switching"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.ospath('A'), repo_url + '/AA',
'-m', '')
expected_output = svntest.verify.UnorderedOutput([
'\'iota\' locked by user \'jrandom\'.\n',
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'D', 'H', 'chi'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/D/H/chi'),
sbox.ospath('A/mu'),
sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/H/chi', 'A/mu', 'iota', writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.wc.State(wc_dir, {
})
expected_status.tweak(wc_rev=2)
expected_status.tweak('', wc_rev=1)
expected_status.tweak('iota', writelocked='K', wc_rev=1)
switched_status = expected_status.copy()
switched_status.tweak(writelocked=None)
switched_status.tweak('iota', writelocked='K')
switched_status.tweak('A', switched='S')
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/AA',
expected_output, None, switched_status)
# And now switch back to verify that the locks reappear
expected_output = svntest.wc.State(wc_dir, {
})
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/A',
expected_output, None, expected_status)
@Issue(4304)
def lock_unlock_deleted(sbox):
"lock/unlock a deleted file"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'rm', sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' unlocked.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(4369)
def commit_stolen_lock(sbox):
"commit with a stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
sbox.simple_append('A/mu', 'zig-zag')
sbox.simple_lock('A/mu')
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', '--force',
sbox.repo_url + '/A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='M ', writelocked='T')
err_re = "(.*E160037: Cannot verify lock on path '/A/mu')|" + \
"(.*E160038: '/.*/A/mu': no lock token available)"
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
err_re,
wc_dir)
# When removing directories, the locks of contained files were not
# correctly removed from the working copy database, thus they later
# magically reappeared when new files or directories with the same
# paths were added.
@Issue(4364)
def drop_locks_on_parent_deletion(sbox):
"drop locks when the parent is deleted"
sbox.build()
wc_dir = sbox.wc_dir
# lock some files, and remove them.
sbox.simple_lock('A/B/lambda')
sbox.simple_lock('A/B/E/alpha')
sbox.simple_lock('A/B/E/beta')
sbox.simple_rm('A/B')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove_subtree('A/B')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
  # now re-add entities to the deleted paths.
sbox.simple_mkdir('A/B')
sbox.simple_add_text('new file replacing old file', 'A/B/lambda')
sbox.simple_add_text('file replacing former dir', 'A/B/F')
# The bug also resurrected locks on directories when their path
# matched a former file.
sbox.simple_mkdir('A/B/E', 'A/B/E/alpha')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B',
'A/B/E',
'A/B/E/alpha',
'A/B/F',
'A/B/lambda',
wc_rev='3')
expected_status.remove('A/B/E/beta')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_timeout(sbox):
"unlock a lock with timeout"
import httplib
from urlparse import urlparse
import base64
sbox.build()
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_body = '<?xml version="1.0" encoding="utf-8" ?>' \
'<D:lockinfo xmlns:D="DAV:">' \
' <D:lockscope><D:exclusive/></D:lockscope>' \
' <D:locktype><D:write/></D:locktype>' \
' <D:owner>' \
' <D:href>http://a/test</D:href>' \
' </D:owner>' \
'</D:lockinfo>'
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jconstant:rayjandom'),
'Timeout': 'Second-86400'
}
# Enabling the following line makes this test easier to debug
h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', lock_body, lock_headers)
r = h.getresponse()
# Verify that there is a lock, by trying to obtain one
svntest.actions.run_and_verify_svn2(None, None, ".*locked by user", 0,
'lock', '-m', '', sbox.ospath('iota'))
# Before this patch this used to fail with a parse error of the timeout
svntest.actions.run_and_verify_svn2(None, None, ".*W160039.*Unlock.*403", 0,
'unlock', sbox.repo_url + '/iota')
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.ospath('iota'), '--force')
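# Notes on the raw LOCK request above: 'Timeout: Second-86400' asks for a
# 24-hour (86400-second) DAV lock, and the lock is taken out as jconstant,
# which is why jrandom's plain 'svn unlock' of the URL is refused and the
# working-copy unlock needs --force.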
def non_root_locks(sbox):
"locks for working copies not at repos root"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.repo_url, sbox.repo_url + '/X',
'-m', 'copy greek tree')
sbox.simple_switch(sbox.repo_url + '/X')
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Lock a file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Updates don't break the lock
sbox.simple_update('A/D')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
sbox.simple_update('')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Break the lock
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Subdir update reports the break
sbox.simple_update('A/D')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Relock and break
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Root update reports the break
sbox.simple_update('')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3515)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_refresh(sbox):
"refresh timeout of DAV lock"
import httplib
from urlparse import urlparse
import base64
sbox.build(create_wc = False)
# Acquire lock on 'iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
sbox.repo_url + '/iota')
# Try to refresh lock using 'If' header
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_token = svntest.actions.run_and_parse_info(sbox.repo_url + '/iota')[0]['Lock Token']
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jrandom:rayjandom'),
'If': '(<' + lock_token + '>)',
'Timeout': 'Second-7200'
}
# Enabling the following line makes this test easier to debug
h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', '', lock_headers)
# XFAIL Refreshing of DAV lock fails with error '412 Precondition Failed'
r = h.getresponse()
if r.status != httplib.OK:
raise svntest.Failure('Lock refresh failed: %d %s' % (r.status, r.reason))
@SkipUnless(svntest.main.is_ra_type_dav)
def delete_locked_file_with_percent(sbox):
"lock and delete a file called 'a %( ) .txt'"
sbox.build()
locked_filename = 'a %( ) .txt'
locked_path = sbox.ospath(locked_filename)
svntest.main.file_write(locked_path, "content\n")
sbox.simple_add(locked_filename)
sbox.simple_commit()
sbox.simple_lock(locked_filename)
sbox.simple_rm(locked_filename)
# XFAIL: With a 1.8.x client, this commit fails with:
# svn: E175002: Unexpected HTTP status 400 'Bad Request' on '/svn-test-work/repositories/lock_tests-52/!svn/txr/2-2/a%20%25(%20)%20.txt'
# and the following error in the httpd error log:
# Invalid percent encoded URI in tagged If-header [400, #104]
sbox.simple_commit()
@Issue(4557)
@XFail(svntest.main.is_ra_type_dav)
def delete_dir_with_lots_of_locked_files(sbox):
"delete a directory containing lots of locked files"
sbox.build()
wc_dir = sbox.wc_dir
# A lot of paths.
nfiles = 75 # NOTE: test XPASSES with 50 files!!!
locked_paths = []
for i in range(nfiles):
locked_paths.append(sbox.ospath("A/locked_files/file-%i" % i))
# Create files at these paths
os.mkdir(sbox.ospath("A/locked_files"))
for file_path in locked_paths:
svntest.main.file_write(file_path, "This is '%s'.\n" % (file_path,))
sbox.simple_add("A/locked_files")
sbox.simple_commit()
sbox.simple_update()
# lock all the files
svntest.actions.run_and_verify_svn(None, None, [], 'lock',
'-m', 'All locks',
*locked_paths)
# Locally delete A (regression against earlier versions, which
# always used a special non-standard request)
sbox.simple_rm("A")
# But a further replacement never worked
sbox.simple_mkdir("A")
# And an additional propset didn't work either
# (but doesn't require all lock tokens recursively)
sbox.simple_propset("k", "v", "A")
# Commit the deletion
# XFAIL: As of 1.8.10, this commit fails with:
# svn: E175002: Unexpected HTTP status 400 'Bad Request' on '<path>'
# and the following error in the httpd error log:
# request failed: error reading the headers
# This problem was introduced on the 1.8.x branch in r1606976.
sbox.simple_commit()
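# The failure is presumably a request-header size problem: committing the
# delete sends one lock token per locked file in the DAV If: header, and
# with 75 files that header appears to exceed what httpd will read ("error
# reading the headers"), while 50 files still fit -- hence the NOTE above.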
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
lock_file,
commit_file_keep_lock,
commit_file_unlock,
commit_propchange,
break_lock,
steal_lock,
examine_lock,
handle_defunct_lock,
enforce_lock,
defunct_lock,
deleted_path_lock,
lock_unlock,
deleted_dir_lock,
lock_status,
stolen_lock_status,
broken_lock_status,
lock_non_existent_file,
out_of_date,
update_while_needing_lock,
revert_lock,
examine_lock_via_url,
lock_several_files,
lock_switched_files,
lock_uri_encoded,
lock_and_exebit1,
lock_and_exebit2,
commit_xml_unsafe_file_unlock,
repos_lock_with_info,
unlock_already_unlocked_files,
info_moved_path,
ls_url_encoded,
unlock_wrong_token,
examine_lock_encoded_recurse,
unlocked_lock_of_other_user,
lock_funky_comment_chars,
lock_twice_in_one_wc,
lock_path_not_in_head,
verify_path_escaping,
replace_and_propset_locked_path,
cp_isnt_ro,
update_locked_deleted,
block_unlock_if_pre_unlock_hook_fails,
lock_invalid_token,
lock_multi_wc,
locks_stick_over_switch,
lock_unlock_deleted,
commit_stolen_lock,
drop_locks_on_parent_deletion,
dav_lock_timeout,
non_root_locks,
dav_lock_refresh,
delete_locked_file_with_percent,
delete_dir_with_lots_of_locked_files,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| 36.240407 | 139 | 0.594708 | 0 | 0 | 0 | 0 | 26,420 | 0.337024 | 0 | 0 | 28,717 | 0.366326 |
318175319ce5b1d0f129d6cf4fa9618b04da1e8b | 5,622 | py | Python | tests/testresourcemap.py | rayvnekieron/regionator | ed32f43c315465402b7e18c9f374e22dd89db64e | ["Apache-2.0"] | null | null | null | tests/testresourcemap.py | rayvnekieron/regionator | ed32f43c315465402b7e18c9f374e22dd89db64e | ["Apache-2.0"] | null | null | null | tests/testresourcemap.py | rayvnekieron/regionator | ed32f43c315465402b7e18c9f374e22dd89db64e | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
"""
Copyright (C) 2006 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
$URL$
$Revision$
$Date$
"""
import unittest
import kml.model
import kml.resourcemap
import xml.dom.minidom
class SimpleResourceMapItemTestCase(unittest.TestCase):
def runTest(self):
rmi = kml.resourcemap.ResourceMapItem()
rmi.ParseTexturesTxtLine('<gp><kp><mid>')
(gp,kp,mid) = rmi.Mapping()
assert gp == 'gp','bad geom path in ResourceMapItem'
assert kp == 'kp','bad kmz path in ResourceMapItem'
assert mid == 'mid','bad mid in ResourceMapItem'
class NoMidResourceMapItemTestCase(unittest.TestCase):
def runTest(self):
rmi = kml.resourcemap.ResourceMapItem()
rmi.ParseTexturesTxtLine('<../goo/hi.jpg> <../koo/bye.jpg>')
(gp,kp,mid) = rmi.Mapping()
assert gp == '../goo/hi.jpg','bad geom path in ResourceMapItem'
assert kp == '../koo/bye.jpg','bad kmz path in ResourceMapItem'
assert mid == None,'non None mid in non-mid ResourceMapItem'
class ResourceMapTestCase(unittest.TestCase):
def setUp(self):
self.__model = kml.model.Model()
self.__model.Parse('London_house.kmz')
textures_txt_data = self.__model.ReadFileData('textures.txt')
self.__rmap = kml.resourcemap.ResourceMap()
self.__rmap.ParseTexturesTxt(textures_txt_data)
def testResourceMapSize(self):
assert self.__rmap.Size() == 3,'ResourceMap.Size() bad'
def testResourceMapIterator(self):
m = []
for rmap_item in self.__rmap:
m.append(rmap_item.Mapping())
assert m[0][0] == '../images/Building3.JPG','textures.txt parse failed 0'
assert m[2][1] == '../images/GAF_Marquis.jpg','textures.txt parse failed 2'
def testResourceMapLookup(self):
kp = self.__rmap.GetKmzPath('../images/Building3.JPG')
assert kp == '../images/Building3.JPG','rmap kmz lookup failed'
gp = self.__rmap.GetGeomPath('../images/GAF_Marquis.jpg')
assert gp == '../images/GAF_Marquis.jpg','rmap geom lookup failed'
class TexturesTxtTestCase(unittest.TestCase):
def setUp(self):
f = open('textures.txt','r')
textures_txt_data = f.read()
f.close()
self.__rmap = kml.resourcemap.ResourceMap()
self.__rmap.ParseTexturesTxt(textures_txt_data)
def testSize(self):
assert self.__rmap.Size() == 66,'textures txt rmap size bad'
def testGeomLookup(self):
gp = ('../geom/north-face-10noCulling.jpg')
rmi = self.__rmap.LookupByGeomPath(gp)
(got_gp,got_kp,got_mid) = rmi.Mapping()
want_kp = '../kmz/north-face-10noCulling.jpg'
assert got_kp == want_kp, 'geom lookup failed'
def testKmzLookup(self):
rmi = self.__rmap.LookupByKmzPath('../kmz/east-face-1_1.jpg')
(got_gp, got_kp, id) = rmi.Mapping()
want_gp = '../geom/east-face-1_1.jpg'
assert got_gp == want_gp,'kmz lookup failed'
def testLookupAll(self):
for rmap_item in self.__rmap:
(gp,kp,mid) = rmap_item.Mapping()
assert gp == self.__rmap.GetGeomPath(kp),'GetGeomPath() failed'
assert kp == self.__rmap.GetKmzPath(gp),'GetKmzPath() failed'
class ResourceMapAddTestCase(unittest.TestCase):
def runTest(self):
rmap = kml.resourcemap.ResourceMap()
rmap.AddResourceMapItem('gpath0','kpath0','mid0')
rmap.AddResourceMapItem('gpath1','kpath1','mid1')
got_tt = rmap.Serialize()
want_tt = ['<gpath0> <kpath0> <mid0>', '<gpath1> <kpath1> <mid1>']
assert got_tt == "\n".join(want_tt), 'resource map serialize failed'
class TestConvertTexturesTxt(unittest.TestCase):
def runTest(self):
rmap_kml = kml.resourcemap.ConvertTexturesTxt('textures.txt')
rmap_node = xml.dom.minidom.parseString(rmap_kml)
alias_nodelist = rmap_node.getElementsByTagName('Alias')
targethref_nodelist = rmap_node.getElementsByTagName('targetHref')
sourcehref_nodelist = rmap_node.getElementsByTagName('sourceHref')
assert 66 == len(alias_nodelist) == len(sourcehref_nodelist) == \
len(targethref_nodelist)
assert '../kmz/east-face-10noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[0], 'targetHref')
assert '../geom/east-face-10noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[0], 'sourceHref')
assert '../kmz/west-face-9noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[65], 'targetHref')
assert '../geom/west-face-9noCulling.jpg' == \
kml.kmlparse.GetSimpleElementText(alias_nodelist[65], 'sourceHref')
def suite():
suite = unittest.TestSuite()
suite.addTest(SimpleResourceMapItemTestCase())
suite.addTest(NoMidResourceMapItemTestCase())
suite.addTest(ResourceMapTestCase("testResourceMapSize"))
suite.addTest(ResourceMapTestCase("testResourceMapIterator"))
suite.addTest(ResourceMapTestCase("testResourceMapLookup"))
suite.addTest(TexturesTxtTestCase("testSize"))
suite.addTest(TexturesTxtTestCase("testGeomLookup"))
suite.addTest(TexturesTxtTestCase("testKmzLookup"))
suite.addTest(TexturesTxtTestCase("testLookupAll"))
suite.addTest(TestConvertTexturesTxt())
return suite
runner = unittest.TextTestRunner()
runner.run(suite())
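# Illustrative sketch (not part of the original test suite): each textures.txt
# line maps "<geometry path> <kmz path> <model id>", which is what the tests
# above exercise through ResourceMapItem. The paths below are made up.
def _example_parse_textures_line():
    rmi = kml.resourcemap.ResourceMapItem()
    rmi.ParseTexturesTxtLine('<../geom/wall.jpg> <../kmz/wall.jpg> <model0>')
    return rmi.Mapping()  # ('../geom/wall.jpg', '../kmz/wall.jpg', 'model0')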
| 37.986486 | 79 | 0.714692 | 4,247 | 0.755425 | 0 | 0 | 0 | 0 | 0 | 0 | 1,969 | 0.350231 |
31820a59b9c5678cbd47dd2c6b18f2a2f8d3b779 | 2,986 | py | Python | pycket/base.py | krono/pycket | 5eff3401ce5cf34b16863b669ac9e274edabbe00 | ["MIT"] | null | null | null | pycket/base.py | krono/pycket | 5eff3401ce5cf34b16863b669ac9e274edabbe00 | ["MIT"] | null | null | null | pycket/base.py | krono/pycket | 5eff3401ce5cf34b16863b669ac9e274edabbe00 | ["MIT"] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z |
from pycket.error import SchemeException
from rpython.tool.pairtype import extendabletype
from rpython.rlib import jit, objectmodel
class W_ProtoObject(object):
""" abstract base class of both actual values (W_Objects) and multiple
return values (Values)"""
_attrs_ = []
_settled_ = True
def as_real_value(self):
raise NotImplementedError("not a real value!")
def num_values(val):
raise NotImplementedError("not a real value!")
def get_value(val, index):
raise NotImplementedError("not a real value!")
def get_all_values(self):
raise NotImplementedError("not a real value!")
class W_Object(W_ProtoObject):
__metaclass__ = extendabletype
_attrs_ = []
errorname = "%%%%unreachable%%%%"
def __init__(self):
raise NotImplementedError("abstract base class")
def num_values(self):
return 1
def get_value(self, index):
assert index == 0
return self
def get_all_values(self):
return [self]
def iscallable(self):
return False
def call(self, args, env, cont):
raise SchemeException("%s is not callable" % self.tostring())
def call_with_extra_info(self, args, env, cont, calling_app):
return self.call(args, env, cont)
def enable_jitting(self):
pass # need to override in callables that are based on an AST
# an arity is a pair of a list of numbers and either -1 or a non-negative integer
def get_arity(self):
from pycket.interpreter import Arity
if self.iscallable():
return Arity.unknown
else:
raise SchemeException("%s does not have arity" % self.tostring())
def is_proper_list(self):
return False
def is_impersonator(self):
return self.is_chaperone()
def is_chaperone(self):
return False
def is_proxy(self):
return self.is_chaperone() or self.is_impersonator()
def get_proxied(self):
return self
def get_properties(self):
return {}
def is_non_interposing_chaperone(self):
return False
def immutable(self):
return False
def equal(self, other):
return self is other # default implementation
def eqv(self, other):
return self is other # default implementation
def hash_equal(self):
return objectmodel.compute_hash(self) # default implementation
hash_eqv = hash_equal
def tostring(self):
return str(self)
# for expose
@classmethod
def make_unwrapper(cls):
if cls is W_Object:
return lambda x: x, ''
def unwrap(w_object):
if isinstance(w_object, cls):
return w_object
return None
return unwrap, cls.errorname
class SingletonMeta(type):
def __new__(cls, name, bases, dct):
result = type.__new__(cls, name, bases, dct)
result.singleton = result()
return result
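# Illustrative usage sketch (not part of the original module): any class that
# declares SingletonMeta as its metaclass gets exactly one instance, created at
# class-definition time and exposed as the class attribute `singleton`.
# The example class below is hypothetical.
class _ExampleSingleton(object):
    __metaclass__ = SingletonMeta  # Python 2 metaclass syntax, as used in this code base
    def describe(self):
        return "shared instance"
# SingletonMeta.__new__ has already instantiated the class once, so callers
# simply reuse _ExampleSingleton.singleton instead of constructing new objects.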
| 27.394495 | 85 | 0.639987 | 2,832 | 0.948426 | 0 | 0 | 269 | 0.090087 | 0 | 0 | 485 | 0.162425 |
3183eb2e126d90a029e2af49600fc31324d642a8 | 5,811 | py | Python | src/teacher/flake_approx/teacher_env.py | jainraj/CISR_NeurIPS20 | 027957e4a26a36f6501c4f0e5e73cb9d78a53e66 | ["MIT"] | 16 | 2020-11-04T14:44:16.000Z | 2022-02-16T08:08:23.000Z | src/teacher/flake_approx/teacher_env.py | jainraj/CISR_NeurIPS20 | 027957e4a26a36f6501c4f0e5e73cb9d78a53e66 | ["MIT"] | 2 | 2021-03-23T12:07:53.000Z | 2021-12-22T14:30:59.000Z | src/teacher/flake_approx/teacher_env.py | jainraj/CISR_NeurIPS20 | 027957e4a26a36f6501c4f0e5e73cb9d78a53e66 | ["MIT"] | 7 | 2020-11-17T03:20:00.000Z | 2022-03-31T15:53:58.000Z |
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from src.envs import CMDP, FrozenLakeEnvCustomMap
from src.envs.frozen_lake.frozen_maps import MAPS
from src.students import LagrangianStudent, identity_transfer
from src.online_learning import ExponetiatedGradient
from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \
create_intervention, SmallFrozenTeacherEnv
from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits
from src.envs.frozen_lake.utils import create_intervention_from_map, \
OptimalAgent, add_teacher
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
__all__ = ['create_teacher_env', 'small_base_cenv_fn']
def constraint(info=None, **kwargs):
return {'g': float(info['next_state_type'] in 'H')}
def small_base_env_fn():
# Base MDP
world_map = MAPS['small']
not_slipping_prob = 0.8
env_kwargs = dict(desc=world_map,
not_slipping_prob=not_slipping_prob,
base_r_mapping=None,
timeout=200)
return FrozenLakeEnvCustomMap(**env_kwargs)
# Base CMDP
def small_base_cenv_fn():
return CMDP(small_base_env_fn(), constraint,
constraints_values=[0],
n_constraints=1,
avg_constraint=True)
def make_base_small_cenvs():
# Base MDP
world_map = MAPS['small']
# # 2 interventions
# dist = [1, 1]
# tau = [0.1, 0]
# buff_size = [1, 0]
# avg_constraint = [True, True]
# 3 Interventions
dist = [2, 1, 1]
tau = [0.1, 0.1, 0]
buff_size = [1, 1, 0]
avg_constraint = [True, True, True]
interventions = []
for d, t, b, avg in zip(dist, tau, buff_size, avg_constraint):
interventions.append(
create_intervention(
small_base_cenv_fn,
create_intervention_from_map(add_teacher(world_map, d)),
[t], b, use_vec=True, avg_constraint=avg)
)
assert callable(interventions[0])
test_env = create_intervention(
small_base_cenv_fn(), create_intervention_from_map(add_teacher(
world_map)),
[0.0], 0, avg_constraint=True)
return interventions, test_env
############################## TEACHER ENV ###################################
def my_small_cnn(scaled_images, **kwargs):
activ = tf.nn.relu
layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3,
stride=1, **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3,
stride=1, **kwargs))
layer_3 = conv_to_fc(layer_2)
return activ(
linear(layer_3, 'fc1', n_hidden=32, init_scale=np.sqrt(2)))
def create_teacher_env(new_br_kwargs={}, new_online_kwargs={},
original=False, obs_from_training=False,
non_stationary_bandit=False):
# Student definition
br_kwargs = dict(policy=CnnPolicy, verbose=0, n_steps=128,
ent_coef=0.05, cliprange=0.2, learning_rate=1e-3,
noptepochs=9,
policy_kwargs={'cnn_extractor': my_small_cnn})
br_kwargs.update(new_br_kwargs)
# Define online kwargs
online_kwargs = dict(B=0.5, eta=1.0)
online_kwargs.update(new_online_kwargs)
student_cls = LagrangianStudent
n_envs = 4
use_sub_proc_env = False
student_default_kwargs = {'env': None,
'br_algo': PPO2,
'online_algo': ExponetiatedGradient,
'br_kwargs': br_kwargs,
'online_kwargs': online_kwargs,
'lagrangian_ronuds': 2,
'curriculum_transfer': identity_transfer,
'br_uses_vec_env': True,
'use_sub_proc_env': use_sub_proc_env,
'n_envs': n_envs,
}
student_ranges_dict = {}
# Teacher interventions
if original:
# To preserve the teacher env interface while training in the
# original environment, we introduce a dummy intervention
# condition that is always False.
def dummy_intervention(**kwargs):
return 0
_, test_env = make_base_small_cenvs()
intervention = create_intervention(
base_cenv=small_base_cenv_fn,
interventions=[dummy_intervention], taus=[0], buf_size=0,
use_vec=True, avg_constraint=True)
interventions = [intervention]
else:
interventions, test_env = make_base_small_cenvs()
learning_steps = 4800 * 2
time_steps_lim = learning_steps * 10
test_episode_timeout = 200
test_episode_number = 5
if obs_from_training:
env_cls = SmallFrozenTrainingObservation
elif non_stationary_bandit:
env_cls = SmallFrozenNonStationaryBandits
else:
env_cls = SmallFrozenTeacherEnv
return env_cls(student_cls=student_cls,
student_default_kwargs=student_default_kwargs,
interventions=interventions,
final_env=test_env,
logger_cls=FrozenLakeEvaluationLogger,
student_ranges_dict=student_ranges_dict,
learning_steps=learning_steps,
test_episode_number=test_episode_number,
test_episode_timeout=test_episode_timeout,
time_steps_lim=time_steps_lim,
normalize_obs=False)
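# Illustrative driver sketch (not part of the original file). It assumes the
# returned teacher environment follows the usual gym-style reset()/step()
# interface and that actions index into the interventions defined above; both
# are assumptions made purely for illustration.
def _example_teacher_rollout(n_steps=3):
    teacher_env = create_teacher_env()
    obs = teacher_env.reset()
    for _ in range(n_steps):
        obs, reward, done, info = teacher_env.step(0)  # 0 = first intervention
        if done:
            obs = teacher_env.reset()
    return obs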
| 35.432927 | 103 | 0.617794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 684 | 0.117708 |
31849d43e1ab19f400d94d35a3abbba09fecd8ff | 1,861 | py | Python | scanapi/__init__.py | rajarshig/scanapi | 9e29bf7fa7d8d1996be3c9deeb2675e12aff9418 | ["MIT"] | 1 | 2020-06-02T18:08:08.000Z | 2020-06-02T18:08:08.000Z | scanapi/__init__.py | rajarshig/scanapi | 9e29bf7fa7d8d1996be3c9deeb2675e12aff9418 | ["MIT"] | null | null | null | scanapi/__init__.py | rajarshig/scanapi | 9e29bf7fa7d8d1996be3c9deeb2675e12aff9418 | ["MIT"] | null | null | null |
name = "scanapi"
import click
import logging
from scanapi.tree.api_tree import APITree
from scanapi.reporter import Reporter
from scanapi.requests_maker import RequestsMaker
from scanapi.settings import SETTINGS
from scanapi.yaml_loader import load_yaml
@click.command()
@click.option(
"-s",
"--spec-path",
"spec_path",
type=click.Path(exists=True),
default=SETTINGS["spec_path"],
)
@click.option("-o", "--output-path", "output_path")
@click.option(
"-r",
"--reporter",
"reporter",
type=click.Choice(["console", "markdown", "html"]),
default=SETTINGS["reporter"],
)
@click.option("-t", "--template", "template")
@click.option(
"--log-level",
"log_level",
type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
default="INFO",
)
def scan(spec_path, output_path, reporter, template, log_level):
"""Automated Testing and Documentation for your REST API."""
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
SETTINGS.update({"spec_path": spec_path, "output_path": output_path})
# custom templates to be implemented later
if template is not None:
logger.warning("Custom templates are not supported yet. Soon to be. Hang tight.")
spec_path = SETTINGS["spec_path"]
try:
api_spec = load_yaml(spec_path)
except FileNotFoundError as e:
error_message = f"Could not find spec file: {spec_path}. {str(e)}"
logger.error(error_message)
return
try:
api_tree = APITree(api_spec)
except Exception as e:
error_message = "Error loading API spec."
error_message = "{} {}".format(error_message, str(e))
logger.error(error_message)
return
RequestsMaker(api_tree).make_all()
Reporter(output_path, reporter, template).write(api_tree.responses.values())
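# Illustrative only (not part of the original package): click commands such as
# `scan` can be exercised programmatically with click's CliRunner; the spec
# file name used here is an assumption.
def _example_invoke_scan():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(scan, ["--spec-path", "scanapi.yaml", "--reporter", "console"])
    return result.exit_code, result.output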
| 28.630769 | 86 | 0.674369 | 0 | 0 | 0 | 0 | 1,602 | 0.860828 | 0 | 0 | 520 | 0.27942 |
3184b4c5a678ebc4e6558070c2d466802ef6e550 | 1,065 | py | Python | breadth first search/level order successor.py | JoanWu5/Grokking-the-coding-interview | 0ae68fb1c86ff595a82af68f7a6a6fdfe37e97a7 | ["MIT"] | null | null | null | breadth first search/level order successor.py | JoanWu5/Grokking-the-coding-interview | 0ae68fb1c86ff595a82af68f7a6a6fdfe37e97a7 | ["MIT"] | null | null | null | breadth first search/level order successor.py | JoanWu5/Grokking-the-coding-interview | 0ae68fb1c86ff595a82af68f7a6a6fdfe37e97a7 | ["MIT"] | 2 | 2021-11-17T18:52:53.000Z | 2021-12-04T09:26:37.000Z |
# Given a binary tree and a node, find the level order successor of the given node in the tree.
# The level order successor is the node that appears right after the given node in the level order traversal.
from collections import deque
class TreeNode:
def __init__(self, value) -> None:
self.value = value
self.left = None
self.right = None
def level_order_successor(root, key):
if root is None:
return None
queue = deque()
queue.append(root)
while queue:
current_node = queue.popleft()
if current_node.left:
queue.append(current_node.left)
if current_node.right:
queue.append(current_node.right)
if current_node.value == key:
break
return queue[0].value if queue else None
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print(level_order_successor(root, 12))  # level order is 12, 7, 1, 9, 10, 5 -> prints 7
print(level_order_successor(root, 9))  # -> prints 10
| 26.625 | 109 | 0.661972 | 132 | 0.123944 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.192488 |
31860bb3aacd1e3679ae7c59b51984c300ac1f34 | 3,078 | py | Python | npbench/benchmarks/cavity_flow/cavity_flow_legate.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | ["BSD-3-Clause"] | 27 | 2021-05-10T11:49:13.000Z | 2022-03-22T18:07:19.000Z | npbench/benchmarks/cavity_flow/cavity_flow_legate.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | ["BSD-3-Clause"] | 3 | 2021-12-01T13:03:17.000Z | 2022-03-17T10:53:00.000Z | npbench/benchmarks/cavity_flow/cavity_flow_legate.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | ["BSD-3-Clause"] | 7 | 2021-06-24T03:40:25.000Z | 2022-01-26T09:04:33.000Z |
# Barba, Lorena A., and Forsyth, Gilbert F. (2018).
# CFD Python: the 12 steps to Navier-Stokes equations.
# Journal of Open Source Education, 1(9), 21,
# https://doi.org/10.21105/jose.00021
# TODO: License
# (c) 2017 Lorena A. Barba, Gilbert F. Forsyth.
# All content is under Creative Commons Attribution CC-BY 4.0,
# and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018).
import legate.numpy as np
def build_up_b(b, rho, dt, u, v, dx, dy):
b[1:-1,
1:-1] = (rho * (1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx) +
(v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy)) -
((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 - 2 *
((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) *
(v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx)) -
((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))
def pressure_poisson(nit, p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
(pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
(2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
(2 * (dx**2 + dy**2)) * b[1:-1, 1:-1])
p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
p[0, :] = p[1, :] # dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
p[-1, :] = 0 # p = 0 at y = 2
def cavity_flow(nx, ny, nt, nit, u, v, dt, dx, dy, p, rho, nu):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
for n in range(nt):
un = u.copy()
vn = v.copy()
build_up_b(b, rho, dt, u, v, dx, dy)
pressure_poisson(nit, p, dx, dy, b)
u[1:-1,
1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
(un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * dt / dy *
(un[1:-1, 1:-1] - un[0:-2, 1:-1]) - dt / (2 * rho * dx) *
(p[1:-1, 2:] - p[1:-1, 0:-2]) + nu *
(dt / dx**2 *
(un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
dt / dy**2 *
(un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])))
v[1:-1,
1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
(vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * dt / dy *
(vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - dt / (2 * rho * dy) *
(p[2:, 1:-1] - p[0:-2, 1:-1]) + nu *
(dt / dx**2 *
(vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
dt / dy**2 *
(vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))
u[0, :] = 0
u[:, 0] = 0
u[:, -1] = 0
u[-1, :] = 1 # set velocity on cavity lid equal to 1
v[0, :] = 0
v[-1, :] = 0
v[:, 0] = 0
v[:, -1] = 0
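# Illustrative driver sketch (not part of the original benchmark): grid sizes,
# time-step counts and physical constants below are assumptions chosen to
# mirror the classic lid-driven cavity setup from the referenced course.
def _example_cavity_driver():
    nx, ny = 41, 41
    nt, nit = 100, 50
    dx = 2.0 / (nx - 1)
    dy = 2.0 / (ny - 1)
    rho, nu, dt = 1.0, 0.1, 0.001
    u = np.zeros((ny, nx))
    v = np.zeros((ny, nx))
    p = np.zeros((ny, nx))
    cavity_flow(nx, ny, nt, nit, u, v, dt, dx, dy, p, rho, nu)
    return u, v, p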
| 37.084337 | 90 | 0.341131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.167641 |
3188b47960735f6a2d30024aca9b8fc438dc3613 | 20,651 | py | Python | Lib/gds/burp/config.py | mwielgoszewski/jython-burp-api | 002383f7acc5fb237e3804fe5bd2aa2950a0240d | ["0BSD"] | 134 | 2015-01-21T14:22:42.000Z | 2021-09-02T10:52:43.000Z | Lib/gds/burp/config.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | ["0BSD"] | 7 | 2015-01-19T16:54:45.000Z | 2018-10-10T15:10:13.000Z | Lib/gds/burp/config.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | ["0BSD"] | 29 | 2015-02-13T14:08:23.000Z | 2021-12-17T03:17:40.000Z |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from ConfigParser import ConfigParser
from copy import deepcopy
from inspect import cleandoc
import os.path
from .core import ExtensionPoint
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
'IntOption', 'FloatOption', 'ListOption',
'OrderedExtensionsOption']
_use_default = object()
def as_bool(value):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true", "enabled",
"on" or non-zero numbers, ignoring case. For non-string arguments, return
the argument converted to a `bool`, or `False` if the conversion fails.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
return value.strip().lower() in ('yes', 'true', 'enabled', 'on')
try:
return bool(value)
except (TypeError, ValueError):
return False
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
def _to_utf8(basestr):
return to_unicode(basestr, 'utf-8').encode('utf-8')
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
def __init__(self, filename, params={}):
self.filename = filename
self.parser = ConfigParser()
self.parser.optionxform = str
self._old_sections = {}
self.parents = []
self._lastmtime = 0
self._sections = {}
self.parser.read(filename)
def __contains__(self, name):
"""Return whether the configuration contains a section of the given
name.
"""
return name in self.sections()
def __getitem__(self, name):
"""Return the configuration section with the specified name."""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def get(self, section, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
return self[section].get(key, default)
def getbool(self, section, key, default=''):
"""Return the specified option as boolean value.
If the value of the option is one of "yes", "true", "enabled", "on",
or "1", this method wll return `True`, otherwise `False`.
Valid default input is a string or a bool. Returns a bool.
"""
return self[section].getbool(key, default)
def getint(self, section, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
return self[section].getint(key, default)
def getfloat(self, section, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
return self[section].getfloat(key, default)
def getlist(self, section, key, default='', sep=',', keep_empty=False):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
Valid default input is a string or a list. Returns a list.
"""
return self[section].getlist(key, default, sep, keep_empty)
def getpath(self, section, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to the location of this
configuration file.
Valid default input is a string. Returns a normalized path.
"""
return self[section].getpath(key, default)
def defaults(self, compmgr=None):
"""Returns a dictionary of the default configuration values
If `compmgr` is specified, return only options declared in components
that are enabled in the given `ComponentManager`.
"""
defaults = {}
for (section, key), option in Option.get_registry(compmgr).items():
defaults.setdefault(section, {})[key] = option.default
return defaults
def options(self, section, compmgr=None):
"""Return a list of `(name, value)` tuples for every option in the
specified section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
return self[section].options(compmgr)
def remove(self, section, key):
"""Remove the specified option."""
self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set([to_unicode(s) for s in self.parser.sections()])
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
burp.ini or one of the parents, or is available through the Option
registry.
"""
section_str = _to_utf8(section)
if self.parser.has_section(section_str):
if _to_utf8(option) in self.parser.options(section_str):
return True
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
return True
return defaults and (section, option) in Option.registry
def parse_if_needed(self, force=False):
if not self.filename or not os.path.isfile(self.filename):
return False
changed = False
modtime = os.path.getmtime(self.filename)
if force or modtime > self._lastmtime:
self._sections = {}
self.parser._sections = {}
if not self.parser.read(self.filename):
raise IOError("Error reading '%(file)s', make sure it is "
"readable." % (self.filename, ))
self._lastmtime = modtime
self._old_sections = deepcopy(self.parser._sections)
changed = True
if changed:
self.parents = []
if self.parser.has_option('inherit', 'file'):
for filename in self.parser.get('inherit', 'file').split(','):
filename = to_unicode(filename.strip())
if not os.path.isabs(filename):
filename = os.path.join(os.path.dirname(self.filename),
filename)
self.parents.append(Configuration(filename))
else:
for parent in self.parents:
changed |= parent.parse_if_needed(force=force)
if changed:
self._cache = {}
return changed
class Section(object):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', 'overridden', '_cache']
def __init__(self, config, name):
self.config = config
self.name = name
self.overridden = {}
self._cache = {}
def contains(self, key, defaults=True):
if self.config.parser.has_option(_to_utf8(self.name), _to_utf8(key)):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
return defaults and Option.registry.has_key((self.name, key))
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
name_str = _to_utf8(self.name)
if self.config.parser.has_section(name_str):
for option_str in self.config.parser.options(name_str):
option = to_unicode(option_str)
options.add(option.lower())
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = option.lower()
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).keys():
if section == self.name and option.lower() not in options:
yield option
__iter__ = iterate
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
name_str = _to_utf8(self.name)
key_str = _to_utf8(key)
if self.config.parser.has_option(name_str, key_str):
value = self.config.parser.get(name_str, key_str)
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option.default if option else _use_default
else:
value = _use_default
if value is _use_default:
return default
if not value:
value = u''
elif isinstance(value, basestring):
value = to_unicode(value)
self._cache[key] = value
return value
def getbool(self, key, default=''):
"""Return the value of the specified option as boolean.
This method returns `True` if the option value is one of "yes", "true",
"enabled", "on", or non-zero numbers, ignoring case. Otherwise `False`
is returned.
Valid default input is a string or a bool. Returns a bool.
"""
return as_bool(self.get(key, default))
def getint(self, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
value = self.get(key, default)
if not value:
return 0
return int(value)
def getfloat(self, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
value = self.get(key, default)
if not value:
return 0.0
return float(value)
def getlist(self, key, default='', sep=',', keep_empty=True):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `False`, empty elements are
omitted from the list.
Valid default input is a string or a list. Returns a list.
"""
value = self.get(key, default)
if not value:
return []
if isinstance(value, basestring):
items = [item.strip() for item in value.split(sep)]
else:
items = list(value)
if not keep_empty:
items = filter(None, items)
return items
def getpath(self, key, default=''):
"""Return the value of the specified option as a path, relative to
the location of this configuration file.
Valid default input is a string. Returns a normalized path.
"""
path = self.get(key, default)
if not path:
return default
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.config.filename), path)
return os.path.normcase(os.path.realpath(path))
def options(self, compmgr=None):
"""Return `(key, value)` tuples for every option in the section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
for key in self.iterate(compmgr):
yield key, self.get(key)
def _get_registry(cls, compmgr=None):
"""Return the descriptor registry.
If `compmgr` is specified, only return descriptors for components that
are enabled in the given `ComponentManager`.
"""
if compmgr is None:
return cls.registry
from .core import ComponentMeta
components = {}
for comp in ComponentMeta._components:
for attr in comp.__dict__.itervalues():
if isinstance(attr, cls):
components[attr] = comp
return dict(each for each in cls.registry.iteritems()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
class ConfigSection(object):
"""Descriptor for configuration sections."""
registry = {}
@staticmethod
def get_registry(compmgr=None):
"""Return the section registry, as a `dict` mapping section names to
`ConfigSection` objects.
If `compmgr` is specified, only return sections for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(ConfigSection, compmgr)
def __init__(self, name, doc, doc_domain='burpini'):
"""Create the configuration section."""
self.name = name
self.registry[self.name] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
return config[self.name]
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
class Option(object):
"""Descriptor for configuration options."""
registry = {}
accessor = Section.get
@staticmethod
def get_registry(compmgr=None):
"""Return the option registry, as a `dict` mapping `(section, key)`
tuples to `Option` objects.
If `compmgr` is specified, only return options for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(Option, compmgr)
def __init__(self, section, name, default=None, doc='',
doc_domain='burpini'):
"""Create the configuration option.
:param section: the name of the configuration section this option
belongs to
:param name: the name of the option
:param default: the default value for the option
:param doc: documentation of the option
"""
self.section = section
self.name = name
self.default = default
self.registry[(self.section, self.name)] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
section = config[self.section]
value = self.accessor(section, self.name, self.default)
return value
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __repr__(self):
return '<%s [%s] "%s">' % (self.__class__.__name__, self.section,
self.name)
class BoolOption(Option):
"""Descriptor for boolean configuration options."""
accessor = Section.getbool
class IntOption(Option):
"""Descriptor for integer configuration options."""
accessor = Section.getint
class FloatOption(Option):
"""Descriptor for float configuration options."""
accessor = Section.getfloat
class ListOption(Option):
"""Descriptor for configuration options that contain multiple values
separated by a specific character.
"""
def __init__(self, section, name, default=None, sep=',', keep_empty=False,
doc='', doc_domain='burpini'):
Option.__init__(self, section, name, default, doc, doc_domain)
self.sep = sep
self.keep_empty = keep_empty
def accessor(self, section, name, default):
return section.getlist(name, default, self.sep, self.keep_empty)
class OrderedExtensionsOption(ListOption):
"""A comma separated, ordered, list of components implementing `interface`.
Can be empty.
If `include_missing` is true (the default) all components implementing
the interface are returned, with those specified by the option ordered
first."""
def __init__(self, section, name, interface, default=None,
include_missing=True, doc='', doc_domain='burpini'):
ListOption.__init__(self, section, name, default, doc=doc,
doc_domain=doc_domain)
self.xtnpt = ExtensionPoint(interface)
self.include_missing = include_missing
def __get__(self, instance, owner):
if instance is None:
return self
order = ListOption.__get__(self, instance, owner)
components = []
for impl in self.xtnpt.extensions(instance):
if self.include_missing or impl.__class__.__name__ in order:
components.append(impl)
def compare(x, y):
x, y = x.__class__.__name__, y.__class__.__name__
if x not in order:
return int(y in order)
if y not in order:
return -int(x in order)
return cmp(order.index(x), order.index(y))
components.sort(compare)
return components
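# Illustrative usage sketch (not part of the original module): Configuration
# can also be used directly to read typed values from an ini file; the file
# name, section and option names below are assumptions.
def _example_read_config(path='burp.ini'):
    config = Configuration(path)
    history_size = config.getint('proxy', 'history_size', '100')
    hosts = config.getlist('proxy', 'hosts', 'localhost', sep=',')
    debug = config.getbool('proxy', 'debug', 'false')
    return history_size, hosts, debug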
| 35.666667 | 79 | 0.619486 | 17,230 | 0.834342 | 1,455 | 0.070457 | 702 | 0.033994 | 0 | 0 | 8,109 | 0.392669 |
318a952b81c7d9540e9926622426293ecbdc84ee | 1,572 | py | Python | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | ["Apache-2.0"] | null | null | null | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | ["Apache-2.0"] | null | null | null | src/Application/PythonScriptModule/pymodules_old/apitest/rotate.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | ["Apache-2.0"] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z |
import circuits
from PythonQt.QtGui import QQuaternion as Quat
from PythonQt.QtGui import QVector3D as Vec
import naali
COMPNAME = "rotation"
class RotationHandler(circuits.BaseComponent):
def __init__(self, entity=None, comp=None, changetype=None):
circuits.BaseComponent.__init__(self)
self.entity = entity
self.comp = comp
if self.comp is not None: #normal run, check for nonEC run now
# Todo: OnChanged() is deprecated
comp.connect("OnChanged()", self.onChanged)
self.rot = Quat.fromAxisAndAngle(Vec(0, 1, 0), 1)
def onChanged(self):
y = self.comp.GetAttribute('y')
self.rot = Quat.fromAxisAndAngle(Vec(0, y, 0), 1)
#print self.rot, y
@circuits.handler("update")
def update(self, frametime):
if self.entity is not None:
p = self.entity.placeable
ort = p.Orientation
ort *= self.rot
p.Orientation = ort
# else: #testing without EC, as a autoloaded module
# entid = 2088826547
# try:
# self.entity = naali.getEntity(entid)
# except:
# pass #not there (yet)
# else:
# self.entity.createComponent("EC_DynamicComponent")
# oldent = r.getEntity(ent.id)
# self.comp = oldent.dynamic
@circuits.handler("on_logout")
def on_logout(self, evid):
self.entity = None #XXX figure out proper unregistering, preferably in componenthandler.py / EC_Script biz
| 34.933333 | 115 | 0.600509 | 1,427 | 0.907761 | 0 | 0 | 814 | 0.517812 | 0 | 0 | 540 | 0.343511 |
318bb2ce68ce930a154f36901f32368d1debcea3 | 728 | py | Python | pyqubo/package_info.py | caja-matematica/pyqubo | 5e5c9d1a36c756ba8c05eac23bbefe2ac369bce5 | ["Apache-2.0"] | 1 | 2018-10-11T08:51:02.000Z | 2018-10-11T08:51:02.000Z | pyqubo/package_info.py | kotarotanahashi/pyqubo | d2983b5f2b9e4ebee495c345326a1b2fd98f1c4f | ["Apache-2.0"] | null | null | null | pyqubo/package_info.py | kotarotanahashi/pyqubo | d2983b5f2b9e4ebee495c345326a1b2fd98f1c4f | ["Apache-2.0"] | null | null | null |
# (major, minor, patch, prerelease)
VERSION = (0, 0, 6, "")
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pyqubo'
__contact_names__ = 'Recruit Communications Co., Ltd.'
__contact_emails__ = '[email protected]'
__homepage__ = 'https://pyqubo.readthedocs.io/en/latest/'
__repository_url__ = 'https://github.com/recruit-communications/pyqubo'
__download_url__ = 'https://github.com/recruit-communications/pyqubo'
__description__ = 'PyQUBO allows you to create QUBOs or Ising models from mathematical expressions.'
__license__ = 'Apache 2.0'
__keywords__ = 'QUBO, quantum annealing, annealing machine, ising model, optimization'
| 45.5 | 100 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.576923 |
318c27aa8cc3118e9c4a2079f233ea65d902fc9c | 19,702 | py | Python | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | ["Apache-2.0"] | null | null | null | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | ["Apache-2.0"] | null | null | null | bin/DBImportOperation/etl_operations.py | karlam123/DBImport | ebaf3f909841276d289bfb2f6eec0ecafa8395cf | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import re
import logging
import subprocess
import errno, os, pty
import shlex
from subprocess import Popen, PIPE
from ConfigReader import configuration
import mysql.connector
from mysql.connector import errorcode
from common.Singleton import Singleton
from DBImportConfig import import_config
from DBImportOperation import common_operations
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import time
class operation(object, metaclass=Singleton):
def __init__(self, Hive_DB=None, Hive_Table=None):
logging.debug("Executing etl_operations.__init__()")
self.Hive_DB = None
self.Hive_Table = None
self.mysql_conn = None
self.mysql_cursor = None
self.startDate = None
self.common_operations = common_operations.operation(Hive_DB, Hive_Table)
self.import_config = import_config.config(Hive_DB, Hive_Table)
if Hive_DB != None and Hive_Table != None:
self.setHiveTable(Hive_DB, Hive_Table)
else:
# If the class already is initialized, we just pull the parameters and set them here
self.Hive_DB = self.common_operations.Hive_DB
self.Hive_Table = self.common_operations.Hive_Table
self.startDate = self.import_config.startDate
logging.debug("Executing etl_operations.__init__() - Finished")
def setHiveTable(self, Hive_DB, Hive_Table):
""" Sets the parameters to work against a new Hive database and table """
self.Hive_DB = Hive_DB.lower()
self.Hive_Table = Hive_Table.lower()
self.common_operations.setHiveTable(self.Hive_DB, self.Hive_Table)
self.import_config.setHiveTable(self.Hive_DB, self.Hive_Table)
try:
self.import_config.getImportConfig()
self.startDate = self.import_config.startDate
self.import_config.lookupConnectionAlias()
except:
self.import_config.remove_temporary_files()
sys.exit(1)
def remove_temporary_files(self):
self.import_config.remove_temporary_files()
def connectToHive(self,):
logging.debug("Executing etl_operations.connectToHive()")
try:
self.common_operations.connectToHive()
except Exception as ex:
logging.error(ex)
self.import_config.remove_temporary_files()
sys.exit(1)
logging.debug("Executing etl_operations.connectToHive() - Finished")
def mergeHiveTables(self, sourceDB, sourceTable, targetDB, targetTable, historyDB = None, historyTable=None, targetDeleteDB = None, targetDeleteTable=None, createHistoryAudit=False, sourceIsIncremental=False, sourceIsImportTable=False, softDelete=False, mergeTime=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), datalakeSource=None, PKColumns=None, hiveMergeJavaHeap=None, oracleFlashbackSource=False, mssqlChangeTrackingSource=False, deleteNotUpdatedRows=False, oracleFlashbackImportTable=None, mssqlChangeTrackingImportTable=None):
""" Merge source table into Target table. Also populate a History Audit table if selected """
logging.debug("Executing etl_operations.mergeHiveTables()")
targetColumns = self.common_operations.getHiveColumns(hiveDB=targetDB, hiveTable=targetTable, includeType=False, includeComment=False)
columnMerge = self.common_operations.getHiveColumnNameDiff(sourceDB=sourceDB, sourceTable=sourceTable, targetDB=targetDB, targetTable=targetTable, importTool = self.import_config.importTool, sourceIsImportTable=True)
if PKColumns == None:
PKColumns = self.common_operations.getPKfromTable(hiveDB=targetDB, hiveTable=targetTable, quotedColumns=False)
datalakeIUDExists = False
datalakeInsertExists = False
datalakeUpdateExists = False
datalakeDeleteExists = False
datalakeSourceExists = False
for index, row in targetColumns.iterrows():
if row['name'] == "datalake_iud": datalakeIUDExists = True
if row['name'] == "datalake_insert": datalakeInsertExists = True
if row['name'] == "datalake_update": datalakeUpdateExists = True
if row['name'] == "datalake_delete": datalakeDeleteExists = True
if row['name'] == "datalake_source": datalakeSourceExists = True
if hiveMergeJavaHeap != None:
query = "set hive.tez.container.size=%s"%(hiveMergeJavaHeap)
self.common_operations.executeHiveQuery(query)
query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "using `%s`.`%s` as S \n"%(sourceDB, sourceTable)
query += "on \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
try:
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
except IndexError:
logging.error("Primary Key cant be found in the source target table. Please check PK override")
self.import_config.remove_temporary_files()
sys.exit(1)
if sourceColumn == None:
logging.error("ERROR: Problem determine column name in source table for primary key column '%s'"%(targetColumn))
self.import_config.remove_temporary_files()
sys.exit(1)
if i == 0:
query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
else:
query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
query += "\n"
query += "when matched "
if sourceIsIncremental == False:
# If the source is not incremental, it means that we need to check all the values in
# all columns as we don't know if the row has changed or not
query += "and (\n"
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
foundPKcolumn = False
for column in PKColumns.split(","):
if row['targetName'] == column:
foundPKcolumn = True
if foundPKcolumn == False:
if firstIteration == True:
query += " "
firstIteration = False
else:
query += " or "
query += "T.`%s` != S.`%s` "%(row['targetName'], row['sourceName'])
query += "or ( T.`%s` is null and S.`%s` is not null ) "%(row['targetName'], row['sourceName'])
query += "or ( T.`%s` is not null and S.`%s` is null ) "%(row['targetName'], row['sourceName'])
query += "\n"
if softDelete == True and datalakeIUDExists == True:
# If a row is deleted and then inserted again with the same values in all fields, this will still trigger an update
query += " or T.datalake_iud = 'D' \n"
query += ") \n"
if oracleFlashbackSource == True:
query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n"
if mssqlChangeTrackingSource == True:
query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n"
query += "then update set "
firstIteration = True
nonPKcolumnFound = False
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
foundPKcolumn = False
for column in PKColumns.split(","):
if row['targetName'] == column:
foundPKcolumn = True
if foundPKcolumn == False:
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s` = S.`%s`"%(row['targetName'], row['sourceName'])
nonPKcolumnFound = True
if nonPKcolumnFound == False:
# This will happen if there are only columns that are part of the PK in the table. Impossible to merge it with full history
logging.error("This table only has columns that are part of the PrimaryKey. Merge operations can't be used")
self.import_config.remove_temporary_files()
sys.exit(1)
if datalakeIUDExists == True: query += ", \n `datalake_iud` = 'U'"
if datalakeUpdateExists == True: query += ", \n `datalake_update` = '%s'"%(mergeTime)
if datalakeSourceExists == True and datalakeSource != None: query += ", \n `datalake_source` = '%s'"%(datalakeSource)
query += " \n"
if oracleFlashbackSource == True:
query += "when matched and S.datalake_flashback_operation = 'D' then delete \n"
if mssqlChangeTrackingSource == True:
query += "when matched and S.datalake_mssql_changetrack_operation = 'D' then delete \n"
query += "when not matched "
if oracleFlashbackSource == True:
query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n"
if mssqlChangeTrackingSource == True:
query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n"
query += "then insert values ( "
firstIteration = True
for index, row in targetColumns.iterrows():
ColumnName = row['name']
sourceColumnName = columnMerge.loc[columnMerge['targetName'] == ColumnName]['sourceName'].fillna('').iloc[0]
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
if sourceColumnName != "":
query += " S.`%s`"%(sourceColumnName)
elif ColumnName == "datalake_iud":
query += " 'I'"
elif ColumnName == "datalake_insert":
query += " '%s'"%(mergeTime)
elif ColumnName == "datalake_update":
query += " '%s'"%(mergeTime)
elif ColumnName == "datalake_source":
query += " '%s'"%(datalakeSource)
else:
query += " NULL"
query += " \n) \n"
# print("==============================================================")
# print(query)
# self.import_config.remove_temporary_files()
# sys.exit(1)
## query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
if deleteNotUpdatedRows == True:
# This is used by Oracle Flashback and MSSQL Change Tracking imports when doing a reinitialization of the data and we need to
# remove the rows that was not updated
query = "delete from `%s`.`%s` where datalake_update != '%s' "%(targetDB, targetTable, mergeTime)
self.common_operations.executeHiveQuery(query)
# If a row was previously deleted and now inserted again and we are using Soft Delete,
# then the information in the datalake_iud, datalake_insert and datalake_delete is wrong.
if softDelete == True:
query = "update `%s`.`%s` set "%(targetDB, targetTable)
query += " datalake_iud = 'I', "
query += " datalake_insert = datalake_update, "
query += " datalake_delete = null "
query += "where "
query += " datalake_iud = 'U' and "
query += " datalake_delete is not null"
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
# Statement to select all rows that was changed in the Target table and insert them to the History table
if createHistoryAudit == True and historyDB != None and historyTable != None and oracleFlashbackSource == False:
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_iud`"
query += ",\n `datalake_update`"
query += "\nfrom `%s`.`%s` \n"%(targetDB, targetTable)
query += "where datalake_update = '%s'"%(mergeTime)
self.common_operations.executeHiveQuery(query)
if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:
# Start with truncating the History Delete table as we need to rebuild this one from scratch to determine what rows are deleted
query = "truncate table `%s`.`%s`"%(targetDeleteDB, targetDeleteTable)
self.common_operations.executeHiveQuery(query)
# Insert all rows (PK columns only) that exist in the Target Table but don't exist in the Import table (the ones that were deleted)
query = "insert into table `%s`.`%s` \n(`"%(targetDeleteDB, targetDeleteTable)
query += "`, `".join(PKColumns.split(","))
query += "`) \nselect T.`"
query += "`, T.`".join(PKColumns.split(","))
query += "` \nfrom `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "left outer join `%s`.`%s` as S \n"%(sourceDB, sourceTable)
query += "on \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
if i == 0:
query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
else:
query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn)
query += "\nwhere \n"
for i, targetColumn in enumerate(PKColumns.split(",")):
sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']
if i == 0:
query += " S.`%s` is null "%(sourceColumn)
else:
query += "and\n S.`%s` is null "%(sourceColumn)
self.common_operations.executeHiveQuery(query)
if oracleFlashbackSource == True and createHistoryAudit == True:
# If it is a history merge with Oracle Flashback, we need to handle the deletes separately
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_flashback_operation` as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s`"%(sourceDB, oracleFlashbackImportTable)
self.common_operations.executeHiveQuery(query)
if mssqlChangeTrackingSource == True and createHistoryAudit == True:
# If it is a history merge with MSSQL Change Tracking, we need to handle the deletes separately
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s'"%(datalakeSource)
query += ",\n `datalake_mssql_changetrack_operation` as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s`"%(sourceDB, mssqlChangeTrackingImportTable)
self.common_operations.executeHiveQuery(query)
# Insert the deleted rows into the History table. Without this, it's impossible to see what values the column had before the delete
if sourceIsIncremental == False and createHistoryAudit == True and historyDB != None and historyTable != None and targetDeleteDB != None and targetDeleteTable != None:
query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable)
query += "( "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " `%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n `datalake_source`"
query += ",\n `datalake_iud`"
query += ",\n `datalake_timestamp`"
query += "\n) \n"
query += "select "
firstIteration = True
for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():
if firstIteration == True:
firstIteration = False
query += " \n"
else:
query += ", \n"
query += " T.`%s`"%(row['targetName'])
if datalakeSourceExists == True:
query += ",\n '%s' as `datalake_source`"%(datalakeSource)
query += ",\n 'D' as `datalake_iud`"
query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime)
query += "\nfrom `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable)
query += "left join `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "on \n"
for i, column in enumerate(PKColumns.split(",")):
if i == 0:
query += " T.`%s` = D.`%s` "%(column, column)
else:
query += "and\n T.`%s` = D.`%s` "%(column, column)
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:
            # Use the merge command to delete (or soft-delete) the rows in the Target Table that are present in the Delete Table
query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable)
query += "using `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable)
query += "on \n"
for i, column in enumerate(PKColumns.split(",")):
if i == 0:
query += " T.`%s` = D.`%s` "%(column, column)
else:
query += "and\n T.`%s` = D.`%s` "%(column, column)
if softDelete == True:
query += "and\n T.`datalake_delete` != 'D' "
query += "\n"
if softDelete == False:
query += "when matched then delete \n"
else:
query += "when matched then update set \n"
query += "datalake_iud = 'D', \n"
query += "datalake_update = timestamp('%s'), \n"%(mergeTime)
query += "datalake_delete = timestamp('%s') "%(mergeTime)
# print("==============================================================")
# print(query)
# query = query.replace('\n', '')
self.common_operations.executeHiveQuery(query)
logging.debug("Executing etl_operations.mergeHiveTables() - Finished")
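        # Illustrative sketch only (table names below are hypothetical, not produced by this module):
        # for a target table `dw`.`customers` with PK column `id`, the delete-handling MERGE built
        # above comes out roughly as
        #
        #   merge into `dw`.`customers` as T
        #   using `dw_delete`.`customers_delete` as D
        #   on  T.`id` = D.`id`
        #   when matched then delete
        #
        # or, with softDelete enabled, an UPDATE that stamps datalake_iud = 'D' and the
        # datalake_update/datalake_delete timestamps instead of removing the row.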
| 40.372951 | 535 | 0.661963 | 18,469 | 0.937418 | 0 | 0 | 0 | 0 | 0 | 0 | 7,561 | 0.383768 |
318f1a187d9d522a7bfc65eda71b220b6ecec2a9 | 2,394 | py | Python | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
]
| 1 | 2021-11-25T08:12:48.000Z | 2021-11-25T08:12:48.000Z | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
]
| null | null | null | grvx/nodes/ieeg/read.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
]
| null | null | null | from logging import getLogger
from numpy import mean, std
from pickle import dump
from wonambi import Dataset
from wonambi.trans import math, concatenate
from bidso import Task, Electrodes
lg = getLogger(__name__)
def read_ieeg_block(filename, electrode_file, conditions, minimalduration, output_dir):
d = Dataset(filename, bids=True)
markers = d.read_markers()
electrodes = Electrodes(electrode_file)
elec_names = [x['name'] for x in electrodes.electrodes.tsv]
    elec_names = [x for x in elec_names if x in d.header['chan_name']] # exclude electrode locations that have no corresponding channel
all_conditions = [x for v in conditions.values() for x in v]
clean_labels = _reject_channels(d, elec_names, all_conditions, minimalduration)
outputs = []
for active_baseline, data_conds in conditions.items():
block_beg = []
block_end = []
for mrk in markers:
if mrk['name'] in data_conds:
dur = (mrk['end'] - mrk['start'])
if dur >= minimalduration:
block_beg.append(mrk['start'])
block_end.append(mrk['end'])
data = d.read_data(begtime=block_beg, endtime=block_end, chan=clean_labels)
output_task = Task(filename)
output_task.extension = '.pkl'
output_task.task += active_baseline
output_file = output_dir / output_task.get_filename()
with output_file.open('wb') as f:
dump(data, f)
outputs.append(output_file)
return outputs
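# Usage sketch (file names and condition labels below are hypothetical, not part of this module):
#
#   outputs = read_ieeg_block(
#       'sub-01_task-motor_ieeg.eeg',                 # any Dataset-readable recording
#       'sub-01_electrodes.tsv',                      # BIDS electrodes file
#       conditions={'active': ['move'], 'baseline': ['rest']},
#       minimalduration=2,                            # marker blocks shorter than this are skipped
#       output_dir=output_dir,                        # a pathlib.Path
#   )
#
# One pickle per condition group is written to output_dir and its path returned in `outputs`.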
def _reject_channels(d, elec_names, cond, minimalduration):
markers = d.read_markers()
block_beg = []
block_end = []
for mrk in markers:
if mrk['name'] in cond:
dur = (mrk['end'] - mrk['start'])
if dur >= minimalduration:
block_beg.append(mrk['start'])
block_end.append(mrk['end'])
data = d.read_data(chan=elec_names, begtime=block_beg, endtime=block_end)
data = concatenate(data, 'time')
clean_labels = reject_channels(data, 3)
return clean_labels
def reject_channels(dat, reject_chan_thresh):
dat_std = math(dat, operator_name='nanstd', axis='time')
THRESHOLD = reject_chan_thresh
x = dat_std.data[0]
thres = [mean(x) + THRESHOLD * std(x)]
clean_labels = list(dat_std.chan[0][dat_std.data[0] < thres])
return clean_labels
| 32.351351 | 130 | 0.651211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.068922 |
318fbfd55bdcd7ac71d0dc2747eb31643026f551 | 3,021 | py | Python | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
]
| 6 | 2015-09-19T18:22:33.000Z | 2020-11-29T15:21:17.000Z | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
]
| 1 | 2015-08-04T08:03:46.000Z | 2015-08-04T08:03:46.000Z | bin/analysis/ipa/constraints/split.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
]
| 1 | 2019-12-09T08:27:09.000Z | 2019-12-09T08:27:09.000Z | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import ast
from . base import Constraint
from .. calling import cpa
class Splitter(Constraint):
def __init__(self, src):
assert src.isNode(), src
self.src = src
self.dst = []
self.callbacks = []
def addSplitCallback(self, callback):
self.callbacks.append(callback)
if self.objects: callback()
def attach(self):
self.src.addNext(self)
def localName(self):
return 'split_temp'
def makeTarget(self, context):
lcl = context.local(ast.Local(self.localName()))
lcl.addPrev(self)
self.dst.append(lcl)
return lcl
def makeConsistent(self, context):
# Make constraint consistent
if self.src.values:
self.changed(context, self.src, self.src.values)
if self.src.critical.values:
self.criticalChanged(context, self.src, self.src.critical.values)
def criticalChanged(self, context, node, diff):
for dst in self.dst:
dst.critical.updateValues(context, dst, diff)
def doNotify(self):
for callback in self.callbacks:
callback()
def isSplit(self):
return True
class TypeSplitConstraint(Splitter):
def __init__(self, src):
Splitter.__init__(self, src)
self.objects = {}
self.megamorphic = False
def localName(self):
return 'type_split_temp'
def types(self):
return self.objects.keys()
def makeMegamorphic(self):
assert not self.megamorphic
self.megamorphic = True
self.objects.clear()
self.objects[cpa.anyType] = self.src
self.doNotify()
def changed(self, context, node, diff):
if self.megamorphic: return
changed = False
for obj in diff:
cpaType = obj.cpaType()
if cpaType not in self.objects:
if len(self.objects) >= 4:
self.makeMegamorphic()
break
else:
temp = self.makeTarget(context)
self.objects[cpaType] = temp
changed = True
else:
temp = self.objects[cpaType]
temp.updateSingleValue(obj)
else:
if changed: self.doNotify()
# TODO prevent over splitting? All objects with the same qualifier should be grouped?
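# Added note: TypeSplitConstraint (above) groups incoming objects by their CPA type and collapses
# everything to a single cpa.anyType target once more than four distinct types have been seen
# (the megamorphic case); ExactSplitConstraint (below) keeps one target local per concrete object,
# with no such cap.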
class ExactSplitConstraint(Splitter):
def __init__(self, src):
Splitter.__init__(self, src)
self.objects = {}
def localName(self):
return 'exact_split_temp'
def changed(self, context, node, diff):
changed = False
for obj in diff:
if obj not in self.objects:
temp = self.makeTarget(context)
self.objects[obj] = temp
changed = True
else:
temp = self.objects[obj]
temp.updateSingleValue(obj)
if changed: self.doNotify()
| 23.97619 | 86 | 0.716319 | 2,259 | 0.747766 | 0 | 0 | 0 | 0 | 0 | 0 | 725 | 0.239987 |
3190a8381415a99ad9460b76cf488e2f1d88f6df | 2,202 | py | Python | tests/it/test_docker_image_tag.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
]
| 4 | 2020-06-11T20:54:47.000Z | 2020-09-22T13:07:17.000Z | tests/it/test_docker_image_tag.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
]
| 113 | 2019-11-07T00:40:36.000Z | 2021-01-18T12:50:16.000Z | tests/it/test_docker_image_tag.py | inetum-orleans/docker-devbox-ddb | 20c713cf7bfcaf289226a17a9648c17d16003b4d | [
"MIT"
]
| null | null | null | import os
import zipfile
import yaml
from dotty_dict import Dotty
from pytest_mock import MockerFixture
from ddb.__main__ import main
from ddb.config import Config
from ddb.feature.version import is_git_repository
class TestDockerImageTag:
def test_image_tag_from_git_tag_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_tag")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
assert os.path.exists("docker-compose.yml")
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node:some-tag'
def test_image_tag_from_git_branch_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_branch")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node:some-branch'
def test_image_tag_from_git_disabled_jsonnet(self, project_loader, mocker: MockerFixture):
Config.defaults = None
mocker.patch('ddb.feature.version.is_git_repository', is_git_repository)
project_loader("image_tag_from_git_disabled")
if os.path.exists("repo.zip"):
with zipfile.ZipFile("repo.zip", 'r') as zip_ref:
zip_ref.extractall(".")
main(["configure"])
with open("docker-compose.yml") as f:
docker_compose = yaml.load(f, yaml.SafeLoader)
assert Dotty(docker_compose).get('services.node.image') == 'some-registry/node'
| 33.363636 | 103 | 0.672116 | 1,983 | 0.900545 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.241599 |
3190b0b0bcaf0d8c57b427dc97d503276394c78a | 5,806 | py | Python | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
]
| null | null | null | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
]
| null | null | null | bin/zeisel.py | bendemeo/ample | 4ca2688ff2b0e5c8f7dcbc0a1f4ddc8927fac670 | [
"MIT"
]
| null | null | null | import numpy as np
import os
from scanorama import *
from scipy.sparse import vstack
from process import load_names
from experiments import *
from utils import *
NAMESPACE = 'zeisel'
METHOD = 'svd'
DIMRED = 100
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/enteric',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/spinalcord',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/sympathetic',
'data/mouse_brain/zeisel/thalamus',
]
if __name__ == '__main__':
datasets, genes_list, n_cells = load_names(data_names, norm=False)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)):
log('Dimension reduction with {}...'.format(METHOD))
X_dimred = reduce_dimensionality(
normalize(X), method=METHOD, dimred=DIMRED
)
log('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE))
from ample import gs, uniform, srs
#samp_idx = gs(X_dimred, 20000, replace=False)
#samp_idx = uniform(X_dimred, 20000, replace=False)
samp_idx = srs(X_dimred, 20000, replace=False)
#from anndata import AnnData
#import scanpy.api as sc
#adata = AnnData(X=X_dimred[samp_idx, :])
#sc.pp.neighbors(adata, use_rep='X')
#sc.tl.louvain(adata, resolution=1.5, key_added='louvain')
#
#louv_labels = np.array(adata.obs['louvain'].tolist())
#le = LabelEncoder().fit(louv_labels)
#cell_labels = le.transform(louv_labels)
#
#np.savetxt('data/cell_labels/zeisel_louvain.txt', cell_labels)
labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(labels)
cell_labels = le.transform(labels)
experiments(
X_dimred, NAMESPACE, n_seeds=2,
cell_labels=cell_labels,
kmeans_ami=True, louvain_ami=True,
rare=True,
rare_label=le.transform(['Ependymal'])[0],
)
exit()
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_srs{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
exit()
cell_labels = (
open('data/cell_labels/zeisel_louvain.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
astro = set([ 32, 38, 40, ])
oligo = set([ 2, 5, 12, 20, 23, 33, 37, ])
focus = set([ 15, 36, 41 ])
labels = []
aob_labels = []
for cl in cell_labels:
if cl in focus:
labels.append(0)
aob_labels.append('both')
elif cl in astro or cl in oligo:
labels.append(1)
if cl in astro:
aob_labels.append('astro')
else:
aob_labels.append('oligo')
else:
labels.append(2)
aob_labels.append('none')
labels = np.array(labels)
aob_labels = np.array(aob_labels)
X = np.log1p(normalize(X[samp_idx, :]))
from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'both', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'both', NAMESPACE)
astro_oligo_violin(X, genes, 'GJA1', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'MBP', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'PLP1', aob_labels, NAMESPACE)
viz_genes = [
#'GJA1', 'MBP', 'PLP1', 'TRF',
#'CST3', 'CPE', 'FTH1', 'APOE', 'MT1', 'NDRG2', 'TSPAN7',
#'PLP1', 'MAL', 'PTGDS', 'CLDN11', 'APOD', 'QDPR', 'MAG', 'ERMN',
#'PLP1', 'MAL', 'PTGDS', 'MAG', 'CLDN11', 'APOD', 'FTH1',
#'ERMN', 'MBP', 'ENPP2', 'QDPR', 'MOBP', 'TRF',
#'CST3', 'SPARCL1', 'PTN', 'CD81', 'APOE', 'ATP1A2', 'ITM2B'
]
cell_labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_astro{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
gene_names=viz_genes, gene_expr=X, genes=genes,
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
#visualize_dropout(X, embedding, image_suffix='.png',
# viz_prefix=NAMESPACE + '_dropout')
from differential_entropies import differential_entropies
differential_entropies(X_dimred, labels)
| 35.187879 | 80 | 0.627799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,349 | 0.404581 |
3190dd313fa981931d847fb35c25f15a5cf9dce0 | 997 | py | Python | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
]
| 90 | 2017-11-25T13:54:48.000Z | 2021-09-04T04:19:52.000Z | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
]
| 43 | 2017-09-12T20:40:56.000Z | 2019-08-03T15:37:37.000Z | cogitare/monitor/workers/system_usage.py | cogitare-ai/cogitare | fa99b8ef30e2f74e16fb542f2992582d1bd3ac2c | [
"MIT"
]
| 9 | 2018-02-01T22:37:15.000Z | 2018-11-05T13:30:58.000Z | from threading import Thread
import psutil
import time
class SystemUsage(Thread):
def __init__(self, callback, *args, **kwargs):
super(SystemUsage, self).__init__(daemon=True)
self.interval = 1
self.enabled = False
self.callback = callback
self.p = psutil.Process()
self.start()
def run(self):
while True:
if self.enabled:
self.callback(self.get_usage())
time.sleep(self.interval)
def get_usage(self):
usage = {}
usage['Ram (GB)'] = round(psutil.virtual_memory().used * 1.0 / 2 ** 30, 2)
usage['CPU (%)'] = dict(('CPU %d' % idx, usage) for idx, usage in enumerate(psutil.cpu_percent(percpu=True), 1))
usage['CPU used by the Process (%)'] = round(self.p.cpu_percent(), 2)
usage['RAM used by the Process (MB)'] = round(self.p.memory_info().rss * 1.0 / 2 ** 20, 2)
usage['Number of Threads'] = self.p.num_threads()
return usage
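    # Usage sketch (hypothetical callback; not part of the original module):
    #
    #   monitor = SystemUsage(callback=print)   # daemon thread starts immediately
    #   monitor.enabled = True                  # start reporting once per `interval` seconds
    #
    # The callback receives the dict returned by get_usage() above.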
| 30.212121 | 120 | 0.584754 | 939 | 0.941825 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.105316 |
3191404cbd9e515326e447e2206e0f73b067c5bc | 5,866 | py | Python | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
]
| null | null | null | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
]
| null | null | null | test/worker/net.py | ameserole/Naumachia | dc13c33c5fcf053c74dfce8351a696d28857fd9d | [
"MIT"
]
| null | null | null | import fcntl
import os
import socket
import struct
import warnings
import subprocess
import logging
import base64
logger = logging.getLogger(__name__)
# Dummy socket used for fcntl functions
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class AddrMeta(type):
@property
def maxvalue(cls):
return (0x1 << (cls.bytelen * 8)) - 1
class Addr(metaclass=AddrMeta):
bytelen = 0
def __init__(self, addr):
self._str = None
self._int = None
self._bytes = None
if isinstance(addr, type(self)):
self._str = addr._str
self._bytes = addr._bytes
self._int = addr._int
elif isinstance(addr, str):
self._str = addr
elif isinstance(addr, int):
self._int = addr
elif isinstance(addr, bytes):
if len(addr) == self.bytelen:
self._bytes = addr
else:
self._str = addr.decode('utf-8')
else:
raise ValueError('Cannot create {!s} from {!s}'.format(type(self), type(addr)))
# Operations
def __and__(self, other):
return type(self)(int(self) & int(other))
def __or__(self, other):
return type(self)(int(self) | int(other))
def __xor__(self, other):
return type(self)(int(self) ^ int(other))
def __invert__(self):
return type(self)(int(self) ^ self.maxvalue)
# Conversions
def __str__(self):
if self._str is None:
self._str = self.bytes_to_str(bytes(self))
return self._str
def __int__(self):
return int.from_bytes(bytes(self), byteorder='big')
def __bytes__(self):
if self._bytes is None:
if self._str is not None:
self._bytes = self.str_to_bytes(self._str)
elif self._int is not None:
self._bytes = self._int.to_bytes(self.bytelen, byteorder='big')
return self._bytes
def __repr__(self):
return '<{0}.{1} {2!s}>'.format(__name__, type(self).__name__, self)
class Ip(Addr):
bytelen = 4
@staticmethod
def bytes_to_str(b):
return socket.inet_ntoa(b)
@staticmethod
def str_to_bytes(s):
return socket.inet_aton(s)
def slash(self):
x, i = int(self), 0
while x & 0x1 == 0:
x >>= 1
i += 1
return 32 - i
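# Illustrative examples (added for clarity, not part of the original module):
#   int(Ip('0.0.0.1'))           == 1
#   str(~Ip('0.0.0.255'))        == '255.255.255.0'
#   Ip('255.255.255.0').slash()  == 24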
class Mac(Addr):
bytelen = 6
@staticmethod
def bytes_to_str(b):
return ':'.join('%02x' % byte for byte in b)
@staticmethod
def str_to_bytes(s):
return bytes.fromhex(s.replace(':', ''))
def _ifctl(ifname, code):
if isinstance(ifname, str):
ifname = ifname.encode('utf-8')
return fcntl.ioctl(
_socket.fileno(),
code,
struct.pack('256s', ifname[:15])
)
def ifaddr(ifname):
return Ip(_ifctl(ifname, 0x8915)[20:24]) # SIOCGIFADDR
def ifmask(ifname):
return Ip(_ifctl(ifname, 0x891b)[20:24]) # SIOCGIFNETMASK
def ifhwaddr(ifname):
return Mac(_ifctl(ifname, 0x8927)[18:24]) # SIOCGIFHWADDR
def cidr(ip, mask):
return "{!s}/{:d}".format(ip, mask.slash())
def parsecidr(ipnet):
ipstr, maskstr = ipnet.split('/')
ip = Ip(ipstr)
mask = Ip(0xffffffff ^ ((0x00000001 << (32-int(maskstr)))-1))
return ip, mask
def ifcidr(ifname):
return cidr(ifaddr(ifname), ifmask(ifname))
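# Illustrative examples (added for clarity; 'eth0' is just a placeholder interface name):
#   cidr(Ip('192.168.1.10'), Ip('255.255.255.0'))  -> '192.168.1.10/24'
#   parsecidr('10.0.0.0/8')                        -> (Ip 10.0.0.0, mask 255.0.0.0)
#   ifcidr('eth0') combines the ioctl helpers above for a live interface.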
class OpenVpnError(Exception):
def __init__(self, instance, msg):
self.instance = instance
super().__init__(msg)
class OpenVpn:
exe = 'openvpn'
initmsg = b'Initialization Sequence Completed'
def __init__(self, **kwargs):
if 'daemonize' in kwargs:
warnings.warn("This class will not be able to close a daemonized tunnel", warnings.Warning)
self.options = kwargs
self.initialized = False
self._process = None
def args(self):
result = []
for name, value in self.options.items():
result.append('--{!s}'.format(name))
# None is special to indicate the option have no value
if value is not None:
result.append(str(value))
return result
def check(self):
if self._process is not None:
self._process.poll()
code = self._process.returncode
if code is not None and code != 0:
raise OpenVpnError(self, "`openvpn {:s}` exited with error code: {:d}".format(" ".join(self.args()), code))
def running(self):
return self._process is not None and self._process.poll() is None
@staticmethod
def maketun():
os.makedirs('/dev/net', exist_ok=True)
subprocess.run(['mknod', '/dev/net/tun', 'c', '10', '200'], check=True)
def connect(self):
if not os.path.exists('/dev/net/tun'):
self.maketun()
if not self.running():
self.initialized = False
self._process = subprocess.Popen(
[self.exe] + self.args(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.check()
def disconnect(self):
if self.running():
self._process.terminate()
os.waitpid(self._process.pid, 0)
def waitforinit(self):
if not self.initialized:
for line in self._process.stdout:
logger.debug("openvpn: %s", line.decode('utf-8').strip())
if self.initmsg in line:
self.initialized = True
break
else:
self.check()
raise OpenVpnError(self, "OpenVPN exited with code 0, but did not display init msg")
def __enter__(self):
self.connect()
return self
def __exit__(self, *args, **kwargs):
self.disconnect()
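# Usage sketch (hypothetical config file; not part of the original module):
#
#   with OpenVpn(config='client.ovpn') as vpn:   # runs `openvpn --config client.ovpn`
#       vpn.waitforinit()                        # block until the init message appears
#       ...                                      # exercise the tunnel
#
# Leaving the `with` block terminates the child openvpn process.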
| 27.283721 | 123 | 0.574668 | 4,839 | 0.824923 | 0 | 0 | 561 | 0.095636 | 0 | 0 | 572 | 0.097511 |
319239aac557dc3d968ccc908a828a9cd5002f12 | 2,161 | py | Python | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
]
| null | null | null | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
]
| null | null | null | kunrei.py | kosugi/alfred.romanizer | d2a3b4a9883f15101893e385f14e6dca115c1d7d | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
basic_table = dict(map(lambda s: s.split(u'\t'), u'''
あ a
い i
う u
え e
お o
か ka
き ki
く ku
け ke
こ ko
さ sa
し si
す su
せ se
そ so
た ta
ち ti
つ tu
て te
と to
な na
に ni
ぬ nu
ね ne
の no
は ha
ひ hi
ふ hu
へ he
ほ ho
ま ma
み mi
む mu
め me
も mo
や ya
ゆ yu
よ yo
ら ra
り ri
る ru
れ re
ろ ro
わ wa
を wo
ぁ a
ぃ i
ぅ u
ぇ e
ぉ o
が ga
ぎ gi
ぐ gu
げ ge
ご go
ざ za
じ zi
ず zu
ぜ ze
ぞ zo
だ da
ぢ di
づ du
で de
ど do
ば ba
び bi
ぶ bu
べ be
ぼ bo
ぱ pa
ぴ pi
ぷ pu
ぺ pe
ぽ po
きゃ kya
きゅ kyu
きょ kyo
しゃ sya
しゅ syu
しょ syo
ちゃ tya
ちゅ tyu
ちょ tyo
にゃ nya
にゅ nyu
にょ nyo
ひゃ hya
ひゅ hyu
ひょ hyo
みゃ mya
みゅ myu
みょ myo
りゃ rya
りゅ ryu
りょ ryo
ぎゃ gya
ぎゅ gyu
ぎょ gyo
じゃ zya
じゅ zyu
じょ zyo
でゃ dya
でゅ dyu
でょ dyo
びゃ bya
びゅ byu
びょ byo
ぴゃ pya
ぴゅ pyu
ぴょ pyo
クヮ kwa
グヮ gwa
ア a
イ i
ウ u
エ e
オ o
カ ka
キ ki
ク ku
ケ ke
コ ko
サ sa
シ si
ス su
セ se
ソ so
タ ta
チ ti
ツ tu
テ te
ト to
ナ na
ニ ni
ヌ nu
ネ ne
ノ no
ハ ha
ヒ hi
フ hu
ヘ he
ホ ho
マ ma
ミ mi
ム mu
メ me
モ mo
ヤ ya
ユ yu
ヨ yo
ラ ra
リ ri
ル ru
レ re
ロ ro
ワ wa
ヲ wo
ァ a
ィ i
ゥ u
ェ e
ォ o
ガ ga
ギ gi
グ gu
ゲ ge
ゴ go
ザ za
ジ zi
ズ zu
ゼ ze
ゾ zo
ダ da
ヂ di
ヅ du
デ de
ド do
バ ba
ビ bi
ブ bu
ベ be
ボ bo
パ pa
ピ pi
プ pu
ペ pe
ポ po
キャ kya
キュ kyu
キョ kyo
シャ sya
シュ syu
ショ syo
チャ tya
チュ tyu
チョ tyo
ニャ nya
ニュ nyu
ニョ nyo
ヒャ hya
ヒュ hyu
ヒョ hyo
ミャ mya
ミュ myu
ミョ myo
リャ rya
リュ ryu
リョ ryo
ギャ gya
ギュ gyu
ギョ gyo
ジャ zya
ジュ zyu
ジョ zyo
デャ dya
デュ dyu
デョ dyo
ビャ bya
ビュ byu
ビョ byo
ピャ pya
ピュ pyu
ピョ pyo
くゎ kwa
ぐゎ gwa
'''.strip(u'\n').split(u'\n')))
long_sound_table = dict(u'aâ iî uû eê oô'.split())
long_sounds = u'aa ii uu ee oo ou'.split()
def normalize(s):
roman = u''
l = len(s)
n = 0
while n < l:
c1 = s[n]
c2 = s[n:n+2]
c3 = s[n+1:n+2]
if roman and c1 == u'ー':
c1 = u''
if roman[-1] in u'aiueo':
roman = roman[:-1] + long_sound_table[roman[-1]]
elif c2 in long_sounds:
c1 = long_sound_table[c1]
n += 1
elif c1 in u'んン':
c1 = u'n'
if c3 and c3 in u'aiueoy':
c1 += u"'"
elif c1 in u'っッ':
if c3 in u'bcdfghjklmnpqrstvwxyz':
c1 = c3
else:
c1 = u''
roman += c1
n += 1
return roman
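# Illustrative behaviour of normalize() on already-romanized fragments
# (examples added for clarity; not part of the original script):
#   normalize(u'ou')   -> u'ô'     (long vowel contracted)
#   normalize(u'aー')  -> u'â'     (prolonged sound mark folded into a circumflex)
#   normalize(u'っto') -> u'tto'   (sokuon doubles the following consonant)
#   normalize(u'んa')  -> u"n'a"   (apostrophe inserted before a vowel)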
| 8.248092 | 64 | 0.553447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,036 | 0.732374 |
31947238fdb172e32519876dd493050e5588dc51 | 3,062 | py | Python | tests/test_utils.py | loganthomas/turkey-bowl | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
]
| null | null | null | tests/test_utils.py | loganthomas/turkey-bowl | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
]
| 74 | 2020-09-26T00:58:17.000Z | 2022-03-20T13:55:09.000Z | tests/test_utils.py | loganthomas/Thanksgiving_Football | 8a02966c3fe06a4dbbcee3f31ed21c2374b77e11 | [
"MIT"
]
| 1 | 2020-09-26T01:09:38.000Z | 2020-09-26T01:09:38.000Z | """
Unit tests for utils.py
"""
# Standard libraries
import json
from pathlib import Path
# Third-party libraries
import pytest
# Local libraries
from turkey_bowl import utils
@pytest.mark.freeze_time
@pytest.mark.parametrize(
"frozen_date, expected",
[
("2010-09-09", 2010),
("2011-09-08", 2011),
("2012-09-05", 2012),
("2018-09-06", 2018),
("2019-09-05", 2019),
],
)
def test_get_current_year(freezer, frozen_date, expected):
""" Use pytest-freezegun to freeze dates and check year."""
# Setup
freezer.move_to(frozen_date)
# Exercise
result = utils.get_current_year()
# Verify
assert result == expected
# Cleanup - none necessary
def test_write_to_json(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
# Exercise
assert tmp_file_path.exists() is False # non-existent prior to write
utils.write_to_json(json_data, tmp_file_path)
# Verify
assert tmp_file_path.exists() is True
with open(tmp_file_path, "r") as written_file:
result = json.load(written_file)
assert result == json_data
# Cleanup - none necessary
def test_load_from_json(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
with open(tmp_file_path, "w") as written_file:
json.dump(json_data, written_file)
# Exercise
assert tmp_file_path.exists() is True # existent prior to load
result = utils.load_from_json(tmp_file_path)
# Verify
assert result == json_data
# Cleanup - none necessary
def test_write_to_and_load_from(tmp_path):
# Setup
tmp_file_path = tmp_path.joinpath("test.json")
json_data = {"test": "test", "test2": "test2"}
# Exercise
assert tmp_file_path.exists() is False # non-existent prior to write
utils.write_to_json(json_data, tmp_file_path)
result = utils.load_from_json(tmp_file_path)
# Verify
assert tmp_file_path.exists() is True
assert result == json_data
# Cleanup - none necessary
def test_load_stat_ids():
# Setup
file_loc = Path(__file__)
stat_ids_json_path = file_loc.parent.parent.joinpath("assets/stat_ids.json")
# Exercise
result = utils.load_from_json(stat_ids_json_path)
# Verify
assert len(result) == 91
for k, v in result.items():
assert int(k) == v["id"]
assert list(v.keys()) == ["id", "abbr", "name", "shortName"]
# Cleanup - none necessary
def test_load_player_ids():
# Setup
file_loc = Path(__file__)
stat_ids_json_path = file_loc.parent.parent.joinpath("assets/player_ids.json")
# Exercise
result = utils.load_from_json(stat_ids_json_path)
# Verify
assert "year" in result
for k, v in result.items():
if k == "year":
assert isinstance(v, int)
else:
assert list(v.keys()) == ["name", "position", "team", "injury"]
# Cleanup - none necessary
| 23.736434 | 82 | 0.653494 | 0 | 0 | 0 | 0 | 542 | 0.177008 | 0 | 0 | 857 | 0.279882 |
3194a2997150c6f647d46dc4f5cbb7a6cd7d252d | 559 | py | Python | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
]
| 319 | 2015-01-02T11:34:16.000Z | 2022-03-25T00:43:33.000Z | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
]
| 10 | 2015-02-03T02:33:09.000Z | 2021-11-09T21:41:00.000Z | regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
]
| 61 | 2015-01-02T12:01:56.000Z | 2021-12-08T07:16:16.000Z | """external method"""
class myclass:
def __init__(self, i):
self.index = i
def get_index(self):
return self.index
def run(self, n):
self.intarray = new(Int16Array(n))
self.intarray[ self.index ] = 99
@returns( array=n )
@gpu.main
def gpufunc():
int* A = self.intarray
## GLSL compile error: `Index expression must be constant`
#int idx = self.get_index()
#return float( A[idx] )
return float( A[self.get_index()] )
return gpufunc()
def main():
m = myclass(10)
r = m.run(64)
print(r)
TestError( int(r[10])==99 ) | 18.032258 | 61 | 0.631485 | 452 | 0.808587 | 0 | 0 | 238 | 0.42576 | 0 | 0 | 129 | 0.230769 |
3196c5c7ef4586ecc4432d29b82edc3f69f92c25 | 1,695 | py | Python | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
]
| null | null | null | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
]
| null | null | null | pages/views.py | Total-Conversion/eco4coin | 5a155afe892ebd714547063adfd000a1437eb3a0 | [
"MIT"
]
| null | null | null | from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import TemplateView, ListView
import csv
from django.http import HttpResponse
from backend.models import CustomUser
from django.contrib.auth.mixins import LoginRequiredMixin
class HomePageView(TemplateView):
template_name = 'pages/home.html'
class UsersListView(LoginRequiredMixin, ListView):
model = CustomUser
context_object_name = 'user'
template_name = 'pages/users_list.html'
paginate_by = 100
def get_queryset(self):
return CustomUser.objects.all().order_by('id')
def get_context_data(self, **kwargs):
context = super(UsersListView, self).get_context_data(**kwargs)
context['user'] = CustomUser.objects.get(id=self.request.user.id)
return context
@staff_member_required(login_url="/accounts/login/")
def users_to_csv(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="users_list.csv"'
writer = csv.writer(response)
writer.writerow(
[
"id ", "cash_balance ", "cash_locked ", "coin_balance ", "coin_locked ", "wallet_id ", " date_joined"
]
)
users_list = CustomUser.objects.all().order_by("id")
for user in users_list:
writer.writerow(
[
user.id,
user.cash_balance,
user.cash_locked,
user.coin_balance,
user.coin_locked,
user.wallet_id,
user.date_joined
]
)
return response
| 30.818182 | 113 | 0.661357 | 539 | 0.317994 | 0 | 0 | 874 | 0.515634 | 0 | 0 | 302 | 0.178171 |
3197d22a066fe34f613aab3ff51fd1a605e176ab | 2,895 | py | Python | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
]
| 1 | 2021-12-02T15:19:36.000Z | 2021-12-02T15:19:36.000Z | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
]
| null | null | null | 18.part2.py | elp2/advent_of_code_2018 | 0d359422dd04b0849481796005e97d05c30e9eb4 | [
"Apache-2.0"
]
| null | null | null | from collections import defaultdict
def return_default():
return 0
REAL=open("18.txt").readlines()
SAMPLE=open("18.sample").readlines()
OPEN="."
TREE="|"
LUMBERYARD="#"
import copy
def safe_grid_get(grid, x, y, missing=None):
if x < 0 or y < 0:
return missing
if y >= len(grid):
return missing
if x >= len(grid[y]):
return missing
return grid[y][x]
def parse_lines(lines):
return list(map(lambda l: list(l.strip()), lines))
def next_sq(grid, x, y):
around = defaultdict(return_default)
for dy in [-1, 0, 1]:
for dx in [-1, 0, 1]:
if dx == 0 and dy == 0:
continue
a = safe_grid_get(grid, x + dx, y + dy)
if a is not None:
around[a] += 1
here = grid[y][x]
if here == OPEN:
if around[TREE] >= 3:
return TREE
else:
return OPEN
elif here == TREE:
if around[LUMBERYARD] >= 3:
return LUMBERYARD
else:
return TREE
else:
assert here == LUMBERYARD
if around[LUMBERYARD] >= 1 and around[TREE] >= 1:
return LUMBERYARD
else:
return OPEN
def resource_value(board):
lands = defaultdict(return_default)
for y in range(len(board)):
for x in range(len(board[0])):
lands[board[y][x]] += 1
return lands[TREE] * lands[LUMBERYARD]
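# Example (added for clarity, not in the original): a board containing two trees ('|')
# and one lumberyard ('#') has resource value 2 * 1 = 2, e.g.
#   resource_value([['|', '#'], ['.', '|']]) == 2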
def solve(lines, minutes):
cache = {}
old_board = parse_lines(lines)
for minute in range(minutes):
board = copy.deepcopy(old_board)
for y in range(len(board)):
for x in range(len(board[0])):
board[y][x] = next_sq(old_board, x, y)
old_board = board
key = "\n".join(map(lambda r: "".join(r), board))
# print(key)
if key in cache:
print(minute, cache[key])
else:
cache[key] = (minute, resource_value(board))
return resource_value(board)
sample = solve(SAMPLE, 10)
assert sample == 1147
print("*** SAMPLE PASSED ***")
# print(solve(REAL, 10000))
loop = """598 570 191420
599 571 189168
600 572 185082
601 573 185227
602 574 185320
603 575 185790
604 576 186120
605 577 189956
606 578 190068
607 579 191080
608 580 190405 # too low
609 581 193795
610 582 190950
611 583 193569
612 584 194350
613 585 196308
614 586 195364
615 587 197911
616 588 199755
617 589 201144
618 590 201607
619 591 203580
620 592 201260
621 593 201950
622 594 200675 # TOO HIGH
623 595 202208
624 596 200151
625 597 198948
626 570 191420
627 571 189168
628 572 185082
629 573 185227
630 574 185320
631 575 185790
632 576 186120
633 577 189956
634 578 190068
635 579 191080
636 580 190405
637 581 193795"""
num = 1000000000
nmod = 28
for num in range(570, 638):
print(num, (num - 570) % nmod + 570)
num = 1000000000 - 1
print(num, (num - 570) % nmod + 570 + nmod) | 21.444444 | 57 | 0.601382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 722 | 0.249396 |
3198fc009ad14ca016fe53373d72241bd818e6a1 | 231 | py | Python | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
]
| null | null | null | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
]
| null | null | null | PythonExercicios/ex031.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
]
| null | null | null | dis = float(input('Digite a distância da sua viagem em Km: '))
if dis <= 200:
print('O valor da sua passagem será {:.2f} reais'.format(dis * 0.5))
else:
print('O valor da sua passagem será {:.2f} reais'.format(dis * 0.45))
| 38.5 | 73 | 0.645022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.559829 |
31991e3c0b73748d0ef73d80f10a961d8b27dbaf | 4,516 | py | Python | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
]
| null | null | null | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
]
| 20 | 2020-03-24T16:48:03.000Z | 2022-03-08T22:38:38.000Z | admin/collection_providers/forms.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
]
| null | null | null | import bleach
import json
from django import forms
from osf.models import CollectionProvider, CollectionSubmission
from admin.base.utils import get_nodelicense_choices, get_defaultlicense_choices
class CollectionProviderForm(forms.ModelForm):
collected_type_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
status_choices = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = CollectionProvider
exclude = ['primary_identifier_name', 'primary_collection', 'type', 'allow_commenting', 'advisory_board',
'example', 'domain', 'domain_redirect_enabled', 'reviews_comments_anonymous',
'reviews_comments_private', 'reviews_workflow']
widgets = {
'licenses_acceptable': forms.CheckboxSelectMultiple(),
}
def __init__(self, *args, **kwargs):
nodelicense_choices = get_nodelicense_choices()
defaultlicense_choices = get_defaultlicense_choices()
super(CollectionProviderForm, self).__init__(*args, **kwargs)
self.fields['licenses_acceptable'].choices = nodelicense_choices
self.fields['default_license'].choices = defaultlicense_choices
def clean_description(self, *args, **kwargs):
if not self.data.get('description'):
return u''
return bleach.clean(
self.data.get('description'),
tags=['a', 'br', 'em', 'p', 'span', 'strong'],
attributes=['class', 'style', 'href', 'title', 'target'],
styles=['text-align', 'vertical-align'],
strip=True
)
def clean_footer_links(self, *args, **kwargs):
if not self.data.get('footer_links'):
return u''
return bleach.clean(
self.data.get('footer_links'),
tags=['a', 'br', 'div', 'em', 'p', 'span', 'strong'],
attributes=['class', 'style', 'href', 'title', 'target'],
styles=['text-align', 'vertical-align'],
strip=True
)
def clean_collected_type_choices(self):
collection_provider = self.instance
# if this is to modify an existing CollectionProvider
if collection_provider.primary_collection:
type_choices_old = set(collection_provider.primary_collection.collected_type_choices)
type_choices_new = set(json.loads(self.data.get('collected_type_choices')))
type_choices_added = type_choices_new - type_choices_old
type_choices_removed = type_choices_old - type_choices_new
for item in type_choices_removed:
if CollectionSubmission.objects.filter(collection=collection_provider.primary_collection,
collected_type=item).exists():
raise forms.ValidationError(
'Cannot delete "{}" because it is used as metadata on objects.'.format(item)
)
else:
# if this is creating a CollectionProvider
type_choices_added = json.loads(self.data.get('collected_type_choices'))
type_choices_removed = []
return {
'added': type_choices_added,
'removed': type_choices_removed,
}
def clean_status_choices(self):
collection_provider = self.instance
# if this is to modify an existing CollectionProvider
if collection_provider.primary_collection:
status_choices_old = set(collection_provider.primary_collection.status_choices)
status_choices_new = set(json.loads(self.data.get('status_choices')))
status_choices_added = status_choices_new - status_choices_old
status_choices_removed = status_choices_old - status_choices_new
for item in status_choices_removed:
if CollectionSubmission.objects.filter(collection=collection_provider.primary_collection,
status=item).exists():
raise forms.ValidationError(
'Cannot delete "{}" because it is used as metadata on objects.'.format(item)
)
else:
# if this is creating a CollectionProvider
status_choices_added = json.loads(self.data.get('status_choices'))
status_choices_removed = []
return {
'added': status_choices_added,
'removed': status_choices_removed,
}
| 45.16 | 113 | 0.629318 | 4,315 | 0.955492 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.206599 |
319925dc3819c9097723899fe8aef60117e396cb | 817 | py | Python | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
]
| null | null | null | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
]
| null | null | null | src/validate_model.py | mareklinka/esk-form-scanner-model | 30af9e1c5d652b3310222bc55f92e964bc524f2e | [
"MIT"
]
| null | null | null |
import data_providers as gen
import model_storage as storage
import numpy as np
import data_visualizer
import time
def evaluate(model_name):
"""
Evaluates the model stored in the specified file.
Parameters
----------
model_name : string
The name of the file to read the model from
"""
model = storage.load_model(model_name)
model.summary()
    start = time.perf_counter()  # time.clock() was deprecated and removed in Python 3.8
score = model.evaluate_generator(gen.finite_generator("data\\validation"), steps=30)
    end = time.perf_counter()
print("Time per image: {} ".format((end-start)/300))
print (model.metrics_names)
print (score)
predictions = model.predict_generator(gen.finite_generator("data\\validation"), steps=30)
data_visualizer.draw_bounding_boxes("data\\validation", predictions, "data\\results") | 24.757576 | 93 | 0.69645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.321909 |
31998f7e8bdabc90d6fe3933e2b885a9ef1b8e16 | 4,154 | py | Python | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/v3_0_preview_1/models/_form_recognizer_client_enums.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AnalyzeResultOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
NOT_STARTED = "notStarted"
RUNNING = "running"
FAILED = "failed"
SUCCEEDED = "succeeded"
class ApiVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""API version.
"""
TWO_THOUSAND_TWENTY_ONE09_30_PREVIEW = "2021-09-30-preview"
class ContentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Content type for upload
"""
#: Content Type 'application/octet-stream'.
APPLICATION_OCTET_STREAM = "application/octet-stream"
#: Content Type 'application/pdf'.
APPLICATION_PDF = "application/pdf"
#: Content Type 'image/bmp'.
IMAGE_BMP = "image/bmp"
#: Content Type 'image/jpeg'.
IMAGE_JPEG = "image/jpeg"
#: Content Type 'image/png'.
IMAGE_PNG = "image/png"
#: Content Type 'image/tiff'.
IMAGE_TIFF = "image/tiff"
class DocumentFieldType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Semantic data type of the field value.
"""
STRING = "string"
DATE = "date"
TIME = "time"
PHONE_NUMBER = "phoneNumber"
NUMBER = "number"
INTEGER = "integer"
SELECTION_MARK = "selectionMark"
COUNTRY_REGION = "countryRegion"
CURRENCY = "currency"
SIGNATURE = "signature"
ARRAY = "array"
OBJECT = "object"
class DocumentSignatureType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Presence of signature.
"""
SIGNED = "signed"
UNSIGNED = "unsigned"
class DocumentTableCellKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Table cell kind.
"""
CONTENT = "content"
ROW_HEADER = "rowHeader"
COLUMN_HEADER = "columnHeader"
STUB_HEAD = "stubHead"
DESCRIPTION = "description"
class LengthUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The unit used by the width, height, and boundingBox properties. For images, the unit is
"pixel". For PDF, the unit is "inch".
"""
PIXEL = "pixel"
INCH = "inch"
class OperationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of operation.
"""
DOCUMENT_MODEL_BUILD = "documentModelBuild"
DOCUMENT_MODEL_COMPOSE = "documentModelCompose"
DOCUMENT_MODEL_COPY_TO = "documentModelCopyTo"
class OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
NOT_STARTED = "notStarted"
RUNNING = "running"
FAILED = "failed"
SUCCEEDED = "succeeded"
CANCELED = "canceled"
class SelectionMarkState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the selection mark.
"""
SELECTED = "selected"
UNSELECTED = "unselected"
class StringIndexType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Method used to compute string offset and length.
"""
TEXT_ELEMENTS = "textElements"
UNICODE_CODE_POINT = "unicodeCodePoint"
UTF16_CODE_UNIT = "utf16CodeUnit"
| 30.77037 | 94 | 0.667068 | 3,598 | 0.866153 | 0 | 0 | 0 | 0 | 0 | 0 | 1,988 | 0.478575 |
3199bda124122e7024becc95e2646f8bec2ec029 | 930 | py | Python | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
]
| null | null | null | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
]
| 11 | 2021-02-10T02:18:32.000Z | 2022-03-02T09:56:43.000Z | activity_log/migrations/0004_auto_20170309_0929.py | farezsaputra/BandwidthControllingSystem | 54032f21e6cb94156f57222e88a98a89be310ea9 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('activity_log', '0003_activitylog_extra_data'),
]
operations = [
migrations.AlterField(
model_name='activitylog',
name='datetime',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='datetime', db_index=True),
),
migrations.AlterField(
model_name='activitylog',
name='ip_address',
field=models.GenericIPAddressField(blank=True, null=True, verbose_name='user IP', db_index=True),
),
migrations.AlterField(
model_name='activitylog',
name='request_url',
field=models.CharField(db_index=True, verbose_name='url', max_length=256),
),
] | 31 | 114 | 0.631183 | 793 | 0.852688 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.176344 |
319a6bb562632b817123a3b1b59712d0c048c830 | 1,559 | py | Python | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
]
| 2 | 2021-05-19T13:16:20.000Z | 2021-11-05T11:47:11.000Z | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
]
| 71 | 2021-06-01T11:03:56.000Z | 2022-03-01T09:38:37.000Z | bin/read_oogeso_data.py | oogeso/oogeso | 72c05fd02d62b29fc62f60daf4989370fd80cbe1 | [
"MIT"
]
| null | null | null | import json
import oogeso.io.file_io
import oogeso.dto.oogeso_input_data_objects  # used below for (de)serialization
# Read in data, validate data etc. with methods from io
test_data_file = "examples/test case2.yaml"
json_data = oogeso.io.file_io.read_data_from_yaml(test_data_file)
json_formatted_str = json.dumps(json_data, indent=2)
print("Type json formatted str=", type(json_formatted_str))
# deserialize json data to objects
# encoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONEncoder
decoder = oogeso.dto.oogeso_input_data_objects.DataclassJSONDecoder
# decoder = json.JSONDecoder()
with open("examples/energysystem.json", "r") as jsonfile:
energy_system = json.load(jsonfile, cls=decoder)
es_str = oogeso.dto.oogeso_input_data_objects.serialize_oogeso_data(energy_system)
print("Type seriealised=", type(es_str))
mydecoder = decoder()
energy_system = mydecoder.decode(json_formatted_str)
print("Type energysystem=", type(energy_system))
# energy_system = json.loads(
# json_formatted_str, cls=encoder
# )
# energy_system: oogeso.dto.oogeso_input_data_objects.EnergySystem = (
# oogeso.dto.oogeso_input_data_objects.deserialize_oogeso_data(json_data)
# )
print("========================")
print("Energy system:")
# print("Energy system type=", type(energy_system))
# print("Nodes: ", energy_system.nodes)
# print("Node1: ", energy_system.nodes["node1"])
# print("Parameters: ", energy_system.parameters)
# print("Parameters type=", type(energy_system.parameters))
# print("planning horizon: ", energy_system.parameters.planning_horizon)
# print("Carriers: ", energy_system.carriers)
print(energy_system)
| 37.119048 | 82 | 0.77229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.598461 |
319aeea582f12d05f825c637fc9e26f9381d34d7 | 94 | py | Python | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
]
| null | null | null | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
]
| 4 | 2021-06-08T21:03:37.000Z | 2022-03-12T00:21:25.000Z | TWITOFF/__init__.py | DSPT3/Twitoff | ba4a0359942a9981c9d985fa0a30cd0b44dd1d98 | [
"MIT"
]
| 2 | 2020-02-28T11:59:32.000Z | 2021-07-12T02:28:34.000Z | """ Entry Point for Our Twitoff Flask App """
from .app import create_app
APP = create_app() | 18.8 | 45 | 0.712766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.478723 |
319c9744bc015a0408de27695b17d2663e338344 | 2,295 | py | Python | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
]
| 11 | 2017-10-13T11:04:53.000Z | 2021-03-26T15:54:12.000Z | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
]
| 16 | 2018-08-01T09:25:08.000Z | 2022-03-04T12:29:52.000Z | fgivenx/test/test_mass.py | ejhigson/fgivenx | 91089d8c0ce54bae0f72b41eb1da5d6e8d75738d | [
"MIT"
]
| 12 | 2018-02-04T20:34:01.000Z | 2021-12-10T10:58:20.000Z | import numpy
import pytest
import os
from shutil import rmtree
from numpy.testing import assert_allclose
import scipy.stats
import scipy.integrate
import scipy.special
from fgivenx.mass import PMF, compute_pmf
def gaussian_pmf(y, mu=0, sigma=1):
return scipy.special.erfc(numpy.abs(y-mu)/numpy.sqrt(2)/sigma)
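# Sanity check (added for clarity): gaussian_pmf(0.0) == 1.0 at the mode, and
# gaussian_pmf(1.0) == erfc(1/sqrt(2)) ~= 0.3173, the two-sided tail mass beyond 1 sigma.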
def test_gaussian():
numpy.random.seed(0)
nsamp = 5000
samples = numpy.random.randn(nsamp)
y = numpy.random.uniform(-3, 3, 10)
m = PMF(samples, y)
m_ = gaussian_pmf(y)
assert_allclose(m, m_, rtol=3e-1)
def test_PMF():
# Compute samples
numpy.random.seed(0)
nsamp = 100
samples = numpy.concatenate((-5+numpy.random.randn(nsamp//2),
5+numpy.random.randn(nsamp//2)))
# Compute PMF
y = numpy.random.uniform(-10, 10, 10)
m = PMF(samples, y)
# Compute PMF via monte carlo
N = 100000
kernel = scipy.stats.gaussian_kde(samples)
s = kernel.resample(N)[0]
m_ = [sum(kernel(s) <= kernel(y_i))/float(N) for y_i in y]
assert_allclose(m, m_, atol=3*N**-0.5)
# Compute PMF via quadrature
m_ = [scipy.integrate.quad(lambda x: kernel(x)*(kernel(x) <= kernel(y_i)),
-numpy.inf, numpy.inf, limit=500)[0]
for y_i in y]
assert_allclose(m, m_, atol=1e-4)
assert_allclose([0, 0], PMF(samples, [-1e3, 1e3]))
samples = [0, 0]
m = PMF(samples, y)
assert_allclose(m, numpy.zeros_like(y))
def test_compute_pmf():
with pytest.raises(TypeError):
compute_pmf(None, None, wrong_argument=None)
cache = '.test_cache/test'
numpy.random.seed(0)
nsamp = 5000
a, b, e, f = 0, 1, 0, 1
m = numpy.random.normal(a, b, nsamp)
c = numpy.random.normal(e, f, nsamp)
nx = 100
x = numpy.linspace(-1, 1, nx)
fsamps = (numpy.outer(x, m) + c)
ny = 100
y = numpy.linspace(-3, 3, ny)
assert(not os.path.isfile(cache + '_masses.pkl'))
m = compute_pmf(fsamps, y, cache=cache)
assert(os.path.isfile(cache + '_masses.pkl'))
m_ = [gaussian_pmf(y, a*xi+e, numpy.sqrt(b**2*xi**2+f**2)) for xi in x]
assert_allclose(m.transpose(), m_, atol=3e-1)
m = compute_pmf(fsamps, y, cache=cache)
assert_allclose(m.transpose(), m_, atol=3e-1)
rmtree('.test_cache')
| 27 | 78 | 0.620915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.062745 |
319dcd031d072f86f2934fd6b6ad4796d0a0d399 | 5,141 | py | Python | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
]
| 2 | 2020-01-03T17:06:33.000Z | 2020-01-13T18:57:32.000Z | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
]
| 1,016 | 2019-06-18T21:27:47.000Z | 2020-03-06T11:09:58.000Z | homeassistant/components/vera/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
]
| null | null | null | """Config flow for Vera."""
from __future__ import annotations
from collections.abc import Mapping
import logging
import re
from typing import Any
import pyvera as pv
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EXCLUDE, CONF_LIGHTS, CONF_SOURCE
from homeassistant.core import callback
from homeassistant.helpers import entity_registry as er
from .const import CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN
LIST_REGEX = re.compile("[^0-9]+")
_LOGGER = logging.getLogger(__name__)
def fix_device_id_list(data: list[Any]) -> list[int]:
"""Fix the id list by converting it to a supported int list."""
return str_to_int_list(list_to_str(data))
def str_to_int_list(data: str) -> list[int]:
"""Convert a string to an int list."""
return [int(s) for s in LIST_REGEX.split(data) if len(s) > 0]
def list_to_str(data: list[Any]) -> str:
"""Convert an int list to a string."""
return " ".join([str(i) for i in data])
def new_options(lights: list[int], exclude: list[int]) -> dict:
"""Create a standard options object."""
return {CONF_LIGHTS: lights, CONF_EXCLUDE: exclude}
def options_schema(options: Mapping[str, Any] = None) -> dict:
"""Return options schema."""
options = options or {}
return {
vol.Optional(
CONF_LIGHTS,
default=list_to_str(options.get(CONF_LIGHTS, [])),
): str,
vol.Optional(
CONF_EXCLUDE,
default=list_to_str(options.get(CONF_EXCLUDE, [])),
): str,
}
def options_data(user_input: dict) -> dict:
"""Return options dict."""
return new_options(
str_to_int_list(user_input.get(CONF_LIGHTS, "")),
str_to_int_list(user_input.get(CONF_EXCLUDE, "")),
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input: dict = None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(
title="",
data=options_data(user_input),
)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(options_schema(self.config_entry.options)),
)
class VeraFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Vera config flow."""
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input: dict = None):
"""Handle user initiated flow."""
if user_input is not None:
return await self.async_step_finish(
{
**user_input,
**options_data(user_input),
**{CONF_SOURCE: config_entries.SOURCE_USER},
**{CONF_LEGACY_UNIQUE_ID: False},
}
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{**{vol.Required(CONF_CONTROLLER): str}, **options_schema()}
),
)
async def async_step_import(self, config: dict):
"""Handle a flow initialized by import."""
# If there are entities with the legacy unique_id, then this imported config
# should also use the legacy unique_id for entity creation.
entity_registry = er.async_get(self.hass)
use_legacy_unique_id = (
len(
[
entry
for entry in entity_registry.entities.values()
if entry.platform == DOMAIN and entry.unique_id.isdigit()
]
)
> 0
)
return await self.async_step_finish(
{
**config,
**{CONF_SOURCE: config_entries.SOURCE_IMPORT},
**{CONF_LEGACY_UNIQUE_ID: use_legacy_unique_id},
}
)
async def async_step_finish(self, config: dict):
"""Validate and create config entry."""
base_url = config[CONF_CONTROLLER] = config[CONF_CONTROLLER].rstrip("/")
controller = pv.VeraController(base_url)
# Verify the controller is online and get the serial number.
try:
await self.hass.async_add_executor_job(controller.refresh_data)
except RequestException:
_LOGGER.error("Failed to connect to vera controller %s", base_url)
return self.async_abort(
reason="cannot_connect", description_placeholders={"base_url": base_url}
)
await self.async_set_unique_id(controller.serial_number)
self._abort_if_unique_id_configured(config)
return self.async_create_entry(title=base_url, data=config)
| 32.13125 | 88 | 0.62731 | 3,244 | 0.631006 | 0 | 0 | 192 | 0.037347 | 2,712 | 0.527524 | 793 | 0.15425 |
319dd84050e699e6ce05aef24f59d85fafabbb42 | 3,368 | py | Python | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | ["Apache-2.0"] | 7 | 2020-10-28T13:46:08.000Z | 2021-05-27T06:41:56.000Z | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | ["Apache-2.0"] | 2 | 2020-10-27T19:15:12.000Z | 2020-10-27T19:15:25.000Z | bin/setup_spectrum.py | MFSJMenger/pysurf | 99c6a94d4cb5046f16a0961b907061d989ffb6dc | ["Apache-2.0"] | 2 | 2021-04-15T05:54:30.000Z | 2022-02-08T00:10:10.000Z |
import os
from shutil import copy2 as copy
#
from pysurf.logger import get_logger
from pysurf.sampling import Sampling
from pysurf.setup import SetupBase
from pysurf.utils import exists_and_isfile
from pysurf.spp import SurfacePointProvider
from colt import Colt
from sp_calc import SinglePointCalculation
class SetupSpectrum(SetupBase):
folder = 'spectrum'
subfolder = 'condition'
_questions = """
# Number of conditions
n_cond = :: int
# Number of states
nstates = :: int
#Properties that should be calculated
properties = ['energy', 'fosc'] :: list
# Database containing all the initial conditions
sampling_db = sampling.db :: existing_file
# Filepath for the inputfile of the Surface Point Provider
spp = spp.inp :: file
# Filepath for the inputfile of the Single Point Calculation
sp_calc = sp_calc.inp :: file
"""
def __init__(self, config):
""" Class to create initial conditions due to user input. Initial conditions are saved
in a file for further usage.
"""
logger = get_logger('setup_spectrum.log', 'setup_spectrum')
logger.header('SETUP SPECTRUM', config)
SetupBase.__init__(self, logger)
#
logger.info(f"Opening sampling database {config['sampling_db']}")
sampling = Sampling.from_db(config['sampling_db'], logger=logger)
if not exists_and_isfile(config['spp']):
presets="""
use_db = no
"""
logger.info(f"Setting up SPP inputfile: {config['spp']}")
SurfacePointProvider.generate_input(config['spp'], config=None, presets=presets)
else:
logger.info(f"Using SPP inputfile as it is")
if not exists_and_isfile(config['sp_calc']):
presets=f"""
properties = {config['properties']}
nstates = {config['nstates']}
init_db = init.db
"""
logger.info(f"Setting up inputfile for the single point calculations")
SinglePointCalculation.generate_input(config['sp_calc'], config=None, presets=presets)
else:
logger.info(f"Using inputfile for the single point calculations as it is")
logger.info("Starting to prepare the folders...")
self.setup_folders(range(config['n_cond']), config, sampling)
@classmethod
def from_config(cls, config):
return cls(config)
def setup_folder(self, number, foldername, config, sampling):
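        """Prepare one condition folder: copy the SPP and single-point input files and create a one-entry init database for this condition."""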
copy(config['spp'], foldername)
copy(config['sp_calc'], foldername)
#name of new database
initname = os.path.join(foldername, 'init.db')
#get info from old db and adjust
variables = sampling.info['variables']
variables += config['properties']
dimensions = sampling.info['dimensions']
dimensions['nstates'] = config['nstates']
dimensions['nactive'] = config['nstates']
#setup new database
new_sampling = Sampling.create_db(initname, variables, dimensions, sampling.system, sampling.modes, model=sampling.model, sp=True)
#copy condition to new db
condition = sampling.get_condition(number)
new_sampling.write_condition(condition, 0)
if __name__ == "__main__":
SetupSpectrum.from_commandline()
| 34.367347 | 138 | 0.645487 | 2,994 | 0.888955 | 0 | 0 | 73 | 0.021675 | 0 | 0 | 1,438 | 0.42696 |
319ec31c5bec95f71fc86ec8dcab8ee33a9ec4c6 | 412 | py | Python | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | ["Unlicense"] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | ["Unlicense"] | null | null | null | CeV - Gustavo Guanabara/exerc033.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | ["Unlicense"] | null | null | null |
# 033: read three numbers and say which is the largest and which is the smallest:
print("Digite 3 numeros:")
n = int(input("Numero 1: "))
# Seed both the largest and the smallest with the first number, so the comparisons
# below also work for negative inputs and menorn is always defined.
maiorn = n
menorn = n
n = int(input("Numero 2: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
n = int(input("Numero 3: "))
if n > maiorn:
maiorn = n
if n < menorn:
menorn = n
print(f"o maior numero foi {maiorn} e o menor foi {menorn}")
| 20.6 | 60 | 0.601942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.40534 |
31a27b0c36981ab92aff36160266dec12ad84cdb | 5,238 | py | Python | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | ["MIT"] | null | null | null | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | ["MIT"] | null | null | null | test/test_dot.py | croqaz/dot | b57f3c68dfa1ac5a7afb9f83af6035c34e342c83 | ["MIT"] | null | null | null |
import pytest
from prop import strict_get
from prop import get as dot_get
class A:
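    """Minimal attribute container used to exercise attribute-style path access in these tests."""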
def __init__(self, val):
self.val = val
def test_dot_get_list():
assert dot_get(['asd'], '0') == dot_get(['asd'], ['0']) == 'asd'
data = {'nested': [0, False, 'foo']}
assert dot_get(data, 'nested.0') == 0
assert dot_get(data, 'nested.1') is False
assert dot_get(data, 'nested.2') == 'foo'
assert dot_get(data, ['nested', '0']) == 0
assert dot_get(data, ['nested', '1']) is False
assert dot_get(data, ['nested', b'1']) is False
assert dot_get(data, ('nested', '2')) == 'foo'
assert dot_get(data, ('nested', b'2')) == 'foo'
assert dot_get(data, ['nested', 1]) is False
assert dot_get(data, ('nested', 2)) == 'foo'
# inexistent
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nested.9', 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
assert dot_get(data, ['nested', '9']) is None
assert dot_get(data, ['nested', b'9']) is None
assert dot_get(data, ['nested', 9], 'default') == 'default'
assert dot_get(data, ('nested', '9'), 'default') == 'default'
assert dot_get(data, ('nested', b'9'), 'default') == 'default'
def test_dot_get_dict():
data = {'a': 'a', 'nested': {'x': 'y', 'int': 0, 'null': None}}
assert dot_get(data, 'a') == 'a'
assert dot_get(data, 'nested.x') == 'y'
assert dot_get(data, 'nested.int') == 0
assert dot_get(data, 'nested.null') is None
assert dot_get(data, ('nested', 'x')) == 'y'
assert dot_get(data, ['nested', 'int']) == 0
assert dot_get(data, ['nested', 'null']) is None
# inexistent
assert dot_get(data, 'nope') is None
assert dot_get(data, 'nested.9') is None
assert dot_get(data, 'nope', 'default') == 'default'
assert dot_get(data, ['nope']) is None
assert dot_get(data, ['nope'], 'default') == 'default'
assert dot_get(data, ('nested', 9)) is None
def test_str_dot_get_obj():
a = A(1)
assert dot_get(a, 'val') == 1
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
a = A([0, False, 'foo'])
assert dot_get(a, 'val.0') == 0
assert dot_get(a, 'val.1') is False
assert dot_get(a, 'val.2') == 'foo'
assert dot_get(a, 'nope') is None
assert dot_get(a, 'nope', 'default') == 'default'
def test_dot_get_mixed():
data = {
'nested': {
1: '1',
'x': 'y',
None: 'null',
},
'list': [[[None, True, 9]]],
b'byte': b'this',
}
assert dot_get(data, 'list.0.0.1') is True
assert dot_get(data, 'list.0.0.2') == 9
assert dot_get(data, ('list', 0, 0, 1)) is True
assert dot_get(data, ['list', 0, 0, 2]) == 9
# String paths can only access string keys, so this won't work:
# assert dot_get(data, 'nested.1') == '1'
# assert dot_get(data, 'nested.None') == 'null'
# But this works:
assert dot_get(data, [b'byte']) == b'this'
assert dot_get(data, ['nested', 1]) == '1'
assert dot_get(data, ['nested', None]) == 'null'
a = A(data)
assert dot_get(a, 'val.nested.x') == 'y'
assert dot_get(a, 'val.list.0.0.1') is True
assert dot_get(a, ['val', 'list', 0, 0, 1]) is True
assert dot_get(a, ('val', 'list', 0, 0, 2)) == 9
def test_circular_refs():
c = A(1)
b = A(c)
a = A(b)
assert dot_get(c, 'val') == 1
assert dot_get(b, 'val') is c
assert dot_get(a, 'val') is b
assert dot_get(a, 'val.val.val') == 1
assert dot_get(a, ['val', 'val', 'val']) == 1
# Create cyclic ref
c.val = a
assert dot_get(c, 'val') == a
assert dot_get(c, 'val.val.val.val') == a
assert dot_get(c, ['val', 'val', 'val', 'val']) == a
def test_str_dot_strict_get():
data = {
'1': 1,
'a': A(7),
'nested': {
'x': 'y',
'int': 0,
'null': None,
},
'list': [[[None, True, 9]]],
}
assert strict_get(data, '1') == 1
assert strict_get(data, 'a.val') == 7
assert strict_get(data, 'nested.x') == 'y'
assert strict_get(data, 'nested.int') == 0
assert strict_get(data, 'nested.null') is None
assert strict_get(data, 'list.0.0.1') is True
assert strict_get(data, 'list.0.0.-1') == 9
with pytest.raises(KeyError):
assert strict_get(data, 'nope') is None
with pytest.raises(IndexError):
assert strict_get(data, 'list.9') is None
def test_str_dot_set_mix():
data = {
'a': 'a',
'nested': {
'x': 'x',
'int': 0,
'list': ['y', 'n'],
},
}
assert strict_get(data, 'nested.x') == 'x'
assert strict_get(data, 'nested.list.0') == 'y'
nested = dot_get(data, 'nested')
nested['x'] = 'yyy'
li = strict_get(data, 'nested.list')
li.insert(0, 'z')
assert strict_get(data, 'nested.x') == 'yyy'
assert strict_get(data, 'nested.list.0') == 'z'
def test_crappy_path():
with pytest.raises(TypeError):
assert dot_get(['asd'], True)
with pytest.raises(TypeError):
assert dot_get(['asd'], None)
with pytest.raises(TypeError):
assert dot_get(['asd'], 0)
| 27.714286 | 68 | 0.550592 | 61 | 0.011646 | 0 | 0 | 0 | 0 | 0 | 0 | 1,342 | 0.256205 |
31a2f51b82dfe59d7e0a0af9b9f3cdff2f955130 | 9,179 | py | Python | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | ["MIT"] | null | null | null | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | ["MIT"] | null | null | null | site_scons/site_tools/mplabx_nbproject/__init__.py | kbhomes/ps2plus | 63467133367082ec06c88e5c0fd623373709717e | ["MIT"] | null | null | null |
from pprint import pprint
import SCons.Builder
from SCons.Script import *
import json
import os
import copy
import collections
import xml.etree.ElementTree as ET
from mplabx import MPLABXProperties
MAKEFILE_TEXT = '''
MKDIR=mkdir
CP=cp
CCADMIN=CCadmin
RANLIB=ranlib
build: .build-post
.build-pre:
.build-post: .build-impl
clean: .clean-post
.clean-pre:
.clean-post: .clean-impl
clobber: .clobber-post
.clobber-pre:
.clobber-post: .clobber-impl
all: .all-post
.all-pre:
.all-post: .all-impl
help: .help-post
.help-pre:
.help-post: .help-impl
include nbproject/Makefile-impl.mk
include nbproject/Makefile-variables.mk
'''
PROJECT_XML_TEXT = '''
<project>
<type>com.microchip.mplab.nbide.embedded.makeproject</type>
<configuration>
<data>
<name />
<sourceRootList />
<confList />
</data>
</configuration>
</project>
'''
CONFIGURATIONS_XML_TEXT = '''
<configurationDescriptor version="65">
<logicalFolder name="root" displayName="root" projectFiles="true" />
<sourceRootList />
<projectmakefile>Makefile</projectmakefile>
<confs />
</configurationDescriptor>
'''
CONFIGURATION_ELEMENT_TEXT = '''
<conf type="2">
<toolsSet>
<targetDevice />
<languageToolchain />
<languageToolchainVersion />
</toolsSet>
<HI-TECH-COMP />
<HI-TECH-LINK />
<XC8-config-global />
</conf>
'''
def nested_dict():
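    """Return a defaultdict that creates nested defaultdicts on demand, so arbitrarily deep keys can be assigned without KeyError."""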
return collections.defaultdict(nested_dict)
def merge(destination, source):
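    """Recursively merge ``source`` into ``destination``: nested dicts are merged in place, other values overwrite. Returns ``destination``."""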
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(node, value)
else:
destination[key] = value
return destination
def build_mplabx_nbproject_configuration(
env,
name: str,
properties: MPLABXProperties,
additional_compiler_properties: dict[str, str] = {},
additional_linker_properties: dict[str, str] = {},
additional_xc8_properties: dict[str, str] = {},
):
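    """Build the <conf> XML element for a single MPLAB X configuration and return it wrapped in a SCons Value node."""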
defines_str = ';'.join(env['CPPDEFINES'])
includes_str = ';'.join([env.Dir(path).abspath for path in env['CPPPATH']])
default_compiler_properties = {
'define-macros': f'{defines_str}',
'extra-include-directories': f'{includes_str}',
}
root = ET.fromstring(CONFIGURATION_ELEMENT_TEXT)
root.set('name', name)
root.find('./toolsSet/targetDevice').text = properties.device
root.find('./toolsSet/languageToolchain').text = properties.toolchain
root.find('./toolsSet/languageToolchainVersion').text = properties.toolchain_version
group_properties_mapping = {
'HI-TECH-COMP': default_compiler_properties | properties.compiler_properties | additional_compiler_properties,
'HI-TECH-LINK': properties.linker_properties | additional_linker_properties,
'XC8-config-global': properties.xc8_properties | additional_xc8_properties,
}
for group_name, group_properties in group_properties_mapping.items():
for key, value in group_properties.items():
root.find(group_name).append(ET.Element('property', key=key, value=value))
# ET.dump(root)
return env.Value(root)
def _create_file_hierarchy(source_relpaths: list[str]):
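    """Group source file paths into a nested dict that mirrors their directory structure, with basenames mapping to full relative paths."""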
hierarchy = nested_dict()
# Put all entries into the hierarchy, keyed from dirname to basename
for source_relpath in sorted(source_relpaths):
dirname, basename = os.path.split(source_relpath)
hierarchy[dirname][basename] = source_relpath
# Split all directory keys further
while True:
found_nested = False
modified_hierarchy = nested_dict()
for parent_key, entries in hierarchy.items():
dirname, basename = os.path.split(parent_key)
if dirname:
merge(modified_hierarchy[dirname][basename], entries)
found_nested = True
else:
merge(modified_hierarchy[parent_key], entries)
hierarchy = modified_hierarchy
if not found_nested:
break
return hierarchy
def _build_xml_files(project_name: str, project_dir, confs: list, source_files: list[str]):
# Create the `configurations.xml` and `project.xml` file
configurations_xml_root = ET.fromstring(CONFIGURATIONS_XML_TEXT)
project_xml_root = ET.fromstring(PROJECT_XML_TEXT)
project_xml_root.set('xmlns', 'http://www.netbeans.org/ns/project/1')
project_xml_root.find('./configuration/data').set('xmlns', 'http://www.netbeans.org/ns/make-project/1')
project_xml_root.find('./configuration/data/name').text = project_name
# Add each configuration to the two XML files
for configuration_node in confs:
# Modify each configuration to make absolute paths relative to the project directory
modified_node = copy.deepcopy(configuration_node.read())
for includes_element in modified_node.findall('.//property[@key="extra-include-directories"]'):
includes_value = includes_element.get('value')
includes_relative = ';'.join([os.path.relpath(abspath, project_dir.abspath) for abspath in includes_value.split(';')])
includes_element.set('value', includes_relative)
configurations_xml_root.find('./confs').append(modified_node)
# Update the `project.xml` configuration list
project_conf_list_element = project_xml_root.find('./configuration/data/confList')
project_conf_elem_element = ET.SubElement(project_conf_list_element, 'confElem')
project_conf_name_element = ET.SubElement(project_conf_elem_element, 'name')
project_conf_name_element.text = configuration_node.read().get('name')
project_conf_text_element = ET.SubElement(project_conf_elem_element, 'text')
project_conf_text_element.text = '2'
# Generate the source root list, which will have a single root (common path for all sources)
common_root_path = os.path.commonpath([os.path.abspath(path) for path in source_files])
source_root_relpath = os.path.relpath(common_root_path, project_dir.abspath)
configurations_source_root_element = ET.Element('Elem')
configurations_source_root_element.text = source_root_relpath
configurations_xml_root.find('./sourceRootList').append(configurations_source_root_element)
project_source_root_element = ET.Element('sourceRootElem')
project_source_root_element.text = os.path.relpath(common_root_path, project_dir.abspath)
project_xml_root.find('./configuration/data/sourceRootList').append(project_source_root_element)
# Generate all logical folders and private files
root_logical_folder = configurations_xml_root.find('./logicalFolder[@name="root"]')
source_relpaths = [os.path.relpath(source_path, common_root_path) for source_path in source_files]
source_hierarchy = _create_file_hierarchy(source_relpaths)
def _walk_tree(parent_element: ET.Element, tree: dict):
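        """Recursively emit logicalFolder elements for directories and itemPath elements for files."""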
for key, data in tree.items():
if isinstance(data, dict):
folder_element = ET.SubElement(parent_element, 'logicalFolder', name=key, displayName=key, projectFiles="true")
_walk_tree(folder_element, data)
elif isinstance(data, str):
item_element = ET.SubElement(parent_element, 'itemPath')
item_element.text = os.path.relpath(data, project_dir.abspath)
_walk_tree(root_logical_folder, source_hierarchy)
# Generate an item for the build Makefile
ET.SubElement(root_logical_folder, 'itemPath').text = 'Makefile'
return (configurations_xml_root, project_xml_root)
def build_mplabx_nbproject(target, source, env):
'''
target - (singleton list) - Directory node to the project folder
source - (list) - XML value nodes for each project configuration
'''
project_dir = target[0]
nbproject_dir = project_dir.Dir('nbproject')
configurations_xml_file = nbproject_dir.File('configurations.xml')
project_xml_file = nbproject_dir.File('project.xml')
makefile_file = project_dir.File('Makefile')
# Make the directories
env.Execute(Mkdir(project_dir))
env.Execute(Mkdir(nbproject_dir))
# Generate the XML files
confs = source
configurations_xml_root, project_xml_root = _build_xml_files(
project_name=os.path.basename(str(project_dir)),
project_dir=project_dir,
confs=confs,
source_files=env['source_files'])
with open(str(configurations_xml_file), 'w') as f:
ET.indent(configurations_xml_root, space=' ')
ET.ElementTree(configurations_xml_root).write(f, encoding='unicode')
with open(str(project_xml_file), 'w') as f:
ET.indent(project_xml_root, space=' ')
ET.ElementTree(project_xml_root).write(f, encoding='unicode')
with open(str(makefile_file), 'w') as f:
f.write(MAKEFILE_TEXT)
_mplabx_nbproject_builder = SCons.Builder.Builder(action=build_mplabx_nbproject)
def generate(env):
env.AddMethod(build_mplabx_nbproject_configuration, 'MplabxNbprojectConfiguration')
env['BUILDERS']['MplabxNbproject'] = _mplabx_nbproject_builder
def exists(env):
    return 1
 | 35.996078 | 130 | 0.706613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,703 | 0.294477 |